xref: /openbmc/qemu/target/s390x/tcg/translate.c (revision 7ebbd9d0)
1 /*
2  *  S/390 translation
3  *
4  *  Copyright (c) 2009 Ulrich Hecht
5  *  Copyright (c) 2010 Alexander Graf
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
24 
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 #  define LOG_DISAS(...) qemu_log(__VA_ARGS__)
27 #else
28 #  define LOG_DISAS(...) do { } while (0)
29 #endif
30 
31 #include "qemu/osdep.h"
32 #include "cpu.h"
33 #include "s390x-internal.h"
34 #include "disas/disas.h"
35 #include "exec/exec-all.h"
36 #include "tcg/tcg-op.h"
37 #include "tcg/tcg-op-gvec.h"
38 #include "qemu/log.h"
39 #include "qemu/host-utils.h"
40 #include "exec/cpu_ldst.h"
41 #include "exec/helper-proto.h"
42 #include "exec/helper-gen.h"
43 
44 #include "exec/translator.h"
45 #include "exec/log.h"
46 #include "qemu/atomic128.h"
47 
48 #define HELPER_H "helper.h"
49 #include "exec/helper-info.c.inc"
50 #undef  HELPER_H
51 
52 
53 /* Information that (most) every instruction needs to manipulate.  */
54 typedef struct DisasContext DisasContext;
55 typedef struct DisasInsn DisasInsn;
56 typedef struct DisasFields DisasFields;
57 
58 /*
59  * Define a structure to hold the decoded fields.  We'll store each inside
60  * an array indexed by an enum.  In order to conserve memory, we'll arrange
61  * for fields that do not exist at the same time to overlap, thus the "C"
62  * for compact.  For checking purposes there is an "O" for original index
63  * as well that will be applied to availability bitmaps.
64  */
65 
/* "Original" field indices: one enumerator per distinct field name as it
   appears in the instruction formats.  Used as bit positions in the
   presentO availability bitmap.  */
enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_m5,
    FLD_O_m6,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5,
    FLD_O_v1,
    FLD_O_v2,
    FLD_O_v3,
    FLD_O_v4,
};
94 
/* "Compact" field indices: fields that never occur in the same format
   share a storage slot, so only NUM_C_FIELD ints are needed per insn.  */
enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,
    FLD_C_v1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,
    FLD_C_v3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,
    FLD_C_v4 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,
    FLD_C_m5 = 4,

    FLD_C_d2 = 5,
    FLD_C_m6 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,
    FLD_C_v2 = 6,

    NUM_C_FIELD = 7
};
131 
/* The decoded operand fields of one instruction.  */
struct DisasFields {
    uint64_t raw_insn;          /* the raw instruction bytes as fetched */
    unsigned op:8;              /* primary opcode */
    unsigned op2:8;             /* secondary opcode, where the format has one */
    unsigned presentC:16;       /* bitmap of occupied compact (C) slots */
    unsigned int presentO;      /* bitmap of present original (O) fields */
    int c[NUM_C_FIELD];         /* field values, indexed by DisasFieldIndexC */
};
140 
/* Per-translation-block state threaded through every decode routine.  */
struct DisasContext {
    DisasContextBase base;      /* common translator state (pc_next, tb, ...) */
    const DisasInsn *insn;      /* insn currently being translated */
    TCGOp *insn_start;          /* insn_start op, for retro-patching */
    DisasFields fields;         /* decoded operand fields of that insn */
    uint64_t ex_value;          /* EXECUTE target insn bits, or 0 */
    /*
     * During translate_one(), pc_tmp is used to determine the instruction
     * to be executed after base.pc_next - e.g. next sequential instruction
     * or a branch target.
     */
    uint64_t pc_tmp;
    uint32_t ilen;              /* length in bytes of the current insn */
    enum cc_op cc_op;           /* how the condition code is currently derived */
    bool exit_to_mainloop;      /* force exit to the main loop after this TB */
};
157 
158 /* Information carried about a condition to be evaluated.  */
typedef struct {
    TCGCond cond:8;             /* the comparison to generate */
    bool is_64;                 /* true: operands in u.s64, else in u.s32 */
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;
167 
168 #ifdef DEBUG_INLINE_BRANCHES
169 static uint64_t inline_branch_hit[CC_OP_MAX];
170 static uint64_t inline_branch_miss[CC_OP_MAX];
171 #endif
172 
/*
 * Compute the link value a branch-and-link stores for address PC:
 * the full 64-bit address in 64-bit mode; otherwise only the low 32
 * bits of OUT are replaced, with bit 32 of the address set in 31-bit
 * mode to record the addressing mode.
 */
static void pc_to_link_info(TCGv_i64 out, DisasContext *s, uint64_t pc)
{
    if (s->base.tb->flags & FLAG_MASK_32) {
        if (s->base.tb->flags & FLAG_MASK_64) {
            /* 64-bit addressing: the link value is the plain address.  */
            tcg_gen_movi_i64(out, pc);
            return;
        }
        /* 31-bit mode: the addressing-mode bit is set in the link value.  */
        pc |= 0x80000000;
    }
    /* 24/31-bit modes write only the low half; the high half is kept.  */
    assert(!(s->base.tb->flags & FLAG_MASK_64));
    tcg_gen_deposit_i64(out, out, tcg_constant_i64(pc), 0, 32);
}
185 
/* TCG globals mapping the bits of CPUS390XState the translator uses.  */
static TCGv_i64 psw_addr;           /* psw.addr: current instruction address */
static TCGv_i64 psw_mask;           /* psw.mask */
static TCGv_i64 gbea;               /* PER breaking-event address */

static TCGv_i32 cc_op;              /* cc computation kind, or the cc value */
static TCGv_i64 cc_src;             /* cc computation operands */
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

static char cpu_reg_names[16][4];   /* storage for the "r0".."r15" names */
static TCGv_i64 regs[16];           /* general registers */
197 
/*
 * Allocate the TCG globals declared above, binding each to its field in
 * CPUS390XState: PSW address and mask, the PER breaking-event address,
 * the condition-code tracking fields, and the 16 general registers.
 * Called once during target initialization.
 */
void s390x_translate_init(void)
{
    int i;

    psw_addr = tcg_global_mem_new_i64(tcg_env,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(tcg_env,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");
    gbea = tcg_global_mem_new_i64(tcg_env,
                                  offsetof(CPUS390XState, gbea),
                                  "gbea");

    cc_op = tcg_global_mem_new_i32(tcg_env, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(tcg_env, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(tcg_env, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(tcg_env, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(tcg_env,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }
}
228 
229 static inline int vec_full_reg_offset(uint8_t reg)
230 {
231     g_assert(reg < 32);
232     return offsetof(CPUS390XState, vregs[reg][0]);
233 }
234 
235 static inline int vec_reg_offset(uint8_t reg, uint8_t enr, MemOp es)
236 {
237     /* Convert element size (es) - e.g. MO_8 - to bytes */
238     const uint8_t bytes = 1 << es;
239     int offs = enr * bytes;
240 
241     /*
242      * vregs[n][0] is the lowest 8 byte and vregs[n][1] the highest 8 byte
243      * of the 16 byte vector, on both, little and big endian systems.
244      *
245      * Big Endian (target/possible host)
246      * B:  [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15]
247      * HW: [     0][     1][     2][     3] - [     4][     5][     6][     7]
248      * W:  [             0][             1] - [             2][             3]
249      * DW: [                             0] - [                             1]
250      *
251      * Little Endian (possible host)
252      * B:  [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8]
253      * HW: [     3][     2][     1][     0] - [     7][     6][     5][     4]
254      * W:  [             1][             0] - [             3][             2]
255      * DW: [                             0] - [                             1]
256      *
257      * For 16 byte elements, the two 8 byte halves will not form a host
258      * int128 if the host is little endian, since they're in the wrong order.
259      * Some operations (e.g. xor) do not care. For operations like addition,
260      * the two 8 byte elements have to be loaded separately. Let's force all
261      * 16 byte operations to handle it in a special way.
262      */
263     g_assert(es <= MO_64);
264 #if !HOST_BIG_ENDIAN
265     offs ^= (8 - bytes);
266 #endif
267     return offs + vec_full_reg_offset(reg);
268 }
269 
270 static inline int freg64_offset(uint8_t reg)
271 {
272     g_assert(reg < 16);
273     return vec_reg_offset(reg, 0, MO_64);
274 }
275 
276 static inline int freg32_offset(uint8_t reg)
277 {
278     g_assert(reg < 16);
279     return vec_reg_offset(reg, 0, MO_32);
280 }
281 
/* Return a fresh temp holding the value of general register REG.  */
static TCGv_i64 load_reg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;
}

/* Return a fresh temp holding 64-bit float register REG.  */
static TCGv_i64 load_freg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld_i64(r, tcg_env, freg64_offset(reg));
    return r;
}

/* Return a fresh temp with 32-bit float register REG, zero-extended.  */
static TCGv_i64 load_freg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld32u_i64(r, tcg_env, freg32_offset(reg));
    return r;
}

/* Load the float-register pair REG (high) / REG+2 (low) as one i128.  */
static TCGv_i128 load_freg_128(int reg)
{
    TCGv_i64 h = load_freg(reg);
    TCGv_i64 l = load_freg(reg + 2);
    TCGv_i128 r = tcg_temp_new_i128();

    tcg_gen_concat_i64_i128(r, l, h);
    return r;
}

/* Store V into general register REG.  */
static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

/* Store V into 64-bit float register REG.  */
static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_st_i64(v, tcg_env, freg64_offset(reg));
}

static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}

/* Store the low 32 bits of V into the HIGH half of register REG.  */
static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}

/* Store the low 32 bits of V into 32-bit float register REG.  */
static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_st32_i64(v, tcg_env, freg32_offset(reg));
}
340 
/* Synchronize psw.addr with the translator's current instruction address.  */
static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->base.pc_next);
}
346 
/*
 * Record the current instruction as the breaking-event address and, when
 * PER is active, raise a PER branch event.  TO_NEXT selects s->pc_tmp
 * (the insn following this one) as the branch target instead of the
 * runtime psw_addr.  No-op in user-only builds.
 */
static void per_branch(DisasContext *s, bool to_next)
{
#ifndef CONFIG_USER_ONLY
    tcg_gen_movi_i64(gbea, s->base.pc_next);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 next_pc = to_next ? tcg_constant_i64(s->pc_tmp) : psw_addr;
        gen_helper_per_branch(tcg_env, gbea, next_pc);
    }
#endif
}

/*
 * As per_branch, but for a branch taken only when COND(ARG1, ARG2) holds.
 * With PER enabled the helper runs on the taken path only (skipped via a
 * branch over it); without PER only gbea is updated, via movcond.
 */
static void per_branch_cond(DisasContext *s, TCGCond cond,
                            TCGv_i64 arg1, TCGv_i64 arg2)
{
#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGLabel *lab = gen_new_label();
        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);

        tcg_gen_movi_i64(gbea, s->base.pc_next);
        gen_helper_per_branch(tcg_env, gbea, psw_addr);

        gen_set_label(lab);
    } else {
        TCGv_i64 pc = tcg_constant_i64(s->base.pc_next);
        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
    }
#endif
}

/* Record the current instruction as the PER breaking-event address.  */
static void per_breaking_event(DisasContext *s)
{
    tcg_gen_movi_i64(gbea, s->base.pc_next);
}

/*
 * Flush the statically-tracked cc computation kind to env->cc_op.
 * CC_OP_DYNAMIC/CC_OP_STATIC mean env->cc_op is already up to date.
 */
static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}
389 
/* Fetch 2 code bytes at PC, zero-extended to 64 bits.  */
static inline uint64_t ld_code2(CPUS390XState *env, DisasContext *s,
                                uint64_t pc)
{
    return (uint64_t)translator_lduw(env, &s->base, pc);
}

/* Fetch 4 code bytes at PC, zero-extended to 64 bits.  */
static inline uint64_t ld_code4(CPUS390XState *env, DisasContext *s,
                                uint64_t pc)
{
    return (uint64_t)(uint32_t)translator_ldl(env, &s->base, pc);
}
401 
402 static int get_mem_index(DisasContext *s)
403 {
404 #ifdef CONFIG_USER_ONLY
405     return MMU_USER_IDX;
406 #else
407     if (!(s->base.tb->flags & FLAG_MASK_DAT)) {
408         return MMU_REAL_IDX;
409     }
410 
411     switch (s->base.tb->flags & FLAG_MASK_ASC) {
412     case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
413         return MMU_PRIMARY_IDX;
414     case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
415         return MMU_SECONDARY_IDX;
416     case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
417         return MMU_HOME_IDX;
418     default:
419         g_assert_not_reached();
420         break;
421     }
422 #endif
423 }
424 
/* Raise exception EXCP via the exception helper (does not return).  */
static void gen_exception(int excp)
{
    gen_helper_exception(tcg_env, tcg_constant_i32(excp));
}

/*
 * Raise program exception CODE: record the code and instruction length
 * in env, synchronize psw.addr and the cc state, then trap with EXCP_PGM.
 */
static void gen_program_exception(DisasContext *s, int code)
{
    /* Remember what pgm exception this was.  */
    tcg_gen_st_i32(tcg_constant_i32(code), tcg_env,
                   offsetof(CPUS390XState, int_pgm_code));

    tcg_gen_st_i32(tcg_constant_i32(s->ilen), tcg_env,
                   offsetof(CPUS390XState, int_pgm_ilen));

    /* update the psw */
    update_psw_addr(s);

    /* Save off cc.  */
    update_cc_op(s);

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);
}

/* Raise an operation (illegal-opcode) program exception.  */
static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}

/* Raise a data exception with data-exception code DXC.  */
static inline void gen_data_exception(uint8_t dxc)
{
    gen_helper_data_exception(tcg_env, tcg_constant_i32(dxc));
}

/* Raise the trap variant of the data exception.  */
static inline void gen_trap(DisasContext *s)
{
    /* Set DXC to 0xff */
    gen_data_exception(0xff);
}
464 
/*
 * DST = SRC + IMM, wrapped to the current addressing mode: 31 bits in
 * 31-bit mode, 24 bits in 24-bit mode, unwrapped under FLAG_MASK_64.
 */
static void gen_addi_and_wrap_i64(DisasContext *s, TCGv_i64 dst, TCGv_i64 src,
                                  int64_t imm)
{
    tcg_gen_addi_i64(dst, src, imm);
    if (!(s->base.tb->flags & FLAG_MASK_64)) {
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_andi_i64(dst, dst, 0x7fffffff);
        } else {
            tcg_gen_andi_i64(dst, dst, 0x00ffffff);
        }
    }
}

/*
 * Compute the effective address base(B2) + index(X2) + displacement D2
 * into a fresh temp, wrapped to the current addressing mode.  Register
 * number 0 means "no base"/"no index", per the architecture.
 */
static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /*
     * Note that d2 is limited to 20 bits, signed.  If we crop negative
     * displacements early we create larger immediate addends.
     */
    if (b2 && x2) {
        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
        gen_addi_and_wrap_i64(s, tmp, tmp, d2);
    } else if (b2) {
        gen_addi_and_wrap_i64(s, tmp, regs[b2], d2);
    } else if (x2) {
        gen_addi_and_wrap_i64(s, tmp, regs[x2], d2);
    } else if (!(s->base.tb->flags & FLAG_MASK_64)) {
        /* Displacement only: the mode mask can be applied at build time.  */
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_movi_i64(tmp, d2 & 0x7fffffff);
        } else {
            tcg_gen_movi_i64(tmp, d2 & 0x00ffffff);
        }
    } else {
        tcg_gen_movi_i64(tmp, d2);
    }

    return tmp;
}
505 
/*
 * True if cc_src/cc_dst/cc_vr currently carry data a new cc setting
 * would clobber.  Ops <= 3 (the constant-cc ops), CC_OP_STATIC and
 * CC_OP_DYNAMIC keep nothing in those globals.
 */
static inline bool live_cc_data(DisasContext *s)
{
    return (s->cc_op != CC_OP_DYNAMIC
            && s->cc_op != CC_OP_STATIC
            && s->cc_op > 3);
}

/* Set the cc to the constant VAL (0..3), discarding stale operand data.  */
static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}

/* Track cc as OP of one operand, stored in cc_dst.  */
static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

/* Track cc as OP of two operands, stored in cc_src and cc_dst.  */
static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

/* Track cc as OP of three operands, stored in cc_src/cc_dst/cc_vr.  */
static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}

/* Track cc as "zero / not zero" of VAL.  */
static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}

/* CC value is in env->cc_op */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}
568 
/* calculates cc into cc_op */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = NULL;
    TCGv_i64 dummy = NULL;

    /* First pass: materialize the constants the helper calls below need.  */
    switch (s->cc_op) {
    default:
        dummy = tcg_constant_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
        local_cc_op = tcg_constant_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        /* No helper call is made for these; nothing to materialize.  */
        break;
    }

    /* Second pass: compute cc into the cc_op global.  */
    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
        /* Computed inline: cc = (cc_dst != 0).  */
        tcg_gen_setcondi_i64(TCG_COND_NE, cc_dst, cc_dst, 0);
        tcg_gen_extrl_i64_i32(cc_op, cc_dst);
        break;
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
    case CC_OP_LCBB:
    case CC_OP_MULS_32:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, tcg_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ADDU:
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA:
    case CC_OP_SUBU:
    case CC_OP_NZ_F128:
    case CC_OP_VC:
    case CC_OP_MULS_64:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, tcg_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, tcg_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, tcg_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        g_assert_not_reached();
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
659 
/*
 * Whether a direct goto_tb to DEST is permitted.  Chaining is disabled
 * under PER so every branch remains observable to the event logic.
 */
static bool use_goto_tb(DisasContext *s, uint64_t dest)
{
    if (unlikely(s->base.tb->flags & FLAG_MASK_PER)) {
        return false;
    }
    return translator_use_goto_tb(&s->base, dest);
}

/* Count a branch that could not be evaluated inline (debug stats only).  */
static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

/* Count a branch that was evaluated inline (debug stats only).  */
static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}
681 
/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.
   Indexed by the 4-bit branch mask (bit 8 = cc0/EQ, 4 = cc1/LT,
   2 = cc2/GT, 1 = cc3); entries come in pairs because bit 0 is
   don't-care.  */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};

/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible; the low two mask
   bits are therefore don't-care (four entries per condition).  */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER, TCG_COND_NEVER,      /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER,
    TCG_COND_NE, TCG_COND_NE,            /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE,
    TCG_COND_EQ, TCG_COND_EQ,            /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
707 
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    /* Trivial cases: branch-always and branch-never.  */
    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op.  */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        /* Same mask table, with the unsigned variant of each condition.  */
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        /* TEST UNDER MASK: only "all selected bits zero" (cc 0) and its
           complement can be tested inline.  */
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU:
    case CC_OP_SUBU:
        /* cc pairs one result bit with one carry/borrow bit; only the
           four pure tests can be evaluated inline.  */
        switch (mask) {
        case 8 | 2: /* result == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* result != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* !carry (borrow) */
            cond = old_cc_op == CC_OP_ADDU ? TCG_COND_EQ : TCG_COND_NE;
            break;
        case 2 | 1: /* carry (!borrow) */
            cond = old_cc_op == CC_OP_ADDU ? TCG_COND_NE : TCG_COND_EQ;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_constant_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_constant_i64(0);
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        /* Compare (value & mask) against zero.  */
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_constant_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_ADDU:
    case CC_OP_SUBU:
        c->is_64 = true;
        c->u.s64.b = tcg_constant_i64(0);
        switch (mask) {
        case 8 | 2:
        case 4 | 1: /* result */
            c->u.s64.a = cc_dst;
            break;
        case 8 | 4:
        case 2 | 1: /* carry */
            c->u.s64.a = cc_src;
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case CC_OP_STATIC:
        /* The cc value itself is in cc_op; special-case each useful mask.  */
        c->is_64 = false;
        c->u.s32.a = cc_op;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_constant_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_constant_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_constant_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_constant_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_constant_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_constant_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_constant_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_constant_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask.  */
            cond = TCG_COND_NE;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_constant_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, tcg_constant_i32(8), cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
961 
/* ====================================================================== */
/* Define the insn format enumeration.  */
/* Each Fn macro maps a format description to one FMT_<name> enumerator;
   the operand descriptors X1..X6 are ignored at this point and are only
   used when the same include builds format_info below.  */
#define F0(N)                         FMT_##N,
#define F1(N, X1)                     F0(N)
#define F2(N, X1, X2)                 F0(N)
#define F3(N, X1, X2, X3)             F0(N)
#define F4(N, X1, X2, X3, X4)         F0(N)
#define F5(N, X1, X2, X3, X4, X5)     F0(N)
#define F6(N, X1, X2, X3, X4, X5, X6) F0(N)

typedef enum {
#include "insn-format.h.inc"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6
983 
/* This is the way fields are to be accessed out of DisasFields.  */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)

/* Whether original-index field C was present in the decoded insn.  */
static bool have_field1(const DisasContext *s, enum DisasFieldIndexO c)
{
    return (s->fields.presentO >> c) & 1;
}

/* Fetch a field value: the O index checks presence, the C index reads
   the compact storage slot.  */
static int get_field1(const DisasContext *s, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(s, o));
    return s->fields.c[c];
}
998 }
999 
/* Describe the layout of each field in each format.  */
typedef struct DisasField {
    unsigned int beg:8;         /* leftmost bit position within the insn */
    unsigned int size:8;        /* field width in bits */
    unsigned int type:2;        /* extraction kind; per the macros below:
                                   0 = unsigned, 1 = signed immediate,
                                   2 = 20-bit long displacement, 3 = vector
                                   register — TODO confirm against the
                                   extractor, which is not in this chunk */
    unsigned int indexC:6;      /* compact storage slot (DisasFieldIndexC) */
    enum DisasFieldIndexO indexO:8;  /* original field id (DisasFieldIndexO) */
} DisasField;

typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD]; /* up to NUM_C_FIELD fields per format */
} DisasFormatInfo;
1012 
/* Field-layout building blocks: each macro expands to one or more
   DisasField initializers {beg, size, type, indexC, indexO}.  */
#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
#define V(N, B)       {  B,  4, 3, FLD_C_v##N, FLD_O_v##N }
#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }

/* Re-expand the format list into one DisasField array per format.  */
#define F0(N)                     { { } },
#define F1(N, X1)                 { { X1 } },
#define F2(N, X1, X2)             { { X1, X2 } },
#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
#define F6(N, X1, X2, X3, X4, X5, X6)       { { X1, X2, X3, X4, X5, X6 } },

static const DisasFormatInfo format_info[] = {
#include "insn-format.h.inc"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6
#undef R
#undef M
#undef V
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L
1057 
/* Generally, we'll extract operands into this structures, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details.  */
typedef struct {
    TCGv_i64 out, out2, in1, in2;        /* 64-bit operand values */
    TCGv_i64 addr1;                      /* address of a storage operand */
    TCGv_i128 out_128, in1_128, in2_128; /* 128-bit operands (e.g. f128) */
} DisasOps;

/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>.  */

#define SPEC_r1_even    1
#define SPEC_r2_even    2
#define SPEC_r3_even    4
#define SPEC_r1_f128    8
#define SPEC_r2_f128    16

/* Return values from translate_one, indicating the state of the TB.  */

/* We are not using a goto_tb (for whatever reason), but have updated
   the PC (for whatever reason), so there's no need to do it again on
   exiting the TB.  */
#define DISAS_PC_UPDATED        DISAS_TARGET_0

/* We have updated the PC and CC values.  */
#define DISAS_PC_CC_UPDATED     DISAS_TARGET_2


/* Instruction flags */
#define IF_AFP1     0x0001      /* r1 is a fp reg for HFP/FPS instructions */
#define IF_AFP2     0x0002      /* r2 is a fp reg for HFP/FPS instructions */
#define IF_AFP3     0x0004      /* r3 is a fp reg for HFP/FPS instructions */
#define IF_BFP      0x0008      /* binary floating point instruction */
#define IF_DFP      0x0010      /* decimal floating point instruction */
#define IF_PRIV     0x0020      /* privileged instruction */
#define IF_VEC      0x0040      /* vector instruction */
#define IF_IO       0x0080      /* input/output instruction */

/* Static decode-table entry describing one instruction. */
struct DisasInsn {
    unsigned opc:16;        /* opcode bits used for lookup */
    unsigned flags:16;      /* IF_* instruction flags */
    DisasFormat fmt:8;      /* FMT_* instruction format */
    unsigned fac:8;         /* facility id -- presumably checked at decode */
    unsigned spec:8;        /* SPEC_* operand constraints */

    const char *name;

    /* Pre-process arguments before HELP_OP.  */
    void (*help_in1)(DisasContext *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasOps *);

    /*
     * Post-process output after HELP_OP.
     * Note that these are not called if HELP_OP returns DISAS_NORETURN.
     */
    void (*help_wout)(DisasContext *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);

    /* Implement the operation itself.  */
    DisasJumpType (*help_op)(DisasContext *, DisasOps *);

    /* Opaque per-insn payload interpreted by the helpers above.  */
    uint64_t data;
};
1126 
1127 /* ====================================================================== */
1128 /* Miscellaneous helpers, used by several operations.  */
1129 
/*
 * Emit an unconditional direct branch to DEST, preferring a goto_tb
 * chain when allowed.  Returns the DisasJumpType for translate_one.
 */
static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->pc_tmp) {
        /* Branch to the next sequential insn: fall through, notify PER. */
        per_branch(s, true);
        return DISAS_NEXT;
    }
    if (use_goto_tb(s, dest)) {
        /* Chainable exit: flush cc state and take TB slot 0. */
        update_cc_op(s);
        per_breaking_event(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb(s->base.tb, 0);
        return DISAS_NORETURN;
    } else {
        /* Cannot chain: just set the PSW address and exit via the epilogue. */
        tcg_gen_movi_i64(psw_addr, dest);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    }
}
1149 
/*
 * Emit a conditional branch.  C describes the condition; the taken
 * target is either the PC-relative immediate IMM (when IS_IMM) or the
 * computed address CDEST.  Trivial conditions (never/always taken,
 * branch-to-next) are peeled off first; otherwise both exits use
 * goto_tb when possible, falling back to a movcond on psw_addr.
 */
static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
                                 bool is_imm, int imm, TCGv_i64 cdest)
{
    DisasJumpType ret;
    uint64_t dest = s->base.pc_next + (int64_t)imm * 2;
    TCGLabel *lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = DISAS_NEXT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->pc_tmp) {
            /* Branch to next.  */
            per_branch(s, true);
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (!cdest) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            per_branch(s, false);
            ret = DISAS_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->pc_tmp)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            /* Branch taken.  */
            gen_set_label(lab);
            per_breaking_event(s);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb(s->base.tb, 1);

            ret = DISAS_NORETURN;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            per_breaking_event(s);
            ret = DISAS_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_constant_i64(s->pc_tmp);
        if (is_imm) {
            cdest = tcg_constant_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
        } else {
            /* Widen the 32-bit comparison result so a single 64-bit
               movcond can select between the two targets.  */
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_constant_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
        }

        ret = DISAS_PC_UPDATED;
    }

 egress:
    return ret;
}
1271 
1272 /* ====================================================================== */
1273 /* The operations.  These perform the bulk of the work for any insn,
1274    usually after the operands have been loaded and output initialized.  */
1275 
/* out = |in2|, as a signed 64-bit value. */
static DisasJumpType op_abs(DisasContext *s, DisasOps *o)
{
    tcg_gen_abs_i64(o->out, o->in2);
    return DISAS_NEXT;
}

/* float32 abs: clear bit 31 (the sign bit of the value in the low word). */
static DisasJumpType op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return DISAS_NEXT;
}

/* float64 abs: clear bit 63. */
static DisasJumpType op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return DISAS_NEXT;
}

/* float128 abs: clear the sign bit in the high doubleword, copy the low. */
static DisasJumpType op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}

/* out = in1 + in2. */
static DisasJumpType op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
1306 
/* Unsigned 64-bit add; the carry-out (0 or 1) is left in cc_src. */
static DisasJumpType op_addu64(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    return DISAS_NEXT;
}

/* Compute carry into cc_src. */
static void compute_carry(DisasContext *s)
{
    switch (s->cc_op) {
    case CC_OP_ADDU:
        /* The carry value is already in cc_src (1,0). */
        break;
    case CC_OP_SUBU:
        /* NOTE(review): the +1 converts the SUBU borrow representation
           in cc_src into a carry (0,1) -- confirm against CC_OP_SUBU. */
        tcg_gen_addi_i64(cc_src, cc_src, 1);
        break;
    default:
        gen_op_calc_cc(s);
        /* fall through */
    case CC_OP_STATIC:
        /* The carry flag is the msb of CC; compute into cc_src. */
        tcg_gen_extu_i32_i64(cc_src, cc_op);
        tcg_gen_shri_i64(cc_src, cc_src, 1);
        break;
    }
}
1334 
/* ADD WITH CARRY, 32-bit: out = in1 + in2 + carry (from compute_carry). */
static DisasJumpType op_addc32(DisasContext *s, DisasOps *o)
{
    compute_carry(s);
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    tcg_gen_add_i64(o->out, o->out, cc_src);
    return DISAS_NEXT;
}

/* ADD WITH CARRY, 64-bit: two add2 steps so the carry-out of the full
   in1 + carry-in + in2 sum ends up in cc_src. */
static DisasJumpType op_addc64(DisasContext *s, DisasOps *o)
{
    compute_carry(s);

    TCGv_i64 zero = tcg_constant_i64(0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, zero);
    tcg_gen_add2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);

    return DISAS_NEXT;
}
1353 
/*
 * ADD to storage (ASI family).  With facility bit 45 installed the
 * read-modify-write is performed atomically in memory; otherwise it is
 * emitted as separate load, add, and store.  s->insn->data is the MemOp.
 */
static DisasJumpType op_asi(DisasContext *s, DisasOps *o)
{
    bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);

    o->in1 = tcg_temp_new_i64();
    if (non_atomic) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    if (non_atomic) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

/* As op_asi, but the logical (unsigned) variant: carry-out goes to cc_src. */
static DisasJumpType op_asiu64(DisasContext *s, DisasOps *o)
{
    bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);

    o->in1 = tcg_temp_new_i64();
    if (non_atomic) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);

    if (non_atomic) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
1398 
/* BFP add, short format, via the softfloat helper. */
static DisasJumpType op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* BFP add, long format. */
static DisasJumpType op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* BFP add, extended (128-bit) format. */
static DisasJumpType op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out_128, tcg_env, o->in1_128, o->in2_128);
    return DISAS_NEXT;
}

/* out = in1 & in2. */
static DisasJumpType op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
1422 
1423 static DisasJumpType op_andi(DisasContext *s, DisasOps *o)
1424 {
1425     int shift = s->insn->data & 0xff;
1426     int size = s->insn->data >> 8;
1427     uint64_t mask = ((1ull << size) - 1) << shift;
1428     TCGv_i64 t = tcg_temp_new_i64();
1429 
1430     tcg_gen_shli_i64(t, o->in2, shift);
1431     tcg_gen_ori_i64(t, t, ~mask);
1432     tcg_gen_and_i64(o->out, o->in1, t);
1433 
1434     /* Produce the CC from only the bits manipulated.  */
1435     tcg_gen_andi_i64(cc_dst, o->out, mask);
1436     set_cc_nz_u64(s, cc_dst);
1437     return DISAS_NEXT;
1438 }
1439 
/* out = in1 & ~in2. */
static DisasJumpType op_andc(DisasContext *s, DisasOps *o)
{
    tcg_gen_andc_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* out = in1 | ~in2. */
static DisasJumpType op_orc(DisasContext *s, DisasOps *o)
{
    tcg_gen_orc_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* out = ~(in1 & in2). */
static DisasJumpType op_nand(DisasContext *s, DisasOps *o)
{
    tcg_gen_nand_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* out = ~(in1 | in2). */
static DisasJumpType op_nor(DisasContext *s, DisasOps *o)
{
    tcg_gen_nor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* out = ~(in1 ^ in2). */
static DisasJumpType op_nxor(DisasContext *s, DisasOps *o)
{
    tcg_gen_eqv_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
1469 
1470 static DisasJumpType op_ni(DisasContext *s, DisasOps *o)
1471 {
1472     o->in1 = tcg_temp_new_i64();
1473 
1474     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
1475         tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
1476     } else {
1477         /* Perform the atomic operation in memory. */
1478         tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
1479                                      s->insn->data);
1480     }
1481 
1482     /* Recompute also for atomic case: needed for setting CC. */
1483     tcg_gen_and_i64(o->out, o->in1, o->in2);
1484 
1485     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
1486         tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
1487     }
1488     return DISAS_NEXT;
1489 }
1490 
/* BRANCH AND SAVE: store the link information, then branch to in2.
   in2 is NULL when no branch target was supplied, in which case this
   degenerates to just saving the link info. */
static DisasJumpType op_bas(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}

/*
 * Build the BAL-style link information in o->out.  In 31/64-bit mode
 * this is the plain link address.  In 24-bit mode the high byte holds
 * ILC (bits 31-30), CC (bits 29-28) and the program mask (bits 27-24).
 */
static void save_link_info(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t;

    if (s->base.tb->flags & (FLAG_MASK_32 | FLAG_MASK_64)) {
        pc_to_link_info(o->out, s, s->pc_tmp);
        return;
    }
    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, 0xffffffff00000000ull);
    /* ILC (insn length in halfwords) in bits 31-30, plus the address. */
    tcg_gen_ori_i64(o->out, o->out, ((s->ilen / 2) << 30) | s->pc_tmp);
    t = tcg_temp_new_i64();
    /* Program mask from the PSW into bits 27-24. */
    tcg_gen_shri_i64(t, psw_mask, 16);
    tcg_gen_andi_i64(t, t, 0x0f000000);
    tcg_gen_or_i64(o->out, o->out, t);
    /* Condition code into bits 29-28. */
    tcg_gen_extu_i32_i64(t, cc_op);
    tcg_gen_shli_i64(t, t, 28);
    tcg_gen_or_i64(o->out, o->out, t);
}

/* BRANCH AND LINK: like op_bas but with the full BAL link information. */
static DisasJumpType op_bal(DisasContext *s, DisasOps *o)
{
    save_link_info(s, o);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}
1534 
1535 /*
1536  * Disassemble the target of a branch. The results are returned in a form
1537  * suitable for passing into help_branch():
1538  *
1539  * - bool IS_IMM reflects whether the target is fixed or computed. Non-EXECUTEd
1540  *   branches, whose DisasContext *S contains the relative immediate field RI,
1541  *   are considered fixed. All the other branches are considered computed.
1542  * - int IMM is the value of RI.
1543  * - TCGv_i64 CDEST is the address of the computed target.
1544  */
1545 #define disas_jdest(s, ri, is_imm, imm, cdest) do {                            \
1546     if (have_field(s, ri)) {                                                   \
1547         if (unlikely(s->ex_value)) {                                           \
1548             cdest = tcg_temp_new_i64();                                        \
1549             tcg_gen_ld_i64(cdest, tcg_env, offsetof(CPUS390XState, ex_target));\
1550             tcg_gen_addi_i64(cdest, cdest, (int64_t)get_field(s, ri) * 2);     \
1551             is_imm = false;                                                    \
1552         } else {                                                               \
1553             is_imm = true;                                                     \
1554         }                                                                      \
1555     } else {                                                                   \
1556         is_imm = false;                                                        \
1557     }                                                                          \
1558     imm = is_imm ? get_field(s, ri) : 0;                                       \
1559 } while (false)
1560 
/* BRANCH RELATIVE AND SAVE: save link info, then branch unconditionally
   (mask 0xf) to the i2-relative target. */
static DisasJumpType op_basi(DisasContext *s, DisasOps *o)
{
    DisasCompare c;
    bool is_imm;
    int imm;

    pc_to_link_info(o->out, s, s->pc_tmp);

    disas_jdest(s, i2, is_imm, imm, o->in2);
    disas_jcc(s, &c, 0xf);
    return help_branch(s, &c, is_imm, imm, o->in2);
}

/* BRANCH ON CONDITION (BC/BCR and relative forms), mask in m1. */
static DisasJumpType op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s, m1);
    DisasCompare c;
    bool is_imm;
    int imm;

    /* BCR with R2 = 0 causes no branching */
    if (have_field(s, r2) && get_field(s, r2) == 0) {
        if (m1 == 14) {
            /* Perform serialization */
            /* FIXME: check for fast-BCR-serialization facility */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        if (m1 == 15) {
            /* Perform serialization */
            /* FIXME: perform checkpoint-synchronisation */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        return DISAS_NEXT;
    }

    disas_jdest(s, i2, is_imm, imm, o->in2);
    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1600 
/* BRANCH ON COUNT, 32-bit: decrement the low word of r1 and branch if
   the result is nonzero. */
static DisasJumpType op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    DisasCompare c;
    bool is_imm;
    TCGv_i64 t;
    int imm;

    c.cond = TCG_COND_NE;
    c.is_64 = false;

    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_constant_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);

    disas_jdest(s, i2, is_imm, imm, o->in2);
    return help_branch(s, &c, is_imm, imm, o->in2);
}

/* BRANCH ON COUNT HIGH: as op_bct32 but operating on the high word of r1;
   the target is always the i2-relative immediate. */
static DisasJumpType op_bcth(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int imm = get_field(s, i2);
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;

    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, regs[r1], 32);
    tcg_gen_subi_i64(t, t, 1);
    store_reg32h_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_constant_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);

    return help_branch(s, &c, 1, imm, o->in2);
}

/* BRANCH ON COUNT, 64-bit: decrement r1 in place and branch if nonzero. */
static DisasJumpType op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    DisasCompare c;
    bool is_imm;
    int imm;

    c.cond = TCG_COND_NE;
    c.is_64 = true;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_constant_i64(0);

    disas_jdest(s, i2, is_imm, imm, o->in2);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1661 
/*
 * BRANCH ON INDEX, 32-bit (BXH/BXLE family): add r3 to r1 and compare
 * the sum against the comparand in the odd register of the r3 pair.
 * insn->data selects LE (branch-on-low-or-equal) vs GT (branch-on-high).
 */
static DisasJumpType op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    DisasCompare c;
    bool is_imm;
    TCGv_i64 t;
    int imm;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);

    disas_jdest(s, i2, is_imm, imm, o->in2);
    return help_branch(s, &c, is_imm, imm, o->in2);
}

/* BRANCH ON INDEX, 64-bit.  See op_bx32. */
static DisasJumpType op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    DisasCompare c;
    bool is_imm;
    int imm;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    /* If r1 aliases the comparand register, copy the comparand before
       the add below overwrites it. */
    if (r1 == (r3 | 1)) {
        c.u.s64.b = load_reg(r3 | 1);
    } else {
        c.u.s64.b = regs[r3 | 1];
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];

    disas_jdest(s, i2, is_imm, imm, o->in2);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1709 
/*
 * COMPARE AND BRANCH family: compare in1 against in2 with the condition
 * taken from ltgt_cond[m3] (unsigned when insn->data is set), branching
 * to either the i4-relative target or, for the storage form, the address
 * formed from b4/d4.
 */
static DisasJumpType op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    o->out = NULL;
    disas_jdest(s, i4, is_imm, imm, o->out);
    if (!is_imm && !o->out) {
        /* No relative field decoded: the target comes from b4/d4. */
        imm = 0;
        o->out = get_address(s, 0, get_field(s, b4),
                             get_field(s, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}
1734 
/* BFP compare, short format; the helper computes the CC. */
static DisasJumpType op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, tcg_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* BFP compare, long format. */
static DisasJumpType op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, tcg_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* BFP compare, extended (128-bit) format. */
static DisasJumpType op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, tcg_env, o->in1_128, o->in2_128);
    set_cc_static(s);
    return DISAS_NEXT;
}
1755 
/*
 * Pack the m3 (rounding mode) and m4 modifier fields into a single i32
 * (m4 in bits 4-7) for passing to an fp helper.  Fields that predate the
 * floating-point-extension facility are forced to 0 when FPE is absent,
 * as directed by m3_with_fpe/m4_with_fpe.  Returns NULL after raising a
 * specification exception for an invalid rounding mode.
 */
static TCGv_i32 fpinst_extract_m34(DisasContext *s, bool m3_with_fpe,
                                   bool m4_with_fpe)
{
    const bool fpe = s390_has_feat(S390_FEAT_FLOATING_POINT_EXT);
    uint8_t m3 = get_field(s, m3);
    uint8_t m4 = get_field(s, m4);

    /* m3 field was introduced with FPE */
    if (!fpe && m3_with_fpe) {
        m3 = 0;
    }
    /* m4 field was introduced with FPE */
    if (!fpe && m4_with_fpe) {
        m4 = 0;
    }

    /* Check for valid rounding modes. Mode 3 was introduced later. */
    if (m3 == 2 || m3 > 7 || (!fpe && m3 == 3)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return NULL;
    }

    return tcg_constant_i32(deposit32(m3, 4, 4, m4));
}
1780 
/*
 * CONVERT TO FIXED: BFP source to signed 32-bit (cf*) or 64-bit (cg*)
 * integer, for short/long/extended sources.  The rounding mode comes
 * from m3 (validated by fpinst_extract_m34); the helpers set the CC.
 */
static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfeb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfdb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfxb(o->out, tcg_env, o->in2_128, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgeb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgdb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgxb(o->out, tcg_env, o->in2_128, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
1852 
/*
 * CONVERT TO LOGICAL: BFP source to unsigned 32-bit (clf*) or 64-bit
 * (clg*) integer, for short/long/extended sources; helpers set the CC.
 */
static DisasJumpType op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfeb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfdb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfxb(o->out, tcg_env, o->in2_128, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgeb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgdb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgxb(o->out, tcg_env, o->in2_128, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
1924 
/*
 * CONVERT FROM FIXED (c*gb) / CONVERT FROM LOGICAL (c*lgb): 64-bit
 * signed or unsigned integer to short/long/extended BFP.  No CC is set.
 */
static DisasJumpType op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cegb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cdgb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cxgb(o->out_128, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

static DisasJumpType op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_celgb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cdlgb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cxlgb(o->out_128, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}
1990 
/* CHECKSUM: the helper packs its two 64-bit results into an i128;
   the low half goes to out (the checksum register), the high half is
   the number of bytes processed, used to advance the R2 pair.  */
static DisasJumpType op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);
    TCGv_i128 pair = tcg_temp_new_i128();
    TCGv_i64 len = tcg_temp_new_i64();

    gen_helper_cksm(pair, tcg_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    tcg_gen_extr_i128_i64(o->out, len, pair);

    /* Advance the address (R2) and shrink the length (R2+1).  */
    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);

    return DISAS_NEXT;
}
2006 
/* COMPARE LOGICAL (character): operand lengths of 1/2/4/8 bytes are
   expanded inline as two loads plus an unsigned compare; any other
   length falls back to the byte-loop helper.  */
static DisasJumpType op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s, l1);
    TCGv_i64 src;
    TCGv_i32 vl;
    MemOp mop;

    switch (l + 1) {
    case 1:
    case 2:
    case 4:
    case 8:
        /* ctz32 of the power-of-two byte count yields the MemOp size.  */
        mop = ctz32(l + 1) | MO_TE;
        /* Do not update cc_src yet: loading cc_dst may cause an exception. */
        src = tcg_temp_new_i64();
        tcg_gen_qemu_ld_tl(src, o->addr1, get_mem_index(s), mop);
        tcg_gen_qemu_ld_tl(cc_dst, o->in2, get_mem_index(s), mop);
        gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, src, cc_dst);
        return DISAS_NEXT;
    default:
        vl = tcg_constant_i32(l);
        gen_helper_clc(cc_op, tcg_env, vl, o->addr1, o->in2);
        set_cc_static(s);
        return DISAS_NEXT;
    }
}
2033 
2034 static DisasJumpType op_clcl(DisasContext *s, DisasOps *o)
2035 {
2036     int r1 = get_field(s, r1);
2037     int r2 = get_field(s, r2);
2038     TCGv_i32 t1, t2;
2039 
2040     /* r1 and r2 must be even.  */
2041     if (r1 & 1 || r2 & 1) {
2042         gen_program_exception(s, PGM_SPECIFICATION);
2043         return DISAS_NORETURN;
2044     }
2045 
2046     t1 = tcg_constant_i32(r1);
2047     t2 = tcg_constant_i32(r2);
2048     gen_helper_clcl(cc_op, tcg_env, t1, t2);
2049     set_cc_static(s);
2050     return DISAS_NEXT;
2051 }
2052 
/* COMPARE LOGICAL LONG EXTENDED: R1 and R3 designate register pairs.  */
static DisasJumpType op_clcle(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even.  */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_constant_i32(r1);
    t3 = tcg_constant_i32(r3);
    gen_helper_clcle(cc_op, tcg_env, t1, o->in2, t3);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* COMPARE LOGICAL LONG UNICODE: same shape as CLCLE, different helper.  */
static DisasJumpType op_clclu(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even.  */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_constant_i32(r1);
    t3 = tcg_constant_i32(r3);
    gen_helper_clclu(cc_op, tcg_env, t1, o->in2, t3);
    set_cc_static(s);
    return DISAS_NEXT;
}
2090 
/* COMPARE LOGICAL CHARACTERS UNDER MASK: the helper compares the bytes
   of the low 32 bits of in1 selected by mask m3 against storage at in2.  */
static DisasJumpType op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_constant_i32(get_field(s, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_extrl_i64_i32(t1, o->in1);
    gen_helper_clm(cc_op, tcg_env, t1, m3, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
2101 
/* COMPARE LOGICAL STRING: regs[0] holds the terminating character.
   The helper returns the updated operand addresses packed in an i128,
   which is unpacked back into in2 (low half) and in1 (high half).  */
static DisasJumpType op_clst(DisasContext *s, DisasOps *o)
{
    TCGv_i128 pair = tcg_temp_new_i128();

    gen_helper_clst(pair, tcg_env, regs[0], o->in1, o->in2);
    tcg_gen_extr_i128_i64(o->in2, o->in1, pair);

    set_cc_static(s);
    return DISAS_NEXT;
}
2112 
2113 static DisasJumpType op_cps(DisasContext *s, DisasOps *o)
2114 {
2115     TCGv_i64 t = tcg_temp_new_i64();
2116     tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
2117     tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
2118     tcg_gen_or_i64(o->out, o->out, t);
2119     return DISAS_NEXT;
2120 }
2121 
/* COMPARE AND SWAP (32/64-bit, size selected via insn->data MemOp).  */
static DisasJumpType op_cs(DisasContext *s, DisasOps *o)
{
    int d2 = get_field(s, d2);
    int b2 = get_field(s, b2);
    TCGv_i64 addr, cc;

    /* Note that in1 = R3 (new value) and
       in2 = (zero-extended) R1 (expected value).  */

    addr = get_address(s, 0, b2, d2);
    tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
                               get_mem_index(s), s->insn->data | MO_ALIGN);

    /* Are the memory and expected values (un)equal?  Note that this setcond
       produces the output CC value, thus the NE sense of the test.  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
    tcg_gen_extrl_i64_i32(cc_op, cc);
    set_cc_static(s);

    return DISAS_NEXT;
}
2144 
/* COMPARE DOUBLE AND SWAP (128-bit, on the R1:R1+1 register pair).  */
static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);

    o->out_128 = tcg_temp_new_i128();
    tcg_gen_concat_i64_i128(o->out_128, regs[r1 + 1], regs[r1]);

    /* Note out (R1:R1+1) = expected value and in2 (R3:R3+1) = new value.  */
    tcg_gen_atomic_cmpxchg_i128(o->out_128, o->addr1, o->out_128, o->in2_128,
                                get_mem_index(s), MO_BE | MO_128 | MO_ALIGN);

    /*
     * Extract result into cc_dst:cc_src, compare vs the expected value
     * in the as yet unmodified input registers, then update CC_OP.
     */
    tcg_gen_extr_i128_i64(cc_src, cc_dst, o->out_128);
    tcg_gen_xor_i64(cc_dst, cc_dst, regs[r1]);
    tcg_gen_xor_i64(cc_src, cc_src, regs[r1 + 1]);
    tcg_gen_or_i64(cc_dst, cc_dst, cc_src);
    set_cc_nz_u64(s, cc_dst);

    return DISAS_NEXT;
}
2168 
2169 static DisasJumpType op_csst(DisasContext *s, DisasOps *o)
2170 {
2171     int r3 = get_field(s, r3);
2172     TCGv_i32 t_r3 = tcg_constant_i32(r3);
2173 
2174     if (tb_cflags(s->base.tb) & CF_PARALLEL) {
2175         gen_helper_csst_parallel(cc_op, tcg_env, t_r3, o->addr1, o->in2);
2176     } else {
2177         gen_helper_csst(cc_op, tcg_env, t_r3, o->addr1, o->in2);
2178     }
2179 
2180     set_cc_static(s);
2181     return DISAS_NEXT;
2182 }
2183 
2184 #ifndef CONFIG_USER_ONLY
/* COMPARE AND SWAP AND PURGE: a cmpxchg, and if the swap succeeded and
   the LSB of R2 is set, a full TLB purge across all cpus.  */
static DisasJumpType op_csp(DisasContext *s, DisasOps *o)
{
    MemOp mop = s->insn->data;
    TCGv_i64 addr, old, cc;
    TCGLabel *lab = gen_new_label();

    /* Note that in1 = R1 (zero-extended expected value),
       out = R1 (original reg), out2 = R1+1 (new value).  */

    addr = tcg_temp_new_i64();
    old = tcg_temp_new_i64();
    /* The operand address is implicitly aligned to the access size.  */
    tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
    tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
                               get_mem_index(s), mop | MO_ALIGN);

    /* Are the memory and expected values (un)equal?  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
    tcg_gen_extrl_i64_i32(cc_op, cc);

    /* Write back the output now, so that it happens before the
       following branch, so that we don't need local temps.  */
    if ((mop & MO_SIZE) == MO_32) {
        tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
    } else {
        tcg_gen_mov_i64(o->out, old);
    }

    /* If the comparison was equal, and the LSB of R2 was set,
       then we need to flush the TLB (for all cpus).  */
    tcg_gen_xori_i64(cc, cc, 1);
    tcg_gen_and_i64(cc, cc, o->in2);
    tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);

    gen_helper_purge(tcg_env);
    gen_set_label(lab);

    return DISAS_NEXT;
}
2224 #endif
2225 
2226 static DisasJumpType op_cvd(DisasContext *s, DisasOps *o)
2227 {
2228     TCGv_i64 t1 = tcg_temp_new_i64();
2229     TCGv_i32 t2 = tcg_temp_new_i32();
2230     tcg_gen_extrl_i64_i32(t2, o->in1);
2231     gen_helper_cvd(t1, t2);
2232     tcg_gen_qemu_st_i64(t1, o->in2, get_mem_index(s), MO_TEUQ);
2233     return DISAS_NEXT;
2234 }
2235 
/* COMPARE AND TRAP: trap when the m3-selected relation between in1 and
   in2 holds; insn->data selects the unsigned (logical) variant.  The
   branch uses the inverted condition to jump over the trap.  */
static DisasJumpType op_ct(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    TCGLabel *lab = gen_new_label();
    TCGCond c;

    c = tcg_invert_cond(ltgt_cond[m3]);
    if (s->insn->data) {
        c = tcg_unsigned_cond(c);
    }
    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);

    /* Trap.  */
    gen_trap(s);

    gen_set_label(lab);
    return DISAS_NEXT;
}
2254 
/*
 * CONVERT UTF family.  insn->data encodes the source/destination
 * formats (e.g. 12 selects helper cu12).  The m3 well-formedness-check
 * flag is only honored when the ETF3-enhancement facility is present.
 */
static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i32 tr1, tr2, chk;

    /* R1 and R2 must both be even.  */
    if ((r1 | r2) & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
        m3 = 0;
    }

    tr1 = tcg_constant_i32(r1);
    tr2 = tcg_constant_i32(r2);
    chk = tcg_constant_i32(m3);

    switch (s->insn->data) {
    case 12:
        gen_helper_cu12(cc_op, tcg_env, tr1, tr2, chk);
        break;
    case 14:
        gen_helper_cu14(cc_op, tcg_env, tr1, tr2, chk);
        break;
    case 21:
        gen_helper_cu21(cc_op, tcg_env, tr1, tr2, chk);
        break;
    case 24:
        gen_helper_cu24(cc_op, tcg_env, tr1, tr2, chk);
        break;
    case 41:
        gen_helper_cu41(cc_op, tcg_env, tr1, tr2, chk);
        break;
    case 42:
        gen_helper_cu42(cc_op, tcg_env, tr1, tr2, chk);
        break;
    default:
        g_assert_not_reached();
    }

    set_cc_static(s);
    return DISAS_NEXT;
}
2301 
2302 #ifndef CONFIG_USER_ONLY
/* DIAGNOSE: the i2 field carries the hypervisor function code; all the
   work happens in the helper.  */
static DisasJumpType op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
    TCGv_i32 func_code = tcg_constant_i32(get_field(s, i2));

    gen_helper_diag(tcg_env, r1, r3, func_code);
    return DISAS_NEXT;
}
2312 #endif
2313 
/* 32-bit signed divide: the helper packs two 32-bit results into one
   i64, split here into out2 (low half) and out (high half).  */
static DisasJumpType op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out, tcg_env, o->in1, o->in2);
    tcg_gen_extr32_i64(o->out2, o->out, o->out);
    return DISAS_NEXT;
}

/* 32-bit unsigned divide; result packing as for op_divs32.  */
static DisasJumpType op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out, tcg_env, o->in1, o->in2);
    tcg_gen_extr32_i64(o->out2, o->out, o->out);
    return DISAS_NEXT;
}

/* 64-bit signed divide: the i128 helper result is split into out2
   (low half) and out (high half).  */
static DisasJumpType op_divs64(DisasContext *s, DisasOps *o)
{
    TCGv_i128 t = tcg_temp_new_i128();

    gen_helper_divs64(t, tcg_env, o->in1, o->in2);
    tcg_gen_extr_i128_i64(o->out2, o->out, t);
    return DISAS_NEXT;
}

/* 64-bit unsigned divide: the 128-bit dividend arrives in out:out2.  */
static DisasJumpType op_divu64(DisasContext *s, DisasOps *o)
{
    TCGv_i128 t = tcg_temp_new_i128();

    gen_helper_divu64(t, tcg_env, o->out, o->out2, o->in2);
    tcg_gen_extr_i128_i64(o->out2, o->out, t);
    return DISAS_NEXT;
}
2345 
/* DIVIDE (short BFP) via helper.  */
static DisasJumpType op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* DIVIDE (long BFP) via helper.  */
static DisasJumpType op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* DIVIDE (extended BFP, 128-bit operands) via helper.  */
static DisasJumpType op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out_128, tcg_env, o->in1_128, o->in2_128);
    return DISAS_NEXT;
}
2363 
/* EXTRACT ACCESS: copy access register r2 into out.  */
static DisasJumpType op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);
    tcg_gen_ld32u_i64(o->out, tcg_env, offsetof(CPUS390XState, aregs[r2]));
    return DISAS_NEXT;
}

/* EXTRACT CPU ATTRIBUTE.  */
static DisasJumpType op_ecag(DisasContext *s, DisasOps *o)
{
    /* No cache information provided.  */
    tcg_gen_movi_i64(o->out, -1);
    return DISAS_NEXT;
}

/* EXTRACT FPC: read the floating-point-control register.  */
static DisasJumpType op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, tcg_env, offsetof(CPUS390XState, fpc));
    return DISAS_NEXT;
}
2383 
/* EXTRACT PSW: store the left half (with the current CC folded in) to
   r1 and, if r2 is non-zero, the right half to r2.  */
static DisasJumpType op_epsw(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t_cc = tcg_temp_new_i64();

    /* Note the "subsequently" in the PoO, which implies a defined result
       if r1 == r2.  Thus we cannot defer these writes to an output hook.  */
    gen_op_calc_cc(s);
    tcg_gen_extu_i32_i64(t_cc, cc_op);
    tcg_gen_shri_i64(t, psw_mask, 32);
    tcg_gen_deposit_i64(t, t, t_cc, 12, 2);
    store_reg32_i64(r1, t);
    if (r2 != 0) {
        store_reg32_i64(r2, psw_mask);
    }
    return DISAS_NEXT;
}
2403 
/* EXECUTE: run the instruction at address in2, modified by the low byte
   of R1 (zero when r1 == 0).  The helper sets up s->ex_value; control
   flow re-enters the translator, hence DISAS_PC_CC_UPDATED.  */
static DisasJumpType op_ex(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    TCGv_i32 ilen;
    TCGv_i64 v1;

    /* Nested EXECUTE is not allowed.  */
    if (unlikely(s->ex_value)) {
        gen_program_exception(s, PGM_EXECUTE);
        return DISAS_NORETURN;
    }

    update_psw_addr(s);
    update_cc_op(s);

    if (r1 == 0) {
        v1 = tcg_constant_i64(0);
    } else {
        v1 = regs[r1];
    }

    ilen = tcg_constant_i32(s->ilen);
    gen_helper_ex(tcg_env, ilen, v1, o->in2);

    return DISAS_PC_CC_UPDATED;
}
2430 
/*
 * LOAD FP INTEGER (round to an integral value in the same FP format).
 * fpinst_extract_m34() validates the rounding fields and returns NULL
 * after raising a program exception when they are invalid.
 */

/* Short BFP.  */
static DisasJumpType op_fieb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fieb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

/* Long BFP.  */
static DisasJumpType op_fidb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fidb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

/* Extended BFP (128-bit operand and result).  */
static DisasJumpType op_fixb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fixb(o->out_128, tcg_env, o->in2_128, m34);
    return DISAS_NEXT;
}
2463 
/* FIND LEFTMOST ONE: R1 = leading-zero count (64 if in2 == 0),
   R1+1 = in2 with the found bit cleared.  */
static DisasJumpType op_flogr(DisasContext *s, DisasOps *o)
{
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64.  It also lets cc_dst be a convenient
       temporary during our computation.  */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64.  */
    tcg_gen_clzi_i64(o->out, o->in2, 64);

    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
       value by 64, which is undefined.  But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing.  */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out);
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
    return DISAS_NEXT;
}
2483 
/* INSERT CHARACTERS UNDER MASK: load the bytes selected by m3 from in2
   and deposit them into out; ccm accumulates the mask of inserted bits
   for the CC computation.  Contiguous masks become a single load.  */
static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    int pos, len, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();
    uint64_t ccm;

    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit load.  */
        tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_TEUL);
        len = 32;
        goto one_insert;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit load.  */
        tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_TEUW);
        len = 16;
        goto one_insert;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit load.  */
        tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_UB);
        len = 8;
        goto one_insert;

    one_insert:
        pos = base + ctz32(m3) * 8;
        tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
        ccm = ((1ull << len) - 1) << pos;
        break;

    case 0:
        /* Recognize access exceptions for the first byte.  */
        tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_UB);
        gen_op_movi_cc(s, 0);
        return DISAS_NEXT;

    default:
        /* This is going to be a sequence of loads and inserts.  */
        pos = base + 32 - 8;
        ccm = 0;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_UB);
                tcg_gen_addi_i64(o->in2, o->in2, 1);
                tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
                ccm |= 0xffull << pos;
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }

    tcg_gen_movi_i64(tmp, ccm);
    gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
    return DISAS_NEXT;
}
2548 
2549 static DisasJumpType op_insi(DisasContext *s, DisasOps *o)
2550 {
2551     int shift = s->insn->data & 0xff;
2552     int size = s->insn->data >> 8;
2553     tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
2554     return DISAS_NEXT;
2555 }
2556 
/* INSERT PROGRAM MASK: assemble the program mask (psw_mask bits at
   offset 40) and the current CC (at bit 4 of the byte) and deposit the
   result as an 8-bit field at bit position 24 of r1.  */
static DisasJumpType op_ipm(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    gen_op_calc_cc(s);
    t1 = tcg_temp_new_i64();
    tcg_gen_extract_i64(t1, psw_mask, 40, 4);
    t2 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(t2, cc_op);
    tcg_gen_deposit_i64(t1, t1, t2, 4, 60);
    tcg_gen_deposit_i64(o->out, o->out, t1, 24, 8);
    return DISAS_NEXT;
}
2570 
2571 #ifndef CONFIG_USER_ONLY
/* INVALIDATE DAT TABLE ENTRY: m4 is only honored when the
   local-TLB-clearing facility is installed; otherwise pass 0.  */
static DisasJumpType op_idte(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m4;

    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
        m4 = tcg_constant_i32(get_field(s, m4));
    } else {
        m4 = tcg_constant_i32(0);
    }
    gen_helper_idte(tcg_env, o->in1, o->in2, m4);
    return DISAS_NEXT;
}

/* INVALIDATE PAGE TABLE ENTRY: same m4 handling as op_idte.  */
static DisasJumpType op_ipte(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m4;

    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
        m4 = tcg_constant_i32(get_field(s, m4));
    } else {
        m4 = tcg_constant_i32(0);
    }
    gen_helper_ipte(tcg_env, o->in1, o->in2, m4);
    return DISAS_NEXT;
}

/* INSERT STORAGE KEY EXTENDED.  */
static DisasJumpType op_iske(DisasContext *s, DisasOps *o)
{
    gen_helper_iske(o->out, tcg_env, o->in2);
    return DISAS_NEXT;
}
2603 #endif
2604 
2605 static DisasJumpType op_msa(DisasContext *s, DisasOps *o)
2606 {
2607     int r1 = have_field(s, r1) ? get_field(s, r1) : 0;
2608     int r2 = have_field(s, r2) ? get_field(s, r2) : 0;
2609     int r3 = have_field(s, r3) ? get_field(s, r3) : 0;
2610     TCGv_i32 t_r1, t_r2, t_r3, type;
2611 
2612     switch (s->insn->data) {
2613     case S390_FEAT_TYPE_KMA:
2614         if (r3 == r1 || r3 == r2) {
2615             gen_program_exception(s, PGM_SPECIFICATION);
2616             return DISAS_NORETURN;
2617         }
2618         /* FALL THROUGH */
2619     case S390_FEAT_TYPE_KMCTR:
2620         if (r3 & 1 || !r3) {
2621             gen_program_exception(s, PGM_SPECIFICATION);
2622             return DISAS_NORETURN;
2623         }
2624         /* FALL THROUGH */
2625     case S390_FEAT_TYPE_PPNO:
2626     case S390_FEAT_TYPE_KMF:
2627     case S390_FEAT_TYPE_KMC:
2628     case S390_FEAT_TYPE_KMO:
2629     case S390_FEAT_TYPE_KM:
2630         if (r1 & 1 || !r1) {
2631             gen_program_exception(s, PGM_SPECIFICATION);
2632             return DISAS_NORETURN;
2633         }
2634         /* FALL THROUGH */
2635     case S390_FEAT_TYPE_KMAC:
2636     case S390_FEAT_TYPE_KIMD:
2637     case S390_FEAT_TYPE_KLMD:
2638         if (r2 & 1 || !r2) {
2639             gen_program_exception(s, PGM_SPECIFICATION);
2640             return DISAS_NORETURN;
2641         }
2642         /* FALL THROUGH */
2643     case S390_FEAT_TYPE_PCKMO:
2644     case S390_FEAT_TYPE_PCC:
2645         break;
2646     default:
2647         g_assert_not_reached();
2648     };
2649 
2650     t_r1 = tcg_constant_i32(r1);
2651     t_r2 = tcg_constant_i32(r2);
2652     t_r3 = tcg_constant_i32(r3);
2653     type = tcg_constant_i32(s->insn->data);
2654     gen_helper_msa(cc_op, tcg_env, t_r1, t_r2, t_r3, type);
2655     set_cc_static(s);
2656     return DISAS_NEXT;
2657 }
2658 
/* COMPARE AND SIGNAL (short BFP): helper produces the CC.  */
static DisasJumpType op_keb(DisasContext *s, DisasOps *o)
{
    gen_helper_keb(cc_op, tcg_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* COMPARE AND SIGNAL (long BFP).  */
static DisasJumpType op_kdb(DisasContext *s, DisasOps *o)
{
    gen_helper_kdb(cc_op, tcg_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* COMPARE AND SIGNAL (extended BFP, 128-bit operands).  */
static DisasJumpType op_kxb(DisasContext *s, DisasOps *o)
{
    gen_helper_kxb(cc_op, tcg_env, o->in1_128, o->in2_128);
    set_cc_static(s);
    return DISAS_NEXT;
}
2679 
/* Common code for LOAD AND ADD (LOGICAL): atomically add in1 to memory,
   fetching the original memory value into in2 (the real output).  */
static DisasJumpType help_laa(DisasContext *s, DisasOps *o, bool addu64)
{
    /* The real output is the original value in memory, which the atomic
       fetch-and-add returns in in2.  */
    tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the addition for setting CC.  */
    if (addu64) {
        /* 64-bit logical add: also track the carry-out in cc_src.  */
        tcg_gen_movi_i64(cc_src, 0);
        tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    } else {
        tcg_gen_add_i64(o->out, o->in1, o->in2);
    }
    return DISAS_NEXT;
}

/* LOAD AND ADD (no carry tracking needed for CC).  */
static DisasJumpType op_laa(DisasContext *s, DisasOps *o)
{
    return help_laa(s, o, false);
}

/* LOAD AND ADD LOGICAL, 64-bit: CC needs the carry-out.  */
static DisasJumpType op_laa_addu64(DisasContext *s, DisasOps *o)
{
    return help_laa(s, o, true);
}
2705 
/* LOAD AND AND: atomic fetch-and-AND of in1 into memory.  */
static DisasJumpType op_lan(DisasContext *s, DisasOps *o)
{
    /* The real output is the original value in memory, which the atomic
       fetch-and-and returns in in2.  */
    tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* LOAD AND OR: atomic fetch-and-OR of in1 into memory.  */
static DisasJumpType op_lao(DisasContext *s, DisasOps *o)
{
    /* The real output is the original value in memory, which the atomic
       fetch-and-or returns in in2.  */
    tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* LOAD AND EXCLUSIVE OR: atomic fetch-and-XOR of in1 into memory.  */
static DisasJumpType op_lax(DisasContext *s, DisasOps *o)
{
    /* The real output is the original value in memory, which the atomic
       fetch-and-xor returns in in2.  */
    tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
2738 
/* LOAD LENGTHENED short -> long BFP via helper.  */
static DisasJumpType op_ldeb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldeb(o->out, tcg_env, o->in2);
    return DISAS_NEXT;
}
2744 
/*
 * LOAD ROUNDED (narrowing BFP conversions).  fpinst_extract_m34()
 * validates the rounding fields and returns NULL after raising a
 * program exception when invalid.
 */

/* long -> short BFP.  */
static DisasJumpType op_ledb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_ledb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

/* extended (128-bit) -> long BFP.  */
static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_ldxb(o->out, tcg_env, o->in2_128, m34);
    return DISAS_NEXT;
}

/* extended (128-bit) -> short BFP.  */
static DisasJumpType op_lexb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_lexb(o->out, tcg_env, o->in2_128, m34);
    return DISAS_NEXT;
}
2777 
/* LOAD LENGTHENED long -> extended BFP (128-bit result).  */
static DisasJumpType op_lxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxdb(o->out_128, tcg_env, o->in2);
    return DISAS_NEXT;
}

/* LOAD LENGTHENED short -> extended BFP (128-bit result).  */
static DisasJumpType op_lxeb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxeb(o->out_128, tcg_env, o->in2);
    return DISAS_NEXT;
}

/* Place a 32-bit value in the upper half of the 64-bit register
   (short-FP register layout).  */
static DisasJumpType op_lde(DisasContext *s, DisasOps *o)
{
    tcg_gen_shli_i64(o->out, o->in2, 32);
    return DISAS_NEXT;
}

/* LOAD LOGICAL THIRTY ONE BITS: keep only the low 31 bits.  */
static DisasJumpType op_llgt(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return DISAS_NEXT;
}
2801 
/* 8-bit sign-extending load.  */
static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_SB);
    return DISAS_NEXT;
}

/* 8-bit zero-extending load.  */
static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_UB);
    return DISAS_NEXT;
}

/* 16-bit sign-extending load.  */
static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TESW);
    return DISAS_NEXT;
}

/* 16-bit zero-extending load.  */
static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUW);
    return DISAS_NEXT;
}

/* 32-bit sign-extending load; insn->data may add MemOp bits
   (e.g. alignment).  */
static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_tl(o->out, o->in2, get_mem_index(s),
                       MO_TESL | s->insn->data);
    return DISAS_NEXT;
}

/* 32-bit zero-extending load; insn->data may add MemOp bits.  */
static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_tl(o->out, o->in2, get_mem_index(s),
                       MO_TEUL | s->insn->data);
    return DISAS_NEXT;
}

/* 64-bit load; insn->data may add MemOp bits.  */
static DisasJumpType op_ld64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s),
                        MO_TEUQ | s->insn->data);
    return DISAS_NEXT;
}
2846 
/*
 * LOAD AND TRAP family: the destination is written even when the value
 * is zero; a zero value then raises the trap.
 */

/* LOAD AND TRAP (32-bit value already in in2).  */
static DisasJumpType op_lat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32_i64(get_field(s, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}

/* LOAD AND TRAP (64-bit load from memory).  */
static DisasJumpType op_lgat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUQ);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}

/* LOAD HIGH AND TRAP (store to the high 32 bits of r1).  */
static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32h_i64(get_field(s, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}

/* LOAD LOGICAL AND TRAP (zero-extending 32-bit load).  */
static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();

    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUL);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}

/* LOAD LOGICAL THIRTY ONE BITS AND TRAP.  */
static DisasJumpType op_llgtat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
2902 
/* LOAD ON CONDITION / SELECT: out = condition ? in2 : in1, where the
   condition mask comes from m3 (LOC*) or m4 (SELECT).  */
static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;

    if (have_field(s, m3)) {
        /* LOAD * ON CONDITION */
        disas_jcc(s, &c, get_field(s, m3));
    } else {
        /* SELECT */
        disas_jcc(s, &c, get_field(s, m4));
    }

    if (c.is_64) {
        tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
                            o->in2, o->in1);
    } else {
        /* Widen the 32-bit comparison result so a 64-bit movcond
           can consume it.  */
        TCGv_i32 t32 = tcg_temp_new_i32();
        TCGv_i64 t, z;

        tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);

        t = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t, t32);

        z = tcg_constant_i64(0);
        tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
    }

    return DISAS_NEXT;
}
2933 
2934 #ifndef CONFIG_USER_ONLY
2935 static DisasJumpType op_lctl(DisasContext *s, DisasOps *o)
2936 {
2937     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
2938     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
2939 
2940     gen_helper_lctl(tcg_env, r1, o->in2, r3);
2941     /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
2942     s->exit_to_mainloop = true;
2943     return DISAS_TOO_MANY;
2944 }
2945 
2946 static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o)
2947 {
2948     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
2949     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
2950 
2951     gen_helper_lctlg(tcg_env, r1, o->in2, r3);
2952     /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
2953     s->exit_to_mainloop = true;
2954     return DISAS_TOO_MANY;
2955 }
2956 
2957 static DisasJumpType op_lra(DisasContext *s, DisasOps *o)
2958 {
2959     gen_helper_lra(o->out, tcg_env, o->out, o->in2);
2960     set_cc_static(s);
2961     return DISAS_NEXT;
2962 }
2963 
2964 static DisasJumpType op_lpp(DisasContext *s, DisasOps *o)
2965 {
2966     tcg_gen_st_i64(o->in2, tcg_env, offsetof(CPUS390XState, pp));
2967     return DISAS_NEXT;
2968 }
2969 
/*
 * LPSW: load an 8-byte short-format PSW from memory and hand the
 * (mask, addr) pair to the load_psw helper.  Control never returns to
 * the current translation block.
 */
static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o)
{
    TCGv_i64 mask, addr;

    per_breaking_event(s);

    /*
     * Convert the short PSW into the normal PSW, similar to what
     * s390_cpu_load_normal() does.
     */
    mask = tcg_temp_new_i64();
    addr = tcg_temp_new_i64();
    /* The short PSW must be doubleword aligned. */
    tcg_gen_qemu_ld_i64(mask, o->in2, get_mem_index(s), MO_TEUQ | MO_ALIGN_8);
    tcg_gen_andi_i64(addr, mask, PSW_MASK_SHORT_ADDR);
    tcg_gen_andi_i64(mask, mask, PSW_MASK_SHORT_CTRL);
    /* Flip the short-PSW indicator bit to form a normal-format mask. */
    tcg_gen_xori_i64(mask, mask, PSW_MASK_SHORTPSW);
    gen_helper_load_psw(tcg_env, mask, addr);
    return DISAS_NORETURN;
}
2989 
/*
 * LPSWE: load a 16-byte extended-format PSW (mask then address) from
 * memory and install it via the load_psw helper.
 */
static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    /* Only the first doubleword carries the alignment requirement. */
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
                        MO_TEUQ | MO_ALIGN_8);
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_ld_i64(t2, o->in2, get_mem_index(s), MO_TEUQ);
    gen_helper_load_psw(tcg_env, t1, t2);
    return DISAS_NORETURN;
}
3005 #endif
3006 
/* LAM: load access registers r1..r3 from memory via helper. */
static DisasJumpType op_lam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_lam(tcg_env, r1, o->in2, r3);
    return DISAS_NEXT;
}
3015 
/*
 * LM/LMY: load multiple 32-bit registers r1..r3 (wrapping at 15) from
 * consecutive words at o->in2.  The first and last words are loaded
 * before any register is written so that a page fault leaves the
 * registers unmodified.
 */
static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
        store_reg32_i64(r1, t1);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
    /* (r3 - r1) & 15 is the register count minus one, with wraparound. */
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld_i64(t2, t2, get_mem_index(s), MO_TEUL);
    store_reg32_i64(r1, t1);
    store_reg32_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
        store_reg32_i64(r1, t1);
    }
    return DISAS_NEXT;
}
3055 
/*
 * LMH: like op_lm32, but the loaded words go into the high halves of
 * the registers (store_reg32h_i64).  Same first/last-load ordering to
 * keep faults from partially updating the register set.
 */
static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
        store_reg32h_i64(r1, t1);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld_i64(t2, t2, get_mem_index(s), MO_TEUL);
    store_reg32h_i64(r1, t1);
    store_reg32h_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
        store_reg32h_i64(r1, t1);
    }
    return DISAS_NEXT;
}
3095 
/*
 * LMG: load multiple 64-bit registers r1..r3 (wrapping at 15) from
 * consecutive doublewords.  As in op_lm32, the first and last values
 * are fetched before any register is clobbered so a fault is safe.
 */
static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld_i64(regs[r1], o->in2, get_mem_index(s), MO_TEUQ);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUQ);
    tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld_i64(regs[r3], t2, get_mem_index(s), MO_TEUQ);
    /* Commit r1 only after the last load has succeeded. */
    tcg_gen_mov_i64(regs[r1], t1);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t1, 8);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t1);
        tcg_gen_qemu_ld_i64(regs[r1], o->in2, get_mem_index(s), MO_TEUQ);
    }
    return DISAS_NEXT;
}
3132 
/*
 * LPD/LPDG: load-pair-disjoint.  Atomicity across the two loads cannot
 * be expressed in TCG, so under CF_PARALLEL we stop the world and
 * single-step via EXCP_ATOMIC; serially we just do both loads.
 * The operand size (32/64 bit) comes from s->insn->data.
 */
static DisasJumpType op_lpd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 a1, a2;
    MemOp mop = s->insn->data;

    /* In a parallel context, stop the world and single step.  */
    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        update_psw_addr(s);
        update_cc_op(s);
        gen_exception(EXCP_ATOMIC);
        return DISAS_NORETURN;
    }

    /* In a serial context, perform the two loads ... */
    a1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
    a2 = get_address(s, 0, get_field(s, b2), get_field(s, d2));
    tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
    tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);

    /* ... and indicate that we performed them while interlocked.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}
3156 
/* LPQ: aligned 128-bit load into the 128-bit output temporary. */
static DisasJumpType op_lpq(DisasContext *s, DisasOps *o)
{
    o->out_128 = tcg_temp_new_i128();
    tcg_gen_qemu_ld_i128(o->out_128, o->in2, get_mem_index(s),
                         MO_TE | MO_128 | MO_ALIGN);
    return DISAS_NEXT;
}
3164 
3165 #ifndef CONFIG_USER_ONLY
/* LURA/LURAG: load using the real-address MMU index; size in insn->data. */
static DisasJumpType op_lura(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_tl(o->out, o->in2, MMU_REAL_IDX, s->insn->data);
    return DISAS_NEXT;
}
3171 #endif
3172 
/* LZRB-style load: copy in2 with the rightmost byte zeroed (mask -256). */
static DisasJumpType op_lzrb(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, -256);
    return DISAS_NEXT;
}
3178 
/*
 * LCBB: load count to block boundary.  Result is min(16, bytes until
 * the next block_size boundary after addr1), block_size = 64 << m3.
 * m3 > 6 is a specification exception.
 */
static DisasJumpType op_lcbb(DisasContext *s, DisasOps *o)
{
    const int64_t block_size = (1ull << (get_field(s, m3) + 6));

    if (get_field(s, m3) > 6) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    /* or with -block_size then negate yields block_size - (addr % block_size). */
    tcg_gen_ori_i64(o->addr1, o->addr1, -block_size);
    tcg_gen_neg_i64(o->addr1, o->addr1);
    tcg_gen_movi_i64(o->out, 16);
    tcg_gen_umin_i64(o->out, o->out, o->addr1);
    gen_op_update1_cc_i64(s, CC_OP_LCBB, o->out);
    return DISAS_NEXT;
}
3195 
/*
 * MC: monitor call.  Class must fit in 4 bits; otherwise specification
 * exception.  In system emulation the helper decides whether to raise a
 * monitor event; in user mode the instruction is a NOP.
 */
static DisasJumpType op_mc(DisasContext *s, DisasOps *o)
{
    const uint8_t monitor_class = get_field(s, i2);

    if (monitor_class & 0xf0) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

#if !defined(CONFIG_USER_ONLY)
    gen_helper_monitor_call(tcg_env, o->addr1,
                            tcg_constant_i32(monitor_class));
#endif
    /* Defaults to a NOP. */
    return DISAS_NEXT;
}
3212 
/* Generic move: steal in2 as the output temporary (no copy needed). */
static DisasJumpType op_mov2(DisasContext *s, DisasOps *o)
{
    o->out = o->in2;
    o->in2 = NULL;
    return DISAS_NEXT;
}
3219 
/*
 * Move with access-register update (used by LPTEA-style insns): moves
 * in2 to out like op_mov2, then sets access register r1 according to
 * the current address-space control in the PSW.
 */
static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s, b2);
    TCGv ar1 = tcg_temp_new_i64();
    int r1 = get_field(s, r1);

    o->out = o->in2;
    o->in2 = NULL;

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 0);
        break;
    case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 1);
        break;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        /* In secondary mode, copy the access register selected by b2;
           b2 == 0 means no base register, hence AR value 0. */
        if (b2) {
            tcg_gen_ld32u_i64(ar1, tcg_env, offsetof(CPUS390XState, aregs[b2]));
        } else {
            tcg_gen_movi_i64(ar1, 0);
        }
        break;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 2);
        break;
    }

    tcg_gen_st32_i64(ar1, tcg_env, offsetof(CPUS390XState, aregs[r1]));
    return DISAS_NEXT;
}
3251 
/* Move a register pair: steal in1/in2 as out/out2 without copying. */
static DisasJumpType op_movx(DisasContext *s, DisasOps *o)
{
    o->out = o->in1;
    o->out2 = o->in2;
    o->in1 = NULL;
    o->in2 = NULL;
    return DISAS_NEXT;
}
3260 
/* MVC: memory-to-memory move of l1+1 bytes, done entirely in the helper. */
static DisasJumpType op_mvc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_mvc(tcg_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}
3268 
/* MVCRL: move right-to-left; byte count comes from register 0. */
static DisasJumpType op_mvcrl(DisasContext *s, DisasOps *o)
{
    gen_helper_mvcrl(tcg_env, regs[0], o->addr1, o->in2);
    return DISAS_NEXT;
}
3274 
/* MVCIN: move inverse (bytes reversed), length l1+1, via helper. */
static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_mvcin(tcg_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}
3282 
3283 static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o)
3284 {
3285     int r1 = get_field(s, r1);
3286     int r2 = get_field(s, r2);
3287     TCGv_i32 t1, t2;
3288 
3289     /* r1 and r2 must be even.  */
3290     if (r1 & 1 || r2 & 1) {
3291         gen_program_exception(s, PGM_SPECIFICATION);
3292         return DISAS_NORETURN;
3293     }
3294 
3295     t1 = tcg_constant_i32(r1);
3296     t2 = tcg_constant_i32(r2);
3297     gen_helper_mvcl(cc_op, tcg_env, t1, t2);
3298     set_cc_static(s);
3299     return DISAS_NEXT;
3300 }
3301 
/*
 * MVCLE: move long extended.  r1 and r3 designate register pairs and
 * must therefore be even; the helper performs the (possibly partial)
 * move and sets the condition code.
 */
static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even.  */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_constant_i32(r1);
    t3 = tcg_constant_i32(r3);
    gen_helper_mvcle(cc_op, tcg_env, t1, o->in2, t3);
    set_cc_static(s);
    return DISAS_NEXT;
}
3320 
/*
 * MVCLU: move long unicode.  Same even-register-pair requirement as
 * MVCLE; the helper does the work and sets the condition code.
 */
static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even.  */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_constant_i32(r1);
    t3 = tcg_constant_i32(r3);
    gen_helper_mvclu(cc_op, tcg_env, t1, o->in2, t3);
    set_cc_static(s);
    return DISAS_NEXT;
}
3339 
/* MVCOS: move with optional specifications; controls come from reg r3. */
static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    gen_helper_mvcos(cc_op, tcg_env, o->addr1, o->in2, regs[r3]);
    set_cc_static(s);
    return DISAS_NEXT;
}
3347 
3348 #ifndef CONFIG_USER_ONLY
/* MVCP: move to primary.  Note the register number is encoded in the
   l1 field for this format; regs[r1] holds the true length. */
static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, l1);
    int r3 = get_field(s, r3);
    gen_helper_mvcp(cc_op, tcg_env, regs[r1], o->addr1, o->in2, regs[r3]);
    set_cc_static(s);
    return DISAS_NEXT;
}
3357 
/* MVCS: move to secondary; same field layout as MVCP (length reg in l1). */
static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, l1);
    int r3 = get_field(s, r3);
    gen_helper_mvcs(cc_op, tcg_env, regs[r1], o->addr1, o->in2, regs[r3]);
    set_cc_static(s);
    return DISAS_NEXT;
}
3366 #endif
3367 
/* MVN: move numerics (low nibbles), length l1+1, via helper. */
static DisasJumpType op_mvn(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_mvn(tcg_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}
3375 
/* MVO: move with offset (packed-decimal nibble shuffle), via helper. */
static DisasJumpType op_mvo(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_mvo(tcg_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}
3383 
/* MVPG: move page.  Register 0 supplies the option bits; CC from helper. */
static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 t2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_mvpg(cc_op, tcg_env, regs[0], t1, t2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3393 
/* MVST: move string (terminator in reg 0, handled by helper); CC static. */
static DisasJumpType op_mvst(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 t2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_mvst(cc_op, tcg_env, t1, t2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3403 
/* MVZ: move zones (high nibbles), length l1+1, via helper. */
static DisasJumpType op_mvz(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_mvz(tcg_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}
3411 
/* Integer multiply, low 64 bits of the product. */
static DisasJumpType op_mul(DisasContext *s, DisasOps *o)
{
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3417 
/* Unsigned 64x64->128 multiply: high half to out, low half to out2. */
static DisasJumpType op_mul128(DisasContext *s, DisasOps *o)
{
    tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3423 
/* Signed 64x64->128 multiply: high half to out, low half to out2. */
static DisasJumpType op_muls128(DisasContext *s, DisasOps *o)
{
    tcg_gen_muls2_i64(o->out2, o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3429 
/* MEEB: BFP short multiply, via softfloat helper. */
static DisasJumpType op_meeb(DisasContext *s, DisasOps *o)
{
    gen_helper_meeb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3435 
/* MDEB: BFP short x short -> long multiply, via helper. */
static DisasJumpType op_mdeb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdeb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3441 
/* MDB: BFP long multiply, via helper. */
static DisasJumpType op_mdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3447 
/* MXB: BFP extended (128-bit) multiply, via helper. */
static DisasJumpType op_mxb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxb(o->out_128, tcg_env, o->in1_128, o->in2_128);
    return DISAS_NEXT;
}
3453 
/* MXDB: BFP long x long -> extended multiply, via helper. */
static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxdb(o->out_128, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3459 
/* MAEB: BFP short multiply-and-add (out = in1 * in2 + f[r3]). */
static DisasJumpType op_maeb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
    gen_helper_maeb(o->out, tcg_env, o->in1, o->in2, r3);
    return DISAS_NEXT;
}
3466 
/* MADB: BFP long multiply-and-add (out = in1 * in2 + f[r3]). */
static DisasJumpType op_madb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg(get_field(s, r3));
    gen_helper_madb(o->out, tcg_env, o->in1, o->in2, r3);
    return DISAS_NEXT;
}
3473 
/* MSEB: BFP short multiply-and-subtract, via helper. */
static DisasJumpType op_mseb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
    gen_helper_mseb(o->out, tcg_env, o->in1, o->in2, r3);
    return DISAS_NEXT;
}
3480 
/* MSDB: BFP long multiply-and-subtract, via helper. */
static DisasJumpType op_msdb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg(get_field(s, r3));
    gen_helper_msdb(o->out, tcg_env, o->in1, o->in2, r3);
    return DISAS_NEXT;
}
3487 
/* LNR-family: negative absolute value, out = -(|in2|), via movcond. */
static DisasJumpType op_nabs(DisasContext *s, DisasOps *o)
{
    TCGv_i64 z = tcg_constant_i64(0);
    TCGv_i64 n = tcg_temp_new_i64();

    tcg_gen_neg_i64(n, o->in2);
    /* If in2 >= 0 pick the negated value, else in2 is already negative. */
    tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
    return DISAS_NEXT;
}
3497 
/* LNEBR-style: force the sign bit of a 32-bit float (kept in an i64). */
static DisasJumpType op_nabsf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
    return DISAS_NEXT;
}
3503 
/* LNDBR-style: force the sign bit of a 64-bit float. */
static DisasJumpType op_nabsf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
    return DISAS_NEXT;
}
3509 
/* 128-bit float negative-abs: set sign bit in the high half, copy low. */
static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}
3516 
/* NC: storage-to-storage AND of l1+1 bytes; CC produced by helper. */
static DisasJumpType op_nc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_nc(cc_op, tcg_env, l, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3525 
/* Integer two's-complement negation. */
static DisasJumpType op_neg(DisasContext *s, DisasOps *o)
{
    tcg_gen_neg_i64(o->out, o->in2);
    return DISAS_NEXT;
}
3531 
/* LCEBR-style: flip the sign bit of a 32-bit float. */
static DisasJumpType op_negf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
    return DISAS_NEXT;
}
3537 
/* LCDBR-style: flip the sign bit of a 64-bit float. */
static DisasJumpType op_negf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
    return DISAS_NEXT;
}
3543 
/* 128-bit float negate: flip sign bit in the high half, copy the low. */
static DisasJumpType op_negf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}
3550 
/* OC: storage-to-storage OR of l1+1 bytes; CC produced by helper. */
static DisasJumpType op_oc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_oc(cc_op, tcg_env, l, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3559 
/* Bitwise OR of the two inputs. */
static DisasJumpType op_or(DisasContext *s, DisasOps *o)
{
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3565 
/*
 * OI-immediate family (OIHH etc.): OR an immediate into one 16/32-bit
 * slice of the register.  insn->data packs the slice: low byte is the
 * shift, next byte is the field size in bits.
 */
static DisasJumpType op_ori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, t);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
3581 
/*
 * OI/OIY: OR immediate into storage.  With interlocked-access facility 2
 * the update is done atomically in memory; otherwise as a plain
 * load/modify/store.  Either way the OR is recomputed so the CC can be
 * set from the result.
 */
static DisasJumpType op_oi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                    s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
3602 
/* PACK: zoned-to-packed decimal conversion, via helper. */
static DisasJumpType op_pack(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_pack(tcg_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}
3610 
/* PKA: pack ASCII.  Source length is l2+1 bytes, capped at 32. */
static DisasJumpType op_pka(DisasContext *s, DisasOps *o)
{
    int l2 = get_field(s, l2) + 1;
    TCGv_i32 l;

    /* The length must not exceed 32 bytes.  */
    if (l2 > 32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_constant_i32(l2);
    gen_helper_pka(tcg_env, o->addr1, o->in2, l);
    return DISAS_NEXT;
}
3625 
/* PKU: pack unicode.  Source length l2+1 must be even and <= 64. */
static DisasJumpType op_pku(DisasContext *s, DisasOps *o)
{
    int l2 = get_field(s, l2) + 1;
    TCGv_i32 l;

    /* The length must be even and should not exceed 64 bytes.  */
    if ((l2 & 1) || (l2 > 64)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_constant_i32(l2);
    gen_helper_pku(tcg_env, o->addr1, o->in2, l);
    return DISAS_NEXT;
}
3640 
/*
 * POPCNT: with m3 bit 8 and the misc-insn-ext-3 facility, count bits
 * over the whole 64-bit register; otherwise the helper counts per byte.
 */
static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o)
{
    const uint8_t m3 = get_field(s, m3);

    if ((m3 & 8) && s390_has_feat(S390_FEAT_MISC_INSTRUCTION_EXT3)) {
        tcg_gen_ctpop_i64(o->out, o->in2);
    } else {
        gen_helper_popcnt(o->out, o->in2);
    }
    return DISAS_NEXT;
}
3652 
3653 #ifndef CONFIG_USER_ONLY
/* PTLB: purge TLB, delegated entirely to the helper. */
static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o)
{
    gen_helper_ptlb(tcg_env);
    return DISAS_NEXT;
}
3659 #endif
3660 
/*
 * RISBG/RISBGN/RISBHG/RISBLG: rotate then insert selected bits.
 * i3/i4 select the inclusive bit range to insert (wrapping), i5 is the
 * rotate amount, and i4 bit 0x80 requests zeroing of unselected bits.
 * Fast paths use TCG extract/deposit where the masks allow it.
 */
static DisasJumpType op_risbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s, i3);
    int i4 = get_field(s, i4);
    int i5 = get_field(s, i5);
    int do_zero = i4 & 0x80;
    uint64_t mask, imask, pmask;
    int pos, len, rot;

    /* Adjust the arguments for the specific insn.  */
    switch (s->fields.op2) {
    case 0x55: /* risbg */
    case 0x59: /* risbgn */
        i3 &= 63;
        i4 &= 63;
        pmask = ~0;
        break;
    case 0x5d: /* risbhg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0xffffffff00000000ull;
        break;
    case 0x51: /* risblg */
        i3 = (i3 & 31) + 32;
        i4 = (i4 & 31) + 32;
        pmask = 0x00000000ffffffffull;
        break;
    default:
        g_assert_not_reached();
    }

    /* MASK is the set of bits to be inserted from R2. */
    if (i3 <= i4) {
        /* [0...i3---i4...63] */
        mask = (-1ull >> i3) & (-1ull << (63 - i4));
    } else {
        /* [0---i4...i3---63] */
        mask = (-1ull >> i3) | (-1ull << (63 - i4));
    }
    /* For RISBLG/RISBHG, the wrapping is limited to the high/low doubleword. */
    mask &= pmask;

    /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
       insns, we need to keep the other half of the register.  */
    imask = ~mask | ~pmask;
    if (do_zero) {
        imask = ~pmask;
    }

    len = i4 - i3 + 1;
    pos = 63 - i4;
    rot = i5 & 63;

    /* In some cases we can implement this with extract.  */
    if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
        tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
        return DISAS_NEXT;
    }

    /* In some cases we can implement this with deposit.  */
    if (len > 0 && (imask == 0 || ~mask == imask)) {
        /* Note that we rotate the bits to be inserted to the lsb, not to
           the position as described in the PoO.  */
        rot = (rot - pos) & 63;
    } else {
        /* pos < 0 marks "deposit not applicable" for the code below. */
        pos = -1;
    }

    /* Rotate the input as necessary.  */
    tcg_gen_rotli_i64(o->in2, o->in2, rot);

    /* Insert the selected bits into the output.  */
    if (pos >= 0) {
        if (imask == 0) {
            tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
        } else {
            tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
        }
    } else if (imask == 0) {
        tcg_gen_andi_i64(o->out, o->in2, mask);
    } else {
        /* General case: mask both sources and merge. */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_andi_i64(o->out, o->out, imask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
    }
    return DISAS_NEXT;
}
3748 
3749 static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o)
3750 {
3751     int i3 = get_field(s, i3);
3752     int i4 = get_field(s, i4);
3753     int i5 = get_field(s, i5);
3754     TCGv_i64 orig_out;
3755     uint64_t mask;
3756 
3757     /* If this is a test-only form, arrange to discard the result.  */
3758     if (i3 & 0x80) {
3759         tcg_debug_assert(o->out != NULL);
3760         orig_out = o->out;
3761         o->out = tcg_temp_new_i64();
3762         tcg_gen_mov_i64(o->out, orig_out);
3763     }
3764 
3765     i3 &= 63;
3766     i4 &= 63;
3767     i5 &= 63;
3768 
3769     /* MASK is the set of bits to be operated on from R2.
3770        Take care for I3/I4 wraparound.  */
3771     mask = ~0ull >> i3;
3772     if (i3 <= i4) {
3773         mask ^= ~0ull >> i4 >> 1;
3774     } else {
3775         mask |= ~(~0ull >> i4 >> 1);
3776     }
3777 
3778     /* Rotate the input as necessary.  */
3779     tcg_gen_rotli_i64(o->in2, o->in2, i5);
3780 
3781     /* Operate.  */
3782     switch (s->fields.op2) {
3783     case 0x54: /* AND */
3784         tcg_gen_ori_i64(o->in2, o->in2, ~mask);
3785         tcg_gen_and_i64(o->out, o->out, o->in2);
3786         break;
3787     case 0x56: /* OR */
3788         tcg_gen_andi_i64(o->in2, o->in2, mask);
3789         tcg_gen_or_i64(o->out, o->out, o->in2);
3790         break;
3791     case 0x57: /* XOR */
3792         tcg_gen_andi_i64(o->in2, o->in2, mask);
3793         tcg_gen_xor_i64(o->out, o->out, o->in2);
3794         break;
3795     default:
3796         abort();
3797     }
3798 
3799     /* Set the CC.  */
3800     tcg_gen_andi_i64(cc_dst, o->out, mask);
3801     set_cc_nz_u64(s, cc_dst);
3802     return DISAS_NEXT;
3803 }
3804 
/* Byte-swap the low 16 bits; upper bits of the result are zeroed. */
static DisasJumpType op_rev16(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap16_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
    return DISAS_NEXT;
}
3810 
/* Byte-swap the low 32 bits; upper bits of the result are zeroed. */
static DisasJumpType op_rev32(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap32_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
    return DISAS_NEXT;
}
3816 
/* Byte-swap all 64 bits. */
static DisasJumpType op_rev64(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap64_i64(o->out, o->in2);
    return DISAS_NEXT;
}
3822 
3823 static DisasJumpType op_rll32(DisasContext *s, DisasOps *o)
3824 {
3825     TCGv_i32 t1 = tcg_temp_new_i32();
3826     TCGv_i32 t2 = tcg_temp_new_i32();
3827     TCGv_i32 to = tcg_temp_new_i32();
3828     tcg_gen_extrl_i64_i32(t1, o->in1);
3829     tcg_gen_extrl_i64_i32(t2, o->in2);
3830     tcg_gen_rotl_i32(to, t1, t2);
3831     tcg_gen_extu_i32_i64(o->out, to);
3832     return DISAS_NEXT;
3833 }
3834 
/* RLLG: 64-bit rotate left. */
static DisasJumpType op_rll64(DisasContext *s, DisasOps *o)
{
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3840 
3841 #ifndef CONFIG_USER_ONLY
/* RRBE: reset reference bit extended; CC comes from the helper. */
static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o)
{
    gen_helper_rrbe(cc_op, tcg_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3848 
/* SACF: set address-space control fast, via helper. */
static DisasJumpType op_sacf(DisasContext *s, DisasOps *o)
{
    gen_helper_sacf(tcg_env, o->in2);
    /* Addressing mode has changed, so end the block.  */
    return DISAS_TOO_MANY;
}
3855 #endif
3856 
/*
 * SAM24/SAM31/SAM64: set addressing mode.  insn->data encodes the mode
 * (0/1/3), which is also the 2-bit value deposited into PSW mask bits
 * 31-32.  The current PC must fit the new mode, else specification
 * exception.
 */
static DisasJumpType op_sam(DisasContext *s, DisasOps *o)
{
    int sam = s->insn->data;
    TCGv_i64 tsam;
    uint64_t mask;

    switch (sam) {
    case 0:
        /* 24-bit addresses. */
        mask = 0xffffff;
        break;
    case 1:
        /* 31-bit addresses. */
        mask = 0x7fffffff;
        break;
    default:
        /* 64-bit addresses. */
        mask = -1;
        break;
    }

    /* Bizarre but true, we check the address of the current insn for the
       specification exception, not the next to be executed.  Thus the PoO
       documents that Bad Things Happen two bytes before the end.  */
    if (s->base.pc_next & ~mask) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    s->pc_tmp &= mask;

    tsam = tcg_constant_i64(sam);
    tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);

    /* Always exit the TB, since we (may have) changed execution mode.  */
    return DISAS_TOO_MANY;
}
3890 
/* SAR: set access register r1 from the low 32 bits of in2. */
static DisasJumpType op_sar(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_st32_i64(o->in2, tcg_env, offsetof(CPUS390XState, aregs[r1]));
    return DISAS_NEXT;
}
3897 
/* SEB: BFP short subtract, via helper. */
static DisasJumpType op_seb(DisasContext *s, DisasOps *o)
{
    gen_helper_seb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3903 
/* SDB: BFP long subtract, via helper. */
static DisasJumpType op_sdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sdb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3909 
/* SXB: BFP extended (128-bit) subtract, via helper. */
static DisasJumpType op_sxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sxb(o->out_128, tcg_env, o->in1_128, o->in2_128);
    return DISAS_NEXT;
}
3915 
/* SQEB: BFP short square root, via helper. */
static DisasJumpType op_sqeb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqeb(o->out, tcg_env, o->in2);
    return DISAS_NEXT;
}
3921 
/* SQDB: BFP long square root, via helper. */
static DisasJumpType op_sqdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqdb(o->out, tcg_env, o->in2);
    return DISAS_NEXT;
}
3927 
/* SQXB: BFP extended square root, via helper. */
static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqxb(o->out_128, tcg_env, o->in2_128);
    return DISAS_NEXT;
}
3933 
3934 #ifndef CONFIG_USER_ONLY
/* SERVC: service call (SCLP), via helper; CC from helper. */
static DisasJumpType op_servc(DisasContext *s, DisasOps *o)
{
    gen_helper_servc(cc_op, tcg_env, o->in2, o->in1);
    set_cc_static(s);
    return DISAS_NEXT;
}
3941 
/* SIGP: signal processor, via helper; CC from helper. */
static DisasJumpType op_sigp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_sigp(cc_op, tcg_env, o->in2, r1, r3);
    set_cc_static(s);
    return DISAS_NEXT;
}
3951 #endif
3952 
/*
 * STOC/STOCG/STOCFH: store on condition.  The store is skipped by
 * branching over it when the (inverted) condition holds.  insn->data
 * selects the variant: 0 = 32-bit, 1 = 64-bit, 2 = high-word store.
 */
static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;
    TCGv_i64 a, h;
    TCGLabel *lab;
    int r1;

    disas_jcc(s, &c, get_field(s, m3));

    /* We want to store when the condition is fulfilled, so branch
       out when it's not */
    c.cond = tcg_invert_cond(c.cond);

    lab = gen_new_label();
    if (c.is_64) {
        tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
    } else {
        tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
    }

    r1 = get_field(s, r1);
    a = get_address(s, 0, get_field(s, b2), get_field(s, d2));
    switch (s->insn->data) {
    case 1: /* STOCG */
        tcg_gen_qemu_st_i64(regs[r1], a, get_mem_index(s), MO_TEUQ);
        break;
    case 0: /* STOC */
        tcg_gen_qemu_st_i64(regs[r1], a, get_mem_index(s), MO_TEUL);
        break;
    case 2: /* STOCFH */
        /* Store the high 32 bits of the register. */
        h = tcg_temp_new_i64();
        tcg_gen_shri_i64(h, regs[r1], 32);
        tcg_gen_qemu_st_i64(h, a, get_mem_index(s), MO_TEUL);
        break;
    default:
        g_assert_not_reached();
    }

    gen_set_label(lab);
    return DISAS_NEXT;
}
3994 
/* SHIFT LEFT SINGLE (arithmetic).  insn->data holds the bit number of
   the sign bit (31 for the 32-bit form, 63 for the 64-bit form).  */
static DisasJumpType op_sla(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t;
    uint64_t sign = 1ull << s->insn->data;
    if (s->insn->data == 31) {
        /* For the 32-bit form, position the input in the high half so
           the CC computation sees a full 64-bit value.  */
        t = tcg_temp_new_i64();
        tcg_gen_shli_i64(t, o->in1, 32);
    } else {
        t = o->in1;
    }
    gen_op_update2_cc_i64(s, CC_OP_SLA, t, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit.  Copy that over from the source unchanged.  */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return DISAS_NEXT;
}
4014 
/* SHIFT LEFT SINGLE LOGICAL. */
static DisasJumpType op_sll(DisasContext *s, DisasOps *o)
{
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* SHIFT RIGHT SINGLE (arithmetic). */
static DisasJumpType op_sra(DisasContext *s, DisasOps *o)
{
    tcg_gen_sar_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* SHIFT RIGHT SINGLE LOGICAL. */
static DisasJumpType op_srl(DisasContext *s, DisasOps *o)
{
    tcg_gen_shr_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
4032 
/* SET FPC: helper validates and installs the new FPC value. */
static DisasJumpType op_sfpc(DisasContext *s, DisasOps *o)
{
    gen_helper_sfpc(tcg_env, o->in2);
    return DISAS_NEXT;
}

/* SET FPC AND SIGNAL: like SFPC, but may also raise a simulated
   IEEE exception from the helper. */
static DisasJumpType op_sfas(DisasContext *s, DisasOps *o)
{
    gen_helper_sfas(tcg_env, o->in2);
    return DISAS_NEXT;
}
4044 
/* SET BFP ROUNDING MODE (2-bit form). */
static DisasJumpType op_srnm(DisasContext *s, DisasOps *o)
{
    /* Bits other than 62 and 63 are ignored. Bit 29 is set to zero. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0x3ull);
    gen_helper_srnm(tcg_env, o->addr1);
    return DISAS_NEXT;
}
4052 
/* SET BFP ROUNDING MODE (3-bit form); shares the srnm helper. */
static DisasJumpType op_srnmb(DisasContext *s, DisasOps *o)
{
    /* Bits 0-55 are ignored. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0xffull);
    gen_helper_srnm(tcg_env, o->addr1);
    return DISAS_NEXT;
}
4060 
/* SET DFP ROUNDING MODE: the mode is deposited directly into the
   DFP rounding-mode field of the FPC in env. */
static DisasJumpType op_srnmt(DisasContext *s, DisasOps *o)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Bits other than 61-63 are ignored. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0x7ull);

    /* No need to call a helper, we don't implement dfp */
    tcg_gen_ld32u_i64(tmp, tcg_env, offsetof(CPUS390XState, fpc));
    tcg_gen_deposit_i64(tmp, tmp, o->addr1, 4, 3);
    tcg_gen_st32_i64(tmp, tcg_env, offsetof(CPUS390XState, fpc));
    return DISAS_NEXT;
}
4074 
/* SET PROGRAM MASK: the CC is taken from two bits of r1, and four
   further bits of r1 replace the PSW program mask. */
static DisasJumpType op_spm(DisasContext *s, DisasOps *o)
{
    tcg_gen_extrl_i64_i32(cc_op, o->in1);
    tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
    set_cc_static(s);

    /* Move the program-mask nibble into place within psw_mask. */
    tcg_gen_shri_i64(o->in1, o->in1, 24);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
    return DISAS_NEXT;
}
4085 
/* EXTRACT CPU TIME: loads the third operand into r3, stores the
   difference of the first operand and the CPU timer into GR0, and
   copies the second operand into GR1.  Operands are fetched before
   any register is modified so a fault leaves state consistent. */
static DisasJumpType op_ectg(DisasContext *s, DisasOps *o)
{
    int b1 = get_field(s, b1);
    int d1 = get_field(s, d1);
    int b2 = get_field(s, b2);
    int d2 = get_field(s, d2);
    int r3 = get_field(s, r3);
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* fetch all operands first */
    o->in1 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in1, regs[b1], d1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in2, regs[b2], d2);
    o->addr1 = tcg_temp_new_i64();
    gen_addi_and_wrap_i64(s, o->addr1, regs[r3], 0);

    /* load the third operand into r3 before modifying anything */
    tcg_gen_qemu_ld_i64(regs[r3], o->addr1, get_mem_index(s), MO_TEUQ);

    /* subtract CPU timer from first operand and store in GR0 */
    gen_helper_stpt(tmp, tcg_env);
    tcg_gen_sub_i64(regs[0], o->in1, tmp);

    /* store second operand in GR1 */
    tcg_gen_mov_i64(regs[1], o->in2);
    return DISAS_NEXT;
}
4114 
4115 #ifndef CONFIG_USER_ONLY
/* SET PSW KEY FROM ADDRESS: the key nibble from the second operand
   is deposited into the PSW key field. */
static DisasJumpType op_spka(DisasContext *s, DisasOps *o)
{
    tcg_gen_shri_i64(o->in2, o->in2, 4);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
    return DISAS_NEXT;
}

/* SET STORAGE KEY EXTENDED: performed by the helper. */
static DisasJumpType op_sske(DisasContext *s, DisasOps *o)
{
    gen_helper_sske(tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}
4128 
/* Emit a runtime check that raises a specification exception if any
   reserved bit in psw_mask is set after a system-mask update. */
static void gen_check_psw_mask(DisasContext *s)
{
    TCGv_i64 reserved = tcg_temp_new_i64();
    TCGLabel *ok = gen_new_label();

    tcg_gen_andi_i64(reserved, psw_mask, PSW_MASK_RESERVED);
    tcg_gen_brcondi_i64(TCG_COND_EQ, reserved, 0, ok);
    gen_program_exception(s, PGM_SPECIFICATION);
    gen_set_label(ok);
}
4139 
/* SET SYSTEM MASK: replace the high byte of the PSW mask, validate
   it, and exit to the main loop so pending interrupts are seen. */
static DisasJumpType op_ssm(DisasContext *s, DisasOps *o)
{
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);

    gen_check_psw_mask(s);

    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    s->exit_to_mainloop = true;
    return DISAS_TOO_MANY;
}
4150 
/* STORE CPU ADDRESS: the output is the CPU's core id from env. */
static DisasJumpType op_stap(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, tcg_env, offsetof(CPUS390XState, core_id));
    return DISAS_NEXT;
}
4156 #endif
4157 
/* STORE CLOCK: the helper yields the TOD clock value; CC is 0. */
static DisasJumpType op_stck(DisasContext *s, DisasOps *o)
{
    gen_helper_stck(o->out, tcg_env);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}
4165 
/* STORE CLOCK EXTENDED: stores a 16-byte value built from the TOD
   clock and the TOD programmable register. */
static DisasJumpType op_stcke(DisasContext *s, DisasOps *o)
{
    TCGv_i64 c1 = tcg_temp_new_i64();
    TCGv_i64 c2 = tcg_temp_new_i64();
    TCGv_i64 todpr = tcg_temp_new_i64();
    gen_helper_stck(c1, tcg_env);
    /* 16 bit value stored in a uint32_t (only valid bits set) */
    tcg_gen_ld32u_i64(todpr, tcg_env, offsetof(CPUS390XState, todpr));
    /* Shift the 64-bit value into its place as a zero-extended
       104-bit value.  Note that "bit positions 64-103 are always
       non-zero so that they compare differently to STCK"; we set
       the least significant bit to 1.  */
    tcg_gen_shli_i64(c2, c1, 56);
    tcg_gen_shri_i64(c1, c1, 8);
    tcg_gen_ori_i64(c2, c2, 0x10000);
    tcg_gen_or_i64(c2, c2, todpr);
    tcg_gen_qemu_st_i64(c1, o->in2, get_mem_index(s), MO_TEUQ);
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_st_i64(c2, o->in2, get_mem_index(s), MO_TEUQ);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}
4189 
4190 #ifndef CONFIG_USER_ONLY
/* SET CLOCK: helper sets the TOD clock and returns the CC. */
static DisasJumpType op_sck(DisasContext *s, DisasOps *o)
{
    gen_helper_sck(cc_op, tcg_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SET CLOCK COMPARATOR. */
static DisasJumpType op_sckc(DisasContext *s, DisasOps *o)
{
    gen_helper_sckc(tcg_env, o->in2);
    return DISAS_NEXT;
}

/* SET CLOCK PROGRAMMABLE FIELD: value taken from GR0. */
static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o)
{
    gen_helper_sckpf(tcg_env, regs[0]);
    return DISAS_NEXT;
}

/* STORE CLOCK COMPARATOR. */
static DisasJumpType op_stckc(DisasContext *s, DisasOps *o)
{
    gen_helper_stckc(o->out, tcg_env);
    return DISAS_NEXT;
}
4215 
/* STORE CONTROL (64-bit): store control registers r1..r3 at the
   second-operand address via the helper. */
static DisasJumpType op_stctg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_stctg(tcg_env, r1, o->in2, r3);
    return DISAS_NEXT;
}

/* STORE CONTROL (32-bit). */
static DisasJumpType op_stctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_stctl(tcg_env, r1, o->in2, r3);
    return DISAS_NEXT;
}
4233 
/* STORE CPU ID: the output is the cpuid word from env. */
static DisasJumpType op_stidp(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld_i64(o->out, tcg_env, offsetof(CPUS390XState, cpuid));
    return DISAS_NEXT;
}

/* SET CPU TIMER. */
static DisasJumpType op_spt(DisasContext *s, DisasOps *o)
{
    gen_helper_spt(tcg_env, o->in2);
    return DISAS_NEXT;
}

/* STORE FACILITY LIST: helper writes the facility bits to memory. */
static DisasJumpType op_stfl(DisasContext *s, DisasOps *o)
{
    gen_helper_stfl(tcg_env);
    return DISAS_NEXT;
}

/* STORE CPU TIMER. */
static DisasJumpType op_stpt(DisasContext *s, DisasOps *o)
{
    gen_helper_stpt(o->out, tcg_env);
    return DISAS_NEXT;
}
4257 
/* STORE SYSTEM INFORMATION: function code and selectors come from
   GR0/GR1; the helper returns the CC. */
static DisasJumpType op_stsi(DisasContext *s, DisasOps *o)
{
    gen_helper_stsi(cc_op, tcg_env, o->in2, regs[0], regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SET PREFIX. */
static DisasJumpType op_spx(DisasContext *s, DisasOps *o)
{
    gen_helper_spx(tcg_env, o->in2);
    return DISAS_NEXT;
}
4270 
/* Channel-subsystem instructions below all delegate to helpers; the
   subchannel id is conventionally passed in GR1. */

/* CANCEL SUBCHANNEL. */
static DisasJumpType op_xsch(DisasContext *s, DisasOps *o)
{
    gen_helper_xsch(tcg_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* CLEAR SUBCHANNEL. */
static DisasJumpType op_csch(DisasContext *s, DisasOps *o)
{
    gen_helper_csch(tcg_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* HALT SUBCHANNEL. */
static DisasJumpType op_hsch(DisasContext *s, DisasOps *o)
{
    gen_helper_hsch(tcg_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* MODIFY SUBCHANNEL: second operand addresses the SCHIB. */
static DisasJumpType op_msch(DisasContext *s, DisasOps *o)
{
    gen_helper_msch(tcg_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* RESET CHANNEL PATH. */
static DisasJumpType op_rchp(DisasContext *s, DisasOps *o)
{
    gen_helper_rchp(tcg_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* RESUME SUBCHANNEL. */
static DisasJumpType op_rsch(DisasContext *s, DisasOps *o)
{
    gen_helper_rsch(tcg_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SET ADDRESS LIMIT. */
static DisasJumpType op_sal(DisasContext *s, DisasOps *o)
{
    gen_helper_sal(tcg_env, regs[1]);
    return DISAS_NEXT;
}

/* SET CHANNEL MONITOR: GR1/GR2 carry the operands. */
static DisasJumpType op_schm(DisasContext *s, DisasOps *o)
{
    gen_helper_schm(tcg_env, regs[1], regs[2], o->in2);
    return DISAS_NEXT;
}
4324 
/* SIGNAL ADAPTER: not provided by TCG; report "not operational". */
static DisasJumpType op_siga(DisasContext *s, DisasOps *o)
{
    /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
    gen_op_movi_cc(s, 3);
    return DISAS_NEXT;
}

/* STORE CHANNEL PATH STATUS: intentionally a no-op here. */
static DisasJumpType op_stcps(DisasContext *s, DisasOps *o)
{
    /* The instruction is suppressed if not provided. */
    return DISAS_NEXT;
}
4337 
/* START SUBCHANNEL: second operand addresses the ORB. */
static DisasJumpType op_ssch(DisasContext *s, DisasOps *o)
{
    gen_helper_ssch(tcg_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* STORE SUBCHANNEL: second operand addresses the SCHIB. */
static DisasJumpType op_stsch(DisasContext *s, DisasOps *o)
{
    gen_helper_stsch(tcg_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* STORE CHANNEL REPORT WORD. */
static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o)
{
    gen_helper_stcrw(tcg_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST PENDING INTERRUPTION. */
static DisasJumpType op_tpi(DisasContext *s, DisasOps *o)
{
    gen_helper_tpi(cc_op, tcg_env, o->addr1);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST SUBCHANNEL: second operand addresses the IRB. */
static DisasJumpType op_tsch(DisasContext *s, DisasOps *o)
{
    gen_helper_tsch(tcg_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* CHANNEL SUBSYSTEM CALL. */
static DisasJumpType op_chsc(DisasContext *s, DisasOps *o)
{
    gen_helper_chsc(tcg_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4379 
/* STORE PREFIX: load the prefix from env, masked to the valid bits. */
static DisasJumpType op_stpx(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld_i64(o->out, tcg_env, offsetof(CPUS390XState, psa));
    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
    return DISAS_NEXT;
}
4386 
/* STORE THEN AND/OR SYSTEM MASK: store the current system mask byte,
   then AND (opcode 0xac) or OR it with the immediate.  */
static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = get_field(s, i2);
    TCGv_i64 t;

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place.  */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st_i64(t, o->addr1, get_mem_index(s), MO_UB);

    if (s->fields.op == 0xac) {
        /* STNSM: AND the immediate into the high byte of the mask. */
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        /* STOSM: OR the immediate into the high byte of the mask. */
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }

    gen_check_psw_mask(s);

    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    s->exit_to_mainloop = true;
    return DISAS_TOO_MANY;
}
4412 
/* STORE USING REAL ADDRESS: store via the real-mode MMU index; the
   memop size comes from insn->data.  Notify PER if it is enabled. */
static DisasJumpType op_stura(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->in1, o->in2, MMU_REAL_IDX, s->insn->data);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        update_psw_addr(s);
        gen_helper_per_store_real(tcg_env);
    }
    return DISAS_NEXT;
}
4423 #endif
4424 
/* STORE FACILITY LIST EXTENDED: helper writes the list, returns CC. */
static DisasJumpType op_stfle(DisasContext *s, DisasOps *o)
{
    gen_helper_stfle(cc_op, tcg_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4431 
/* Plain stores of 1, 2, 4 and 8 bytes.  For the 32/64-bit forms,
   insn->data may add extra memop flags (e.g. alignment). */
static DisasJumpType op_st8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s), MO_UB);
    return DISAS_NEXT;
}

static DisasJumpType op_st16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s), MO_TEUW);
    return DISAS_NEXT;
}

static DisasJumpType op_st32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->in1, o->in2, get_mem_index(s),
                       MO_TEUL | s->insn->data);
    return DISAS_NEXT;
}

static DisasJumpType op_st64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s),
                        MO_TEUQ | s->insn->data);
    return DISAS_NEXT;
}
4457 
/* STORE ACCESS MULTIPLE: helper stores access registers r1..r3. */
static DisasJumpType op_stam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_stam(tcg_env, r1, o->in2, r3);
    return DISAS_NEXT;
}
4466 
4467 static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
4468 {
4469     int m3 = get_field(s, m3);
4470     int pos, base = s->insn->data;
4471     TCGv_i64 tmp = tcg_temp_new_i64();
4472 
4473     pos = base + ctz32(m3) * 8;
4474     switch (m3) {
4475     case 0xf:
4476         /* Effectively a 32-bit store.  */
4477         tcg_gen_shri_i64(tmp, o->in1, pos);
4478         tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_TEUL);
4479         break;
4480 
4481     case 0xc:
4482     case 0x6:
4483     case 0x3:
4484         /* Effectively a 16-bit store.  */
4485         tcg_gen_shri_i64(tmp, o->in1, pos);
4486         tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_TEUW);
4487         break;
4488 
4489     case 0x8:
4490     case 0x4:
4491     case 0x2:
4492     case 0x1:
4493         /* Effectively an 8-bit store.  */
4494         tcg_gen_shri_i64(tmp, o->in1, pos);
4495         tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_UB);
4496         break;
4497 
4498     default:
4499         /* This is going to be a sequence of shifts and stores.  */
4500         pos = base + 32 - 8;
4501         while (m3) {
4502             if (m3 & 0x8) {
4503                 tcg_gen_shri_i64(tmp, o->in1, pos);
4504                 tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_UB);
4505                 tcg_gen_addi_i64(o->in2, o->in2, 1);
4506             }
4507             m3 = (m3 << 1) & 0xf;
4508             pos -= 8;
4509         }
4510         break;
4511     }
4512     return DISAS_NEXT;
4513 }
4514 
/* STORE MULTIPLE: store registers r1..r3 (wrapping at 15) to
   consecutive slots of size insn->data (4 or 8 bytes). */
static DisasJumpType op_stm(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    int size = s->insn->data;
    TCGv_i64 tsize = tcg_constant_i64(size);

    while (1) {
        tcg_gen_qemu_st_i64(regs[r1], o->in2, get_mem_index(s),
                            size == 8 ? MO_TEUQ : MO_TEUL);
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, tsize);
        r1 = (r1 + 1) & 15;
    }

    return DISAS_NEXT;
}
4534 
/* STORE MULTIPLE HIGH: store the high 32 bits of registers r1..r3
   (wrapping at 15) to consecutive words. */
static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_constant_i64(4);
    TCGv_i64 t32 = tcg_constant_i64(32);

    while (1) {
        /* The TEUL store takes the low half, so shift the high half down
           by storing via a left-shift... no: shl positions the high word
           where a big-endian 32-bit store picks it up via truncation.
           NOTE(review): t holds regs[r1] << 32; MO_TEUL stores its low
           32 bits, i.e. zero -- presumably MO_TEUL truncates the i64 to
           its low word after the shift; confirm against tcg semantics. */
        tcg_gen_shl_i64(t, regs[r1], t32);
        tcg_gen_qemu_st_i64(t, o->in2, get_mem_index(s), MO_TEUL);
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }
    return DISAS_NEXT;
}
4554 
/* STORE PAIR TO QUADWORD: 16-byte aligned atomic store of the
   register pair (out2:out concatenated into a 128-bit value). */
static DisasJumpType op_stpq(DisasContext *s, DisasOps *o)
{
    TCGv_i128 t16 = tcg_temp_new_i128();

    tcg_gen_concat_i64_i128(t16, o->out2, o->out);
    tcg_gen_qemu_st_i128(t16, o->in2, get_mem_index(s),
                         MO_TE | MO_128 | MO_ALIGN);
    return DISAS_NEXT;
}
4564 
/* SEARCH STRING: helper scans for the byte in GR0; CC from helper. */
static DisasJumpType op_srst(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_srst(tcg_env, r1, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SEARCH STRING UNICODE. */
static DisasJumpType op_srstu(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_srstu(tcg_env, r1, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4584 
/* SUBTRACT: plain 64-bit difference; CC handled by the cout hook. */
static DisasJumpType op_sub(DisasContext *s, DisasOps *o)
{
    tcg_gen_sub_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* SUBTRACT LOGICAL (64-bit): compute difference and leave the borrow
   (0 or -1) in cc_src for the CC computation. */
static DisasJumpType op_subu64(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_sub2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    return DISAS_NEXT;
}
4597 
/* Compute borrow (0, -1) into cc_src. */
static void compute_borrow(DisasContext *s)
{
    /* Note the deliberate fall-throughs: the generic path computes a
       static CC first, and the static path converts carry to borrow
       through the ADDU case.  */
    switch (s->cc_op) {
    case CC_OP_SUBU:
        /* The borrow value is already in cc_src (0,-1). */
        break;
    default:
        gen_op_calc_cc(s);
        /* fall through */
    case CC_OP_STATIC:
        /* The carry flag is the msb of CC; compute into cc_src. */
        tcg_gen_extu_i32_i64(cc_src, cc_op);
        tcg_gen_shri_i64(cc_src, cc_src, 1);
        /* fall through */
    case CC_OP_ADDU:
        /* Convert carry (1,0) to borrow (0,-1). */
        tcg_gen_subi_i64(cc_src, cc_src, 1);
        break;
    }
}
4619 
/* SUBTRACT WITH BORROW (32-bit): out = in1 + borrow - in2. */
static DisasJumpType op_subb32(DisasContext *s, DisasOps *o)
{
    compute_borrow(s);

    /* Borrow is {0, -1}, so add to subtract. */
    tcg_gen_add_i64(o->out, o->in1, cc_src);
    tcg_gen_sub_i64(o->out, o->out, o->in2);
    return DISAS_NEXT;
}
4629 
/* SUBTRACT WITH BORROW (64-bit): 128-bit arithmetic keeps the new
   borrow in cc_src for the CC computation. */
static DisasJumpType op_subb64(DisasContext *s, DisasOps *o)
{
    compute_borrow(s);

    /*
     * Borrow is {0, -1}, so add to subtract; replicate the
     * borrow input to produce 128-bit -1 for the addition.
     */
    TCGv_i64 zero = tcg_constant_i64(0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, cc_src);
    tcg_gen_sub2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);

    return DISAS_NEXT;
}
4644 
/* SUPERVISOR CALL: record the SVC code and instruction length in env,
   then raise the SVC exception.  PSW state must be synced first so the
   exception sees a consistent address and CC. */
static DisasJumpType op_svc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t;

    update_psw_addr(s);
    update_cc_op(s);

    t = tcg_constant_i32(get_field(s, i1) & 0xff);
    tcg_gen_st_i32(t, tcg_env, offsetof(CPUS390XState, int_svc_code));

    t = tcg_constant_i32(s->ilen);
    tcg_gen_st_i32(t, tcg_env, offsetof(CPUS390XState, int_svc_ilen));

    gen_exception(EXCP_SVC);
    return DISAS_NORETURN;
}
4661 
4662 static DisasJumpType op_tam(DisasContext *s, DisasOps *o)
4663 {
4664     int cc = 0;
4665 
4666     cc |= (s->base.tb->flags & FLAG_MASK_64) ? 2 : 0;
4667     cc |= (s->base.tb->flags & FLAG_MASK_32) ? 1 : 0;
4668     gen_op_movi_cc(s, cc);
4669     return DISAS_NEXT;
4670 }
4671 
/* TEST DATA CLASS (short BFP): helper compares against the mask. */
static DisasJumpType op_tceb(DisasContext *s, DisasOps *o)
{
    gen_helper_tceb(cc_op, tcg_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST DATA CLASS (long BFP). */
static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcdb(cc_op, tcg_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST DATA CLASS (extended BFP): 128-bit first operand. */
static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcxb(cc_op, tcg_env, o->in1_128, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4692 
4693 #ifndef CONFIG_USER_ONLY
4694 
/* TEST BLOCK: helper clears/tests the block and returns the CC. */
static DisasJumpType op_testblock(DisasContext *s, DisasOps *o)
{
    gen_helper_testblock(cc_op, tcg_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST PROTECTION. */
static DisasJumpType op_tprot(DisasContext *s, DisasOps *o)
{
    gen_helper_tprot(cc_op, tcg_env, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4708 
4709 #endif
4710 
/* TEST DECIMAL: length field l1 is encoded minus one. */
static DisasJumpType op_tp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l1 = tcg_constant_i32(get_field(s, l1) + 1);

    gen_helper_tp(cc_op, tcg_env, o->addr1, l1);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TRANSLATE: table-driven byte translation in the helper. */
static DisasJumpType op_tr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_tr(tcg_env, l, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TRANSLATE EXTENDED: helper returns the updated address pair. */
static DisasJumpType op_tre(DisasContext *s, DisasOps *o)
{
    TCGv_i128 pair = tcg_temp_new_i128();

    gen_helper_tre(pair, tcg_env, o->out, o->out2, o->in2);
    tcg_gen_extr_i128_i64(o->out2, o->out, pair);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TRANSLATE AND TEST. */
static DisasJumpType op_trt(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_trt(cc_op, tcg_env, l, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TRANSLATE AND TEST REVERSE. */
static DisasJumpType op_trtr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_trtr(cc_op, tcg_env, l, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4756 
/* Common generator for the TRxx translate instructions; the low two
   opcode bits select the source/destination character sizes.  The test
   byte/character comes from GR0 unless the ETF2-ENH m3 bit disables it. */
static DisasJumpType op_trXX(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
    TCGv_i32 sizes = tcg_constant_i32(s->insn->opc & 3);
    TCGv_i32 tst = tcg_temp_new_i32();
    int m3 = get_field(s, m3);

    if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
        m3 = 0;
    }
    if (m3 & 1) {
        /* -1 tells the helper no test character is used. */
        tcg_gen_movi_i32(tst, -1);
    } else {
        tcg_gen_extrl_i64_i32(tst, regs[0]);
        if (s->insn->opc & 3) {
            tcg_gen_ext8u_i32(tst, tst);
        } else {
            tcg_gen_ext16u_i32(tst, tst);
        }
    }
    gen_helper_trXX(cc_op, tcg_env, r1, r2, tst, sizes);

    set_cc_static(s);
    return DISAS_NEXT;
}
4783 
/* TEST AND SET: atomically exchange the byte with 0xff; the CC is the
   former leftmost bit of the byte. */
static DisasJumpType op_ts(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_constant_i32(0xff);

    tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
    tcg_gen_extract_i32(cc_op, t1, 7, 1);
    set_cc_static(s);
    return DISAS_NEXT;
}
4793 
/* UNPACK: decimal unpack performed entirely in the helper. */
static DisasJumpType op_unpk(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_unpk(tcg_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}
4801 
4802 static DisasJumpType op_unpka(DisasContext *s, DisasOps *o)
4803 {
4804     int l1 = get_field(s, l1) + 1;
4805     TCGv_i32 l;
4806 
4807     /* The length must not exceed 32 bytes.  */
4808     if (l1 > 32) {
4809         gen_program_exception(s, PGM_SPECIFICATION);
4810         return DISAS_NORETURN;
4811     }
4812     l = tcg_constant_i32(l1);
4813     gen_helper_unpka(cc_op, tcg_env, o->addr1, l, o->in2);
4814     set_cc_static(s);
4815     return DISAS_NEXT;
4816 }
4817 
/* UNPACK UNICODE: like UNPKA but with a 64-byte, even-length limit. */
static DisasJumpType op_unpku(DisasContext *s, DisasOps *o)
{
    int l1 = get_field(s, l1) + 1;
    TCGv_i32 l;

    /* The length must be even and should not exceed 64 bytes.  */
    if ((l1 & 1) || (l1 > 64)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_constant_i32(l1);
    gen_helper_unpku(cc_op, tcg_env, o->addr1, l, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4833 
4834 
/* EXCLUSIVE OR (character): XC.  When both operands are the same
   location the result is all zeros, so short lengths are inlined as a
   sequence of zero stores; otherwise the helper does the byte loop. */
static DisasJumpType op_xc(DisasContext *s, DisasOps *o)
{
    int d1 = get_field(s, d1);
    int d2 = get_field(s, d2);
    int b1 = get_field(s, b1);
    int b2 = get_field(s, b2);
    int l = get_field(s, l1);
    TCGv_i32 t32;

    o->addr1 = get_address(s, 0, b1, d1);

    /* If the addresses are identical, this is a store/memset of zero.  */
    if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
        o->in2 = tcg_constant_i64(0);

        /* Emit the widest stores possible: 8, 4, 2, then 1 byte. */
        l++;
        while (l >= 8) {
            tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UQ);
            l -= 8;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 8);
            }
        }
        if (l >= 4) {
            tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UL);
            l -= 4;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 4);
            }
        }
        if (l >= 2) {
            tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UW);
            l -= 2;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 2);
            }
        }
        if (l) {
            tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UB);
        }
        /* XOR with itself always yields zero, hence CC 0. */
        gen_op_movi_cc(s, 0);
        return DISAS_NEXT;
    }

    /* But in general we'll defer to a helper.  */
    o->in2 = get_address(s, 0, b2, d2);
    t32 = tcg_constant_i32(l);
    gen_helper_xc(cc_op, tcg_env, t32, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4886 
/* EXCLUSIVE OR: plain 64-bit XOR; CC handled by the cout hook. */
static DisasJumpType op_xor(DisasContext *s, DisasOps *o)
{
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* EXCLUSIVE OR immediate: insn->data packs the field size (high byte)
   and its shift (low byte) within the register. */
static DisasJumpType op_xori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, o->in2, shift);
    tcg_gen_xor_i64(o->out, o->in1, t);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
4908 
/* EXCLUSIVE OR immediate to memory (XI and friends): done atomically
   when the interlocked-access facility is present, otherwise as a
   load/modify/store sequence.  Memop comes from insn->data. */
static DisasJumpType op_xi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
4929 
/* Produce a zero output operand. */
static DisasJumpType op_zero(DisasContext *s, DisasOps *o)
{
    o->out = tcg_constant_i64(0);
    return DISAS_NEXT;
}

/* Produce two zero output operands (shared constant). */
static DisasJumpType op_zero2(DisasContext *s, DisasOps *o)
{
    o->out = tcg_constant_i64(0);
    o->out2 = o->out;
    return DISAS_NEXT;
}
4942 
4943 #ifndef CONFIG_USER_ONLY
/* zPCI instructions: all delegate to helpers that return the CC. */

/* CALL LOGICAL PROCESSOR. */
static DisasJumpType op_clp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_clp(tcg_env, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* PCI LOAD. */
static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_pcilg(tcg_env, r1, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* PCI STORE. */
static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_pcistg(tcg_env, r1, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* STORE PCI FUNCTION CONTROLS. */
static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));

    gen_helper_stpcifc(tcg_env, r1, o->addr1, ar);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SET INTERRUPTION CONTROLS. */
static DisasJumpType op_sic(DisasContext *s, DisasOps *o)
{
    gen_helper_sic(tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* REFRESH PCI TRANSLATIONS. */
static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_rpcit(tcg_env, r1, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* PCI STORE BLOCK. */
static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
    TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));

    gen_helper_pcistb(tcg_env, r1, r3, o->addr1, ar);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* MODIFY PCI FUNCTION CONTROLS. */
static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));

    gen_helper_mpcifc(tcg_env, r1, o->addr1, ar);
    set_cc_static(s);
    return DISAS_NEXT;
}
5019 #endif
5020 
5021 #include "translate_vx.c.inc"
5022 
5023 /* ====================================================================== */
5024 /* The "Cc OUTput" generators.  Given the generated output (and in some cases
5025    the original inputs), update the various cc data structures in order to
5026    be able to compute the new condition code.  */
5027 
/*
 * Each cout helper records the CC_OP and the value(s) it needs into the
 * cc_* globals so the condition code can be computed lazily later.
 */
static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}

static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}

static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}

static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}

static void cout_addu32(DisasContext *s, DisasOps *o)
{
    /* Split the 64-bit result: carry-out into cc_src, 32-bit sum into cc_dst. */
    tcg_gen_shri_i64(cc_src, o->out, 32);
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, cc_dst);
}

static void cout_addu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, o->out);
}

static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}

static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}

static void cout_cmpu32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
}

static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}

static void cout_f32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
}

static void cout_f64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
}

static void cout_f128(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
}

static void cout_nabs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
}

static void cout_nabs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
}

static void cout_neg32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
}

static void cout_neg64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
}

static void cout_nz32(DisasContext *s, DisasOps *o)
{
    /* Only the low 32 bits of the result participate in the NZ test. */
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
}

static void cout_nz64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
}

static void cout_s32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
}

static void cout_s64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
}

static void cout_subs32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
}

static void cout_subs64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
}

static void cout_subu32(DisasContext *s, DisasOps *o)
{
    /* cc_src gets the sign-extended borrow, cc_dst the 32-bit difference. */
    tcg_gen_sari_i64(cc_src, o->out, 32);
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, cc_dst);
}

static void cout_subu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, o->out);
}

static void cout_tm32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
}

static void cout_tm64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
}

static void cout_muls32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_MULS_32, o->out);
}

static void cout_muls64(DisasContext *s, DisasOps *o)
{
    /* out contains "high" part, out2 contains "low" part of 128 bit result */
    gen_op_update2_cc_i64(s, CC_OP_MULS_64, o->out, o->out2);
}
5178 
5179 /* ====================================================================== */
5180 /* The "PREParation" generators.  These initialize the DisasOps.OUT fields
5181    with the TCG register to which we will write.  Used in combination with
5182    the "wout" generators, in some cases we need a new temporary, and in
5183    some cases we can write to a TCG global.  */
5184 
/* Allocate a fresh temporary for the 64-bit result. */
static void prep_new(DisasContext *s, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}
#define SPEC_prep_new 0

/* Allocate a fresh temporary pair (out/out2) for a double-width result. */
static void prep_new_P(DisasContext *s, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}
#define SPEC_prep_new_P 0

/* Allocate a fresh 128-bit temporary for the result. */
static void prep_new_x(DisasContext *s, DisasOps *o)
{
    o->out_128 = tcg_temp_new_i128();
}
#define SPEC_prep_new_x 0

/* Write directly into the r1 register global. */
static void prep_r1(DisasContext *s, DisasOps *o)
{
    o->out = regs[get_field(s, r1)];
}
#define SPEC_prep_r1 0

/* Write directly into the even/odd register pair r1/r1+1. */
static void prep_r1_P(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->out = regs[r1];
    o->out2 = regs[r1 + 1];
}
#define SPEC_prep_r1_P SPEC_r1_even
5217 
5218 /* ====================================================================== */
5219 /* The "Write OUTput" generators.  These generally perform some non-trivial
5220    copy of data to TCG globals, or to main memory.  The trivial cases are
5221    generally handled by having a "prep" generator install the TCG global
5222    as the destination of the operation.  */
5223 
/* Store the full 64-bit result into r1. */
static void wout_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->out);
}
#define SPEC_wout_r1 0

/* Store out2 (not out) into r1; used when the insn's result lives there. */
static void wout_out2_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->out2);
}
#define SPEC_wout_out2_r1 0

/* Insert only the low 8 bits of the result into r1, preserving the rest. */
static void wout_r1_8(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}
#define SPEC_wout_r1_8 0

/* Insert only the low 16 bits of the result into r1, preserving the rest. */
static void wout_r1_16(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
}
#define SPEC_wout_r1_16 0

/* Store the result into the low 32 bits of r1. */
static void wout_r1_32(DisasContext *s, DisasOps *o)
{
    store_reg32_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_r1_32 0

/* Store the result into the high 32 bits of r1. */
static void wout_r1_32h(DisasContext *s, DisasOps *o)
{
    store_reg32h_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_r1_32h 0

/* Store out/out2 into the low halves of the even/odd pair r1/r1+1. */
static void wout_r1_P32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64(r1 + 1, o->out2);
}
#define SPEC_wout_r1_P32 SPEC_r1_even

/* Split a 64-bit result across the pair: low half to r1+1, high half to r1. */
static void wout_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    TCGv_i64 t = tcg_temp_new_i64();
    store_reg32_i64(r1 + 1, o->out);
    tcg_gen_shri_i64(t, o->out, 32);
    store_reg32_i64(r1, t);
}
#define SPEC_wout_r1_D32 SPEC_r1_even

/* Split a 128-bit result: low doubleword to r1+1, high doubleword to r1. */
static void wout_r1_D64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_extr_i128_i64(regs[r1 + 1], regs[r1], o->out_128);
}
#define SPEC_wout_r1_D64 SPEC_r1_even

/* As wout_r1_P32, but targeting the r3/r3+1 pair. */
static void wout_r3_P32(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    store_reg32_i64(r3, o->out);
    store_reg32_i64(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P32 SPEC_r3_even

/* Store the 64-bit pair out/out2 into r3/r3+1. */
static void wout_r3_P64(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    store_reg(r3, o->out);
    store_reg(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P64 SPEC_r3_even

/* Store a short (32-bit) FP result into f1. */
static void wout_e1(DisasContext *s, DisasOps *o)
{
    store_freg32_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_e1 0

/* Store a long (64-bit) FP result into f1. */
static void wout_f1(DisasContext *s, DisasOps *o)
{
    store_freg(get_field(s, r1), o->out);
}
#define SPEC_wout_f1 0

static void wout_x1(DisasContext *s, DisasOps *o)
{
    int f1 = get_field(s, r1);

    /* Split out_128 into out+out2 for cout_f128. */
    tcg_debug_assert(o->out == NULL);
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(o->out2, o->out, o->out_128);
    store_freg(f1, o->out);
    store_freg(f1 + 2, o->out2);
}
#define SPEC_wout_x1 SPEC_r1_f128

/* Store an extended FP result already split into out/out2 into f1/f1+2. */
static void wout_x1_P(DisasContext *s, DisasOps *o)
{
    int f1 = get_field(s, r1);
    store_freg(f1, o->out);
    store_freg(f1 + 2, o->out2);
}
#define SPEC_wout_x1_P SPEC_r1_f128
5337 
5338 static void wout_cond_r1r2_32(DisasContext *s, DisasOps *o)
5339 {
5340     if (get_field(s, r1) != get_field(s, r2)) {
5341         store_reg32_i64(get_field(s, r1), o->out);
5342     }
5343 }
5344 #define SPEC_wout_cond_r1r2_32 0
5345 
/* Store a short FP result into f1, but only if r1 != r2. */
static void wout_cond_e1e2(DisasContext *s, DisasOps *o)
{
    if (get_field(s, r1) != get_field(s, r2)) {
        store_freg32_i64(get_field(s, r1), o->out);
    }
}
#define SPEC_wout_cond_e1e2 0

/* Store the result to memory at addr1; width is encoded in the name. */
static void wout_m1_8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_UB);
}
#define SPEC_wout_m1_8 0

static void wout_m1_16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUW);
}
#define SPEC_wout_m1_16 0

#ifndef CONFIG_USER_ONLY
/* The "a" variants additionally enforce natural alignment (MO_ALIGN). */
static void wout_m1_16a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN);
}
#define SPEC_wout_m1_16a 0
#endif

static void wout_m1_32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUL);
}
#define SPEC_wout_m1_32 0

#ifndef CONFIG_USER_ONLY
static void wout_m1_32a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_wout_m1_32a 0
#endif

static void wout_m1_64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUQ);
}
#define SPEC_wout_m1_64 0

#ifndef CONFIG_USER_ONLY
static void wout_m1_64a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUQ | MO_ALIGN);
}
#define SPEC_wout_m1_64a 0
#endif

/* Store 32 bits of the result to memory at the address held in in2. */
static void wout_m2_32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->in2, get_mem_index(s), MO_TEUL);
}
#define SPEC_wout_m2_32 0

/* Write the second input operand (not the result) back into r1. */
static void wout_in2_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->in2);
}
#define SPEC_wout_in2_r1 0

static void wout_in2_r1_32(DisasContext *s, DisasOps *o)
{
    store_reg32_i64(get_field(s, r1), o->in2);
}
#define SPEC_wout_in2_r1_32 0
5419 
5420 /* ====================================================================== */
5421 /* The "INput 1" generators.  These load the first operand to an insn.  */
5422 
/* Load r1 into a fresh copy. */
static void in1_r1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r1));
}
#define SPEC_in1_r1 0

/* "_o": alias the register global directly — no copy is made. */
static void in1_r1_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r1)];
}
#define SPEC_in1_r1_o 0

static void in1_r1_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1)]);
}
#define SPEC_in1_r1_32s 0

static void in1_r1_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1)]);
}
#define SPEC_in1_r1_32u 0

/* "_sr32": the high 32 bits of the register, shifted down. */
static void in1_r1_sr32(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(s, r1)], 32);
}
#define SPEC_in1_r1_sr32 0

/* "r1p1": the odd register of the even/odd pair r1/r1+1. */
static void in1_r1p1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r1) + 1);
}
#define SPEC_in1_r1p1 SPEC_r1_even

static void in1_r1p1_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r1) + 1];
}
#define SPEC_in1_r1p1_o SPEC_r1_even

static void in1_r1p1_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1) + 1]);
}
#define SPEC_in1_r1p1_32s SPEC_r1_even

static void in1_r1p1_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1) + 1]);
}
#define SPEC_in1_r1p1_32u SPEC_r1_even

/* "_D32": 64-bit value built from the 32-bit halves in r1 (high) and r1+1 (low). */
static void in1_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
}
#define SPEC_in1_r1_D32 SPEC_r1_even

static void in1_r2(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r2));
}
#define SPEC_in1_r2 0

static void in1_r2_sr32(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(s, r2)], 32);
}
#define SPEC_in1_r2_sr32 0

static void in1_r2_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r2)]);
}
#define SPEC_in1_r2_32u 0

static void in1_r3(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r3));
}
#define SPEC_in1_r3 0

static void in1_r3_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r3)];
}
#define SPEC_in1_r3_o 0

static void in1_r3_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r3)]);
}
#define SPEC_in1_r3_32s 0

static void in1_r3_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r3)]);
}
#define SPEC_in1_r3_32u 0

static void in1_r3_D32(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
}
#define SPEC_in1_r3_D32 SPEC_r3_even

static void in1_r3_sr32(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(s, r3)], 32);
}
#define SPEC_in1_r3_sr32 0

/* Short (32-bit) FP operand from f1. */
static void in1_e1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(s, r1));
}
#define SPEC_in1_e1 0

/* Long (64-bit) FP operand from f1. */
static void in1_f1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r1));
}
#define SPEC_in1_f1 0

/* Extended (128-bit) FP operand from the f1/f1+2 pair. */
static void in1_x1(DisasContext *s, DisasOps *o)
{
    o->in1_128 = load_freg_128(get_field(s, r1));
}
#define SPEC_in1_x1 SPEC_r1_f128

/* Load the high double word of an extended (128-bit) format FP number */
static void in1_x2h(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r2));
}
#define SPEC_in1_x2h SPEC_r2_f128

static void in1_f3(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r3));
}
#define SPEC_in1_f3 0

/* Compute the b1+d1 effective address into addr1 (no memory access). */
static void in1_la1(DisasContext *s, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
}
#define SPEC_in1_la1 0

/* Compute the x2+b2+d2 effective address into addr1 (no memory access). */
static void in1_la2(DisasContext *s, DisasOps *o)
{
    int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
}
#define SPEC_in1_la2 0

/* The m1 loaders compute addr1 and then load the operand from memory. */
static void in1_m1_8u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_UB);
}
#define SPEC_in1_m1_8u 0

static void in1_m1_16s(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TESW);
}
#define SPEC_in1_m1_16s 0

static void in1_m1_16u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUW);
}
#define SPEC_in1_m1_16u 0

static void in1_m1_32s(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TESL);
}
#define SPEC_in1_m1_32s 0

static void in1_m1_32u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUL);
}
#define SPEC_in1_m1_32u 0

static void in1_m1_64(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUQ);
}
#define SPEC_in1_m1_64 0
5642 
5643 /* ====================================================================== */
5644 /* The "INput 2" generators.  These load the second operand to an insn.  */
5645 
/* "_o": alias the r1 register global directly — no copy is made. */
static void in2_r1_o(DisasContext *s, DisasOps *o)
{
    o->in2 = regs[get_field(s, r1)];
}
#define SPEC_in2_r1_o 0

static void in2_r1_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r1)]);
}
#define SPEC_in2_r1_16u 0

static void in2_r1_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r1)]);
}
#define SPEC_in2_r1_32u 0

/* 64-bit value built from the 32-bit halves in r1 (high) and r1+1 (low). */
static void in2_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
}
#define SPEC_in2_r1_D32 SPEC_r1_even

static void in2_r2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_reg(get_field(s, r2));
}
#define SPEC_in2_r2 0

static void in2_r2_o(DisasContext *s, DisasOps *o)
{
    o->in2 = regs[get_field(s, r2)];
}
#define SPEC_in2_r2_o 0
5685 
5686 static void in2_r2_nz(DisasContext *s, DisasOps *o)
5687 {
5688     int r2 = get_field(s, r2);
5689     if (r2 != 0) {
5690         o->in2 = load_reg(r2);
5691     }
5692 }
5693 #define SPEC_in2_r2_nz 0
5694 
/* Sign/zero-extended views of r2/r3; width and signedness per the name. */
static void in2_r2_8s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_8s 0

static void in2_r2_8u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_8u 0

static void in2_r2_16s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_16s 0

static void in2_r2_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_16u 0

static void in2_r3(DisasContext *s, DisasOps *o)
{
    o->in2 = load_reg(get_field(s, r3));
}
#define SPEC_in2_r3 0

/* 128-bit value from the even/odd pair: r3 is the high doubleword. */
static void in2_r3_D64(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    o->in2_128 = tcg_temp_new_i128();
    tcg_gen_concat_i64_i128(o->in2_128, regs[r3 + 1], regs[r3]);
}
#define SPEC_in2_r3_D64 SPEC_r3_even

static void in2_r3_sr32(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(s, r3)], 32);
}
#define SPEC_in2_r3_sr32 0

static void in2_r3_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r3)]);
}
#define SPEC_in2_r3_32u 0

static void in2_r2_32s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_32s 0

static void in2_r2_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_32u 0

static void in2_r2_sr32(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(s, r2)], 32);
}
#define SPEC_in2_r2_sr32 0

/* Short (32-bit) FP operand from f2. */
static void in2_e2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(s, r2));
}
#define SPEC_in2_e2 0

/* Long (64-bit) FP operand from f2. */
static void in2_f2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg(get_field(s, r2));
}
#define SPEC_in2_f2 0

/* Extended (128-bit) FP operand from the f2/f2+2 pair. */
static void in2_x2(DisasContext *s, DisasOps *o)
{
    o->in2_128 = load_freg_128(get_field(s, r2));
}
#define SPEC_in2_x2 SPEC_r2_f128

/* Load the low double word of an extended (128-bit) format FP number */
static void in2_x2l(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg(get_field(s, r2) + 2);
}
#define SPEC_in2_x2l SPEC_r2_f128

/* Register value treated as an address, wrapped per addressing mode. */
static void in2_ra2(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);

    /* Note: *don't* treat !r2 as 0, use the reg value. */
    o->in2 = tcg_temp_new_i64();
    gen_addi_and_wrap_i64(s, o->in2, regs[r2], 0);
}
#define SPEC_in2_ra2 0

/* Same as in2_ra2, but the SPEC additionally requires r2 to be even. */
static void in2_ra2_E(DisasContext *s, DisasOps *o)
{
    return in2_ra2(s, o);
}
#define SPEC_in2_ra2_E SPEC_r2_even

/* Compute the x2+b2+d2 effective address into in2 (no memory access). */
static void in2_a2(DisasContext *s, DisasOps *o)
{
    int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
    o->in2 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
}
#define SPEC_in2_a2 0

/*
 * Relative-immediate target: either a compile-time constant
 * (pc_next + imm * 2) or the dynamic value produced by disas_jdest.
 */
static TCGv gen_ri2(DisasContext *s)
{
    TCGv ri2 = NULL;
    bool is_imm;
    int imm;

    disas_jdest(s, i2, is_imm, imm, ri2);
    if (is_imm) {
        ri2 = tcg_constant_i64(s->base.pc_next + (int64_t)imm * 2);
    }

    return ri2;
}

static void in2_ri2(DisasContext *s, DisasOps *o)
{
    o->in2 = gen_ri2(s);
}
#define SPEC_in2_ri2 0
5839 
5840 static void in2_sh(DisasContext *s, DisasOps *o)
5841 {
5842     int b2 = get_field(s, b2);
5843     int d2 = get_field(s, d2);
5844 
5845     if (b2 == 0) {
5846         o->in2 = tcg_constant_i64(d2 & 0x3f);
5847     } else {
5848         o->in2 = get_address(s, 0, b2, d2);
5849         tcg_gen_andi_i64(o->in2, o->in2, 0x3f);
5850     }
5851 }
5852 #define SPEC_in2_sh 0
5853 
/*
 * The m2 loaders compute the a2 effective address into in2 and then
 * replace it in place with the value loaded from that address.
 */
static void in2_m2_8u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_UB);
}
#define SPEC_in2_m2_8u 0

static void in2_m2_16s(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TESW);
}
#define SPEC_in2_m2_16s 0

static void in2_m2_16u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUW);
}
#define SPEC_in2_m2_16u 0

static void in2_m2_32s(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TESL);
}
#define SPEC_in2_m2_32s 0

static void in2_m2_32u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUL);
}
#define SPEC_in2_m2_32u 0

#ifndef CONFIG_USER_ONLY
/* The "a" variants additionally enforce natural alignment (MO_ALIGN). */
static void in2_m2_32ua(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_tl(o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_in2_m2_32ua 0
#endif

static void in2_m2_64(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ);
}
#define SPEC_in2_m2_64 0

/* "_64w": the loaded value is itself wrapped per the addressing mode. */
static void in2_m2_64w(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ);
    gen_addi_and_wrap_i64(s, o->in2, o->in2, 0);
}
#define SPEC_in2_m2_64w 0

#ifndef CONFIG_USER_ONLY
static void in2_m2_64a(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ | MO_ALIGN);
}
#define SPEC_in2_m2_64a 0
#endif

/* The mri2 loaders read memory at the relative-immediate address (gen_ri2). */
static void in2_mri2_16s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s), MO_TESW);
}
#define SPEC_in2_mri2_16s 0

static void in2_mri2_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s), MO_TEUW);
}
#define SPEC_in2_mri2_16u 0

static void in2_mri2_32s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_tl(o->in2, gen_ri2(s), get_mem_index(s),
                       MO_TESL | MO_ALIGN);
}
#define SPEC_in2_mri2_32s 0

static void in2_mri2_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_tl(o->in2, gen_ri2(s), get_mem_index(s),
                       MO_TEUL | MO_ALIGN);
}
#define SPEC_in2_mri2_32u 0

static void in2_mri2_64(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s),
                        MO_TEUQ | MO_ALIGN);
}
#define SPEC_in2_mri2_64 0

/* Immediate operands; extension/shift per the name suffix. */
static void in2_i2(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64(get_field(s, i2));
}
#define SPEC_in2_i2 0

static void in2_i2_8u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64((uint8_t)get_field(s, i2));
}
#define SPEC_in2_i2_8u 0

static void in2_i2_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64((uint16_t)get_field(s, i2));
}
#define SPEC_in2_i2_16u 0

static void in2_i2_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64((uint32_t)get_field(s, i2));
}
#define SPEC_in2_i2_32u 0

/* "_shl": the zero-extended immediate shifted left by insn->data bits. */
static void in2_i2_16u_shl(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(s, i2);
    o->in2 = tcg_constant_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_16u_shl 0

static void in2_i2_32u_shl(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(s, i2);
    o->in2 = tcg_constant_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_32u_shl 0

#ifndef CONFIG_USER_ONLY
/* Pass the raw instruction bytes to the helper (system emulation only). */
static void in2_insn(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64(s->fields.raw_insn);
}
#define SPEC_in2_insn 0
#endif
6005 
6006 /* ====================================================================== */
6007 
6008 /* Find opc within the table of insns.  This is formulated as a switch
6009    statement so that (1) we get compile-time notice of cut-paste errors
6010    for duplicated opcodes, and (2) the compiler generates the binary
6011    search tree, rather than us having to post-process the table.  */
6012 
6013 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
6014     E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0)
6015 
6016 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
6017     E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0)
6018 
6019 #define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \
6020     E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL)
6021 
6022 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM,
6023 
6024 enum DisasInsnEnum {
6025 #include "insn-data.h.inc"
6026 };
6027 
/* Second expansion: one DisasInsn initializer per instruction, used to
   populate the insn_info[] table below.  */
#undef E
#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) {                   \
    .opc = OPC,                                                             \
    .flags = FL,                                                            \
    .fmt = FMT_##FT,                                                        \
    .fac = FAC_##FC,                                                        \
    .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W,  \
    .name = #NM,                                                            \
    .help_in1 = in1_##I1,                                                   \
    .help_in2 = in2_##I2,                                                   \
    .help_prep = prep_##P,                                                  \
    .help_wout = wout_##W,                                                  \
    .help_cout = cout_##CC,                                                 \
    .help_op = op_##OP,                                                     \
    .data = D                                                               \
 },

/* Allow 0 to be used for NULL in the table below.  */
#define in1_0  NULL
#define in2_0  NULL
#define prep_0  NULL
#define wout_0  NULL
#define cout_0  NULL
#define op_0  NULL

/* Likewise allow 0 as "no specification bits" in the .spec OR above.  */
#define SPEC_in1_0 0
#define SPEC_in2_0 0
#define SPEC_prep_0 0
#define SPEC_wout_0 0
6057 
/* Give smaller names to the various facilities.  These shorthands feed the
   FAC_##FC paste in the E macro above and resolve to S390_FEAT_* values.  */
#define FAC_Z           S390_FEAT_ZARCH
#define FAC_CASS        S390_FEAT_COMPARE_AND_SWAP_AND_STORE
#define FAC_DFP         S390_FEAT_DFP
#define FAC_DFPR        S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* DFP-rounding */
#define FAC_DO          S390_FEAT_STFLE_45 /* distinct-operands */
#define FAC_EE          S390_FEAT_EXECUTE_EXT
#define FAC_EI          S390_FEAT_EXTENDED_IMMEDIATE
#define FAC_FPE         S390_FEAT_FLOATING_POINT_EXT
#define FAC_FPSSH       S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* FPS-sign-handling */
#define FAC_FPRGR       S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* FPR-GR-transfer */
#define FAC_GIE         S390_FEAT_GENERAL_INSTRUCTIONS_EXT
#define FAC_HFP_MA      S390_FEAT_HFP_MADDSUB
#define FAC_HW          S390_FEAT_STFLE_45 /* high-word */
#define FAC_IEEEE_SIM   S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* IEEE-exception-simulation */
#define FAC_MIE         S390_FEAT_STFLE_49 /* misc-instruction-extensions */
#define FAC_LAT         S390_FEAT_STFLE_49 /* load-and-trap */
#define FAC_LOC         S390_FEAT_STFLE_45 /* load/store on condition 1 */
#define FAC_LOC2        S390_FEAT_STFLE_53 /* load/store on condition 2 */
#define FAC_LD          S390_FEAT_LONG_DISPLACEMENT
#define FAC_PC          S390_FEAT_STFLE_45 /* population count */
#define FAC_SCF         S390_FEAT_STORE_CLOCK_FAST
#define FAC_SFLE        S390_FEAT_STFLE
#define FAC_ILA         S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
#define FAC_MVCOS       S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
#define FAC_LPP         S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
#define FAC_DAT_ENH     S390_FEAT_DAT_ENH
#define FAC_E2          S390_FEAT_EXTENDED_TRANSLATION_2
#define FAC_EH          S390_FEAT_STFLE_49 /* execution-hint */
#define FAC_PPA         S390_FEAT_STFLE_49 /* processor-assist */
#define FAC_LZRB        S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
#define FAC_ETF3        S390_FEAT_EXTENDED_TRANSLATION_3
#define FAC_MSA         S390_FEAT_MSA /* message-security-assist facility */
#define FAC_MSA3        S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
#define FAC_MSA4        S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
#define FAC_MSA5        S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
#define FAC_MSA8        S390_FEAT_MSA_EXT_8 /* msa-extension-8 facility */
#define FAC_ECT         S390_FEAT_EXTRACT_CPU_TIME
#define FAC_PCI         S390_FEAT_ZPCI /* z/PCI facility */
#define FAC_AIS         S390_FEAT_ADAPTER_INT_SUPPRESSION
#define FAC_V           S390_FEAT_VECTOR /* vector facility */
#define FAC_VE          S390_FEAT_VECTOR_ENH  /* vector enhancements facility 1 */
#define FAC_VE2         S390_FEAT_VECTOR_ENH2 /* vector enhancements facility 2 */
#define FAC_MIE2        S390_FEAT_MISC_INSTRUCTION_EXT2 /* miscellaneous-instruction-extensions facility 2 */
#define FAC_MIE3        S390_FEAT_MISC_INSTRUCTION_EXT3 /* miscellaneous-instruction-extensions facility 3 */
6103 
/* The full per-instruction info table, indexed by DisasInsnEnum.  */
static const DisasInsn insn_info[] = {
#include "insn-data.h.inc"
};

/* Third expansion: one switch case per combined opcode, mapping it to
   the corresponding entry in insn_info[].  */
#undef E
#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \
    case OPC: return &insn_info[insn_ ## NM];

/* Map a 16-bit combined opcode (op << 8 | op2) to its DisasInsn,
   or NULL if the opcode is not implemented.  */
static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.h.inc"
    default:
        return NULL;
    }
}

/* Done generating tables; retire the helper macros.  */
#undef F
#undef E
#undef D
#undef C
6125 
/* Extract a field from the insn.  The INSN should be left-aligned in
   the uint64_t so that we can more easily utilize the big-bit-endian
   definitions we extract from the Principles of Operation.  */

static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
{
    uint32_t r, m;

    /* A size of zero marks an unused field slot.  */
    if (f->size == 0) {
        return;
    }

    /* Zero extract the field from the insn.  */
    r = (insn << f->beg) >> (64 - f->size);

    /* Sign-extend, or un-swap the field as necessary.  */
    switch (f->type) {
    case 0: /* unsigned */
        break;
    case 1: /* signed */
        assert(f->size <= 32);
        m = 1u << (f->size - 1);
        r = (r ^ m) - m;   /* sign-extend from bit (size - 1) */
        break;
    case 2: /* dl+dh split, signed 20 bit. */
        /* The raw 20-bit field is DL (12 bits) followed by DH (8 bits);
           reassemble as the sign-extended value DH:DL.  */
        r = ((int8_t)r << 12) | (r >> 8);
        break;
    case 3: /* MSB stored in RXB */
        /* Vector register numbers: bit 4 of the register number lives in
           the RXB byte (big-endian insn bits 36-39); which RXB bit applies
           depends on where the 4-bit register field begins.  */
        g_assert(f->size == 4);
        switch (f->beg) {
        case 8:
            r |= extract64(insn, 63 - 36, 1) << 4;
            break;
        case 12:
            r |= extract64(insn, 63 - 37, 1) << 4;
            break;
        case 16:
            r |= extract64(insn, 63 - 38, 1) << 4;
            break;
        case 32:
            r |= extract64(insn, 63 - 39, 1) << 4;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    default:
        abort();
    }

    /*
     * Validate that the "compressed" encoding we selected above is valid.
     * I.e. we haven't made two different original fields overlap.
     */
    assert(((o->presentC >> f->indexC) & 1) == 0);
    o->presentC |= 1 << f->indexC;
    o->presentO |= 1 << f->indexO;

    o->c[f->indexC] = r;
}
6186 
/* Lookup the insn at the current PC, extracting the operands into O and
   returning the info struct for the insn.  Returns NULL for invalid insn.  */

static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s)
{
    uint64_t insn, pc = s->base.pc_next;
    int op, op2, ilen;
    const DisasInsn *info;

    if (unlikely(s->ex_value)) {
        /* Drop the EX data now, so that it's clear on exception paths.  */
        tcg_gen_st_i64(tcg_constant_i64(0), tcg_env,
                       offsetof(CPUS390XState, ex_value));

        /* Extract the values saved by EXECUTE.  */
        insn = s->ex_value & 0xffffffffffff0000ull;
        ilen = s->ex_value & 0xf;

        /* Register insn bytes with translator so plugins work. */
        for (int i = 0; i < ilen; i++) {
            uint8_t byte = extract64(insn, 56 - (i * 8), 8);
            translator_fake_ldb(byte, pc + i);
        }
        op = insn >> 56;
    } else {
        /* Read the first two bytes; the primary opcode determines ILEN.  */
        insn = ld_code2(env, s, pc);
        op = (insn >> 8) & 0xff;
        ilen = get_ilen(op);
        /* Left-align the complete insn in the 64-bit word, as required
           by extract_field().  */
        switch (ilen) {
        case 2:
            insn = insn << 48;
            break;
        case 4:
            insn = ld_code4(env, s, pc) << 32;
            break;
        case 6:
            insn = (insn << 48) | (ld_code4(env, s, pc + 2) << 16);
            break;
        default:
            g_assert_not_reached();
        }
    }
    s->pc_tmp = s->base.pc_next + ilen;
    s->ilen = ilen;

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0. */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE, IE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        op2 = (insn << 12) >> 60;
        break;
    case 0xc5: /* MII */
    case 0xc7: /* SMI */
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        op2 = 0;
        break;
    default:
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(&s->fields, 0, sizeof(s->fields));
    s->fields.raw_insn = insn;
    s->fields.op = op;
    s->fields.op2 = op2;

    /* Lookup the instruction.  */
    info = lookup_opc(op << 8 | op2);
    s->insn = info;

    /* If we found it, extract the operands.  */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(&s->fields, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
6295 
/* Return true if REG is an additional-floating-point register, i.e. any
   FP register other than the base set 0, 2, 4 and 6.  */
static bool is_afp_reg(int reg)
{
    if ((reg & 1) != 0) {
        return true;   /* all odd-numbered registers are AFP */
    }
    return reg > 6;    /* even registers above 6 are AFP too */
}
6300 
/* Return true if REG can name a valid floating-point register pair:
   0,1,4,5,8,9,12,13.  These are exactly the numbers with bit 1 clear.  */
static bool is_fp_pair(int reg)
{
    return (reg & 0x2) == 0;
}
6306 
/* Decode and translate one instruction at the current PC.  Performs the
   facility/privilege/AFP/specification checks, then runs the insn's
   help_* callbacks.  Advances s->base.pc_next past the insn and returns
   the resulting DisasJumpType.  */
static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    DisasJumpType ret = DISAS_NEXT;
    DisasOps o = {};
    bool icount = false;

    /* Search for the insn in the table.  */
    insn = extract_insn(env, s);

    /* Update insn_start now that we know the ILEN.  */
    tcg_set_insn_start_param(s->insn_start, 2, s->ilen);

    /* Not found means unimplemented/illegal opcode.  */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
                      s->fields.op, s->fields.op2);
        gen_illegal_opcode(s);
        ret = DISAS_NORETURN;
        goto out;
    }

#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 addr = tcg_constant_i64(s->base.pc_next);
        gen_helper_per_ifetch(tcg_env, addr);
    }
#endif

    /* process flags */
    if (insn->flags) {
        /* privileged instruction */
        if ((s->base.tb->flags & FLAG_MASK_PSTATE) && (insn->flags & IF_PRIV)) {
            gen_program_exception(s, PGM_PRIVILEGED);
            ret = DISAS_NORETURN;
            goto out;
        }

        /* if AFP is not enabled, instructions and registers are forbidden */
        if (!(s->base.tb->flags & FLAG_MASK_AFP)) {
            /* dxc selects the data-exception code; last matching flag wins */
            uint8_t dxc = 0;

            if ((insn->flags & IF_AFP1) && is_afp_reg(get_field(s, r1))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP2) && is_afp_reg(get_field(s, r2))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP3) && is_afp_reg(get_field(s, r3))) {
                dxc = 1;
            }
            if (insn->flags & IF_BFP) {
                dxc = 2;
            }
            if (insn->flags & IF_DFP) {
                dxc = 3;
            }
            if (insn->flags & IF_VEC) {
                dxc = 0xfe;
            }
            if (dxc) {
                gen_data_exception(dxc);
                ret = DISAS_NORETURN;
                goto out;
            }
        }

        /* if vector instructions not enabled, executing them is forbidden */
        if (insn->flags & IF_VEC) {
            if (!((s->base.tb->flags & FLAG_MASK_VECTOR))) {
                gen_data_exception(0xfe);
                ret = DISAS_NORETURN;
                goto out;
            }
        }

        /* input/output is the special case for icount mode */
        if (unlikely(insn->flags & IF_IO)) {
            icount = translator_io_start(&s->base);
        }
    }

    /* Check for insn specification exceptions.  */
    if (insn->spec) {
        if ((insn->spec & SPEC_r1_even && get_field(s, r1) & 1) ||
            (insn->spec & SPEC_r2_even && get_field(s, r2) & 1) ||
            (insn->spec & SPEC_r3_even && get_field(s, r3) & 1) ||
            (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(s, r1))) ||
            (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(s, r2)))) {
            gen_program_exception(s, PGM_SPECIFICATION);
            ret = DISAS_NORETURN;
            goto out;
        }
    }

    /* Implement the instruction.  Callbacks run in a fixed order:
       inputs, prep, the operation itself, then (if we will return)
       write-out and condition-code update.  */
    if (insn->help_in1) {
        insn->help_in1(s, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (ret != DISAS_NORETURN) {
        if (insn->help_wout) {
            insn->help_wout(s, &o);
        }
        if (insn->help_cout) {
            insn->help_cout(s, &o);
        }
    }

    /* io should be the last instruction in tb when icount is enabled */
    if (unlikely(icount && ret == DISAS_NEXT)) {
        ret = DISAS_TOO_MANY;
    }

#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        /* An exception might be triggered, save PSW if not already done.  */
        if (ret == DISAS_NEXT || ret == DISAS_TOO_MANY) {
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
        }

        /* Call the helper to check for a possible PER exception.  */
        gen_helper_per_check_exception(tcg_env);
    }
#endif

out:
    /* Advance to the next instruction.  */
    s->base.pc_next = s->pc_tmp;
    return ret;
}
6446 
6447 static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
6448 {
6449     DisasContext *dc = container_of(dcbase, DisasContext, base);
6450 
6451     /* 31-bit mode */
6452     if (!(dc->base.tb->flags & FLAG_MASK_64)) {
6453         dc->base.pc_first &= 0x7fffffff;
6454         dc->base.pc_next = dc->base.pc_first;
6455     }
6456 
6457     dc->cc_op = CC_OP_DYNAMIC;
6458     dc->ex_value = dc->base.tb->cs_base;
6459     dc->exit_to_mainloop = (dc->base.tb->flags & FLAG_MASK_PER) || dc->ex_value;
6460 }
6461 
/* TranslatorOps hook: nothing to do at the start of a TB.  */
static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}
6465 
6466 static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
6467 {
6468     DisasContext *dc = container_of(dcbase, DisasContext, base);
6469 
6470     /* Delay the set of ilen until we've read the insn. */
6471     tcg_gen_insn_start(dc->base.pc_next, dc->cc_op, 0);
6472     dc->insn_start = tcg_last_op();
6473 }
6474 
6475 static target_ulong get_next_pc(CPUS390XState *env, DisasContext *s,
6476                                 uint64_t pc)
6477 {
6478     uint64_t insn = cpu_lduw_code(env, pc);
6479 
6480     return pc + get_ilen((insn >> 8) & 0xff);
6481 }
6482 
6483 static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
6484 {
6485     CPUS390XState *env = cpu_env(cs);
6486     DisasContext *dc = container_of(dcbase, DisasContext, base);
6487 
6488     dc->base.is_jmp = translate_one(env, dc);
6489     if (dc->base.is_jmp == DISAS_NEXT) {
6490         if (dc->ex_value ||
6491             !is_same_page(dcbase, dc->base.pc_next) ||
6492             !is_same_page(dcbase, get_next_pc(env, dc, dc->base.pc_next))) {
6493             dc->base.is_jmp = DISAS_TOO_MANY;
6494         }
6495     }
6496 }
6497 
/* TranslatorOps hook: finalize the TB.  Depending on how translation
   stopped, store the PSW address and/or cc_op back to env state, then
   emit the TB exit.  The cases deliberately fall through: each later
   label is a subset of the work of the one above it.  */
static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    switch (dc->base.is_jmp) {
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
        update_psw_addr(dc);
        /* FALLTHRU */
    case DISAS_PC_UPDATED:
        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
           cc op type is in env */
        update_cc_op(dc);
        /* FALLTHRU */
    case DISAS_PC_CC_UPDATED:
        /* Exit the TB, either by raising a debug exception or by return.  */
        if (dc->exit_to_mainloop) {
            tcg_gen_exit_tb(NULL, 0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        break;
    default:
        g_assert_not_reached();
    }
}
6525 
6526 static void s390x_tr_disas_log(const DisasContextBase *dcbase,
6527                                CPUState *cs, FILE *logfile)
6528 {
6529     DisasContext *dc = container_of(dcbase, DisasContext, base);
6530 
6531     if (unlikely(dc->ex_value)) {
6532         /* ??? Unfortunately target_disas can't use host memory.  */
6533         fprintf(logfile, "IN: EXECUTE %016" PRIx64, dc->ex_value);
6534     } else {
6535         fprintf(logfile, "IN: %s\n", lookup_symbol(dc->base.pc_first));
6536         target_disas(logfile, cs, dc->base.pc_first, dc->base.tb->size);
6537     }
6538 }
6539 
/* Hook table handed to the generic translator loop.  */
static const TranslatorOps s390x_tr_ops = {
    .init_disas_context = s390x_tr_init_disas_context,
    .tb_start           = s390x_tr_tb_start,
    .insn_start         = s390x_tr_insn_start,
    .translate_insn     = s390x_tr_translate_insn,
    .tb_stop            = s390x_tr_tb_stop,
    .disas_log          = s390x_tr_disas_log,
};
6548 
/* Target entry point: translate a TB by running the generic translator
   loop with the s390x hooks.  dc is initialized by translator_loop()
   via the init_disas_context hook.  */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           target_ulong pc, void *host_pc)
{
    DisasContext dc;

    translator_loop(cs, tb, max_insns, pc, host_pc, &s390x_tr_ops, &dc.base);
}
6556 
6557 void s390x_restore_state_to_opc(CPUState *cs,
6558                                 const TranslationBlock *tb,
6559                                 const uint64_t *data)
6560 {
6561     S390CPU *cpu = S390_CPU(cs);
6562     CPUS390XState *env = &cpu->env;
6563     int cc_op = data[1];
6564 
6565     env->psw.addr = data[0];
6566 
6567     /* Update the CC opcode if it is not already up-to-date.  */
6568     if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
6569         env->cc_op = cc_op;
6570     }
6571 
6572     /* Record ILEN.  */
6573     env->int_pgm_ilen = data[2];
6574 }
6575