xref: /openbmc/qemu/target/s390x/tcg/translate.c (revision 21e9a8aefb0313174c1861df84e5e49bd84026c8)
1 /*
2  *  S/390 translation
3  *
4  *  Copyright (c) 2009 Ulrich Hecht
5  *  Copyright (c) 2010 Alexander Graf
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
24 
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 #  define LOG_DISAS(...) qemu_log(__VA_ARGS__)
27 #else
28 #  define LOG_DISAS(...) do { } while (0)
29 #endif
30 
31 #include "qemu/osdep.h"
32 #include "cpu.h"
33 #include "s390x-internal.h"
34 #include "disas/disas.h"
35 #include "exec/exec-all.h"
36 #include "tcg/tcg-op.h"
37 #include "tcg/tcg-op-gvec.h"
38 #include "qemu/log.h"
39 #include "qemu/host-utils.h"
40 #include "exec/cpu_ldst.h"
41 #include "exec/helper-proto.h"
42 #include "exec/helper-gen.h"
43 
44 #include "exec/translator.h"
45 #include "exec/log.h"
46 #include "qemu/atomic128.h"
47 
48 #define HELPER_H "helper.h"
49 #include "exec/helper-info.c.inc"
50 #undef  HELPER_H
51 
52 
53 /* Information that (most) every instruction needs to manipulate.  */
54 typedef struct DisasContext DisasContext;
55 typedef struct DisasInsn DisasInsn;
56 typedef struct DisasFields DisasFields;
57 
58 /*
59  * Define a structure to hold the decoded fields.  We'll store each inside
60  * an array indexed by an enum.  In order to conserve memory, we'll arrange
61  * for fields that do not exist at the same time to overlap, thus the "C"
62  * for compact.  For checking purposes there is an "O" for original index
63  * as well that will be applied to availability bitmaps.
64  */
65 
/*
 * Original ("O") field indices: one distinct value per instruction-format
 * field, used as bit positions in the DisasFields.presentO availability
 * bitmap.  Naming: r = register, m = mask, b = base, d = displacement,
 * x = index, l = length, i = immediate, v = vector register.
 */
enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_m5,
    FLD_O_m6,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5,
    FLD_O_v1,
    FLD_O_v2,
    FLD_O_v3,
    FLD_O_v4,
};

/*
 * Compact ("C") field indices: fields that never occur together in the
 * same instruction format share one storage slot in DisasFields.c[],
 * so only NUM_C_FIELD slots are needed instead of one per FLD_O_* value.
 */
enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,
    FLD_C_v1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,
    FLD_C_v3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,
    FLD_C_v4 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,
    FLD_C_m5 = 4,

    FLD_C_d2 = 5,
    FLD_C_m6 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,
    FLD_C_v2 = 6,

    NUM_C_FIELD = 7
};
131 
/* Decoded fields of the instruction currently being translated.  */
struct DisasFields {
    uint64_t raw_insn;      /* the instruction bytes as fetched */
    unsigned op:8;          /* primary opcode */
    unsigned op2:8;         /* secondary/extended opcode */
    unsigned presentC:16;   /* bitmap over compact (FLD_C_*) indices */
    unsigned int presentO;  /* bitmap over original (FLD_O_*) indices */
    int c[NUM_C_FIELD];     /* field values, indexed by FLD_C_* */
};

/* Per-translation-block state threaded through all translate routines.  */
struct DisasContext {
    DisasContextBase base;
    const DisasInsn *insn;      /* decode-table entry of the current insn */
    TCGOp *insn_start;          /* insn_start op of the current insn */
    DisasFields fields;
    uint64_t ex_value;          /* pending EXECUTE target value, if any
                                   (NOTE(review): inferred from name —
                                   confirm against the users of this field) */
    /*
     * During translate_one(), pc_tmp is used to determine the instruction
     * to be executed after base.pc_next - e.g. next sequential instruction
     * or a branch target.
     */
    uint64_t pc_tmp;
    uint32_t ilen;              /* length in bytes of the current insn */
    enum cc_op cc_op;           /* how the CC is currently represented */
    bool exit_to_mainloop;      /* end the TB with an exit to the main loop */
};

/* Information carried about a condition to be evaluated.  */
typedef struct {
    TCGCond cond:8;
    bool is_64;                 /* selects u.s64 vs u.s32 below */
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;

/* Branch-inlining statistics, only kept for debugging.  */
#ifdef DEBUG_INLINE_BRANCHES
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif
172 
/*
 * Store into OUT the "link information" that branch-and-link style
 * instructions save for address PC.  In 64-bit mode (both FLAG_MASK_32
 * and FLAG_MASK_64 set) the full 64-bit address is stored.  Otherwise
 * only the low 32 bits of OUT are replaced, preserving the high half:
 * in 31-bit mode the stored word additionally carries bit 0x80000000
 * (the addressing-mode bit), in 24-bit mode PC is stored as-is.
 */
static void pc_to_link_info(TCGv_i64 out, DisasContext *s, uint64_t pc)
{
    if (s->base.tb->flags & FLAG_MASK_32) {
        if (s->base.tb->flags & FLAG_MASK_64) {
            /* 64-bit mode: store the whole address.  */
            tcg_gen_movi_i64(out, pc);
            return;
        }
        /* 31-bit mode: tag the address with the AMODE bit.  */
        pc |= 0x80000000;
    }
    /* FLAG_MASK_64 without FLAG_MASK_32 would be an invalid mode here.  */
    assert(!(s->base.tb->flags & FLAG_MASK_64));
    tcg_gen_deposit_i64(out, out, tcg_constant_i64(pc), 0, 32);
}
185 
/* TCG globals mirroring the guest PSW.  */
static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;
/* Breaking-event address, updated on branches for PER support.  */
static TCGv_i64 gbea;

/* Condition-code state: the pending operation and its operands.  */
static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

/* The sixteen general registers and their TCG names ("r0".."r15").  */
static char cpu_reg_names[16][4];
static TCGv_i64 regs[16];
197 
198 void s390x_translate_init(void)
199 {
200     int i;
201 
202     psw_addr = tcg_global_mem_new_i64(tcg_env,
203                                       offsetof(CPUS390XState, psw.addr),
204                                       "psw_addr");
205     psw_mask = tcg_global_mem_new_i64(tcg_env,
206                                       offsetof(CPUS390XState, psw.mask),
207                                       "psw_mask");
208     gbea = tcg_global_mem_new_i64(tcg_env,
209                                   offsetof(CPUS390XState, gbea),
210                                   "gbea");
211 
212     cc_op = tcg_global_mem_new_i32(tcg_env, offsetof(CPUS390XState, cc_op),
213                                    "cc_op");
214     cc_src = tcg_global_mem_new_i64(tcg_env, offsetof(CPUS390XState, cc_src),
215                                     "cc_src");
216     cc_dst = tcg_global_mem_new_i64(tcg_env, offsetof(CPUS390XState, cc_dst),
217                                     "cc_dst");
218     cc_vr = tcg_global_mem_new_i64(tcg_env, offsetof(CPUS390XState, cc_vr),
219                                    "cc_vr");
220 
221     for (i = 0; i < 16; i++) {
222         snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
223         regs[i] = tcg_global_mem_new(tcg_env,
224                                      offsetof(CPUS390XState, regs[i]),
225                                      cpu_reg_names[i]);
226     }
227 }
228 
/* Byte offset of vector register REG within CPUS390XState.  */
static inline int vec_full_reg_offset(uint8_t reg)
{
    g_assert(reg < 32);
    return offsetof(CPUS390XState, vregs[reg][0]);
}

/*
 * Byte offset of element ENR (of element size ES) of vector register
 * REG, corrected for host endianness as described below.
 */
static inline int vec_reg_offset(uint8_t reg, uint8_t enr, MemOp es)
{
    /* Convert element size (es) - e.g. MO_8 - to bytes */
    const uint8_t bytes = 1 << es;
    int offs = enr * bytes;

    /*
     * vregs[n][0] is the lowest 8 byte and vregs[n][1] the highest 8 byte
     * of the 16 byte vector, on both, little and big endian systems.
     *
     * Big Endian (target/possible host)
     * B:  [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15]
     * HW: [     0][     1][     2][     3] - [     4][     5][     6][     7]
     * W:  [             0][             1] - [             2][             3]
     * DW: [                             0] - [                             1]
     *
     * Little Endian (possible host)
     * B:  [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8]
     * HW: [     3][     2][     1][     0] - [     7][     6][     5][     4]
     * W:  [             1][             0] - [             3][             2]
     * DW: [                             0] - [                             1]
     *
     * For 16 byte elements, the two 8 byte halves will not form a host
     * int128 if the host is little endian, since they're in the wrong order.
     * Some operations (e.g. xor) do not care. For operations like addition,
     * the two 8 byte elements have to be loaded separately. Let's force all
     * 16 byte operations to handle it in a special way.
     */
    g_assert(es <= MO_64);
#if !HOST_BIG_ENDIAN
    /* XOR flips the offset within the containing doubleword.  */
    offs ^= (8 - bytes);
#endif
    return offs + vec_full_reg_offset(reg);
}

/* 64-bit FP register REG: element 0 (leftmost doubleword) of vreg REG.  */
static inline int freg64_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_64);
}

/* Short (32-bit) FP register REG: element 0 (leftmost word) of vreg REG.  */
static inline int freg32_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_32);
}
281 
282 static TCGv_i64 load_reg(int reg)
283 {
284     TCGv_i64 r = tcg_temp_new_i64();
285     tcg_gen_mov_i64(r, regs[reg]);
286     return r;
287 }
288 
289 static TCGv_i64 load_freg(int reg)
290 {
291     TCGv_i64 r = tcg_temp_new_i64();
292 
293     tcg_gen_ld_i64(r, tcg_env, freg64_offset(reg));
294     return r;
295 }
296 
297 static TCGv_i64 load_freg32_i64(int reg)
298 {
299     TCGv_i64 r = tcg_temp_new_i64();
300 
301     tcg_gen_ld32u_i64(r, tcg_env, freg32_offset(reg));
302     return r;
303 }
304 
305 static TCGv_i128 load_freg_128(int reg)
306 {
307     TCGv_i64 h = load_freg(reg);
308     TCGv_i64 l = load_freg(reg + 2);
309     TCGv_i128 r = tcg_temp_new_i128();
310 
311     tcg_gen_concat_i64_i128(r, l, h);
312     return r;
313 }
314 
/* Copy V into general register REG.  */
static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

/* Store V into the 64-bit FP register REG.  */
static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_st_i64(v, tcg_env, freg64_offset(reg));
}

static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}

/* Write the low 32 bits of V into the HIGH half of register REG.  */
static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}

/* Store the low 32 bits of V into short FP register REG.  */
static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_st32_i64(v, tcg_env, freg32_offset(reg));
}

/* Write the current instruction's address back to the PSW.  */
static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->base.pc_next);
}
346 
/*
 * Record an unconditional branch for PER: update the breaking-event
 * address and, with PER enabled, invoke the helper that may raise a
 * PER event.  TO_NEXT selects s->pc_tmp (the next sequential insn)
 * rather than the already-computed psw_addr as the branch target.
 */
static void per_branch(DisasContext *s, bool to_next)
{
#ifndef CONFIG_USER_ONLY
    tcg_gen_movi_i64(gbea, s->base.pc_next);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 next_pc = to_next ? tcg_constant_i64(s->pc_tmp) : psw_addr;
        gen_helper_per_branch(tcg_env, gbea, next_pc);
    }
#endif
}

/*
 * As per_branch, but for a branch taken only when COND holds for
 * ARG1/ARG2.  Without PER enabled, only gbea is (conditionally)
 * updated, via movcond instead of a branch over the helper call.
 */
static void per_branch_cond(DisasContext *s, TCGCond cond,
                            TCGv_i64 arg1, TCGv_i64 arg2)
{
#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGLabel *lab = gen_new_label();
        /* Skip the helper when the branch is not taken.  */
        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);

        tcg_gen_movi_i64(gbea, s->base.pc_next);
        gen_helper_per_branch(tcg_env, gbea, psw_addr);

        gen_set_label(lab);
    } else {
        TCGv_i64 pc = tcg_constant_i64(s->base.pc_next);
        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
    }
#endif
}

/* The current instruction is a breaking event: record its address.  */
static void per_breaking_event(DisasContext *s)
{
    tcg_gen_movi_i64(gbea, s->base.pc_next);
}

/*
 * Flush the translation-time CC state selector to env->cc_op.  For
 * DYNAMIC/STATIC the env value is already authoritative, so skip it.
 */
static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}
389 
/* Fetch two instruction bytes at PC, zero-extended to 64 bits.  */
static inline uint64_t ld_code2(CPUS390XState *env, DisasContext *s,
                                uint64_t pc)
{
    return (uint64_t)translator_lduw(env, &s->base, pc);
}

/* Fetch four instruction bytes at PC, zero-extended to 64 bits.  */
static inline uint64_t ld_code4(CPUS390XState *env, DisasContext *s,
                                uint64_t pc)
{
    /* The i32 cast avoids sign-extending the 32-bit load result.  */
    return (uint64_t)(uint32_t)translator_ldl(env, &s->base, pc);
}
401 
402 static int get_mem_index(DisasContext *s)
403 {
404 #ifdef CONFIG_USER_ONLY
405     return MMU_USER_IDX;
406 #else
407     if (!(s->base.tb->flags & FLAG_MASK_DAT)) {
408         return MMU_REAL_IDX;
409     }
410 
411     switch (s->base.tb->flags & FLAG_MASK_ASC) {
412     case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
413         return MMU_PRIMARY_IDX;
414     case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
415         return MMU_SECONDARY_IDX;
416     case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
417         return MMU_HOME_IDX;
418     default:
419         g_assert_not_reached();
420         break;
421     }
422 #endif
423 }
424 
/* Raise exception EXCP via the generic exception helper.  */
static void gen_exception(int excp)
{
    gen_helper_exception(tcg_env, tcg_constant_i32(excp));
}

/*
 * Raise a program exception with code CODE: record the code and the
 * instruction length, sync the PSW address and CC, then trap.
 */
static void gen_program_exception(DisasContext *s, int code)
{
    /* Remember what pgm exception this was.  */
    tcg_gen_st_i32(tcg_constant_i32(code), tcg_env,
                   offsetof(CPUS390XState, int_pgm_code));

    tcg_gen_st_i32(tcg_constant_i32(s->ilen), tcg_env,
                   offsetof(CPUS390XState, int_pgm_ilen));

    /* update the psw */
    update_psw_addr(s);

    /* Save off cc.  */
    update_cc_op(s);

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);
}

/* Raise an operation exception (illegal/unknown opcode).  */
static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}

/* Raise a data exception with data-exception code DXC.  */
static inline void gen_data_exception(uint8_t dxc)
{
    gen_helper_data_exception(tcg_env, tcg_constant_i32(dxc));
}

static inline void gen_trap(DisasContext *s)
{
    /* Set DXC to 0xff */
    gen_data_exception(0xff);
}
464 
465 static void gen_addi_and_wrap_i64(DisasContext *s, TCGv_i64 dst, TCGv_i64 src,
466                                   int64_t imm)
467 {
468     tcg_gen_addi_i64(dst, src, imm);
469     if (!(s->base.tb->flags & FLAG_MASK_64)) {
470         if (s->base.tb->flags & FLAG_MASK_32) {
471             tcg_gen_andi_i64(dst, dst, 0x7fffffff);
472         } else {
473             tcg_gen_andi_i64(dst, dst, 0x00ffffff);
474         }
475     }
476 }
477 
/*
 * Compute the effective address base(B2) + index(X2) + displacement(D2)
 * into a fresh temporary, wrapped to the current addressing mode.
 * Register 0 as base or index means "no register" (value 0), hence the
 * plain truthiness tests on b2/x2.
 */
static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /*
     * Note that d2 is limited to 20 bits, signed.  If we crop negative
     * displacements early we create larger immediate addends.
     */
    if (b2 && x2) {
        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
        gen_addi_and_wrap_i64(s, tmp, tmp, d2);
    } else if (b2) {
        gen_addi_and_wrap_i64(s, tmp, regs[b2], d2);
    } else if (x2) {
        gen_addi_and_wrap_i64(s, tmp, regs[x2], d2);
    } else if (!(s->base.tb->flags & FLAG_MASK_64)) {
        /* Constant address: apply the mode mask at translation time.  */
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_movi_i64(tmp, d2 & 0x7fffffff);
        } else {
            tcg_gen_movi_i64(tmp, d2 & 0x00ffffff);
        }
    } else {
        tcg_gen_movi_i64(tmp, d2);
    }

    return tmp;
}
505 
/*
 * True if cc_src/cc_dst/cc_vr hold data still needed to compute the CC.
 * DYNAMIC/STATIC take the CC from env, and values <= 3 are the constant
 * CC_OP_CONSTx ops; neither uses the operand globals.
 */
static inline bool live_cc_data(DisasContext *s)
{
    return (s->cc_op != CC_OP_DYNAMIC
            && s->cc_op != CC_OP_STATIC
            && s->cc_op > 3);
}

/* Set the CC to the constant VAL (0..3); discard stale operand data.  */
static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}

/* Defer CC computation OP with one operand (in cc_dst).  */
static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

/* Defer CC computation OP with two operands (cc_src, cc_dst).  */
static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

/* Defer CC computation OP with three operands (cc_src, cc_dst, cc_vr).  */
static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}

/* CC reflects whether VAL is nonzero.  */
static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}

/* CC value is in env->cc_op */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}
568 
/* calculates cc into cc_op */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = NULL;
    TCGv_i64 dummy = NULL;

    /*
     * First pass: decide which auxiliary constants the helper call will
     * need.  The constant/static/dynamic cases need neither; the 3-operand
     * arithmetic ops need only the op selector; everything else also gets
     * a dummy zero to fill unused helper arguments.
     */
    switch (s->cc_op) {
    default:
        dummy = tcg_constant_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
        local_cc_op = tcg_constant_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        break;
    }

    /* Second pass: emit the computation, grouped by operand count.  */
    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
        /* NZ can be computed inline: cc = (cc_dst != 0).  */
        tcg_gen_setcondi_i64(TCG_COND_NE, cc_dst, cc_dst, 0);
        tcg_gen_extrl_i64_i32(cc_op, cc_dst);
        break;
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
    case CC_OP_LCBB:
    case CC_OP_MULS_32:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, tcg_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ADDU:
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA:
    case CC_OP_SUBU:
    case CC_OP_NZ_F128:
    case CC_OP_VC:
    case CC_OP_MULS_64:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, tcg_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, tcg_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, tcg_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        g_assert_not_reached();
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
659 
660 static bool use_goto_tb(DisasContext *s, uint64_t dest)
661 {
662     if (unlikely(s->base.tb->flags & FLAG_MASK_PER)) {
663         return false;
664     }
665     return translator_use_goto_tb(&s->base, dest);
666 }
667 
/* Count a branch that could not be inlined for CC_OP (debug only).  */
static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

/* Count a branch that was successfully inlined for CC_OP (debug only).  */
static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}

/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.  */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};

/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible.  */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER, TCG_COND_NEVER,      /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER,
    TCG_COND_NE, TCG_COND_NE,            /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE,
    TCG_COND_EQ, TCG_COND_EQ,            /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
707 
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    /* Trivial cases: branch always / branch never.  */
    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op.  */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        /* Same table as above, with the unsigned variants of LT/GT.  */
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        /* TEST UNDER MASK: cc 0 = all zero, cc 3 = some/all one.  */
        switch (mask) {
        case 8:
            cond = TCG_COND_TSTEQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_TSTNE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        /* INSERT CHARACTERS UNDER MASK: zero vs nonzero result.  */
        switch (mask) {
        case 8:
            cond = TCG_COND_TSTEQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_TSTNE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU:
    case CC_OP_SUBU:
        /*
         * Logical add/subtract: cc encodes result-zero and carry/borrow.
         * The usable masks pair exactly the cc values sharing one of
         * those two properties.
         */
        switch (mask) {
        case 8 | 2: /* result == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* result != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* !carry (borrow) */
            cond = old_cc_op == CC_OP_ADDU ? TCG_COND_EQ : TCG_COND_NE;
            break;
        case 2 | 1: /* carry (!borrow) */
            cond = old_cc_op == CC_OP_ADDU ? TCG_COND_NE : TCG_COND_EQ;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_constant_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_constant_i64(0);
        break;

    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        break;

    case CC_OP_ADDU:
    case CC_OP_SUBU:
        c->is_64 = true;
        c->u.s64.b = tcg_constant_i64(0);
        /* The operand depends on which property the mask selected.  */
        switch (mask) {
        case 8 | 2:
        case 4 | 1: /* result */
            c->u.s64.a = cc_dst;
            break;
        case 8 | 4:
        case 2 | 1: /* carry */
            c->u.s64.a = cc_src;
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case CC_OP_STATIC:
        /* Compare the materialized cc value directly against the mask.  */
        c->is_64 = false;
        c->u.s32.a = cc_op;

        /* Fold half of the cases using bit 3 to invert. */
        switch (mask & 8 ? mask ^ 0xf : mask) {
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(3);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(2);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(1);
            break;
        case 0x2 | 0x1: /* cc == 2 || cc == 3 => cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_constant_i32(1);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_TSTNE;
            c->u.s32.b = tcg_constant_i32(1);
            break;
        case 0x4 | 0x2: /* cc == 1 || cc == 2 => (cc - 1) <= 1 */
            cond = TCG_COND_LEU;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_constant_i32(1);
            tcg_gen_addi_i32(c->u.s32.a, cc_op, -1);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_constant_i32(0);
            break;
        default:
            /* case 0: never, handled above. */
            g_assert_not_reached();
        }
        if (mask & 8) {
            cond = tcg_invert_cond(cond);
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
935 
/* ====================================================================== */
/* Define the insn format enumeration.  */
/*
 * Each Fn(NAME, ...) entry in insn-format.h.inc expands here to a single
 * FMT_<NAME> enumerator; the field arguments are ignored in this pass.
 */
#define F0(N)                         FMT_##N,
#define F1(N, X1)                     F0(N)
#define F2(N, X1, X2)                 F0(N)
#define F3(N, X1, X2, X3)             F0(N)
#define F4(N, X1, X2, X3, X4)         F0(N)
#define F5(N, X1, X2, X3, X4, X5)     F0(N)
#define F6(N, X1, X2, X3, X4, X5, X6) F0(N)

typedef enum {
#include "insn-format.h.inc"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6
957 
/* This is the way fields are to be accessed out of DisasFields.  */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)

/* Does field C (an original index) exist in the decoded instruction?  */
static bool have_field1(const DisasContext *s, enum DisasFieldIndexO c)
{
    return (s->fields.presentO >> c) & 1;
}

/*
 * Fetch field value by compact index C; the original index O is only
 * used to assert that the field was actually decoded.
 */
static int get_field1(const DisasContext *s, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(s, o));
    return s->fields.c[c];
}

/* Describe the layout of each field in each format.  */
typedef struct DisasField {
    unsigned int beg:8;     /* first bit of the field within the insn */
    unsigned int size:8;    /* field width in bits */
    unsigned int type:2;    /* extraction variant (see layout macros below) */
    unsigned int indexC:6;  /* compact storage slot (FLD_C_*) */
    enum DisasFieldIndexO indexO:8; /* original index (FLD_O_*) */
} DisasField;

/* All fields of one instruction format, one DisasField per slot.  */
typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;
/*
 * Field layout helpers: { beg, size, type, indexC, indexO }.
 * type 0 = plain field, 1 = immediate, 2 = long displacement,
 * 3 = vector register (interpretation is applied by the field
 * extractor later in this file — not visible in this chunk).
 */
#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
#define V(N, B)       {  B,  4, 3, FLD_C_v##N, FLD_O_v##N }
#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }

/* Second expansion of insn-format.h.inc: one DisasFormatInfo per format.  */
#define F0(N)                     { { } },
#define F1(N, X1)                 { { X1 } },
#define F2(N, X1, X2)             { { X1, X2 } },
#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
#define F6(N, X1, X2, X3, X4, X5, X6)       { { X1, X2, X3, X4, X5, X6 } },

static const DisasFormatInfo format_info[] = {
#include "insn-format.h.inc"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6
#undef R
#undef M
#undef V
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L
1031 
/* Generally, we'll extract operands into these structures, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details.  */
typedef struct {
    TCGv_i64 out, out2, in1, in2;   /* 64-bit outputs and inputs */
    TCGv_i64 addr1;                 /* memory address of the first operand,
                                       when the insn has one (see op_asi) */
    TCGv_i128 out_128, in1_128, in2_128;  /* 128-bit outputs and inputs */
} DisasOps;
1040 
/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>.  */

#define SPEC_r1_even    1       /* r1 must be an even-numbered register */
#define SPEC_r2_even    2       /* r2 must be an even-numbered register */
#define SPEC_r3_even    4       /* r3 must be an even-numbered register */
#define SPEC_r1_f128    8       /* r1 must be valid for a 128-bit FP value */
#define SPEC_r2_f128    16      /* r2 must be valid for a 128-bit FP value */

/* Return values from translate_one, indicating the state of the TB.  */

/* We are not using a goto_tb (for whatever reason), but have updated
   the PC (for whatever reason), so there's no need to do it again on
   exiting the TB.  */
#define DISAS_PC_UPDATED        DISAS_TARGET_0

/* We have updated the PC and CC values.  */
#define DISAS_PC_CC_UPDATED     DISAS_TARGET_2


/* Instruction flags */
#define IF_AFP1     0x0001      /* r1 is a fp reg for HFP/FPS instructions */
#define IF_AFP2     0x0002      /* r2 is a fp reg for HFP/FPS instructions */
#define IF_AFP3     0x0004      /* r3 is a fp reg for HFP/FPS instructions */
#define IF_BFP      0x0008      /* binary floating point instruction */
#define IF_DFP      0x0010      /* decimal floating point instruction */
#define IF_PRIV     0x0020      /* privileged instruction */
#define IF_VEC      0x0040      /* vector instruction */
#define IF_IO       0x0080      /* input/output instruction */
1073 
/* Static description of one instruction: decode info plus the set of
   helper callbacks that translate it.  */
struct DisasInsn {
    unsigned opc:16;        /* opcode bits used for decoding */
    unsigned flags:16;      /* IF_* instruction flags */
    DisasFormat fmt:8;      /* instruction format, indexes format_info[] */
    unsigned fac:8;         /* facility the insn belongs to */
    unsigned spec:8;        /* SPEC_* operand constraints */

    const char *name;       /* mnemonic, for logging/debug */

    /* Pre-process arguments before HELP_OP.  */
    void (*help_in1)(DisasContext *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasOps *);

    /*
     * Post-process output after HELP_OP.
     * Note that these are not called if HELP_OP returns DISAS_NORETURN.
     */
    void (*help_wout)(DisasContext *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);

    /* Implement the operation itself.  */
    DisasJumpType (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;          /* per-insn constant, read via s->insn->data */
};
1100 
1101 /* ====================================================================== */
1102 /* Miscellaneous helpers, used by several operations.  */
1103 
/*
 * Emit an unconditional direct branch to DEST.  Three cases:
 *  - DEST is the next sequential insn: nothing to do beyond the PER hook;
 *  - DEST is reachable via goto_tb: chain directly to the target TB;
 *  - otherwise: store the new PC and exit to the main loop.
 */
static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->pc_tmp) {
        per_branch(s, true);
        return DISAS_NEXT;
    }
    if (use_goto_tb(s, dest)) {
        /* cc_op must be resolved before leaving the TB.  */
        update_cc_op(s);
        per_breaking_event(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb(s->base.tb, 0);
        return DISAS_NORETURN;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    }
}
1123 
/*
 * Emit a conditional branch.  C describes the condition to branch on.
 * If IS_IMM, the target is S->base.pc_next + IMM * 2; otherwise it is
 * the dynamic address in CDEST.  Depending on what use_goto_tb() allows,
 * we chain both edges, only the fallthrough edge, or neither.
 */
static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
                                 bool is_imm, int imm, TCGv_i64 cdest)
{
    DisasJumpType ret;
    uint64_t dest = s->base.pc_next + (int64_t)imm * 2;
    TCGLabel *lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = DISAS_NEXT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->pc_tmp) {
            /* Branch to next.  */
            per_branch(s, true);
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (!cdest) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            per_branch(s, false);
            ret = DISAS_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->pc_tmp)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            /* Branch taken.  */
            gen_set_label(lab);
            per_breaking_event(s);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb(s->base.tb, 1);

            ret = DISAS_NORETURN;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            per_breaking_event(s);
            ret = DISAS_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_constant_i64(s->pc_tmp);
        if (is_imm) {
            cdest = tcg_constant_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
        } else {
            /* Widen the 32-bit comparison to 64 bits for the movcond.  */
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_constant_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
        }

        ret = DISAS_PC_UPDATED;
    }

 egress:
    return ret;
}
1245 
1246 /* ====================================================================== */
1247 /* The operations.  These perform the bulk of the work for any insn,
1248    usually after the operands have been loaded and output initialized.  */
1249 
1250 static DisasJumpType op_abs(DisasContext *s, DisasOps *o)
1251 {
1252     tcg_gen_abs_i64(o->out, o->in2);
1253     return DISAS_NEXT;
1254 }
1255 
1256 static DisasJumpType op_absf32(DisasContext *s, DisasOps *o)
1257 {
1258     tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
1259     return DISAS_NEXT;
1260 }
1261 
1262 static DisasJumpType op_absf64(DisasContext *s, DisasOps *o)
1263 {
1264     tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1265     return DISAS_NEXT;
1266 }
1267 
/* FP absolute value, 128-bit image: clear the sign bit in the high
   doubleword, pass the low doubleword through unchanged.  */
static DisasJumpType op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}
1274 
1275 static DisasJumpType op_add(DisasContext *s, DisasOps *o)
1276 {
1277     tcg_gen_add_i64(o->out, o->in1, o->in2);
1278     return DISAS_NEXT;
1279 }
1280 
/* Unsigned 64-bit addition.  The double-word add leaves the carry-out
   (0 or 1) in cc_src, as expected by the CC_OP_ADDU condition code.  */
static DisasJumpType op_addu64(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    return DISAS_NEXT;
}
1287 
1288 /* Compute carry into cc_src. */
/* Compute carry into cc_src. */
static void compute_carry(DisasContext *s)
{
    switch (s->cc_op) {
    case CC_OP_ADDU:
        /* The carry value is already in cc_src (1,0). */
        break;
    case CC_OP_SUBU:
        /* cc_src holds the borrow as 0 or -1; adding 1 maps it to the
           (1,0) carry convention.  */
        tcg_gen_addi_i64(cc_src, cc_src, 1);
        break;
    default:
        gen_op_calc_cc(s);
        /* fall through */
    case CC_OP_STATIC:
        /* The carry flag is the msb of CC; compute into cc_src. */
        tcg_gen_extu_i32_i64(cc_src, cc_op);
        tcg_gen_shri_i64(cc_src, cc_src, 1);
        break;
    }
}

/* 32-bit add with carry: out = in1 + in2 + carry.  No carry-out is
   recorded here; the CC helper recomputes it from the operands.  */
static DisasJumpType op_addc32(DisasContext *s, DisasOps *o)
{
    compute_carry(s);
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    tcg_gen_add_i64(o->out, o->out, cc_src);
    return DISAS_NEXT;
}

/* 64-bit add with carry.  Two double-word adds so that the final
   carry-out of in1 + carry-in + in2 ends up in cc_src.  */
static DisasJumpType op_addc64(DisasContext *s, DisasOps *o)
{
    compute_carry(s);

    TCGv_i64 zero = tcg_constant_i64(0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, zero);
    tcg_gen_add2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);

    return DISAS_NEXT;
}
1327 
/* Add to storage (e.g. ASI/AGSI): add in2 to the memory operand at
   addr1.  With the facility indicated by STFLE_45 the update is done
   atomically in memory; otherwise as a plain load/add/store sequence.
   s->insn->data supplies the MemOp for the access.  */
static DisasJumpType op_asi(DisasContext *s, DisasOps *o)
{
    bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);

    o->in1 = tcg_temp_new_i64();
    if (non_atomic) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    if (non_atomic) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

/* Unsigned variant of op_asi: same memory update, but the recomputation
   uses a double-word add so the carry lands in cc_src for CC_OP_ADDU.  */
static DisasJumpType op_asiu64(DisasContext *s, DisasOps *o)
{
    bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);

    o->in1 = tcg_temp_new_i64();
    if (non_atomic) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);

    if (non_atomic) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
1372 
/* BFP additions: delegate to out-of-line helpers, which also handle
   exceptions and condition-code side effects.  */

/* 32-bit BFP add.  */
static DisasJumpType op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* 64-bit BFP add.  */
static DisasJumpType op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* 128-bit BFP add.  */
static DisasJumpType op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out_128, tcg_env, o->in1_128, o->in2_128);
    return DISAS_NEXT;
}
1390 
1391 static DisasJumpType op_and(DisasContext *s, DisasOps *o)
1392 {
1393     tcg_gen_and_i64(o->out, o->in1, o->in2);
1394     return DISAS_NEXT;
1395 }
1396 
/* AND a shifted immediate field into in1 (NIHH/NIHL/NILH/NILL-style).
   s->insn->data packs the bit position (low byte) and the field width
   (next byte).  Bits outside the field are forced to 1 before the AND
   so that only the selected field is affected.  */
static DisasJumpType op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;
    TCGv_i64 t = tcg_temp_new_i64();

    /* Move the immediate into position and fill the rest with ones.  */
    tcg_gen_shli_i64(t, o->in2, shift);
    tcg_gen_ori_i64(t, t, ~mask);
    tcg_gen_and_i64(o->out, o->in1, t);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
1413 
/* AND with complement: out = in1 & ~in2.  */
static DisasJumpType op_andc(DisasContext *s, DisasOps *o)
{
    tcg_gen_andc_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* OR with complement: out = in1 | ~in2.  */
static DisasJumpType op_orc(DisasContext *s, DisasOps *o)
{
    tcg_gen_orc_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* NOT AND: out = ~(in1 & in2).  */
static DisasJumpType op_nand(DisasContext *s, DisasOps *o)
{
    tcg_gen_nand_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* NOT OR: out = ~(in1 | in2).  */
static DisasJumpType op_nor(DisasContext *s, DisasOps *o)
{
    tcg_gen_nor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* NOT XOR (equivalence): out = ~(in1 ^ in2).  */
static DisasJumpType op_nxor(DisasContext *s, DisasOps *o)
{
    tcg_gen_eqv_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
1443 
/* AND to storage: and in2 into the memory operand at addr1.  With the
   interlocked-access facility 2 the update is a single atomic op;
   otherwise a plain load/and/store.  s->insn->data is the MemOp.  */
static DisasJumpType op_ni(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
1464 
/* BRANCH AND SAVE: store the link information in out, then branch to
   in2.  A NULL in2 (r2 == 0) means "save only, no branch".  */
static DisasJumpType op_bas(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}

/* Build the BAL link word.  In 31/64-bit mode this is the plain link
   info; in 24-bit mode the top byte of the low word additionally holds
   the ILC (bits 30-31), CC (bits 28-29) and program mask (bits 24-27)
   taken from the PSW.  */
static void save_link_info(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t;

    if (s->base.tb->flags & (FLAG_MASK_32 | FLAG_MASK_64)) {
        pc_to_link_info(o->out, s, s->pc_tmp);
        return;
    }
    /* CC must be current before we can copy it into the link word.  */
    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, 0xffffffff00000000ull);
    tcg_gen_ori_i64(o->out, o->out, ((s->ilen / 2) << 30) | s->pc_tmp);
    t = tcg_temp_new_i64();
    /* Extract the program mask from the PSW into bits 24-27.  */
    tcg_gen_shri_i64(t, psw_mask, 16);
    tcg_gen_andi_i64(t, t, 0x0f000000);
    tcg_gen_or_i64(o->out, o->out, t);
    /* And the condition code into bits 28-29.  */
    tcg_gen_extu_i32_i64(t, cc_op);
    tcg_gen_shli_i64(t, t, 28);
    tcg_gen_or_i64(o->out, o->out, t);
}

/* BRANCH AND LINK: like op_bas, but with the 24-bit-mode-aware link
   information from save_link_info.  */
static DisasJumpType op_bal(DisasContext *s, DisasOps *o)
{
    save_link_info(s, o);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}
1508 
1509 /*
1510  * Disassemble the target of a branch. The results are returned in a form
1511  * suitable for passing into help_branch():
1512  *
1513  * - bool IS_IMM reflects whether the target is fixed or computed. Non-EXECUTEd
1514  *   branches, whose DisasContext *S contains the relative immediate field RI,
1515  *   are considered fixed. All the other branches are considered computed.
1516  * - int IMM is the value of RI.
1517  * - TCGv_i64 CDEST is the address of the computed target.
1518  */
1519 #define disas_jdest(s, ri, is_imm, imm, cdest) do {                            \
1520     if (have_field(s, ri)) {                                                   \
1521         if (unlikely(s->ex_value)) {                                           \
1522             cdest = tcg_temp_new_i64();                                        \
1523             tcg_gen_ld_i64(cdest, tcg_env, offsetof(CPUS390XState, ex_target));\
1524             tcg_gen_addi_i64(cdest, cdest, (int64_t)get_field(s, ri) * 2);     \
1525             is_imm = false;                                                    \
1526         } else {                                                               \
1527             is_imm = true;                                                     \
1528         }                                                                      \
1529     } else {                                                                   \
1530         is_imm = false;                                                        \
1531     }                                                                          \
1532     imm = is_imm ? get_field(s, ri) : 0;                                       \
1533 } while (false)
1534 
/* Branch relative and save: store the link info, then branch
   unconditionally (mask 0xf) to the i2 target.  */
static DisasJumpType op_basi(DisasContext *s, DisasOps *o)
{
    DisasCompare c;
    bool is_imm;
    int imm;

    pc_to_link_info(o->out, s, s->pc_tmp);

    disas_jdest(s, i2, is_imm, imm, o->in2);
    disas_jcc(s, &c, 0xf);
    return help_branch(s, &c, is_imm, imm, o->in2);
}

/* BRANCH ON CONDITION: branch to the i2/r2 target if the CC matches
   the m1 mask.  */
static DisasJumpType op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s, m1);
    DisasCompare c;
    bool is_imm;
    int imm;

    /* BCR with R2 = 0 causes no branching */
    if (have_field(s, r2) && get_field(s, r2) == 0) {
        if (m1 == 14) {
            /* Perform serialization */
            /* FIXME: check for fast-BCR-serialization facility */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        if (m1 == 15) {
            /* Perform serialization */
            /* FIXME: perform checkpoint-synchronisation */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        return DISAS_NEXT;
    }

    disas_jdest(s, i2, is_imm, imm, o->in2);
    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1574 
/* BRANCH ON COUNT (32-bit): decrement the low word of r1 and branch if
   the result is non-zero.  */
static DisasJumpType op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    DisasCompare c;
    bool is_imm;
    TCGv_i64 t;
    int imm;

    c.cond = TCG_COND_NE;
    c.is_64 = false;

    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_constant_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);

    disas_jdest(s, i2, is_imm, imm, o->in2);
    return help_branch(s, &c, is_imm, imm, o->in2);
}

/* BRANCH ON COUNT HIGH: like op_bct32, but decrements the high word of
   r1.  The target is always the relative immediate i2.  */
static DisasJumpType op_bcth(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int imm = get_field(s, i2);
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;

    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, regs[r1], 32);
    tcg_gen_subi_i64(t, t, 1);
    store_reg32h_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_constant_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);

    return help_branch(s, &c, 1, imm, o->in2);
}

/* BRANCH ON COUNT (64-bit): decrement r1 and branch if non-zero.  */
static DisasJumpType op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    DisasCompare c;
    bool is_imm;
    int imm;

    c.cond = TCG_COND_NE;
    c.is_64 = true;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_constant_i64(0);

    disas_jdest(s, i2, is_imm, imm, o->in2);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1635 
/* BRANCH ON INDEX (32-bit): add r3 to r1, then branch by comparing the
   result against the odd register of the r3 pair.  A non-zero
   s->insn->data selects the low-or-equal form, otherwise high.  */
static DisasJumpType op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    DisasCompare c;
    bool is_imm;
    TCGv_i64 t;
    int imm;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);

    disas_jdest(s, i2, is_imm, imm, o->in2);
    return help_branch(s, &c, is_imm, imm, o->in2);
}

/* BRANCH ON INDEX (64-bit): as op_bx32 on full registers.  */
static DisasJumpType op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    DisasCompare c;
    bool is_imm;
    int imm;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    /* If r1 is the comparand register itself, snapshot it before the
       add below clobbers it.  */
    if (r1 == (r3 | 1)) {
        c.u.s64.b = load_reg(r3 | 1);
    } else {
        c.u.s64.b = regs[r3 | 1];
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];

    disas_jdest(s, i2, is_imm, imm, o->in2);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1683 
/* COMPARE AND BRANCH family: compare in1 against in2 with the relation
   selected by m3 (unsigned when s->insn->data is set) and branch on a
   match.  The target is the relative immediate i4, or, for the
   base+displacement forms, the b4/d4 address.  */
static DisasJumpType op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    o->out = NULL;
    disas_jdest(s, i4, is_imm, imm, o->out);
    if (!is_imm && !o->out) {
        /* No i4 field: this is the base+displacement form.  */
        imm = 0;
        o->out = get_address(s, 0, get_field(s, b4),
                             get_field(s, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}
1708 
/* BFP compares: the helpers set the CC directly in cc_op.  */

/* 32-bit BFP compare.  */
static DisasJumpType op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, tcg_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* 64-bit BFP compare.  */
static DisasJumpType op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, tcg_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* 128-bit BFP compare.  */
static DisasJumpType op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, tcg_env, o->in1_128, o->in2_128);
    set_cc_static(s);
    return DISAS_NEXT;
}
1729 
/*
 * Extract and validate the m3 (rounding mode) and m4 fields of an FP
 * instruction, packed into one i32 as m3 | m4 << 4 for the helpers.
 * Fields that only exist with the floating-point-extension facility are
 * forced to 0 when the facility is absent (per the m3_with_fpe /
 * m4_with_fpe flags).  Returns NULL after raising a specification
 * exception if the rounding mode is invalid.
 */
static TCGv_i32 fpinst_extract_m34(DisasContext *s, bool m3_with_fpe,
                                   bool m4_with_fpe)
{
    const bool fpe = s390_has_feat(S390_FEAT_FLOATING_POINT_EXT);
    uint8_t m3 = get_field(s, m3);
    uint8_t m4 = get_field(s, m4);

    /* m3 field was introduced with FPE */
    if (!fpe && m3_with_fpe) {
        m3 = 0;
    }
    /* m4 field was introduced with FPE */
    if (!fpe && m4_with_fpe) {
        m4 = 0;
    }

    /* Check for valid rounding modes. Mode 3 was introduced later. */
    if (m3 == 2 || m3 > 7 || (!fpe && m3 == 3)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return NULL;
    }

    return tcg_constant_i32(deposit32(m3, 4, 4, m4));
}
1754 
/*
 * CONVERT TO FIXED: BFP source (32/64/128-bit) to signed 32-bit (cf*)
 * or signed 64-bit (cg*) integer.  The m3 rounding mode is validated by
 * fpinst_extract_m34 (returning DISAS_NORETURN on a specification
 * exception) and each helper sets the CC.
 */

/* 32-bit BFP -> signed 32-bit.  */
static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfeb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* 64-bit BFP -> signed 32-bit.  */
static DisasJumpType op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfdb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* 128-bit BFP -> signed 32-bit.  */
static DisasJumpType op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfxb(o->out, tcg_env, o->in2_128, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* 32-bit BFP -> signed 64-bit.  */
static DisasJumpType op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgeb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* 64-bit BFP -> signed 64-bit.  */
static DisasJumpType op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgdb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* 128-bit BFP -> signed 64-bit.  */
static DisasJumpType op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgxb(o->out, tcg_env, o->in2_128, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
1826 
/*
 * CONVERT TO LOGICAL: the unsigned counterparts of the cf*/cg*
 * conversions above; BFP source to unsigned 32-bit (clf*) or unsigned
 * 64-bit (clg*) integer.  Each helper sets the CC.
 */

/* 32-bit BFP -> unsigned 32-bit.  */
static DisasJumpType op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfeb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* 64-bit BFP -> unsigned 32-bit.  */
static DisasJumpType op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfdb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* 128-bit BFP -> unsigned 32-bit.  */
static DisasJumpType op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfxb(o->out, tcg_env, o->in2_128, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* 32-bit BFP -> unsigned 64-bit.  */
static DisasJumpType op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgeb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* 64-bit BFP -> unsigned 64-bit.  */
static DisasJumpType op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgdb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* 128-bit BFP -> unsigned 64-bit.  */
static DisasJumpType op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgxb(o->out, tcg_env, o->in2_128, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
1898 
/*
 * CONVERT FROM FIXED/LOGICAL: signed (ce*/cd*/cxgb) or unsigned
 * (celgb/cdlgb/cxlgb) 64-bit integer to BFP.  Unlike the to-fixed
 * conversions above, these do not set the CC.
 */

/* Signed 64-bit -> 32-bit BFP.  */
static DisasJumpType op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cegb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

/* Signed 64-bit -> 64-bit BFP.  */
static DisasJumpType op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cdgb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

/* Signed 64-bit -> 128-bit BFP.  */
static DisasJumpType op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cxgb(o->out_128, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

/* Unsigned 64-bit -> 32-bit BFP.  */
static DisasJumpType op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_celgb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

/* Unsigned 64-bit -> 64-bit BFP.  */
static DisasJumpType op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cdlgb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

/* Unsigned 64-bit -> 128-bit BFP.  */
static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cxlgb(o->out_128, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}
1964 
/* CHECKSUM: the helper returns the new checksum and the number of bytes
   it consumed packed in a 128-bit pair, and sets the CC.  Advance the
   r2/r2+1 address/length pair by the consumed amount.  */
static DisasJumpType op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);
    TCGv_i128 pair = tcg_temp_new_i128();
    TCGv_i64 len = tcg_temp_new_i64();

    gen_helper_cksm(pair, tcg_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    tcg_gen_extr_i128_i64(o->out, len, pair);

    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);

    return DISAS_NEXT;
}
1980 
/* COMPARE LOGICAL (character): compare l+1 bytes at addr1 against in2.
   Power-of-two lengths up to 8 are done inline with two loads and the
   CC_OP_LTUGTU_64 comparison; everything else goes to the helper.  */
static DisasJumpType op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s, l1);
    TCGv_i64 src;
    TCGv_i32 vl;
    MemOp mop;

    switch (l + 1) {
    case 1:
    case 2:
    case 4:
    case 8:
        mop = ctz32(l + 1) | MO_TE;
        /* Do not update cc_src yet: loading cc_dst may cause an exception. */
        src = tcg_temp_new_i64();
        tcg_gen_qemu_ld_tl(src, o->addr1, get_mem_index(s), mop);
        tcg_gen_qemu_ld_tl(cc_dst, o->in2, get_mem_index(s), mop);
        gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, src, cc_dst);
        return DISAS_NEXT;
    default:
        vl = tcg_constant_i32(l);
        gen_helper_clc(cc_op, tcg_env, vl, o->addr1, o->in2);
        set_cc_static(s);
        return DISAS_NEXT;
    }
}
2007 
/*
 * CLCL (COMPARE LOGICAL LONG).  Operates on the even/odd register
 * pairs R1:R1+1 and R2:R2+1, so both register numbers must be even;
 * otherwise a specification exception is raised.
 */
static DisasJumpType op_clcl(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i32 t1, t2;

    /* r1 and r2 must be even.  */
    if (r1 & 1 || r2 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_constant_i32(r1);
    t2 = tcg_constant_i32(r2);
    gen_helper_clcl(cc_op, tcg_env, t1, t2);
    set_cc_static(s);
    return DISAS_NEXT;
}
2026 
/*
 * CLCLE (COMPARE LOGICAL LONG EXTENDED).  Uses register pairs R1:R1+1
 * and R3:R3+1 (both register numbers must be even) plus the padding
 * byte from the second-operand address (in2).
 */
static DisasJumpType op_clcle(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even.  */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_constant_i32(r1);
    t3 = tcg_constant_i32(r3);
    gen_helper_clcle(cc_op, tcg_env, t1, o->in2, t3);
    set_cc_static(s);
    return DISAS_NEXT;
}
2045 
/*
 * CLCLU (COMPARE LOGICAL LONG UNICODE).  Same register-pair layout and
 * evenness requirement as CLCLE; the helper compares two-byte units.
 */
static DisasJumpType op_clclu(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even.  */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_constant_i32(r1);
    t3 = tcg_constant_i32(r3);
    gen_helper_clclu(cc_op, tcg_env, t1, o->in2, t3);
    set_cc_static(s);
    return DISAS_NEXT;
}
2064 
/*
 * CLM (COMPARE LOGICAL CHARACTERS UNDER MASK).  The helper compares the
 * bytes of the low 32 bits of R1 selected by mask m3 against storage at
 * the in2 address, setting CC via cc_op.
 */
static DisasJumpType op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_constant_i32(get_field(s, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();

    /* Only the low 32 bits of in1 participate.  */
    tcg_gen_extrl_i64_i32(t1, o->in1);
    gen_helper_clm(cc_op, tcg_env, t1, m3, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
2075 
/*
 * CLST (COMPARE LOGICAL STRING).  regs[0] holds the terminator byte.
 * The helper returns the updated operand addresses as a 128-bit pair,
 * which are unpacked back into in1/in2 for the output hooks to store.
 */
static DisasJumpType op_clst(DisasContext *s, DisasOps *o)
{
    TCGv_i128 pair = tcg_temp_new_i128();

    gen_helper_clst(pair, tcg_env, regs[0], o->in1, o->in2);
    tcg_gen_extr_i128_i64(o->in2, o->in1, pair);

    set_cc_static(s);
    return DISAS_NEXT;
}
2086 
/*
 * COPY SIGN: out = sign bit of in1 combined with the magnitude
 * (all bits but the sign) of in2.
 */
static DisasJumpType op_cps(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t = tcg_temp_new_i64();
    tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);       /* sign of in1 */
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);  /* magnitude of in2 */
    tcg_gen_or_i64(o->out, o->out, t);
    return DISAS_NEXT;
}
2095 
/*
 * CS/CSY/CSG (COMPARE AND SWAP).  The operand size comes from
 * s->insn->data (a MemOp); the access must be aligned.  CC is 0 on a
 * successful exchange, 1 on mismatch.
 */
static DisasJumpType op_cs(DisasContext *s, DisasOps *o)
{
    int d2 = get_field(s, d2);
    int b2 = get_field(s, b2);
    TCGv_i64 addr, cc;

    /* Note that in1 = R3 (new value) and
       in2 = (zero-extended) R1 (expected value).  */

    addr = get_address(s, 0, b2, d2);
    tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
                               get_mem_index(s), s->insn->data | MO_ALIGN);

    /* Are the memory and expected values (un)equal?  Note that this setcond
       produces the output CC value, thus the NE sense of the test.  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
    tcg_gen_extrl_i64_i32(cc_op, cc);
    set_cc_static(s);

    return DISAS_NEXT;
}
2118 
/*
 * CDSG (COMPARE DOUBLE AND SWAP, 128-bit).  The R1 pair supplies the
 * expected value and receives the old memory contents; the R3 pair
 * supplies the new value.  CC != 0 iff the comparison failed.
 */
static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);

    o->out_128 = tcg_temp_new_i128();
    tcg_gen_concat_i64_i128(o->out_128, regs[r1 + 1], regs[r1]);

    /* Note out (R1:R1+1) = expected value and in2 (R3:R3+1) = new value.  */
    tcg_gen_atomic_cmpxchg_i128(o->out_128, o->addr1, o->out_128, o->in2_128,
                                get_mem_index(s), MO_BE | MO_128 | MO_ALIGN);

    /*
     * Extract result into cc_dst:cc_src, compare vs the expected value
     * in the as yet unmodified input registers, then update CC_OP.
     */
    tcg_gen_extr_i128_i64(cc_src, cc_dst, o->out_128);
    tcg_gen_xor_i64(cc_dst, cc_dst, regs[r1]);
    tcg_gen_xor_i64(cc_src, cc_src, regs[r1 + 1]);
    tcg_gen_or_i64(cc_dst, cc_dst, cc_src);
    set_cc_nz_u64(s, cc_dst);

    return DISAS_NEXT;
}
2142 
/*
 * CSST (COMPARE AND SWAP AND STORE).  Implemented entirely in a helper;
 * a separate _parallel variant is used when translating for a
 * multi-threaded (CF_PARALLEL) context so atomicity is handled there.
 */
static DisasJumpType op_csst(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    TCGv_i32 t_r3 = tcg_constant_i32(r3);

    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        gen_helper_csst_parallel(cc_op, tcg_env, t_r3, o->addr1, o->in2);
    } else {
        gen_helper_csst(cc_op, tcg_env, t_r3, o->addr1, o->in2);
    }

    set_cc_static(s);
    return DISAS_NEXT;
}
2157 
2158 #ifndef CONFIG_USER_ONLY
/*
 * CSP/CSPG (COMPARE AND SWAP AND PURGE).  A compare-and-swap whose
 * operand size is encoded in s->insn->data; on a successful exchange,
 * if bit 63 of R2 is set, the TLB of all CPUs is purged.
 */
static DisasJumpType op_csp(DisasContext *s, DisasOps *o)
{
    MemOp mop = s->insn->data;
    TCGv_i64 addr, old, cc;
    TCGLabel *lab = gen_new_label();

    /* Note that in1 = R1 (zero-extended expected value),
       out = R1 (original reg), out2 = R1+1 (new value).  */

    addr = tcg_temp_new_i64();
    old = tcg_temp_new_i64();
    /* The second-operand address is masked to the operand size.  */
    tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
    tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
                               get_mem_index(s), mop | MO_ALIGN);

    /* Are the memory and expected values (un)equal?  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
    tcg_gen_extrl_i64_i32(cc_op, cc);

    /* Write back the output now, so that it happens before the
       following branch, so that we don't need local temps.  */
    if ((mop & MO_SIZE) == MO_32) {
        /* 32-bit op: only replace the low half of R1.  */
        tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
    } else {
        tcg_gen_mov_i64(o->out, old);
    }

    /* If the comparison was equal, and the LSB of R2 was set,
       then we need to flush the TLB (for all cpus).  */
    tcg_gen_xori_i64(cc, cc, 1);
    tcg_gen_and_i64(cc, cc, o->in2);
    tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);

    gen_helper_purge(tcg_env);
    gen_set_label(lab);

    return DISAS_NEXT;
}
2198 #endif
2199 
/*
 * CVD (CONVERT TO DECIMAL).  The helper converts the low 32 bits of in1
 * to a packed-decimal doubleword, which is then stored at the in2
 * address.
 */
static DisasJumpType op_cvd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i32 t2 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t2, o->in1);
    gen_helper_cvd(t1, t2);
    tcg_gen_qemu_st_i64(t1, o->in2, get_mem_index(s), MO_TEUQ);
    return DISAS_NEXT;
}
2209 
/*
 * Compare-and-trap family.  m3 selects the trapping relation via
 * ltgt_cond[]; the condition is inverted so the branch *skips* the trap
 * when the relation does not hold.  s->insn->data selects unsigned
 * (logical) comparison.
 */
static DisasJumpType op_ct(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    TCGLabel *lab = gen_new_label();
    TCGCond c;

    c = tcg_invert_cond(ltgt_cond[m3]);
    if (s->insn->data) {
        c = tcg_unsigned_cond(c);
    }
    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);

    /* Trap.  */
    gen_trap(s);

    gen_set_label(lab);
    return DISAS_NEXT;
}
2228 
/*
 * Unicode conversion instructions (CU12/CU14/CU21/CU24/CU41/CU42).
 * s->insn->data encodes which conversion (e.g. 12 = UTF-8 to UTF-16).
 * R1 and R2 are register pairs and must both be even.  m3 is the
 * well-formedness-check flag, honored only with the ETF3 enhancement
 * facility.
 */
static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i32 tr1, tr2, chk;

    /* R1 and R2 must both be even.  */
    if ((r1 | r2) & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
        /* Without ETF3-enhancement, the check bit is ignored.  */
        m3 = 0;
    }

    tr1 = tcg_constant_i32(r1);
    tr2 = tcg_constant_i32(r2);
    chk = tcg_constant_i32(m3);

    switch (s->insn->data) {
    case 12:
        gen_helper_cu12(cc_op, tcg_env, tr1, tr2, chk);
        break;
    case 14:
        gen_helper_cu14(cc_op, tcg_env, tr1, tr2, chk);
        break;
    case 21:
        gen_helper_cu21(cc_op, tcg_env, tr1, tr2, chk);
        break;
    case 24:
        gen_helper_cu24(cc_op, tcg_env, tr1, tr2, chk);
        break;
    case 41:
        gen_helper_cu41(cc_op, tcg_env, tr1, tr2, chk);
        break;
    case 42:
        gen_helper_cu42(cc_op, tcg_env, tr1, tr2, chk);
        break;
    default:
        g_assert_not_reached();
    }

    set_cc_static(s);
    return DISAS_NEXT;
}
2275 
2276 #ifndef CONFIG_USER_ONLY
/* DIAGNOSE: hypercall interface; the i2 field selects the function.  */
static DisasJumpType op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
    TCGv_i32 func_code = tcg_constant_i32(get_field(s, i2));

    gen_helper_diag(tcg_env, r1, r3, func_code);
    return DISAS_NEXT;
}
2286 #endif
2287 
/*
 * 32-bit signed divide.  The helper packs remainder and quotient into
 * one i64, which is split into out2 (quotient, low) and out (remainder,
 * high) for the R1/R1+1 register pair.
 */
static DisasJumpType op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out, tcg_env, o->in1, o->in2);
    tcg_gen_extr32_i64(o->out2, o->out, o->out);
    return DISAS_NEXT;
}
2294 
/* 32-bit unsigned divide; result split as in op_divs32.  */
static DisasJumpType op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out, tcg_env, o->in1, o->in2);
    tcg_gen_extr32_i64(o->out2, o->out, o->out);
    return DISAS_NEXT;
}
2301 
/*
 * 64-bit signed divide.  The helper returns a 128-bit pair that is
 * unpacked into out2 (low half) and out (high half).
 */
static DisasJumpType op_divs64(DisasContext *s, DisasOps *o)
{
    TCGv_i128 t = tcg_temp_new_i128();

    gen_helper_divs64(t, tcg_env, o->in1, o->in2);
    tcg_gen_extr_i128_i64(o->out2, o->out, t);
    return DISAS_NEXT;
}
2310 
/*
 * 128-by-64-bit unsigned divide: dividend is the out:out2 register
 * pair, divisor is in2; the 128-bit result pair is unpacked into
 * out2/out.
 */
static DisasJumpType op_divu64(DisasContext *s, DisasOps *o)
{
    TCGv_i128 t = tcg_temp_new_i128();

    gen_helper_divu64(t, tcg_env, o->out, o->out2, o->in2);
    tcg_gen_extr_i128_i64(o->out2, o->out, t);
    return DISAS_NEXT;
}
2319 
/* DEB: short BFP divide, out = in1 / in2 (helper handles FP state).  */
static DisasJumpType op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}
2325 
/* DDB: long BFP divide, out = in1 / in2.  */
static DisasJumpType op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}
2331 
/* DXB: extended (128-bit) BFP divide.  */
static DisasJumpType op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out_128, tcg_env, o->in1_128, o->in2_128);
    return DISAS_NEXT;
}
2337 
/* EAR: extract access register r2 (zero-extended) into out.  */
static DisasJumpType op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);
    tcg_gen_ld32u_i64(o->out, tcg_env, offsetof(CPUS390XState, aregs[r2]));
    return DISAS_NEXT;
}
2344 
/* ECAG: we model no cache topology, so always return all-ones.  */
static DisasJumpType op_ecag(DisasContext *s, DisasOps *o)
{
    /* No cache information provided.  */
    tcg_gen_movi_i64(o->out, -1);
    return DISAS_NEXT;
}
2351 
/* EFPC: extract the FP control register (zero-extended) into out.  */
static DisasJumpType op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, tcg_env, offsetof(CPUS390XState, fpc));
    return DISAS_NEXT;
}
2357 
/*
 * EPSW (EXTRACT PSW).  R1 receives the high word of the PSW mask with
 * the current CC deposited at bits 12-13; R2 (if nonzero) receives the
 * low word of the mask.
 */
static DisasJumpType op_epsw(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t_cc = tcg_temp_new_i64();

    /* Note the "subsequently" in the PoO, which implies a defined result
       if r1 == r2.  Thus we cannot defer these writes to an output hook.  */
    gen_op_calc_cc(s);
    tcg_gen_extu_i32_i64(t_cc, cc_op);
    tcg_gen_shri_i64(t, psw_mask, 32);
    tcg_gen_deposit_i64(t, t, t_cc, 12, 2);
    store_reg32_i64(r1, t);
    if (r2 != 0) {
        store_reg32_i64(r2, psw_mask);
    }
    return DISAS_NEXT;
}
2377 
/*
 * EX/EXRL (EXECUTE).  The helper fetches the target instruction at the
 * in2 address, ORs bits from R1 into it, and stashes it in env for the
 * next translation; executing an EXECUTE as the target is forbidden.
 * PSW address and cc_op are synced first since translation restarts.
 */
static DisasJumpType op_ex(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    TCGv_i32 ilen;
    TCGv_i64 v1;

    /* Nested EXECUTE is not allowed.  */
    if (unlikely(s->ex_value)) {
        gen_program_exception(s, PGM_EXECUTE);
        return DISAS_NORETURN;
    }

    update_psw_addr(s);
    update_cc_op(s);

    /* R1 == 0 means no modification of the target instruction.  */
    if (r1 == 0) {
        v1 = tcg_constant_i64(0);
    } else {
        v1 = regs[r1];
    }

    ilen = tcg_constant_i32(s->ilen);
    gen_helper_ex(tcg_env, ilen, v1, o->in2);

    return DISAS_PC_CC_UPDATED;
}
2404 
/* FIEB: round short BFP (in2) to an FP integer value in out.  */
static DisasJumpType op_fieb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        /* Invalid rounding fields; exception already generated.  */
        return DISAS_NORETURN;
    }
    gen_helper_fieb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}
2415 
/* FIDB: round long BFP (in2) to an FP integer value in out.  */
static DisasJumpType op_fidb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fidb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}
2426 
/* FIXB: round extended (128-bit) BFP to an FP integer value.  */
static DisasJumpType op_fixb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fixb(o->out_128, tcg_env, o->in2_128, m34);
    return DISAS_NEXT;
}
2437 
/*
 * FLOGR (FIND LEFTMOST ONE).  R1 gets the bit number of the leftmost
 * one (64 if the input is zero); R1+1 gets the input with that bit
 * cleared.
 */
static DisasJumpType op_flogr(DisasContext *s, DisasOps *o)
{
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64.  It also lets cc_dst be a convenient
       temporary during our computation.  */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64.  */
    tcg_gen_clzi_i64(o->out, o->in2, 64);

    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
       value by 64, which is undefined.  But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing.  */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out);
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
    return DISAS_NEXT;
}
2457 
/*
 * ICM/ICMH (INSERT CHARACTERS UNDER MASK).  Bytes selected by mask m3
 * are loaded from in2 and inserted into R1; s->insn->data is the bit
 * base (0 for ICM, 32 for ICMH).  Contiguous masks become a single
 * wide load; sparse masks become a sequence of byte loads.  CC is
 * computed from the inserted bits via CC_OP_ICM with mask ccm.
 */
static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    int pos, len, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();
    uint64_t ccm;

    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit load.  */
        tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_TEUL);
        len = 32;
        goto one_insert;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit load.  */
        tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_TEUW);
        len = 16;
        goto one_insert;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit load.  */
        tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_UB);
        len = 8;
        goto one_insert;

    one_insert:
        /* ctz32(m3) locates the lowest selected byte within the mask.  */
        pos = base + ctz32(m3) * 8;
        tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
        ccm = ((1ull << len) - 1) << pos;
        break;

    case 0:
        /* Recognize access exceptions for the first byte.  */
        tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_UB);
        gen_op_movi_cc(s, 0);
        return DISAS_NEXT;

    default:
        /* This is going to be a sequence of loads and inserts.  */
        pos = base + 32 - 8;
        ccm = 0;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_UB);
                tcg_gen_addi_i64(o->in2, o->in2, 1);
                tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
                ccm |= 0xffull << pos;
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }

    tcg_gen_movi_i64(tmp, ccm);
    gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
    return DISAS_NEXT;
}
2522 
/*
 * Insert-immediate family: deposit in2 into in1 at the bit position and
 * width packed into s->insn->data (low byte = shift, high byte = size).
 */
static DisasJumpType op_insi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
    return DISAS_NEXT;
}
2530 
/*
 * IPM (INSERT PROGRAM MASK).  Byte 24-31 of R1 receives the program
 * mask (PSW bits 40-43) in its low nibble with the current CC above it.
 */
static DisasJumpType op_ipm(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    /* Materialize CC before reading it.  */
    gen_op_calc_cc(s);
    t1 = tcg_temp_new_i64();
    tcg_gen_extract_i64(t1, psw_mask, 40, 4);
    t2 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(t2, cc_op);
    tcg_gen_deposit_i64(t1, t1, t2, 4, 60);
    tcg_gen_deposit_i64(o->out, o->out, t1, 24, 8);
    return DISAS_NEXT;
}
2544 
2545 #ifndef CONFIG_USER_ONLY
/*
 * IDTE (INVALIDATE DAT TABLE ENTRY).  The m4 local-clearing hint is
 * only honored when the facility exists; otherwise pass 0.
 */
static DisasJumpType op_idte(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m4;

    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
        m4 = tcg_constant_i32(get_field(s, m4));
    } else {
        m4 = tcg_constant_i32(0);
    }
    gen_helper_idte(tcg_env, o->in1, o->in2, m4);
    return DISAS_NEXT;
}
2558 
/*
 * IPTE (INVALIDATE PAGE TABLE ENTRY).  Same m4 handling as IDTE:
 * the local-clearing hint requires the facility.
 */
static DisasJumpType op_ipte(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m4;

    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
        m4 = tcg_constant_i32(get_field(s, m4));
    } else {
        m4 = tcg_constant_i32(0);
    }
    gen_helper_ipte(tcg_env, o->in1, o->in2, m4);
    return DISAS_NEXT;
}
2571 
/* ISKE: insert the storage key for the address in in2 into out.  */
static DisasJumpType op_iske(DisasContext *s, DisasOps *o)
{
    gen_helper_iske(o->out, tcg_env, o->in2);
    return DISAS_NEXT;
}
2577 #endif
2578 
2579 static DisasJumpType op_msa(DisasContext *s, DisasOps *o)
2580 {
2581     int r1 = have_field(s, r1) ? get_field(s, r1) : 0;
2582     int r2 = have_field(s, r2) ? get_field(s, r2) : 0;
2583     int r3 = have_field(s, r3) ? get_field(s, r3) : 0;
2584     TCGv_i32 t_r1, t_r2, t_r3, type;
2585 
2586     switch (s->insn->data) {
2587     case S390_FEAT_TYPE_KMA:
2588         if (r3 == r1 || r3 == r2) {
2589             gen_program_exception(s, PGM_SPECIFICATION);
2590             return DISAS_NORETURN;
2591         }
2592         /* FALL THROUGH */
2593     case S390_FEAT_TYPE_KMCTR:
2594         if (r3 & 1 || !r3) {
2595             gen_program_exception(s, PGM_SPECIFICATION);
2596             return DISAS_NORETURN;
2597         }
2598         /* FALL THROUGH */
2599     case S390_FEAT_TYPE_PPNO:
2600     case S390_FEAT_TYPE_KMF:
2601     case S390_FEAT_TYPE_KMC:
2602     case S390_FEAT_TYPE_KMO:
2603     case S390_FEAT_TYPE_KM:
2604         if (r1 & 1 || !r1) {
2605             gen_program_exception(s, PGM_SPECIFICATION);
2606             return DISAS_NORETURN;
2607         }
2608         /* FALL THROUGH */
2609     case S390_FEAT_TYPE_KMAC:
2610     case S390_FEAT_TYPE_KIMD:
2611     case S390_FEAT_TYPE_KLMD:
2612         if (r2 & 1 || !r2) {
2613             gen_program_exception(s, PGM_SPECIFICATION);
2614             return DISAS_NORETURN;
2615         }
2616         /* FALL THROUGH */
2617     case S390_FEAT_TYPE_PCKMO:
2618     case S390_FEAT_TYPE_PCC:
2619         break;
2620     default:
2621         g_assert_not_reached();
2622     };
2623 
2624     t_r1 = tcg_constant_i32(r1);
2625     t_r2 = tcg_constant_i32(r2);
2626     t_r3 = tcg_constant_i32(r3);
2627     type = tcg_constant_i32(s->insn->data);
2628     gen_helper_msa(cc_op, tcg_env, t_r1, t_r2, t_r3, type);
2629     set_cc_static(s);
2630     return DISAS_NEXT;
2631 }
2632 
/* KEB: short BFP compare-and-signal; CC set by the helper.  */
static DisasJumpType op_keb(DisasContext *s, DisasOps *o)
{
    gen_helper_keb(cc_op, tcg_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
2639 
/* KDB: long BFP compare-and-signal; CC set by the helper.  */
static DisasJumpType op_kdb(DisasContext *s, DisasOps *o)
{
    gen_helper_kdb(cc_op, tcg_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
2646 
/* KXB: extended (128-bit) BFP compare-and-signal; CC set by helper.  */
static DisasJumpType op_kxb(DisasContext *s, DisasOps *o)
{
    gen_helper_kxb(cc_op, tcg_env, o->in1_128, o->in2_128);
    set_cc_static(s);
    return DISAS_NEXT;
}
2653 
/*
 * Common body for LOAD AND ADD (LAA/LAAG and their logical variants).
 * The architected result in R1 is the *original* memory value, which
 * the atomic fetch-add leaves in in2.  The sum is then redone locally
 * purely so CC can be computed: with addu64 set, a carry-propagating
 * add2 is used so cc_src captures the carry for the logical form.
 */
static DisasJumpType help_laa(DisasContext *s, DisasOps *o, bool addu64)
{
    /* The real output is indeed the original value in memory;
       recompute the addition for the computation of CC.  */
    tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the addition for setting CC.  */
    if (addu64) {
        tcg_gen_movi_i64(cc_src, 0);
        tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    } else {
        tcg_gen_add_i64(o->out, o->in1, o->in2);
    }
    return DISAS_NEXT;
}
2669 
/* LAA/LAAG: signed load-and-add variant of help_laa.  */
static DisasJumpType op_laa(DisasContext *s, DisasOps *o)
{
    return help_laa(s, o, false);
}
2674 
/* LAAL/LAALG: logical (carry-tracking) load-and-add variant.  */
static DisasJumpType op_laa_addu64(DisasContext *s, DisasOps *o)
{
    return help_laa(s, o, true);
}
2679 
/*
 * LAN/LANG (LOAD AND AND).  The result in R1 is the original memory
 * value (left in in2 by the atomic fetch-and); the AND is redone
 * locally only so CC can be computed from it.
 */
static DisasJumpType op_lan(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory;
       recompute the AND only for the computation of CC.  */
    tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
2690 
/*
 * LAO/LAOG (LOAD AND OR).  As with op_lan: R1 gets the original memory
 * value; the OR is redone locally only for CC.
 */
static DisasJumpType op_lao(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory;
       recompute the OR only for the computation of CC.  */
    tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
2701 
/*
 * LAX/LAXG (LOAD AND EXCLUSIVE OR).  As with op_lan: R1 gets the
 * original memory value; the XOR is redone locally only for CC.
 */
static DisasJumpType op_lax(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory;
       recompute the XOR only for the computation of CC.  */
    tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
2712 
/* LDEB: lengthen short BFP (in2) to long BFP (out); always exact.  */
static DisasJumpType op_ldeb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldeb(o->out, tcg_env, o->in2);
    return DISAS_NEXT;
}
2718 
/* LEDB: round long BFP (in2) to short BFP (out).  */
static DisasJumpType op_ledb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        /* Invalid rounding fields; exception already generated.  */
        return DISAS_NORETURN;
    }
    gen_helper_ledb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}
2729 
/* LDXB: round extended (128-bit) BFP to long BFP.  */
static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_ldxb(o->out, tcg_env, o->in2_128, m34);
    return DISAS_NEXT;
}
2740 
/* LEXB: round extended (128-bit) BFP to short BFP.  */
static DisasJumpType op_lexb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_lexb(o->out, tcg_env, o->in2_128, m34);
    return DISAS_NEXT;
}
2751 
/* LXDB: lengthen long BFP (in2) to extended BFP (out_128).  */
static DisasJumpType op_lxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxdb(o->out_128, tcg_env, o->in2);
    return DISAS_NEXT;
}
2757 
/* LXEB: lengthen short BFP (in2) to extended BFP (out_128).  */
static DisasJumpType op_lxeb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxeb(o->out_128, tcg_env, o->in2);
    return DISAS_NEXT;
}
2763 
/*
 * LDE-style load: place the 32-bit value from the low half of in2 into
 * the high half of out (short FP values live in the top of the FPR).
 */
static DisasJumpType op_lde(DisasContext *s, DisasOps *o)
{
    tcg_gen_shli_i64(o->out, o->in2, 32);
    return DISAS_NEXT;
}
2769 
/* LLGT: load 31 bits, clearing bit 32 and the high word.  */
static DisasJumpType op_llgt(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return DISAS_NEXT;
}
2775 
/* Load a sign-extended byte from the in2 address.  */
static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_SB);
    return DISAS_NEXT;
}
2781 
/* Load a zero-extended byte from the in2 address.  */
static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_UB);
    return DISAS_NEXT;
}
2787 
/* Load a sign-extended big-endian halfword from the in2 address.  */
static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TESW);
    return DISAS_NEXT;
}
2793 
/* Load a zero-extended big-endian halfword from the in2 address.  */
static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUW);
    return DISAS_NEXT;
}
2799 
/*
 * Load a sign-extended word; s->insn->data may add flags such as
 * alignment requirements for specific instructions.
 */
static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_tl(o->out, o->in2, get_mem_index(s),
                       MO_TESL | s->insn->data);
    return DISAS_NEXT;
}
2806 
/* Load a zero-extended word; s->insn->data may add MemOp flags.  */
static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_tl(o->out, o->in2, get_mem_index(s),
                       MO_TEUL | s->insn->data);
    return DISAS_NEXT;
}
2813 
/* Load a doubleword; s->insn->data may add MemOp flags.  */
static DisasJumpType op_ld64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s),
                        MO_TEUQ | s->insn->data);
    return DISAS_NEXT;
}
2820 
/* LAT (LOAD AND TRAP): store the 32-bit value, then trap if it is 0.  */
static DisasJumpType op_lat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32_i64(get_field(s, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
2831 
/* LGAT: load a doubleword and trap if it is 0.  */
static DisasJumpType op_lgat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUQ);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
2842 
/* LFHAT: store into the high half of R1, then trap if the value is 0. */
static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32h_i64(get_field(s, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
2853 
/* LLGFAT: load a zero-extended word and trap if it is 0.  */
static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();

    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUL);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
2865 
/* LLGTAT: keep the low 31 bits of in2 and trap if the result is 0.  */
static DisasJumpType op_llgtat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
2876 
/*
 * LOAD ON CONDITION / SELECT family.  The condition mask comes from m3
 * (LOC*) or m4 (SELECT).  out = condition ? in2 : in1, expressed as a
 * movcond on either the native 64-bit compare or a widened 32-bit one.
 */
static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;

    if (have_field(s, m3)) {
        /* LOAD * ON CONDITION */
        disas_jcc(s, &c, get_field(s, m3));
    } else {
        /* SELECT */
        disas_jcc(s, &c, get_field(s, m4));
    }

    if (c.is_64) {
        tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
                            o->in2, o->in1);
    } else {
        /* 32-bit comparison: materialize the condition as 0/1 and
           widen so a single 64-bit movcond can be used.  */
        TCGv_i32 t32 = tcg_temp_new_i32();
        TCGv_i64 t, z;

        tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);

        t = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t, t32);

        z = tcg_constant_i64(0);
        tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
    }

    return DISAS_NEXT;
}
2907 
2908 #ifndef CONFIG_USER_ONLY
/*
 * LCTL (LOAD CONTROL, 32-bit).  Control-register changes can unmask
 * pending interrupts, so force a return to the main loop.
 */
static DisasJumpType op_lctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_lctl(tcg_env, r1, o->in2, r3);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    s->exit_to_mainloop = true;
    return DISAS_TOO_MANY;
}
2919 
/* LCTLG (LOAD CONTROL, 64-bit); same mainloop exit as LCTL.  */
static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_lctlg(tcg_env, r1, o->in2, r3);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    s->exit_to_mainloop = true;
    return DISAS_TOO_MANY;
}
2930 
/* LRA (LOAD REAL ADDRESS): translation done by helper; CC via cc_op.  */
static DisasJumpType op_lra(DisasContext *s, DisasOps *o)
{
    gen_helper_lra(o->out, tcg_env, o->out, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
2937 
/* LPP: load the program-parameter register from in2.  */
static DisasJumpType op_lpp(DisasContext *s, DisasOps *o)
{
    tcg_gen_st_i64(o->in2, tcg_env, offsetof(CPUS390XState, pp));
    return DISAS_NEXT;
}
2943 
/*
 * LPSW: load a short (64-bit) PSW from storage and expand it to the
 * internal long format before handing it to the load_psw helper.
 * This ends the TB unconditionally.
 */
static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o)
{
    TCGv_i64 mask, addr;

    per_breaking_event(s);

    /*
     * Convert the short PSW into the normal PSW, similar to what
     * s390_cpu_load_normal() does.
     */
    mask = tcg_temp_new_i64();
    addr = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(mask, o->in2, get_mem_index(s), MO_TEUQ | MO_ALIGN_8);
    tcg_gen_andi_i64(addr, mask, PSW_MASK_SHORT_ADDR);
    tcg_gen_andi_i64(mask, mask, PSW_MASK_SHORT_CTRL);
    /* Clear the short-PSW indicator bit for the internal format.  */
    tcg_gen_xori_i64(mask, mask, PSW_MASK_SHORTPSW);
    gen_helper_load_psw(tcg_env, mask, addr);
    return DISAS_NORETURN;
}
2963 
/*
 * LPSWE: load a full 128-bit PSW (mask + address) from storage; only
 * the first doubleword requires 8-byte alignment checking.  Ends the TB.
 */
static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
                        MO_TEUQ | MO_ALIGN_8);
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_ld_i64(t2, o->in2, get_mem_index(s), MO_TEUQ);
    gen_helper_load_psw(tcg_env, t1, t2);
    return DISAS_NORETURN;
}
2979 #endif
2980 
2981 static DisasJumpType op_lam(DisasContext *s, DisasOps *o)
2982 {
2983     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
2984     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
2985 
2986     gen_helper_lam(tcg_env, r1, o->in2, r3);
2987     return DISAS_NEXT;
2988 }
2989 
/*
 * LOAD MULTIPLE (32-bit): load registers r1..r3 (wrapping modulo 16)
 * from consecutive words at o->in2.  The first and last words are
 * loaded before any register is written so that a page fault leaves
 * all registers unmodified.
 */
static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
        store_reg32_i64(r1, t1);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld_i64(t2, t2, get_mem_index(s), MO_TEUL);
    store_reg32_i64(r1, t1);
    store_reg32_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
        store_reg32_i64(r1, t1);
    }
    return DISAS_NEXT;
}
3029 
/*
 * LOAD MULTIPLE HIGH: like op_lm32, but each 32-bit word is written to
 * the high half of the target register (store_reg32h_i64).  First/last
 * loads precede all register writes so a fault leaves state untouched.
 */
static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
        store_reg32h_i64(r1, t1);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld_i64(t2, t2, get_mem_index(s), MO_TEUL);
    store_reg32h_i64(r1, t1);
    store_reg32h_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
        store_reg32h_i64(r1, t1);
    }
    return DISAS_NEXT;
}
3069 
/*
 * LOAD MULTIPLE (64-bit): load regs[r1..r3] (wrapping modulo 16) from
 * consecutive doublewords at o->in2.  regs[r1] is staged through t1 so
 * that a fault on the last-register load leaves regs[r1] unmodified.
 */
static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld_i64(regs[r1], o->in2, get_mem_index(s), MO_TEUQ);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUQ);
    tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld_i64(regs[r3], t2, get_mem_index(s), MO_TEUQ);
    tcg_gen_mov_i64(regs[r1], t1);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t1, 8);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t1);
        tcg_gen_qemu_ld_i64(regs[r1], o->in2, get_mem_index(s), MO_TEUQ);
    }
    return DISAS_NEXT;
}
3106 
/*
 * LOAD PAIR DISJOINT: two aligned loads from independent addresses that
 * must appear interlocked.  Element size comes from s->insn->data.
 */
static DisasJumpType op_lpd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 a1, a2;
    MemOp mop = s->insn->data;

    /* In a parallel context, stop the world and single step.  */
    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        update_psw_addr(s);
        update_cc_op(s);
        gen_exception(EXCP_ATOMIC);
        return DISAS_NORETURN;
    }

    /* In a serial context, perform the two loads ... */
    a1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
    a2 = get_address(s, 0, get_field(s, b2), get_field(s, d2));
    tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
    tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);

    /* ... and indicate that we performed them while interlocked.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}
3130 
/* 128-bit aligned load from o->in2 into a freshly allocated out_128. */
static DisasJumpType op_lpq(DisasContext *s, DisasOps *o)
{
    o->out_128 = tcg_temp_new_i128();
    tcg_gen_qemu_ld_i128(o->out_128, o->in2, get_mem_index(s),
                         MO_TE | MO_128 | MO_ALIGN);
    return DISAS_NEXT;
}
3138 
3139 #ifndef CONFIG_USER_ONLY
/* Load via the real-address MMU index; access size comes from insn->data. */
static DisasJumpType op_lura(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_tl(o->out, o->in2, MMU_REAL_IDX, s->insn->data);
    return DISAS_NEXT;
}
3145 #endif
3146 
/* Load-and-zero-rightmost-byte: copy in2 with the low 8 bits cleared. */
static DisasJumpType op_lzrb(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, -256);
    return DISAS_NEXT;
}
3152 
/*
 * LOAD COUNT TO BLOCK BOUNDARY: out = min(16, bytes remaining until the
 * next 2^(m3+6)-byte boundary after addr1); cc derived via CC_OP_LCBB.
 */
static DisasJumpType op_lcbb(DisasContext *s, DisasOps *o)
{
    const int64_t block_size = (1ull << (get_field(s, m3) + 6));

    if (get_field(s, m3) > 6) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    /* (addr | -block_size) is -(block_size - addr % block_size), so the
       negation yields the distance to the boundary. */
    tcg_gen_ori_i64(o->addr1, o->addr1, -block_size);
    tcg_gen_neg_i64(o->addr1, o->addr1);
    tcg_gen_movi_i64(o->out, 16);
    tcg_gen_umin_i64(o->out, o->out, o->addr1);
    gen_op_update1_cc_i64(s, CC_OP_LCBB, o->out);
    return DISAS_NEXT;
}
3169 
/*
 * MONITOR CALL: the class in i2 must fit in 4 bits; in system mode the
 * helper decides whether a monitor event is raised, otherwise this is a NOP.
 */
static DisasJumpType op_mc(DisasContext *s, DisasOps *o)
{
    const uint8_t monitor_class = get_field(s, i2);

    if (monitor_class & 0xf0) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

#if !defined(CONFIG_USER_ONLY)
    gen_helper_monitor_call(tcg_env, o->addr1,
                            tcg_constant_i32(monitor_class));
#endif
    /* Defaults to a NOP. */
    return DISAS_NEXT;
}
3186 
/* Move by transferring ownership of the in2 temporary to out (no copy). */
static DisasJumpType op_mov2(DisasContext *s, DisasOps *o)
{
    o->out = o->in2;
    o->in2 = NULL;
    return DISAS_NEXT;
}
3193 
/*
 * Move (MVCOS-family operand setup): transfer in2 to out, then set access
 * register r1 according to the current address-space control in the TB
 * flags.  For secondary mode, ar1 is copied from the b2 access register
 * (or 0 when b2 is register 0).
 */
static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s, b2);
    TCGv ar1 = tcg_temp_new_i64();
    int r1 = get_field(s, r1);

    o->out = o->in2;
    o->in2 = NULL;

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 0);
        break;
    case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 1);
        break;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        if (b2) {
            tcg_gen_ld32u_i64(ar1, tcg_env, offsetof(CPUS390XState, aregs[b2]));
        } else {
            tcg_gen_movi_i64(ar1, 0);
        }
        break;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 2);
        break;
    }

    tcg_gen_st32_i64(ar1, tcg_env, offsetof(CPUS390XState, aregs[r1]));
    return DISAS_NEXT;
}
3225 
/* Move a register pair by transferring ownership of both input temporaries. */
static DisasJumpType op_movx(DisasContext *s, DisasOps *o)
{
    o->out = o->in1;
    o->out2 = o->in2;
    o->in1 = NULL;
    o->in2 = NULL;
    return DISAS_NEXT;
}
3234 
3235 static DisasJumpType op_mvc(DisasContext *s, DisasOps *o)
3236 {
3237     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3238 
3239     gen_helper_mvc(tcg_env, l, o->addr1, o->in2);
3240     return DISAS_NEXT;
3241 }
3242 
/* MOVE RIGHT TO LEFT: length is taken from regs[0]; all work in the helper. */
static DisasJumpType op_mvcrl(DisasContext *s, DisasOps *o)
{
    gen_helper_mvcrl(tcg_env, regs[0], o->addr1, o->in2);
    return DISAS_NEXT;
}
3248 
3249 static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o)
3250 {
3251     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3252 
3253     gen_helper_mvcin(tcg_env, l, o->addr1, o->in2);
3254     return DISAS_NEXT;
3255 }
3256 
3257 static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o)
3258 {
3259     int r1 = get_field(s, r1);
3260     int r2 = get_field(s, r2);
3261     TCGv_i32 t1, t2;
3262 
3263     /* r1 and r2 must be even.  */
3264     if (r1 & 1 || r2 & 1) {
3265         gen_program_exception(s, PGM_SPECIFICATION);
3266         return DISAS_NORETURN;
3267     }
3268 
3269     t1 = tcg_constant_i32(r1);
3270     t2 = tcg_constant_i32(r2);
3271     gen_helper_mvcl(cc_op, tcg_env, t1, t2);
3272     set_cc_static(s);
3273     return DISAS_NEXT;
3274 }
3275 
3276 static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o)
3277 {
3278     int r1 = get_field(s, r1);
3279     int r3 = get_field(s, r3);
3280     TCGv_i32 t1, t3;
3281 
3282     /* r1 and r3 must be even.  */
3283     if (r1 & 1 || r3 & 1) {
3284         gen_program_exception(s, PGM_SPECIFICATION);
3285         return DISAS_NORETURN;
3286     }
3287 
3288     t1 = tcg_constant_i32(r1);
3289     t3 = tcg_constant_i32(r3);
3290     gen_helper_mvcle(cc_op, tcg_env, t1, o->in2, t3);
3291     set_cc_static(s);
3292     return DISAS_NEXT;
3293 }
3294 
3295 static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o)
3296 {
3297     int r1 = get_field(s, r1);
3298     int r3 = get_field(s, r3);
3299     TCGv_i32 t1, t3;
3300 
3301     /* r1 and r3 must be even.  */
3302     if (r1 & 1 || r3 & 1) {
3303         gen_program_exception(s, PGM_SPECIFICATION);
3304         return DISAS_NORETURN;
3305     }
3306 
3307     t1 = tcg_constant_i32(r1);
3308     t3 = tcg_constant_i32(r3);
3309     gen_helper_mvclu(cc_op, tcg_env, t1, o->in2, t3);
3310     set_cc_static(s);
3311     return DISAS_NEXT;
3312 }
3313 
/* MOVE WITH OPTIONAL SPECIFICATIONS: regs[r3] carries the length; cc set by helper. */
static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    gen_helper_mvcos(cc_op, tcg_env, o->addr1, o->in2, regs[r3]);
    set_cc_static(s);
    return DISAS_NEXT;
}
3321 
3322 #ifndef CONFIG_USER_ONLY
/* MOVE TO PRIMARY: note the first register number is encoded in the l1 field. */
static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, l1);
    int r3 = get_field(s, r3);
    gen_helper_mvcp(cc_op, tcg_env, regs[r1], o->addr1, o->in2, regs[r3]);
    set_cc_static(s);
    return DISAS_NEXT;
}
3331 
/* MOVE TO SECONDARY: note the first register number is encoded in the l1 field. */
static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, l1);
    int r3 = get_field(s, r3);
    gen_helper_mvcs(cc_op, tcg_env, regs[r1], o->addr1, o->in2, regs[r3]);
    set_cc_static(s);
    return DISAS_NEXT;
}
3340 #endif
3341 
3342 static DisasJumpType op_mvn(DisasContext *s, DisasOps *o)
3343 {
3344     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3345 
3346     gen_helper_mvn(tcg_env, l, o->addr1, o->in2);
3347     return DISAS_NEXT;
3348 }
3349 
3350 static DisasJumpType op_mvo(DisasContext *s, DisasOps *o)
3351 {
3352     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3353 
3354     gen_helper_mvo(tcg_env, l, o->addr1, o->in2);
3355     return DISAS_NEXT;
3356 }
3357 
3358 static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o)
3359 {
3360     TCGv_i32 t1 = tcg_constant_i32(get_field(s, r1));
3361     TCGv_i32 t2 = tcg_constant_i32(get_field(s, r2));
3362 
3363     gen_helper_mvpg(cc_op, tcg_env, regs[0], t1, t2);
3364     set_cc_static(s);
3365     return DISAS_NEXT;
3366 }
3367 
3368 static DisasJumpType op_mvst(DisasContext *s, DisasOps *o)
3369 {
3370     TCGv_i32 t1 = tcg_constant_i32(get_field(s, r1));
3371     TCGv_i32 t2 = tcg_constant_i32(get_field(s, r2));
3372 
3373     gen_helper_mvst(cc_op, tcg_env, t1, t2);
3374     set_cc_static(s);
3375     return DISAS_NEXT;
3376 }
3377 
3378 static DisasJumpType op_mvz(DisasContext *s, DisasOps *o)
3379 {
3380     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3381 
3382     gen_helper_mvz(tcg_env, l, o->addr1, o->in2);
3383     return DISAS_NEXT;
3384 }
3385 
/* 64-bit multiply, low half only. */
static DisasJumpType op_mul(DisasContext *s, DisasOps *o)
{
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3391 
/* Unsigned 64x64->128 multiply: high half to out, low half to out2. */
static DisasJumpType op_mul128(DisasContext *s, DisasOps *o)
{
    tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3397 
/* Signed 64x64->128 multiply: high half to out, low half to out2. */
static DisasJumpType op_muls128(DisasContext *s, DisasOps *o)
{
    tcg_gen_muls2_i64(o->out2, o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3403 
/* BFP multiply (short): softfloat work done in the helper. */
static DisasJumpType op_meeb(DisasContext *s, DisasOps *o)
{
    gen_helper_meeb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3409 
/* BFP multiply short->long: softfloat work done in the helper. */
static DisasJumpType op_mdeb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdeb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3415 
/* BFP multiply (long): softfloat work done in the helper. */
static DisasJumpType op_mdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3421 
/* BFP multiply (extended, 128-bit operands): handled by the helper. */
static DisasJumpType op_mxb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxb(o->out_128, tcg_env, o->in1_128, o->in2_128);
    return DISAS_NEXT;
}
3427 
/* BFP multiply long->extended: 64-bit inputs, 128-bit result via helper. */
static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxdb(o->out_128, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3433 
/* BFP multiply-and-add (short): third operand comes from FPR r3. */
static DisasJumpType op_maeb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
    gen_helper_maeb(o->out, tcg_env, o->in1, o->in2, r3);
    return DISAS_NEXT;
}
3440 
/* BFP multiply-and-add (long): third operand comes from FPR r3. */
static DisasJumpType op_madb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg(get_field(s, r3));
    gen_helper_madb(o->out, tcg_env, o->in1, o->in2, r3);
    return DISAS_NEXT;
}
3447 
/* BFP multiply-and-subtract (short): third operand comes from FPR r3. */
static DisasJumpType op_mseb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
    gen_helper_mseb(o->out, tcg_env, o->in1, o->in2, r3);
    return DISAS_NEXT;
}
3454 
/* BFP multiply-and-subtract (long): third operand comes from FPR r3. */
static DisasJumpType op_msdb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg(get_field(s, r3));
    gen_helper_msdb(o->out, tcg_env, o->in1, o->in2, r3);
    return DISAS_NEXT;
}
3461 
3462 static DisasJumpType op_nabs(DisasContext *s, DisasOps *o)
3463 {
3464     TCGv_i64 z = tcg_constant_i64(0);
3465     TCGv_i64 n = tcg_temp_new_i64();
3466 
3467     tcg_gen_neg_i64(n, o->in2);
3468     tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
3469     return DISAS_NEXT;
3470 }
3471 
/* Negative absolute for 32-bit float image: force bit 31 (sign) on. */
static DisasJumpType op_nabsf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
    return DISAS_NEXT;
}
3477 
/* Negative absolute for 64-bit float image: force the sign bit on. */
static DisasJumpType op_nabsf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
    return DISAS_NEXT;
}
3483 
/* Negative absolute for 128-bit float image: sign lives in the high half. */
static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}
3490 
3491 static DisasJumpType op_nc(DisasContext *s, DisasOps *o)
3492 {
3493     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3494 
3495     gen_helper_nc(cc_op, tcg_env, l, o->addr1, o->in2);
3496     set_cc_static(s);
3497     return DISAS_NEXT;
3498 }
3499 
/* Two's-complement negation of the second operand. */
static DisasJumpType op_neg(DisasContext *s, DisasOps *o)
{
    tcg_gen_neg_i64(o->out, o->in2);
    return DISAS_NEXT;
}
3505 
/* Negate a 32-bit float image by toggling bit 31 (sign). */
static DisasJumpType op_negf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
    return DISAS_NEXT;
}
3511 
/* Negate a 64-bit float image by toggling the sign bit. */
static DisasJumpType op_negf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
    return DISAS_NEXT;
}
3517 
/* Negate a 128-bit float image: toggle the sign in the high half only. */
static DisasJumpType op_negf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}
3524 
3525 static DisasJumpType op_oc(DisasContext *s, DisasOps *o)
3526 {
3527     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3528 
3529     gen_helper_oc(cc_op, tcg_env, l, o->addr1, o->in2);
3530     set_cc_static(s);
3531     return DISAS_NEXT;
3532 }
3533 
/* Bitwise OR of the two operands. */
static DisasJumpType op_or(DisasContext *s, DisasOps *o)
{
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3539 
/*
 * OR immediate into a sub-field of the register: insn->data packs the
 * field size in its high byte and the shift amount in its low byte.
 */
static DisasJumpType op_ori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, t);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
3555 
/*
 * OR immediate into memory.  With interlocked-access facility 2 the OR is
 * performed atomically in memory; otherwise via a load/modify/store
 * sequence.  Either way, the OR is (re)computed into o->out for the CC.
 */
static DisasJumpType op_oi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                    s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
3576 
3577 static DisasJumpType op_pack(DisasContext *s, DisasOps *o)
3578 {
3579     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3580 
3581     gen_helper_pack(tcg_env, l, o->addr1, o->in2);
3582     return DISAS_NEXT;
3583 }
3584 
3585 static DisasJumpType op_pka(DisasContext *s, DisasOps *o)
3586 {
3587     int l2 = get_field(s, l2) + 1;
3588     TCGv_i32 l;
3589 
3590     /* The length must not exceed 32 bytes.  */
3591     if (l2 > 32) {
3592         gen_program_exception(s, PGM_SPECIFICATION);
3593         return DISAS_NORETURN;
3594     }
3595     l = tcg_constant_i32(l2);
3596     gen_helper_pka(tcg_env, o->addr1, o->in2, l);
3597     return DISAS_NEXT;
3598 }
3599 
3600 static DisasJumpType op_pku(DisasContext *s, DisasOps *o)
3601 {
3602     int l2 = get_field(s, l2) + 1;
3603     TCGv_i32 l;
3604 
3605     /* The length must be even and should not exceed 64 bytes.  */
3606     if ((l2 & 1) || (l2 > 64)) {
3607         gen_program_exception(s, PGM_SPECIFICATION);
3608         return DISAS_NORETURN;
3609     }
3610     l = tcg_constant_i32(l2);
3611     gen_helper_pku(tcg_env, o->addr1, o->in2, l);
3612     return DISAS_NEXT;
3613 }
3614 
/*
 * POPULATION COUNT: with m3 bit 8 and misc-instruction-extensions 3, count
 * over the whole 64-bit register; otherwise the helper does the legacy
 * per-byte variant.
 */
static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o)
{
    const uint8_t m3 = get_field(s, m3);

    if ((m3 & 8) && s390_has_feat(S390_FEAT_MISC_INSTRUCTION_EXT3)) {
        tcg_gen_ctpop_i64(o->out, o->in2);
    } else {
        gen_helper_popcnt(o->out, o->in2);
    }
    return DISAS_NEXT;
}
3626 
3627 #ifndef CONFIG_USER_ONLY
/* PURGE TLB: delegated to the helper. */
static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o)
{
    gen_helper_ptlb(tcg_env);
    return DISAS_NEXT;
}
3633 #endif
3634 
/*
 * ROTATE THEN INSERT SELECTED BITS (RISBG/RISBGN/RISBHG/RISBLG family).
 * Rotate in2 left by i5 and insert the bit range [i3..i4] into out,
 * either zeroing (i4 bit 0x80 set) or preserving the remaining bits.
 * Fast paths use extract/deposit when the selected range allows it.
 */
static DisasJumpType op_risbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s, i3);
    int i4 = get_field(s, i4);
    int i5 = get_field(s, i5);
    int do_zero = i4 & 0x80;
    uint64_t mask, imask, pmask;
    int pos, len, rot;

    /* Adjust the arguments for the specific insn.  */
    switch (s->fields.op2) {
    case 0x55: /* risbg */
    case 0x59: /* risbgn */
        i3 &= 63;
        i4 &= 63;
        pmask = ~0;
        break;
    case 0x5d: /* risbhg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0xffffffff00000000ull;
        break;
    case 0x51: /* risblg */
        i3 = (i3 & 31) + 32;
        i4 = (i4 & 31) + 32;
        pmask = 0x00000000ffffffffull;
        break;
    default:
        g_assert_not_reached();
    }

    /* MASK is the set of bits to be inserted from R2. */
    if (i3 <= i4) {
        /* [0...i3---i4...63] */
        mask = (-1ull >> i3) & (-1ull << (63 - i4));
    } else {
        /* [0---i4...i3---63] */
        mask = (-1ull >> i3) | (-1ull << (63 - i4));
    }
    /* For RISBLG/RISBHG, the wrapping is limited to the high/low doubleword. */
    mask &= pmask;

    /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
       insns, we need to keep the other half of the register.  */
    imask = ~mask | ~pmask;
    if (do_zero) {
        imask = ~pmask;
    }

    len = i4 - i3 + 1;
    pos = 63 - i4;
    rot = i5 & 63;

    /* In some cases we can implement this with extract.  */
    if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
        tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
        return DISAS_NEXT;
    }

    /* In some cases we can implement this with deposit.  */
    if (len > 0 && (imask == 0 || ~mask == imask)) {
        /* Note that we rotate the bits to be inserted to the lsb, not to
           the position as described in the PoO.  */
        rot = (rot - pos) & 63;
    } else {
        /* pos < 0 marks "no deposit fast path" for the code below. */
        pos = -1;
    }

    /* Rotate the input as necessary.  */
    tcg_gen_rotli_i64(o->in2, o->in2, rot);

    /* Insert the selected bits into the output.  */
    if (pos >= 0) {
        if (imask == 0) {
            tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
        } else {
            tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
        }
    } else if (imask == 0) {
        tcg_gen_andi_i64(o->out, o->in2, mask);
    } else {
        /* General case: mask/merge the rotated input with the kept bits. */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_andi_i64(o->out, o->out, imask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
    }
    return DISAS_NEXT;
}
3722 
/*
 * ROTATE THEN {AND,OR,XOR} SELECTED BITS (RNSBG/ROSBG/RXSBG): rotate in2
 * by i5, apply the boolean op over the bit range [i3..i4], and set the CC
 * from the affected bits.  i3 bit 0x80 selects the test-only form, which
 * discards the register result.
 */
static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s, i3);
    int i4 = get_field(s, i4);
    int i5 = get_field(s, i5);
    TCGv_i64 orig_out;
    uint64_t mask;

    /* If this is a test-only form, arrange to discard the result.  */
    if (i3 & 0x80) {
        tcg_debug_assert(o->out != NULL);
        orig_out = o->out;
        o->out = tcg_temp_new_i64();
        tcg_gen_mov_i64(o->out, orig_out);
    }

    i3 &= 63;
    i4 &= 63;
    i5 &= 63;

    /* MASK is the set of bits to be operated on from R2.
       Take care for I3/I4 wraparound.  */
    mask = ~0ull >> i3;
    if (i3 <= i4) {
        mask ^= ~0ull >> i4 >> 1;
    } else {
        mask |= ~(~0ull >> i4 >> 1);
    }

    /* Rotate the input as necessary.  */
    tcg_gen_rotli_i64(o->in2, o->in2, i5);

    /* Operate.  */
    switch (s->fields.op2) {
    case 0x54: /* AND */
        /* Setting the unselected bits forces AND to preserve them in out. */
        tcg_gen_ori_i64(o->in2, o->in2, ~mask);
        tcg_gen_and_i64(o->out, o->out, o->in2);
        break;
    case 0x56: /* OR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
        break;
    case 0x57: /* XOR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_xor_i64(o->out, o->out, o->in2);
        break;
    default:
        abort();
    }

    /* Set the CC.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
3778 
/* Byte-swap the low 16 bits; remaining bits are zeroed (IZ|OZ). */
static DisasJumpType op_rev16(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap16_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
    return DISAS_NEXT;
}
3784 
/* Byte-swap the low 32 bits; remaining bits are zeroed (IZ|OZ). */
static DisasJumpType op_rev32(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap32_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
    return DISAS_NEXT;
}
3790 
/* Byte-swap the full 64-bit operand. */
static DisasJumpType op_rev64(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap64_i64(o->out, o->in2);
    return DISAS_NEXT;
}
3796 
/* 32-bit rotate left; the result is zero-extended into the 64-bit out. */
static DisasJumpType op_rll32(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();
    TCGv_i32 to = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, o->in1);
    tcg_gen_extrl_i64_i32(t2, o->in2);
    tcg_gen_rotl_i32(to, t1, t2);
    tcg_gen_extu_i32_i64(o->out, to);
    return DISAS_NEXT;
}
3808 
/* 64-bit rotate left. */
static DisasJumpType op_rll64(DisasContext *s, DisasOps *o)
{
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3814 
3815 #ifndef CONFIG_USER_ONLY
/* RESET REFERENCE BIT EXTENDED: helper does the work and yields the cc. */
static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o)
{
    gen_helper_rrbe(cc_op, tcg_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3822 
/* SET ADDRESS SPACE CONTROL (FAST): applied by the helper. */
static DisasJumpType op_sacf(DisasContext *s, DisasOps *o)
{
    gen_helper_sacf(tcg_env, o->in2);
    /* Addressing mode has changed, so end the block.  */
    return DISAS_TOO_MANY;
}
3829 #endif
3830 
/*
 * SET ADDRESSING MODE (SAM24/SAM31/SAM64, selected by insn->data): update
 * PSW mask bits 31-32 and truncate the continuation address to the new
 * mode's range.  Always ends the TB.
 */
static DisasJumpType op_sam(DisasContext *s, DisasOps *o)
{
    int sam = s->insn->data;
    TCGv_i64 tsam;
    uint64_t mask;

    switch (sam) {
    case 0:
        /* 24-bit addressing */
        mask = 0xffffff;
        break;
    case 1:
        /* 31-bit addressing */
        mask = 0x7fffffff;
        break;
    default:
        /* 64-bit addressing */
        mask = -1;
        break;
    }

    /* Bizarre but true, we check the address of the current insn for the
       specification exception, not the next to be executed.  Thus the PoO
       documents that Bad Things Happen two bytes before the end.  */
    if (s->base.pc_next & ~mask) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    s->pc_tmp &= mask;

    tsam = tcg_constant_i64(sam);
    tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);

    /* Always exit the TB, since we (may have) changed execution mode.  */
    return DISAS_TOO_MANY;
}
3864 
/* SET ACCESS REGISTER: store the low 32 bits of in2 into AR r1. */
static DisasJumpType op_sar(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_st32_i64(o->in2, tcg_env, offsetof(CPUS390XState, aregs[r1]));
    return DISAS_NEXT;
}
3871 
/* BFP subtract (short): softfloat work done in the helper. */
static DisasJumpType op_seb(DisasContext *s, DisasOps *o)
{
    gen_helper_seb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3877 
/* BFP subtract (long): softfloat work done in the helper. */
static DisasJumpType op_sdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sdb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3883 
/* BFP subtract (extended, 128-bit operands): handled by the helper. */
static DisasJumpType op_sxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sxb(o->out_128, tcg_env, o->in1_128, o->in2_128);
    return DISAS_NEXT;
}
3889 
/* BFP square root (short): handled by the helper. */
static DisasJumpType op_sqeb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqeb(o->out, tcg_env, o->in2);
    return DISAS_NEXT;
}
3895 
/* BFP square root (long): handled by the helper. */
static DisasJumpType op_sqdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqdb(o->out, tcg_env, o->in2);
    return DISAS_NEXT;
}
3901 
/* BFP square root (extended, 128-bit): handled by the helper. */
static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqxb(o->out_128, tcg_env, o->in2_128);
    return DISAS_NEXT;
}
3907 
3908 #ifndef CONFIG_USER_ONLY
/* SERVICE CALL (SCLP): helper performs the call and yields the cc. */
static DisasJumpType op_servc(DisasContext *s, DisasOps *o)
{
    gen_helper_servc(cc_op, tcg_env, o->in2, o->in1);
    set_cc_static(s);
    return DISAS_NEXT;
}
3915 
3916 static DisasJumpType op_sigp(DisasContext *s, DisasOps *o)
3917 {
3918     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
3919     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
3920 
3921     gen_helper_sigp(cc_op, tcg_env, o->in2, r1, r3);
3922     set_cc_static(s);
3923     return DISAS_NEXT;
3924 }
3925 #endif
3926 
/*
 * STORE ON CONDITION (STOC/STOCG/STOCFH, selected by insn->data): store
 * regs[r1] only when the m3 condition holds, implemented by branching
 * around the store when it does not.
 */
static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;
    TCGv_i64 a, h;
    TCGLabel *lab;
    int r1;

    disas_jcc(s, &c, get_field(s, m3));

    /* We want to store when the condition is fulfilled, so branch
       out when it's not */
    c.cond = tcg_invert_cond(c.cond);

    lab = gen_new_label();
    if (c.is_64) {
        tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
    } else {
        tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
    }

    r1 = get_field(s, r1);
    a = get_address(s, 0, get_field(s, b2), get_field(s, d2));
    switch (s->insn->data) {
    case 1: /* STOCG */
        tcg_gen_qemu_st_i64(regs[r1], a, get_mem_index(s), MO_TEUQ);
        break;
    case 0: /* STOC */
        tcg_gen_qemu_st_i64(regs[r1], a, get_mem_index(s), MO_TEUL);
        break;
    case 2: /* STOCFH */
        /* Store the high half of the register. */
        h = tcg_temp_new_i64();
        tcg_gen_shri_i64(h, regs[r1], 32);
        tcg_gen_qemu_st_i64(h, a, get_mem_index(s), MO_TEUL);
        break;
    default:
        g_assert_not_reached();
    }

    gen_set_label(lab);
    return DISAS_NEXT;
}
3968 
/*
 * SHIFT LEFT ARITHMETIC: insn->data is the sign-bit position (31 or 63).
 * For the 32-bit form, the operand is pre-shifted into the high word so
 * that the CC_OP_SLA overflow computation works on 64 bits.
 */
static DisasJumpType op_sla(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t;
    uint64_t sign = 1ull << s->insn->data;
    if (s->insn->data == 31) {
        t = tcg_temp_new_i64();
        tcg_gen_shli_i64(t, o->in1, 32);
    } else {
        t = o->in1;
    }
    gen_op_update2_cc_i64(s, CC_OP_SLA, t, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit.  Copy that over from the source unchanged.  */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return DISAS_NEXT;
}
3988 
/* Logical shift left. */
static DisasJumpType op_sll(DisasContext *s, DisasOps *o)
{
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3994 
3995 static DisasJumpType op_sra(DisasContext *s, DisasOps *o)
3996 {
3997     tcg_gen_sar_i64(o->out, o->in1, o->in2);
3998     return DISAS_NEXT;
3999 }
4000 
4001 static DisasJumpType op_srl(DisasContext *s, DisasOps *o)
4002 {
4003     tcg_gen_shr_i64(o->out, o->in1, o->in2);
4004     return DISAS_NEXT;
4005 }
4006 
/* SET FPC: load the floating-point control register via helper.  */
static DisasJumpType op_sfpc(DisasContext *s, DisasOps *o)
{
    gen_helper_sfpc(tcg_env, o->in2);
    return DISAS_NEXT;
}

/* SET FPC AND SIGNAL */
static DisasJumpType op_sfas(DisasContext *s, DisasOps *o)
{
    gen_helper_sfas(tcg_env, o->in2);
    return DISAS_NEXT;
}

/* SRNM: set the BFP rounding mode (2-bit form).  */
static DisasJumpType op_srnm(DisasContext *s, DisasOps *o)
{
    /* Bits other than 62 and 63 are ignored. Bit 29 is set to zero. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0x3ull);
    gen_helper_srnm(tcg_env, o->addr1);
    return DISAS_NEXT;
}

/* SRNMB: set the BFP rounding mode (3-bit form); shares the SRNM helper.  */
static DisasJumpType op_srnmb(DisasContext *s, DisasOps *o)
{
    /* Bits 0-55 are ignored. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0xffull);
    gen_helper_srnm(tcg_env, o->addr1);
    return DISAS_NEXT;
}

/* SRNMT: set the DFP rounding mode directly in the FPC.  */
static DisasJumpType op_srnmt(DisasContext *s, DisasOps *o)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Bits other than 61-63 are ignored. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0x7ull);

    /* No need to call a helper, we don't implement dfp */
    tcg_gen_ld32u_i64(tmp, tcg_env, offsetof(CPUS390XState, fpc));
    /* Insert the 3-bit rounding mode at bit 4 of the FPC word.  */
    tcg_gen_deposit_i64(tmp, tmp, o->addr1, 4, 3);
    tcg_gen_st32_i64(tmp, tcg_env, offsetof(CPUS390XState, fpc));
    return DISAS_NEXT;
}
4048 
/* SET PROGRAM MASK: CC is taken from bits 28-29 of the low word of R1,
   and the 4-bit program mask from bits 24-27 is copied into the PSW.  */
static DisasJumpType op_spm(DisasContext *s, DisasOps *o)
{
    tcg_gen_extrl_i64_i32(cc_op, o->in1);
    tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
    set_cc_static(s);

    tcg_gen_shri_i64(o->in1, o->in1, 24);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
    return DISAS_NEXT;
}
4059 
/* ECTG: extract CPU-timer-relative values into GR0/GR1, loading the
   third operand into R3.  All operand addresses are computed up front
   so that a fault leaves the registers unmodified.  */
static DisasJumpType op_ectg(DisasContext *s, DisasOps *o)
{
    int b1 = get_field(s, b1);
    int d1 = get_field(s, d1);
    int b2 = get_field(s, b2);
    int d2 = get_field(s, d2);
    int r3 = get_field(s, r3);
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* fetch all operands first */
    o->in1 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in1, regs[b1], d1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in2, regs[b2], d2);
    o->addr1 = tcg_temp_new_i64();
    gen_addi_and_wrap_i64(s, o->addr1, regs[r3], 0);

    /* load the third operand into r3 before modifying anything */
    tcg_gen_qemu_ld_i64(regs[r3], o->addr1, get_mem_index(s), MO_TEUQ);

    /* subtract CPU timer from first operand and store in GR0 */
    gen_helper_stpt(tmp, tcg_env);
    tcg_gen_sub_i64(regs[0], o->in1, tmp);

    /* store second operand in GR1 */
    tcg_gen_mov_i64(regs[1], o->in2);
    return DISAS_NEXT;
}
4088 
4089 #ifndef CONFIG_USER_ONLY
/* SET PSW KEY FROM ADDRESS: bits of the second operand become the
   4-bit PSW access key.  */
static DisasJumpType op_spka(DisasContext *s, DisasOps *o)
{
    tcg_gen_shri_i64(o->in2, o->in2, 4);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
    return DISAS_NEXT;
}

/* SET STORAGE KEY EXTENDED */
static DisasJumpType op_sske(DisasContext *s, DisasOps *o)
{
    gen_helper_sske(tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* Raise a specification exception if any reserved bit is now set in
   the PSW mask; otherwise fall through.  */
static void gen_check_psw_mask(DisasContext *s)
{
    TCGv_i64 reserved = tcg_temp_new_i64();
    TCGLabel *ok = gen_new_label();

    tcg_gen_andi_i64(reserved, psw_mask, PSW_MASK_RESERVED);
    tcg_gen_brcondi_i64(TCG_COND_EQ, reserved, 0, ok);
    gen_program_exception(s, PGM_SPECIFICATION);
    gen_set_label(ok);
}

/* SET SYSTEM MASK: replace PSW bits 0-7 with the operand byte.  */
static DisasJumpType op_ssm(DisasContext *s, DisasOps *o)
{
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);

    gen_check_psw_mask(s);

    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    s->exit_to_mainloop = true;
    return DISAS_TOO_MANY;
}

/* STAP: store the CPU address (env->core_id).  */
static DisasJumpType op_stap(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, tcg_env, offsetof(CPUS390XState, core_id));
    return DISAS_NEXT;
}
4130 #endif
4131 
/* STORE CLOCK */
static DisasJumpType op_stck(DisasContext *s, DisasOps *o)
{
    gen_helper_stck(o->out, tcg_env);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}

/* STORE CLOCK EXTENDED: 16-byte result of epoch/clock/TOD-programmable
   field, stored as two 8-byte pieces.  */
static DisasJumpType op_stcke(DisasContext *s, DisasOps *o)
{
    TCGv_i64 c1 = tcg_temp_new_i64();
    TCGv_i64 c2 = tcg_temp_new_i64();
    TCGv_i64 todpr = tcg_temp_new_i64();
    gen_helper_stck(c1, tcg_env);
    /* 16 bit value store in an uint32_t (only valid bits set) */
    tcg_gen_ld32u_i64(todpr, tcg_env, offsetof(CPUS390XState, todpr));
    /* Shift the 64-bit value into its place as a zero-extended
       104-bit value.  Note that "bit positions 64-103 are always
       non-zero so that they compare differently to STCK"; we set
       the least significant bit to 1.  */
    tcg_gen_shli_i64(c2, c1, 56);
    tcg_gen_shri_i64(c1, c1, 8);
    tcg_gen_ori_i64(c2, c2, 0x10000);
    tcg_gen_or_i64(c2, c2, todpr);
    tcg_gen_qemu_st_i64(c1, o->in2, get_mem_index(s), MO_TEUQ);
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_st_i64(c2, o->in2, get_mem_index(s), MO_TEUQ);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}
4163 
4164 #ifndef CONFIG_USER_ONLY
/* SET CLOCK */
static DisasJumpType op_sck(DisasContext *s, DisasOps *o)
{
    gen_helper_sck(cc_op, tcg_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SET CLOCK COMPARATOR */
static DisasJumpType op_sckc(DisasContext *s, DisasOps *o)
{
    gen_helper_sckc(tcg_env, o->in2);
    return DISAS_NEXT;
}

/* SET CLOCK PROGRAMMABLE FIELD (from GR0) */
static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o)
{
    gen_helper_sckpf(tcg_env, regs[0]);
    return DISAS_NEXT;
}

/* STORE CLOCK COMPARATOR */
static DisasJumpType op_stckc(DisasContext *s, DisasOps *o)
{
    gen_helper_stckc(o->out, tcg_env);
    return DISAS_NEXT;
}

/* STORE CONTROL (64-bit registers) */
static DisasJumpType op_stctg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_stctg(tcg_env, r1, o->in2, r3);
    return DISAS_NEXT;
}

/* STORE CONTROL (32-bit registers) */
static DisasJumpType op_stctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_stctl(tcg_env, r1, o->in2, r3);
    return DISAS_NEXT;
}

/* STORE CPU ID */
static DisasJumpType op_stidp(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld_i64(o->out, tcg_env, offsetof(CPUS390XState, cpuid));
    return DISAS_NEXT;
}

/* SET CPU TIMER */
static DisasJumpType op_spt(DisasContext *s, DisasOps *o)
{
    gen_helper_spt(tcg_env, o->in2);
    return DISAS_NEXT;
}

/* STORE FACILITY LIST */
static DisasJumpType op_stfl(DisasContext *s, DisasOps *o)
{
    gen_helper_stfl(tcg_env);
    return DISAS_NEXT;
}

/* STORE CPU TIMER */
static DisasJumpType op_stpt(DisasContext *s, DisasOps *o)
{
    gen_helper_stpt(o->out, tcg_env);
    return DISAS_NEXT;
}
4231 
/* STORE SYSTEM INFORMATION; function code in GR0, selectors in GR1.  */
static DisasJumpType op_stsi(DisasContext *s, DisasOps *o)
{
    gen_helper_stsi(cc_op, tcg_env, o->in2, regs[0], regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SET PREFIX */
static DisasJumpType op_spx(DisasContext *s, DisasOps *o)
{
    gen_helper_spx(tcg_env, o->in2);
    return DISAS_NEXT;
}

/* CANCEL SUBCHANNEL (subchannel id in GR1, as for all I/O insns below) */
static DisasJumpType op_xsch(DisasContext *s, DisasOps *o)
{
    gen_helper_xsch(tcg_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* CLEAR SUBCHANNEL */
static DisasJumpType op_csch(DisasContext *s, DisasOps *o)
{
    gen_helper_csch(tcg_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* HALT SUBCHANNEL */
static DisasJumpType op_hsch(DisasContext *s, DisasOps *o)
{
    gen_helper_hsch(tcg_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* MODIFY SUBCHANNEL */
static DisasJumpType op_msch(DisasContext *s, DisasOps *o)
{
    gen_helper_msch(tcg_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* RESET CHANNEL PATH */
static DisasJumpType op_rchp(DisasContext *s, DisasOps *o)
{
    gen_helper_rchp(tcg_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* RESUME SUBCHANNEL */
static DisasJumpType op_rsch(DisasContext *s, DisasOps *o)
{
    gen_helper_rsch(tcg_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SET ADDRESS LIMIT */
static DisasJumpType op_sal(DisasContext *s, DisasOps *o)
{
    gen_helper_sal(tcg_env, regs[1]);
    return DISAS_NEXT;
}

/* SET CHANNEL MONITOR */
static DisasJumpType op_schm(DisasContext *s, DisasOps *o)
{
    gen_helper_schm(tcg_env, regs[1], regs[2], o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_siga(DisasContext *s, DisasOps *o)
{
    /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
    gen_op_movi_cc(s, 3);
    return DISAS_NEXT;
}
4305 
static DisasJumpType op_stcps(DisasContext *s, DisasOps *o)
{
    /* The instruction is suppressed if not provided. */
    return DISAS_NEXT;
}

/* START SUBCHANNEL */
static DisasJumpType op_ssch(DisasContext *s, DisasOps *o)
{
    gen_helper_ssch(tcg_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* STORE SUBCHANNEL */
static DisasJumpType op_stsch(DisasContext *s, DisasOps *o)
{
    gen_helper_stsch(tcg_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* STORE CHANNEL REPORT WORD */
static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o)
{
    gen_helper_stcrw(tcg_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST PENDING INTERRUPTION */
static DisasJumpType op_tpi(DisasContext *s, DisasOps *o)
{
    gen_helper_tpi(cc_op, tcg_env, o->addr1);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST SUBCHANNEL */
static DisasJumpType op_tsch(DisasContext *s, DisasOps *o)
{
    gen_helper_tsch(tcg_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* CHANNEL SUBSYSTEM CALL */
static DisasJumpType op_chsc(DisasContext *s, DisasOps *o)
{
    gen_helper_chsc(tcg_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* STORE PREFIX; mask out the non-prefix bits of env->psa.  */
static DisasJumpType op_stpx(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld_i64(o->out, tcg_env, offsetof(CPUS390XState, psa));
    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
    return DISAS_NEXT;
}
4359 }
4360 
/* STORE THEN AND/OR SYSTEM MASK (STNSM when the opcode is 0xac,
   otherwise STOSM): store the current system mask byte, then modify it
   with the immediate.  */
static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = get_field(s, i2);
    TCGv_i64 t;

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place.  */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st_i64(t, o->addr1, get_mem_index(s), MO_UB);

    if (s->fields.op == 0xac) {
        /* AND form: clear system-mask bits not set in i2.  */
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        /* OR form: set system-mask bits from i2.  */
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }

    gen_check_psw_mask(s);

    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    s->exit_to_mainloop = true;
    return DISAS_TOO_MANY;
}

/* STORE USING REAL ADDRESS; store size is in s->insn->data.  */
static DisasJumpType op_stura(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->in1, o->in2, MMU_REAL_IDX, s->insn->data);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        update_psw_addr(s);
        gen_helper_per_store_real(tcg_env);
    }
    return DISAS_NEXT;
}
4397 #endif
4398 
/* STORE FACILITY LIST EXTENDED */
static DisasJumpType op_stfle(DisasContext *s, DisasOps *o)
{
    gen_helper_stfle(cc_op, tcg_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* 8-bit store (STC and friends) */
static DisasJumpType op_st8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s), MO_UB);
    return DISAS_NEXT;
}

/* 16-bit store */
static DisasJumpType op_st16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s), MO_TEUW);
    return DISAS_NEXT;
}

/* 32-bit store; s->insn->data may add alignment flags.  */
static DisasJumpType op_st32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->in1, o->in2, get_mem_index(s),
                       MO_TEUL | s->insn->data);
    return DISAS_NEXT;
}

/* 64-bit store; s->insn->data may add alignment flags.  */
static DisasJumpType op_st64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s),
                        MO_TEUQ | s->insn->data);
    return DISAS_NEXT;
}
4431 
/* STORE ACCESS MULTIPLE: store access registers r1..r3.  */
static DisasJumpType op_stam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_stam(tcg_env, r1, o->in2, r3);
    return DISAS_NEXT;
}
4440 
/* STORE CHARACTERS UNDER MASK: store the bytes of r1 selected by m3.
   s->insn->data is the bit offset of the source field within the
   register.  Contiguous masks become a single wider store; anything
   else falls back to byte-by-byte stores.  */
static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    int pos, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Bit position of the lowest selected byte.  */
    pos = base + ctz32(m3) * 8;
    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_TEUL);
        break;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_TEUW);
        break;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_UB);
        break;

    default:
        /* This is going to be a sequence of shifts and stores.  */
        pos = base + 32 - 8;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_shri_i64(tmp, o->in1, pos);
                tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_UB);
                tcg_gen_addi_i64(o->in2, o->in2, 1);
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }
    return DISAS_NEXT;
}
4488 
/* STORE MULTIPLE: store registers r1..r3 (wrapping at 15) with element
   size s->insn->data (4 or 8 bytes).  The loop is unrolled at
   translation time since r1/r3 are compile-time constants.  */
static DisasJumpType op_stm(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    int size = s->insn->data;
    TCGv_i64 tsize = tcg_constant_i64(size);

    while (1) {
        tcg_gen_qemu_st_i64(regs[r1], o->in2, get_mem_index(s),
                            size == 8 ? MO_TEUQ : MO_TEUL);
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, tsize);
        r1 = (r1 + 1) & 15;
    }

    return DISAS_NEXT;
}

/* STORE MULTIPLE HIGH: store the high 32 bits of registers r1..r3.  */
static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_constant_i64(4);
    TCGv_i64 t32 = tcg_constant_i64(32);

    while (1) {
        tcg_gen_shl_i64(t, regs[r1], t32);
        tcg_gen_qemu_st_i64(t, o->in2, get_mem_index(s), MO_TEUL);
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }
    return DISAS_NEXT;
}
4528 
/* STORE PAIR TO QUADWORD: aligned 16-byte store of an even/odd
   register pair.  */
static DisasJumpType op_stpq(DisasContext *s, DisasOps *o)
{
    TCGv_i128 t16 = tcg_temp_new_i128();

    tcg_gen_concat_i64_i128(t16, o->out2, o->out);
    tcg_gen_qemu_st_i128(t16, o->in2, get_mem_index(s),
                         MO_TE | MO_128 | MO_ALIGN);
    return DISAS_NEXT;
}

/* SEARCH STRING */
static DisasJumpType op_srst(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_srst(tcg_env, r1, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SEARCH STRING UNICODE */
static DisasJumpType op_srstu(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_srstu(tcg_env, r1, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4558 
/* SUBTRACT (signed); CC is computed by the cout hook.  */
static DisasJumpType op_sub(DisasContext *s, DisasOps *o)
{
    tcg_gen_sub_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* SUBTRACT LOGICAL (64-bit): compute the difference and leave the
   borrow (0 or -1) in cc_src for the CC computation.  */
static DisasJumpType op_subu64(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_sub2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    return DISAS_NEXT;
}
4571 
/* Compute borrow (0, -1) into cc_src.  Note that the switch cases are
   ordered so that each one deliberately falls through into the
   conversion steps it still needs.  */
static void compute_borrow(DisasContext *s)
{
    switch (s->cc_op) {
    case CC_OP_SUBU:
        /* The borrow value is already in cc_src (0,-1). */
        break;
    default:
        gen_op_calc_cc(s);
        /* fall through */
    case CC_OP_STATIC:
        /* The carry flag is the msb of CC; compute into cc_src. */
        tcg_gen_extu_i32_i64(cc_src, cc_op);
        tcg_gen_shri_i64(cc_src, cc_src, 1);
        /* fall through */
    case CC_OP_ADDU:
        /* Convert carry (1,0) to borrow (0,-1). */
        tcg_gen_subi_i64(cc_src, cc_src, 1);
        break;
    }
}
4593 
/* SUBTRACT WITH BORROW (32-bit) */
static DisasJumpType op_subb32(DisasContext *s, DisasOps *o)
{
    compute_borrow(s);

    /* Borrow is {0, -1}, so add to subtract. */
    tcg_gen_add_i64(o->out, o->in1, cc_src);
    tcg_gen_sub_i64(o->out, o->out, o->in2);
    return DISAS_NEXT;
}

/* SUBTRACT WITH BORROW (64-bit); cc_src also receives the new borrow.  */
static DisasJumpType op_subb64(DisasContext *s, DisasOps *o)
{
    compute_borrow(s);

    /*
     * Borrow is {0, -1}, so add to subtract; replicate the
     * borrow input to produce 128-bit -1 for the addition.
     */
    TCGv_i64 zero = tcg_constant_i64(0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, cc_src);
    tcg_gen_sub2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);

    return DISAS_NEXT;
}
4618 
/* SUPERVISOR CALL: record the SVC number and instruction length in the
   CPU state, then raise EXCP_SVC.  PSW address and cc_op must be
   synced first since the exception leaves the TB.  */
static DisasJumpType op_svc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t;

    update_psw_addr(s);
    update_cc_op(s);

    t = tcg_constant_i32(get_field(s, i1) & 0xff);
    tcg_gen_st_i32(t, tcg_env, offsetof(CPUS390XState, int_svc_code));

    t = tcg_constant_i32(s->ilen);
    tcg_gen_st_i32(t, tcg_env, offsetof(CPUS390XState, int_svc_ilen));

    gen_exception(EXCP_SVC);
    return DISAS_NORETURN;
}
4635 
/* TEST ADDRESSING MODE: encode the current 64/31-bit mode flags into
   the condition code (known at translation time).  */
static DisasJumpType op_tam(DisasContext *s, DisasOps *o)
{
    int cc = 0;

    cc |= (s->base.tb->flags & FLAG_MASK_64) ? 2 : 0;
    cc |= (s->base.tb->flags & FLAG_MASK_32) ? 1 : 0;
    gen_op_movi_cc(s, cc);
    return DISAS_NEXT;
}

/* TEST DATA CLASS (short BFP) */
static DisasJumpType op_tceb(DisasContext *s, DisasOps *o)
{
    gen_helper_tceb(cc_op, tcg_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST DATA CLASS (long BFP) */
static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcdb(cc_op, tcg_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST DATA CLASS (extended BFP) */
static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcxb(cc_op, tcg_env, o->in1_128, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4666 
4667 #ifndef CONFIG_USER_ONLY
4668 
/* TEST BLOCK */
static DisasJumpType op_testblock(DisasContext *s, DisasOps *o)
{
    gen_helper_testblock(cc_op, tcg_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST PROTECTION */
static DisasJumpType op_tprot(DisasContext *s, DisasOps *o)
{
    gen_helper_tprot(cc_op, tcg_env, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4682 
4683 #endif
4684 
/* TEST DECIMAL: operand length is l1 + 1 bytes.  */
static DisasJumpType op_tp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l1 = tcg_constant_i32(get_field(s, l1) + 1);

    gen_helper_tp(cc_op, tcg_env, o->addr1, l1);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TRANSLATE */
static DisasJumpType op_tr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_tr(tcg_env, l, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TRANSLATE EXTENDED; the helper returns the updated register pair.  */
static DisasJumpType op_tre(DisasContext *s, DisasOps *o)
{
    TCGv_i128 pair = tcg_temp_new_i128();

    gen_helper_tre(pair, tcg_env, o->out, o->out2, o->in2);
    tcg_gen_extr_i128_i64(o->out2, o->out, pair);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TRANSLATE AND TEST */
static DisasJumpType op_trt(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_trt(cc_op, tcg_env, l, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TRANSLATE AND TEST REVERSE */
static DisasJumpType op_trtr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_trtr(cc_op, tcg_env, l, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4730 
/* TRANSLATE ONE/TWO TO ONE/TWO (TROO/TROT/TRTO/TRTT): the low two bits
   of the opcode select the source/destination element sizes.  The test
   character comes from GR0 unless the ETF2-enhancement m3 bit disables
   the test.  */
static DisasJumpType op_trXX(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
    TCGv_i32 sizes = tcg_constant_i32(s->insn->opc & 3);
    TCGv_i32 tst = tcg_temp_new_i32();
    int m3 = get_field(s, m3);

    if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
        /* Without the facility the m3 field is ignored.  */
        m3 = 0;
    }
    if (m3 & 1) {
        /* -1 is an impossible character value: testing disabled.  */
        tcg_gen_movi_i32(tst, -1);
    } else {
        tcg_gen_extrl_i64_i32(tst, regs[0]);
        if (s->insn->opc & 3) {
            tcg_gen_ext8u_i32(tst, tst);
        } else {
            tcg_gen_ext16u_i32(tst, tst);
        }
    }
    gen_helper_trXX(cc_op, tcg_env, r1, r2, tst, sizes);

    set_cc_static(s);
    return DISAS_NEXT;
}
4757 
/* TEST AND SET: atomically exchange the byte with 0xff; CC is the old
   leftmost bit.  */
static DisasJumpType op_ts(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_constant_i32(0xff);

    tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
    tcg_gen_extract_i32(cc_op, t1, 7, 1);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* UNPACK */
static DisasJumpType op_unpk(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_unpk(tcg_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}

/* UNPACK ASCII */
static DisasJumpType op_unpka(DisasContext *s, DisasOps *o)
{
    int l1 = get_field(s, l1) + 1;
    TCGv_i32 l;

    /* The length must not exceed 32 bytes.  */
    if (l1 > 32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_constant_i32(l1);
    gen_helper_unpka(cc_op, tcg_env, o->addr1, l, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* UNPACK UNICODE */
static DisasJumpType op_unpku(DisasContext *s, DisasOps *o)
{
    int l1 = get_field(s, l1) + 1;
    TCGv_i32 l;

    /* The length must be even and should not exceed 64 bytes.  */
    if ((l1 & 1) || (l1 > 64)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_constant_i32(l1);
    gen_helper_unpku(cc_op, tcg_env, o->addr1, l, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4807 
4808 
/* EXCLUSIVE OR (character): XOR two storage fields of l1 + 1 bytes.
   The common idiom "XC x,x" (identical operands) zeroes the field and
   is expanded inline as stores of zero for lengths up to 32 bytes;
   everything else goes through the helper.  */
static DisasJumpType op_xc(DisasContext *s, DisasOps *o)
{
    int d1 = get_field(s, d1);
    int d2 = get_field(s, d2);
    int b1 = get_field(s, b1);
    int b2 = get_field(s, b2);
    int l = get_field(s, l1);
    TCGv_i32 t32;

    o->addr1 = get_address(s, 0, b1, d1);

    /* If the addresses are identical, this is a store/memset of zero.  */
    if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
        o->in2 = tcg_constant_i64(0);

        l++;
        /* Emit the largest stores first, advancing the address only
           while bytes remain.  */
        while (l >= 8) {
            tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UQ);
            l -= 8;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 8);
            }
        }
        if (l >= 4) {
            tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UL);
            l -= 4;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 4);
            }
        }
        if (l >= 2) {
            tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UW);
            l -= 2;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 2);
            }
        }
        if (l) {
            tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UB);
        }
        /* x XOR x == 0, so CC is always 0 on this path.  */
        gen_op_movi_cc(s, 0);
        return DISAS_NEXT;
    }

    /* But in general we'll defer to a helper.  */
    o->in2 = get_address(s, 0, b2, d2);
    t32 = tcg_constant_i32(l);
    gen_helper_xc(cc_op, tcg_env, t32, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4860 
/* EXCLUSIVE OR (register/memory forms) */
static DisasJumpType op_xor(DisasContext *s, DisasOps *o)
{
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* EXCLUSIVE OR IMMEDIATE: s->insn->data packs the field width (high
   byte) and bit shift (low byte) of the affected sub-field.  */
static DisasJumpType op_xori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, o->in2, shift);
    tcg_gen_xor_i64(o->out, o->in1, t);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
4882 
/* EXCLUSIVE OR (memory destination, XI et al.): with the
   interlocked-access facility the XOR is done atomically in memory;
   otherwise it is a load/modify/store sequence.  */
static DisasJumpType op_xi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
4903 
/* Produce a zero output operand.  */
static DisasJumpType op_zero(DisasContext *s, DisasOps *o)
{
    o->out = tcg_constant_i64(0);
    return DISAS_NEXT;
}

/* Produce a zero output register pair.  */
static DisasJumpType op_zero2(DisasContext *s, DisasOps *o)
{
    o->out = tcg_constant_i64(0);
    o->out2 = o->out;
    return DISAS_NEXT;
}
4916 
4917 #ifndef CONFIG_USER_ONLY
/* CLP: PCI list/query operation.  */
static DisasJumpType op_clp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_clp(tcg_env, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* PCILG: PCI load.  */
static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_pcilg(tcg_env, r1, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* PCISTG: PCI store.  */
static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_pcistg(tcg_env, r1, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* STPCIFC: store PCI function controls.  */
static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));

    gen_helper_stpcifc(tcg_env, r1, o->addr1, ar);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SIC: set interruption controls.  */
static DisasJumpType op_sic(DisasContext *s, DisasOps *o)
{
    gen_helper_sic(tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* RPCIT: refresh PCI translations.  */
static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_rpcit(tcg_env, r1, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* PCISTB: PCI store block.  */
static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
    TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));

    gen_helper_pcistb(tcg_env, r1, r3, o->addr1, ar);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* MPCIFC: modify PCI function controls.  */
static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));

    gen_helper_mpcifc(tcg_env, r1, o->addr1, ar);
    set_cc_static(s);
    return DISAS_NEXT;
}
4993 #endif
4994 
4995 #include "translate_vx.c.inc"
4996 
4997 /* ====================================================================== */
4998 /* The "Cc OUTput" generators.  Given the generated output (and in some cases
4999    the original inputs), update the various cc data structures in order to
5000    be able to compute the new condition code.  */
5001 
static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}

static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}

static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}

static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}

static void cout_addu32(DisasContext *s, DisasOps *o)
{
    /* Split the 64-bit result: carry-out in cc_src, 32-bit sum in
       cc_dst, so that CC_OP_ADDU sees (carry, result).  */
    tcg_gen_shri_i64(cc_src, o->out, 32);
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, cc_dst);
}

static void cout_addu64(DisasContext *s, DisasOps *o)
{
    /* The carry is already in cc_src (set by the op generator).  */
    gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, o->out);
}

static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}

static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}

static void cout_cmpu32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
}

static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}

static void cout_f32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
}

static void cout_f64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
}

static void cout_f128(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
}

static void cout_nabs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
}

static void cout_nabs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
}

static void cout_neg32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
}
5084 static void cout_neg64(DisasContext *s, DisasOps *o)
5085 {
5086     gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
5087 }
5088 
5089 static void cout_nz32(DisasContext *s, DisasOps *o)
5090 {
5091     tcg_gen_ext32u_i64(cc_dst, o->out);
5092     gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
5093 }
5094 
5095 static void cout_nz64(DisasContext *s, DisasOps *o)
5096 {
5097     gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
5098 }
5099 
5100 static void cout_s32(DisasContext *s, DisasOps *o)
5101 {
5102     gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
5103 }
5104 
5105 static void cout_s64(DisasContext *s, DisasOps *o)
5106 {
5107     gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
5108 }
5109 
5110 static void cout_subs32(DisasContext *s, DisasOps *o)
5111 {
5112     gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
5113 }
5114 
5115 static void cout_subs64(DisasContext *s, DisasOps *o)
5116 {
5117     gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
5118 }
5119 
5120 static void cout_subu32(DisasContext *s, DisasOps *o)
5121 {
5122     tcg_gen_sari_i64(cc_src, o->out, 32);
5123     tcg_gen_ext32u_i64(cc_dst, o->out);
5124     gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, cc_dst);
5125 }
5126 
5127 static void cout_subu64(DisasContext *s, DisasOps *o)
5128 {
5129     gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, o->out);
5130 }
5131 
5132 static void cout_tm32(DisasContext *s, DisasOps *o)
5133 {
5134     gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
5135 }
5136 
5137 static void cout_tm64(DisasContext *s, DisasOps *o)
5138 {
5139     gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
5140 }
5141 
5142 static void cout_muls32(DisasContext *s, DisasOps *o)
5143 {
5144     gen_op_update1_cc_i64(s, CC_OP_MULS_32, o->out);
5145 }
5146 
5147 static void cout_muls64(DisasContext *s, DisasOps *o)
5148 {
5149     /* out contains "high" part, out2 contains "low" part of 128 bit result */
5150     gen_op_update2_cc_i64(s, CC_OP_MULS_64, o->out, o->out2);
5151 }
5152 
5153 /* ====================================================================== */
5154 /* The "PREParation" generators.  These initialize the DisasOps.OUT fields
5155    with the TCG register to which we will write.  Used in combination with
5156    the "wout" generators, in some cases we need a new temporary, and in
5157    some cases we can write to a TCG global.  */
5158 
/* Allocate a fresh 64-bit temporary as the destination. */
static void prep_new(DisasContext *s, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}
#define SPEC_prep_new 0

/* Allocate a fresh pair of 64-bit temporaries as the destination. */
static void prep_new_P(DisasContext *s, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}
#define SPEC_prep_new_P 0

/* Allocate a fresh 128-bit temporary as the destination. */
static void prep_new_x(DisasContext *s, DisasOps *o)
{
    o->out_128 = tcg_temp_new_i128();
}
#define SPEC_prep_new_x 0

/* Write directly into the r1 TCG global; no separate "wout" needed. */
static void prep_r1(DisasContext *s, DisasOps *o)
{
    o->out = regs[get_field(s, r1)];
}
#define SPEC_prep_r1 0

/* Write directly into the even/odd register pair r1/r1+1. */
static void prep_r1_P(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->out = regs[r1];
    o->out2 = regs[r1 + 1];
}
#define SPEC_prep_r1_P SPEC_r1_even
5191 
5192 /* ====================================================================== */
5193 /* The "Write OUTput" generators.  These generally perform some non-trivial
5194    copy of data to TCG globals, or to main memory.  The trivial cases are
5195    generally handled by having a "prep" generator install the TCG global
5196    as the destination of the operation.  */
5197 
static void wout_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->out);
}
#define SPEC_wout_r1 0

static void wout_out2_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->out2);
}
#define SPEC_wout_out2_r1 0

/* Insert only the low 8 bits of the result into r1. */
static void wout_r1_8(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}
#define SPEC_wout_r1_8 0

/* Insert only the low 16 bits of the result into r1. */
static void wout_r1_16(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
}
#define SPEC_wout_r1_16 0

static void wout_r1_32(DisasContext *s, DisasOps *o)
{
    store_reg32_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_r1_32 0

/* Store into the high half of r1 (high-word facility). */
static void wout_r1_32h(DisasContext *s, DisasOps *o)
{
    store_reg32h_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_r1_32h 0

/* Store the out/out2 pair into the low halves of r1/r1+1. */
static void wout_r1_P32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64(r1 + 1, o->out2);
}
#define SPEC_wout_r1_P32 SPEC_r1_even

/* Split a 64-bit result across the even/odd 32-bit pair:
   high word to r1, low word to r1+1. */
static void wout_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    TCGv_i64 t = tcg_temp_new_i64();
    store_reg32_i64(r1 + 1, o->out);
    tcg_gen_shri_i64(t, o->out, 32);
    store_reg32_i64(r1, t);
}
#define SPEC_wout_r1_D32 SPEC_r1_even

/* Split a 128-bit result across the even/odd 64-bit pair:
   high doubleword to r1, low doubleword to r1+1. */
static void wout_r1_D64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_extr_i128_i64(regs[r1 + 1], regs[r1], o->out_128);
}
#define SPEC_wout_r1_D64 SPEC_r1_even

static void wout_r3_P32(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    store_reg32_i64(r3, o->out);
    store_reg32_i64(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P32 SPEC_r3_even

static void wout_r3_P64(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    store_reg(r3, o->out);
    store_reg(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P64 SPEC_r3_even

/* Short (32-bit) FP result into f1. */
static void wout_e1(DisasContext *s, DisasOps *o)
{
    store_freg32_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_e1 0

/* Long (64-bit) FP result into f1. */
static void wout_f1(DisasContext *s, DisasOps *o)
{
    store_freg(get_field(s, r1), o->out);
}
#define SPEC_wout_f1 0

/* Extended (128-bit) FP result into the f1/f1+2 register pair. */
static void wout_x1(DisasContext *s, DisasOps *o)
{
    int f1 = get_field(s, r1);

    /* Split out_128 into out+out2 for cout_f128. */
    tcg_debug_assert(o->out == NULL);
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(o->out2, o->out, o->out_128);
    store_freg(f1, o->out);
    store_freg(f1 + 2, o->out2);
}
#define SPEC_wout_x1 SPEC_r1_f128

/* As wout_x1, but the result is already in the out/out2 pair. */
static void wout_x1_P(DisasContext *s, DisasOps *o)
{
    int f1 = get_field(s, r1);
    store_freg(f1, o->out);
    store_freg(f1 + 2, o->out2);
}
#define SPEC_wout_x1_P SPEC_r1_f128

/* Store only when r1 and r2 name different registers. */
static void wout_cond_r1r2_32(DisasContext *s, DisasOps *o)
{
    if (get_field(s, r1) != get_field(s, r2)) {
        store_reg32_i64(get_field(s, r1), o->out);
    }
}
#define SPEC_wout_cond_r1r2_32 0

/* Store only when r1 and r2 name different FP registers. */
static void wout_cond_e1e2(DisasContext *s, DisasOps *o)
{
    if (get_field(s, r1) != get_field(s, r2)) {
        store_freg32_i64(get_field(s, r1), o->out);
    }
}
#define SPEC_wout_cond_e1e2 0

static void wout_m1_8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_UB);
}
#define SPEC_wout_m1_8 0

static void wout_m1_16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUW);
}
#define SPEC_wout_m1_16 0

#ifndef CONFIG_USER_ONLY
/* As wout_m1_16, but with an alignment check (system-mode only). */
static void wout_m1_16a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN);
}
#define SPEC_wout_m1_16a 0
#endif

static void wout_m1_32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUL);
}
#define SPEC_wout_m1_32 0

#ifndef CONFIG_USER_ONLY
/* As wout_m1_32, but with an alignment check (system-mode only). */
static void wout_m1_32a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_wout_m1_32a 0
#endif

static void wout_m1_64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUQ);
}
#define SPEC_wout_m1_64 0

#ifndef CONFIG_USER_ONLY
/* As wout_m1_64, but with an alignment check (system-mode only). */
static void wout_m1_64a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUQ | MO_ALIGN);
}
#define SPEC_wout_m1_64a 0
#endif

/* Store to the address held in in2 (not addr1). */
static void wout_m2_32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->in2, get_mem_index(s), MO_TEUL);
}
#define SPEC_wout_m2_32 0

static void wout_in2_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->in2);
}
#define SPEC_wout_in2_r1 0

static void wout_in2_r1_32(DisasContext *s, DisasOps *o)
{
    store_reg32_i64(get_field(s, r1), o->in2);
}
#define SPEC_wout_in2_r1_32 0
5393 
5394 /* ====================================================================== */
5395 /* The "INput 1" generators.  These load the first operand to an insn.  */
5396 
static void in1_r1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r1));
}
#define SPEC_in1_r1 0

/* Use the register global directly (no copy); the op must not
   modify in1 unless r1 is also the destination. */
static void in1_r1_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r1)];
}
#define SPEC_in1_r1_o 0

static void in1_r1_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1)]);
}
#define SPEC_in1_r1_32s 0

static void in1_r1_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1)]);
}
#define SPEC_in1_r1_32u 0

/* High word of r1, shifted down ("sr32" = shift right 32). */
static void in1_r1_sr32(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(s, r1)], 32);
}
#define SPEC_in1_r1_sr32 0

/* Odd register of the even/odd pair ("p1" = plus one). */
static void in1_r1p1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r1) + 1);
}
#define SPEC_in1_r1p1 SPEC_r1_even

static void in1_r1p1_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r1) + 1];
}
#define SPEC_in1_r1p1_o SPEC_r1_even

static void in1_r1p1_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1) + 1]);
}
#define SPEC_in1_r1p1_32s SPEC_r1_even

static void in1_r1p1_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1) + 1]);
}
#define SPEC_in1_r1p1_32u SPEC_r1_even

/* 64-bit value assembled from the even/odd 32-bit pair:
   r1 holds the high word, r1+1 the low word. */
static void in1_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
}
#define SPEC_in1_r1_D32 SPEC_r1_even

static void in1_r2(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r2));
}
#define SPEC_in1_r2 0

static void in1_r2_sr32(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(s, r2)], 32);
}
#define SPEC_in1_r2_sr32 0

static void in1_r2_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r2)]);
}
#define SPEC_in1_r2_32u 0

static void in1_r3(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r3));
}
#define SPEC_in1_r3 0

static void in1_r3_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r3)];
}
#define SPEC_in1_r3_o 0

static void in1_r3_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r3)]);
}
#define SPEC_in1_r3_32s 0

static void in1_r3_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r3)]);
}
#define SPEC_in1_r3_32u 0

static void in1_r3_D32(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
}
#define SPEC_in1_r3_D32 SPEC_r3_even

static void in1_r3_sr32(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(s, r3)], 32);
}
#define SPEC_in1_r3_sr32 0

/* Short (32-bit) FP operand from f1. */
static void in1_e1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(s, r1));
}
#define SPEC_in1_e1 0

/* Long (64-bit) FP operand from f1. */
static void in1_f1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r1));
}
#define SPEC_in1_f1 0

/* Extended (128-bit) FP operand from the f1 register pair. */
static void in1_x1(DisasContext *s, DisasOps *o)
{
    o->in1_128 = load_freg_128(get_field(s, r1));
}
#define SPEC_in1_x1 SPEC_r1_f128

/* Load the high double word of an extended (128-bit) format FP number */
static void in1_x2h(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r2));
}
#define SPEC_in1_x2h SPEC_r2_f128

static void in1_f3(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r3));
}
#define SPEC_in1_f3 0

/* Effective address from the b1/d1 fields (no index register). */
static void in1_la1(DisasContext *s, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
}
#define SPEC_in1_la1 0

/* Effective address from x2/b2/d2, placed in addr1. */
static void in1_la2(DisasContext *s, DisasOps *o)
{
    int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
}
#define SPEC_in1_la2 0

static void in1_m1_8u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_UB);
}
#define SPEC_in1_m1_8u 0

static void in1_m1_16s(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TESW);
}
#define SPEC_in1_m1_16s 0

static void in1_m1_16u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUW);
}
#define SPEC_in1_m1_16u 0

static void in1_m1_32s(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TESL);
}
#define SPEC_in1_m1_32s 0

static void in1_m1_32u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUL);
}
#define SPEC_in1_m1_32u 0

static void in1_m1_64(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUQ);
}
#define SPEC_in1_m1_64 0
5616 
5617 /* ====================================================================== */
5618 /* The "INput 2" generators.  These load the second operand to an insn.  */
5619 
/* Use the r1 register global directly (no copy). */
static void in2_r1_o(DisasContext *s, DisasOps *o)
{
    o->in2 = regs[get_field(s, r1)];
}
#define SPEC_in2_r1_o 0

static void in2_r1_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r1)]);
}
#define SPEC_in2_r1_16u 0

static void in2_r1_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r1)]);
}
#define SPEC_in2_r1_32u 0

/* 64-bit value assembled from the even/odd 32-bit pair r1/r1+1. */
static void in2_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
}
#define SPEC_in2_r1_D32 SPEC_r1_even

static void in2_r2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_reg(get_field(s, r2));
}
#define SPEC_in2_r2 0

static void in2_r2_o(DisasContext *s, DisasOps *o)
{
    o->in2 = regs[get_field(s, r2)];
}
#define SPEC_in2_r2_o 0

/* Load r2 only when it is non-zero; otherwise leave in2 NULL. */
static void in2_r2_nz(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);
    if (r2 != 0) {
        o->in2 = load_reg(r2);
    }
}
#define SPEC_in2_r2_nz 0

static void in2_r2_8s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_8s 0

static void in2_r2_8u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_8u 0

static void in2_r2_16s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_16s 0

static void in2_r2_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_16u 0

static void in2_r3(DisasContext *s, DisasOps *o)
{
    o->in2 = load_reg(get_field(s, r3));
}
#define SPEC_in2_r3 0

/* 128-bit value assembled from the even/odd 64-bit pair r3/r3+1. */
static void in2_r3_D64(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    o->in2_128 = tcg_temp_new_i128();
    tcg_gen_concat_i64_i128(o->in2_128, regs[r3 + 1], regs[r3]);
}
#define SPEC_in2_r3_D64 SPEC_r3_even

static void in2_r3_sr32(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(s, r3)], 32);
}
#define SPEC_in2_r3_sr32 0

static void in2_r3_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r3)]);
}
#define SPEC_in2_r3_32u 0

static void in2_r2_32s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_32s 0

static void in2_r2_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_32u 0

static void in2_r2_sr32(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(s, r2)], 32);
}
#define SPEC_in2_r2_sr32 0

/* Short (32-bit) FP operand from f2. */
static void in2_e2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(s, r2));
}
#define SPEC_in2_e2 0

/* Long (64-bit) FP operand from f2. */
static void in2_f2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg(get_field(s, r2));
}
#define SPEC_in2_f2 0

/* Extended (128-bit) FP operand from the f2 register pair. */
static void in2_x2(DisasContext *s, DisasOps *o)
{
    o->in2_128 = load_freg_128(get_field(s, r2));
}
#define SPEC_in2_x2 SPEC_r2_f128

/* Load the low double word of an extended (128-bit) format FP number */
static void in2_x2l(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg(get_field(s, r2) + 2);
}
#define SPEC_in2_x2l SPEC_r2_f128

/* Register value used as an address, wrapped per addressing mode. */
static void in2_ra2(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);

    /* Note: *don't* treat !r2 as 0, use the reg value. */
    o->in2 = tcg_temp_new_i64();
    gen_addi_and_wrap_i64(s, o->in2, regs[r2], 0);
}
#define SPEC_in2_ra2 0
5780 
5781 static void in2_ra2_E(DisasContext *s, DisasOps *o)
5782 {
5783     return in2_ra2(s, o);
5784 }
5785 #define SPEC_in2_ra2_E SPEC_r2_even
5786 
/* Effective address from x2/b2/d2, placed in in2. */
static void in2_a2(DisasContext *s, DisasOps *o)
{
    int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
    o->in2 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
}
#define SPEC_in2_a2 0

/* PC-relative operand: either a compile-time constant address
   (pc_next + 2*imm) or a register-provided value, per disas_jdest. */
static TCGv gen_ri2(DisasContext *s)
{
    TCGv ri2 = NULL;
    bool is_imm;
    int imm;

    disas_jdest(s, i2, is_imm, imm, ri2);
    if (is_imm) {
        ri2 = tcg_constant_i64(s->base.pc_next + (int64_t)imm * 2);
    }

    return ri2;
}

static void in2_ri2(DisasContext *s, DisasOps *o)
{
    o->in2 = gen_ri2(s);
}
#define SPEC_in2_ri2 0

/* Shift count operand: (b2 + d2) masked to 6 bits; when b2 is zero the
   count is the immediate displacement alone. */
static void in2_sh(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s, b2);
    int d2 = get_field(s, d2);

    if (b2 == 0) {
        o->in2 = tcg_constant_i64(d2 & 0x3f);
    } else {
        o->in2 = get_address(s, 0, b2, d2);
        tcg_gen_andi_i64(o->in2, o->in2, 0x3f);
    }
}
#define SPEC_in2_sh 0

static void in2_m2_8u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_UB);
}
#define SPEC_in2_m2_8u 0

static void in2_m2_16s(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TESW);
}
#define SPEC_in2_m2_16s 0

static void in2_m2_16u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUW);
}
#define SPEC_in2_m2_16u 0

static void in2_m2_32s(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TESL);
}
#define SPEC_in2_m2_32s 0

static void in2_m2_32u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUL);
}
#define SPEC_in2_m2_32u 0

#ifndef CONFIG_USER_ONLY
/* As in2_m2_32u, but with an alignment check (system-mode only). */
static void in2_m2_32ua(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_tl(o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_in2_m2_32ua 0
#endif

static void in2_m2_64(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ);
}
#define SPEC_in2_m2_64 0

/* As in2_m2_64, then wrap the loaded value per the addressing mode
   (for values that are themselves used as addresses). */
static void in2_m2_64w(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ);
    gen_addi_and_wrap_i64(s, o->in2, o->in2, 0);
}
#define SPEC_in2_m2_64w 0

#ifndef CONFIG_USER_ONLY
/* As in2_m2_64, but with an alignment check (system-mode only). */
static void in2_m2_64a(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ | MO_ALIGN);
}
#define SPEC_in2_m2_64a 0
#endif

/* Memory operands addressed PC-relative (via gen_ri2). */
static void in2_mri2_16s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s), MO_TESW);
}
#define SPEC_in2_mri2_16s 0

static void in2_mri2_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s), MO_TEUW);
}
#define SPEC_in2_mri2_16u 0

static void in2_mri2_32s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_tl(o->in2, gen_ri2(s), get_mem_index(s),
                       MO_TESL | MO_ALIGN);
}
#define SPEC_in2_mri2_32s 0

static void in2_mri2_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_tl(o->in2, gen_ri2(s), get_mem_index(s),
                       MO_TEUL | MO_ALIGN);
}
#define SPEC_in2_mri2_32u 0

static void in2_mri2_64(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s),
                        MO_TEUQ | MO_ALIGN);
}
#define SPEC_in2_mri2_64 0

/* Immediate operands from the i2 field, with various extensions. */
static void in2_i2(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64(get_field(s, i2));
}
#define SPEC_in2_i2 0

static void in2_i2_8u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64((uint8_t)get_field(s, i2));
}
#define SPEC_in2_i2_8u 0

static void in2_i2_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64((uint16_t)get_field(s, i2));
}
#define SPEC_in2_i2_16u 0

static void in2_i2_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64((uint32_t)get_field(s, i2));
}
#define SPEC_in2_i2_32u 0

/* Zero-extended immediate shifted left by the per-insn data amount. */
static void in2_i2_16u_shl(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(s, i2);
    o->in2 = tcg_constant_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_16u_shl 0

static void in2_i2_32u_shl(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(s, i2);
    o->in2 = tcg_constant_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_32u_shl 0

#ifndef CONFIG_USER_ONLY
/* The raw instruction bytes themselves, as an operand. */
static void in2_insn(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64(s->fields.raw_insn);
}
#define SPEC_in2_insn 0
#endif
5979 
5980 /* ====================================================================== */
5981 
5982 /* Find opc within the table of insns.  This is formulated as a switch
5983    statement so that (1) we get compile-time notice of cut-paste errors
5984    for duplicated opcodes, and (2) the compiler generates the binary
5985    search tree, rather than us having to post-process the table.  */
5986 
5987 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
5988     E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0)
5989 
5990 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
5991     E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0)
5992 
5993 #define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \
5994     E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL)
5995 
5996 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM,
5997 
5998 enum DisasInsnEnum {
5999 #include "insn-data.h.inc"
6000 };
6001 
6002 #undef E
6003 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) {                   \
6004     .opc = OPC,                                                             \
6005     .flags = FL,                                                            \
6006     .fmt = FMT_##FT,                                                        \
6007     .fac = FAC_##FC,                                                        \
6008     .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W,  \
6009     .name = #NM,                                                            \
6010     .help_in1 = in1_##I1,                                                   \
6011     .help_in2 = in2_##I2,                                                   \
6012     .help_prep = prep_##P,                                                  \
6013     .help_wout = wout_##W,                                                  \
6014     .help_cout = cout_##CC,                                                 \
6015     .help_op = op_##OP,                                                     \
6016     .data = D                                                               \
6017  },
6018 
6019 /* Allow 0 to be used for NULL in the table below.  */
6020 #define in1_0  NULL
6021 #define in2_0  NULL
6022 #define prep_0  NULL
6023 #define wout_0  NULL
6024 #define cout_0  NULL
6025 #define op_0  NULL
6026 
6027 #define SPEC_in1_0 0
6028 #define SPEC_in2_0 0
6029 #define SPEC_prep_0 0
6030 #define SPEC_wout_0 0
6031 
6032 /* Give smaller names to the various facilities.  */
6033 #define FAC_Z           S390_FEAT_ZARCH
6034 #define FAC_CASS        S390_FEAT_COMPARE_AND_SWAP_AND_STORE
6035 #define FAC_DFP         S390_FEAT_DFP
6036 #define FAC_DFPR        S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* DFP-rounding */
6037 #define FAC_DO          S390_FEAT_STFLE_45 /* distinct-operands */
6038 #define FAC_EE          S390_FEAT_EXECUTE_EXT
6039 #define FAC_EI          S390_FEAT_EXTENDED_IMMEDIATE
6040 #define FAC_FPE         S390_FEAT_FLOATING_POINT_EXT
6041 #define FAC_FPSSH       S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* FPS-sign-handling */
6042 #define FAC_FPRGR       S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* FPR-GR-transfer */
6043 #define FAC_GIE         S390_FEAT_GENERAL_INSTRUCTIONS_EXT
6044 #define FAC_HFP_MA      S390_FEAT_HFP_MADDSUB
6045 #define FAC_HW          S390_FEAT_STFLE_45 /* high-word */
6046 #define FAC_IEEEE_SIM   S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* IEEE-exception-simulation */
6047 #define FAC_MIE         S390_FEAT_STFLE_49 /* misc-instruction-extensions */
6048 #define FAC_LAT         S390_FEAT_STFLE_49 /* load-and-trap */
6049 #define FAC_LOC         S390_FEAT_STFLE_45 /* load/store on condition 1 */
6050 #define FAC_LOC2        S390_FEAT_STFLE_53 /* load/store on condition 2 */
6051 #define FAC_LD          S390_FEAT_LONG_DISPLACEMENT
6052 #define FAC_PC          S390_FEAT_STFLE_45 /* population count */
6053 #define FAC_SCF         S390_FEAT_STORE_CLOCK_FAST
6054 #define FAC_SFLE        S390_FEAT_STFLE
6055 #define FAC_ILA         S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
6056 #define FAC_MVCOS       S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
6057 #define FAC_LPP         S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
6058 #define FAC_DAT_ENH     S390_FEAT_DAT_ENH
6059 #define FAC_E2          S390_FEAT_EXTENDED_TRANSLATION_2
6060 #define FAC_EH          S390_FEAT_STFLE_49 /* execution-hint */
6061 #define FAC_PPA         S390_FEAT_STFLE_49 /* processor-assist */
6062 #define FAC_LZRB        S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
6063 #define FAC_ETF3        S390_FEAT_EXTENDED_TRANSLATION_3
6064 #define FAC_MSA         S390_FEAT_MSA /* message-security-assist facility */
6065 #define FAC_MSA3        S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
6066 #define FAC_MSA4        S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
6067 #define FAC_MSA5        S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
6068 #define FAC_MSA8        S390_FEAT_MSA_EXT_8 /* msa-extension-8 facility */
6069 #define FAC_ECT         S390_FEAT_EXTRACT_CPU_TIME
6070 #define FAC_PCI         S390_FEAT_ZPCI /* z/PCI facility */
6071 #define FAC_AIS         S390_FEAT_ADAPTER_INT_SUPPRESSION
6072 #define FAC_V           S390_FEAT_VECTOR /* vector facility */
6073 #define FAC_VE          S390_FEAT_VECTOR_ENH  /* vector enhancements facility 1 */
6074 #define FAC_VE2         S390_FEAT_VECTOR_ENH2 /* vector enhancements facility 2 */
6075 #define FAC_MIE2        S390_FEAT_MISC_INSTRUCTION_EXT2 /* miscellaneous-instruction-extensions facility 2 */
6076 #define FAC_MIE3        S390_FEAT_MISC_INSTRUCTION_EXT3 /* miscellaneous-instruction-extensions facility 3 */
6077 
/* Table of all implemented instructions, expanded from insn-data.h.inc
   via the E macro defined above.  */
static const DisasInsn insn_info[] = {
#include "insn-data.h.inc"
};
6081 
6082 #undef E
6083 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \
6084     case OPC: return &insn_info[insn_ ## NM];
6085 
/*
 * Map a 16-bit opcode (major << 8 | minor) to its DisasInsn descriptor.
 * The case labels come from re-expanding insn-data.h.inc with the E
 * macro redefined above.  Returns NULL for unimplemented opcodes.
 */
static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.h.inc"
    default:
        return NULL;
    }
}
6094 
6095 #undef F
6096 #undef E
6097 #undef D
6098 #undef C
6099 
/* Extract a field from the insn.  The INSN should be left-aligned in
   the uint64_t so that we can more easily utilize the big-bit-endian
   definitions we extract from the Principles of Operation.  */

static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
{
    uint32_t r, m;

    /* A zero-sized field means the operand is absent from this format.  */
    if (f->size == 0) {
        return;
    }

    /* Zero extract the field from the insn.  */
    r = (insn << f->beg) >> (64 - f->size);

    /* Sign-extend, or un-swap the field as necessary.  */
    switch (f->type) {
    case 0: /* unsigned */
        break;
    case 1: /* signed */
        assert(f->size <= 32);
        /* Sign-extend via the xor/subtract trick on the sign bit.  */
        m = 1u << (f->size - 1);
        r = (r ^ m) - m;
        break;
    case 2: /* dl+dh split, signed 20 bit. */
        /* The raw field holds DL (12 bits) followed by DH (8 bits);
           reassemble as the signed 20-bit displacement DH:DL.  */
        r = ((int8_t)r << 12) | (r >> 8);
        break;
    case 3: /* MSB stored in RXB */
        g_assert(f->size == 4);
        /* Each 4-bit vector register field gets its 5th (most
           significant) bit from the RXB byte, insn bits 36-39;
           which RXB bit applies is determined by the field position.  */
        switch (f->beg) {
        case 8:
            r |= extract64(insn, 63 - 36, 1) << 4;
            break;
        case 12:
            r |= extract64(insn, 63 - 37, 1) << 4;
            break;
        case 16:
            r |= extract64(insn, 63 - 38, 1) << 4;
            break;
        case 32:
            r |= extract64(insn, 63 - 39, 1) << 4;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    default:
        abort();
    }

    /*
     * Validate that the "compressed" encoding we selected above is valid.
     * I.e. we haven't made two different original fields overlap.
     */
    assert(((o->presentC >> f->indexC) & 1) == 0);
    o->presentC |= 1 << f->indexC;
    o->presentO |= 1 << f->indexO;

    o->c[f->indexC] = r;
}
6160 
/* Lookup the insn at the current PC, extracting the operands into O and
   returning the info struct for the insn.  Returns NULL for invalid insn.  */

static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s)
{
    uint64_t insn, pc = s->base.pc_next;
    int op, op2, ilen;
    const DisasInsn *info;

    if (unlikely(s->ex_value)) {
        /* Drop the EX data now, so that it's clear on exception paths.  */
        tcg_gen_st_i64(tcg_constant_i64(0), tcg_env,
                       offsetof(CPUS390XState, ex_value));

        /* Extract the values saved by EXECUTE.  The insn bytes are in
           the upper 48 bits, the length in the low nibble.  */
        insn = s->ex_value & 0xffffffffffff0000ull;
        ilen = s->ex_value & 0xf;

        /* Register insn bytes with translator so plugins work. */
        for (int i = 0; i < ilen; i++) {
            uint8_t byte = extract64(insn, 56 - (i * 8), 8);
            translator_fake_ldb(byte, pc + i);
        }
        op = insn >> 56;
    } else {
        /* The major opcode determines the total insn length (2/4/6),
           so read the first halfword, then fetch the rest and
           left-align the full insn in the 64-bit word.  */
        insn = ld_code2(env, s, pc);
        op = (insn >> 8) & 0xff;
        ilen = get_ilen(op);
        switch (ilen) {
        case 2:
            insn = insn << 48;
            break;
        case 4:
            insn = ld_code4(env, s, pc) << 32;
            break;
        case 6:
            insn = (insn << 48) | (ld_code4(env, s, pc + 2) << 16);
            break;
        default:
            g_assert_not_reached();
        }
    }
    /* pc_tmp is the address of the insn following this one.  */
    s->pc_tmp = s->base.pc_next + ilen;
    s->ilen = ilen;

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0. */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE, IE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        /* Secondary opcode is the second byte.  */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        /* Secondary opcode is the low nibble of the second byte.  */
        op2 = (insn << 12) >> 60;
        break;
    case 0xc5: /* MII */
    case 0xc7: /* SMI */
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        /* No secondary opcode for these formats.  */
        op2 = 0;
        break;
    default:
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(&s->fields, 0, sizeof(s->fields));
    s->fields.raw_insn = insn;
    s->fields.op = op;
    s->fields.op2 = op2;

    /* Lookup the instruction.  */
    info = lookup_opc(op << 8 | op2);
    s->insn = info;

    /* If we found it, extract the operands.  */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(&s->fields, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
6269 
/*
 * Return true if REG is one of the additional floating-point registers,
 * i.e. anything other than the basic set f0, f2, f4 and f6.
 */
static bool is_afp_reg(int reg)
{
    return (reg & 1) != 0 || reg > 6;
}
6274 
/*
 * Return true if REG may be the first register of a 128-bit FP pair.
 * The valid values are 0,1,4,5,8,9,12,13 -- exactly those register
 * numbers with bit 1 clear.
 */
static bool is_fp_pair(int reg)
{
    return (reg & 0x2) == 0;
}
6280 
/*
 * Translate the single instruction at s->base.pc_next.  Emits the
 * privilege/AFP/vector/specification checks required by the insn's
 * flags, then dispatches to its help_* callbacks.  Returns the
 * resulting DisasJumpType and advances s->base.pc_next past the insn.
 */
static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    DisasJumpType ret = DISAS_NEXT;
    DisasOps o = {};
    bool icount = false;

    /* Search for the insn in the table.  */
    insn = extract_insn(env, s);

    /* Update insn_start now that we know the ILEN.  */
    tcg_set_insn_start_param(s->insn_start, 2, s->ilen);

    /* Not found means unimplemented/illegal opcode.  */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
                      s->fields.op, s->fields.op2);
        gen_illegal_opcode(s);
        ret = DISAS_NORETURN;
        goto out;
    }

#ifndef CONFIG_USER_ONLY
    /* With PER tracing active, report the instruction fetch.  */
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 addr = tcg_constant_i64(s->base.pc_next);
        gen_helper_per_ifetch(tcg_env, addr);
    }
#endif

    /* process flags */
    if (insn->flags) {
        /* privileged instruction */
        if ((s->base.tb->flags & FLAG_MASK_PSTATE) && (insn->flags & IF_PRIV)) {
            gen_program_exception(s, PGM_PRIVILEGED);
            ret = DISAS_NORETURN;
            goto out;
        }

        /* if AFP is not enabled, instructions and registers are forbidden */
        if (!(s->base.tb->flags & FLAG_MASK_AFP)) {
            /* dxc is the data-exception code; 0 means no exception.  */
            uint8_t dxc = 0;

            if ((insn->flags & IF_AFP1) && is_afp_reg(get_field(s, r1))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP2) && is_afp_reg(get_field(s, r2))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP3) && is_afp_reg(get_field(s, r3))) {
                dxc = 1;
            }
            if (insn->flags & IF_BFP) {
                dxc = 2;
            }
            if (insn->flags & IF_DFP) {
                dxc = 3;
            }
            if (insn->flags & IF_VEC) {
                dxc = 0xfe;
            }
            if (dxc) {
                gen_data_exception(dxc);
                ret = DISAS_NORETURN;
                goto out;
            }
        }

        /* if vector instructions not enabled, executing them is forbidden */
        if (insn->flags & IF_VEC) {
            if (!((s->base.tb->flags & FLAG_MASK_VECTOR))) {
                gen_data_exception(0xfe);
                ret = DISAS_NORETURN;
                goto out;
            }
        }

        /* input/output is the special case for icount mode */
        if (unlikely(insn->flags & IF_IO)) {
            icount = translator_io_start(&s->base);
        }
    }

    /* Check for insn specification exceptions.  */
    if (insn->spec) {
        if ((insn->spec & SPEC_r1_even && get_field(s, r1) & 1) ||
            (insn->spec & SPEC_r2_even && get_field(s, r2) & 1) ||
            (insn->spec & SPEC_r3_even && get_field(s, r3) & 1) ||
            (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(s, r1))) ||
            (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(s, r2)))) {
            gen_program_exception(s, PGM_SPECIFICATION);
            ret = DISAS_NORETURN;
            goto out;
        }
    }

    /* Implement the instruction: load inputs, prepare the output,
       perform the operation, then write back output and cc.  */
    if (insn->help_in1) {
        insn->help_in1(s, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    /* Skip write-back when the operation already ended the TB.  */
    if (ret != DISAS_NORETURN) {
        if (insn->help_wout) {
            insn->help_wout(s, &o);
        }
        if (insn->help_cout) {
            insn->help_cout(s, &o);
        }
    }

    /* io should be the last instruction in tb when icount is enabled */
    if (unlikely(icount && ret == DISAS_NEXT)) {
        ret = DISAS_TOO_MANY;
    }

#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        /* An exception might be triggered, save PSW if not already done.  */
        if (ret == DISAS_NEXT || ret == DISAS_TOO_MANY) {
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
        }

        /* Call the helper to check for a possible PER exception.  */
        gen_helper_per_check_exception(tcg_env);
    }
#endif

out:
    /* Advance to the next instruction.  */
    s->base.pc_next = s->pc_tmp;
    return ret;
}
6420 
6421 static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
6422 {
6423     DisasContext *dc = container_of(dcbase, DisasContext, base);
6424 
6425     /* 31-bit mode */
6426     if (!(dc->base.tb->flags & FLAG_MASK_64)) {
6427         dc->base.pc_first &= 0x7fffffff;
6428         dc->base.pc_next = dc->base.pc_first;
6429     }
6430 
6431     dc->cc_op = CC_OP_DYNAMIC;
6432     dc->ex_value = dc->base.tb->cs_base;
6433     dc->exit_to_mainloop = (dc->base.tb->flags & FLAG_MASK_PER) || dc->ex_value;
6434 }
6435 
/* Translator hook: nothing to emit at the start of a TB for s390x. */
static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}
6439 
/*
 * Translator hook: emit insn_start with pc and cc_op.  The third
 * parameter (ilen) is a placeholder here; translate_one patches it via
 * tcg_set_insn_start_param once the insn has been read.
 */
static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /* Delay the set of ilen until we've read the insn. */
    tcg_gen_insn_start(dc->base.pc_next, dc->cc_op, 0);
    dc->insn_start = tcg_last_op();
}
6448 
6449 static target_ulong get_next_pc(CPUS390XState *env, DisasContext *s,
6450                                 uint64_t pc)
6451 {
6452     uint64_t insn = cpu_lduw_code(env, pc);
6453 
6454     return pc + get_ilen((insn >> 8) & 0xff);
6455 }
6456 
6457 static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
6458 {
6459     CPUS390XState *env = cpu_env(cs);
6460     DisasContext *dc = container_of(dcbase, DisasContext, base);
6461 
6462     dc->base.is_jmp = translate_one(env, dc);
6463     if (dc->base.is_jmp == DISAS_NEXT) {
6464         if (dc->ex_value ||
6465             !is_same_page(dcbase, dc->base.pc_next) ||
6466             !is_same_page(dcbase, get_next_pc(env, dc, dc->base.pc_next))) {
6467             dc->base.is_jmp = DISAS_TOO_MANY;
6468         }
6469     }
6470 }
6471 
/*
 * Translator hook: finish the TB.  The cases deliberately fall through:
 * DISAS_TOO_MANY also needs psw.addr synced, DISAS_PC_UPDATED also
 * needs cc_op synced, and all three end with an exit from the TB.
 */
static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    switch (dc->base.is_jmp) {
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
        update_psw_addr(dc);
        /* FALLTHRU */
    case DISAS_PC_UPDATED:
        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
           cc op type is in env */
        update_cc_op(dc);
        /* FALLTHRU */
    case DISAS_PC_CC_UPDATED:
        /* Exit the TB, either by raising a debug exception or by return.  */
        if (dc->exit_to_mainloop) {
            tcg_gen_exit_tb(NULL, 0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        break;
    default:
        g_assert_not_reached();
    }
}
6499 
6500 static void s390x_tr_disas_log(const DisasContextBase *dcbase,
6501                                CPUState *cs, FILE *logfile)
6502 {
6503     DisasContext *dc = container_of(dcbase, DisasContext, base);
6504 
6505     if (unlikely(dc->ex_value)) {
6506         /* ??? Unfortunately target_disas can't use host memory.  */
6507         fprintf(logfile, "IN: EXECUTE %016" PRIx64, dc->ex_value);
6508     } else {
6509         fprintf(logfile, "IN: %s\n", lookup_symbol(dc->base.pc_first));
6510         target_disas(logfile, cs, dc->base.pc_first, dc->base.tb->size);
6511     }
6512 }
6513 
/* Hooks used by the generic translator loop (translator_loop). */
static const TranslatorOps s390x_tr_ops = {
    .init_disas_context = s390x_tr_init_disas_context,
    .tb_start           = s390x_tr_tb_start,
    .insn_start         = s390x_tr_insn_start,
    .translate_insn     = s390x_tr_translate_insn,
    .tb_stop            = s390x_tr_tb_stop,
    .disas_log          = s390x_tr_disas_log,
};
6522 
/* Entry point: translate a block of guest code starting at PC into TB. */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           vaddr pc, void *host_pc)
{
    DisasContext dc;

    translator_loop(cs, tb, max_insns, pc, host_pc, &s390x_tr_ops, &dc.base);
}
6530 
6531 void s390x_restore_state_to_opc(CPUState *cs,
6532                                 const TranslationBlock *tb,
6533                                 const uint64_t *data)
6534 {
6535     S390CPU *cpu = S390_CPU(cs);
6536     CPUS390XState *env = &cpu->env;
6537     int cc_op = data[1];
6538 
6539     env->psw.addr = data[0];
6540 
6541     /* Update the CC opcode if it is not already up-to-date.  */
6542     if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
6543         env->cc_op = cc_op;
6544     }
6545 
6546     /* Record ILEN.  */
6547     env->int_pgm_ilen = data[2];
6548 }
6549