xref: /openbmc/qemu/target/s390x/tcg/translate.c (revision 104cf552)
1 /*
2  *  S/390 translation
3  *
4  *  Copyright (c) 2009 Ulrich Hecht
5  *  Copyright (c) 2010 Alexander Graf
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
/* Compile-time debug switches; enable by uncommenting. */
/* #define DEBUG_INLINE_BRANCHES */
#define S390X_DEBUG_DISAS
/* #define S390X_DEBUG_DISAS_VERBOSE */

/* LOG_DISAS() is a no-op unless verbose disas debugging is compiled in. */
#ifdef S390X_DEBUG_DISAS_VERBOSE
#  define LOG_DISAS(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
30 
31 #include "qemu/osdep.h"
32 #include "cpu.h"
33 #include "s390x-internal.h"
34 #include "exec/exec-all.h"
35 #include "tcg/tcg-op.h"
36 #include "tcg/tcg-op-gvec.h"
37 #include "qemu/log.h"
38 #include "qemu/host-utils.h"
39 #include "exec/cpu_ldst.h"
40 #include "exec/helper-proto.h"
41 #include "exec/helper-gen.h"
42 
43 #include "exec/translator.h"
44 #include "exec/log.h"
45 #include "qemu/atomic128.h"
46 
47 #define HELPER_H "helper.h"
48 #include "exec/helper-info.c.inc"
49 #undef  HELPER_H
50 
51 
/* Information that (most) every instruction needs to manipulate.  */
typedef struct DisasContext DisasContext;       /* per-TB translation state */
typedef struct DisasInsn DisasInsn;             /* decode-table entry (defined later) */
typedef struct DisasFields DisasFields;         /* decoded operand fields */
56 
57 /*
58  * Define a structure to hold the decoded fields.  We'll store each inside
59  * an array indexed by an enum.  In order to conserve memory, we'll arrange
60  * for fields that do not exist at the same time to overlap, thus the "C"
61  * for compact.  For checking purposes there is an "O" for original index
62  * as well that will be applied to availability bitmaps.
63  */
64 
/*
 * "Original" field indices; each is used as a bit position in the
 * DisasFields.presentO availability bitmap.
 */
enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_m5,
    FLD_O_m6,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5,
    FLD_O_v1,
    FLD_O_v2,
    FLD_O_v3,
    FLD_O_v4,
};
93 
/*
 * "Compact" field indices into DisasFields.c[].  Fields that can never
 * appear in the same instruction format share a slot.
 */
enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,
    FLD_C_v1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,
    FLD_C_v3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,
    FLD_C_v4 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,
    FLD_C_m5 = 4,

    FLD_C_d2 = 5,
    FLD_C_m6 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,
    FLD_C_v2 = 6,

    NUM_C_FIELD = 7
};
130 
/* Decoded operand fields of the current instruction. */
struct DisasFields {
    uint64_t raw_insn;          /* raw instruction bytes, as loaded */
    unsigned op:8;              /* primary opcode */
    unsigned op2:8;             /* secondary opcode, where the format has one */
    unsigned presentC:16;       /* bitmap over compact (FLD_C_*) indices */
    unsigned int presentO;      /* bitmap over original (FLD_O_*) indices */
    int c[NUM_C_FIELD];         /* field values, indexed by FLD_C_* */
};
139 
/* Per-translation-block disassembly state. */
struct DisasContext {
    DisasContextBase base;      /* common translator state; must be first */
    const DisasInsn *insn;      /* decode-table entry of current insn */
    DisasFields fields;         /* decoded fields of current insn */
    uint64_t ex_value;          /* EXECUTE target value, 0 if not in EXECUTE */
    /*
     * During translate_one(), pc_tmp is used to determine the instruction
     * to be executed after base.pc_next - e.g. next sequential instruction
     * or a branch target.
     */
    uint64_t pc_tmp;
    uint32_t ilen;              /* length in bytes of current insn */
    enum cc_op cc_op;           /* how the CC is currently computed */
    bool exit_to_mainloop;      /* force exit to main loop at end of TB */
};
155 
/* Information carried about a condition to be evaluated.  */
typedef struct {
    TCGCond cond:8;             /* TCG comparison to apply */
    bool is_64;                 /* true: use u.s64, false: use u.s32 */
    union {
        struct { TCGv_i64 a, b; } s64;  /* 64-bit comparison operands */
        struct { TCGv_i32 a, b; } s32;  /* 32-bit comparison operands */
    } u;
} DisasCompare;
165 
#ifdef DEBUG_INLINE_BRANCHES
/* Per-cc_op counters for branches compiled inline vs. via helper. */
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif
170 
171 static void pc_to_link_info(TCGv_i64 out, DisasContext *s, uint64_t pc)
172 {
173     if (s->base.tb->flags & FLAG_MASK_32) {
174         if (s->base.tb->flags & FLAG_MASK_64) {
175             tcg_gen_movi_i64(out, pc);
176             return;
177         }
178         pc |= 0x80000000;
179     }
180     assert(!(s->base.tb->flags & FLAG_MASK_64));
181     tcg_gen_deposit_i64(out, out, tcg_constant_i64(pc), 0, 32);
182 }
183 
/* TCG globals mirroring CPUS390XState fields; created in s390x_translate_init(). */
static TCGv_i64 psw_addr;       /* psw.addr */
static TCGv_i64 psw_mask;       /* psw.mask */
static TCGv_i64 gbea;           /* breaking-event address (PER) */

static TCGv_i32 cc_op;          /* dynamic cc_op value in env */
static TCGv_i64 cc_src;         /* cc computation inputs ... */
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

static char cpu_reg_names[16][4];   /* backing storage for "r0".."r15" */
static TCGv_i64 regs[16];           /* general registers */
195 
/*
 * Create the TCG globals for the PSW, condition-code state and the 16
 * general registers.  Called once at CPU translation setup.
 */
void s390x_translate_init(void)
{
    int i;

    psw_addr = tcg_global_mem_new_i64(tcg_env,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(tcg_env,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");
    gbea = tcg_global_mem_new_i64(tcg_env,
                                  offsetof(CPUS390XState, gbea),
                                  "gbea");

    cc_op = tcg_global_mem_new_i32(tcg_env, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(tcg_env, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(tcg_env, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(tcg_env, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    /* Name each general register "r0".."r15"; names must stay live. */
    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(tcg_env,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }
}
226 
/* Return the env offset of the full 16-byte vector register REG (0..31). */
static inline int vec_full_reg_offset(uint8_t reg)
{
    g_assert(reg < 32);
    return offsetof(CPUS390XState, vregs[reg][0]);
}
232 
/*
 * Return the env offset of element ENR (of size ES, a MemOp) within
 * vector register REG, accounting for host endianness.
 */
static inline int vec_reg_offset(uint8_t reg, uint8_t enr, MemOp es)
{
    /* Convert element size (es) - e.g. MO_8 - to bytes */
    const uint8_t bytes = 1 << es;
    int offs = enr * bytes;

    /*
     * vregs[n][0] is the lowest 8 byte and vregs[n][1] the highest 8 byte
     * of the 16 byte vector, on both, little and big endian systems.
     *
     * Big Endian (target/possible host)
     * B:  [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15]
     * HW: [     0][     1][     2][     3] - [     4][     5][     6][     7]
     * W:  [             0][             1] - [             2][             3]
     * DW: [                             0] - [                             1]
     *
     * Little Endian (possible host)
     * B:  [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8]
     * HW: [     3][     2][     1][     0] - [     7][     6][     5][     4]
     * W:  [             1][             0] - [             3][             2]
     * DW: [                             0] - [                             1]
     *
     * For 16 byte elements, the two 8 byte halves will not form a host
     * int128 if the host is little endian, since they're in the wrong order.
     * Some operations (e.g. xor) do not care. For operations like addition,
     * the two 8 byte elements have to be loaded separately. Let's force all
     * 16 byte operations to handle it in a special way.
     */
    g_assert(es <= MO_64);
#if !HOST_BIG_ENDIAN
    /* Flip the element's position within its 8-byte doubleword. */
    offs ^= (8 - bytes);
#endif
    return offs + vec_full_reg_offset(reg);
}
267 
/* Env offset of FP register REG: the high doubleword of vector reg REG. */
static inline int freg64_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_64);
}
273 
/* Env offset of the 32-bit (short) part of FP register REG. */
static inline int freg32_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_32);
}
279 
/* Return a fresh temp holding a copy of general register REG. */
static TCGv_i64 load_reg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;
}
286 
/* Return a fresh temp holding the 64-bit FP register REG. */
static TCGv_i64 load_freg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld_i64(r, tcg_env, freg64_offset(reg));
    return r;
}
294 
/* Return a fresh temp with the 32-bit FP register REG, zero-extended. */
static TCGv_i64 load_freg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld32u_i64(r, tcg_env, freg32_offset(reg));
    return r;
}
302 
/*
 * Return a fresh 128-bit temp for the FP register pair REG/REG+2:
 * REG holds the high half, REG+2 the low half.
 */
static TCGv_i128 load_freg_128(int reg)
{
    TCGv_i64 h = load_freg(reg);
    TCGv_i64 l = load_freg(reg + 2);
    TCGv_i128 r = tcg_temp_new_i128();

    /* concat takes (low, high). */
    tcg_gen_concat_i64_i128(r, l, h);
    return r;
}
312 
/* Store V into all 64 bits of general register REG. */
static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}
317 
/* Store V into the 64-bit FP register REG. */
static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_st_i64(v, tcg_env, freg64_offset(reg));
}
322 
/* Store the low 32 bits of V into register REG, preserving the high half. */
static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}
328 
/* Store the low 32 bits of V into the HIGH half of register REG. */
static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}
333 
/* Store the low 32 bits of V into the short FP register REG. */
static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_st32_i64(v, tcg_env, freg32_offset(reg));
}
338 
/* Synchronize psw.addr with the current translation address. */
static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->base.pc_next);
}
344 
/*
 * Record the breaking-event address for an unconditional branch and, if
 * PER is enabled, call the per_branch helper.  TO_NEXT selects the next
 * sequential instruction (pc_tmp) as target instead of psw_addr.
 * No-op for user-only builds.
 */
static void per_branch(DisasContext *s, bool to_next)
{
#ifndef CONFIG_USER_ONLY
    tcg_gen_movi_i64(gbea, s->base.pc_next);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 next_pc = to_next ? tcg_constant_i64(s->pc_tmp) : psw_addr;
        gen_helper_per_branch(tcg_env, gbea, next_pc);
    }
#endif
}
356 
/*
 * As per_branch, but for a branch taken only when COND(arg1, arg2) holds.
 * Without PER, just conditionally update gbea; with PER, branch around
 * the helper call when the condition is false.
 */
static void per_branch_cond(DisasContext *s, TCGCond cond,
                            TCGv_i64 arg1, TCGv_i64 arg2)
{
#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGLabel *lab = gen_new_label();
        /* Skip the PER helper when the branch is not taken. */
        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);

        tcg_gen_movi_i64(gbea, s->base.pc_next);
        gen_helper_per_branch(tcg_env, gbea, psw_addr);

        gen_set_label(lab);
    } else {
        /* gbea = taken ? pc : gbea */
        TCGv_i64 pc = tcg_constant_i64(s->base.pc_next);
        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
    }
#endif
}
375 
/* Unconditionally record the current PC as the breaking-event address. */
static void per_breaking_event(DisasContext *s)
{
    tcg_gen_movi_i64(gbea, s->base.pc_next);
}
380 
381 static void update_cc_op(DisasContext *s)
382 {
383     if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
384         tcg_gen_movi_i32(cc_op, s->cc_op);
385     }
386 }
387 
/* Fetch a 2-byte instruction unit at PC, zero-extended to 64 bits. */
static inline uint64_t ld_code2(CPUS390XState *env, DisasContext *s,
                                uint64_t pc)
{
    return (uint64_t)translator_lduw(env, &s->base, pc);
}
393 
/* Fetch a 4-byte instruction unit at PC, zero-extended to 64 bits. */
static inline uint64_t ld_code4(CPUS390XState *env, DisasContext *s,
                                uint64_t pc)
{
    /* The uint32_t cast avoids sign extension of the 32-bit load. */
    return (uint64_t)(uint32_t)translator_ldl(env, &s->base, pc);
}
399 
400 static int get_mem_index(DisasContext *s)
401 {
402 #ifdef CONFIG_USER_ONLY
403     return MMU_USER_IDX;
404 #else
405     if (!(s->base.tb->flags & FLAG_MASK_DAT)) {
406         return MMU_REAL_IDX;
407     }
408 
409     switch (s->base.tb->flags & FLAG_MASK_ASC) {
410     case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
411         return MMU_PRIMARY_IDX;
412     case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
413         return MMU_SECONDARY_IDX;
414     case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
415         return MMU_HOME_IDX;
416     default:
417         g_assert_not_reached();
418         break;
419     }
420 #endif
421 }
422 
/* Raise CPU exception EXCP via the exception helper. */
static void gen_exception(int excp)
{
    gen_helper_exception(tcg_env, tcg_constant_i32(excp));
}
427 
/*
 * Raise program exception CODE: record the code and instruction length
 * in env, sync PSW address and CC, then trigger EXCP_PGM.
 */
static void gen_program_exception(DisasContext *s, int code)
{
    /* Remember what pgm exception this was.  */
    tcg_gen_st_i32(tcg_constant_i32(code), tcg_env,
                   offsetof(CPUS390XState, int_pgm_code));

    tcg_gen_st_i32(tcg_constant_i32(s->ilen), tcg_env,
                   offsetof(CPUS390XState, int_pgm_ilen));

    /* update the psw */
    update_psw_addr(s);

    /* Save off cc.  */
    update_cc_op(s);

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);
}
446 
/* Raise an operation (illegal opcode) program exception. */
static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}
451 
/* Raise a data exception with data-exception code DXC. */
static inline void gen_data_exception(uint8_t dxc)
{
    gen_helper_data_exception(tcg_env, tcg_constant_i32(dxc));
}
456 
/* Raise the trap variant of the data exception (DXC 0xff). */
static inline void gen_trap(DisasContext *s)
{
    /* Set DXC to 0xff */
    gen_data_exception(0xff);
}
462 
463 static void gen_addi_and_wrap_i64(DisasContext *s, TCGv_i64 dst, TCGv_i64 src,
464                                   int64_t imm)
465 {
466     tcg_gen_addi_i64(dst, src, imm);
467     if (!(s->base.tb->flags & FLAG_MASK_64)) {
468         if (s->base.tb->flags & FLAG_MASK_32) {
469             tcg_gen_andi_i64(dst, dst, 0x7fffffff);
470         } else {
471             tcg_gen_andi_i64(dst, dst, 0x00ffffff);
472         }
473     }
474 }
475 
476 static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
477 {
478     TCGv_i64 tmp = tcg_temp_new_i64();
479 
480     /*
481      * Note that d2 is limited to 20 bits, signed.  If we crop negative
482      * displacements early we create larger immediate addends.
483      */
484     if (b2 && x2) {
485         tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
486         gen_addi_and_wrap_i64(s, tmp, tmp, d2);
487     } else if (b2) {
488         gen_addi_and_wrap_i64(s, tmp, regs[b2], d2);
489     } else if (x2) {
490         gen_addi_and_wrap_i64(s, tmp, regs[x2], d2);
491     } else if (!(s->base.tb->flags & FLAG_MASK_64)) {
492         if (s->base.tb->flags & FLAG_MASK_32) {
493             tcg_gen_movi_i64(tmp, d2 & 0x7fffffff);
494         } else {
495             tcg_gen_movi_i64(tmp, d2 & 0x00ffffff);
496         }
497     } else {
498         tcg_gen_movi_i64(tmp, d2);
499     }
500 
501     return tmp;
502 }
503 
504 static inline bool live_cc_data(DisasContext *s)
505 {
506     return (s->cc_op != CC_OP_DYNAMIC
507             && s->cc_op != CC_OP_STATIC
508             && s->cc_op > 3);
509 }
510 
/* Set the CC to the constant VAL (0..3), discarding any live cc data. */
static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}
520 
/* Record a one-operand CC computation: op(dst). */
static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}
530 
/* Record a two-operand CC computation: op(src, dst). */
static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}
541 
/* Record a three-operand CC computation: op(src, dst, vr). */
static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}
550 
/* CC reflects whether VAL is nonzero. */
static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}
555 
/* CC value is in env->cc_op */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}
566 
/* calculates cc into cc_op */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = NULL;
    TCGv_i64 dummy = NULL;

    /* First pass: materialize the constants each op class needs. */
    switch (s->cc_op) {
    default:
        /* Ops taking fewer than 3 args pad with a zero dummy. */
        dummy = tcg_constant_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
        local_cc_op = tcg_constant_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        /* No helper call needed; no constants to set up. */
        break;
    }

    /* Second pass: compute the CC into the cc_op global. */
    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
        /* Computed inline: cc = (cc_dst != 0). */
        tcg_gen_setcondi_i64(TCG_COND_NE, cc_dst, cc_dst, 0);
        tcg_gen_extrl_i64_i32(cc_op, cc_dst);
        break;
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
    case CC_OP_LCBB:
    case CC_OP_MULS_32:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, tcg_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ADDU:
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA:
    case CC_OP_SUBU:
    case CC_OP_NZ_F128:
    case CC_OP_VC:
    case CC_OP_MULS_64:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, tcg_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, tcg_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, tcg_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        g_assert_not_reached();
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
657 
658 static bool use_goto_tb(DisasContext *s, uint64_t dest)
659 {
660     if (unlikely(s->base.tb->flags & FLAG_MASK_PER)) {
661         return false;
662     }
663     return translator_use_goto_tb(&s->base, dest);
664 }
665 
/* Count a branch that could not be compiled inline (debug builds only). */
static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}
672 
/* Count a branch that was compiled inline (debug builds only). */
static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}
679 
/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.
   Indexed by the 4-bit branch mask; entries come in pairs because the
   low mask bit (CC=3) is a don't-care here.  */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};
692 
/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible.
   Indexed by the 4-bit branch mask; the two low mask bits are don't-cares.  */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER, TCG_COND_NEVER,      /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER,
    TCG_COND_NE, TCG_COND_NE,            /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE,
    TCG_COND_EQ, TCG_COND_EQ,            /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
705 
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.
   MASK bits select CCs: 8 -> CC0, 4 -> CC1, 2 -> CC2, 1 -> CC3.  */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    /* Always/never taken: no real comparison needed. */
    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op.  */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        /* Same table, but comparisons are unsigned. */
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            /* CC0: all selected bits zero. */
            cond = TCG_COND_TSTEQ;
            break;
        case 4 | 2 | 1:
            /* Not CC0: some selected bit set. */
            cond = TCG_COND_TSTNE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_TSTEQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_TSTNE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU:
    case CC_OP_SUBU:
        switch (mask) {
        case 8 | 2: /* result == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* result != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* !carry (borrow) */
            cond = old_cc_op == CC_OP_ADDU ? TCG_COND_EQ : TCG_COND_NE;
            break;
        case 2 | 1: /* carry (!borrow) */
            cond = old_cc_op == CC_OP_ADDU ? TCG_COND_NE : TCG_COND_EQ;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_constant_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_constant_i64(0);
        break;

    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        break;

    case CC_OP_ADDU:
    case CC_OP_SUBU:
        c->is_64 = true;
        c->u.s64.b = tcg_constant_i64(0);
        switch (mask) {
        case 8 | 2:
        case 4 | 1: /* result */
            c->u.s64.a = cc_dst;
            break;
        case 8 | 4:
        case 2 | 1: /* carry */
            c->u.s64.a = cc_src;
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case CC_OP_STATIC:
        /* Compare the materialized CC value against the mask. */
        c->is_64 = false;
        c->u.s32.a = cc_op;

        /* Fold half of the cases using bit 3 to invert. */
        switch (mask & 8 ? mask ^ 0xf : mask) {
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(3);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(2);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(1);
            break;
        case 0x2 | 0x1: /* cc == 2 || cc == 3 => cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_constant_i32(1);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_TSTNE;
            c->u.s32.b = tcg_constant_i32(1);
            break;
        case 0x4 | 0x2: /* cc == 1 || cc == 2 => (cc - 1) <= 1 */
            cond = TCG_COND_LEU;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_constant_i32(1);
            tcg_gen_addi_i32(c->u.s32.a, cc_op, -1);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_constant_i32(0);
            break;
        default:
            /* case 0: never, handled above. */
            g_assert_not_reached();
        }
        if (mask & 8) {
            cond = tcg_invert_cond(cond);
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
933 
934 /* ====================================================================== */
/* Define the insn format enumeration.  Each FMT_* value is generated from
   the F0..F6 entries in insn-format.h.inc; the Fn arity is the number of
   field descriptors, ignored here.  */
#define F0(N)                         FMT_##N,
#define F1(N, X1)                     F0(N)
#define F2(N, X1, X2)                 F0(N)
#define F3(N, X1, X2, X3)             F0(N)
#define F4(N, X1, X2, X3, X4)         F0(N)
#define F5(N, X1, X2, X3, X4, X5)     F0(N)
#define F6(N, X1, X2, X3, X4, X5, X6) F0(N)

typedef enum {
#include "insn-format.h.inc"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6
955 
/* This is the way fields are to be accessed out of DisasFields.  */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)
959 
960 static bool have_field1(const DisasContext *s, enum DisasFieldIndexO c)
961 {
962     return (s->fields.presentO >> c) & 1;
963 }
964 
/* Fetch the decoded value for field O (stored in compact slot C);
   the field must be present in this instruction's format. */
static int get_field1(const DisasContext *s, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(s, o));
    return s->fields.c[c];
}
971 
/* Describe the layout of each field in each format.  */
typedef struct DisasField {
    unsigned int beg:8;         /* bit position of the field in the insn */
    unsigned int size:8;        /* field width in bits */
    unsigned int type:2;        /* extraction type (see the R/M/V/I/L macros) */
    unsigned int indexC:6;      /* compact storage slot (FLD_C_*) */
    enum DisasFieldIndexO indexO:8; /* original index (FLD_O_*) */
} DisasField;

/* One entry per format: the layout of up to NUM_C_FIELD fields. */
typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;
984 
/* Field descriptor helpers: { beg, size, type, indexC, indexO }.
   Type 0 = unsigned, 1 = signed immediate, 2 = long displacement,
   3 = vector register (per the DisasField.type field usage below). */
#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
#define V(N, B)       {  B,  4, 3, FLD_C_v##N, FLD_O_v##N }
#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }

/* Reinterpret insn-format.h.inc a second time to build the layout table. */
#define F0(N)                     { { } },
#define F1(N, X1)                 { { X1 } },
#define F2(N, X1, X2)             { { X1, X2 } },
#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
#define F6(N, X1, X2, X3, X4, X5, X6)       { { X1, X2, X3, X4, X5, X6 } },

static const DisasFormatInfo format_info[] = {
#include "insn-format.h.inc"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6
#undef R
#undef M
#undef V
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L
1029 
/* Generally, we'll extract operands into this structures, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details.  */
typedef struct {
    TCGv_i64 out, out2, in1, in2;        /* 64-bit outputs and inputs */
    TCGv_i64 addr1;                      /* memory address of operand 1 */
    TCGv_i128 out_128, in1_128, in2_128; /* 128-bit variants */
} DisasOps;
1038 
1039 /* Instructions can place constraints on their operands, raising specification
1040    exceptions if they are violated.  To make this easy to automate, each "in1",
1041    "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1042    of the following, or 0.  To make this easy to document, we'll put the
1043    SPEC_<name> defines next to <name>.  */
1044 
#define SPEC_r1_even    1       /* r1 must be an even register number */
#define SPEC_r2_even    2       /* r2 must be an even register number */
#define SPEC_r3_even    4       /* r3 must be an even register number */
#define SPEC_r1_f128    8       /* r1 must name a valid f128 register pair */
#define SPEC_r2_f128    16      /* r2 must name a valid f128 register pair */
1050 
1051 /* Return values from translate_one, indicating the state of the TB.  */
1052 
1053 /* We are not using a goto_tb (for whatever reason), but have updated
1054    the PC (for whatever reason), so there's no need to do it again on
1055    exiting the TB.  */
1056 #define DISAS_PC_UPDATED        DISAS_TARGET_0
1057 
1058 /* We have updated the PC and CC values.  */
1059 #define DISAS_PC_CC_UPDATED     DISAS_TARGET_2
1060 
1061 
1062 /* Instruction flags */
1063 #define IF_AFP1     0x0001      /* r1 is a fp reg for HFP/FPS instructions */
1064 #define IF_AFP2     0x0002      /* r2 is a fp reg for HFP/FPS instructions */
1065 #define IF_AFP3     0x0004      /* r3 is a fp reg for HFP/FPS instructions */
1066 #define IF_BFP      0x0008      /* binary floating point instruction */
1067 #define IF_DFP      0x0010      /* decimal floating point instruction */
1068 #define IF_PRIV     0x0020      /* privileged instruction */
1069 #define IF_VEC      0x0040      /* vector instruction */
1070 #define IF_IO       0x0080      /* input/output instruction */
1071 
struct DisasInsn {
    unsigned opc:16;            /* opcode */
    unsigned flags:16;          /* IF_* flags above */
    DisasFormat fmt:8;          /* instruction format, indexes format_info */
    unsigned fac:8;             /* required facility */
    unsigned spec:8;            /* SPEC_* operand constraints */

    const char *name;           /* insn name, for diagnostics */

    /* Pre-process arguments before HELP_OP.  */
    void (*help_in1)(DisasContext *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasOps *);

    /*
     * Post-process output after HELP_OP.
     * Note that these are not called if HELP_OP returns DISAS_NORETURN.
     */
    void (*help_wout)(DisasContext *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);

    /* Implement the operation itself.  */
    DisasJumpType (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;              /* per-insn constant, read as s->insn->data */
};
1098 
1099 /* ====================================================================== */
1100 /* Miscellaneous helpers, used by several operations.  */
1101 
1102 static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest)
1103 {
1104     if (dest == s->pc_tmp) {
1105         per_branch(s, true);
1106         return DISAS_NEXT;
1107     }
1108     if (use_goto_tb(s, dest)) {
1109         update_cc_op(s);
1110         per_breaking_event(s);
1111         tcg_gen_goto_tb(0);
1112         tcg_gen_movi_i64(psw_addr, dest);
1113         tcg_gen_exit_tb(s->base.tb, 0);
1114         return DISAS_NORETURN;
1115     } else {
1116         tcg_gen_movi_i64(psw_addr, dest);
1117         per_branch(s, false);
1118         return DISAS_PC_UPDATED;
1119     }
1120 }
1121 
/*
 * Emit a (possibly conditional) branch per compare C.  The taken target
 * is either the fixed address s->base.pc_next + IMM * 2 (when IS_IMM)
 * or the computed address CDEST; the not-taken path continues at
 * s->pc_tmp.
 */
static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
                                 bool is_imm, int imm, TCGv_i64 cdest)
{
    DisasJumpType ret;
    uint64_t dest = s->base.pc_next + (int64_t)imm * 2;
    TCGLabel *lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = DISAS_NEXT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->pc_tmp) {
            /* Branch to next.  */
            per_branch(s, true);
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (!cdest) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            per_branch(s, false);
            ret = DISAS_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->pc_tmp)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            /* Branch taken.  */
            gen_set_label(lab);
            per_breaking_event(s);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb(s->base.tb, 1);

            ret = DISAS_NORETURN;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            per_breaking_event(s);
            ret = DISAS_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_constant_i64(s->pc_tmp);
        if (is_imm) {
            cdest = tcg_constant_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
        } else {
            /* Widen the 32-bit comparison result so a single 64-bit
               movcond can select between the two targets.  */
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_constant_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
        }

        ret = DISAS_PC_UPDATED;
    }

 egress:
    return ret;
}
1243 
1244 /* ====================================================================== */
1245 /* The operations.  These perform the bulk of the work for any insn,
1246    usually after the operands have been loaded and output initialized.  */
1247 
/* Integer absolute value: out = |in2|.  */
static DisasJumpType op_abs(DisasContext *s, DisasOps *o)
{
    tcg_gen_abs_i64(o->out, o->in2);
    return DISAS_NEXT;
}

/* float32 absolute value: clear bit 31 (the sign) and all higher bits.  */
static DisasJumpType op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return DISAS_NEXT;
}

/* float64 absolute value: clear the sign bit (bit 63).  */
static DisasJumpType op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return DISAS_NEXT;
}

/* float128 absolute value: clear the sign bit in the high half (in1);
   the low half (in2) passes through unchanged.  */
static DisasJumpType op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}
1272 
/* 64-bit addition: out = in1 + in2.  */
static DisasJumpType op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* Unsigned 64-bit addition that also leaves the carry-out (0 or 1)
   in cc_src for the CC_OP_ADDU condition-code computation.  */
static DisasJumpType op_addu64(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    return DISAS_NEXT;
}
1285 
/* Compute carry into cc_src. */
static void compute_carry(DisasContext *s)
{
    switch (s->cc_op) {
    case CC_OP_ADDU:
        /* The carry value is already in cc_src (1,0). */
        break;
    case CC_OP_SUBU:
        /* cc_src holds the borrow as 0/-1; +1 converts it to carry 1/0. */
        tcg_gen_addi_i64(cc_src, cc_src, 1);
        break;
    default:
        /* Materialize CC first, then extract the carry from it below. */
        gen_op_calc_cc(s);
        /* fall through */
    case CC_OP_STATIC:
        /* The carry flag is the msb of CC; compute into cc_src. */
        tcg_gen_extu_i32_i64(cc_src, cc_op);
        tcg_gen_shri_i64(cc_src, cc_src, 1);
        break;
    }
}
1306 
/* 32-bit add with carry: out = in1 + in2 + carry.  Carry-out is not
   tracked here; the CC computation works from the 33-bit-safe result. */
static DisasJumpType op_addc32(DisasContext *s, DisasOps *o)
{
    compute_carry(s);
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    tcg_gen_add_i64(o->out, o->out, cc_src);
    return DISAS_NEXT;
}

/* 64-bit add with carry: out = in1 + in2 + carry, with the new
   carry-out accumulated into cc_src across both additions.  */
static DisasJumpType op_addc64(DisasContext *s, DisasOps *o)
{
    compute_carry(s);

    TCGv_i64 zero = tcg_constant_i64(0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, zero);
    tcg_gen_add2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);

    return DISAS_NEXT;
}
1325 
/*
 * Add an immediate to storage.  With the interlocked-access facility
 * (STFLE bit 45) the update is a single atomic read-modify-write;
 * otherwise it is a plain load / add / store sequence.  s->insn->data
 * supplies the MemOp for the access.
 */
static DisasJumpType op_asi(DisasContext *s, DisasOps *o)
{
    bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);

    o->in1 = tcg_temp_new_i64();
    if (non_atomic) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    if (non_atomic) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
1347 
/*
 * Unsigned add-immediate to storage: like op_asi, but also leaves the
 * carry-out in cc_src for the unsigned CC computation.
 */
static DisasJumpType op_asiu64(DisasContext *s, DisasOps *o)
{
    bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);

    o->in1 = tcg_temp_new_i64();
    if (non_atomic) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);

    if (non_atomic) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
1370 
/* 32-bit BFP add, delegated to the helper.  */
static DisasJumpType op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* 64-bit BFP add.  */
static DisasJumpType op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* 128-bit BFP add.  */
static DisasJumpType op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out_128, tcg_env, o->in1_128, o->in2_128);
    return DISAS_NEXT;
}
1388 
/* Bitwise AND: out = in1 & in2.  */
static DisasJumpType op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
1394 
1395 static DisasJumpType op_andi(DisasContext *s, DisasOps *o)
1396 {
1397     int shift = s->insn->data & 0xff;
1398     int size = s->insn->data >> 8;
1399     uint64_t mask = ((1ull << size) - 1) << shift;
1400     TCGv_i64 t = tcg_temp_new_i64();
1401 
1402     tcg_gen_shli_i64(t, o->in2, shift);
1403     tcg_gen_ori_i64(t, t, ~mask);
1404     tcg_gen_and_i64(o->out, o->in1, t);
1405 
1406     /* Produce the CC from only the bits manipulated.  */
1407     tcg_gen_andi_i64(cc_dst, o->out, mask);
1408     set_cc_nz_u64(s, cc_dst);
1409     return DISAS_NEXT;
1410 }
1411 
/* AND with complement: out = in1 & ~in2.  */
static DisasJumpType op_andc(DisasContext *s, DisasOps *o)
{
    tcg_gen_andc_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* OR with complement: out = in1 | ~in2.  */
static DisasJumpType op_orc(DisasContext *s, DisasOps *o)
{
    tcg_gen_orc_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* NAND: out = ~(in1 & in2).  */
static DisasJumpType op_nand(DisasContext *s, DisasOps *o)
{
    tcg_gen_nand_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* NOR: out = ~(in1 | in2).  */
static DisasJumpType op_nor(DisasContext *s, DisasOps *o)
{
    tcg_gen_nor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* NOT XOR (equivalence): out = ~(in1 ^ in2).  */
static DisasJumpType op_nxor(DisasContext *s, DisasOps *o)
{
    tcg_gen_eqv_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
1441 
/*
 * AND an operand into storage.  With the interlocked-access facility
 * the memory update is a single atomic fetch-and; otherwise it is a
 * plain load / and / store.  s->insn->data supplies the MemOp.
 */
static DisasJumpType op_ni(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
1462 
/* Branch and save: write the return address to r1 (via o->out), then
   branch to in2 when a target was supplied.  */
static DisasJumpType op_bas(DisasContext *s, DisasOps *o)
{
    /* Save the return address before the branch is emitted.  */
    pc_to_link_info(o->out, s, s->pc_tmp);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        /* No branch target supplied: link only.  */
        return DISAS_NEXT;
    }
}
1474 
/*
 * Build BAL-style link information in o->out.  In 31- and 64-bit
 * addressing modes this is just the return address.  In 24-bit mode the
 * low word instead packs: bits 30-31 the instruction-length code,
 * bits 28-29 the condition code, bits 24-27 the PSW program mask, and
 * bits 0-23 the return address.
 */
static void save_link_info(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t;

    if (s->base.tb->flags & (FLAG_MASK_32 | FLAG_MASK_64)) {
        pc_to_link_info(o->out, s, s->pc_tmp);
        return;
    }
    /* CC must be up to date: it is merged into the link word below.  */
    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, 0xffffffff00000000ull);
    tcg_gen_ori_i64(o->out, o->out, ((s->ilen / 2) << 30) | s->pc_tmp);
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 16);
    tcg_gen_andi_i64(t, t, 0x0f000000);    /* program mask -> bits 24-27 */
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_gen_extu_i32_i64(t, cc_op);
    tcg_gen_shli_i64(t, t, 28);            /* CC -> bits 28-29 */
    tcg_gen_or_i64(o->out, o->out, t);
}
1494 
/* Branch and link: deposit full link info (see save_link_info) into r1,
   then branch to in2 when a target was supplied.  */
static DisasJumpType op_bal(DisasContext *s, DisasOps *o)
{
    save_link_info(s, o);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        /* No branch target supplied: link only.  */
        return DISAS_NEXT;
    }
}
1506 
/*
 * Disassemble the target of a branch. The results are returned in a form
 * suitable for passing into help_branch():
 *
 * - bool IS_IMM reflects whether the target is fixed or computed. Non-EXECUTEd
 *   branches, whose DisasContext *S contains the relative immediate field RI,
 *   are considered fixed. All the other branches are considered computed.
 * - int IMM is the value of RI.
 * - TCGv_i64 CDEST is the address of the computed target.
 *
 * Under EXECUTE (s->ex_value set) a relative branch is taken relative
 * to the EXECUTE target, so it is converted into a computed target
 * based on env->ex_target instead of a fixed immediate.
 */
#define disas_jdest(s, ri, is_imm, imm, cdest) do {                            \
    if (have_field(s, ri)) {                                                   \
        if (unlikely(s->ex_value)) {                                           \
            cdest = tcg_temp_new_i64();                                        \
            tcg_gen_ld_i64(cdest, tcg_env, offsetof(CPUS390XState, ex_target));\
            tcg_gen_addi_i64(cdest, cdest, (int64_t)get_field(s, ri) * 2);     \
            is_imm = false;                                                    \
        } else {                                                               \
            is_imm = true;                                                     \
        }                                                                      \
    } else {                                                                   \
        is_imm = false;                                                        \
    }                                                                          \
    imm = is_imm ? get_field(s, ri) : 0;                                       \
} while (false)
1532 
/* Branch and save with a relative or EXECUTE-computed target: save the
   link, then branch unconditionally via help_branch.  */
static DisasJumpType op_basi(DisasContext *s, DisasOps *o)
{
    DisasCompare c;
    bool is_imm;
    int imm;

    /* Save the return address before the branch is emitted.  */
    pc_to_link_info(o->out, s, s->pc_tmp);

    disas_jdest(s, i2, is_imm, imm, o->in2);
    disas_jcc(s, &c, 0xf);      /* mask 0xf: branch always */
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1545 
/* Branch on condition: m1 is the 4-bit mask of CC values that take the
   branch.  */
static DisasJumpType op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s, m1);
    DisasCompare c;
    bool is_imm;
    int imm;

    /* BCR with R2 = 0 causes no branching */
    if (have_field(s, r2) && get_field(s, r2) == 0) {
        if (m1 == 14) {
            /* Perform serialization */
            /* FIXME: check for fast-BCR-serialization facility */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        if (m1 == 15) {
            /* Perform serialization */
            /* FIXME: perform checkpoint-synchronisation */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        return DISAS_NEXT;
    }

    disas_jdest(s, i2, is_imm, imm, o->in2);
    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1572 
/* Branch on count (32-bit): decrement the low half of r1 and branch if
   the 32-bit result is nonzero.  */
static DisasJumpType op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    DisasCompare c;
    bool is_imm;
    TCGv_i64 t;
    int imm;

    c.cond = TCG_COND_NE;
    c.is_64 = false;

    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);    /* only the low 32 bits are written back */
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_constant_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);

    disas_jdest(s, i2, is_imm, imm, o->in2);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1594 
/* Branch on count high: decrement the high half of r1 and branch if the
   32-bit result is nonzero.  The target is always a relative immediate. */
static DisasJumpType op_bcth(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int imm = get_field(s, i2);
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;

    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, regs[r1], 32);
    tcg_gen_subi_i64(t, t, 1);
    store_reg32h_i64(r1, t);   /* only the high 32 bits are written back */
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_constant_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);

    return help_branch(s, &c, 1, imm, o->in2);
}
1615 
/* Branch on count (64-bit): decrement r1 in place and branch if the
   result is nonzero.  */
static DisasJumpType op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    DisasCompare c;
    bool is_imm;
    int imm;

    c.cond = TCG_COND_NE;
    c.is_64 = true;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_constant_i64(0);

    disas_jdest(s, i2, is_imm, imm, o->in2);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1633 
/*
 * Branch on index (32-bit): add r3 to r1 and compare the sum against
 * the odd register of the r3 pair (r3|1; an odd r3 compares against
 * itself).  insn->data selects the condition: LE when set, GT otherwise.
 */
static DisasJumpType op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    DisasCompare c;
    bool is_imm;
    TCGv_i64 t;
    int imm;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    /* Capture the comparand before r1 is overwritten below.  */
    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);

    disas_jdest(s, i2, is_imm, imm, o->in2);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1657 
/*
 * Branch on index (64-bit): add r3 to r1 and compare the sum against
 * the odd register of the r3 pair.  insn->data selects the condition:
 * LE when set, GT otherwise.
 */
static DisasJumpType op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    DisasCompare c;
    bool is_imm;
    int imm;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    /* If r1 is also the comparand register, copy its value first:
       the in-place add below would otherwise clobber it.  */
    if (r1 == (r3 | 1)) {
        c.u.s64.b = load_reg(r3 | 1);
    } else {
        c.u.s64.b = regs[r3 | 1];
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];

    disas_jdest(s, i2, is_imm, imm, o->in2);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1681 
/*
 * Compare and branch: compare in1 with in2 under the m3 condition mask
 * and branch on the result.  insn->data selects the unsigned (logical)
 * comparison variant.
 */
static DisasJumpType op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    /* o->out is reused to carry the branch target; no result is written. */
    o->out = NULL;
    disas_jdest(s, i4, is_imm, imm, o->out);
    if (!is_imm && !o->out) {
        /* No relative field: the target is base + displacement (b4/d4). */
        imm = 0;
        o->out = get_address(s, 0, get_field(s, b4),
                             get_field(s, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}
1706 
/* 32-bit BFP compare: the helper computes the CC directly.  */
static DisasJumpType op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, tcg_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* 64-bit BFP compare.  */
static DisasJumpType op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, tcg_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* 128-bit BFP compare.  */
static DisasJumpType op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, tcg_env, o->in1_128, o->in2_128);
    set_cc_static(s);
    return DISAS_NEXT;
}
1727 
/*
 * Extract and validate the m3 (rounding mode) and m4 modifier fields of
 * a floating-point instruction.  Fields that only exist with the
 * floating-point-extension facility are forced to 0 when FPE is absent.
 * Returns m3 and m4 packed into one i32 (m4 in bits 4-7), or NULL after
 * raising a specification exception for an invalid rounding mode.
 */
static TCGv_i32 fpinst_extract_m34(DisasContext *s, bool m3_with_fpe,
                                   bool m4_with_fpe)
{
    const bool fpe = s390_has_feat(S390_FEAT_FLOATING_POINT_EXT);
    uint8_t m3 = get_field(s, m3);
    uint8_t m4 = get_field(s, m4);

    /* m3 field was introduced with FPE */
    if (!fpe && m3_with_fpe) {
        m3 = 0;
    }
    /* m4 field was introduced with FPE */
    if (!fpe && m4_with_fpe) {
        m4 = 0;
    }

    /* Check for valid rounding modes. Mode 3 was introduced later. */
    if (m3 == 2 || m3 > 7 || (!fpe && m3 == 3)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return NULL;
    }

    return tcg_constant_i32(deposit32(m3, 4, 4, m4));
}
1752 
/*
 * Conversions from BFP to signed 32/64-bit integers.  The rounding mode
 * comes from m3 (packed with m4 by fpinst_extract_m34); the helper also
 * produces the condition code.
 */

/* float32 -> int32 */
static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfeb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* float64 -> int32 */
static DisasJumpType op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfdb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* float128 -> int32 */
static DisasJumpType op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfxb(o->out, tcg_env, o->in2_128, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* float32 -> int64 */
static DisasJumpType op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgeb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* float64 -> int64 */
static DisasJumpType op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgdb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* float128 -> int64 */
static DisasJumpType op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgxb(o->out, tcg_env, o->in2_128, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
1824 
/*
 * Conversions from BFP to unsigned 32/64-bit integers, rounding mode
 * from m3.  The helper also produces the condition code.
 */

/* float32 -> uint32 */
static DisasJumpType op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfeb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* float64 -> uint32 */
static DisasJumpType op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfdb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* float128 -> uint32 */
static DisasJumpType op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfxb(o->out, tcg_env, o->in2_128, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* float32 -> uint64 */
static DisasJumpType op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgeb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* float64 -> uint64 */
static DisasJumpType op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgdb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* float128 -> uint64 */
static DisasJumpType op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgxb(o->out, tcg_env, o->in2_128, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
1896 
/*
 * Conversions from 64-bit integers to BFP.  cegb/cdgb/cxgb take signed
 * input (their m3 rounding field arrived with the FPE facility);
 * celgb/cdlgb/cxlgb take unsigned input.  No condition code is set.
 */

/* int64 -> float32 */
static DisasJumpType op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cegb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

/* int64 -> float64 */
static DisasJumpType op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cdgb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

/* int64 -> float128 */
static DisasJumpType op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cxgb(o->out_128, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

/* uint64 -> float32 */
static DisasJumpType op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_celgb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

/* uint64 -> float64 */
static DisasJumpType op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cdlgb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

/* uint64 -> float128 */
static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cxlgb(o->out_128, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}
1962 
1963 static DisasJumpType op_cksm(DisasContext *s, DisasOps *o)
1964 {
1965     int r2 = get_field(s, r2);
1966     TCGv_i128 pair = tcg_temp_new_i128();
1967     TCGv_i64 len = tcg_temp_new_i64();
1968 
1969     gen_helper_cksm(pair, tcg_env, o->in1, o->in2, regs[r2 + 1]);
1970     set_cc_static(s);
1971     tcg_gen_extr_i128_i64(o->out, len, pair);
1972 
1973     tcg_gen_add_i64(regs[r2], regs[r2], len);
1974     tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
1975 
1976     return DISAS_NEXT;
1977 }
1978 
/*
 * COMPARE LOGICAL (character).  For power-of-two lengths up to 8 bytes,
 * emit two inline loads and an unsigned compare; otherwise call the
 * byte-by-byte helper.
 */
static DisasJumpType op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s, l1);
    TCGv_i64 src;
    TCGv_i32 vl;
    MemOp mop;

    switch (l + 1) {
    case 1:
    case 2:
    case 4:
    case 8:
        /* ctz32 of the length gives the MO_8..MO_64 size code.  */
        mop = ctz32(l + 1) | MO_TE;
        /* Do not update cc_src yet: loading cc_dst may cause an exception. */
        src = tcg_temp_new_i64();
        tcg_gen_qemu_ld_tl(src, o->addr1, get_mem_index(s), mop);
        tcg_gen_qemu_ld_tl(cc_dst, o->in2, get_mem_index(s), mop);
        gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, src, cc_dst);
        return DISAS_NEXT;
    default:
        vl = tcg_constant_i32(l);
        gen_helper_clc(cc_op, tcg_env, vl, o->addr1, o->in2);
        set_cc_static(s);
        return DISAS_NEXT;
    }
}
2005 
2006 static DisasJumpType op_clcl(DisasContext *s, DisasOps *o)
2007 {
2008     int r1 = get_field(s, r1);
2009     int r2 = get_field(s, r2);
2010     TCGv_i32 t1, t2;
2011 
2012     /* r1 and r2 must be even.  */
2013     if (r1 & 1 || r2 & 1) {
2014         gen_program_exception(s, PGM_SPECIFICATION);
2015         return DISAS_NORETURN;
2016     }
2017 
2018     t1 = tcg_constant_i32(r1);
2019     t2 = tcg_constant_i32(r2);
2020     gen_helper_clcl(cc_op, tcg_env, t1, t2);
2021     set_cc_static(s);
2022     return DISAS_NEXT;
2023 }
2024 
2025 static DisasJumpType op_clcle(DisasContext *s, DisasOps *o)
2026 {
2027     int r1 = get_field(s, r1);
2028     int r3 = get_field(s, r3);
2029     TCGv_i32 t1, t3;
2030 
2031     /* r1 and r3 must be even.  */
2032     if (r1 & 1 || r3 & 1) {
2033         gen_program_exception(s, PGM_SPECIFICATION);
2034         return DISAS_NORETURN;
2035     }
2036 
2037     t1 = tcg_constant_i32(r1);
2038     t3 = tcg_constant_i32(r3);
2039     gen_helper_clcle(cc_op, tcg_env, t1, o->in2, t3);
2040     set_cc_static(s);
2041     return DISAS_NEXT;
2042 }
2043 
2044 static DisasJumpType op_clclu(DisasContext *s, DisasOps *o)
2045 {
2046     int r1 = get_field(s, r1);
2047     int r3 = get_field(s, r3);
2048     TCGv_i32 t1, t3;
2049 
2050     /* r1 and r3 must be even.  */
2051     if (r1 & 1 || r3 & 1) {
2052         gen_program_exception(s, PGM_SPECIFICATION);
2053         return DISAS_NORETURN;
2054     }
2055 
2056     t1 = tcg_constant_i32(r1);
2057     t3 = tcg_constant_i32(r3);
2058     gen_helper_clclu(cc_op, tcg_env, t1, o->in2, t3);
2059     set_cc_static(s);
2060     return DISAS_NEXT;
2061 }
2062 
2063 static DisasJumpType op_clm(DisasContext *s, DisasOps *o)
2064 {
2065     TCGv_i32 m3 = tcg_constant_i32(get_field(s, m3));
2066     TCGv_i32 t1 = tcg_temp_new_i32();
2067 
2068     tcg_gen_extrl_i64_i32(t1, o->in1);
2069     gen_helper_clm(cc_op, tcg_env, t1, m3, o->in2);
2070     set_cc_static(s);
2071     return DISAS_NEXT;
2072 }
2073 
/*
 * COMPARE LOGICAL STRING.  R0 holds the terminator byte.  The helper
 * returns the two (possibly advanced) operand addresses packed into a
 * 128-bit pair, which are unpacked back into in2 (low) and in1 (high).
 */
static DisasJumpType op_clst(DisasContext *s, DisasOps *o)
{
    TCGv_i128 pair = tcg_temp_new_i128();

    gen_helper_clst(pair, tcg_env, regs[0], o->in1, o->in2);
    tcg_gen_extr_i128_i64(o->in2, o->in1, pair);

    set_cc_static(s);
    return DISAS_NEXT;
}
2084 
2085 static DisasJumpType op_cps(DisasContext *s, DisasOps *o)
2086 {
2087     TCGv_i64 t = tcg_temp_new_i64();
2088     tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
2089     tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
2090     tcg_gen_or_i64(o->out, o->out, t);
2091     return DISAS_NEXT;
2092 }
2093 
/* COMPARE AND SWAP (32/64-bit; size encoded in insn->data).  */
static DisasJumpType op_cs(DisasContext *s, DisasOps *o)
{
    int d2 = get_field(s, d2);
    int b2 = get_field(s, b2);
    TCGv_i64 addr, cc;

    /* Note that in1 = R3 (new value) and
       in2 = (zero-extended) R1 (expected value).  */

    addr = get_address(s, 0, b2, d2);
    tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
                               get_mem_index(s), s->insn->data | MO_ALIGN);

    /* Are the memory and expected values (un)equal?  Note that this setcond
       produces the output CC value, thus the NE sense of the test.  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
    tcg_gen_extrl_i64_i32(cc_op, cc);
    set_cc_static(s);

    return DISAS_NEXT;
}
2116 
/* COMPARE DOUBLE AND SWAP (128-bit, CDSG).  */
static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);

    o->out_128 = tcg_temp_new_i128();
    tcg_gen_concat_i64_i128(o->out_128, regs[r1 + 1], regs[r1]);

    /* Note out (R1:R1+1) = expected value and in2 (R3:R3+1) = new value.  */
    tcg_gen_atomic_cmpxchg_i128(o->out_128, o->addr1, o->out_128, o->in2_128,
                                get_mem_index(s), MO_BE | MO_128 | MO_ALIGN);

    /*
     * Extract result into cc_dst:cc_src, compare vs the expected value
     * in the as yet unmodified input registers, then update CC_OP.
     */
    tcg_gen_extr_i128_i64(cc_src, cc_dst, o->out_128);
    tcg_gen_xor_i64(cc_dst, cc_dst, regs[r1]);
    tcg_gen_xor_i64(cc_src, cc_src, regs[r1 + 1]);
    /* Fold both halves: any set bit means the compare failed (CC != 0).  */
    tcg_gen_or_i64(cc_dst, cc_dst, cc_src);
    set_cc_nz_u64(s, cc_dst);

    return DISAS_NEXT;
}
2140 
2141 static DisasJumpType op_csst(DisasContext *s, DisasOps *o)
2142 {
2143     int r3 = get_field(s, r3);
2144     TCGv_i32 t_r3 = tcg_constant_i32(r3);
2145 
2146     if (tb_cflags(s->base.tb) & CF_PARALLEL) {
2147         gen_helper_csst_parallel(cc_op, tcg_env, t_r3, o->addr1, o->in2);
2148     } else {
2149         gen_helper_csst(cc_op, tcg_env, t_r3, o->addr1, o->in2);
2150     }
2151 
2152     set_cc_static(s);
2153     return DISAS_NEXT;
2154 }
2155 
2156 #ifndef CONFIG_USER_ONLY
/* COMPARE AND SWAP AND PURGE (privileged).  */
static DisasJumpType op_csp(DisasContext *s, DisasOps *o)
{
    MemOp mop = s->insn->data;
    TCGv_i64 addr, old, cc;
    TCGLabel *lab = gen_new_label();

    /* Note that in1 = R1 (zero-extended expected value),
       out = R1 (original reg), out2 = R1+1 (new value).  */

    addr = tcg_temp_new_i64();
    old = tcg_temp_new_i64();
    /* Mask off the low address bits that encode flags (see below).  */
    tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
    tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
                               get_mem_index(s), mop | MO_ALIGN);

    /* Are the memory and expected values (un)equal?  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
    tcg_gen_extrl_i64_i32(cc_op, cc);

    /* Write back the output now, so that it happens before the
       following branch, so that we don't need local temps.  */
    if ((mop & MO_SIZE) == MO_32) {
        tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
    } else {
        tcg_gen_mov_i64(o->out, old);
    }

    /* If the comparison was equal, and the LSB of R2 was set,
       then we need to flush the TLB (for all cpus).  */
    tcg_gen_xori_i64(cc, cc, 1);
    tcg_gen_and_i64(cc, cc, o->in2);
    tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);

    gen_helper_purge(tcg_env);
    gen_set_label(lab);

    return DISAS_NEXT;
}
2196 #endif
2197 
2198 static DisasJumpType op_cvb(DisasContext *s, DisasOps *o)
2199 {
2200     TCGv_i64 t = tcg_temp_new_i64();
2201     tcg_gen_qemu_ld_i64(t, o->addr1, get_mem_index(s), MO_TEUQ);
2202     gen_helper_cvb(tcg_env, tcg_constant_i32(get_field(s, r1)), t);
2203     return DISAS_NEXT;
2204 }
2205 
2206 static DisasJumpType op_cvbg(DisasContext *s, DisasOps *o)
2207 {
2208     TCGv_i128 t = tcg_temp_new_i128();
2209     tcg_gen_qemu_ld_i128(t, o->addr1, get_mem_index(s), MO_TE | MO_128);
2210     gen_helper_cvbg(o->out, tcg_env, t);
2211     return DISAS_NEXT;
2212 }
2213 
2214 static DisasJumpType op_cvd(DisasContext *s, DisasOps *o)
2215 {
2216     TCGv_i64 t1 = tcg_temp_new_i64();
2217     TCGv_i32 t2 = tcg_temp_new_i32();
2218     tcg_gen_extrl_i64_i32(t2, o->in1);
2219     gen_helper_cvd(t1, t2);
2220     tcg_gen_qemu_st_i64(t1, o->in2, get_mem_index(s), MO_TEUQ);
2221     return DISAS_NEXT;
2222 }
2223 
2224 static DisasJumpType op_cvdg(DisasContext *s, DisasOps *o)
2225 {
2226     TCGv_i128 t = tcg_temp_new_i128();
2227     gen_helper_cvdg(t, o->in1);
2228     tcg_gen_qemu_st_i128(t, o->in2, get_mem_index(s), MO_TE | MO_128);
2229     return DISAS_NEXT;
2230 }
2231 
2232 static DisasJumpType op_ct(DisasContext *s, DisasOps *o)
2233 {
2234     int m3 = get_field(s, m3);
2235     TCGLabel *lab = gen_new_label();
2236     TCGCond c;
2237 
2238     c = tcg_invert_cond(ltgt_cond[m3]);
2239     if (s->insn->data) {
2240         c = tcg_unsigned_cond(c);
2241     }
2242     tcg_gen_brcond_i64(c, o->in1, o->in2, lab);
2243 
2244     /* Trap.  */
2245     gen_trap(s);
2246 
2247     gen_set_label(lab);
2248     return DISAS_NEXT;
2249 }
2250 
/*
 * CONVERT UTF (CU12/CU14/CU21/CU24/CU41/CU42).  insn->data encodes the
 * source/destination widths as a two-digit decimal (e.g. 12 = UTF-8 to
 * UTF-16).  m3 is the well-formedness-check flag, only honored when the
 * ETF3 enhancement facility is installed.
 */
static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i32 tr1, tr2, chk;

    /* R1 and R2 must both be even.  */
    if ((r1 | r2) & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
        m3 = 0;
    }

    tr1 = tcg_constant_i32(r1);
    tr2 = tcg_constant_i32(r2);
    chk = tcg_constant_i32(m3);

    switch (s->insn->data) {
    case 12:
        gen_helper_cu12(cc_op, tcg_env, tr1, tr2, chk);
        break;
    case 14:
        gen_helper_cu14(cc_op, tcg_env, tr1, tr2, chk);
        break;
    case 21:
        gen_helper_cu21(cc_op, tcg_env, tr1, tr2, chk);
        break;
    case 24:
        gen_helper_cu24(cc_op, tcg_env, tr1, tr2, chk);
        break;
    case 41:
        gen_helper_cu41(cc_op, tcg_env, tr1, tr2, chk);
        break;
    case 42:
        gen_helper_cu42(cc_op, tcg_env, tr1, tr2, chk);
        break;
    default:
        g_assert_not_reached();
    }

    set_cc_static(s);
    return DISAS_NEXT;
}
2297 
2298 #ifndef CONFIG_USER_ONLY
2299 static DisasJumpType op_diag(DisasContext *s, DisasOps *o)
2300 {
2301     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
2302     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
2303     TCGv_i32 func_code = tcg_constant_i32(get_field(s, i2));
2304 
2305     gen_helper_diag(tcg_env, r1, r3, func_code);
2306     return DISAS_NEXT;
2307 }
2308 #endif
2309 
/* DIVIDE (signed 32-bit).  The helper packs the two 32-bit results into
   one i64; extr32 splits them into out2 (low half) and out (high half).  */
static DisasJumpType op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out, tcg_env, o->in1, o->in2);
    tcg_gen_extr32_i64(o->out2, o->out, o->out);
    return DISAS_NEXT;
}

/* DIVIDE LOGICAL (unsigned 32-bit); same packing as op_divs32.  */
static DisasJumpType op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out, tcg_env, o->in1, o->in2);
    tcg_gen_extr32_i64(o->out2, o->out, o->out);
    return DISAS_NEXT;
}

/* DIVIDE SINGLE (signed 64-bit).  The helper returns a 128-bit pair,
   unpacked into out2 (low) and out (high).  */
static DisasJumpType op_divs64(DisasContext *s, DisasOps *o)
{
    TCGv_i128 t = tcg_temp_new_i128();

    gen_helper_divs64(t, tcg_env, o->in1, o->in2);
    tcg_gen_extr_i128_i64(o->out2, o->out, t);
    return DISAS_NEXT;
}

/* DIVIDE LOGICAL (unsigned 64-bit): 128-bit dividend in out:out2.  */
static DisasJumpType op_divu64(DisasContext *s, DisasOps *o)
{
    TCGv_i128 t = tcg_temp_new_i128();

    gen_helper_divu64(t, tcg_env, o->out, o->out2, o->in2);
    tcg_gen_extr_i128_i64(o->out2, o->out, t);
    return DISAS_NEXT;
}
2341 
/* DIVIDE (short BFP).  */
static DisasJumpType op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* DIVIDE (long BFP).  */
static DisasJumpType op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* DIVIDE (extended, 128-bit BFP).  */
static DisasJumpType op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out_128, tcg_env, o->in1_128, o->in2_128);
    return DISAS_NEXT;
}
2359 
2360 static DisasJumpType op_ear(DisasContext *s, DisasOps *o)
2361 {
2362     int r2 = get_field(s, r2);
2363     tcg_gen_ld32u_i64(o->out, tcg_env, offsetof(CPUS390XState, aregs[r2]));
2364     return DISAS_NEXT;
2365 }
2366 
/* EXTRACT CPU ATTRIBUTE.  */
static DisasJumpType op_ecag(DisasContext *s, DisasOps *o)
{
    /* No cache information provided.  */
    tcg_gen_movi_i64(o->out, -1);
    return DISAS_NEXT;
}

/* EXTRACT FPC: read the floating-point control register.  */
static DisasJumpType op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, tcg_env, offsetof(CPUS390XState, fpc));
    return DISAS_NEXT;
}
2379 
/* EXTRACT PSW: store the high PSW word (with current CC inserted) into
   R1, and the low word into R2 (unless R2 is 0).  */
static DisasJumpType op_epsw(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t_cc = tcg_temp_new_i64();

    /* Note the "subsequently" in the PoO, which implies a defined result
       if r1 == r2.  Thus we cannot defer these writes to an output hook.  */
    gen_op_calc_cc(s);
    tcg_gen_extu_i32_i64(t_cc, cc_op);
    tcg_gen_shri_i64(t, psw_mask, 32);
    /* Insert the CC value into bits 12-13 of the extracted word.  */
    tcg_gen_deposit_i64(t, t, t_cc, 12, 2);
    store_reg32_i64(r1, t);
    if (r2 != 0) {
        store_reg32_i64(r2, psw_mask);
    }
    return DISAS_NEXT;
}
2399 
/* EXECUTE: run the instruction at the second-operand address, with its
   second byte optionally OR-ed with bits from R1.  */
static DisasJumpType op_ex(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    TCGv_i32 ilen;
    TCGv_i64 v1;

    /* Nested EXECUTE is not allowed.  */
    if (unlikely(s->ex_value)) {
        gen_program_exception(s, PGM_EXECUTE);
        return DISAS_NORETURN;
    }

    /* The helper may longjmp / redirect execution; make the PC and CC
       state in env current before calling it.  */
    update_psw_addr(s);
    update_cc_op(s);

    /* R1 == 0 means no modification of the target instruction.  */
    if (r1 == 0) {
        v1 = tcg_constant_i64(0);
    } else {
        v1 = regs[r1];
    }

    ilen = tcg_constant_i32(s->ilen);
    gen_helper_ex(tcg_env, ilen, v1, o->in2);

    return DISAS_PC_CC_UPDATED;
}
2426 
/* LOAD FP INTEGER (short BFP): round to an integral value.  */
static DisasJumpType op_fieb(DisasContext *s, DisasOps *o)
{
    /* Decode rounding fields; NULL means an exception was queued.  */
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fieb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

/* LOAD FP INTEGER (long BFP).  */
static DisasJumpType op_fidb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fidb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

/* LOAD FP INTEGER (extended, 128-bit BFP).  */
static DisasJumpType op_fixb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fixb(o->out_128, tcg_env, o->in2_128, m34);
    return DISAS_NEXT;
}
2459 
/* FIND LEFTMOST ONE.  */
static DisasJumpType op_flogr(DisasContext *s, DisasOps *o)
{
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64.  It also lets cc_dst be a convenient
       temporary during our computation.  */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64.  */
    tcg_gen_clzi_i64(o->out, o->in2, 64);

    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
       value by 64, which is undefined.  But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing.  */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out)
;
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
    return DISAS_NEXT;
}
2479 
/*
 * INSERT CHARACTERS UNDER MASK.  Each set bit in m3 selects one byte of
 * the register to replace from successive memory bytes.  insn->data is
 * the bit offset of the 32-bit field being operated on (0 or 32).
 */
static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    int pos, len, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();
    uint64_t ccm;

    /* Contiguous masks allow a single wide load + deposit; anything else
       falls through to the byte-by-byte default case.  */
    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit load.  */
        tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_TEUL);
        len = 32;
        goto one_insert;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit load.  */
        tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_TEUW);
        len = 16;
        goto one_insert;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit load.  */
        tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_UB);
        len = 8;
        goto one_insert;

    one_insert:
        pos = base + ctz32(m3) * 8;
        tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
        /* ccm marks which output bits participate in the CC computation. */
        ccm = ((1ull << len) - 1) << pos;
        break;

    case 0:
        /* Recognize access exceptions for the first byte.  */
        tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_UB);
        gen_op_movi_cc(s, 0);
        return DISAS_NEXT;

    default:
        /* This is going to be a sequence of loads and inserts.  */
        pos = base + 32 - 8;
        ccm = 0;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_UB);
                tcg_gen_addi_i64(o->in2, o->in2, 1);
                tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
                ccm |= 0xffull << pos;
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }

    tcg_gen_movi_i64(tmp, ccm);
    gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
    return DISAS_NEXT;
}
2544 
2545 static DisasJumpType op_insi(DisasContext *s, DisasOps *o)
2546 {
2547     int shift = s->insn->data & 0xff;
2548     int size = s->insn->data >> 8;
2549     tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
2550     return DISAS_NEXT;
2551 }
2552 
/* INSERT PROGRAM MASK: deposit the program mask and current CC into
   bits 24-31 of R1, leaving the rest of the register unchanged.  */
static DisasJumpType op_ipm(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    /* Materialize the CC value before reading it.  */
    gen_op_calc_cc(s);
    t1 = tcg_temp_new_i64();
    /* Program mask lives in psw_mask bits 40-43.  */
    tcg_gen_extract_i64(t1, psw_mask, 40, 4);
    t2 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(t2, cc_op);
    tcg_gen_deposit_i64(t1, t1, t2, 4, 60);
    tcg_gen_deposit_i64(o->out, o->out, t1, 24, 8);
    return DISAS_NEXT;
}
2566 
2567 #ifndef CONFIG_USER_ONLY
2568 static DisasJumpType op_idte(DisasContext *s, DisasOps *o)
2569 {
2570     TCGv_i32 m4;
2571 
2572     if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2573         m4 = tcg_constant_i32(get_field(s, m4));
2574     } else {
2575         m4 = tcg_constant_i32(0);
2576     }
2577     gen_helper_idte(tcg_env, o->in1, o->in2, m4);
2578     return DISAS_NEXT;
2579 }
2580 
2581 static DisasJumpType op_ipte(DisasContext *s, DisasOps *o)
2582 {
2583     TCGv_i32 m4;
2584 
2585     if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2586         m4 = tcg_constant_i32(get_field(s, m4));
2587     } else {
2588         m4 = tcg_constant_i32(0);
2589     }
2590     gen_helper_ipte(tcg_env, o->in1, o->in2, m4);
2591     return DISAS_NEXT;
2592 }
2593 
/* INSERT STORAGE KEY EXTENDED.  */
static DisasJumpType op_iske(DisasContext *s, DisasOps *o)
{
    gen_helper_iske(o->out, tcg_env, o->in2);
    return DISAS_NEXT;
}
2599 #endif
2600 
2601 static DisasJumpType op_msa(DisasContext *s, DisasOps *o)
2602 {
2603     int r1 = have_field(s, r1) ? get_field(s, r1) : 0;
2604     int r2 = have_field(s, r2) ? get_field(s, r2) : 0;
2605     int r3 = have_field(s, r3) ? get_field(s, r3) : 0;
2606     TCGv_i32 t_r1, t_r2, t_r3, type;
2607 
2608     switch (s->insn->data) {
2609     case S390_FEAT_TYPE_KMA:
2610         if (r3 == r1 || r3 == r2) {
2611             gen_program_exception(s, PGM_SPECIFICATION);
2612             return DISAS_NORETURN;
2613         }
2614         /* FALL THROUGH */
2615     case S390_FEAT_TYPE_KMCTR:
2616         if (r3 & 1 || !r3) {
2617             gen_program_exception(s, PGM_SPECIFICATION);
2618             return DISAS_NORETURN;
2619         }
2620         /* FALL THROUGH */
2621     case S390_FEAT_TYPE_PPNO:
2622     case S390_FEAT_TYPE_KMF:
2623     case S390_FEAT_TYPE_KMC:
2624     case S390_FEAT_TYPE_KMO:
2625     case S390_FEAT_TYPE_KM:
2626         if (r1 & 1 || !r1) {
2627             gen_program_exception(s, PGM_SPECIFICATION);
2628             return DISAS_NORETURN;
2629         }
2630         /* FALL THROUGH */
2631     case S390_FEAT_TYPE_KMAC:
2632     case S390_FEAT_TYPE_KIMD:
2633     case S390_FEAT_TYPE_KLMD:
2634         if (r2 & 1 || !r2) {
2635             gen_program_exception(s, PGM_SPECIFICATION);
2636             return DISAS_NORETURN;
2637         }
2638         /* FALL THROUGH */
2639     case S390_FEAT_TYPE_PCKMO:
2640     case S390_FEAT_TYPE_PCC:
2641         break;
2642     default:
2643         g_assert_not_reached();
2644     };
2645 
2646     t_r1 = tcg_constant_i32(r1);
2647     t_r2 = tcg_constant_i32(r2);
2648     t_r3 = tcg_constant_i32(r3);
2649     type = tcg_constant_i32(s->insn->data);
2650     gen_helper_msa(cc_op, tcg_env, t_r1, t_r2, t_r3, type);
2651     set_cc_static(s);
2652     return DISAS_NEXT;
2653 }
2654 
/* COMPARE AND SIGNAL (short BFP).  */
static DisasJumpType op_keb(DisasContext *s, DisasOps *o)
{
    gen_helper_keb(cc_op, tcg_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* COMPARE AND SIGNAL (long BFP).  */
static DisasJumpType op_kdb(DisasContext *s, DisasOps *o)
{
    gen_helper_kdb(cc_op, tcg_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* COMPARE AND SIGNAL (extended, 128-bit BFP).  */
static DisasJumpType op_kxb(DisasContext *s, DisasOps *o)
{
    gen_helper_kxb(cc_op, tcg_env, o->in1_128, o->in2_128);
    set_cc_static(s);
    return DISAS_NEXT;
}
2675 
/* Common code for LOAD AND ADD; addu64 selects carry-producing unsigned
   64-bit addition for the CC computation.  */
static DisasJumpType help_laa(DisasContext *s, DisasOps *o, bool addu64)
{
    /* The real output is the original value in memory, which the atomic
       fetch-add returns into in2.  */
    tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the addition for setting CC.  */
    if (addu64) {
        tcg_gen_movi_i64(cc_src, 0);
        tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    } else {
        tcg_gen_add_i64(o->out, o->in1, o->in2);
    }
    return DISAS_NEXT;
}
2691 
/* LOAD AND ADD (signed CC).  */
static DisasJumpType op_laa(DisasContext *s, DisasOps *o)
{
    return help_laa(s, o, false);
}

/* LOAD AND ADD LOGICAL (carry-based CC).  */
static DisasJumpType op_laa_addu64(DisasContext *s, DisasOps *o)
{
    return help_laa(s, o, true);
}
2701 
/* LOAD AND AND.  */
static DisasJumpType op_lan(DisasContext *s, DisasOps *o)
{
    /* The real output is the original value in memory, which the atomic
       fetch-and returns into in2.  */
    tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* LOAD AND OR.  */
static DisasJumpType op_lao(DisasContext *s, DisasOps *o)
{
    /* The real output is the original value in memory, which the atomic
       fetch-or returns into in2.  */
    tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* LOAD AND EXCLUSIVE OR.  */
static DisasJumpType op_lax(DisasContext *s, DisasOps *o)
{
    /* The real output is the original value in memory, which the atomic
       fetch-xor returns into in2.  */
    tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
2734 
/* LOAD LENGTHENED: short BFP to long BFP.  */
static DisasJumpType op_ldeb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldeb(o->out, tcg_env, o->in2);
    return DISAS_NEXT;
}

/* LOAD ROUNDED: long BFP to short BFP.  */
static DisasJumpType op_ledb(DisasContext *s, DisasOps *o)
{
    /* Decode rounding fields; NULL means an exception was queued.  */
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_ledb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

/* LOAD ROUNDED: extended (128-bit) BFP to long BFP.  */
static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_ldxb(o->out, tcg_env, o->in2_128, m34);
    return DISAS_NEXT;
}

/* LOAD ROUNDED: extended (128-bit) BFP to short BFP.  */
static DisasJumpType op_lexb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_lexb(o->out, tcg_env, o->in2_128, m34);
    return DISAS_NEXT;
}

/* LOAD LENGTHENED: long BFP to extended (128-bit) BFP.  */
static DisasJumpType op_lxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxdb(o->out_128, tcg_env, o->in2);
    return DISAS_NEXT;
}

/* LOAD LENGTHENED: short BFP to extended (128-bit) BFP.  */
static DisasJumpType op_lxeb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxeb(o->out_128, tcg_env, o->in2);
    return DISAS_NEXT;
}
2785 
/* Place a 32-bit value into the high half of a 64-bit FP register
   (short FP values live in the leftmost 32 bits).  */
static DisasJumpType op_lde(DisasContext *s, DisasOps *o)
{
    tcg_gen_shli_i64(o->out, o->in2, 32);
    return DISAS_NEXT;
}

/* LOAD LOGICAL THIRTY ONE BITS: keep only bits 33-63.  */
static DisasJumpType op_llgt(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return DISAS_NEXT;
}
2797 
/* Load a sign-extended byte.  */
static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_SB);
    return DISAS_NEXT;
}

/* Load a zero-extended byte.  */
static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_UB);
    return DISAS_NEXT;
}

/* Load a sign-extended halfword (big-endian).  */
static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TESW);
    return DISAS_NEXT;
}

/* Load a zero-extended halfword (big-endian).  */
static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUW);
    return DISAS_NEXT;
}

/* Load a sign-extended word; insn->data can add alignment flags.  */
static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_tl(o->out, o->in2, get_mem_index(s),
                       MO_TESL | s->insn->data);
    return DISAS_NEXT;
}

/* Load a zero-extended word; insn->data can add alignment flags.  */
static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_tl(o->out, o->in2, get_mem_index(s),
                       MO_TEUL | s->insn->data);
    return DISAS_NEXT;
}

/* Load a doubleword; insn->data can add alignment flags.  */
static DisasJumpType op_ld64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s),
                        MO_TEUQ | s->insn->data);
    return DISAS_NEXT;
}
2842 
/* LOAD AND TRAP (32-bit): trap if the loaded value is zero.  */
static DisasJumpType op_lat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32_i64(get_field(s, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}

/* LOAD AND TRAP (64-bit).  */
static DisasJumpType op_lgat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUQ);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}

/* LOAD HIGH AND TRAP: store into the high half of R1.  */
static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32h_i64(get_field(s, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}

/* LOAD LOGICAL AND TRAP (32 -> 64 bit).  */
static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();

    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUL);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}

/* LOAD LOGICAL THIRTY ONE BITS AND TRAP.  */
static DisasJumpType op_llgtat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
2898 
/* LOAD ON CONDITION / SELECT: out = condition ? in2 : in1.  */
static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;

    if (have_field(s, m3)) {
        /* LOAD * ON CONDITION */
        disas_jcc(s, &c, get_field(s, m3));
    } else {
        /* SELECT */
        disas_jcc(s, &c, get_field(s, m4));
    }

    if (c.is_64) {
        tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
                            o->in2, o->in1);
    } else {
        /* 32-bit comparison: materialize the condition as 0/1, widen it,
           then select on it being non-zero.  */
        TCGv_i32 t32 = tcg_temp_new_i32();
        TCGv_i64 t, z;

        tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);

        t = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t, t32);

        z = tcg_constant_i64(0);
        tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
    }

    return DISAS_NEXT;
}
2929 
2930 #ifndef CONFIG_USER_ONLY
/* LOAD CONTROL (32-bit): load control registers r1..r3 from memory.  */
static DisasJumpType op_lctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_lctl(tcg_env, r1, o->in2, r3);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    s->exit_to_mainloop = true;
    return DISAS_TOO_MANY;
}

/* LOAD CONTROL (64-bit).  */
static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_lctlg(tcg_env, r1, o->in2, r3);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    s->exit_to_mainloop = true;
    return DISAS_TOO_MANY;
}
2952 
/* LOAD REAL ADDRESS: translate a virtual address, setting CC.  */
static DisasJumpType op_lra(DisasContext *s, DisasOps *o)
{
    gen_helper_lra(o->out, tcg_env, o->out, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* LOAD PROGRAM PARAMETER.  */
static DisasJumpType op_lpp(DisasContext *s, DisasOps *o)
{
    tcg_gen_st_i64(o->in2, tcg_env, offsetof(CPUS390XState, pp));
    return DISAS_NEXT;
}
2965 
/* LOAD PSW: load a short-format (64-bit) PSW from memory.  */
static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o)
{
    TCGv_i64 mask, addr;

    per_breaking_event(s);

    /*
     * Convert the short PSW into the normal PSW, similar to what
     * s390_cpu_load_normal() does.
     */
    mask = tcg_temp_new_i64();
    addr = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(mask, o->in2, get_mem_index(s), MO_TEUQ | MO_ALIGN_8);
    tcg_gen_andi_i64(addr, mask, PSW_MASK_SHORT_ADDR);
    tcg_gen_andi_i64(mask, mask, PSW_MASK_SHORT_CTRL);
    tcg_gen_xori_i64(mask, mask, PSW_MASK_SHORTPSW);
    gen_helper_load_psw(tcg_env, mask, addr);
    return DISAS_NORETURN;
}
2985 
static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
{
    /* LPSWE: load a 16-byte extended PSW (mask then address) from o->in2. */
    TCGv_i64 t1, t2;

    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    /* First doubleword (PSW mask) must be doubleword aligned. */
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
                        MO_TEUQ | MO_ALIGN_8);
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    /* Second doubleword is the PSW address. */
    tcg_gen_qemu_ld_i64(t2, o->in2, get_mem_index(s), MO_TEUQ);
    gen_helper_load_psw(tcg_env, t1, t2);
    return DISAS_NORETURN;
}
3001 #endif
3002 
static DisasJumpType op_lam(DisasContext *s, DisasOps *o)
{
    /* LAM: load access registers r1..r3 from the address in o->in2. */
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_lam(tcg_env, r1, o->in2, r3);
    return DISAS_NEXT;
}
3011 
static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
{
    /* LM/LMY: load multiple 32-bit registers r1..r3 (wrapping mod 16)
       from successive words starting at o->in2. */
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
        store_reg32_i64(r1, t1);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
    /* (r3 - r1) & 15 handles register-number wraparound past 15. */
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld_i64(t2, t2, get_mem_index(s), MO_TEUL);
    /* Only commit to the register file after both loads succeeded. */
    store_reg32_i64(r1, t1);
    store_reg32_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
        store_reg32_i64(r1, t1);
    }
    return DISAS_NEXT;
}
3051 
static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
{
    /* LMH: like op_lm32, but the loaded words go into the HIGH halves
       of registers r1..r3 (store_reg32h_i64). */
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
        store_reg32h_i64(r1, t1);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
    /* (r3 - r1) & 15 handles register-number wraparound past 15. */
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld_i64(t2, t2, get_mem_index(s), MO_TEUL);
    /* Only commit to the register file after both loads succeeded. */
    store_reg32h_i64(r1, t1);
    store_reg32h_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
        store_reg32h_i64(r1, t1);
    }
    return DISAS_NEXT;
}
3091 
static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
{
    /* LMG: load multiple 64-bit registers r1..r3 (wrapping mod 16) from
       successive doublewords starting at o->in2. */
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld_i64(regs[r1], o->in2, get_mem_index(s), MO_TEUQ);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUQ);
    /* (r3 - r1) & 15 handles register-number wraparound past 15. */
    tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld_i64(regs[r3], t2, get_mem_index(s), MO_TEUQ);
    /* r1 is loaded into a temp first so a fault on the second load does
       not leave r1 partially updated. */
    tcg_gen_mov_i64(regs[r1], t1);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t1, 8);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t1);
        tcg_gen_qemu_ld_i64(regs[r1], o->in2, get_mem_index(s), MO_TEUQ);
    }
    return DISAS_NEXT;
}
3128 
static DisasJumpType op_lpd(DisasContext *s, DisasOps *o)
{
    /* LPD/LPDG: load pair disjoint; two aligned loads that must appear
       interlocked.  Operand size comes from the insn data field. */
    TCGv_i64 a1, a2;
    MemOp mop = s->insn->data;

    /* In a parallel context, stop the world and single step.  */
    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        update_psw_addr(s);
        update_cc_op(s);
        gen_exception(EXCP_ATOMIC);
        return DISAS_NORETURN;
    }

    /* In a serial context, perform the two loads ... */
    a1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
    a2 = get_address(s, 0, get_field(s, b2), get_field(s, d2));
    tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
    tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);

    /* ... and indicate that we performed them while interlocked.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}
3152 
static DisasJumpType op_lpq(DisasContext *s, DisasOps *o)
{
    /* LPQ: atomically load an aligned 16-byte quantity from o->in2. */
    o->out_128 = tcg_temp_new_i128();
    tcg_gen_qemu_ld_i128(o->out_128, o->in2, get_mem_index(s),
                         MO_TE | MO_128 | MO_ALIGN);
    return DISAS_NEXT;
}
3160 
3161 #ifndef CONFIG_USER_ONLY
static DisasJumpType op_lura(DisasContext *s, DisasOps *o)
{
    /* LURA/LURAG: load using the real-address MMU index, bypassing
       dynamic address translation; size comes from the insn data. */
    tcg_gen_qemu_ld_tl(o->out, o->in2, MMU_REAL_IDX, s->insn->data);
    return DISAS_NEXT;
}
3167 #endif
3168 
static DisasJumpType op_lzrb(DisasContext *s, DisasOps *o)
{
    /* Load and zero rightmost byte: copy in2 with the low 8 bits cleared. */
    tcg_gen_andi_i64(o->out, o->in2, -256);
    return DISAS_NEXT;
}
3174 
3175 static DisasJumpType op_lcbb(DisasContext *s, DisasOps *o)
3176 {
3177     const int64_t block_size = (1ull << (get_field(s, m3) + 6));
3178 
3179     if (get_field(s, m3) > 6) {
3180         gen_program_exception(s, PGM_SPECIFICATION);
3181         return DISAS_NORETURN;
3182     }
3183 
3184     tcg_gen_ori_i64(o->addr1, o->addr1, -block_size);
3185     tcg_gen_neg_i64(o->addr1, o->addr1);
3186     tcg_gen_movi_i64(o->out, 16);
3187     tcg_gen_umin_i64(o->out, o->out, o->addr1);
3188     gen_op_update1_cc_i64(s, CC_OP_LCBB, o->out);
3189     return DISAS_NEXT;
3190 }
3191 
3192 static DisasJumpType op_mc(DisasContext *s, DisasOps *o)
3193 {
3194     const uint8_t monitor_class = get_field(s, i2);
3195 
3196     if (monitor_class & 0xf0) {
3197         gen_program_exception(s, PGM_SPECIFICATION);
3198         return DISAS_NORETURN;
3199     }
3200 
3201 #if !defined(CONFIG_USER_ONLY)
3202     gen_helper_monitor_call(tcg_env, o->addr1,
3203                             tcg_constant_i32(monitor_class));
3204 #endif
3205     /* Defaults to a NOP. */
3206     return DISAS_NEXT;
3207 }
3208 
static DisasJumpType op_mov2(DisasContext *s, DisasOps *o)
{
    /* Move in2 to out by transferring ownership of the TCG value;
       clearing in2 prevents the generic wout/free machinery from
       treating it as a separate value. */
    o->out = o->in2;
    o->in2 = NULL;
    return DISAS_NEXT;
}
3215 
static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o)
{
    /* MVCDK-style move that also updates access register r1 according
       to the current address-space-control (ASC) mode. */
    int b2 = get_field(s, b2);
    TCGv ar1 = tcg_temp_new_i64();
    int r1 = get_field(s, r1);

    /* Transfer ownership of in2 to out (see op_mov2). */
    o->out = o->in2;
    o->in2 = NULL;

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 0);
        break;
    case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 1);
        break;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        /* In secondary mode, copy the base register's access register;
           base register 0 means no access register. */
        if (b2) {
            tcg_gen_ld32u_i64(ar1, tcg_env, offsetof(CPUS390XState, aregs[b2]));
        } else {
            tcg_gen_movi_i64(ar1, 0);
        }
        break;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 2);
        break;
    }

    tcg_gen_st32_i64(ar1, tcg_env, offsetof(CPUS390XState, aregs[r1]));
    return DISAS_NEXT;
}
3247 
static DisasJumpType op_movx(DisasContext *s, DisasOps *o)
{
    /* Move a register pair: transfer ownership of in1/in2 to out/out2
       so the generic machinery writes both halves back. */
    o->out = o->in1;
    o->out2 = o->in2;
    o->in1 = NULL;
    o->in2 = NULL;
    return DISAS_NEXT;
}
3256 
static DisasJumpType op_mvc(DisasContext *s, DisasOps *o)
{
    /* MVC: move characters; the helper copies using length field l1. */
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_mvc(tcg_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}
3264 
static DisasJumpType op_mvcrl(DisasContext *s, DisasOps *o)
{
    /* MVCRL: move right to left; the length is taken from register 0. */
    gen_helper_mvcrl(tcg_env, regs[0], o->addr1, o->in2);
    return DISAS_NEXT;
}
3270 
static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o)
{
    /* MVCIN: move inverse; helper copies with byte order reversed. */
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_mvcin(tcg_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}
3278 
3279 static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o)
3280 {
3281     int r1 = get_field(s, r1);
3282     int r2 = get_field(s, r2);
3283     TCGv_i32 t1, t2;
3284 
3285     /* r1 and r2 must be even.  */
3286     if (r1 & 1 || r2 & 1) {
3287         gen_program_exception(s, PGM_SPECIFICATION);
3288         return DISAS_NORETURN;
3289     }
3290 
3291     t1 = tcg_constant_i32(r1);
3292     t2 = tcg_constant_i32(r2);
3293     gen_helper_mvcl(cc_op, tcg_env, t1, t2);
3294     set_cc_static(s);
3295     return DISAS_NEXT;
3296 }
3297 
3298 static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o)
3299 {
3300     int r1 = get_field(s, r1);
3301     int r3 = get_field(s, r3);
3302     TCGv_i32 t1, t3;
3303 
3304     /* r1 and r3 must be even.  */
3305     if (r1 & 1 || r3 & 1) {
3306         gen_program_exception(s, PGM_SPECIFICATION);
3307         return DISAS_NORETURN;
3308     }
3309 
3310     t1 = tcg_constant_i32(r1);
3311     t3 = tcg_constant_i32(r3);
3312     gen_helper_mvcle(cc_op, tcg_env, t1, o->in2, t3);
3313     set_cc_static(s);
3314     return DISAS_NEXT;
3315 }
3316 
3317 static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o)
3318 {
3319     int r1 = get_field(s, r1);
3320     int r3 = get_field(s, r3);
3321     TCGv_i32 t1, t3;
3322 
3323     /* r1 and r3 must be even.  */
3324     if (r1 & 1 || r3 & 1) {
3325         gen_program_exception(s, PGM_SPECIFICATION);
3326         return DISAS_NORETURN;
3327     }
3328 
3329     t1 = tcg_constant_i32(r1);
3330     t3 = tcg_constant_i32(r3);
3331     gen_helper_mvclu(cc_op, tcg_env, t1, o->in2, t3);
3332     set_cc_static(s);
3333     return DISAS_NEXT;
3334 }
3335 
static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o)
{
    /* MVCOS: move with optional specifications; the operand-access
       controls and length come from register r3. */
    int r3 = get_field(s, r3);
    gen_helper_mvcos(cc_op, tcg_env, o->addr1, o->in2, regs[r3]);
    set_cc_static(s);
    return DISAS_NEXT;
}
3343 
3344 #ifndef CONFIG_USER_ONLY
static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o)
{
    /* MVCP: move to primary; note the length register number comes from
       the l1 field of this insn format. */
    int r1 = get_field(s, l1);
    int r3 = get_field(s, r3);
    gen_helper_mvcp(cc_op, tcg_env, regs[r1], o->addr1, o->in2, regs[r3]);
    set_cc_static(s);
    return DISAS_NEXT;
}
3353 
static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o)
{
    /* MVCS: move to secondary; mirror image of op_mvcp. */
    int r1 = get_field(s, l1);
    int r3 = get_field(s, r3);
    gen_helper_mvcs(cc_op, tcg_env, regs[r1], o->addr1, o->in2, regs[r3]);
    set_cc_static(s);
    return DISAS_NEXT;
}
3362 #endif
3363 
static DisasJumpType op_mvn(DisasContext *s, DisasOps *o)
{
    /* MVN: move numerics; helper handles the digit-nibble move. */
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_mvn(tcg_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}
3371 
static DisasJumpType op_mvo(DisasContext *s, DisasOps *o)
{
    /* MVO: move with offset; helper performs the nibble-shifted move. */
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_mvo(tcg_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}
3379 
static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o)
{
    /* MVPG: move page; register 0 supplies the operation controls. */
    TCGv_i32 t1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 t2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_mvpg(cc_op, tcg_env, regs[0], t1, t2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3389 
static DisasJumpType op_mvst(DisasContext *s, DisasOps *o)
{
    /* MVST: move string; the helper does the scan/copy and sets CC. */
    TCGv_i32 t1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 t2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_mvst(cc_op, tcg_env, t1, t2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3399 
static DisasJumpType op_mvz(DisasContext *s, DisasOps *o)
{
    /* MVZ: move zones; helper moves the zone nibbles only. */
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_mvz(tcg_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}
3407 
static DisasJumpType op_mul(DisasContext *s, DisasOps *o)
{
    /* Integer multiply, low 64 bits of the product. */
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3413 
static DisasJumpType op_mul128(DisasContext *s, DisasOps *o)
{
    /* Unsigned 64x64->128 multiply: high half to out, low half to out2. */
    tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3419 
static DisasJumpType op_muls128(DisasContext *s, DisasOps *o)
{
    /* Signed 64x64->128 multiply: high half to out, low half to out2. */
    tcg_gen_muls2_i64(o->out2, o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3425 
static DisasJumpType op_meeb(DisasContext *s, DisasOps *o)
{
    /* MEEB: BFP short (32-bit) multiply via softfloat helper. */
    gen_helper_meeb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3431 
static DisasJumpType op_mdeb(DisasContext *s, DisasOps *o)
{
    /* MDEB: BFP multiply short operands producing a long result. */
    gen_helper_mdeb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3437 
static DisasJumpType op_mdb(DisasContext *s, DisasOps *o)
{
    /* MDB: BFP long (64-bit) multiply via softfloat helper. */
    gen_helper_mdb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3443 
static DisasJumpType op_mxb(DisasContext *s, DisasOps *o)
{
    /* MXB: BFP extended (128-bit) multiply via softfloat helper. */
    gen_helper_mxb(o->out_128, tcg_env, o->in1_128, o->in2_128);
    return DISAS_NEXT;
}
3449 
static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o)
{
    /* MXDB: BFP multiply long operands producing an extended result. */
    gen_helper_mxdb(o->out_128, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3455 
static DisasJumpType op_maeb(DisasContext *s, DisasOps *o)
{
    /* MAEB: BFP short multiply-and-add; r3 supplies the addend. */
    TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
    gen_helper_maeb(o->out, tcg_env, o->in1, o->in2, r3);
    return DISAS_NEXT;
}
3462 
static DisasJumpType op_madb(DisasContext *s, DisasOps *o)
{
    /* MADB: BFP long multiply-and-add; r3 supplies the addend. */
    TCGv_i64 r3 = load_freg(get_field(s, r3));
    gen_helper_madb(o->out, tcg_env, o->in1, o->in2, r3);
    return DISAS_NEXT;
}
3469 
static DisasJumpType op_mseb(DisasContext *s, DisasOps *o)
{
    /* MSEB: BFP short multiply-and-subtract; r3 supplies the subtrahend. */
    TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
    gen_helper_mseb(o->out, tcg_env, o->in1, o->in2, r3);
    return DISAS_NEXT;
}
3476 
static DisasJumpType op_msdb(DisasContext *s, DisasOps *o)
{
    /* MSDB: BFP long multiply-and-subtract; r3 supplies the subtrahend. */
    TCGv_i64 r3 = load_freg(get_field(s, r3));
    gen_helper_msdb(o->out, tcg_env, o->in1, o->in2, r3);
    return DISAS_NEXT;
}
3483 
3484 static DisasJumpType op_nabs(DisasContext *s, DisasOps *o)
3485 {
3486     TCGv_i64 z = tcg_constant_i64(0);
3487     TCGv_i64 n = tcg_temp_new_i64();
3488 
3489     tcg_gen_neg_i64(n, o->in2);
3490     tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
3491     return DISAS_NEXT;
3492 }
3493 
static DisasJumpType op_nabsf32(DisasContext *s, DisasOps *o)
{
    /* Negative absolute of a 32-bit float: force the sign bit on. */
    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
    return DISAS_NEXT;
}
3499 
static DisasJumpType op_nabsf64(DisasContext *s, DisasOps *o)
{
    /* Negative absolute of a 64-bit float: force the sign bit on. */
    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
    return DISAS_NEXT;
}
3505 
static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o)
{
    /* Negative absolute of a 128-bit float: set the sign bit in the
       high doubleword, pass the low doubleword through unchanged. */
    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}
3512 
static DisasJumpType op_nc(DisasContext *s, DisasOps *o)
{
    /* NC: AND characters in storage; helper computes and returns the CC. */
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_nc(cc_op, tcg_env, l, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3521 
static DisasJumpType op_neg(DisasContext *s, DisasOps *o)
{
    /* Two's-complement negation. */
    tcg_gen_neg_i64(o->out, o->in2);
    return DISAS_NEXT;
}
3527 
static DisasJumpType op_negf32(DisasContext *s, DisasOps *o)
{
    /* Negate a 32-bit float by flipping its sign bit. */
    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
    return DISAS_NEXT;
}
3533 
static DisasJumpType op_negf64(DisasContext *s, DisasOps *o)
{
    /* Negate a 64-bit float by flipping its sign bit. */
    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
    return DISAS_NEXT;
}
3539 
static DisasJumpType op_negf128(DisasContext *s, DisasOps *o)
{
    /* Negate a 128-bit float: flip the sign bit in the high doubleword,
       pass the low doubleword through unchanged. */
    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}
3546 
static DisasJumpType op_oc(DisasContext *s, DisasOps *o)
{
    /* OC: OR characters in storage; helper computes and returns the CC. */
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_oc(cc_op, tcg_env, l, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3555 
static DisasJumpType op_or(DisasContext *s, DisasOps *o)
{
    /* Bitwise OR of the two operands. */
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3561 
static DisasJumpType op_ori(DisasContext *s, DisasOps *o)
{
    /* OR-immediate family: the insn data encodes the target bit-field
       as (size << 8) | shift; the immediate is OR-ed into that field. */
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, t);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
3577 
static DisasJumpType op_oi(DisasContext *s, DisasOps *o)
{
    /* OI/OIY: OR immediate into storage.  With the interlocked-access
       facility the memory update is done atomically; otherwise it is a
       plain load/modify/store sequence. */
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                    s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
3598 
static DisasJumpType op_pack(DisasContext *s, DisasOps *o)
{
    /* PACK: pack zoned digits into packed decimal via the helper. */
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_pack(tcg_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}
3606 
3607 static DisasJumpType op_pka(DisasContext *s, DisasOps *o)
3608 {
3609     int l2 = get_field(s, l2) + 1;
3610     TCGv_i32 l;
3611 
3612     /* The length must not exceed 32 bytes.  */
3613     if (l2 > 32) {
3614         gen_program_exception(s, PGM_SPECIFICATION);
3615         return DISAS_NORETURN;
3616     }
3617     l = tcg_constant_i32(l2);
3618     gen_helper_pka(tcg_env, o->addr1, o->in2, l);
3619     return DISAS_NEXT;
3620 }
3621 
3622 static DisasJumpType op_pku(DisasContext *s, DisasOps *o)
3623 {
3624     int l2 = get_field(s, l2) + 1;
3625     TCGv_i32 l;
3626 
3627     /* The length must be even and should not exceed 64 bytes.  */
3628     if ((l2 & 1) || (l2 > 64)) {
3629         gen_program_exception(s, PGM_SPECIFICATION);
3630         return DISAS_NORETURN;
3631     }
3632     l = tcg_constant_i32(l2);
3633     gen_helper_pku(tcg_env, o->addr1, o->in2, l);
3634     return DISAS_NEXT;
3635 }
3636 
3637 static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o)
3638 {
3639     const uint8_t m3 = get_field(s, m3);
3640 
3641     if ((m3 & 8) && s390_has_feat(S390_FEAT_MISC_INSTRUCTION_EXT3)) {
3642         tcg_gen_ctpop_i64(o->out, o->in2);
3643     } else {
3644         gen_helper_popcnt(o->out, o->in2);
3645     }
3646     return DISAS_NEXT;
3647 }
3648 
3649 #ifndef CONFIG_USER_ONLY
static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o)
{
    /* PTLB: purge TLB; entirely delegated to the helper. */
    gen_helper_ptlb(tcg_env);
    return DISAS_NEXT;
}
3655 #endif
3656 
static DisasJumpType op_risbg(DisasContext *s, DisasOps *o)
{
    /* RISBG family: rotate in2 left by i5, then insert the bit range
       i3..i4 of the result into in1 (or into zero if the i4 zero flag
       is set).  Fast paths use extract/deposit when the masks allow. */
    int i3 = get_field(s, i3);
    int i4 = get_field(s, i4);
    int i5 = get_field(s, i5);
    int do_zero = i4 & 0x80;   /* i4 bit 0x80 requests zeroing of in1 */
    uint64_t mask, imask, pmask;
    int pos, len, rot;

    /* Adjust the arguments for the specific insn.  */
    switch (s->fields.op2) {
    case 0x55: /* risbg */
    case 0x59: /* risbgn */
        i3 &= 63;
        i4 &= 63;
        pmask = ~0;
        break;
    case 0x5d: /* risbhg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0xffffffff00000000ull;
        break;
    case 0x51: /* risblg */
        i3 = (i3 & 31) + 32;
        i4 = (i4 & 31) + 32;
        pmask = 0x00000000ffffffffull;
        break;
    default:
        g_assert_not_reached();
    }

    /* MASK is the set of bits to be inserted from R2. */
    if (i3 <= i4) {
        /* [0...i3---i4...63] */
        mask = (-1ull >> i3) & (-1ull << (63 - i4));
    } else {
        /* [0---i4...i3---63] */
        mask = (-1ull >> i3) | (-1ull << (63 - i4));
    }
    /* For RISBLG/RISBHG, the wrapping is limited to the high/low doubleword. */
    mask &= pmask;

    /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
       insns, we need to keep the other half of the register.  */
    imask = ~mask | ~pmask;
    if (do_zero) {
        imask = ~pmask;
    }

    len = i4 - i3 + 1;
    pos = 63 - i4;
    rot = i5 & 63;

    /* In some cases we can implement this with extract.  */
    if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
        tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
        return DISAS_NEXT;
    }

    /* In some cases we can implement this with deposit.  */
    if (len > 0 && (imask == 0 || ~mask == imask)) {
        /* Note that we rotate the bits to be inserted to the lsb, not to
           the position as described in the PoO.  */
        rot = (rot - pos) & 63;
    } else {
        pos = -1;
    }

    /* Rotate the input as necessary.  */
    tcg_gen_rotli_i64(o->in2, o->in2, rot);

    /* Insert the selected bits into the output.  */
    if (pos >= 0) {
        if (imask == 0) {
            tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
        } else {
            tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
        }
    } else if (imask == 0) {
        tcg_gen_andi_i64(o->out, o->in2, mask);
    } else {
        /* General case: mask both sources and combine. */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_andi_i64(o->out, o->out, imask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
    }
    return DISAS_NEXT;
}
3744 
static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o)
{
    /* RNSBG/ROSBG/RXSBG: rotate in2 by i5, then AND/OR/XOR the selected
       bit range i3..i4 into out; CC is set from the selected bits. */
    int i3 = get_field(s, i3);
    int i4 = get_field(s, i4);
    int i5 = get_field(s, i5);
    TCGv_i64 orig_out;
    uint64_t mask;

    /* If this is a test-only form, arrange to discard the result.  */
    if (i3 & 0x80) {
        tcg_debug_assert(o->out != NULL);
        orig_out = o->out;
        o->out = tcg_temp_new_i64();
        tcg_gen_mov_i64(o->out, orig_out);
    }

    i3 &= 63;
    i4 &= 63;
    i5 &= 63;

    /* MASK is the set of bits to be operated on from R2.
       Take care for I3/I4 wraparound.  */
    mask = ~0ull >> i3;
    if (i3 <= i4) {
        mask ^= ~0ull >> i4 >> 1;
    } else {
        mask |= ~(~0ull >> i4 >> 1);
    }

    /* Rotate the input as necessary.  */
    tcg_gen_rotli_i64(o->in2, o->in2, i5);

    /* Operate.  */
    switch (s->fields.op2) {
    case 0x54: /* AND */
        /* Set the unselected bits so they do not affect the AND. */
        tcg_gen_ori_i64(o->in2, o->in2, ~mask);
        tcg_gen_and_i64(o->out, o->out, o->in2);
        break;
    case 0x56: /* OR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
        break;
    case 0x57: /* XOR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_xor_i64(o->out, o->out, o->in2);
        break;
    default:
        abort();
    }

    /* Set the CC.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
3800 
static DisasJumpType op_rev16(DisasContext *s, DisasOps *o)
{
    /* Byte-swap the low 16 bits; input and output are zero-extended. */
    tcg_gen_bswap16_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
    return DISAS_NEXT;
}
3806 
static DisasJumpType op_rev32(DisasContext *s, DisasOps *o)
{
    /* Byte-swap the low 32 bits; input and output are zero-extended. */
    tcg_gen_bswap32_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
    return DISAS_NEXT;
}
3812 
static DisasJumpType op_rev64(DisasContext *s, DisasOps *o)
{
    /* Byte-swap the full 64-bit operand. */
    tcg_gen_bswap64_i64(o->out, o->in2);
    return DISAS_NEXT;
}
3818 
static DisasJumpType op_rll32(DisasContext *s, DisasOps *o)
{
    /* RLL: rotate the low 32 bits of in1 left by in2, done in 32-bit
       temps and zero-extended back to 64 bits. */
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();
    TCGv_i32 to = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, o->in1);
    tcg_gen_extrl_i64_i32(t2, o->in2);
    tcg_gen_rotl_i32(to, t1, t2);
    tcg_gen_extu_i32_i64(o->out, to);
    return DISAS_NEXT;
}
3830 
static DisasJumpType op_rll64(DisasContext *s, DisasOps *o)
{
    /* RLLG: 64-bit rotate left. */
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3836 
3837 #ifndef CONFIG_USER_ONLY
static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o)
{
    /* RRBE: reset reference bit extended; helper returns the CC. */
    gen_helper_rrbe(cc_op, tcg_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3844 
static DisasJumpType op_sacf(DisasContext *s, DisasOps *o)
{
    /* SACF: set address-space control fast, via helper. */
    gen_helper_sacf(tcg_env, o->in2);
    /* Addressing mode has changed, so end the block.  */
    return DISAS_TOO_MANY;
}
3851 #endif
3852 
static DisasJumpType op_sam(DisasContext *s, DisasOps *o)
{
    /* SAM24/SAM31/SAM64: set addressing mode.  The insn data selects
       the mode (0/1/3) which is deposited into PSW bits 31-32. */
    int sam = s->insn->data;
    TCGv_i64 tsam;
    uint64_t mask;

    /* Address mask the new mode imposes on the PC. */
    switch (sam) {
    case 0:
        mask = 0xffffff;
        break;
    case 1:
        mask = 0x7fffffff;
        break;
    default:
        mask = -1;
        break;
    }

    /* Bizarre but true, we check the address of the current insn for the
       specification exception, not the next to be executed.  Thus the PoO
       documents that Bad Things Happen two bytes before the end.  */
    if (s->base.pc_next & ~mask) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    s->pc_tmp &= mask;

    tsam = tcg_constant_i64(sam);
    /* Write the new mode into the extended-addressing PSW bits. */
    tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);

    /* Always exit the TB, since we (may have) changed execution mode.  */
    return DISAS_TOO_MANY;
}
3886 
static DisasJumpType op_sar(DisasContext *s, DisasOps *o)
{
    /* SAR: set access register r1 from the low 32 bits of the operand. */
    int r1 = get_field(s, r1);
    tcg_gen_st32_i64(o->in2, tcg_env, offsetof(CPUS390XState, aregs[r1]));
    return DISAS_NEXT;
}
3893 
static DisasJumpType op_seb(DisasContext *s, DisasOps *o)
{
    /* SEB: BFP short (32-bit) subtract via softfloat helper. */
    gen_helper_seb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3899 
static DisasJumpType op_sdb(DisasContext *s, DisasOps *o)
{
    /* SDB: BFP long (64-bit) subtract via softfloat helper. */
    gen_helper_sdb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3905 
static DisasJumpType op_sxb(DisasContext *s, DisasOps *o)
{
    /* SXB: BFP extended (128-bit) subtract via softfloat helper. */
    gen_helper_sxb(o->out_128, tcg_env, o->in1_128, o->in2_128);
    return DISAS_NEXT;
}
3911 
static DisasJumpType op_sqeb(DisasContext *s, DisasOps *o)
{
    /* SQEB: BFP short square root via softfloat helper. */
    gen_helper_sqeb(o->out, tcg_env, o->in2);
    return DISAS_NEXT;
}
3917 
static DisasJumpType op_sqdb(DisasContext *s, DisasOps *o)
{
    /* SQDB: BFP long square root via softfloat helper. */
    gen_helper_sqdb(o->out, tcg_env, o->in2);
    return DISAS_NEXT;
}
3923 
static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o)
{
    /* SQXB: BFP extended square root via softfloat helper. */
    gen_helper_sqxb(o->out_128, tcg_env, o->in2_128);
    return DISAS_NEXT;
}
3929 
3930 #ifndef CONFIG_USER_ONLY
static DisasJumpType op_servc(DisasContext *s, DisasOps *o)
{
    /* SERVC: service call (SCLP); helper performs it and returns CC. */
    gen_helper_servc(cc_op, tcg_env, o->in2, o->in1);
    set_cc_static(s);
    return DISAS_NEXT;
}
3937 
static DisasJumpType op_sigp(DisasContext *s, DisasOps *o)
{
    /* SIGP: signal processor; all semantics live in the helper. */
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_sigp(cc_op, tcg_env, o->in2, r1, r3);
    set_cc_static(s);
    return DISAS_NEXT;
}
3947 #endif
3948 
static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
{
    /* STOC/STOCG/STOCFH: store register r1 on condition m3.  The insn
       data selects the variant (0 = STOC, 1 = STOCG, 2 = STOCFH). */
    DisasCompare c;
    TCGv_i64 a, h;
    TCGLabel *lab;
    int r1;

    disas_jcc(s, &c, get_field(s, m3));

    /* We want to store when the condition is fulfilled, so branch
       out when it's not */
    c.cond = tcg_invert_cond(c.cond);

    lab = gen_new_label();
    if (c.is_64) {
        tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
    } else {
        tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
    }

    r1 = get_field(s, r1);
    a = get_address(s, 0, get_field(s, b2), get_field(s, d2));
    switch (s->insn->data) {
    case 1: /* STOCG */
        tcg_gen_qemu_st_i64(regs[r1], a, get_mem_index(s), MO_TEUQ);
        break;
    case 0: /* STOC */
        tcg_gen_qemu_st_i64(regs[r1], a, get_mem_index(s), MO_TEUL);
        break;
    case 2: /* STOCFH */
        /* Store the high 32 bits of the register. */
        h = tcg_temp_new_i64();
        tcg_gen_shri_i64(h, regs[r1], 32);
        tcg_gen_qemu_st_i64(h, a, get_mem_index(s), MO_TEUL);
        break;
    default:
        g_assert_not_reached();
    }

    gen_set_label(lab);
    return DISAS_NEXT;
}
3990 
static DisasJumpType op_sla(DisasContext *s, DisasOps *o)
{
    /* SLA/SLAG: shift left arithmetic.  The insn data holds the sign
       bit position (31 for the 32-bit form, 63 for the 64-bit form). */
    TCGv_i64 t;
    uint64_t sign = 1ull << s->insn->data;
    /* For the 32-bit form, shift the operand up so the CC computation
       sees the sign bit in bit 63. */
    if (s->insn->data == 31) {
        t = tcg_temp_new_i64();
        tcg_gen_shli_i64(t, o->in1, 32);
    } else {
        t = o->in1;
    }
    gen_op_update2_cc_i64(s, CC_OP_SLA, t, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit.  Copy that over from the source unchanged.  */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return DISAS_NEXT;
}
4010 
/* SHIFT LEFT SINGLE LOGICAL.  */
static DisasJumpType op_sll(DisasContext *s, DisasOps *o)
{
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
4016 
/* SHIFT RIGHT SINGLE (arithmetic).  */
static DisasJumpType op_sra(DisasContext *s, DisasOps *o)
{
    tcg_gen_sar_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
4022 
/* SHIFT RIGHT SINGLE LOGICAL.  */
static DisasJumpType op_srl(DisasContext *s, DisasOps *o)
{
    tcg_gen_shr_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
4028 
/* SET FPC: load the floating-point control register via helper.  */
static DisasJumpType op_sfpc(DisasContext *s, DisasOps *o)
{
    gen_helper_sfpc(tcg_env, o->in2);
    return DISAS_NEXT;
}
4034 
/* SET FPC AND SIGNAL; validation is done in the helper.  */
static DisasJumpType op_sfas(DisasContext *s, DisasOps *o)
{
    gen_helper_sfas(tcg_env, o->in2);
    return DISAS_NEXT;
}
4040 
/* SET BFP ROUNDING MODE (2-bit form).  */
static DisasJumpType op_srnm(DisasContext *s, DisasOps *o)
{
    /* Bits other than 62 and 63 are ignored. Bit 29 is set to zero. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0x3ull);
    gen_helper_srnm(tcg_env, o->addr1);
    return DISAS_NEXT;
}
4048 
/* SET BFP ROUNDING MODE (3-bit form); shares the srnm helper.  */
static DisasJumpType op_srnmb(DisasContext *s, DisasOps *o)
{
    /* Bits 0-55 are ignored. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0xffull);
    gen_helper_srnm(tcg_env, o->addr1);
    return DISAS_NEXT;
}
4056 
/* SET DFP ROUNDING MODE: written directly into fpc bits 4-6.  */
static DisasJumpType op_srnmt(DisasContext *s, DisasOps *o)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Bits other than 61-63 are ignored. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0x7ull);

    /* No need to call a helper, we don't implement dfp */
    tcg_gen_ld32u_i64(tmp, tcg_env, offsetof(CPUS390XState, fpc));
    tcg_gen_deposit_i64(tmp, tmp, o->addr1, 4, 3);
    tcg_gen_st32_i64(tmp, tcg_env, offsetof(CPUS390XState, fpc));
    return DISAS_NEXT;
}
4070 
/*
 * SET PROGRAM MASK: the CC comes from bits 28-29 of the source
 * register, the program mask from bits 24-27 into the PSW.
 */
static DisasJumpType op_spm(DisasContext *s, DisasOps *o)
{
    tcg_gen_extrl_i64_i32(cc_op, o->in1);
    tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
    set_cc_static(s);

    tcg_gen_shri_i64(o->in1, o->in1, 24);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
    return DISAS_NEXT;
}
4081 
/*
 * EXTRACT CPU TIME: loads the third operand into r3, then stores
 * (first operand - CPU timer) into GR0 and the second operand address
 * into GR1.  Operands are fetched before any register is modified so
 * a fault restarts cleanly.
 */
static DisasJumpType op_ectg(DisasContext *s, DisasOps *o)
{
    int b1 = get_field(s, b1);
    int d1 = get_field(s, d1);
    int b2 = get_field(s, b2);
    int d2 = get_field(s, d2);
    int r3 = get_field(s, r3);
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* fetch all operands first */
    o->in1 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in1, regs[b1], d1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in2, regs[b2], d2);
    o->addr1 = tcg_temp_new_i64();
    gen_addi_and_wrap_i64(s, o->addr1, regs[r3], 0);

    /* load the third operand into r3 before modifying anything */
    tcg_gen_qemu_ld_i64(regs[r3], o->addr1, get_mem_index(s), MO_TEUQ);

    /* subtract CPU timer from first operand and store in GR0 */
    gen_helper_stpt(tmp, tcg_env);
    tcg_gen_sub_i64(regs[0], o->in1, tmp);

    /* store second operand in GR1 */
    tcg_gen_mov_i64(regs[1], o->in2);
    return DISAS_NEXT;
}
4110 
4111 #ifndef CONFIG_USER_ONLY
/* SET PSW KEY FROM ADDRESS: address bits 56-59 into the PSW key field. */
static DisasJumpType op_spka(DisasContext *s, DisasOps *o)
{
    tcg_gen_shri_i64(o->in2, o->in2, 4);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
    return DISAS_NEXT;
}
4118 
/* SET STORAGE KEY EXTENDED; key handling lives in the helper.  */
static DisasJumpType op_sske(DisasContext *s, DisasOps *o)
{
    gen_helper_sske(tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}
4124 
/*
 * Raise a specification exception if any reserved bit of the (just
 * updated) PSW mask is set; otherwise fall through.
 */
static void gen_check_psw_mask(DisasContext *s)
{
    TCGv_i64 reserved = tcg_temp_new_i64();
    TCGLabel *ok = gen_new_label();

    tcg_gen_andi_i64(reserved, psw_mask, PSW_MASK_RESERVED);
    tcg_gen_brcondi_i64(TCG_COND_EQ, reserved, 0, ok);
    gen_program_exception(s, PGM_SPECIFICATION);
    gen_set_label(ok);
}
4135 
/* SET SYSTEM MASK: replace PSW bits 0-7, then validate the mask.  */
static DisasJumpType op_ssm(DisasContext *s, DisasOps *o)
{
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);

    gen_check_psw_mask(s);

    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    s->exit_to_mainloop = true;
    return DISAS_TOO_MANY;
}
4146 
/* STORE CPU ADDRESS: the CPU address is the core id.  */
static DisasJumpType op_stap(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, tcg_env, offsetof(CPUS390XState, core_id));
    return DISAS_NEXT;
}
4152 #endif
4153 
/* STORE CLOCK.  */
static DisasJumpType op_stck(DisasContext *s, DisasOps *o)
{
    gen_helper_stck(o->out, tcg_env);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}
4161 
/*
 * STORE CLOCK EXTENDED: stores a 16-byte value built from the 64-bit
 * TOD clock, a forced-nonzero bit and the TOD programmable register.
 */
static DisasJumpType op_stcke(DisasContext *s, DisasOps *o)
{
    TCGv_i64 c1 = tcg_temp_new_i64();
    TCGv_i64 c2 = tcg_temp_new_i64();
    TCGv_i64 todpr = tcg_temp_new_i64();
    gen_helper_stck(c1, tcg_env);
    /* 16-bit value stored in a uint32_t (only valid bits set) */
    tcg_gen_ld32u_i64(todpr, tcg_env, offsetof(CPUS390XState, todpr));
    /* Shift the 64-bit value into its place as a zero-extended
       104-bit value.  Note that "bit positions 64-103 are always
       non-zero so that they compare differently to STCK"; we set
       the least significant bit to 1.  */
    tcg_gen_shli_i64(c2, c1, 56);
    tcg_gen_shri_i64(c1, c1, 8);
    tcg_gen_ori_i64(c2, c2, 0x10000);
    tcg_gen_or_i64(c2, c2, todpr);
    tcg_gen_qemu_st_i64(c1, o->in2, get_mem_index(s), MO_TEUQ);
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_st_i64(c2, o->in2, get_mem_index(s), MO_TEUQ);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}
4185 
4186 #ifndef CONFIG_USER_ONLY
/* SET CLOCK; the helper sets the condition code.  */
static DisasJumpType op_sck(DisasContext *s, DisasOps *o)
{
    gen_helper_sck(cc_op, tcg_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4193 
/* SET CLOCK COMPARATOR.  */
static DisasJumpType op_sckc(DisasContext *s, DisasOps *o)
{
    gen_helper_sckc(tcg_env, o->in2);
    return DISAS_NEXT;
}
4199 
/* SET CLOCK PROGRAMMABLE FIELD, from GR0.  */
static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o)
{
    gen_helper_sckpf(tcg_env, regs[0]);
    return DISAS_NEXT;
}
4205 
/* STORE CLOCK COMPARATOR.  */
static DisasJumpType op_stckc(DisasContext *s, DisasOps *o)
{
    gen_helper_stckc(o->out, tcg_env);
    return DISAS_NEXT;
}
4211 
/* STORE CONTROL (64-bit): store control registers r1..r3 via helper.  */
static DisasJumpType op_stctg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_stctg(tcg_env, r1, o->in2, r3);
    return DISAS_NEXT;
}
4220 
/* STORE CONTROL (32-bit): store control registers r1..r3 via helper.  */
static DisasJumpType op_stctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_stctl(tcg_env, r1, o->in2, r3);
    return DISAS_NEXT;
}
4229 
/* STORE CPU ID.  */
static DisasJumpType op_stidp(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld_i64(o->out, tcg_env, offsetof(CPUS390XState, cpuid));
    return DISAS_NEXT;
}
4235 
/* SET CPU TIMER.  */
static DisasJumpType op_spt(DisasContext *s, DisasOps *o)
{
    gen_helper_spt(tcg_env, o->in2);
    return DISAS_NEXT;
}
4241 
/* STORE FACILITY LIST: the helper writes the facility bits to memory. */
static DisasJumpType op_stfl(DisasContext *s, DisasOps *o)
{
    gen_helper_stfl(tcg_env);
    return DISAS_NEXT;
}
4247 
/* STORE CPU TIMER.  */
static DisasJumpType op_stpt(DisasContext *s, DisasOps *o)
{
    gen_helper_stpt(o->out, tcg_env);
    return DISAS_NEXT;
}
4253 
/* STORE SYSTEM INFORMATION: function code in GR0, selector in GR1.  */
static DisasJumpType op_stsi(DisasContext *s, DisasOps *o)
{
    gen_helper_stsi(cc_op, tcg_env, o->in2, regs[0], regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}
4260 
/* SET PREFIX.  */
static DisasJumpType op_spx(DisasContext *s, DisasOps *o)
{
    gen_helper_spx(tcg_env, o->in2);
    return DISAS_NEXT;
}
4266 
/* CANCEL SUBCHANNEL: subchannel id is in GR1 by convention.  */
static DisasJumpType op_xsch(DisasContext *s, DisasOps *o)
{
    gen_helper_xsch(tcg_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}
4273 
/* CLEAR SUBCHANNEL: subchannel id is in GR1 by convention.  */
static DisasJumpType op_csch(DisasContext *s, DisasOps *o)
{
    gen_helper_csch(tcg_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}
4280 
/* HALT SUBCHANNEL: subchannel id is in GR1 by convention.  */
static DisasJumpType op_hsch(DisasContext *s, DisasOps *o)
{
    gen_helper_hsch(tcg_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}
4287 
/* MODIFY SUBCHANNEL: GR1 holds the subchannel id, in2 the SCHIB address. */
static DisasJumpType op_msch(DisasContext *s, DisasOps *o)
{
    gen_helper_msch(tcg_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4294 
/* RESET CHANNEL PATH: channel path id is in GR1.  */
static DisasJumpType op_rchp(DisasContext *s, DisasOps *o)
{
    gen_helper_rchp(tcg_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}
4301 
/* RESUME SUBCHANNEL: subchannel id is in GR1.  */
static DisasJumpType op_rsch(DisasContext *s, DisasOps *o)
{
    gen_helper_rsch(tcg_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}
4308 
/* SET ADDRESS LIMIT: operand in GR1.  */
static DisasJumpType op_sal(DisasContext *s, DisasOps *o)
{
    gen_helper_sal(tcg_env, regs[1]);
    return DISAS_NEXT;
}
4314 
/* SET CHANNEL MONITOR: operands in GR1 and GR2 plus the effective address. */
static DisasJumpType op_schm(DisasContext *s, DisasOps *o)
{
    gen_helper_schm(tcg_env, regs[1], regs[2], o->in2);
    return DISAS_NEXT;
}
4320 
/* SIGNAL ADAPTER: not emulated.  */
static DisasJumpType op_siga(DisasContext *s, DisasOps *o)
{
    /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
    gen_op_movi_cc(s, 3);
    return DISAS_NEXT;
}
4327 
/* STORE CHANNEL PATH STATUS: not emulated, treated as a no-op.  */
static DisasJumpType op_stcps(DisasContext *s, DisasOps *o)
{
    /* The instruction is suppressed if not provided. */
    return DISAS_NEXT;
}
4333 
/* START SUBCHANNEL: GR1 holds the subchannel id, in2 the ORB address.  */
static DisasJumpType op_ssch(DisasContext *s, DisasOps *o)
{
    gen_helper_ssch(tcg_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4340 
/* STORE SUBCHANNEL: GR1 holds the subchannel id, in2 the SCHIB address. */
static DisasJumpType op_stsch(DisasContext *s, DisasOps *o)
{
    gen_helper_stsch(tcg_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4347 
/* STORE CHANNEL REPORT WORD.  */
static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o)
{
    gen_helper_stcrw(tcg_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4354 
/* TEST PENDING INTERRUPTION.  */
static DisasJumpType op_tpi(DisasContext *s, DisasOps *o)
{
    gen_helper_tpi(cc_op, tcg_env, o->addr1);
    set_cc_static(s);
    return DISAS_NEXT;
}
4361 
/* TEST SUBCHANNEL: GR1 holds the subchannel id, in2 the IRB address.  */
static DisasJumpType op_tsch(DisasContext *s, DisasOps *o)
{
    gen_helper_tsch(tcg_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4368 
/* CHANNEL SUBSYSTEM CALL.  */
static DisasJumpType op_chsc(DisasContext *s, DisasOps *o)
{
    gen_helper_chsc(tcg_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4375 
/* STORE PREFIX: mask to the architected prefix bits.  */
static DisasJumpType op_stpx(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld_i64(o->out, tcg_env, offsetof(CPUS390XState, psa));
    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
    return DISAS_NEXT;
}
4382 
/*
 * STORE THEN AND/OR SYSTEM MASK: store the current system mask byte,
 * then AND (opcode 0xac, STNSM) or OR (STOSM) the immediate into PSW
 * bits 0-7, validate the resulting mask, and exit to the main loop.
 */
static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = get_field(s, i2);
    TCGv_i64 t;

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place.  */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st_i64(t, o->addr1, get_mem_index(s), MO_UB);

    if (s->fields.op == 0xac) {
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }

    gen_check_psw_mask(s);

    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    s->exit_to_mainloop = true;
    return DISAS_TOO_MANY;
}
4408 
/*
 * STORE USING REAL ADDRESS: insn->data carries the MemOp size.  The
 * store bypasses DAT via MMU_REAL_IDX; a PER storage-alteration event
 * may need to be raised afterwards.
 */
static DisasJumpType op_stura(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->in1, o->in2, MMU_REAL_IDX, s->insn->data);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        update_psw_addr(s);
        gen_helper_per_store_real(tcg_env);
    }
    return DISAS_NEXT;
}
4419 #endif
4420 
/* STORE FACILITY LIST EXTENDED.  */
static DisasJumpType op_stfle(DisasContext *s, DisasOps *o)
{
    gen_helper_stfle(cc_op, tcg_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4427 
/* Store low 8 bits of in1 to the address in in2.  */
static DisasJumpType op_st8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s), MO_UB);
    return DISAS_NEXT;
}
4433 
/* Store low 16 bits of in1 (big-endian) to the address in in2.  */
static DisasJumpType op_st16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s), MO_TEUW);
    return DISAS_NEXT;
}
4439 
/* 32-bit store; insn->data can add MemOp flags (e.g. alignment).  */
static DisasJumpType op_st32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->in1, o->in2, get_mem_index(s),
                       MO_TEUL | s->insn->data);
    return DISAS_NEXT;
}
4446 
/* 64-bit store; insn->data can add MemOp flags (e.g. alignment).  */
static DisasJumpType op_st64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s),
                        MO_TEUQ | s->insn->data);
    return DISAS_NEXT;
}
4453 
/* STORE ACCESS MULTIPLE: store access registers r1..r3 via helper.  */
static DisasJumpType op_stam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_stam(tcg_env, r1, o->in2, r3);
    return DISAS_NEXT;
}
4462 
/*
 * STORE CHARACTERS UNDER MASK: the m3 mask selects which bytes of the
 * source word are stored, contiguously, in descending byte order.
 * insn->data is the bit position of the source field's low word (to
 * share this between STCM, STCMY and STCMH).  Contiguous masks are
 * emitted as a single wider store; arbitrary masks fall back to a
 * byte-by-byte sequence.
 */
static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    int pos, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Bit position of the lowest selected byte within the register.  */
    pos = base + ctz32(m3) * 8;
    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_TEUL);
        break;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_TEUW);
        break;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_UB);
        break;

    default:
        /* This is going to be a sequence of shifts and stores.  */
        pos = base + 32 - 8;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_shri_i64(tmp, o->in1, pos);
                tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_UB);
                tcg_gen_addi_i64(o->in2, o->in2, 1);
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }
    return DISAS_NEXT;
}
4510 
/*
 * STORE MULTIPLE: store registers r1 through r3 (wrapping modulo 16)
 * to consecutive memory; insn->data is the element size (4 or 8).
 */
static DisasJumpType op_stm(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    int size = s->insn->data;
    TCGv_i64 tsize = tcg_constant_i64(size);

    while (1) {
        tcg_gen_qemu_st_i64(regs[r1], o->in2, get_mem_index(s),
                            size == 8 ? MO_TEUQ : MO_TEUL);
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, tsize);
        r1 = (r1 + 1) & 15;
    }

    return DISAS_NEXT;
}
4530 
/*
 * STORE MULTIPLE HIGH: store the high 32 bits of registers r1 through
 * r3 (wrapping modulo 16) to consecutive words.
 */
static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_constant_i64(4);
    TCGv_i64 t32 = tcg_constant_i64(32);

    while (1) {
        /* Left-shift so the high half lands in the stored word.  */
        tcg_gen_shl_i64(t, regs[r1], t32);
        tcg_gen_qemu_st_i64(t, o->in2, get_mem_index(s), MO_TEUL);
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }
    return DISAS_NEXT;
}
4550 
/* STORE PAIR TO QUADWORD: single aligned 128-bit store of the
   register pair (out/out2).  */
static DisasJumpType op_stpq(DisasContext *s, DisasOps *o)
{
    TCGv_i128 t16 = tcg_temp_new_i128();

    tcg_gen_concat_i64_i128(t16, o->out2, o->out);
    tcg_gen_qemu_st_i128(t16, o->in2, get_mem_index(s),
                         MO_TE | MO_128 | MO_ALIGN);
    return DISAS_NEXT;
}
4560 
/* SEARCH STRING; the helper updates r1/r2 and sets the condition code. */
static DisasJumpType op_srst(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_srst(tcg_env, r1, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4570 
/* SEARCH STRING UNICODE; the helper updates r1/r2 and sets the cc.  */
static DisasJumpType op_srstu(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_srstu(tcg_env, r1, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4580 
/* Plain subtraction; cc handling is done by the cout_* hooks.  */
static DisasJumpType op_sub(DisasContext *s, DisasOps *o)
{
    tcg_gen_sub_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
4586 
/* 64-bit logical subtract: the borrow (0 or -1) lands in cc_src.  */
static DisasJumpType op_subu64(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_sub2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    return DISAS_NEXT;
}
4593 
4594 /* Compute borrow (0, -1) into cc_src. */
/* Compute borrow (0, -1) into cc_src. */
static void compute_borrow(DisasContext *s)
{
    switch (s->cc_op) {
    case CC_OP_SUBU:
        /* The borrow value is already in cc_src (0,-1). */
        break;
    default:
        /* Materialize the cc first, then treat it as CC_OP_STATIC. */
        gen_op_calc_cc(s);
        /* fall through */
    case CC_OP_STATIC:
        /* The carry flag is the msb of CC; compute into cc_src. */
        tcg_gen_extu_i32_i64(cc_src, cc_op);
        tcg_gen_shri_i64(cc_src, cc_src, 1);
        /* fall through */
    case CC_OP_ADDU:
        /* Convert carry (1,0) to borrow (0,-1). */
        tcg_gen_subi_i64(cc_src, cc_src, 1);
        break;
    }
}
4615 
/* 32-bit subtract with borrow (SLB et al.).  */
static DisasJumpType op_subb32(DisasContext *s, DisasOps *o)
{
    compute_borrow(s);

    /* Borrow is {0, -1}, so add to subtract. */
    tcg_gen_add_i64(o->out, o->in1, cc_src);
    tcg_gen_sub_i64(o->out, o->out, o->in2);
    return DISAS_NEXT;
}
4625 
/* 64-bit subtract with borrow; the new borrow is left in cc_src.  */
static DisasJumpType op_subb64(DisasContext *s, DisasOps *o)
{
    compute_borrow(s);

    /*
     * Borrow is {0, -1}, so add to subtract; replicate the
     * borrow input to produce 128-bit -1 for the addition.
     */
    TCGv_i64 zero = tcg_constant_i64(0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, cc_src);
    tcg_gen_sub2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);

    return DISAS_NEXT;
}
4640 
/*
 * SUPERVISOR CALL: record the svc code and instruction length in the
 * CPU state, then raise EXCP_SVC.  PSW address and cc must be synced
 * first since the exception leaves the translation block.
 */
static DisasJumpType op_svc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t;

    update_psw_addr(s);
    update_cc_op(s);

    t = tcg_constant_i32(get_field(s, i1) & 0xff);
    tcg_gen_st_i32(t, tcg_env, offsetof(CPUS390XState, int_svc_code));

    t = tcg_constant_i32(s->ilen);
    tcg_gen_st_i32(t, tcg_env, offsetof(CPUS390XState, int_svc_ilen));

    gen_exception(EXCP_SVC);
    return DISAS_NORETURN;
}
4657 
/* TEST ADDRESSING MODE: cc encodes the current 64-/31-bit mode flags. */
static DisasJumpType op_tam(DisasContext *s, DisasOps *o)
{
    int cc = 0;

    cc |= (s->base.tb->flags & FLAG_MASK_64) ? 2 : 0;
    cc |= (s->base.tb->flags & FLAG_MASK_32) ? 1 : 0;
    gen_op_movi_cc(s, cc);
    return DISAS_NEXT;
}
4667 
/* TEST DATA CLASS (short BFP).  */
static DisasJumpType op_tceb(DisasContext *s, DisasOps *o)
{
    gen_helper_tceb(cc_op, tcg_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4674 
/* TEST DATA CLASS (long BFP).  */
static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcdb(cc_op, tcg_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4681 
/* TEST DATA CLASS (extended BFP).  */
static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcxb(cc_op, tcg_env, o->in1_128, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4688 
4689 #ifndef CONFIG_USER_ONLY
4690 
/* TEST BLOCK.  */
static DisasJumpType op_testblock(DisasContext *s, DisasOps *o)
{
    gen_helper_testblock(cc_op, tcg_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4697 
/* TEST PROTECTION.  */
static DisasJumpType op_tprot(DisasContext *s, DisasOps *o)
{
    gen_helper_tprot(cc_op, tcg_env, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4704 
4705 #endif
4706 
/* TEST DECIMAL: l1 is a length-minus-one field, hence the +1.  */
static DisasJumpType op_tp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l1 = tcg_constant_i32(get_field(s, l1) + 1);

    gen_helper_tp(cc_op, tcg_env, o->addr1, l1);
    set_cc_static(s);
    return DISAS_NEXT;
}
4715 
/* TRANSLATE: byte-wise table translation done in the helper.  */
static DisasJumpType op_tr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_tr(tcg_env, l, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4724 
/* TRANSLATE EXTENDED: the helper returns the updated address/length
   register pair packed in an i128.  */
static DisasJumpType op_tre(DisasContext *s, DisasOps *o)
{
    TCGv_i128 pair = tcg_temp_new_i128();

    gen_helper_tre(pair, tcg_env, o->out, o->out2, o->in2);
    tcg_gen_extr_i128_i64(o->out2, o->out, pair);
    set_cc_static(s);
    return DISAS_NEXT;
}
4734 
/* TRANSLATE AND TEST.  */
static DisasJumpType op_trt(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_trt(cc_op, tcg_env, l, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4743 
/* TRANSLATE AND TEST REVERSE.  */
static DisasJumpType op_trtr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_trtr(cc_op, tcg_env, l, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4752 
/*
 * TRANSLATE ONE/TWO TO ONE/TWO (TROO/TROT/TRTO/TRTT): the low two
 * opcode bits select the source/destination character sizes.  The
 * test-character comparison value comes from GR0; with the ETF2-ENH
 * facility, m3 bit 0 disables the test entirely (tst = -1).
 */
static DisasJumpType op_trXX(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
    TCGv_i32 sizes = tcg_constant_i32(s->insn->opc & 3);
    TCGv_i32 tst = tcg_temp_new_i32();
    int m3 = get_field(s, m3);

    if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
        m3 = 0;
    }
    if (m3 & 1) {
        tcg_gen_movi_i32(tst, -1);
    } else {
        tcg_gen_extrl_i64_i32(tst, regs[0]);
        /* Truncate the test character to the destination width.  */
        if (s->insn->opc & 3) {
            tcg_gen_ext8u_i32(tst, tst);
        } else {
            tcg_gen_ext16u_i32(tst, tst);
        }
    }
    gen_helper_trXX(cc_op, tcg_env, r1, r2, tst, sizes);

    set_cc_static(s);
    return DISAS_NEXT;
}
4779 
/*
 * TEST AND SET: atomically exchange the byte with 0xff; the cc is the
 * former leftmost bit of that byte.
 */
static DisasJumpType op_ts(DisasContext *s, DisasOps *o)
{
    TCGv_i32 ff = tcg_constant_i32(0xff);
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_atomic_xchg_i32(t1, o->in2, ff, get_mem_index(s), MO_UB);
    tcg_gen_extract_i32(cc_op, t1, 7, 1);
    set_cc_static(s);
    return DISAS_NEXT;
}
4790 
/* UNPACK (decimal).  */
static DisasJumpType op_unpk(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_unpk(tcg_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}
4798 
/* UNPACK ASCII: l1 is length-minus-one; lengths above 32 bytes are a
   specification exception.  */
static DisasJumpType op_unpka(DisasContext *s, DisasOps *o)
{
    int l1 = get_field(s, l1) + 1;
    TCGv_i32 l;

    /* The length must not exceed 32 bytes.  */
    if (l1 > 32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_constant_i32(l1);
    gen_helper_unpka(cc_op, tcg_env, o->addr1, l, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4814 
/* UNPACK UNICODE: length must be even and at most 64 bytes.  */
static DisasJumpType op_unpku(DisasContext *s, DisasOps *o)
{
    int l1 = get_field(s, l1) + 1;
    TCGv_i32 l;

    /* The length must be even and should not exceed 64 bytes.  */
    if ((l1 & 1) || (l1 > 64)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_constant_i32(l1);
    gen_helper_unpku(cc_op, tcg_env, o->addr1, l, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4830 
4831 
/*
 * EXCLUSIVE OR (character): XOR of two memory operands of up to 256
 * bytes (l1 is length-minus-one).  When both operands are the same
 * location the result is all zeros, so short cases are inlined as a
 * sequence of zero stores; everything else goes through the helper.
 */
static DisasJumpType op_xc(DisasContext *s, DisasOps *o)
{
    int d1 = get_field(s, d1);
    int d2 = get_field(s, d2);
    int b1 = get_field(s, b1);
    int b2 = get_field(s, b2);
    int l = get_field(s, l1);
    TCGv_i32 t32;

    o->addr1 = get_address(s, 0, b1, d1);

    /* If the addresses are identical, this is a store/memset of zero.  */
    if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
        o->in2 = tcg_constant_i64(0);

        /* Emit stores in decreasing granularity: 8, 4, 2, 1 bytes.  */
        l++;
        while (l >= 8) {
            tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UQ);
            l -= 8;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 8);
            }
        }
        if (l >= 4) {
            tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UL);
            l -= 4;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 4);
            }
        }
        if (l >= 2) {
            tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UW);
            l -= 2;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 2);
            }
        }
        if (l) {
            tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UB);
        }
        gen_op_movi_cc(s, 0);
        return DISAS_NEXT;
    }

    /* But in general we'll defer to a helper.  */
    o->in2 = get_address(s, 0, b2, d2);
    t32 = tcg_constant_i32(l);
    gen_helper_xc(cc_op, tcg_env, t32, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4883 
/* Plain XOR; cc handling is done by the cout_* hooks.  */
static DisasJumpType op_xor(DisasContext *s, DisasOps *o)
{
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
4889 
/*
 * XOR IMMEDIATE on a sub-field of a register: insn->data packs the
 * field's bit offset (low byte) and width (high byte).  The cc is
 * computed from the affected bits only.
 */
static DisasJumpType op_xori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, o->in2, shift);
    tcg_gen_xor_i64(o->out, o->in1, t);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
4905 
/*
 * XOR to memory (XI and friends): without the interlocked-access
 * facility this is a plain load/xor/store; with it, the xor is done
 * atomically in memory and the result recomputed locally for the cc.
 * insn->data is the MemOp for the access.
 */
static DisasJumpType op_xi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
4926 
/* Produce a zero output operand.  */
static DisasJumpType op_zero(DisasContext *s, DisasOps *o)
{
    o->out = tcg_constant_i64(0);
    return DISAS_NEXT;
}
4932 
/* Produce a zero output operand pair.  */
static DisasJumpType op_zero2(DisasContext *s, DisasOps *o)
{
    o->out = tcg_constant_i64(0);
    o->out2 = o->out;
    return DISAS_NEXT;
}
4939 
4940 #ifndef CONFIG_USER_ONLY
/* CLP: PCI "call logical processor".  */
static DisasJumpType op_clp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_clp(tcg_env, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4949 
/* PCILG: PCI load.  */
static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_pcilg(tcg_env, r1, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4959 
/* PCISTG: PCI store.  */
static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_pcistg(tcg_env, r1, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4969 
/* STPCIFC: store PCI function controls; b2 is passed as the AR.  */
static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));

    gen_helper_stpcifc(tcg_env, r1, o->addr1, ar);
    set_cc_static(s);
    return DISAS_NEXT;
}
4979 
/* SIC: set interruption controls.  */
static DisasJumpType op_sic(DisasContext *s, DisasOps *o)
{
    gen_helper_sic(tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}
4985 
/* RPCIT: refresh PCI translations.  */
static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_rpcit(tcg_env, r1, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4995 
/* PCISTB: PCI store block; b2 is passed as the AR.  */
static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
    TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));

    gen_helper_pcistb(tcg_env, r1, r3, o->addr1, ar);
    set_cc_static(s);
    return DISAS_NEXT;
}
5006 
/* MPCIFC: modify PCI function controls; b2 is passed as the AR.  */
static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));

    gen_helper_mpcifc(tcg_env, r1, o->addr1, ar);
    set_cc_static(s);
    return DISAS_NEXT;
}
5016 #endif
5017 
5018 #include "translate_vx.c.inc"
5019 
5020 /* ====================================================================== */
5021 /* The "Cc OUTput" generators.  Given the generated output (and in some cases
5022    the original inputs), update the various cc data structures in order to
5023    be able to compute the new condition code.  */
5024 
static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}

static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}

static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}

static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}

static void cout_addu32(DisasContext *s, DisasOps *o)
{
    /*
     * 32-bit unsigned add whose result was computed in 64 bits:
     * cc_src receives the carry-out (bit 32 of the wide result),
     * cc_dst the zero-extended 32-bit sum.
     */
    tcg_gen_shri_i64(cc_src, o->out, 32);
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, cc_dst);
}

static void cout_addu64(DisasContext *s, DisasOps *o)
{
    /* 64-bit unsigned add: cc_src is assumed to already hold the carry
       computed by the operation itself.  */
    gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, o->out);
}

static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}

static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}

static void cout_cmpu32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
}

static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}

static void cout_f32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
}

static void cout_f64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
}

static void cout_f128(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
}

static void cout_nabs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
}

static void cout_nabs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
}

static void cout_neg32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
}

static void cout_neg64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
}

static void cout_nz32(DisasContext *s, DisasOps *o)
{
    /* Test only the low 32 bits of the result against zero.  */
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
}

static void cout_nz64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
}

static void cout_s32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
}

static void cout_s64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
}

static void cout_subs32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
}

static void cout_subs64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
}

static void cout_subu32(DisasContext *s, DisasOps *o)
{
    /*
     * 32-bit unsigned subtract computed in 64 bits.  The *arithmetic*
     * right shift replicates bit 32 so cc_src becomes 0 (no borrow) or
     * -1 (borrow); cc_dst gets the zero-extended 32-bit difference.
     */
    tcg_gen_sari_i64(cc_src, o->out, 32);
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, cc_dst);
}

static void cout_subu64(DisasContext *s, DisasOps *o)
{
    /* 64-bit unsigned subtract: cc_src is assumed to already hold the
       borrow computed by the operation itself.  */
    gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, o->out);
}

static void cout_tm32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
}

static void cout_tm64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
}

static void cout_muls32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_MULS_32, o->out);
}

static void cout_muls64(DisasContext *s, DisasOps *o)
{
    /* out contains "high" part, out2 contains "low" part of 128 bit result */
    gen_op_update2_cc_i64(s, CC_OP_MULS_64, o->out, o->out2);
}
5175 
5176 /* ====================================================================== */
5177 /* The "PREParation" generators.  These initialize the DisasOps.OUT fields
5178    with the TCG register to which we will write.  Used in combination with
5179    the "wout" generators, in some cases we need a new temporary, and in
5180    some cases we can write to a TCG global.  */
5181 
/* Allocate a fresh temporary for the single 64-bit output.  */
static void prep_new(DisasContext *s, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}
#define SPEC_prep_new 0

/* Allocate fresh temporaries for a pair of 64-bit outputs.  */
static void prep_new_P(DisasContext *s, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}
#define SPEC_prep_new_P 0

/* Allocate a fresh 128-bit temporary output.  */
static void prep_new_x(DisasContext *s, DisasOps *o)
{
    o->out_128 = tcg_temp_new_i128();
}
#define SPEC_prep_new_x 0

/* Write directly into the r1 TCG global; no separate wout step needed.  */
static void prep_r1(DisasContext *s, DisasOps *o)
{
    o->out = regs[get_field(s, r1)];
}
#define SPEC_prep_r1 0

/* Write directly into the even/odd register pair r1:r1+1; the SPEC
   constraint enforces that r1 is even.  */
static void prep_r1_P(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->out = regs[r1];
    o->out2 = regs[r1 + 1];
}
#define SPEC_prep_r1_P SPEC_r1_even
5214 
5215 /* ====================================================================== */
5216 /* The "Write OUTput" generators.  These generally perform some non-trivial
5217    copy of data to TCG globals, or to main memory.  The trivial cases are
5218    generally handled by having a "prep" generator install the TCG global
5219    as the destination of the operation.  */
5220 
static void wout_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->out);
}
#define SPEC_wout_r1 0

static void wout_out2_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->out2);
}
#define SPEC_wout_out2_r1 0

/* Insert the low 8 bits of the result into r1, preserving the rest.  */
static void wout_r1_8(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}
#define SPEC_wout_r1_8 0

/* Insert the low 16 bits of the result into r1, preserving the rest.  */
static void wout_r1_16(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
}
#define SPEC_wout_r1_16 0

static void wout_r1_32(DisasContext *s, DisasOps *o)
{
    store_reg32_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_r1_32 0

/* Store into the high 32 bits of r1 (high-word facility).  */
static void wout_r1_32h(DisasContext *s, DisasOps *o)
{
    store_reg32h_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_r1_32h 0

/* Store the two 32-bit outputs into the even/odd pair r1:r1+1.  */
static void wout_r1_P32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64(r1 + 1, o->out2);
}
#define SPEC_wout_r1_P32 SPEC_r1_even

/* Split a 64-bit result across the even/odd pair: low half to r1+1,
   high half to r1.  */
static void wout_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    TCGv_i64 t = tcg_temp_new_i64();
    store_reg32_i64(r1 + 1, o->out);
    tcg_gen_shri_i64(t, o->out, 32);
    store_reg32_i64(r1, t);
}
#define SPEC_wout_r1_D32 SPEC_r1_even

/* Split a 128-bit result across the even/odd pair: low doubleword to
   r1+1, high doubleword to r1.  */
static void wout_r1_D64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_extr_i128_i64(regs[r1 + 1], regs[r1], o->out_128);
}
#define SPEC_wout_r1_D64 SPEC_r1_even

static void wout_r3_P32(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    store_reg32_i64(r3, o->out);
    store_reg32_i64(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P32 SPEC_r3_even

static void wout_r3_P64(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    store_reg(r3, o->out);
    store_reg(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P64 SPEC_r3_even

/* Store a short (32-bit) FP result into f1.  */
static void wout_e1(DisasContext *s, DisasOps *o)
{
    store_freg32_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_e1 0

/* Store a long (64-bit) FP result into f1.  */
static void wout_f1(DisasContext *s, DisasOps *o)
{
    store_freg(get_field(s, r1), o->out);
}
#define SPEC_wout_f1 0

/* Store an extended (128-bit) FP result into the f1:f1+2 pair.  */
static void wout_x1(DisasContext *s, DisasOps *o)
{
    int f1 = get_field(s, r1);

    /* Split out_128 into out+out2 for cout_f128. */
    tcg_debug_assert(o->out == NULL);
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(o->out2, o->out, o->out_128);
    store_freg(f1, o->out);
    store_freg(f1 + 2, o->out2);
}
#define SPEC_wout_x1 SPEC_r1_f128

/* As wout_x1, but the result is already split across out/out2.  */
static void wout_x1_P(DisasContext *s, DisasOps *o)
{
    int f1 = get_field(s, r1);
    store_freg(f1, o->out);
    store_freg(f1 + 2, o->out2);
}
#define SPEC_wout_x1_P SPEC_r1_f128

/* Store only when r1 and r2 differ (when equal, no write is needed).  */
static void wout_cond_r1r2_32(DisasContext *s, DisasOps *o)
{
    if (get_field(s, r1) != get_field(s, r2)) {
        store_reg32_i64(get_field(s, r1), o->out);
    }
}
#define SPEC_wout_cond_r1r2_32 0

/* FP analogue of wout_cond_r1r2_32.  */
static void wout_cond_e1e2(DisasContext *s, DisasOps *o)
{
    if (get_field(s, r1) != get_field(s, r2)) {
        store_freg32_i64(get_field(s, r1), o->out);
    }
}
#define SPEC_wout_cond_e1e2 0

static void wout_m1_8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_UB);
}
#define SPEC_wout_m1_8 0

static void wout_m1_16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUW);
}
#define SPEC_wout_m1_16 0

#ifndef CONFIG_USER_ONLY
/* As wout_m1_16 but with an alignment check on the store.  */
static void wout_m1_16a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN);
}
#define SPEC_wout_m1_16a 0
#endif

static void wout_m1_32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUL);
}
#define SPEC_wout_m1_32 0

#ifndef CONFIG_USER_ONLY
/* As wout_m1_32 but with an alignment check on the store.  */
static void wout_m1_32a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_wout_m1_32a 0
#endif

static void wout_m1_64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUQ);
}
#define SPEC_wout_m1_64 0

#ifndef CONFIG_USER_ONLY
/* As wout_m1_64 but with an alignment check on the store.  */
static void wout_m1_64a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUQ | MO_ALIGN);
}
#define SPEC_wout_m1_64a 0
#endif

/* Store 32 bits at the operand-2 address (held in o->in2).  */
static void wout_m2_32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->in2, get_mem_index(s), MO_TEUL);
}
#define SPEC_wout_m2_32 0

/* Copy the (unmodified) second input into r1.  */
static void wout_in2_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->in2);
}
#define SPEC_wout_in2_r1 0

static void wout_in2_r1_32(DisasContext *s, DisasOps *o)
{
    store_reg32_i64(get_field(s, r1), o->in2);
}
#define SPEC_wout_in2_r1_32 0
5416 
5417 /* ====================================================================== */
5418 /* The "INput 1" generators.  These load the first operand to an insn.  */
5419 
/* Load r1 into a fresh temporary.  */
static void in1_r1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r1));
}
#define SPEC_in1_r1 0

/* Alias the r1 TCG global directly ("_o" = original, no copy); the
   operation must not modify in1.  */
static void in1_r1_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r1)];
}
#define SPEC_in1_r1_o 0

/* Sign-extended low 32 bits of r1.  */
static void in1_r1_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1)]);
}
#define SPEC_in1_r1_32s 0

/* Zero-extended low 32 bits of r1.  */
static void in1_r1_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1)]);
}
#define SPEC_in1_r1_32u 0

/* High 32 bits of r1, shifted down ("sr32" = shift right 32).  */
static void in1_r1_sr32(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(s, r1)], 32);
}
#define SPEC_in1_r1_sr32 0

/* The odd register of the r1 pair.  */
static void in1_r1p1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r1) + 1);
}
#define SPEC_in1_r1p1 SPEC_r1_even

static void in1_r1p1_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r1) + 1];
}
#define SPEC_in1_r1p1_o SPEC_r1_even

static void in1_r1p1_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1) + 1]);
}
#define SPEC_in1_r1p1_32s SPEC_r1_even

static void in1_r1p1_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1) + 1]);
}
#define SPEC_in1_r1p1_32u SPEC_r1_even

/* Concatenate the 32-bit halves of the even/odd pair: r1 supplies the
   high word, r1+1 the low word.  */
static void in1_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
}
#define SPEC_in1_r1_D32 SPEC_r1_even

static void in1_r2(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r2));
}
#define SPEC_in1_r2 0

static void in1_r2_sr32(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(s, r2)], 32);
}
#define SPEC_in1_r2_sr32 0

static void in1_r2_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r2)]);
}
#define SPEC_in1_r2_32u 0

static void in1_r3(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r3));
}
#define SPEC_in1_r3 0

static void in1_r3_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r3)];
}
#define SPEC_in1_r3_o 0

static void in1_r3_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r3)]);
}
#define SPEC_in1_r3_32s 0

static void in1_r3_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r3)]);
}
#define SPEC_in1_r3_32u 0

static void in1_r3_D32(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
}
#define SPEC_in1_r3_D32 SPEC_r3_even

static void in1_r3_sr32(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(s, r3)], 32);
}
#define SPEC_in1_r3_sr32 0

/* Short (32-bit) FP operand from f1.  */
static void in1_e1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(s, r1));
}
#define SPEC_in1_e1 0

/* Long (64-bit) FP operand from f1.  */
static void in1_f1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r1));
}
#define SPEC_in1_f1 0

/* Extended (128-bit) FP operand from the f1 register pair.  */
static void in1_x1(DisasContext *s, DisasOps *o)
{
    o->in1_128 = load_freg_128(get_field(s, r1));
}
#define SPEC_in1_x1 SPEC_r1_f128

/* Load the high double word of an extended (128-bit) format FP number */
static void in1_x2h(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r2));
}
#define SPEC_in1_x2h SPEC_r2_f128

static void in1_f3(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r3));
}
#define SPEC_in1_f3 0

/* Effective address of the first operand (base b1 + displacement d1).  */
static void in1_la1(DisasContext *s, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
}
#define SPEC_in1_la1 0

/* Effective address of the second operand (index x2 if present, base
   b2, displacement d2), stored as addr1.  */
static void in1_la2(DisasContext *s, DisasOps *o)
{
    int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
}
#define SPEC_in1_la2 0

/* The in1_m1_* variants load from the first-operand address with the
   size and extension indicated by the suffix.  */
static void in1_m1_8u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_UB);
}
#define SPEC_in1_m1_8u 0

static void in1_m1_16s(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TESW);
}
#define SPEC_in1_m1_16s 0

static void in1_m1_16u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUW);
}
#define SPEC_in1_m1_16u 0

static void in1_m1_32s(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TESL);
}
#define SPEC_in1_m1_32s 0

static void in1_m1_32u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUL);
}
#define SPEC_in1_m1_32u 0

static void in1_m1_64(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUQ);
}
#define SPEC_in1_m1_64 0
5639 
5640 /* ====================================================================== */
5641 /* The "INput 2" generators.  These load the second operand to an insn.  */
5642 
/* Alias the r1 TCG global directly; the operation must not modify in2.  */
static void in2_r1_o(DisasContext *s, DisasOps *o)
{
    o->in2 = regs[get_field(s, r1)];
}
#define SPEC_in2_r1_o 0

static void in2_r1_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r1)]);
}
#define SPEC_in2_r1_16u 0

static void in2_r1_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r1)]);
}
#define SPEC_in2_r1_32u 0

/* Concatenate the 32-bit halves of the even/odd r1 pair (r1 high,
   r1+1 low) into one 64-bit value.  */
static void in2_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
}
#define SPEC_in2_r1_D32 SPEC_r1_even

static void in2_r2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_reg(get_field(s, r2));
}
#define SPEC_in2_r2 0

static void in2_r2_o(DisasContext *s, DisasOps *o)
{
    o->in2 = regs[get_field(s, r2)];
}
#define SPEC_in2_r2_o 0

/* Load r2 only when non-zero; r2 == 0 leaves in2 NULL for ops that
   treat register 0 as "no operand".  */
static void in2_r2_nz(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);
    if (r2 != 0) {
        o->in2 = load_reg(r2);
    }
}
#define SPEC_in2_r2_nz 0

static void in2_r2_8s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_8s 0

static void in2_r2_8u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_8u 0

static void in2_r2_16s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_16s 0

static void in2_r2_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_16u 0

static void in2_r3(DisasContext *s, DisasOps *o)
{
    o->in2 = load_reg(get_field(s, r3));
}
#define SPEC_in2_r3 0

/* Concatenate the even/odd r3 pair into a 128-bit value (r3 high,
   r3+1 low).  */
static void in2_r3_D64(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    o->in2_128 = tcg_temp_new_i128();
    tcg_gen_concat_i64_i128(o->in2_128, regs[r3 + 1], regs[r3]);
}
#define SPEC_in2_r3_D64 SPEC_r3_even

static void in2_r3_sr32(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(s, r3)], 32);
}
#define SPEC_in2_r3_sr32 0

static void in2_r3_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r3)]);
}
#define SPEC_in2_r3_32u 0

static void in2_r2_32s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_32s 0

static void in2_r2_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_32u 0

static void in2_r2_sr32(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(s, r2)], 32);
}
#define SPEC_in2_r2_sr32 0

/* Short (32-bit) FP operand from f2.  */
static void in2_e2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(s, r2));
}
#define SPEC_in2_e2 0

/* Long (64-bit) FP operand from f2.  */
static void in2_f2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg(get_field(s, r2));
}
#define SPEC_in2_f2 0

/* Extended (128-bit) FP operand from the f2 register pair.  */
static void in2_x2(DisasContext *s, DisasOps *o)
{
    o->in2_128 = load_freg_128(get_field(s, r2));
}
#define SPEC_in2_x2 SPEC_r2_f128

/* Load the low double word of an extended (128-bit) format FP number */
static void in2_x2l(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg(get_field(s, r2) + 2);
}
#define SPEC_in2_x2l SPEC_r2_f128

/* Register value used as an address, wrapped per addressing mode.  */
static void in2_ra2(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);

    /* Note: *don't* treat !r2 as 0, use the reg value. */
    o->in2 = tcg_temp_new_i64();
    gen_addi_and_wrap_i64(s, o->in2, regs[r2], 0);
}
#define SPEC_in2_ra2 0
5803 
5804 static void in2_ra2_E(DisasContext *s, DisasOps *o)
5805 {
5806     return in2_ra2(s, o);
5807 }
5808 #define SPEC_in2_ra2_E SPEC_r2_even
5809 
/* Effective address of the second operand (index x2 if present, base
   b2, displacement d2).  */
static void in2_a2(DisasContext *s, DisasOps *o)
{
    int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
    o->in2 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
}
#define SPEC_in2_a2 0

/*
 * Resolve a relative-immediate branch/load target: either a constant
 * pc + i2*2 (immediate form) or a register value, as decided by
 * disas_jdest.
 */
static TCGv gen_ri2(DisasContext *s)
{
    TCGv ri2 = NULL;
    bool is_imm;
    int imm;

    disas_jdest(s, i2, is_imm, imm, ri2);
    if (is_imm) {
        ri2 = tcg_constant_i64(s->base.pc_next + (int64_t)imm * 2);
    }

    return ri2;
}

static void in2_ri2(DisasContext *s, DisasOps *o)
{
    o->in2 = gen_ri2(s);
}
#define SPEC_in2_ri2 0

/* Shift count operand: the low 6 bits of d2 when no base register,
   otherwise the low 6 bits of the computed address.  */
static void in2_sh(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s, b2);
    int d2 = get_field(s, d2);

    if (b2 == 0) {
        o->in2 = tcg_constant_i64(d2 & 0x3f);
    } else {
        o->in2 = get_address(s, 0, b2, d2);
        tcg_gen_andi_i64(o->in2, o->in2, 0x3f);
    }
}
#define SPEC_in2_sh 0

/* The in2_m2_* variants compute the operand-2 address into in2 and then
   load through it, with the size/extension indicated by the suffix.  */
static void in2_m2_8u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_UB);
}
#define SPEC_in2_m2_8u 0

static void in2_m2_16s(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TESW);
}
#define SPEC_in2_m2_16s 0

static void in2_m2_16u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUW);
}
#define SPEC_in2_m2_16u 0

static void in2_m2_32s(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TESL);
}
#define SPEC_in2_m2_32s 0

static void in2_m2_32u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUL);
}
#define SPEC_in2_m2_32u 0

#ifndef CONFIG_USER_ONLY
/* As in2_m2_32u but with an alignment check on the load.  */
static void in2_m2_32ua(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_tl(o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_in2_m2_32ua 0
#endif

static void in2_m2_64(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ);
}
#define SPEC_in2_m2_64 0

/* As in2_m2_64, then wrap the loaded value per the addressing mode
   (for values that are themselves addresses).  */
static void in2_m2_64w(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ);
    gen_addi_and_wrap_i64(s, o->in2, o->in2, 0);
}
#define SPEC_in2_m2_64w 0

#ifndef CONFIG_USER_ONLY
/* As in2_m2_64 but with an alignment check on the load.  */
static void in2_m2_64a(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ | MO_ALIGN);
}
#define SPEC_in2_m2_64a 0
#endif

/* The in2_mri2_* variants load from a pc-relative address (gen_ri2).  */
static void in2_mri2_16s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s), MO_TESW);
}
#define SPEC_in2_mri2_16s 0

static void in2_mri2_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s), MO_TEUW);
}
#define SPEC_in2_mri2_16u 0

static void in2_mri2_32s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_tl(o->in2, gen_ri2(s), get_mem_index(s),
                       MO_TESL | MO_ALIGN);
}
#define SPEC_in2_mri2_32s 0

static void in2_mri2_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_tl(o->in2, gen_ri2(s), get_mem_index(s),
                       MO_TEUL | MO_ALIGN);
}
#define SPEC_in2_mri2_32u 0

static void in2_mri2_64(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s),
                        MO_TEUQ | MO_ALIGN);
}
#define SPEC_in2_mri2_64 0
5956 
/* Immediate operand, sign behavior as stored in the i2 field.  */
static void in2_i2(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64(get_field(s, i2));
}
#define SPEC_in2_i2 0

/* Immediate zero-extended from 8 bits.  */
static void in2_i2_8u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64((uint8_t)get_field(s, i2));
}
#define SPEC_in2_i2_8u 0

/* Immediate zero-extended from 16 bits.  */
static void in2_i2_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64((uint16_t)get_field(s, i2));
}
#define SPEC_in2_i2_16u 0

/* Immediate zero-extended from 32 bits.  */
static void in2_i2_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64((uint32_t)get_field(s, i2));
}
#define SPEC_in2_i2_32u 0

/* 16-bit immediate, zero-extended then shifted left by the per-insn
   amount stored in insn->data.  */
static void in2_i2_16u_shl(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(s, i2);
    o->in2 = tcg_constant_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_16u_shl 0

/* 32-bit immediate, zero-extended then shifted left by insn->data.  */
static void in2_i2_32u_shl(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(s, i2);
    o->in2 = tcg_constant_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_32u_shl 0

#ifndef CONFIG_USER_ONLY
/* The raw instruction bits themselves, for helpers that need them.  */
static void in2_insn(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64(s->fields.raw_insn);
}
#define SPEC_in2_insn 0
#endif
6002 
6003 /* ====================================================================== */
6004 
6005 /* Find opc within the table of insns.  This is formulated as a switch
6006    statement so that (1) we get compile-time notice of cut-paste errors
6007    for duplicated opcodes, and (2) the compiler generates the binary
6008    search tree, rather than us having to post-process the table.  */
6009 
/* C: entry with neither extra data nor flags.  */
#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0)

/* D: entry with a per-insn data value, no flags.  */
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0)

/* F: entry with flags, no per-insn data.  */
#define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \
    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL)

/* First expansion of the table: one enumerator per instruction.  */
#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM,

enum DisasInsnEnum {
#include "insn-data.h.inc"
};

/* Second expansion: one DisasInsn initializer per instruction, wiring
   the helper generators by name and OR-ing their SPEC constraints.  */
#undef E
#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) {                   \
    .opc = OPC,                                                             \
    .flags = FL,                                                            \
    .fmt = FMT_##FT,                                                        \
    .fac = FAC_##FC,                                                        \
    .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W,  \
    .name = #NM,                                                            \
    .help_in1 = in1_##I1,                                                   \
    .help_in2 = in2_##I2,                                                   \
    .help_prep = prep_##P,                                                  \
    .help_wout = wout_##W,                                                  \
    .help_cout = cout_##CC,                                                 \
    .help_op = op_##OP,                                                     \
    .data = D                                                               \
 },

/* Allow 0 to be used for NULL in the table below.  */
#define in1_0  NULL
#define in2_0  NULL
#define prep_0  NULL
#define wout_0  NULL
#define cout_0  NULL
#define op_0  NULL

#define SPEC_in1_0 0
#define SPEC_in2_0 0
#define SPEC_prep_0 0
#define SPEC_wout_0 0
6054 
/* Give smaller names to the various facilities.  */
/* These names are pasted onto the FC field of each insn-data.h.inc entry
   (via .fac = FAC_##FC above) and resolve to S390_FEAT_* feature bits.  */
#define FAC_Z           S390_FEAT_ZARCH
#define FAC_CASS        S390_FEAT_COMPARE_AND_SWAP_AND_STORE
#define FAC_DFP         S390_FEAT_DFP
#define FAC_DFPR        S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* DFP-rounding */
#define FAC_DO          S390_FEAT_STFLE_45 /* distinct-operands */
#define FAC_EE          S390_FEAT_EXECUTE_EXT
#define FAC_EI          S390_FEAT_EXTENDED_IMMEDIATE
#define FAC_FPE         S390_FEAT_FLOATING_POINT_EXT
#define FAC_FPSSH       S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* FPS-sign-handling */
#define FAC_FPRGR       S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* FPR-GR-transfer */
#define FAC_GIE         S390_FEAT_GENERAL_INSTRUCTIONS_EXT
#define FAC_HFP_MA      S390_FEAT_HFP_MADDSUB
#define FAC_HW          S390_FEAT_STFLE_45 /* high-word */
#define FAC_IEEEE_SIM   S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* IEEE-exception-simulation */
#define FAC_MIE         S390_FEAT_STFLE_49 /* misc-instruction-extensions */
#define FAC_LAT         S390_FEAT_STFLE_49 /* load-and-trap */
#define FAC_LOC         S390_FEAT_STFLE_45 /* load/store on condition 1 */
#define FAC_LOC2        S390_FEAT_STFLE_53 /* load/store on condition 2 */
#define FAC_LD          S390_FEAT_LONG_DISPLACEMENT
#define FAC_PC          S390_FEAT_STFLE_45 /* population count */
#define FAC_SCF         S390_FEAT_STORE_CLOCK_FAST
#define FAC_SFLE        S390_FEAT_STFLE
#define FAC_ILA         S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
#define FAC_MVCOS       S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
#define FAC_LPP         S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
#define FAC_DAT_ENH     S390_FEAT_DAT_ENH
#define FAC_E2          S390_FEAT_EXTENDED_TRANSLATION_2
#define FAC_EH          S390_FEAT_STFLE_49 /* execution-hint */
#define FAC_PPA         S390_FEAT_STFLE_49 /* processor-assist */
#define FAC_LZRB        S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
#define FAC_ETF3        S390_FEAT_EXTENDED_TRANSLATION_3
#define FAC_MSA         S390_FEAT_MSA /* message-security-assist facility */
#define FAC_MSA3        S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
#define FAC_MSA4        S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
#define FAC_MSA5        S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
#define FAC_MSA8        S390_FEAT_MSA_EXT_8 /* msa-extension-8 facility */
#define FAC_ECT         S390_FEAT_EXTRACT_CPU_TIME
#define FAC_PCI         S390_FEAT_ZPCI /* z/PCI facility */
#define FAC_AIS         S390_FEAT_ADAPTER_INT_SUPPRESSION
#define FAC_V           S390_FEAT_VECTOR /* vector facility */
#define FAC_VE          S390_FEAT_VECTOR_ENH  /* vector enhancements facility 1 */
#define FAC_VE2         S390_FEAT_VECTOR_ENH2 /* vector enhancements facility 2 */
#define FAC_MIE2        S390_FEAT_MISC_INSTRUCTION_EXT2 /* miscellaneous-instruction-extensions facility 2 */
#define FAC_MIE3        S390_FEAT_MISC_INSTRUCTION_EXT3 /* miscellaneous-instruction-extensions facility 3 */
6100 
/* The full table of decoded-insn descriptors, indexed by DisasInsnEnum. */
static const DisasInsn insn_info[] = {
#include "insn-data.h.inc"
};

/* Third expansion: map a 16-bit (major << 8 | op2) opcode to its entry. */
#undef E
#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \
    case OPC: return &insn_info[insn_ ## NM];

/* Return the descriptor for OPC, or NULL if unknown/unimplemented. */
static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.h.inc"
    default:
        return NULL;
    }
}

/* Done expanding the insn table; retire the helper macros. */
#undef F
#undef E
#undef D
#undef C
6122 
/* Extract a field from the insn.  The INSN should be left-aligned in
   the uint64_t so that we can more easily utilize the big-bit-endian
   definitions we extract from the Principles of Operation.  */

static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
{
    uint32_t r, m;

    /* A zero-width field is absent from this insn format.  */
    if (f->size == 0) {
        return;
    }

    /* Zero extract the field from the insn.  */
    r = (insn << f->beg) >> (64 - f->size);

    /* Sign-extend, or un-swap the field as necessary.  */
    switch (f->type) {
    case 0: /* unsigned */
        break;
    case 1: /* signed */
        assert(f->size <= 32);
        /* Classic sign-extension idiom: xor in the sign bit, subtract
           it back out.  */
        m = 1u << (f->size - 1);
        r = (r ^ m) - m;
        break;
    case 2: /* dl+dh split, signed 20 bit. */
        /* Reassemble: DH (high 8 bits, sign-extended) goes above the
           12-bit DL.  */
        r = ((int8_t)r << 12) | (r >> 8);
        break;
    case 3: /* MSB stored in RXB */
        g_assert(f->size == 4);
        /* The 4-bit field holds the low bits; the most-significant
           bit lives in the RXB area (insn bits 36-39), selected by
           where the field begins.  */
        switch (f->beg) {
        case 8:
            r |= extract64(insn, 63 - 36, 1) << 4;
            break;
        case 12:
            r |= extract64(insn, 63 - 37, 1) << 4;
            break;
        case 16:
            r |= extract64(insn, 63 - 38, 1) << 4;
            break;
        case 32:
            r |= extract64(insn, 63 - 39, 1) << 4;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    default:
        abort();
    }

    /*
     * Validate that the "compressed" encoding we selected above is valid.
     * I.e. we haven't made two different original fields overlap.
     */
    assert(((o->presentC >> f->indexC) & 1) == 0);
    o->presentC |= 1 << f->indexC;
    o->presentO |= 1 << f->indexO;

    o->c[f->indexC] = r;
}
6183 
/* Lookup the insn at the current PC, extracting the operands into
   s->fields and returning the info struct for the insn.  Returns NULL
   for an invalid insn.  Also records the insn length in s->ilen and the
   address of the following insn in s->pc_tmp.  */

static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s)
{
    uint64_t insn, pc = s->base.pc_next;
    int op, op2, ilen;
    const DisasInsn *info;

    if (unlikely(s->ex_value)) {
        uint64_t be_insn;

        /* Drop the EX data now, so that it's clear on exception paths.  */
        tcg_gen_st_i64(tcg_constant_i64(0), tcg_env,
                       offsetof(CPUS390XState, ex_value));

        /* Extract the values saved by EXECUTE.  */
        insn = s->ex_value & 0xffffffffffff0000ull;
        ilen = s->ex_value & 0xf;
        op = insn >> 56;

        /* Register insn bytes with translator so plugins work. */
        be_insn = cpu_to_be64(insn);
        translator_fake_ld(&s->base, &be_insn, get_ilen(op));
    } else {
        /* The first halfword determines the length (2, 4 or 6 bytes);
           left-align the complete insn for extract_field.  */
        insn = ld_code2(env, s, pc);
        op = (insn >> 8) & 0xff;
        ilen = get_ilen(op);
        switch (ilen) {
        case 2:
            insn = insn << 48;
            break;
        case 4:
            insn = ld_code4(env, s, pc) << 32;
            break;
        case 6:
            insn = (insn << 48) | (ld_code4(env, s, pc + 2) << 16);
            break;
        default:
            g_assert_not_reached();
        }
    }
    s->pc_tmp = s->base.pc_next + ilen;
    s->ilen = ilen;

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0. */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE, IE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        /* Secondary opcode in the second byte.  */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        /* Secondary opcode in bits 12-15.  */
        op2 = (insn << 12) >> 60;
        break;
    case 0xc5: /* MII */
    case 0xc7: /* SMI */
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        /* No secondary opcode at all.  */
        op2 = 0;
        break;
    default:
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(&s->fields, 0, sizeof(s->fields));
    s->fields.raw_insn = insn;
    s->fields.op = op;
    s->fields.op2 = op2;

    /* Lookup the instruction.  */
    info = lookup_opc(op << 8 | op2);
    s->insn = info;

    /* If we found it, extract the operands.  */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(&s->fields, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
6292 
static bool is_afp_reg(int reg)
{
    /* The basic FP registers are 0, 2, 4 and 6; anything odd-numbered
       or above 6 is an additional-FP register.  */
    return (reg & 1) != 0 || reg > 6;
}
6297 
static bool is_fp_pair(int reg)
{
    /* Valid f128 register pairs begin at 0,1,4,5,8,9,12,13 -- exactly
       those register numbers with bit 1 clear.  */
    return (reg & 0x2) == 0;
}
6303 
/*
 * Translate the single instruction at s->base.pc_next.
 *
 * Decodes the insn, checks privilege/AFP/vector/specification
 * constraints, then runs the table-driven helpers in order:
 * in1 -> in2 -> prep -> op -> wout -> cout.  Returns the disas status
 * (DISAS_NORETURN when an exception was raised).  Always advances
 * s->base.pc_next past the instruction before returning.
 */
static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    DisasJumpType ret = DISAS_NEXT;
    DisasOps o = {};
    bool icount = false;

    /* Search for the insn in the table.  */
    insn = extract_insn(env, s);

    /* Update insn_start now that we know the ILEN.  */
    tcg_set_insn_start_param(s->base.insn_start, 2, s->ilen);

    /* Not found means unimplemented/illegal opcode.  */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
                      s->fields.op, s->fields.op2);
        gen_illegal_opcode(s);
        ret = DISAS_NORETURN;
        goto out;
    }

#ifndef CONFIG_USER_ONLY
    /* With PER tracing active, record the instruction fetch.  */
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 addr = tcg_constant_i64(s->base.pc_next);
        gen_helper_per_ifetch(tcg_env, addr);
    }
#endif

    /* process flags */
    if (insn->flags) {
        /* privileged instruction */
        if ((s->base.tb->flags & FLAG_MASK_PSTATE) && (insn->flags & IF_PRIV)) {
            gen_program_exception(s, PGM_PRIVILEGED);
            ret = DISAS_NORETURN;
            goto out;
        }

        /* if AFP is not enabled, instructions and registers are forbidden */
        if (!(s->base.tb->flags & FLAG_MASK_AFP)) {
            uint8_t dxc = 0;

            if ((insn->flags & IF_AFP1) && is_afp_reg(get_field(s, r1))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP2) && is_afp_reg(get_field(s, r2))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP3) && is_afp_reg(get_field(s, r3))) {
                dxc = 1;
            }
            if (insn->flags & IF_BFP) {
                dxc = 2;
            }
            if (insn->flags & IF_DFP) {
                dxc = 3;
            }
            if (insn->flags & IF_VEC) {
                dxc = 0xfe;
            }
            /* A non-zero DXC selects the data-exception subtype to raise. */
            if (dxc) {
                gen_data_exception(dxc);
                ret = DISAS_NORETURN;
                goto out;
            }
        }

        /* if vector instructions not enabled, executing them is forbidden */
        if (insn->flags & IF_VEC) {
            if (!((s->base.tb->flags & FLAG_MASK_VECTOR))) {
                gen_data_exception(0xfe);
                ret = DISAS_NORETURN;
                goto out;
            }
        }

        /* input/output is the special case for icount mode */
        if (unlikely(insn->flags & IF_IO)) {
            icount = translator_io_start(&s->base);
        }
    }

    /* Check for insn specification exceptions.  */
    if (insn->spec) {
        if ((insn->spec & SPEC_r1_even && get_field(s, r1) & 1) ||
            (insn->spec & SPEC_r2_even && get_field(s, r2) & 1) ||
            (insn->spec & SPEC_r3_even && get_field(s, r3) & 1) ||
            (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(s, r1))) ||
            (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(s, r2)))) {
            gen_program_exception(s, PGM_SPECIFICATION);
            ret = DISAS_NORETURN;
            goto out;
        }
    }

    /* Implement the instruction.  */
    if (insn->help_in1) {
        insn->help_in1(s, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    /* Skip output/cc writeback when the operation raised an exception.  */
    if (ret != DISAS_NORETURN) {
        if (insn->help_wout) {
            insn->help_wout(s, &o);
        }
        if (insn->help_cout) {
            insn->help_cout(s, &o);
        }
    }

    /* io should be the last instruction in tb when icount is enabled */
    if (unlikely(icount && ret == DISAS_NEXT)) {
        ret = DISAS_TOO_MANY;
    }

#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        /* An exception might be triggered, save PSW if not already done.  */
        if (ret == DISAS_NEXT || ret == DISAS_TOO_MANY) {
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
        }

        /* Call the helper to check for a possible PER exception.  */
        gen_helper_per_check_exception(tcg_env);
    }
#endif

out:
    /* Advance to the next instruction.  */
    s->base.pc_next = s->pc_tmp;
    return ret;
}
6443 
/* TranslatorOps hook: set up the per-TB DisasContext state.  */
static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /* 31-bit mode */
    if (!(dc->base.tb->flags & FLAG_MASK_64)) {
        dc->base.pc_first &= 0x7fffffff;
        dc->base.pc_next = dc->base.pc_first;
    }

    dc->cc_op = CC_OP_DYNAMIC;
    /* A non-zero cs_base carries the target insn saved by EXECUTE.  */
    dc->ex_value = dc->base.tb->cs_base;
    /* Both PER tracing and EXECUTE require exiting to the main loop.  */
    dc->exit_to_mainloop = (dc->base.tb->flags & FLAG_MASK_PER) || dc->ex_value;
}
6458 
/* TranslatorOps hook: nothing to emit at the start of a TB.  */
static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}
6462 
/* TranslatorOps hook: record (pc, cc_op, ilen) at each insn start.  */
static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /* Delay the set of ilen until we've read the insn. */
    tcg_gen_insn_start(dc->base.pc_next, dc->cc_op, 0);
}
6470 
/* Return the address of the insn following the one at PC, by reading
   its first halfword to determine the encoded length.  */
static target_ulong get_next_pc(CPUS390XState *env, DisasContext *s,
                                uint64_t pc)
{
    uint64_t insn = cpu_lduw_code(env, pc);

    return pc + get_ilen((insn >> 8) & 0xff);
}
6478 
/* TranslatorOps hook: translate one insn and decide whether to stop.  */
static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    CPUS390XState *env = cpu_env(cs);
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    dc->base.is_jmp = translate_one(env, dc);
    if (dc->base.is_jmp == DISAS_NEXT) {
        /* End the TB early for EXECUTE, or when either the next insn or
           the one after it would cross a page boundary.  */
        if (dc->ex_value ||
            !is_same_page(dcbase, dc->base.pc_next) ||
            !is_same_page(dcbase, get_next_pc(env, dc, dc->base.pc_next))) {
            dc->base.is_jmp = DISAS_TOO_MANY;
        }
    }
}
6493 
/* TranslatorOps hook: emit the TB epilogue according to is_jmp.  */
static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    switch (dc->base.is_jmp) {
    case DISAS_NORETURN:
        /* An exception path already ended the TB; nothing to emit.  */
        break;
    case DISAS_TOO_MANY:
        update_psw_addr(dc);
        /* FALLTHRU */
    case DISAS_PC_UPDATED:
        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
           cc op type is in env */
        update_cc_op(dc);
        /* FALLTHRU */
    case DISAS_PC_CC_UPDATED:
        /* Exit the TB, either by raising a debug exception or by return.  */
        if (dc->exit_to_mainloop) {
            tcg_gen_exit_tb(NULL, 0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        break;
    default:
        g_assert_not_reached();
    }
}
6521 
/* TranslatorOps hook: custom disas logging for EXECUTE'd insns.
   Returns true when we handled the logging ourselves.  */
static bool s390x_tr_disas_log(const DisasContextBase *dcbase,
                               CPUState *cs, FILE *logfile)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (unlikely(dc->ex_value)) {
        /* The ex_value has been recorded with translator_fake_ld. */
        fprintf(logfile, "IN: EXECUTE\n");
        target_disas(logfile, cs, &dc->base);
        return true;
    }
    /* Fall back to the generic disas logging.  */
    return false;
}
6535 
/* Hook table handed to the generic translator loop below.  */
static const TranslatorOps s390x_tr_ops = {
    .init_disas_context = s390x_tr_init_disas_context,
    .tb_start           = s390x_tr_tb_start,
    .insn_start         = s390x_tr_insn_start,
    .translate_insn     = s390x_tr_translate_insn,
    .tb_stop            = s390x_tr_tb_stop,
    .disas_log          = s390x_tr_disas_log,
};
6544 
/* Entry point: translate the TB at PC via the generic translator loop.  */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           vaddr pc, void *host_pc)
{
    DisasContext dc;

    translator_loop(cs, tb, max_insns, pc, host_pc, &s390x_tr_ops, &dc.base);
}
6552 
/*
 * Restore CPU state from the insn_start data recorded at translation
 * time: data[0] = psw.addr, data[1] = cc_op, data[2] = ilen.
 */
void s390x_restore_state_to_opc(CPUState *cs,
                                const TranslationBlock *tb,
                                const uint64_t *data)
{
    CPUS390XState *env = cpu_env(cs);
    int cc_op = data[1];

    env->psw.addr = data[0];

    /* Update the CC opcode if it is not already up-to-date.  */
    if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
        env->cc_op = cc_op;
    }

    /* Record ILEN.  */
    env->int_pgm_ilen = data[2];
}
6570