xref: /openbmc/qemu/target/s390x/tcg/translate.c (revision 67043607d17cf62f4ae8110151c44fb73295e66f)
/*
 *  S/390 translation
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2010 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/* #define DEBUG_INLINE_BRANCHES */
#define S390X_DEBUG_DISAS
/* #define S390X_DEBUG_DISAS_VERBOSE */

#ifdef S390X_DEBUG_DISAS_VERBOSE
#  define LOG_DISAS(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif

#include "qemu/osdep.h"
#include "cpu.h"
#include "s390x-internal.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "qemu/log.h"
#include "qemu/host-utils.h"
#include "exec/cpu_ldst.h"
#include "exec/gen-icount.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/translator.h"
#include "exec/log.h"
#include "qemu/atomic128.h"


/* Information that (most) every instruction needs to manipulate.  */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;

/*
 * Define a structure to hold the decoded fields.  We'll store each inside
 * an array indexed by an enum.  In order to conserve memory, we'll arrange
 * for fields that do not exist at the same time to overlap, thus the "C"
 * for compact.  For checking purposes there is an "O" for original index
 * as well that will be applied to availability bitmaps.
 */

enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_m5,
    FLD_O_m6,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5,
    FLD_O_v1,
    FLD_O_v2,
    FLD_O_v3,
    FLD_O_v4,
};

enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,
    FLD_C_v1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,
    FLD_C_v3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,
    FLD_C_v4 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,
    FLD_C_m5 = 4,

    FLD_C_d2 = 5,
    FLD_C_m6 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,
    FLD_C_v2 = 6,

    NUM_C_FIELD = 7
};

struct DisasFields {
    uint64_t raw_insn;
    unsigned op:8;
    unsigned op2:8;
    unsigned presentC:16;
    unsigned int presentO;
    int c[NUM_C_FIELD];
};
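
/*
 * A minimal sketch of how the two index spaces cooperate (hypothetical
 * helpers, not part of the translator): availability is always tested
 * against the original "O" bitmap, while the value lives in a shared
 * compact "C" slot.
 */
#if 0
static bool example_has_x2(const DisasFields *f)
{
    /* The "O" bitmap records which fields were actually decoded. */
    return (f->presentO >> FLD_O_x2) & 1;
}

static int example_get_x2(const DisasFields *f)
{
    /* The compact slot is shared with d4, l2 and v2, which never
       coexist with x2 in a single instruction format. */
    return f->c[FLD_C_x2];
}
#endif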

struct DisasContext {
    DisasContextBase base;
    const DisasInsn *insn;
    DisasFields fields;
    uint64_t ex_value;
    /*
     * During translate_one(), pc_tmp is used to determine the instruction
     * to be executed after base.pc_next - e.g. next sequential instruction
     * or a branch target.
     */
    uint64_t pc_tmp;
    uint32_t ilen;
    enum cc_op cc_op;
    bool do_debug;
};

/* Information carried about a condition to be evaluated.  */
typedef struct {
    TCGCond cond:8;
    bool is_64;
    bool g1;
    bool g2;
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;

#ifdef DEBUG_INLINE_BRANCHES
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif

static void pc_to_link_info(TCGv_i64 out, DisasContext *s, uint64_t pc)
{
    TCGv_i64 tmp;

    if (s->base.tb->flags & FLAG_MASK_32) {
        if (s->base.tb->flags & FLAG_MASK_64) {
            tcg_gen_movi_i64(out, pc);
            return;
        }
        pc |= 0x80000000;
    }
    assert(!(s->base.tb->flags & FLAG_MASK_64));
    tmp = tcg_const_i64(pc);
    tcg_gen_deposit_i64(out, out, tmp, 0, 32);
    tcg_temp_free_i64(tmp);
}
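
/*
 * Worked example (assumed values): with pc == 0x123456 in 31-bit mode,
 * the link value becomes 0x80123456 - the addressing-mode bit is folded
 * into bit 31 and only the low 32 bits of the output register are
 * replaced.  In 64-bit mode the full pc is used unchanged.
 */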

static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;
static TCGv_i64 gbea;

static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

static char cpu_reg_names[16][4];
static TCGv_i64 regs[16];

void s390x_translate_init(void)
{
    int i;

    psw_addr = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");
    gbea = tcg_global_mem_new_i64(cpu_env,
                                  offsetof(CPUS390XState, gbea),
                                  "gbea");

    cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }
}

static inline int vec_full_reg_offset(uint8_t reg)
{
    g_assert(reg < 32);
    return offsetof(CPUS390XState, vregs[reg][0]);
}

static inline int vec_reg_offset(uint8_t reg, uint8_t enr, MemOp es)
{
    /* Convert element size (es) - e.g. MO_8 - to bytes */
    const uint8_t bytes = 1 << es;
    int offs = enr * bytes;

    /*
     * vregs[n][0] is the lowest 8 bytes and vregs[n][1] the highest 8 bytes
     * of the 16 byte vector, on both little and big endian systems.
     *
     * Big Endian (target/possible host)
     * B:  [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15]
     * HW: [     0][     1][     2][     3] - [     4][     5][     6][     7]
     * W:  [             0][             1] - [             2][             3]
     * DW: [                             0] - [                             1]
     *
     * Little Endian (possible host)
     * B:  [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8]
     * HW: [     3][     2][     1][     0] - [     7][     6][     5][     4]
     * W:  [             1][             0] - [             3][             2]
     * DW: [                             0] - [                             1]
     *
     * For 16 byte elements, the two 8 byte halves will not form a host
     * int128 if the host is little endian, since they're in the wrong order.
     * Some operations (e.g. xor) do not care. For operations like addition,
     * the two 8 byte elements have to be loaded separately. Let's force all
     * 16 byte operations to handle it in a special way.
     */
    g_assert(es <= MO_64);
#ifndef HOST_WORDS_BIGENDIAN
    offs ^= (8 - bytes);
#endif
    return offs + vec_full_reg_offset(reg);
}
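
/*
 * Worked example (assumed little-endian host): for es == MO_32 and
 * enr == 1, bytes == 4 and offs starts at 4; the XOR with (8 - 4)
 * yields 0, i.e. word element 1 of the big-endian register image is
 * found at host offset 0 within vregs[n][0].  On a big-endian host,
 * offs stays 4.
 */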

static inline int freg64_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_64);
}

static inline int freg32_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_32);
}

static TCGv_i64 load_reg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;
}

static TCGv_i64 load_freg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld_i64(r, cpu_env, freg64_offset(reg));
    return r;
}

static TCGv_i64 load_freg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld32u_i64(r, cpu_env, freg32_offset(reg));
    return r;
}

static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_st_i64(v, cpu_env, freg64_offset(reg));
}

static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}

static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}

static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_st32_i64(v, cpu_env, freg32_offset(reg));
}

static void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}

static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->base.pc_next);
}

static void per_branch(DisasContext *s, bool to_next)
{
#ifndef CONFIG_USER_ONLY
    tcg_gen_movi_i64(gbea, s->base.pc_next);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 next_pc = to_next ? tcg_const_i64(s->pc_tmp) : psw_addr;
        gen_helper_per_branch(cpu_env, gbea, next_pc);
        if (to_next) {
            tcg_temp_free_i64(next_pc);
        }
    }
#endif
}

static void per_branch_cond(DisasContext *s, TCGCond cond,
                            TCGv_i64 arg1, TCGv_i64 arg2)
{
#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGLabel *lab = gen_new_label();
        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);

        tcg_gen_movi_i64(gbea, s->base.pc_next);
        gen_helper_per_branch(cpu_env, gbea, psw_addr);

        gen_set_label(lab);
    } else {
        TCGv_i64 pc = tcg_const_i64(s->base.pc_next);
        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
        tcg_temp_free_i64(pc);
    }
#endif
}

static void per_breaking_event(DisasContext *s)
{
    tcg_gen_movi_i64(gbea, s->base.pc_next);
}

static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}

static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)cpu_lduw_code(env, pc);
}

static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
}

static int get_mem_index(DisasContext *s)
{
#ifdef CONFIG_USER_ONLY
    return MMU_USER_IDX;
#else
    if (!(s->base.tb->flags & FLAG_MASK_DAT)) {
        return MMU_REAL_IDX;
    }

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_PRIMARY_IDX;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_SECONDARY_IDX;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        return MMU_HOME_IDX;
    default:
        tcg_abort();
        break;
    }
#endif
}

static void gen_exception(int excp)
{
    TCGv_i32 tmp = tcg_const_i32(excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exception this was.  */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    tmp = tcg_const_i32(s->ilen);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* update the psw */
    update_psw_addr(s);

    /* Save off cc.  */
    update_cc_op(s);

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);
}

static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}

static inline void gen_data_exception(uint8_t dxc)
{
    TCGv_i32 tmp = tcg_const_i32(dxc);
    gen_helper_data_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

static inline void gen_trap(DisasContext *s)
{
    /* Set DXC to 0xff */
    gen_data_exception(0xff);
}

static void gen_addi_and_wrap_i64(DisasContext *s, TCGv_i64 dst, TCGv_i64 src,
                                  int64_t imm)
{
    tcg_gen_addi_i64(dst, src, imm);
    if (!(s->base.tb->flags & FLAG_MASK_64)) {
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_andi_i64(dst, dst, 0x7fffffff);
        } else {
            tcg_gen_andi_i64(dst, dst, 0x00ffffff);
        }
    }
}

static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /*
     * Note that d2 is limited to 20 bits, signed.  If we crop negative
     * displacements early we create larger immediate addends.
     */
    if (b2 && x2) {
        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
        gen_addi_and_wrap_i64(s, tmp, tmp, d2);
    } else if (b2) {
        gen_addi_and_wrap_i64(s, tmp, regs[b2], d2);
    } else if (x2) {
        gen_addi_and_wrap_i64(s, tmp, regs[x2], d2);
    } else if (!(s->base.tb->flags & FLAG_MASK_64)) {
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_movi_i64(tmp, d2 & 0x7fffffff);
        } else {
            tcg_gen_movi_i64(tmp, d2 & 0x00ffffff);
        }
    } else {
        tcg_gen_movi_i64(tmp, d2);
    }

    return tmp;
}
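
/*
 * Worked example (assumed values): in 24-bit mode, a base register
 * holding 0x00fffff0 with d2 == 0x20 and no index sums to 0x01000010,
 * which gen_addi_and_wrap_i64() masks down to 0x000010 - matching the
 * architecture's wrap-around address arithmetic.
 */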

static inline bool live_cc_data(DisasContext *s)
{
    return (s->cc_op != CC_OP_DYNAMIC
            && s->cc_op != CC_OP_STATIC
            && s->cc_op > 3);
}

static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}

static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}

static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}

/* CC value is in env->cc_op */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}

/* calculates cc into cc_op */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = NULL;
    TCGv_i64 dummy = NULL;

    switch (s->cc_op) {
    default:
        dummy = tcg_const_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
        local_cc_op = tcg_const_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        break;
    }

    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
    case CC_OP_LCBB:
    case CC_OP_MULS_32:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ADDU:
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA_32:
    case CC_OP_SLA_64:
    case CC_OP_SUBU:
    case CC_OP_NZ_F128:
    case CC_OP_VC:
    case CC_OP_MULS_64:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    if (local_cc_op) {
        tcg_temp_free_i32(local_cc_op);
    }
    if (dummy) {
        tcg_temp_free_i64(dummy);
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}

static bool use_exit_tb(DisasContext *s)
{
    return s->base.singlestep_enabled ||
            (tb_cflags(s->base.tb) & CF_LAST_IO) ||
            (s->base.tb->flags & FLAG_MASK_PER);
}

static bool use_goto_tb(DisasContext *s, uint64_t dest)
{
    if (unlikely(use_exit_tb(s))) {
        return false;
    }
#ifndef CONFIG_USER_ONLY
    return (dest & TARGET_PAGE_MASK) == (s->base.tb->pc & TARGET_PAGE_MASK) ||
           (dest & TARGET_PAGE_MASK) == (s->base.pc_next & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}

/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.  */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};
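
/*
 * Worked example: the branch mask selects CC values from left to right
 * (bit 8 -> CC0/EQ, 4 -> CC1/LT, 2 -> CC2/GT, 1 -> CC3), so a
 * "branch on equal or low" mask of 8 | 4 indexes entry 12 above and
 * maps to TCG_COND_LE.
 */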

/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible.  */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER, TCG_COND_NEVER,      /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER,
    TCG_COND_NE, TCG_COND_NE,            /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE,
    TCG_COND_EQ, TCG_COND_EQ,            /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};

/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op.  */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU:
    case CC_OP_SUBU:
        switch (mask) {
        case 8 | 2: /* result == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* result != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* !carry (borrow) */
            cond = old_cc_op == CC_OP_ADDU ? TCG_COND_EQ : TCG_COND_NE;
            break;
        case 2 | 1: /* carry (!borrow) */
            cond = old_cc_op == CC_OP_ADDU ? TCG_COND_NE : TCG_COND_EQ;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_ADDU:
    case CC_OP_SUBU:
        c->is_64 = true;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        switch (mask) {
        case 8 | 2:
        case 4 | 1: /* result */
            c->u.s64.a = cc_dst;
            break;
        case 8 | 4:
        case 2 | 1: /* carry */
            c->u.s64.a = cc_src;
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case CC_OP_STATIC:
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask.  */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}

static void free_compare(DisasCompare *c)
{
    if (!c->g1) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.a);
        } else {
            tcg_temp_free_i32(c->u.s32.a);
        }
    }
    if (!c->g2) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.b);
        } else {
            tcg_temp_free_i32(c->u.s32.b);
        }
    }
}

/* ====================================================================== */
/* Define the insn format enumeration.  */
#define F0(N)                         FMT_##N,
#define F1(N, X1)                     F0(N)
#define F2(N, X1, X2)                 F0(N)
#define F3(N, X1, X2, X3)             F0(N)
#define F4(N, X1, X2, X3, X4)         F0(N)
#define F5(N, X1, X2, X3, X4, X5)     F0(N)
#define F6(N, X1, X2, X3, X4, X5, X6) F0(N)

typedef enum {
#include "insn-format.def"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6

/* This is the way fields are to be accessed out of DisasFields.  */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)

static bool have_field1(const DisasContext *s, enum DisasFieldIndexO c)
{
    return (s->fields.presentO >> c) & 1;
}

static int get_field1(const DisasContext *s, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(s, o));
    return s->fields.c[c];
}
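
/*
 * Usage sketch: operand helpers never index DisasFields directly;
 * "get_field(s, r1)" expands to get_field1(s, FLD_O_r1, FLD_C_r1), so
 * the availability assert runs against the original index while the
 * value is read from the compact slot.
 */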

/* Describe the layout of each field in each format.  */
typedef struct DisasField {
    unsigned int beg:8;
    unsigned int size:8;
    unsigned int type:2;
    unsigned int indexC:6;
    enum DisasFieldIndexO indexO:8;
} DisasField;

typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;

#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
#define V(N, B)       {  B,  4, 3, FLD_C_v##N, FLD_O_v##N }
#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }

#define F0(N)                     { { } },
#define F1(N, X1)                 { { X1 } },
#define F2(N, X1, X2)             { { X1, X2 } },
#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
#define F6(N, X1, X2, X3, X4, X5, X6)       { { X1, X2, X3, X4, X5, X6 } },

static const DisasFormatInfo format_info[] = {
#include "insn-format.def"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6
#undef R
#undef M
#undef V
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L

/* Generally, we'll extract operands into this structure, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details.  */
typedef struct {
    bool g_out, g_out2, g_in1, g_in2;
    TCGv_i64 out, out2, in1, in2;
    TCGv_i64 addr1;
} DisasOps;

/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>.  */

#define SPEC_r1_even    1
#define SPEC_r2_even    2
#define SPEC_r3_even    4
#define SPEC_r1_f128    8
#define SPEC_r2_f128    16

/* Return values from translate_one, indicating the state of the TB.  */

/* We are not using a goto_tb (for whatever reason), but have updated
   the PC (for whatever reason), so there's no need to do it again on
   exiting the TB.  */
#define DISAS_PC_UPDATED        DISAS_TARGET_0

/* We have emitted one or more goto_tb.  No fixup required.  */
#define DISAS_GOTO_TB           DISAS_TARGET_1

/* We have updated the PC and CC values.  */
#define DISAS_PC_CC_UPDATED     DISAS_TARGET_2

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the PC for the next instruction to be executed.  */
#define DISAS_PC_STALE          DISAS_TARGET_3

/* We are exiting the TB to the main loop.  */
#define DISAS_PC_STALE_NOCHAIN  DISAS_TARGET_4


/* Instruction flags */
#define IF_AFP1     0x0001      /* r1 is a fp reg for HFP/FPS instructions */
#define IF_AFP2     0x0002      /* r2 is a fp reg for HFP/FPS instructions */
#define IF_AFP3     0x0004      /* r3 is a fp reg for HFP/FPS instructions */
#define IF_BFP      0x0008      /* binary floating point instruction */
#define IF_DFP      0x0010      /* decimal floating point instruction */
#define IF_PRIV     0x0020      /* privileged instruction */
#define IF_VEC      0x0040      /* vector instruction */
#define IF_IO       0x0080      /* input/output instruction */

struct DisasInsn {
    unsigned opc:16;
    unsigned flags:16;
    DisasFormat fmt:8;
    unsigned fac:8;
    unsigned spec:8;

    const char *name;

    /* Pre-process arguments before HELP_OP.  */
    void (*help_in1)(DisasContext *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasOps *);

    /*
     * Post-process output after HELP_OP.
     * Note that these are not called if HELP_OP returns DISAS_NORETURN.
     */
    void (*help_wout)(DisasContext *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);

    /* Implement the operation itself.  */
    DisasJumpType (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;
};
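
/*
 * A rough sketch of how one of these entries is consumed per insn
 * (assumed control flow, simplified; the real dispatcher also checks
 * the spec constraints and instruction flags):
 *
 *     insn->help_in1(s, &o);          load first input
 *     insn->help_in2(s, &o);          load second input
 *     insn->help_prep(s, &o);         set up the output
 *     ret = insn->help_op(s, &o);     the operation proper
 *     if (ret != DISAS_NORETURN) {
 *         insn->help_wout(s, &o);     write back the output
 *         insn->help_cout(s, &o);     compute the condition code
 *     }
 */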

/* ====================================================================== */
/* Miscellaneous helpers, used by several operations.  */

static void help_l2_shift(DisasContext *s, DisasOps *o, int mask)
{
    int b2 = get_field(s, b2);
    int d2 = get_field(s, d2);

    if (b2 == 0) {
        o->in2 = tcg_const_i64(d2 & mask);
    } else {
        o->in2 = get_address(s, 0, b2, d2);
        tcg_gen_andi_i64(o->in2, o->in2, mask);
    }
}

static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->pc_tmp) {
        per_branch(s, true);
        return DISAS_NEXT;
    }
    if (use_goto_tb(s, dest)) {
        update_cc_op(s);
        per_breaking_event(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb(s->base.tb, 0);
        return DISAS_GOTO_TB;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    }
}

static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
                                 bool is_imm, int imm, TCGv_i64 cdest)
{
    DisasJumpType ret;
    uint64_t dest = s->base.pc_next + 2 * imm;
    TCGLabel *lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = DISAS_NEXT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->pc_tmp) {
            /* Branch to next.  */
            per_branch(s, true);
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (!cdest) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            per_branch(s, false);
            ret = DISAS_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->pc_tmp)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            /* Branch taken.  */
            gen_set_label(lab);
            per_breaking_event(s);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb(s->base.tb, 1);

            ret = DISAS_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            per_breaking_event(s);
            ret = DISAS_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_const_i64(s->pc_tmp);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
        } else {
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = DISAS_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}

/* ====================================================================== */
/* The operations.  These perform the bulk of the work for any insn,
   usually after the operands have been loaded and output initialized.  */

static DisasJumpType op_abs(DisasContext *s, DisasOps *o)
{
    tcg_gen_abs_i64(o->out, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return DISAS_NEXT;
}

static DisasJumpType op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return DISAS_NEXT;
}

static DisasJumpType op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_addu64(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    return DISAS_NEXT;
}

/* Compute carry into cc_src. */
static void compute_carry(DisasContext *s)
{
    switch (s->cc_op) {
    case CC_OP_ADDU:
        /* The carry value is already in cc_src (1,0). */
        break;
    case CC_OP_SUBU:
        tcg_gen_addi_i64(cc_src, cc_src, 1);
        break;
    default:
        gen_op_calc_cc(s);
        /* fall through */
    case CC_OP_STATIC:
        /* The carry flag is the msb of CC; compute into cc_src. */
        tcg_gen_extu_i32_i64(cc_src, cc_op);
        tcg_gen_shri_i64(cc_src, cc_src, 1);
        break;
    }
}
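
/*
 * Worked example: the logical-add condition codes are 0..3 and only
 * CC 2 and 3 mean "carry", so in the CC_OP_STATIC case the carry is
 * simply cc >> 1, which is what the shift above computes.  For
 * CC_OP_SUBU, cc_src holds the borrow as -1/0 (see the != 0 test in
 * disas_jcc), so adding 1 turns it into a 0/1 carry.
 */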
1418 
1419 static DisasJumpType op_addc32(DisasContext *s, DisasOps *o)
1420 {
1421     compute_carry(s);
1422     tcg_gen_add_i64(o->out, o->in1, o->in2);
1423     tcg_gen_add_i64(o->out, o->out, cc_src);
1424     return DISAS_NEXT;
1425 }
1426 
1427 static DisasJumpType op_addc64(DisasContext *s, DisasOps *o)
1428 {
1429     compute_carry(s);
1430 
1431     TCGv_i64 zero = tcg_const_i64(0);
1432     tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, zero);
1433     tcg_gen_add2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);
1434     tcg_temp_free_i64(zero);
1435 
1436     return DISAS_NEXT;
1437 }
1438 
1439 static DisasJumpType op_asi(DisasContext *s, DisasOps *o)
1440 {
1441     bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);
1442 
1443     o->in1 = tcg_temp_new_i64();
1444     if (non_atomic) {
1445         tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
1446     } else {
1447         /* Perform the atomic addition in memory. */
1448         tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
1449                                      s->insn->data);
1450     }
1451 
1452     /* Recompute also for atomic case: needed for setting CC. */
1453     tcg_gen_add_i64(o->out, o->in1, o->in2);
1454 
1455     if (non_atomic) {
1456         tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
1457     }
1458     return DISAS_NEXT;
1459 }
1460 
1461 static DisasJumpType op_asiu64(DisasContext *s, DisasOps *o)
1462 {
1463     bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);
1464 
1465     o->in1 = tcg_temp_new_i64();
1466     if (non_atomic) {
1467         tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
1468     } else {
1469         /* Perform the atomic addition in memory. */
1470         tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
1471                                      s->insn->data);
1472     }
1473 
1474     /* Recompute also for atomic case: needed for setting CC. */
1475     tcg_gen_movi_i64(cc_src, 0);
1476     tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
1477 
1478     if (non_atomic) {
1479         tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
1480     }
1481     return DISAS_NEXT;
1482 }
1483 
1484 static DisasJumpType op_aeb(DisasContext *s, DisasOps *o)
1485 {
1486     gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
1487     return DISAS_NEXT;
1488 }
1489 
1490 static DisasJumpType op_adb(DisasContext *s, DisasOps *o)
1491 {
1492     gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
1493     return DISAS_NEXT;
1494 }
1495 
1496 static DisasJumpType op_axb(DisasContext *s, DisasOps *o)
1497 {
1498     gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
1499     return_low128(o->out2);
1500     return DISAS_NEXT;
1501 }
1502 
1503 static DisasJumpType op_and(DisasContext *s, DisasOps *o)
1504 {
1505     tcg_gen_and_i64(o->out, o->in1, o->in2);
1506     return DISAS_NEXT;
1507 }
1508 
1509 static DisasJumpType op_andi(DisasContext *s, DisasOps *o)
1510 {
1511     int shift = s->insn->data & 0xff;
1512     int size = s->insn->data >> 8;
1513     uint64_t mask = ((1ull << size) - 1) << shift;
1514 
1515     assert(!o->g_in2);
1516     tcg_gen_shli_i64(o->in2, o->in2, shift);
1517     tcg_gen_ori_i64(o->in2, o->in2, ~mask);
1518     tcg_gen_and_i64(o->out, o->in1, o->in2);
1519 
1520     /* Produce the CC from only the bits manipulated.  */
1521     tcg_gen_andi_i64(cc_dst, o->out, mask);
1522     set_cc_nz_u64(s, cc_dst);
1523     return DISAS_NEXT;
1524 }
1525 
1526 static DisasJumpType op_ni(DisasContext *s, DisasOps *o)
1527 {
1528     o->in1 = tcg_temp_new_i64();
1529 
1530     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
1531         tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
1532     } else {
1533         /* Perform the atomic operation in memory. */
1534         tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
1535                                      s->insn->data);
1536     }
1537 
1538     /* Recompute also for atomic case: needed for setting CC. */
1539     tcg_gen_and_i64(o->out, o->in1, o->in2);
1540 
1541     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
1542         tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
1543     }
1544     return DISAS_NEXT;
1545 }
1546 
1547 static DisasJumpType op_bas(DisasContext *s, DisasOps *o)
1548 {
1549     pc_to_link_info(o->out, s, s->pc_tmp);
1550     if (o->in2) {
1551         tcg_gen_mov_i64(psw_addr, o->in2);
1552         per_branch(s, false);
1553         return DISAS_PC_UPDATED;
1554     } else {
1555         return DISAS_NEXT;
1556     }
1557 }
1558 
1559 static void save_link_info(DisasContext *s, DisasOps *o)
1560 {
1561     TCGv_i64 t;
1562 
1563     if (s->base.tb->flags & (FLAG_MASK_32 | FLAG_MASK_64)) {
1564         pc_to_link_info(o->out, s, s->pc_tmp);
1565         return;
1566     }
1567     gen_op_calc_cc(s);
1568     tcg_gen_andi_i64(o->out, o->out, 0xffffffff00000000ull);
1569     tcg_gen_ori_i64(o->out, o->out, ((s->ilen / 2) << 30) | s->pc_tmp);
1570     t = tcg_temp_new_i64();
1571     tcg_gen_shri_i64(t, psw_mask, 16);
1572     tcg_gen_andi_i64(t, t, 0x0f000000);
1573     tcg_gen_or_i64(o->out, o->out, t);
1574     tcg_gen_extu_i32_i64(t, cc_op);
1575     tcg_gen_shli_i64(t, t, 28);
1576     tcg_gen_or_i64(o->out, o->out, t);
1577     tcg_temp_free_i64(t);
1578 }
1579 
1580 static DisasJumpType op_bal(DisasContext *s, DisasOps *o)
1581 {
1582     save_link_info(s, o);
1583     if (o->in2) {
1584         tcg_gen_mov_i64(psw_addr, o->in2);
1585         per_branch(s, false);
1586         return DISAS_PC_UPDATED;
1587     } else {
1588         return DISAS_NEXT;
1589     }
1590 }
1591 
1592 static DisasJumpType op_basi(DisasContext *s, DisasOps *o)
1593 {
1594     pc_to_link_info(o->out, s, s->pc_tmp);
1595     return help_goto_direct(s, s->base.pc_next + 2 * get_field(s, i2));
1596 }
1597 
1598 static DisasJumpType op_bc(DisasContext *s, DisasOps *o)
1599 {
1600     int m1 = get_field(s, m1);
1601     bool is_imm = have_field(s, i2);
1602     int imm = is_imm ? get_field(s, i2) : 0;
1603     DisasCompare c;
1604 
1605     /* BCR with R2 = 0 causes no branching */
1606     if (have_field(s, r2) && get_field(s, r2) == 0) {
1607         if (m1 == 14) {
1608             /* Perform serialization */
1609             /* FIXME: check for fast-BCR-serialization facility */
1610             tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1611         }
1612         if (m1 == 15) {
1613             /* Perform serialization */
1614             /* FIXME: perform checkpoint-synchronisation */
1615             tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1616         }
1617         return DISAS_NEXT;
1618     }
1619 
1620     disas_jcc(s, &c, m1);
1621     return help_branch(s, &c, is_imm, imm, o->in2);
1622 }
1623 
1624 static DisasJumpType op_bct32(DisasContext *s, DisasOps *o)
1625 {
1626     int r1 = get_field(s, r1);
1627     bool is_imm = have_field(s, i2);
1628     int imm = is_imm ? get_field(s, i2) : 0;
1629     DisasCompare c;
1630     TCGv_i64 t;
1631 
1632     c.cond = TCG_COND_NE;
1633     c.is_64 = false;
1634     c.g1 = false;
1635     c.g2 = false;
1636 
1637     t = tcg_temp_new_i64();
1638     tcg_gen_subi_i64(t, regs[r1], 1);
1639     store_reg32_i64(r1, t);
1640     c.u.s32.a = tcg_temp_new_i32();
1641     c.u.s32.b = tcg_const_i32(0);
1642     tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1643     tcg_temp_free_i64(t);
1644 
1645     return help_branch(s, &c, is_imm, imm, o->in2);
1646 }
1647 
1648 static DisasJumpType op_bcth(DisasContext *s, DisasOps *o)
1649 {
1650     int r1 = get_field(s, r1);
1651     int imm = get_field(s, i2);
1652     DisasCompare c;
1653     TCGv_i64 t;
1654 
1655     c.cond = TCG_COND_NE;
1656     c.is_64 = false;
1657     c.g1 = false;
1658     c.g2 = false;
1659 
1660     t = tcg_temp_new_i64();
1661     tcg_gen_shri_i64(t, regs[r1], 32);
1662     tcg_gen_subi_i64(t, t, 1);
1663     store_reg32h_i64(r1, t);
1664     c.u.s32.a = tcg_temp_new_i32();
1665     c.u.s32.b = tcg_const_i32(0);
1666     tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1667     tcg_temp_free_i64(t);
1668 
1669     return help_branch(s, &c, 1, imm, o->in2);
1670 }
1671 
1672 static DisasJumpType op_bct64(DisasContext *s, DisasOps *o)
1673 {
1674     int r1 = get_field(s, r1);
1675     bool is_imm = have_field(s, i2);
1676     int imm = is_imm ? get_field(s, i2) : 0;
1677     DisasCompare c;
1678 
1679     c.cond = TCG_COND_NE;
1680     c.is_64 = true;
1681     c.g1 = true;
1682     c.g2 = false;
1683 
1684     tcg_gen_subi_i64(regs[r1], regs[r1], 1);
1685     c.u.s64.a = regs[r1];
1686     c.u.s64.b = tcg_const_i64(0);
1687 
1688     return help_branch(s, &c, is_imm, imm, o->in2);
1689 }
1690 
1691 static DisasJumpType op_bx32(DisasContext *s, DisasOps *o)
1692 {
1693     int r1 = get_field(s, r1);
1694     int r3 = get_field(s, r3);
1695     bool is_imm = have_field(s, i2);
1696     int imm = is_imm ? get_field(s, i2) : 0;
1697     DisasCompare c;
1698     TCGv_i64 t;
1699 
1700     c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1701     c.is_64 = false;
1702     c.g1 = false;
1703     c.g2 = false;
1704 
1705     t = tcg_temp_new_i64();
1706     tcg_gen_add_i64(t, regs[r1], regs[r3]);
1707     c.u.s32.a = tcg_temp_new_i32();
1708     c.u.s32.b = tcg_temp_new_i32();
1709     tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1710     tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
1711     store_reg32_i64(r1, t);
1712     tcg_temp_free_i64(t);
1713 
1714     return help_branch(s, &c, is_imm, imm, o->in2);
1715 }
1716 
1717 static DisasJumpType op_bx64(DisasContext *s, DisasOps *o)
1718 {
1719     int r1 = get_field(s, r1);
1720     int r3 = get_field(s, r3);
1721     bool is_imm = have_field(s, i2);
1722     int imm = is_imm ? get_field(s, i2) : 0;
1723     DisasCompare c;
1724 
1725     c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1726     c.is_64 = true;
1727 
1728     if (r1 == (r3 | 1)) {
1729         c.u.s64.b = load_reg(r3 | 1);
1730         c.g2 = false;
1731     } else {
1732         c.u.s64.b = regs[r3 | 1];
1733         c.g2 = true;
1734     }
1735 
1736     tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
1737     c.u.s64.a = regs[r1];
1738     c.g1 = true;
1739 
1740     return help_branch(s, &c, is_imm, imm, o->in2);
1741 }
1742 
1743 static DisasJumpType op_cj(DisasContext *s, DisasOps *o)
1744 {
1745     int imm, m3 = get_field(s, m3);
1746     bool is_imm;
1747     DisasCompare c;
1748 
1749     c.cond = ltgt_cond[m3];
1750     if (s->insn->data) {
1751         c.cond = tcg_unsigned_cond(c.cond);
1752     }
1753     c.is_64 = c.g1 = c.g2 = true;
1754     c.u.s64.a = o->in1;
1755     c.u.s64.b = o->in2;
1756 
1757     is_imm = have_field(s, i4);
1758     if (is_imm) {
1759         imm = get_field(s, i4);
1760     } else {
1761         imm = 0;
1762         o->out = get_address(s, 0, get_field(s, b4),
1763                              get_field(s, d4));
1764     }
1765 
1766     return help_branch(s, &c, is_imm, imm, o->out);
1767 }
1768 
1769 static DisasJumpType op_ceb(DisasContext *s, DisasOps *o)
1770 {
1771     gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
1772     set_cc_static(s);
1773     return DISAS_NEXT;
1774 }
1775 
1776 static DisasJumpType op_cdb(DisasContext *s, DisasOps *o)
1777 {
1778     gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
1779     set_cc_static(s);
1780     return DISAS_NEXT;
1781 }
1782 
1783 static DisasJumpType op_cxb(DisasContext *s, DisasOps *o)
1784 {
1785     gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
1786     set_cc_static(s);
1787     return DISAS_NEXT;
1788 }
1789 
1790 static TCGv_i32 fpinst_extract_m34(DisasContext *s, bool m3_with_fpe,
1791                                    bool m4_with_fpe)
1792 {
1793     const bool fpe = s390_has_feat(S390_FEAT_FLOATING_POINT_EXT);
1794     uint8_t m3 = get_field(s, m3);
1795     uint8_t m4 = get_field(s, m4);
1796 
1797     /* m3 field was introduced with FPE */
1798     if (!fpe && m3_with_fpe) {
1799         m3 = 0;
1800     }
1801     /* m4 field was introduced with FPE */
1802     if (!fpe && m4_with_fpe) {
1803         m4 = 0;
1804     }
1805 
1806     /* Check for valid rounding modes. Mode 3 was introduced later. */
1807     if (m3 == 2 || m3 > 7 || (!fpe && m3 == 3)) {
1808         gen_program_exception(s, PGM_SPECIFICATION);
1809         return NULL;
1810     }
1811 
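         /* Pack both rounding modifiers into one i32 for the helpers:
            m3 in bits 0-3 and m4 in bits 4-7, e.g. m3 = 5 (round toward
            zero) with m4 = 1 yields deposit32(5, 4, 4, 1) == 0x15.  */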
1812     return tcg_const_i32(deposit32(m3, 4, 4, m4));
1813 }
1814 
1815 static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o)
1816 {
1817     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1818 
1819     if (!m34) {
1820         return DISAS_NORETURN;
1821     }
1822     gen_helper_cfeb(o->out, cpu_env, o->in2, m34);
1823     tcg_temp_free_i32(m34);
1824     set_cc_static(s);
1825     return DISAS_NEXT;
1826 }
1827 
1828 static DisasJumpType op_cfdb(DisasContext *s, DisasOps *o)
1829 {
1830     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1831 
1832     if (!m34) {
1833         return DISAS_NORETURN;
1834     }
1835     gen_helper_cfdb(o->out, cpu_env, o->in2, m34);
1836     tcg_temp_free_i32(m34);
1837     set_cc_static(s);
1838     return DISAS_NEXT;
1839 }
1840 
1841 static DisasJumpType op_cfxb(DisasContext *s, DisasOps *o)
1842 {
1843     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1844 
1845     if (!m34) {
1846         return DISAS_NORETURN;
1847     }
1848     gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m34);
1849     tcg_temp_free_i32(m34);
1850     set_cc_static(s);
1851     return DISAS_NEXT;
1852 }
1853 
1854 static DisasJumpType op_cgeb(DisasContext *s, DisasOps *o)
1855 {
1856     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1857 
1858     if (!m34) {
1859         return DISAS_NORETURN;
1860     }
1861     gen_helper_cgeb(o->out, cpu_env, o->in2, m34);
1862     tcg_temp_free_i32(m34);
1863     set_cc_static(s);
1864     return DISAS_NEXT;
1865 }
1866 
1867 static DisasJumpType op_cgdb(DisasContext *s, DisasOps *o)
1868 {
1869     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1870 
1871     if (!m34) {
1872         return DISAS_NORETURN;
1873     }
1874     gen_helper_cgdb(o->out, cpu_env, o->in2, m34);
1875     tcg_temp_free_i32(m34);
1876     set_cc_static(s);
1877     return DISAS_NEXT;
1878 }
1879 
1880 static DisasJumpType op_cgxb(DisasContext *s, DisasOps *o)
1881 {
1882     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1883 
1884     if (!m34) {
1885         return DISAS_NORETURN;
1886     }
1887     gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m34);
1888     tcg_temp_free_i32(m34);
1889     set_cc_static(s);
1890     return DISAS_NEXT;
1891 }
1892 
1893 static DisasJumpType op_clfeb(DisasContext *s, DisasOps *o)
1894 {
1895     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1896 
1897     if (!m34) {
1898         return DISAS_NORETURN;
1899     }
1900     gen_helper_clfeb(o->out, cpu_env, o->in2, m34);
1901     tcg_temp_free_i32(m34);
1902     set_cc_static(s);
1903     return DISAS_NEXT;
1904 }
1905 
1906 static DisasJumpType op_clfdb(DisasContext *s, DisasOps *o)
1907 {
1908     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1909 
1910     if (!m34) {
1911         return DISAS_NORETURN;
1912     }
1913     gen_helper_clfdb(o->out, cpu_env, o->in2, m34);
1914     tcg_temp_free_i32(m34);
1915     set_cc_static(s);
1916     return DISAS_NEXT;
1917 }
1918 
1919 static DisasJumpType op_clfxb(DisasContext *s, DisasOps *o)
1920 {
1921     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1922 
1923     if (!m34) {
1924         return DISAS_NORETURN;
1925     }
1926     gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m34);
1927     tcg_temp_free_i32(m34);
1928     set_cc_static(s);
1929     return DISAS_NEXT;
1930 }
1931 
1932 static DisasJumpType op_clgeb(DisasContext *s, DisasOps *o)
1933 {
1934     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1935 
1936     if (!m34) {
1937         return DISAS_NORETURN;
1938     }
1939     gen_helper_clgeb(o->out, cpu_env, o->in2, m34);
1940     tcg_temp_free_i32(m34);
1941     set_cc_static(s);
1942     return DISAS_NEXT;
1943 }
1944 
1945 static DisasJumpType op_clgdb(DisasContext *s, DisasOps *o)
1946 {
1947     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1948 
1949     if (!m34) {
1950         return DISAS_NORETURN;
1951     }
1952     gen_helper_clgdb(o->out, cpu_env, o->in2, m34);
1953     tcg_temp_free_i32(m34);
1954     set_cc_static(s);
1955     return DISAS_NEXT;
1956 }
1957 
1958 static DisasJumpType op_clgxb(DisasContext *s, DisasOps *o)
1959 {
1960     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1961 
1962     if (!m34) {
1963         return DISAS_NORETURN;
1964     }
1965     gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m34);
1966     tcg_temp_free_i32(m34);
1967     set_cc_static(s);
1968     return DISAS_NEXT;
1969 }
1970 
1971 static DisasJumpType op_cegb(DisasContext *s, DisasOps *o)
1972 {
1973     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
1974 
1975     if (!m34) {
1976         return DISAS_NORETURN;
1977     }
1978     gen_helper_cegb(o->out, cpu_env, o->in2, m34);
1979     tcg_temp_free_i32(m34);
1980     return DISAS_NEXT;
1981 }
1982 
1983 static DisasJumpType op_cdgb(DisasContext *s, DisasOps *o)
1984 {
1985     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
1986 
1987     if (!m34) {
1988         return DISAS_NORETURN;
1989     }
1990     gen_helper_cdgb(o->out, cpu_env, o->in2, m34);
1991     tcg_temp_free_i32(m34);
1992     return DISAS_NEXT;
1993 }
1994 
1995 static DisasJumpType op_cxgb(DisasContext *s, DisasOps *o)
1996 {
1997     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
1998 
1999     if (!m34) {
2000         return DISAS_NORETURN;
2001     }
2002     gen_helper_cxgb(o->out, cpu_env, o->in2, m34);
2003     tcg_temp_free_i32(m34);
2004     return_low128(o->out2);
2005     return DISAS_NEXT;
2006 }
2007 
2008 static DisasJumpType op_celgb(DisasContext *s, DisasOps *o)
2009 {
2010     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
2011 
2012     if (!m34) {
2013         return DISAS_NORETURN;
2014     }
2015     gen_helper_celgb(o->out, cpu_env, o->in2, m34);
2016     tcg_temp_free_i32(m34);
2017     return DISAS_NEXT;
2018 }
2019 
2020 static DisasJumpType op_cdlgb(DisasContext *s, DisasOps *o)
2021 {
2022     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
2023 
2024     if (!m34) {
2025         return DISAS_NORETURN;
2026     }
2027     gen_helper_cdlgb(o->out, cpu_env, o->in2, m34);
2028     tcg_temp_free_i32(m34);
2029     return DISAS_NEXT;
2030 }
2031 
2032 static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o)
2033 {
2034     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
2035 
2036     if (!m34) {
2037         return DISAS_NORETURN;
2038     }
2039     gen_helper_cxlgb(o->out, cpu_env, o->in2, m34);
2040     tcg_temp_free_i32(m34);
2041     return_low128(o->out2);
2042     return DISAS_NEXT;
2043 }
2044 
2045 static DisasJumpType op_cksm(DisasContext *s, DisasOps *o)
2046 {
2047     int r2 = get_field(s, r2);
2048     TCGv_i64 len = tcg_temp_new_i64();
2049 
2050     gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
2051     set_cc_static(s);
2052     return_low128(o->out);
2053 
2054     tcg_gen_add_i64(regs[r2], regs[r2], len);
2055     tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
2056     tcg_temp_free_i64(len);
2057 
2058     return DISAS_NEXT;
2059 }
2060 
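     /* COMPARE LOGICAL.  The L1 field encodes the operand length minus 1,
        so "l + 1" below is the true byte count: lengths 1, 2, 4 and 8 are
        inlined as two loads feeding the CC computation, and everything
        else is left to the helper.  */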
2061 static DisasJumpType op_clc(DisasContext *s, DisasOps *o)
2062 {
2063     int l = get_field(s, l1);
2064     TCGv_i32 vl;
2065 
2066     switch (l + 1) {
2067     case 1:
2068         tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
2069         tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
2070         break;
2071     case 2:
2072         tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
2073         tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
2074         break;
2075     case 4:
2076         tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
2077         tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
2078         break;
2079     case 8:
2080         tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
2081         tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
2082         break;
2083     default:
2084         vl = tcg_const_i32(l);
2085         gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
2086         tcg_temp_free_i32(vl);
2087         set_cc_static(s);
2088         return DISAS_NEXT;
2089     }
2090     gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
2091     return DISAS_NEXT;
2092 }
2093 
2094 static DisasJumpType op_clcl(DisasContext *s, DisasOps *o)
2095 {
2096     int r1 = get_field(s, r1);
2097     int r2 = get_field(s, r2);
2098     TCGv_i32 t1, t2;
2099 
2100     /* r1 and r2 must be even.  */
2101     if (r1 & 1 || r2 & 1) {
2102         gen_program_exception(s, PGM_SPECIFICATION);
2103         return DISAS_NORETURN;
2104     }
2105 
2106     t1 = tcg_const_i32(r1);
2107     t2 = tcg_const_i32(r2);
2108     gen_helper_clcl(cc_op, cpu_env, t1, t2);
2109     tcg_temp_free_i32(t1);
2110     tcg_temp_free_i32(t2);
2111     set_cc_static(s);
2112     return DISAS_NEXT;
2113 }
2114 
2115 static DisasJumpType op_clcle(DisasContext *s, DisasOps *o)
2116 {
2117     int r1 = get_field(s, r1);
2118     int r3 = get_field(s, r3);
2119     TCGv_i32 t1, t3;
2120 
2121     /* r1 and r3 must be even.  */
2122     if (r1 & 1 || r3 & 1) {
2123         gen_program_exception(s, PGM_SPECIFICATION);
2124         return DISAS_NORETURN;
2125     }
2126 
2127     t1 = tcg_const_i32(r1);
2128     t3 = tcg_const_i32(r3);
2129     gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
2130     tcg_temp_free_i32(t1);
2131     tcg_temp_free_i32(t3);
2132     set_cc_static(s);
2133     return DISAS_NEXT;
2134 }
2135 
2136 static DisasJumpType op_clclu(DisasContext *s, DisasOps *o)
2137 {
2138     int r1 = get_field(s, r1);
2139     int r3 = get_field(s, r3);
2140     TCGv_i32 t1, t3;
2141 
2142     /* r1 and r3 must be even.  */
2143     if (r1 & 1 || r3 & 1) {
2144         gen_program_exception(s, PGM_SPECIFICATION);
2145         return DISAS_NORETURN;
2146     }
2147 
2148     t1 = tcg_const_i32(r1);
2149     t3 = tcg_const_i32(r3);
2150     gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3);
2151     tcg_temp_free_i32(t1);
2152     tcg_temp_free_i32(t3);
2153     set_cc_static(s);
2154     return DISAS_NEXT;
2155 }
2156 
2157 static DisasJumpType op_clm(DisasContext *s, DisasOps *o)
2158 {
2159     TCGv_i32 m3 = tcg_const_i32(get_field(s, m3));
2160     TCGv_i32 t1 = tcg_temp_new_i32();
2161     tcg_gen_extrl_i64_i32(t1, o->in1);
2162     gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
2163     set_cc_static(s);
2164     tcg_temp_free_i32(t1);
2165     tcg_temp_free_i32(m3);
2166     return DISAS_NEXT;
2167 }
2168 
2169 static DisasJumpType op_clst(DisasContext *s, DisasOps *o)
2170 {
2171     gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
2172     set_cc_static(s);
2173     return_low128(o->in2);
2174     return DISAS_NEXT;
2175 }
2176 
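     /* COPY SIGN: take the sign bit from in1 and the magnitude from in2,
        e.g. in1 = -3.0, in2 = 2.5 gives -2.5.  */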
2177 static DisasJumpType op_cps(DisasContext *s, DisasOps *o)
2178 {
2179     TCGv_i64 t = tcg_temp_new_i64();
2180     tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
2181     tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
2182     tcg_gen_or_i64(o->out, o->out, t);
2183     tcg_temp_free_i64(t);
2184     return DISAS_NEXT;
2185 }
2186 
2187 static DisasJumpType op_cs(DisasContext *s, DisasOps *o)
2188 {
2189     int d2 = get_field(s, d2);
2190     int b2 = get_field(s, b2);
2191     TCGv_i64 addr, cc;
2192 
2193     /* Note that in1 = R3 (new value) and
2194        in2 = (zero-extended) R1 (expected value).  */
2195 
2196     addr = get_address(s, 0, b2, d2);
2197     tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
2198                                get_mem_index(s), s->insn->data | MO_ALIGN);
2199     tcg_temp_free_i64(addr);
2200 
2201     /* Are the memory and expected values (un)equal?  Note that this setcond
2202        produces the output CC value, thus the NE sense of the test.  */
2203     cc = tcg_temp_new_i64();
2204     tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
2205     tcg_gen_extrl_i64_i32(cc_op, cc);
2206     tcg_temp_free_i64(cc);
2207     set_cc_static(s);
2208 
2209     return DISAS_NEXT;
2210 }
2211 
2212 static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o)
2213 {
2214     int r1 = get_field(s, r1);
2215     int r3 = get_field(s, r3);
2216     int d2 = get_field(s, d2);
2217     int b2 = get_field(s, b2);
2218     DisasJumpType ret = DISAS_NEXT;
2219     TCGv_i64 addr;
2220     TCGv_i32 t_r1, t_r3;
2221 
2222     /* Note that R1:R1+1 = expected value and R3:R3+1 = new value.  */
2223     addr = get_address(s, 0, b2, d2);
2224     t_r1 = tcg_const_i32(r1);
2225     t_r3 = tcg_const_i32(r3);
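         /* Pick an implementation: without CF_PARALLEL the helper may
            operate non-atomically; a parallel context needs a real 128-bit
            cmpxchg, and if the host lacks one (!HAVE_CMPXCHG128) we exit
            to a slow path that re-executes the insn exclusively.  */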
2226     if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
2227         gen_helper_cdsg(cpu_env, addr, t_r1, t_r3);
2228     } else if (HAVE_CMPXCHG128) {
2229         gen_helper_cdsg_parallel(cpu_env, addr, t_r1, t_r3);
2230     } else {
2231         gen_helper_exit_atomic(cpu_env);
2232         ret = DISAS_NORETURN;
2233     }
2234     tcg_temp_free_i64(addr);
2235     tcg_temp_free_i32(t_r1);
2236     tcg_temp_free_i32(t_r3);
2237 
2238     set_cc_static(s);
2239     return ret;
2240 }
2241 
2242 static DisasJumpType op_csst(DisasContext *s, DisasOps *o)
2243 {
2244     int r3 = get_field(s, r3);
2245     TCGv_i32 t_r3 = tcg_const_i32(r3);
2246 
2247     if (tb_cflags(s->base.tb) & CF_PARALLEL) {
2248         gen_helper_csst_parallel(cc_op, cpu_env, t_r3, o->addr1, o->in2);
2249     } else {
2250         gen_helper_csst(cc_op, cpu_env, t_r3, o->addr1, o->in2);
2251     }
2252     tcg_temp_free_i32(t_r3);
2253 
2254     set_cc_static(s);
2255     return DISAS_NEXT;
2256 }
2257 
2258 #ifndef CONFIG_USER_ONLY
2259 static DisasJumpType op_csp(DisasContext *s, DisasOps *o)
2260 {
2261     MemOp mop = s->insn->data;
2262     TCGv_i64 addr, old, cc;
2263     TCGLabel *lab = gen_new_label();
2264 
2265     /* Note that in1 = R1 (zero-extended expected value),
2266        out = R1 (original reg), out2 = R1+1 (new value).  */
2267 
2268     addr = tcg_temp_new_i64();
2269     old = tcg_temp_new_i64();
2270     tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
2271     tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
2272                                get_mem_index(s), mop | MO_ALIGN);
2273     tcg_temp_free_i64(addr);
2274 
2275     /* Are the memory and expected values (un)equal?  */
2276     cc = tcg_temp_new_i64();
2277     tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
2278     tcg_gen_extrl_i64_i32(cc_op, cc);
2279 
2280     /* Write back the output now, so that it happens before the
2281        following branch, so that we don't need local temps.  */
2282     if ((mop & MO_SIZE) == MO_32) {
2283         tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
2284     } else {
2285         tcg_gen_mov_i64(o->out, old);
2286     }
2287     tcg_temp_free_i64(old);
2288 
2289     /* If the comparison was equal, and the LSB of R2 was set,
2290        then we need to flush the TLB (for all cpus).  */
2291     tcg_gen_xori_i64(cc, cc, 1);
2292     tcg_gen_and_i64(cc, cc, o->in2);
2293     tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
2294     tcg_temp_free_i64(cc);
2295 
2296     gen_helper_purge(cpu_env);
2297     gen_set_label(lab);
2298 
2299     return DISAS_NEXT;
2300 }
2301 #endif
2302 
2303 static DisasJumpType op_cvd(DisasContext *s, DisasOps *o)
2304 {
2305     TCGv_i64 t1 = tcg_temp_new_i64();
2306     TCGv_i32 t2 = tcg_temp_new_i32();
2307     tcg_gen_extrl_i64_i32(t2, o->in1);
2308     gen_helper_cvd(t1, t2);
2309     tcg_temp_free_i32(t2);
2310     tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
2311     tcg_temp_free_i64(t1);
2312     return DISAS_NEXT;
2313 }
2314 
2315 static DisasJumpType op_ct(DisasContext *s, DisasOps *o)
2316 {
2317     int m3 = get_field(s, m3);
2318     TCGLabel *lab = gen_new_label();
2319     TCGCond c;
2320 
2321     c = tcg_invert_cond(ltgt_cond[m3]);
2322     if (s->insn->data) {
2323         c = tcg_unsigned_cond(c);
2324     }
2325     tcg_gen_brcond_i64(c, o->in1, o->in2, lab);
2326 
2327     /* Trap.  */
2328     gen_trap(s);
2329 
2330     gen_set_label(lab);
2331     return DISAS_NEXT;
2332 }
2333 
2334 static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
2335 {
2336     int m3 = get_field(s, m3);
2337     int r1 = get_field(s, r1);
2338     int r2 = get_field(s, r2);
2339     TCGv_i32 tr1, tr2, chk;
2340 
2341     /* R1 and R2 must both be even.  */
2342     if ((r1 | r2) & 1) {
2343         gen_program_exception(s, PGM_SPECIFICATION);
2344         return DISAS_NORETURN;
2345     }
2346     if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
2347         m3 = 0;
2348     }
2349 
2350     tr1 = tcg_const_i32(r1);
2351     tr2 = tcg_const_i32(r2);
2352     chk = tcg_const_i32(m3);
2353 
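         /* insn->data names the conversion by source and destination
            format, with 1 = UTF-8, 2 = UTF-16 and 4 = UTF-32; e.g. 21 is
            CONVERT UTF-16 TO UTF-8.  */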
2354     switch (s->insn->data) {
2355     case 12:
2356         gen_helper_cu12(cc_op, cpu_env, tr1, tr2, chk);
2357         break;
2358     case 14:
2359         gen_helper_cu14(cc_op, cpu_env, tr1, tr2, chk);
2360         break;
2361     case 21:
2362         gen_helper_cu21(cc_op, cpu_env, tr1, tr2, chk);
2363         break;
2364     case 24:
2365         gen_helper_cu24(cc_op, cpu_env, tr1, tr2, chk);
2366         break;
2367     case 41:
2368         gen_helper_cu41(cc_op, cpu_env, tr1, tr2, chk);
2369         break;
2370     case 42:
2371         gen_helper_cu42(cc_op, cpu_env, tr1, tr2, chk);
2372         break;
2373     default:
2374         g_assert_not_reached();
2375     }
2376 
2377     tcg_temp_free_i32(tr1);
2378     tcg_temp_free_i32(tr2);
2379     tcg_temp_free_i32(chk);
2380     set_cc_static(s);
2381     return DISAS_NEXT;
2382 }
2383 
2384 #ifndef CONFIG_USER_ONLY
2385 static DisasJumpType op_diag(DisasContext *s, DisasOps *o)
2386 {
2387     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
2388     TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
2389     TCGv_i32 func_code = tcg_const_i32(get_field(s, i2));
2390 
2391     gen_helper_diag(cpu_env, r1, r3, func_code);
2392 
2393     tcg_temp_free_i32(func_code);
2394     tcg_temp_free_i32(r3);
2395     tcg_temp_free_i32(r1);
2396     return DISAS_NEXT;
2397 }
2398 #endif
2399 
2400 static DisasJumpType op_divs32(DisasContext *s, DisasOps *o)
2401 {
2402     gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
2403     return_low128(o->out);
2404     return DISAS_NEXT;
2405 }
2406 
2407 static DisasJumpType op_divu32(DisasContext *s, DisasOps *o)
2408 {
2409     gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
2410     return_low128(o->out);
2411     return DISAS_NEXT;
2412 }
2413 
2414 static DisasJumpType op_divs64(DisasContext *s, DisasOps *o)
2415 {
2416     gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
2417     return_low128(o->out);
2418     return DISAS_NEXT;
2419 }
2420 
2421 static DisasJumpType op_divu64(DisasContext *s, DisasOps *o)
2422 {
2423     gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
2424     return_low128(o->out);
2425     return DISAS_NEXT;
2426 }
2427 
2428 static DisasJumpType op_deb(DisasContext *s, DisasOps *o)
2429 {
2430     gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
2431     return DISAS_NEXT;
2432 }
2433 
2434 static DisasJumpType op_ddb(DisasContext *s, DisasOps *o)
2435 {
2436     gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
2437     return DISAS_NEXT;
2438 }
2439 
2440 static DisasJumpType op_dxb(DisasContext *s, DisasOps *o)
2441 {
2442     gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2443     return_low128(o->out2);
2444     return DISAS_NEXT;
2445 }
2446 
2447 static DisasJumpType op_ear(DisasContext *s, DisasOps *o)
2448 {
2449     int r2 = get_field(s, r2);
2450     tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
2451     return DISAS_NEXT;
2452 }
2453 
2454 static DisasJumpType op_ecag(DisasContext *s, DisasOps *o)
2455 {
2456     /* No cache information provided.  */
2457     tcg_gen_movi_i64(o->out, -1);
2458     return DISAS_NEXT;
2459 }
2460 
2461 static DisasJumpType op_efpc(DisasContext *s, DisasOps *o)
2462 {
2463     tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
2464     return DISAS_NEXT;
2465 }
2466 
2467 static DisasJumpType op_epsw(DisasContext *s, DisasOps *o)
2468 {
2469     int r1 = get_field(s, r1);
2470     int r2 = get_field(s, r2);
2471     TCGv_i64 t = tcg_temp_new_i64();
2472 
2473     /* Note the "subsequently" in the PoO, which implies a defined result
2474        if r1 == r2.  Thus we cannot defer these writes to an output hook.  */
2475     tcg_gen_shri_i64(t, psw_mask, 32);
2476     store_reg32_i64(r1, t);
2477     if (r2 != 0) {
2478         store_reg32_i64(r2, psw_mask);
2479     }
2480 
2481     tcg_temp_free_i64(t);
2482     return DISAS_NEXT;
2483 }
2484 
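     /* EXECUTE: unless R1 is 0, helper_ex ORs bits 56-63 of R1 into the
        second byte of the target instruction and latches the result in
        env->ex_value; ending the TB here lets the next translation pick
        the modified instruction up from there.  */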
2485 static DisasJumpType op_ex(DisasContext *s, DisasOps *o)
2486 {
2487     int r1 = get_field(s, r1);
2488     TCGv_i32 ilen;
2489     TCGv_i64 v1;
2490 
2491     /* Nested EXECUTE is not allowed.  */
2492     if (unlikely(s->ex_value)) {
2493         gen_program_exception(s, PGM_EXECUTE);
2494         return DISAS_NORETURN;
2495     }
2496 
2497     update_psw_addr(s);
2498     update_cc_op(s);
2499 
2500     if (r1 == 0) {
2501         v1 = tcg_const_i64(0);
2502     } else {
2503         v1 = regs[r1];
2504     }
2505 
2506     ilen = tcg_const_i32(s->ilen);
2507     gen_helper_ex(cpu_env, ilen, v1, o->in2);
2508     tcg_temp_free_i32(ilen);
2509 
2510     if (r1 == 0) {
2511         tcg_temp_free_i64(v1);
2512     }
2513 
2514     return DISAS_PC_CC_UPDATED;
2515 }
2516 
2517 static DisasJumpType op_fieb(DisasContext *s, DisasOps *o)
2518 {
2519     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2520 
2521     if (!m34) {
2522         return DISAS_NORETURN;
2523     }
2524     gen_helper_fieb(o->out, cpu_env, o->in2, m34);
2525     tcg_temp_free_i32(m34);
2526     return DISAS_NEXT;
2527 }
2528 
2529 static DisasJumpType op_fidb(DisasContext *s, DisasOps *o)
2530 {
2531     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2532 
2533     if (!m34) {
2534         return DISAS_NORETURN;
2535     }
2536     gen_helper_fidb(o->out, cpu_env, o->in2, m34);
2537     tcg_temp_free_i32(m34);
2538     return DISAS_NEXT;
2539 }
2540 
2541 static DisasJumpType op_fixb(DisasContext *s, DisasOps *o)
2542 {
2543     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2544 
2545     if (!m34) {
2546         return DISAS_NORETURN;
2547     }
2548     gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m34);
2549     return_low128(o->out2);
2550     tcg_temp_free_i32(m34);
2551     return DISAS_NEXT;
2552 }
2553 
2554 static DisasJumpType op_flogr(DisasContext *s, DisasOps *o)
2555 {
2556     /* We'll use the original input for cc computation, since we get to
2557        compare that against 0, which ought to be better than comparing
2558        the real output against 64.  It also lets cc_dst be a convenient
2559        temporary during our computation.  */
2560     gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);
2561 
2562     /* R1 = IN ? CLZ(IN) : 64.  */
2563     tcg_gen_clzi_i64(o->out, o->in2, 64);
2564 
2565     /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
2566        value by 64, which is undefined.  But since the shift is 64 iff the
2567        input is zero, we still get the correct result after and'ing.  */
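         /* E.g. IN = 0x00f0: CLZ = 56, so R1 = 56; the found bit is
            0x8000000000000000ull >> 56 = 0x80, and R1+1 = 0xf0 & ~0x80
            = 0x70.  */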
2568     tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
2569     tcg_gen_shr_i64(o->out2, o->out2, o->out);
2570     tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
2571     return DISAS_NEXT;
2572 }
2573 
2574 static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
2575 {
2576     int m3 = get_field(s, m3);
2577     int pos, len, base = s->insn->data;
2578     TCGv_i64 tmp = tcg_temp_new_i64();
2579     uint64_t ccm;
2580 
2581     switch (m3) {
2582     case 0xf:
2583         /* Effectively a 32-bit load.  */
2584         tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2585         len = 32;
2586         goto one_insert;
2587 
2588     case 0xc:
2589     case 0x6:
2590     case 0x3:
2591         /* Effectively a 16-bit load.  */
2592         tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2593         len = 16;
2594         goto one_insert;
2595 
2596     case 0x8:
2597     case 0x4:
2598     case 0x2:
2599     case 0x1:
2600         /* Effectively an 8-bit load.  */
2601         tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2602         len = 8;
2603         goto one_insert;
2604 
2605     one_insert:
2606         pos = base + ctz32(m3) * 8;
2607         tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2608         ccm = ((1ull << len) - 1) << pos;
2609         break;
2610 
2611     default:
2612         /* This is going to be a sequence of loads and inserts.  */
2613         pos = base + 32 - 8;
2614         ccm = 0;
2615         while (m3) {
2616             if (m3 & 0x8) {
2617                 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2618                 tcg_gen_addi_i64(o->in2, o->in2, 1);
2619                 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2620                 ccm |= 0xffull << pos;
2621             }
2622             m3 = (m3 << 1) & 0xf;
2623             pos -= 8;
2624         }
2625         break;
2626     }
2627 
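         /* E.g. ICM with m3 = 1010b inserts the two bytes at bit positions
            24 and 8, so ccm = 0xff00ff00: exactly the bits of R1 that take
            part in the CC comparison.  */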
2628     tcg_gen_movi_i64(tmp, ccm);
2629     gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2630     tcg_temp_free_i64(tmp);
2631     return DISAS_NEXT;
2632 }
2633 
2634 static DisasJumpType op_insi(DisasContext *s, DisasOps *o)
2635 {
2636     int shift = s->insn->data & 0xff;
2637     int size = s->insn->data >> 8;
2638     tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
2639     return DISAS_NEXT;
2640 }
2641 
2642 static DisasJumpType op_ipm(DisasContext *s, DisasOps *o)
2643 {
2644     TCGv_i64 t1, t2;
2645 
2646     gen_op_calc_cc(s);
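         /* Build (cc << 4) | program_mask: PSW bits 40-43 hold the program
            mask, and the combined byte is deposited into bits 24-31 of R1,
            i.e. bits 32-39 in the PoO's big-endian numbering.  */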
2647     t1 = tcg_temp_new_i64();
2648     tcg_gen_extract_i64(t1, psw_mask, 40, 4);
2649     t2 = tcg_temp_new_i64();
2650     tcg_gen_extu_i32_i64(t2, cc_op);
2651     tcg_gen_deposit_i64(t1, t1, t2, 4, 60);
2652     tcg_gen_deposit_i64(o->out, o->out, t1, 24, 8);
2653     tcg_temp_free_i64(t1);
2654     tcg_temp_free_i64(t2);
2655     return DISAS_NEXT;
2656 }
2657 
2658 #ifndef CONFIG_USER_ONLY
2659 static DisasJumpType op_idte(DisasContext *s, DisasOps *o)
2660 {
2661     TCGv_i32 m4;
2662 
2663     if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2664         m4 = tcg_const_i32(get_field(s, m4));
2665     } else {
2666         m4 = tcg_const_i32(0);
2667     }
2668     gen_helper_idte(cpu_env, o->in1, o->in2, m4);
2669     tcg_temp_free_i32(m4);
2670     return DISAS_NEXT;
2671 }
2672 
2673 static DisasJumpType op_ipte(DisasContext *s, DisasOps *o)
2674 {
2675     TCGv_i32 m4;
2676 
2677     if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2678         m4 = tcg_const_i32(get_field(s, m4));
2679     } else {
2680         m4 = tcg_const_i32(0);
2681     }
2682     gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
2683     tcg_temp_free_i32(m4);
2684     return DISAS_NEXT;
2685 }
2686 
2687 static DisasJumpType op_iske(DisasContext *s, DisasOps *o)
2688 {
2689     gen_helper_iske(o->out, cpu_env, o->in2);
2690     return DISAS_NEXT;
2691 }
2692 #endif
2693 
2694 static DisasJumpType op_msa(DisasContext *s, DisasOps *o)
2695 {
2696     int r1 = have_field(s, r1) ? get_field(s, r1) : 0;
2697     int r2 = have_field(s, r2) ? get_field(s, r2) : 0;
2698     int r3 = have_field(s, r3) ? get_field(s, r3) : 0;
2699     TCGv_i32 t_r1, t_r2, t_r3, type;
2700 
2701     switch (s->insn->data) {
2702     case S390_FEAT_TYPE_KMA:
2703         if (r3 == r1 || r3 == r2) {
2704             gen_program_exception(s, PGM_SPECIFICATION);
2705             return DISAS_NORETURN;
2706         }
2707         /* FALL THROUGH */
2708     case S390_FEAT_TYPE_KMCTR:
2709         if (r3 & 1 || !r3) {
2710             gen_program_exception(s, PGM_SPECIFICATION);
2711             return DISAS_NORETURN;
2712         }
2713         /* FALL THROUGH */
2714     case S390_FEAT_TYPE_PPNO:
2715     case S390_FEAT_TYPE_KMF:
2716     case S390_FEAT_TYPE_KMC:
2717     case S390_FEAT_TYPE_KMO:
2718     case S390_FEAT_TYPE_KM:
2719         if (r1 & 1 || !r1) {
2720             gen_program_exception(s, PGM_SPECIFICATION);
2721             return DISAS_NORETURN;
2722         }
2723         /* FALL THROUGH */
2724     case S390_FEAT_TYPE_KMAC:
2725     case S390_FEAT_TYPE_KIMD:
2726     case S390_FEAT_TYPE_KLMD:
2727         if (r2 & 1 || !r2) {
2728             gen_program_exception(s, PGM_SPECIFICATION);
2729             return DISAS_NORETURN;
2730         }
2731         /* FALL THROUGH */
2732     case S390_FEAT_TYPE_PCKMO:
2733     case S390_FEAT_TYPE_PCC:
2734         break;
2735     default:
2736         g_assert_not_reached();
2737     }
2738 
2739     t_r1 = tcg_const_i32(r1);
2740     t_r2 = tcg_const_i32(r2);
2741     t_r3 = tcg_const_i32(r3);
2742     type = tcg_const_i32(s->insn->data);
2743     gen_helper_msa(cc_op, cpu_env, t_r1, t_r2, t_r3, type);
2744     set_cc_static(s);
2745     tcg_temp_free_i32(t_r1);
2746     tcg_temp_free_i32(t_r2);
2747     tcg_temp_free_i32(t_r3);
2748     tcg_temp_free_i32(type);
2749     return DISAS_NEXT;
2750 }
2751 
2752 static DisasJumpType op_keb(DisasContext *s, DisasOps *o)
2753 {
2754     gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
2755     set_cc_static(s);
2756     return DISAS_NEXT;
2757 }
2758 
2759 static DisasJumpType op_kdb(DisasContext *s, DisasOps *o)
2760 {
2761     gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
2762     set_cc_static(s);
2763     return DISAS_NEXT;
2764 }
2765 
2766 static DisasJumpType op_kxb(DisasContext *s, DisasOps *o)
2767 {
2768     gen_helper_kxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
2769     set_cc_static(s);
2770     return DISAS_NEXT;
2771 }
2772 
2773 static DisasJumpType op_laa(DisasContext *s, DisasOps *o)
2774 {
2775     /* The real output is indeed the original value in memory;
2776        recompute the addition for the computation of CC.  */
2777     tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2778                                  s->insn->data | MO_ALIGN);
2779     /* However, we need to recompute the addition for setting CC.  */
2780     tcg_gen_add_i64(o->out, o->in1, o->in2);
2781     return DISAS_NEXT;
2782 }
2783 
2784 static DisasJumpType op_lan(DisasContext *s, DisasOps *o)
2785 {
2786     /* The real output is indeed the original value in memory;
2787        recompute the AND for the computation of CC.  */
2788     tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2789                                  s->insn->data | MO_ALIGN);
2790     /* However, we need to recompute the operation for setting CC.  */
2791     tcg_gen_and_i64(o->out, o->in1, o->in2);
2792     return DISAS_NEXT;
2793 }
2794 
2795 static DisasJumpType op_lao(DisasContext *s, DisasOps *o)
2796 {
2797     /* The real output is indeed the original value in memory;
2798        recompute the OR for the computation of CC.  */
2799     tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2800                                 s->insn->data | MO_ALIGN);
2801     /* However, we need to recompute the operation for setting CC.  */
2802     tcg_gen_or_i64(o->out, o->in1, o->in2);
2803     return DISAS_NEXT;
2804 }
2805 
2806 static DisasJumpType op_lax(DisasContext *s, DisasOps *o)
2807 {
2808     /* The real output is indeed the original value in memory;
2809        recompute the XOR for the computation of CC.  */
2810     tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2811                                  s->insn->data | MO_ALIGN);
2812     /* However, we need to recompute the operation for setting CC.  */
2813     tcg_gen_xor_i64(o->out, o->in1, o->in2);
2814     return DISAS_NEXT;
2815 }
2816 
2817 static DisasJumpType op_ldeb(DisasContext *s, DisasOps *o)
2818 {
2819     gen_helper_ldeb(o->out, cpu_env, o->in2);
2820     return DISAS_NEXT;
2821 }
2822 
2823 static DisasJumpType op_ledb(DisasContext *s, DisasOps *o)
2824 {
2825     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2826 
2827     if (!m34) {
2828         return DISAS_NORETURN;
2829     }
2830     gen_helper_ledb(o->out, cpu_env, o->in2, m34);
2831     tcg_temp_free_i32(m34);
2832     return DISAS_NEXT;
2833 }
2834 
2835 static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o)
2836 {
2837     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2838 
2839     if (!m34) {
2840         return DISAS_NORETURN;
2841     }
2842     gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2, m34);
2843     tcg_temp_free_i32(m34);
2844     return DISAS_NEXT;
2845 }
2846 
2847 static DisasJumpType op_lexb(DisasContext *s, DisasOps *o)
2848 {
2849     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2850 
2851     if (!m34) {
2852         return DISAS_NORETURN;
2853     }
2854     gen_helper_lexb(o->out, cpu_env, o->in1, o->in2, m34);
2855     tcg_temp_free_i32(m34);
2856     return DISAS_NEXT;
2857 }
2858 
2859 static DisasJumpType op_lxdb(DisasContext *s, DisasOps *o)
2860 {
2861     gen_helper_lxdb(o->out, cpu_env, o->in2);
2862     return_low128(o->out2);
2863     return DISAS_NEXT;
2864 }
2865 
2866 static DisasJumpType op_lxeb(DisasContext *s, DisasOps *o)
2867 {
2868     gen_helper_lxeb(o->out, cpu_env, o->in2);
2869     return_low128(o->out2);
2870     return DISAS_NEXT;
2871 }
2872 
2873 static DisasJumpType op_lde(DisasContext *s, DisasOps *o)
2874 {
2875     tcg_gen_shli_i64(o->out, o->in2, 32);
2876     return DISAS_NEXT;
2877 }
2878 
2879 static DisasJumpType op_llgt(DisasContext *s, DisasOps *o)
2880 {
2881     tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2882     return DISAS_NEXT;
2883 }
2884 
2885 static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o)
2886 {
2887     tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
2888     return DISAS_NEXT;
2889 }
2890 
2891 static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o)
2892 {
2893     tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
2894     return DISAS_NEXT;
2895 }
2896 
2897 static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o)
2898 {
2899     tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
2900     return DISAS_NEXT;
2901 }
2902 
2903 static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o)
2904 {
2905     tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
2906     return DISAS_NEXT;
2907 }
2908 
2909 static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o)
2910 {
2911     tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
2912     return DISAS_NEXT;
2913 }
2914 
2915 static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o)
2916 {
2917     tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2918     return DISAS_NEXT;
2919 }
2920 
2921 static DisasJumpType op_ld64(DisasContext *s, DisasOps *o)
2922 {
2923     tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2924     return DISAS_NEXT;
2925 }
2926 
2927 static DisasJumpType op_lat(DisasContext *s, DisasOps *o)
2928 {
2929     TCGLabel *lab = gen_new_label();
2930     store_reg32_i64(get_field(s, r1), o->in2);
2931     /* The value is stored even in case of trap. */
2932     tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2933     gen_trap(s);
2934     gen_set_label(lab);
2935     return DISAS_NEXT;
2936 }
2937 
2938 static DisasJumpType op_lgat(DisasContext *s, DisasOps *o)
2939 {
2940     TCGLabel *lab = gen_new_label();
2941     tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2942     /* The value is stored even in case of trap. */
2943     tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2944     gen_trap(s);
2945     gen_set_label(lab);
2946     return DISAS_NEXT;
2947 }
2948 
2949 static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o)
2950 {
2951     TCGLabel *lab = gen_new_label();
2952     store_reg32h_i64(get_field(s, r1), o->in2);
2953     /* The value is stored even in case of trap. */
2954     tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2955     gen_trap(s);
2956     gen_set_label(lab);
2957     return DISAS_NEXT;
2958 }
2959 
2960 static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o)
2961 {
2962     TCGLabel *lab = gen_new_label();
2963     tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2964     /* The value is stored even in case of trap. */
2965     tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2966     gen_trap(s);
2967     gen_set_label(lab);
2968     return DISAS_NEXT;
2969 }
2970 
2971 static DisasJumpType op_llgtat(DisasContext *s, DisasOps *o)
2972 {
2973     TCGLabel *lab = gen_new_label();
2974     tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2975     /* The value is stored even in case of trap. */
2976     tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2977     gen_trap(s);
2978     gen_set_label(lab);
2979     return DISAS_NEXT;
2980 }
2981 
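     /* LOAD ON CONDITION.  A 64-bit comparison can feed movcond directly;
        a 32-bit comparison is first materialized with setcond, widened,
        and retested against zero, as there is no movcond with a 32-bit
        condition and 64-bit data.  */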
2982 static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
2983 {
2984     DisasCompare c;
2985 
2986     disas_jcc(s, &c, get_field(s, m3));
2987 
2988     if (c.is_64) {
2989         tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
2990                             o->in2, o->in1);
2991         free_compare(&c);
2992     } else {
2993         TCGv_i32 t32 = tcg_temp_new_i32();
2994         TCGv_i64 t, z;
2995 
2996         tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
2997         free_compare(&c);
2998 
2999         t = tcg_temp_new_i64();
3000         tcg_gen_extu_i32_i64(t, t32);
3001         tcg_temp_free_i32(t32);
3002 
3003         z = tcg_const_i64(0);
3004         tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
3005         tcg_temp_free_i64(t);
3006         tcg_temp_free_i64(z);
3007     }
3008 
3009     return DISAS_NEXT;
3010 }
3011 
3012 #ifndef CONFIG_USER_ONLY
3013 static DisasJumpType op_lctl(DisasContext *s, DisasOps *o)
3014 {
3015     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
3016     TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
3017     gen_helper_lctl(cpu_env, r1, o->in2, r3);
3018     tcg_temp_free_i32(r1);
3019     tcg_temp_free_i32(r3);
3020     /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
3021     return DISAS_PC_STALE_NOCHAIN;
3022 }
3023 
3024 static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o)
3025 {
3026     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
3027     TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
3028     gen_helper_lctlg(cpu_env, r1, o->in2, r3);
3029     tcg_temp_free_i32(r1);
3030     tcg_temp_free_i32(r3);
3031     /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
3032     return DISAS_PC_STALE_NOCHAIN;
3033 }
3034 
3035 static DisasJumpType op_lra(DisasContext *s, DisasOps *o)
3036 {
3037     gen_helper_lra(o->out, cpu_env, o->in2);
3038     set_cc_static(s);
3039     return DISAS_NEXT;
3040 }
3041 
3042 static DisasJumpType op_lpp(DisasContext *s, DisasOps *o)
3043 {
3044     tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
3045     return DISAS_NEXT;
3046 }
3047 
3048 static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o)
3049 {
3050     TCGv_i64 t1, t2;
3051 
3052     per_breaking_event(s);
3053 
3054     t1 = tcg_temp_new_i64();
3055     t2 = tcg_temp_new_i64();
3056     tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
3057                         MO_TEUL | MO_ALIGN_8);
3058     tcg_gen_addi_i64(o->in2, o->in2, 4);
3059     tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
3060     /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK.  */
3061     tcg_gen_shli_i64(t1, t1, 32);
3062     gen_helper_load_psw(cpu_env, t1, t2);
3063     tcg_temp_free_i64(t1);
3064     tcg_temp_free_i64(t2);
3065     return DISAS_NORETURN;
3066 }
3067 
3068 static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
3069 {
3070     TCGv_i64 t1, t2;
3071 
3072     per_breaking_event(s);
3073 
3074     t1 = tcg_temp_new_i64();
3075     t2 = tcg_temp_new_i64();
3076     tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
3077                         MO_TEQ | MO_ALIGN_8);
3078     tcg_gen_addi_i64(o->in2, o->in2, 8);
3079     tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
3080     gen_helper_load_psw(cpu_env, t1, t2);
3081     tcg_temp_free_i64(t1);
3082     tcg_temp_free_i64(t2);
3083     return DISAS_NORETURN;
3084 }
3085 #endif
3086 
3087 static DisasJumpType op_lam(DisasContext *s, DisasOps *o)
3088 {
3089     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
3090     TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
3091     gen_helper_lam(cpu_env, r1, o->in2, r3);
3092     tcg_temp_free_i32(r1);
3093     tcg_temp_free_i32(r3);
3094     return DISAS_NEXT;
3095 }
3096 
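     /* LOAD MULTIPLE: load the first and the last register up front, so
        that any page fault arrives before any register has been modified;
        the remaining loads can then no longer fault.  "(r3 - r1) & 15"
        covers wraparound, e.g. LM %r14,%r2 loads r14, r15, r0, r1, r2.
        The same scheme is used by op_lmh and op_lm64 below.  */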
3097 static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
3098 {
3099     int r1 = get_field(s, r1);
3100     int r3 = get_field(s, r3);
3101     TCGv_i64 t1, t2;
3102 
3103     /* Only one register to read. */
3104     t1 = tcg_temp_new_i64();
3105     if (unlikely(r1 == r3)) {
3106         tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3107         store_reg32_i64(r1, t1);
3108         tcg_temp_free(t1);
3109         return DISAS_NEXT;
3110     }
3111 
3112     /* First load the values of the first and last registers to trigger
3113        possible page faults. */
3114     t2 = tcg_temp_new_i64();
3115     tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3116     tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
3117     tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
3118     store_reg32_i64(r1, t1);
3119     store_reg32_i64(r3, t2);
3120 
3121     /* Only two registers to read. */
3122     if (((r1 + 1) & 15) == r3) {
3123         tcg_temp_free(t2);
3124         tcg_temp_free(t1);
3125         return DISAS_NEXT;
3126     }
3127 
3128     /* Then load the remaining registers; a page fault can no longer occur. */
3129     r3 = (r3 - 1) & 15;
3130     tcg_gen_movi_i64(t2, 4);
3131     while (r1 != r3) {
3132         r1 = (r1 + 1) & 15;
3133         tcg_gen_add_i64(o->in2, o->in2, t2);
3134         tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3135         store_reg32_i64(r1, t1);
3136     }
3137     tcg_temp_free(t2);
3138     tcg_temp_free(t1);
3139 
3140     return DISAS_NEXT;
3141 }
3142 
3143 static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
3144 {
3145     int r1 = get_field(s, r1);
3146     int r3 = get_field(s, r3);
3147     TCGv_i64 t1, t2;
3148 
3149     /* Only one register to read. */
3150     t1 = tcg_temp_new_i64();
3151     if (unlikely(r1 == r3)) {
3152         tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3153         store_reg32h_i64(r1, t1);
3154         tcg_temp_free(t1);
3155         return DISAS_NEXT;
3156     }
3157 
3158     /* First load the values of the first and last registers to trigger
3159        possible page faults. */
3160     t2 = tcg_temp_new_i64();
3161     tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3162     tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
3163     tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
3164     store_reg32h_i64(r1, t1);
3165     store_reg32h_i64(r3, t2);
3166 
3167     /* Only two registers to read. */
3168     if (((r1 + 1) & 15) == r3) {
3169         tcg_temp_free(t2);
3170         tcg_temp_free(t1);
3171         return DISAS_NEXT;
3172     }
3173 
3174     /* Then load the remaining registers; a page fault can no longer occur. */
3175     r3 = (r3 - 1) & 15;
3176     tcg_gen_movi_i64(t2, 4);
3177     while (r1 != r3) {
3178         r1 = (r1 + 1) & 15;
3179         tcg_gen_add_i64(o->in2, o->in2, t2);
3180         tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3181         store_reg32h_i64(r1, t1);
3182     }
3183     tcg_temp_free(t2);
3184     tcg_temp_free(t1);
3185 
3186     return DISAS_NEXT;
3187 }
3188 
3189 static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
3190 {
3191     int r1 = get_field(s, r1);
3192     int r3 = get_field(s, r3);
3193     TCGv_i64 t1, t2;
3194 
3195     /* Only one register to read. */
3196     if (unlikely(r1 == r3)) {
3197         tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
3198         return DISAS_NEXT;
3199     }
3200 
3201     /* First load the values of the first and last registers to trigger
3202        possible page faults. */
3203     t1 = tcg_temp_new_i64();
3204     t2 = tcg_temp_new_i64();
3205     tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
3206     tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
3207     tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
3208     tcg_gen_mov_i64(regs[r1], t1);
3209     tcg_temp_free(t2);
3210 
3211     /* Only two registers to read. */
3212     if (((r1 + 1) & 15) == r3) {
3213         tcg_temp_free(t1);
3214         return DISAS_NEXT;
3215     }
3216 
3217     /* Then load the remaining registers; a page fault can no longer occur. */
3218     r3 = (r3 - 1) & 15;
3219     tcg_gen_movi_i64(t1, 8);
3220     while (r1 != r3) {
3221         r1 = (r1 + 1) & 15;
3222         tcg_gen_add_i64(o->in2, o->in2, t1);
3223         tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
3224     }
3225     tcg_temp_free(t1);
3226 
3227     return DISAS_NEXT;
3228 }
3229 
3230 static DisasJumpType op_lpd(DisasContext *s, DisasOps *o)
3231 {
3232     TCGv_i64 a1, a2;
3233     MemOp mop = s->insn->data;
3234 
3235     /* In a parallel context, stop the world and single step.  */
3236     if (tb_cflags(s->base.tb) & CF_PARALLEL) {
3237         update_psw_addr(s);
3238         update_cc_op(s);
3239         gen_exception(EXCP_ATOMIC);
3240         return DISAS_NORETURN;
3241     }
3242 
3243     /* In a serial context, perform the two loads ... */
3244     a1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
3245     a2 = get_address(s, 0, get_field(s, b2), get_field(s, d2));
3246     tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
3247     tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
3248     tcg_temp_free_i64(a1);
3249     tcg_temp_free_i64(a2);
3250 
3251     /* ... and indicate that we performed them while interlocked.  */
3252     gen_op_movi_cc(s, 0);
3253     return DISAS_NEXT;
3254 }
3255 
3256 static DisasJumpType op_lpq(DisasContext *s, DisasOps *o)
3257 {
3258     if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
3259         gen_helper_lpq(o->out, cpu_env, o->in2);
3260     } else if (HAVE_ATOMIC128) {
3261         gen_helper_lpq_parallel(o->out, cpu_env, o->in2);
3262     } else {
3263         gen_helper_exit_atomic(cpu_env);
3264         return DISAS_NORETURN;
3265     }
3266     return_low128(o->out2);
3267     return DISAS_NEXT;
3268 }
3269 
3270 #ifndef CONFIG_USER_ONLY
3271 static DisasJumpType op_lura(DisasContext *s, DisasOps *o)
3272 {
3273     tcg_gen_qemu_ld_tl(o->out, o->in2, MMU_REAL_IDX, s->insn->data);
3274     return DISAS_NEXT;
3275 }
3276 #endif
3277 
3278 static DisasJumpType op_lzrb(DisasContext *s, DisasOps *o)
3279 {
3280     tcg_gen_andi_i64(o->out, o->in2, -256);
3281     return DISAS_NEXT;
3282 }
3283 
3284 static DisasJumpType op_lcbb(DisasContext *s, DisasOps *o)
3285 {
3286     const int64_t block_size = (1ull << (get_field(s, m3) + 6));
3287 
3288     if (get_field(s, m3) > 6) {
3289         gen_program_exception(s, PGM_SPECIFICATION);
3290         return DISAS_NORETURN;
3291     }
3292 
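         /* -block_size is a mask of the high bits, so or-then-negate yields
            block_size - (addr % block_size): e.g. addr = 0x1003 with a
            64-byte block gives 61 bytes to the boundary, and R1 =
            min(16, 61) = 16.  */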
3293     tcg_gen_ori_i64(o->addr1, o->addr1, -block_size);
3294     tcg_gen_neg_i64(o->addr1, o->addr1);
3295     tcg_gen_movi_i64(o->out, 16);
3296     tcg_gen_umin_i64(o->out, o->out, o->addr1);
3297     gen_op_update1_cc_i64(s, CC_OP_LCBB, o->out);
3298     return DISAS_NEXT;
3299 }
3300 
3301 static DisasJumpType op_mc(DisasContext *s, DisasOps *o)
3302 {
3303 #if !defined(CONFIG_USER_ONLY)
3304     TCGv_i32 i2;
3305 #endif
3306     const uint16_t monitor_class = get_field(s, i2);
3307 
3308     if (monitor_class & 0xff00) {
3309         gen_program_exception(s, PGM_SPECIFICATION);
3310         return DISAS_NORETURN;
3311     }
3312 
3313 #if !defined(CONFIG_USER_ONLY)
3314     i2 = tcg_const_i32(monitor_class);
3315     gen_helper_monitor_call(cpu_env, o->addr1, i2);
3316     tcg_temp_free_i32(i2);
3317 #endif
3318     /* Defaults to a NOP. */
3319     return DISAS_NEXT;
3320 }
3321 
3322 static DisasJumpType op_mov2(DisasContext *s, DisasOps *o)
3323 {
3324     o->out = o->in2;
3325     o->g_out = o->g_in2;
3326     o->in2 = NULL;
3327     o->g_in2 = false;
3328     return DISAS_NEXT;
3329 }
3330 
3331 static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o)
3332 {
3333     int b2 = get_field(s, b2);
3334     TCGv ar1 = tcg_temp_new_i64();
3335 
3336     o->out = o->in2;
3337     o->g_out = o->g_in2;
3338     o->in2 = NULL;
3339     o->g_in2 = false;
3340 
3341     switch (s->base.tb->flags & FLAG_MASK_ASC) {
3342     case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
3343         tcg_gen_movi_i64(ar1, 0);
3344         break;
3345     case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
3346         tcg_gen_movi_i64(ar1, 1);
3347         break;
3348     case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
3349         if (b2) {
3350             tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
3351         } else {
3352             tcg_gen_movi_i64(ar1, 0);
3353         }
3354         break;
3355     case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
3356         tcg_gen_movi_i64(ar1, 2);
3357         break;
3358     }
3359 
3360     tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
3361     tcg_temp_free_i64(ar1);
3362 
3363     return DISAS_NEXT;
3364 }
3365 
3366 static DisasJumpType op_movx(DisasContext *s, DisasOps *o)
3367 {
3368     o->out = o->in1;
3369     o->out2 = o->in2;
3370     o->g_out = o->g_in1;
3371     o->g_out2 = o->g_in2;
3372     o->in1 = NULL;
3373     o->in2 = NULL;
3374     o->g_in1 = o->g_in2 = false;
3375     return DISAS_NEXT;
3376 }
3377 
3378 static DisasJumpType op_mvc(DisasContext *s, DisasOps *o)
3379 {
3380     TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3381     gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
3382     tcg_temp_free_i32(l);
3383     return DISAS_NEXT;
3384 }
3385 
3386 static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o)
3387 {
3388     TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3389     gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
3390     tcg_temp_free_i32(l);
3391     return DISAS_NEXT;
3392 }
3393 
3394 static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o)
3395 {
3396     int r1 = get_field(s, r1);
3397     int r2 = get_field(s, r2);
3398     TCGv_i32 t1, t2;
3399 
3400     /* r1 and r2 must be even.  */
3401     if (r1 & 1 || r2 & 1) {
3402         gen_program_exception(s, PGM_SPECIFICATION);
3403         return DISAS_NORETURN;
3404     }
3405 
3406     t1 = tcg_const_i32(r1);
3407     t2 = tcg_const_i32(r2);
3408     gen_helper_mvcl(cc_op, cpu_env, t1, t2);
3409     tcg_temp_free_i32(t1);
3410     tcg_temp_free_i32(t2);
3411     set_cc_static(s);
3412     return DISAS_NEXT;
3413 }
3414 
3415 static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o)
3416 {
3417     int r1 = get_field(s, r1);
3418     int r3 = get_field(s, r3);
3419     TCGv_i32 t1, t3;
3420 
3421     /* r1 and r3 must be even.  */
3422     if (r1 & 1 || r3 & 1) {
3423         gen_program_exception(s, PGM_SPECIFICATION);
3424         return DISAS_NORETURN;
3425     }
3426 
3427     t1 = tcg_const_i32(r1);
3428     t3 = tcg_const_i32(r3);
3429     gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
3430     tcg_temp_free_i32(t1);
3431     tcg_temp_free_i32(t3);
3432     set_cc_static(s);
3433     return DISAS_NEXT;
3434 }
3435 
3436 static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o)
3437 {
3438     int r1 = get_field(s, r1);
3439     int r3 = get_field(s, r3);
3440     TCGv_i32 t1, t3;
3441 
3442     /* r1 and r3 must be even.  */
3443     if (r1 & 1 || r3 & 1) {
3444         gen_program_exception(s, PGM_SPECIFICATION);
3445         return DISAS_NORETURN;
3446     }
3447 
3448     t1 = tcg_const_i32(r1);
3449     t3 = tcg_const_i32(r3);
3450     gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
3451     tcg_temp_free_i32(t1);
3452     tcg_temp_free_i32(t3);
3453     set_cc_static(s);
3454     return DISAS_NEXT;
3455 }
3456 
3457 static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o)
3458 {
3459     int r3 = get_field(s, r3);
3460     gen_helper_mvcos(cc_op, cpu_env, o->addr1, o->in2, regs[r3]);
3461     set_cc_static(s);
3462     return DISAS_NEXT;
3463 }
3464 
3465 #ifndef CONFIG_USER_ONLY
3466 static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o)
3467 {
3468     int r1 = get_field(s, l1);
3469     gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
3470     set_cc_static(s);
3471     return DISAS_NEXT;
3472 }
3473 
3474 static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o)
3475 {
3476     int r1 = get_field(s, l1);
3477     gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
3478     set_cc_static(s);
3479     return DISAS_NEXT;
3480 }
3481 #endif
3482 
3483 static DisasJumpType op_mvn(DisasContext *s, DisasOps *o)
3484 {
3485     TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3486     gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
3487     tcg_temp_free_i32(l);
3488     return DISAS_NEXT;
3489 }
3490 
3491 static DisasJumpType op_mvo(DisasContext *s, DisasOps *o)
3492 {
3493     TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3494     gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
3495     tcg_temp_free_i32(l);
3496     return DISAS_NEXT;
3497 }
3498 
3499 static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o)
3500 {
3501     TCGv_i32 t1 = tcg_const_i32(get_field(s, r1));
3502     TCGv_i32 t2 = tcg_const_i32(get_field(s, r2));
3503 
3504     gen_helper_mvpg(cc_op, cpu_env, regs[0], t1, t2);
3505     tcg_temp_free_i32(t1);
3506     tcg_temp_free_i32(t2);
3507     set_cc_static(s);
3508     return DISAS_NEXT;
3509 }
3510 
3511 static DisasJumpType op_mvst(DisasContext *s, DisasOps *o)
3512 {
3513     TCGv_i32 t1 = tcg_const_i32(get_field(s, r1));
3514     TCGv_i32 t2 = tcg_const_i32(get_field(s, r2));
3515 
3516     gen_helper_mvst(cc_op, cpu_env, t1, t2);
3517     tcg_temp_free_i32(t1);
3518     tcg_temp_free_i32(t2);
3519     set_cc_static(s);
3520     return DISAS_NEXT;
3521 }
3522 
3523 static DisasJumpType op_mvz(DisasContext *s, DisasOps *o)
3524 {
3525     TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3526     gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
3527     tcg_temp_free_i32(l);
3528     return DISAS_NEXT;
3529 }
3530 
3531 static DisasJumpType op_mul(DisasContext *s, DisasOps *o)
3532 {
3533     tcg_gen_mul_i64(o->out, o->in1, o->in2);
3534     return DISAS_NEXT;
3535 }
3536 
3537 static DisasJumpType op_mul128(DisasContext *s, DisasOps *o)
3538 {
3539     tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
3540     return DISAS_NEXT;
3541 }
3542 
3543 static DisasJumpType op_muls128(DisasContext *s, DisasOps *o)
3544 {
3545     tcg_gen_muls2_i64(o->out2, o->out, o->in1, o->in2);
3546     return DISAS_NEXT;
3547 }
3548 
3549 static DisasJumpType op_meeb(DisasContext *s, DisasOps *o)
3550 {
3551     gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
3552     return DISAS_NEXT;
3553 }
3554 
3555 static DisasJumpType op_mdeb(DisasContext *s, DisasOps *o)
3556 {
3557     gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
3558     return DISAS_NEXT;
3559 }
3560 
3561 static DisasJumpType op_mdb(DisasContext *s, DisasOps *o)
3562 {
3563     gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
3564     return DISAS_NEXT;
3565 }
3566 
3567 static DisasJumpType op_mxb(DisasContext *s, DisasOps *o)
3568 {
3569     gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
3570     return_low128(o->out2);
3571     return DISAS_NEXT;
3572 }
3573 
3574 static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o)
3575 {
3576     gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
3577     return_low128(o->out2);
3578     return DISAS_NEXT;
3579 }
3580 
3581 static DisasJumpType op_maeb(DisasContext *s, DisasOps *o)
3582 {
3583     TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
3584     gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
3585     tcg_temp_free_i64(r3);
3586     return DISAS_NEXT;
3587 }
3588 
3589 static DisasJumpType op_madb(DisasContext *s, DisasOps *o)
3590 {
3591     TCGv_i64 r3 = load_freg(get_field(s, r3));
3592     gen_helper_madb(o->out, cpu_env, o->in1, o->in2, r3);
3593     tcg_temp_free_i64(r3);
3594     return DISAS_NEXT;
3595 }
3596 
3597 static DisasJumpType op_mseb(DisasContext *s, DisasOps *o)
3598 {
3599     TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
3600     gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
3601     tcg_temp_free_i64(r3);
3602     return DISAS_NEXT;
3603 }
3604 
3605 static DisasJumpType op_msdb(DisasContext *s, DisasOps *o)
3606 {
3607     TCGv_i64 r3 = load_freg(get_field(s, r3));
3608     gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, r3);
3609     tcg_temp_free_i64(r3);
3610     return DISAS_NEXT;
3611 }
3612 
3613 static DisasJumpType op_nabs(DisasContext *s, DisasOps *o)
3614 {
3615     TCGv_i64 z, n;
3616     z = tcg_const_i64(0);
3617     n = tcg_temp_new_i64();
3618     tcg_gen_neg_i64(n, o->in2);
3619     tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
3620     tcg_temp_free_i64(n);
3621     tcg_temp_free_i64(z);
3622     return DISAS_NEXT;
3623 }
3624 
3625 static DisasJumpType op_nabsf32(DisasContext *s, DisasOps *o)
3626 {
3627     tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
3628     return DISAS_NEXT;
3629 }
3630 
3631 static DisasJumpType op_nabsf64(DisasContext *s, DisasOps *o)
3632 {
3633     tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
3634     return DISAS_NEXT;
3635 }
3636 
3637 static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o)
3638 {
3639     tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
3640     tcg_gen_mov_i64(o->out2, o->in2);
3641     return DISAS_NEXT;
3642 }
3643 
3644 static DisasJumpType op_nc(DisasContext *s, DisasOps *o)
3645 {
3646     TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3647     gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
3648     tcg_temp_free_i32(l);
3649     set_cc_static(s);
3650     return DISAS_NEXT;
3651 }
3652 
3653 static DisasJumpType op_neg(DisasContext *s, DisasOps *o)
3654 {
3655     tcg_gen_neg_i64(o->out, o->in2);
3656     return DISAS_NEXT;
3657 }
3658 
3659 static DisasJumpType op_negf32(DisasContext *s, DisasOps *o)
3660 {
3661     tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
3662     return DISAS_NEXT;
3663 }
3664 
3665 static DisasJumpType op_negf64(DisasContext *s, DisasOps *o)
3666 {
3667     tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
3668     return DISAS_NEXT;
3669 }
3670 
3671 static DisasJumpType op_negf128(DisasContext *s, DisasOps *o)
3672 {
3673     tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
3674     tcg_gen_mov_i64(o->out2, o->in2);
3675     return DISAS_NEXT;
3676 }
3677 
3678 static DisasJumpType op_oc(DisasContext *s, DisasOps *o)
3679 {
3680     TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3681     gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
3682     tcg_temp_free_i32(l);
3683     set_cc_static(s);
3684     return DISAS_NEXT;
3685 }
3686 
3687 static DisasJumpType op_or(DisasContext *s, DisasOps *o)
3688 {
3689     tcg_gen_or_i64(o->out, o->in1, o->in2);
3690     return DISAS_NEXT;
3691 }
3692 
3693 static DisasJumpType op_ori(DisasContext *s, DisasOps *o)
3694 {
3695     int shift = s->insn->data & 0xff;
3696     int size = s->insn->data >> 8;
3697     uint64_t mask = ((1ull << size) - 1) << shift;
3698 
3699     assert(!o->g_in2);
3700     tcg_gen_shli_i64(o->in2, o->in2, shift);
3701     tcg_gen_or_i64(o->out, o->in1, o->in2);
3702 
3703     /* Produce the CC from only the bits manipulated.  */
3704     tcg_gen_andi_i64(cc_dst, o->out, mask);
3705     set_cc_nz_u64(s, cc_dst);
3706     return DISAS_NEXT;
3707 }
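
/*
 * An illustrative note, not from the PoO: insn->data packs the field
 * geometry as (size << 8) | shift.  For example, an OR-immediate into
 * the 16 bits starting at shift 48 would use data = (16 << 8) | 48,
 * giving mask = 0xffff000000000000.
 */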
3708 
3709 static DisasJumpType op_oi(DisasContext *s, DisasOps *o)
3710 {
3711     o->in1 = tcg_temp_new_i64();
3712 
3713     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
3714         tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
3715     } else {
3716         /* Perform the atomic operation in memory. */
3717         tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
3718                                     s->insn->data);
3719     }
3720 
3721     /* Also recompute for the atomic case; it is needed to set the CC. */
3722     tcg_gen_or_i64(o->out, o->in1, o->in2);
3723 
3724     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
3725         tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
3726     }
3727     return DISAS_NEXT;
3728 }
3729 
3730 static DisasJumpType op_pack(DisasContext *s, DisasOps *o)
3731 {
3732     TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3733     gen_helper_pack(cpu_env, l, o->addr1, o->in2);
3734     tcg_temp_free_i32(l);
3735     return DISAS_NEXT;
3736 }
3737 
3738 static DisasJumpType op_pka(DisasContext *s, DisasOps *o)
3739 {
3740     int l2 = get_field(s, l2) + 1;
3741     TCGv_i32 l;
3742 
3743     /* The length must not exceed 32 bytes.  */
3744     if (l2 > 32) {
3745         gen_program_exception(s, PGM_SPECIFICATION);
3746         return DISAS_NORETURN;
3747     }
3748     l = tcg_const_i32(l2);
3749     gen_helper_pka(cpu_env, o->addr1, o->in2, l);
3750     tcg_temp_free_i32(l);
3751     return DISAS_NEXT;
3752 }
3753 
3754 static DisasJumpType op_pku(DisasContext *s, DisasOps *o)
3755 {
3756     int l2 = get_field(s, l2) + 1;
3757     TCGv_i32 l;
3758 
3759     /* The length must be even and must not exceed 64 bytes.  */
3760     if ((l2 & 1) || (l2 > 64)) {
3761         gen_program_exception(s, PGM_SPECIFICATION);
3762         return DISAS_NORETURN;
3763     }
3764     l = tcg_const_i32(l2);
3765     gen_helper_pku(cpu_env, o->addr1, o->in2, l);
3766     tcg_temp_free_i32(l);
3767     return DISAS_NEXT;
3768 }
3769 
3770 static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o)
3771 {
3772     gen_helper_popcnt(o->out, o->in2);
3773     return DISAS_NEXT;
3774 }
3775 
3776 #ifndef CONFIG_USER_ONLY
3777 static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o)
3778 {
3779     gen_helper_ptlb(cpu_env);
3780     return DISAS_NEXT;
3781 }
3782 #endif
3783 
3784 static DisasJumpType op_risbg(DisasContext *s, DisasOps *o)
3785 {
3786     int i3 = get_field(s, i3);
3787     int i4 = get_field(s, i4);
3788     int i5 = get_field(s, i5);
3789     int do_zero = i4 & 0x80;
3790     uint64_t mask, imask, pmask;
3791     int pos, len, rot;
3792 
3793     /* Adjust the arguments for the specific insn.  */
3794     switch (s->fields.op2) {
3795     case 0x55: /* risbg */
3796     case 0x59: /* risbgn */
3797         i3 &= 63;
3798         i4 &= 63;
3799         pmask = ~0;
3800         break;
3801     case 0x5d: /* risbhg */
3802         i3 &= 31;
3803         i4 &= 31;
3804         pmask = 0xffffffff00000000ull;
3805         break;
3806     case 0x51: /* risblg */
3807         i3 = (i3 & 31) + 32;
3808         i4 = (i4 & 31) + 32;
3809         pmask = 0x00000000ffffffffull;
3810         break;
3811     default:
3812         g_assert_not_reached();
3813     }
3814 
3815     /* MASK is the set of bits to be inserted from R2. */
3816     if (i3 <= i4) {
3817         /* [0...i3---i4...63] */
3818         mask = (-1ull >> i3) & (-1ull << (63 - i4));
3819     } else {
3820         /* [0---i4...i3---63] */
3821         mask = (-1ull >> i3) | (-1ull << (63 - i4));
3822     }
3823     /* For RISBHG/RISBLG, the wrapping is limited to the high/low word. */
3824     mask &= pmask;
3825 
3826     /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
3827        insns, we need to keep the other half of the register.  */
3828     imask = ~mask | ~pmask;
3829     if (do_zero) {
3830         imask = ~pmask;
3831     }
3832 
3833     len = i4 - i3 + 1;
3834     pos = 63 - i4;
3835     rot = i5 & 63;
3836 
3837     /* In some cases we can implement this with extract.  */
3838     if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
3839         tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
3840         return DISAS_NEXT;
3841     }
3842 
3843     /* In some cases we can implement this with deposit.  */
3844     if (len > 0 && (imask == 0 || ~mask == imask)) {
3845         /* Note that we rotate the bits to be inserted to the lsb, not to
3846            the position as described in the PoO.  */
3847         rot = (rot - pos) & 63;
3848     } else {
3849         pos = -1;
3850     }
3851 
3852     /* Rotate the input as necessary.  */
3853     tcg_gen_rotli_i64(o->in2, o->in2, rot);
3854 
3855     /* Insert the selected bits into the output.  */
3856     if (pos >= 0) {
3857         if (imask == 0) {
3858             tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
3859         } else {
3860             tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
3861         }
3862     } else if (imask == 0) {
3863         tcg_gen_andi_i64(o->out, o->in2, mask);
3864     } else {
3865         tcg_gen_andi_i64(o->in2, o->in2, mask);
3866         tcg_gen_andi_i64(o->out, o->out, imask);
3867         tcg_gen_or_i64(o->out, o->out, o->in2);
3868     }
3869     return DISAS_NEXT;
3870 }
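
/*
 * A worked example for the above (illustrative only): RISBG with
 * I3=8, I4=23, I5=16 selects IBM bits 8..23, so
 *     mask = (-1ull >> 8) & (-1ull << (63 - 23)) = 0x00ffff0000000000,
 *     len = 16, pos = 40,
 * and the deposit path adjusts the rotation to (16 - 40) & 63 = 40 so
 * that the source field sits at the lsb before being deposited.
 */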
3871 
3872 static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o)
3873 {
3874     int i3 = get_field(s, i3);
3875     int i4 = get_field(s, i4);
3876     int i5 = get_field(s, i5);
3877     uint64_t mask;
3878 
3879     /* If this is a test-only form, arrange to discard the result.  */
3880     if (i3 & 0x80) {
3881         o->out = tcg_temp_new_i64();
3882         o->g_out = false;
3883     }
3884 
3885     i3 &= 63;
3886     i4 &= 63;
3887     i5 &= 63;
3888 
3889     /* MASK is the set of bits to be operated on from R2.
3890        Beware of I3/I4 wraparound.  */
3891     mask = ~0ull >> i3;
3892     if (i3 <= i4) {
3893         mask ^= ~0ull >> i4 >> 1;
3894     } else {
3895         mask |= ~(~0ull >> i4 >> 1);
3896     }
3897 
3898     /* Rotate the input as necessary.  */
3899     tcg_gen_rotli_i64(o->in2, o->in2, i5);
3900 
3901     /* Operate.  */
3902     switch (s->fields.op2) {
3903     case 0x54: /* AND */
3904         tcg_gen_ori_i64(o->in2, o->in2, ~mask);
3905         tcg_gen_and_i64(o->out, o->out, o->in2);
3906         break;
3907     case 0x56: /* OR */
3908         tcg_gen_andi_i64(o->in2, o->in2, mask);
3909         tcg_gen_or_i64(o->out, o->out, o->in2);
3910         break;
3911     case 0x57: /* XOR */
3912         tcg_gen_andi_i64(o->in2, o->in2, mask);
3913         tcg_gen_xor_i64(o->out, o->out, o->in2);
3914         break;
3915     default:
3916         abort();
3917     }
3918 
3919     /* Set the CC.  */
3920     tcg_gen_andi_i64(cc_dst, o->out, mask);
3921     set_cc_nz_u64(s, cc_dst);
3922     return DISAS_NEXT;
3923 }
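
/*
 * A worked example for the wraparound case (illustrative only): with
 * I3=56, I4=7 the I3 > I4 branch selects IBM bits 56..63 and 0..7:
 *     mask = (~0ull >> 56) | ~(~0ull >> 7 >> 1) = 0xff000000000000ff.
 */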
3924 
3925 static DisasJumpType op_rev16(DisasContext *s, DisasOps *o)
3926 {
3927     tcg_gen_bswap16_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
3928     return DISAS_NEXT;
3929 }
3930 
3931 static DisasJumpType op_rev32(DisasContext *s, DisasOps *o)
3932 {
3933     tcg_gen_bswap32_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
3934     return DISAS_NEXT;
3935 }
3936 
3937 static DisasJumpType op_rev64(DisasContext *s, DisasOps *o)
3938 {
3939     tcg_gen_bswap64_i64(o->out, o->in2);
3940     return DISAS_NEXT;
3941 }
3942 
3943 static DisasJumpType op_rll32(DisasContext *s, DisasOps *o)
3944 {
3945     TCGv_i32 t1 = tcg_temp_new_i32();
3946     TCGv_i32 t2 = tcg_temp_new_i32();
3947     TCGv_i32 to = tcg_temp_new_i32();
3948     tcg_gen_extrl_i64_i32(t1, o->in1);
3949     tcg_gen_extrl_i64_i32(t2, o->in2);
3950     tcg_gen_rotl_i32(to, t1, t2);
3951     tcg_gen_extu_i32_i64(o->out, to);
3952     tcg_temp_free_i32(t1);
3953     tcg_temp_free_i32(t2);
3954     tcg_temp_free_i32(to);
3955     return DISAS_NEXT;
3956 }
3957 
3958 static DisasJumpType op_rll64(DisasContext *s, DisasOps *o)
3959 {
3960     tcg_gen_rotl_i64(o->out, o->in1, o->in2);
3961     return DISAS_NEXT;
3962 }
3963 
3964 #ifndef CONFIG_USER_ONLY
3965 static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o)
3966 {
3967     gen_helper_rrbe(cc_op, cpu_env, o->in2);
3968     set_cc_static(s);
3969     return DISAS_NEXT;
3970 }
3971 
3972 static DisasJumpType op_sacf(DisasContext *s, DisasOps *o)
3973 {
3974     gen_helper_sacf(cpu_env, o->in2);
3975     /* Addressing mode has changed, so end the block.  */
3976     return DISAS_PC_STALE;
3977 }
3978 #endif
3979 
3980 static DisasJumpType op_sam(DisasContext *s, DisasOps *o)
3981 {
3982     int sam = s->insn->data;
3983     TCGv_i64 tsam;
3984     uint64_t mask;
3985 
3986     switch (sam) {
3987     case 0:
3988         mask = 0xffffff;
3989         break;
3990     case 1:
3991         mask = 0x7fffffff;
3992         break;
3993     default:
3994         mask = -1;
3995         break;
3996     }
3997 
3998     /* Bizarre but true, we check the address of the current insn for the
3999        specification exception, not the next to be executed.  Thus the PoO
4000        documents that Bad Things Happen two bytes before the end.  */
4001     if (s->base.pc_next & ~mask) {
4002         gen_program_exception(s, PGM_SPECIFICATION);
4003         return DISAS_NORETURN;
4004     }
4005     s->pc_tmp &= mask;
4006 
4007     tsam = tcg_const_i64(sam);
4008     tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
4009     tcg_temp_free_i64(tsam);
4010 
4011     /* Always exit the TB, since we (may have) changed execution mode.  */
4012     return DISAS_PC_STALE;
4013 }
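
/*
 * Illustrative note: insn->data is 0, 1 or 3 for SAM24, SAM31 and
 * SAM64 respectively, so the two-bit deposit at position 31 sets the
 * PSW EA/BA addressing-mode bits to 00, 01 or 11.
 */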
4014 
4015 static DisasJumpType op_sar(DisasContext *s, DisasOps *o)
4016 {
4017     int r1 = get_field(s, r1);
4018     tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
4019     return DISAS_NEXT;
4020 }
4021 
4022 static DisasJumpType op_seb(DisasContext *s, DisasOps *o)
4023 {
4024     gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
4025     return DISAS_NEXT;
4026 }
4027 
4028 static DisasJumpType op_sdb(DisasContext *s, DisasOps *o)
4029 {
4030     gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
4031     return DISAS_NEXT;
4032 }
4033 
4034 static DisasJumpType op_sxb(DisasContext *s, DisasOps *o)
4035 {
4036     gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
4037     return_low128(o->out2);
4038     return DISAS_NEXT;
4039 }
4040 
4041 static DisasJumpType op_sqeb(DisasContext *s, DisasOps *o)
4042 {
4043     gen_helper_sqeb(o->out, cpu_env, o->in2);
4044     return DISAS_NEXT;
4045 }
4046 
4047 static DisasJumpType op_sqdb(DisasContext *s, DisasOps *o)
4048 {
4049     gen_helper_sqdb(o->out, cpu_env, o->in2);
4050     return DISAS_NEXT;
4051 }
4052 
4053 static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o)
4054 {
4055     gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
4056     return_low128(o->out2);
4057     return DISAS_NEXT;
4058 }
4059 
4060 #ifndef CONFIG_USER_ONLY
4061 static DisasJumpType op_servc(DisasContext *s, DisasOps *o)
4062 {
4063     gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
4064     set_cc_static(s);
4065     return DISAS_NEXT;
4066 }
4067 
4068 static DisasJumpType op_sigp(DisasContext *s, DisasOps *o)
4069 {
4070     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
4071     TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
4072     gen_helper_sigp(cc_op, cpu_env, o->in2, r1, r3);
4073     set_cc_static(s);
4074     tcg_temp_free_i32(r1);
4075     tcg_temp_free_i32(r3);
4076     return DISAS_NEXT;
4077 }
4078 #endif
4079 
4080 static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
4081 {
4082     DisasCompare c;
4083     TCGv_i64 a, h;
4084     TCGLabel *lab;
4085     int r1;
4086 
4087     disas_jcc(s, &c, get_field(s, m3));
4088 
4089     /* We want to store when the condition is fulfilled, so branch
4090        out when it's not.  */
4091     c.cond = tcg_invert_cond(c.cond);
4092 
4093     lab = gen_new_label();
4094     if (c.is_64) {
4095         tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
4096     } else {
4097         tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
4098     }
4099     free_compare(&c);
4100 
4101     r1 = get_field(s, r1);
4102     a = get_address(s, 0, get_field(s, b2), get_field(s, d2));
4103     switch (s->insn->data) {
4104     case 1: /* STOCG */
4105         tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
4106         break;
4107     case 0: /* STOC */
4108         tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
4109         break;
4110     case 2: /* STOCFH */
4111         h = tcg_temp_new_i64();
4112         tcg_gen_shri_i64(h, regs[r1], 32);
4113         tcg_gen_qemu_st32(h, a, get_mem_index(s));
4114         tcg_temp_free_i64(h);
4115         break;
4116     default:
4117         g_assert_not_reached();
4118     }
4119     tcg_temp_free_i64(a);
4120 
4121     gen_set_label(lab);
4122     return DISAS_NEXT;
4123 }
4124 
4125 static DisasJumpType op_sla(DisasContext *s, DisasOps *o)
4126 {
4127     uint64_t sign = 1ull << s->insn->data;
4128     enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
4129     gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
4130     tcg_gen_shl_i64(o->out, o->in1, o->in2);
4131     /* The arithmetic left shift is curious in that it does not affect
4132        the sign bit.  Copy that over from the source unchanged.  */
4133     tcg_gen_andi_i64(o->out, o->out, ~sign);
4134     tcg_gen_andi_i64(o->in1, o->in1, sign);
4135     tcg_gen_or_i64(o->out, o->out, o->in1);
4136     return DISAS_NEXT;
4137 }
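
/*
 * Illustrative example: in the 32-bit form (sign = 1ull << 31),
 * shifting 0xc0000000 left by one gives 0x80000000 in the low word;
 * bit 30 is shifted out while bit 31 is copied from the source.
 */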
4138 
4139 static DisasJumpType op_sll(DisasContext *s, DisasOps *o)
4140 {
4141     tcg_gen_shl_i64(o->out, o->in1, o->in2);
4142     return DISAS_NEXT;
4143 }
4144 
4145 static DisasJumpType op_sra(DisasContext *s, DisasOps *o)
4146 {
4147     tcg_gen_sar_i64(o->out, o->in1, o->in2);
4148     return DISAS_NEXT;
4149 }
4150 
4151 static DisasJumpType op_srl(DisasContext *s, DisasOps *o)
4152 {
4153     tcg_gen_shr_i64(o->out, o->in1, o->in2);
4154     return DISAS_NEXT;
4155 }
4156 
4157 static DisasJumpType op_sfpc(DisasContext *s, DisasOps *o)
4158 {
4159     gen_helper_sfpc(cpu_env, o->in2);
4160     return DISAS_NEXT;
4161 }
4162 
4163 static DisasJumpType op_sfas(DisasContext *s, DisasOps *o)
4164 {
4165     gen_helper_sfas(cpu_env, o->in2);
4166     return DISAS_NEXT;
4167 }
4168 
4169 static DisasJumpType op_srnm(DisasContext *s, DisasOps *o)
4170 {
4171     /* Bits other than 62 and 63 are ignored. Bit 29 is set to zero. */
4172     tcg_gen_andi_i64(o->addr1, o->addr1, 0x3ull);
4173     gen_helper_srnm(cpu_env, o->addr1);
4174     return DISAS_NEXT;
4175 }
4176 
4177 static DisasJumpType op_srnmb(DisasContext *s, DisasOps *o)
4178 {
4179     /* Bits 0-55 are ignored. */
4180     tcg_gen_andi_i64(o->addr1, o->addr1, 0xffull);
4181     gen_helper_srnm(cpu_env, o->addr1);
4182     return DISAS_NEXT;
4183 }
4184 
4185 static DisasJumpType op_srnmt(DisasContext *s, DisasOps *o)
4186 {
4187     TCGv_i64 tmp = tcg_temp_new_i64();
4188 
4189     /* Bits other than 61-63 are ignored. */
4190     tcg_gen_andi_i64(o->addr1, o->addr1, 0x7ull);
4191 
4192     /* No need to call a helper; we don't implement DFP.  */
4193     tcg_gen_ld32u_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
4194     tcg_gen_deposit_i64(tmp, tmp, o->addr1, 4, 3);
4195     tcg_gen_st32_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
4196 
4197     tcg_temp_free_i64(tmp);
4198     return DISAS_NEXT;
4199 }
4200 
4201 static DisasJumpType op_spm(DisasContext *s, DisasOps *o)
4202 {
4203     tcg_gen_extrl_i64_i32(cc_op, o->in1);
4204     tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
4205     set_cc_static(s);
4206 
4207     tcg_gen_shri_i64(o->in1, o->in1, 24);
4208     tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
4209     return DISAS_NEXT;
4210 }
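
/*
 * Illustrative note: SPM takes IBM bits 34-35 of R1 as the new CC and
 * bits 36-39 as the new program mask; those are what the extract at
 * bit 28 and the shift by 24 above select.
 */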
4211 
4212 static DisasJumpType op_ectg(DisasContext *s, DisasOps *o)
4213 {
4214     int b1 = get_field(s, b1);
4215     int d1 = get_field(s, d1);
4216     int b2 = get_field(s, b2);
4217     int d2 = get_field(s, d2);
4218     int r3 = get_field(s, r3);
4219     TCGv_i64 tmp = tcg_temp_new_i64();
4220 
4221     /* fetch all operands first */
4222     o->in1 = tcg_temp_new_i64();
4223     tcg_gen_addi_i64(o->in1, regs[b1], d1);
4224     o->in2 = tcg_temp_new_i64();
4225     tcg_gen_addi_i64(o->in2, regs[b2], d2);
4226     o->addr1 = tcg_temp_new_i64();
4227     gen_addi_and_wrap_i64(s, o->addr1, regs[r3], 0);
4228 
4229     /* load the third operand into r3 before modifying anything */
4230     tcg_gen_qemu_ld64(regs[r3], o->addr1, get_mem_index(s));
4231 
4232     /* subtract CPU timer from first operand and store in GR0 */
4233     gen_helper_stpt(tmp, cpu_env);
4234     tcg_gen_sub_i64(regs[0], o->in1, tmp);
4235 
4236     /* store second operand in GR1 */
4237     tcg_gen_mov_i64(regs[1], o->in2);
4238 
4239     tcg_temp_free_i64(tmp);
4240     return DISAS_NEXT;
4241 }
4242 
4243 #ifndef CONFIG_USER_ONLY
4244 static DisasJumpType op_spka(DisasContext *s, DisasOps *o)
4245 {
4246     tcg_gen_shri_i64(o->in2, o->in2, 4);
4247     tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
4248     return DISAS_NEXT;
4249 }
4250 
4251 static DisasJumpType op_sske(DisasContext *s, DisasOps *o)
4252 {
4253     gen_helper_sske(cpu_env, o->in1, o->in2);
4254     return DISAS_NEXT;
4255 }
4256 
4257 static DisasJumpType op_ssm(DisasContext *s, DisasOps *o)
4258 {
4259     tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
4260     /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
4261     return DISAS_PC_STALE_NOCHAIN;
4262 }
4263 
4264 static DisasJumpType op_stap(DisasContext *s, DisasOps *o)
4265 {
4266     tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, core_id));
4267     return DISAS_NEXT;
4268 }
4269 #endif
4270 
4271 static DisasJumpType op_stck(DisasContext *s, DisasOps *o)
4272 {
4273     gen_helper_stck(o->out, cpu_env);
4274     /* ??? We don't implement clock states.  */
4275     gen_op_movi_cc(s, 0);
4276     return DISAS_NEXT;
4277 }
4278 
4279 static DisasJumpType op_stcke(DisasContext *s, DisasOps *o)
4280 {
4281     TCGv_i64 c1 = tcg_temp_new_i64();
4282     TCGv_i64 c2 = tcg_temp_new_i64();
4283     TCGv_i64 todpr = tcg_temp_new_i64();
4284     gen_helper_stck(c1, cpu_env);
4285     /* 16-bit value stored in a uint32_t (only the valid bits set) */
4286     tcg_gen_ld32u_i64(todpr, cpu_env, offsetof(CPUS390XState, todpr));
4287     /* Shift the 64-bit value into its place as a zero-extended
4288        104-bit value.  Note that "bit positions 64-103 are always
4289        non-zero so that they compare differently to STCK"; we set
4290        the least significant bit to 1.  */
4291     tcg_gen_shli_i64(c2, c1, 56);
4292     tcg_gen_shri_i64(c1, c1, 8);
4293     tcg_gen_ori_i64(c2, c2, 0x10000);
4294     tcg_gen_or_i64(c2, c2, todpr);
4295     tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
4296     tcg_gen_addi_i64(o->in2, o->in2, 8);
4297     tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
4298     tcg_temp_free_i64(c1);
4299     tcg_temp_free_i64(c2);
4300     tcg_temp_free_i64(todpr);
4301     /* ??? We don't implement clock states.  */
4302     gen_op_movi_cc(s, 0);
4303     return DISAS_NEXT;
4304 }
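
/*
 * The resulting 16-byte operand, as stored above (illustrative):
 *     byte  0       zero epoch index
 *     bytes 1-8     the 64-bit TOD clock
 *     bytes 9-12    zeros
 *     byte  13      0x01 (the "always non-zero" bit)
 *     bytes 14-15   TOD programmable field
 */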
4305 
4306 #ifndef CONFIG_USER_ONLY
4307 static DisasJumpType op_sck(DisasContext *s, DisasOps *o)
4308 {
4309     tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
4310     gen_helper_sck(cc_op, cpu_env, o->in1);
4311     set_cc_static(s);
4312     return DISAS_NEXT;
4313 }
4314 
4315 static DisasJumpType op_sckc(DisasContext *s, DisasOps *o)
4316 {
4317     gen_helper_sckc(cpu_env, o->in2);
4318     return DISAS_NEXT;
4319 }
4320 
4321 static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o)
4322 {
4323     gen_helper_sckpf(cpu_env, regs[0]);
4324     return DISAS_NEXT;
4325 }
4326 
4327 static DisasJumpType op_stckc(DisasContext *s, DisasOps *o)
4328 {
4329     gen_helper_stckc(o->out, cpu_env);
4330     return DISAS_NEXT;
4331 }
4332 
4333 static DisasJumpType op_stctg(DisasContext *s, DisasOps *o)
4334 {
4335     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
4336     TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
4337     gen_helper_stctg(cpu_env, r1, o->in2, r3);
4338     tcg_temp_free_i32(r1);
4339     tcg_temp_free_i32(r3);
4340     return DISAS_NEXT;
4341 }
4342 
4343 static DisasJumpType op_stctl(DisasContext *s, DisasOps *o)
4344 {
4345     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
4346     TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
4347     gen_helper_stctl(cpu_env, r1, o->in2, r3);
4348     tcg_temp_free_i32(r1);
4349     tcg_temp_free_i32(r3);
4350     return DISAS_NEXT;
4351 }
4352 
4353 static DisasJumpType op_stidp(DisasContext *s, DisasOps *o)
4354 {
4355     tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, cpuid));
4356     return DISAS_NEXT;
4357 }
4358 
4359 static DisasJumpType op_spt(DisasContext *s, DisasOps *o)
4360 {
4361     gen_helper_spt(cpu_env, o->in2);
4362     return DISAS_NEXT;
4363 }
4364 
4365 static DisasJumpType op_stfl(DisasContext *s, DisasOps *o)
4366 {
4367     gen_helper_stfl(cpu_env);
4368     return DISAS_NEXT;
4369 }
4370 
4371 static DisasJumpType op_stpt(DisasContext *s, DisasOps *o)
4372 {
4373     gen_helper_stpt(o->out, cpu_env);
4374     return DISAS_NEXT;
4375 }
4376 
4377 static DisasJumpType op_stsi(DisasContext *s, DisasOps *o)
4378 {
4379     gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
4380     set_cc_static(s);
4381     return DISAS_NEXT;
4382 }
4383 
4384 static DisasJumpType op_spx(DisasContext *s, DisasOps *o)
4385 {
4386     gen_helper_spx(cpu_env, o->in2);
4387     return DISAS_NEXT;
4388 }
4389 
4390 static DisasJumpType op_xsch(DisasContext *s, DisasOps *o)
4391 {
4392     gen_helper_xsch(cpu_env, regs[1]);
4393     set_cc_static(s);
4394     return DISAS_NEXT;
4395 }
4396 
4397 static DisasJumpType op_csch(DisasContext *s, DisasOps *o)
4398 {
4399     gen_helper_csch(cpu_env, regs[1]);
4400     set_cc_static(s);
4401     return DISAS_NEXT;
4402 }
4403 
4404 static DisasJumpType op_hsch(DisasContext *s, DisasOps *o)
4405 {
4406     gen_helper_hsch(cpu_env, regs[1]);
4407     set_cc_static(s);
4408     return DISAS_NEXT;
4409 }
4410 
4411 static DisasJumpType op_msch(DisasContext *s, DisasOps *o)
4412 {
4413     gen_helper_msch(cpu_env, regs[1], o->in2);
4414     set_cc_static(s);
4415     return DISAS_NEXT;
4416 }
4417 
4418 static DisasJumpType op_rchp(DisasContext *s, DisasOps *o)
4419 {
4420     gen_helper_rchp(cpu_env, regs[1]);
4421     set_cc_static(s);
4422     return DISAS_NEXT;
4423 }
4424 
4425 static DisasJumpType op_rsch(DisasContext *s, DisasOps *o)
4426 {
4427     gen_helper_rsch(cpu_env, regs[1]);
4428     set_cc_static(s);
4429     return DISAS_NEXT;
4430 }
4431 
4432 static DisasJumpType op_sal(DisasContext *s, DisasOps *o)
4433 {
4434     gen_helper_sal(cpu_env, regs[1]);
4435     return DISAS_NEXT;
4436 }
4437 
4438 static DisasJumpType op_schm(DisasContext *s, DisasOps *o)
4439 {
4440     gen_helper_schm(cpu_env, regs[1], regs[2], o->in2);
4441     return DISAS_NEXT;
4442 }
4443 
4444 static DisasJumpType op_siga(DisasContext *s, DisasOps *o)
4445 {
4446     /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
4447     gen_op_movi_cc(s, 3);
4448     return DISAS_NEXT;
4449 }
4450 
4451 static DisasJumpType op_stcps(DisasContext *s, DisasOps *o)
4452 {
4453     /* The instruction is suppressed if not provided. */
4454     return DISAS_NEXT;
4455 }
4456 
4457 static DisasJumpType op_ssch(DisasContext *s, DisasOps *o)
4458 {
4459     gen_helper_ssch(cpu_env, regs[1], o->in2);
4460     set_cc_static(s);
4461     return DISAS_NEXT;
4462 }
4463 
4464 static DisasJumpType op_stsch(DisasContext *s, DisasOps *o)
4465 {
4466     gen_helper_stsch(cpu_env, regs[1], o->in2);
4467     set_cc_static(s);
4468     return DISAS_NEXT;
4469 }
4470 
4471 static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o)
4472 {
4473     gen_helper_stcrw(cpu_env, o->in2);
4474     set_cc_static(s);
4475     return DISAS_NEXT;
4476 }
4477 
4478 static DisasJumpType op_tpi(DisasContext *s, DisasOps *o)
4479 {
4480     gen_helper_tpi(cc_op, cpu_env, o->addr1);
4481     set_cc_static(s);
4482     return DISAS_NEXT;
4483 }
4484 
4485 static DisasJumpType op_tsch(DisasContext *s, DisasOps *o)
4486 {
4487     gen_helper_tsch(cpu_env, regs[1], o->in2);
4488     set_cc_static(s);
4489     return DISAS_NEXT;
4490 }
4491 
4492 static DisasJumpType op_chsc(DisasContext *s, DisasOps *o)
4493 {
4494     gen_helper_chsc(cpu_env, o->in2);
4495     set_cc_static(s);
4496     return DISAS_NEXT;
4497 }
4498 
4499 static DisasJumpType op_stpx(DisasContext *s, DisasOps *o)
4500 {
4501     tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
4502     tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
4503     return DISAS_NEXT;
4504 }
4505 
4506 static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o)
4507 {
4508     uint64_t i2 = get_field(s, i2);
4509     TCGv_i64 t;
4510 
4511     /* It is important to do what the instruction name says: STORE THEN.
4512        If we let the output hook perform the store, then if we fault and
4513        restart, we'll have the wrong SYSTEM MASK in place.  */
4514     t = tcg_temp_new_i64();
4515     tcg_gen_shri_i64(t, psw_mask, 56);
4516     tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
4517     tcg_temp_free_i64(t);
4518 
4519     if (s->fields.op == 0xac) {
4520         tcg_gen_andi_i64(psw_mask, psw_mask,
4521                          (i2 << 56) | 0x00ffffffffffffffull);
4522     } else {
4523         tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
4524     }
4525 
4526     /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
4527     return DISAS_PC_STALE_NOCHAIN;
4528 }
4529 
4530 static DisasJumpType op_stura(DisasContext *s, DisasOps *o)
4531 {
4532     tcg_gen_qemu_st_tl(o->in1, o->in2, MMU_REAL_IDX, s->insn->data);
4533 
4534     if (s->base.tb->flags & FLAG_MASK_PER) {
4535         update_psw_addr(s);
4536         gen_helper_per_store_real(cpu_env);
4537     }
4538     return DISAS_NEXT;
4539 }
4540 #endif
4541 
4542 static DisasJumpType op_stfle(DisasContext *s, DisasOps *o)
4543 {
4544     gen_helper_stfle(cc_op, cpu_env, o->in2);
4545     set_cc_static(s);
4546     return DISAS_NEXT;
4547 }
4548 
4549 static DisasJumpType op_st8(DisasContext *s, DisasOps *o)
4550 {
4551     tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
4552     return DISAS_NEXT;
4553 }
4554 
4555 static DisasJumpType op_st16(DisasContext *s, DisasOps *o)
4556 {
4557     tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
4558     return DISAS_NEXT;
4559 }
4560 
4561 static DisasJumpType op_st32(DisasContext *s, DisasOps *o)
4562 {
4563     tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
4564     return DISAS_NEXT;
4565 }
4566 
4567 static DisasJumpType op_st64(DisasContext *s, DisasOps *o)
4568 {
4569     tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
4570     return DISAS_NEXT;
4571 }
4572 
4573 static DisasJumpType op_stam(DisasContext *s, DisasOps *o)
4574 {
4575     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
4576     TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
4577     gen_helper_stam(cpu_env, r1, o->in2, r3);
4578     tcg_temp_free_i32(r1);
4579     tcg_temp_free_i32(r3);
4580     return DISAS_NEXT;
4581 }
4582 
4583 static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
4584 {
4585     int m3 = get_field(s, m3);
4586     int pos, base = s->insn->data;
4587     TCGv_i64 tmp = tcg_temp_new_i64();
4588 
4589     pos = base + ctz32(m3) * 8;
4590     switch (m3) {
4591     case 0xf:
4592         /* Effectively a 32-bit store.  */
4593         tcg_gen_shri_i64(tmp, o->in1, pos);
4594         tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
4595         break;
4596 
4597     case 0xc:
4598     case 0x6:
4599     case 0x3:
4600         /* Effectively a 16-bit store.  */
4601         tcg_gen_shri_i64(tmp, o->in1, pos);
4602         tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
4603         break;
4604 
4605     case 0x8:
4606     case 0x4:
4607     case 0x2:
4608     case 0x1:
4609         /* Effectively an 8-bit store.  */
4610         tcg_gen_shri_i64(tmp, o->in1, pos);
4611         tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
4612         break;
4613 
4614     default:
4615         /* This is going to be a sequence of shifts and stores.  */
4616         pos = base + 32 - 8;
4617         while (m3) {
4618             if (m3 & 0x8) {
4619                 tcg_gen_shri_i64(tmp, o->in1, pos);
4620                 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
4621                 tcg_gen_addi_i64(o->in2, o->in2, 1);
4622             }
4623             m3 = (m3 << 1) & 0xf;
4624             pos -= 8;
4625         }
4626         break;
4627     }
4628     tcg_temp_free_i64(tmp);
4629     return DISAS_NEXT;
4630 }
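
/*
 * Illustrative example: STCM with M3 = 0b1010 takes the default path
 * and stores bytes 0 and 2 of the 32-bit field to two consecutive
 * destination bytes; the address advances only after a selected byte.
 */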
4631 
4632 static DisasJumpType op_stm(DisasContext *s, DisasOps *o)
4633 {
4634     int r1 = get_field(s, r1);
4635     int r3 = get_field(s, r3);
4636     int size = s->insn->data;
4637     TCGv_i64 tsize = tcg_const_i64(size);
4638 
4639     while (1) {
4640         if (size == 8) {
4641             tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
4642         } else {
4643             tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
4644         }
4645         if (r1 == r3) {
4646             break;
4647         }
4648         tcg_gen_add_i64(o->in2, o->in2, tsize);
4649         r1 = (r1 + 1) & 15;
4650     }
4651 
4652     tcg_temp_free_i64(tsize);
4653     return DISAS_NEXT;
4654 }
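
/*
 * Illustrative note: the register range wraps modulo 16, so e.g.
 * STMG %r14,%r1,... stores r14, r15, r0 and r1 to four consecutive
 * doublewords.
 */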
4655 
4656 static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)
4657 {
4658     int r1 = get_field(s, r1);
4659     int r3 = get_field(s, r3);
4660     TCGv_i64 t = tcg_temp_new_i64();
4661     TCGv_i64 t4 = tcg_const_i64(4);
4662     TCGv_i64 t32 = tcg_const_i64(32);
4663 
4664     while (1) {
4665         tcg_gen_shl_i64(t, regs[r1], t32);
4666         tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
4667         if (r1 == r3) {
4668             break;
4669         }
4670         tcg_gen_add_i64(o->in2, o->in2, t4);
4671         r1 = (r1 + 1) & 15;
4672     }
4673 
4674     tcg_temp_free_i64(t);
4675     tcg_temp_free_i64(t4);
4676     tcg_temp_free_i64(t32);
4677     return DISAS_NEXT;
4678 }
4679 
4680 static DisasJumpType op_stpq(DisasContext *s, DisasOps *o)
4681 {
4682     if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
4683         gen_helper_stpq(cpu_env, o->in2, o->out2, o->out);
4684     } else if (HAVE_ATOMIC128) {
4685         gen_helper_stpq_parallel(cpu_env, o->in2, o->out2, o->out);
4686     } else {
4687         gen_helper_exit_atomic(cpu_env);
4688         return DISAS_NORETURN;
4689     }
4690     return DISAS_NEXT;
4691 }
4692 
4693 static DisasJumpType op_srst(DisasContext *s, DisasOps *o)
4694 {
4695     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
4696     TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
4697 
4698     gen_helper_srst(cpu_env, r1, r2);
4699 
4700     tcg_temp_free_i32(r1);
4701     tcg_temp_free_i32(r2);
4702     set_cc_static(s);
4703     return DISAS_NEXT;
4704 }
4705 
4706 static DisasJumpType op_srstu(DisasContext *s, DisasOps *o)
4707 {
4708     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
4709     TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
4710 
4711     gen_helper_srstu(cpu_env, r1, r2);
4712 
4713     tcg_temp_free_i32(r1);
4714     tcg_temp_free_i32(r2);
4715     set_cc_static(s);
4716     return DISAS_NEXT;
4717 }
4718 
4719 static DisasJumpType op_sub(DisasContext *s, DisasOps *o)
4720 {
4721     tcg_gen_sub_i64(o->out, o->in1, o->in2);
4722     return DISAS_NEXT;
4723 }
4724 
4725 static DisasJumpType op_subu64(DisasContext *s, DisasOps *o)
4726 {
4727     tcg_gen_movi_i64(cc_src, 0);
4728     tcg_gen_sub2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
4729     return DISAS_NEXT;
4730 }
4731 
4732 /* Compute borrow (0, -1) into cc_src. */
4733 static void compute_borrow(DisasContext *s)
4734 {
4735     switch (s->cc_op) {
4736     case CC_OP_SUBU:
4737         /* The borrow value is already in cc_src (0,-1). */
4738         break;
4739     default:
4740         gen_op_calc_cc(s);
4741         /* fall through */
4742     case CC_OP_STATIC:
4743         /* The carry flag is the msb of CC; compute into cc_src. */
4744         tcg_gen_extu_i32_i64(cc_src, cc_op);
4745         tcg_gen_shri_i64(cc_src, cc_src, 1);
4746         /* fall through */
4747     case CC_OP_ADDU:
4748         /* Convert carry (1,0) to borrow (0,-1). */
4749         tcg_gen_subi_i64(cc_src, cc_src, 1);
4750         break;
4751     }
4752 }
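
/*
 * Illustrative example: with borrow represented as 0 or -1, a
 * subtract-with-borrow is just in1 + borrow - in2.  A pending borrow
 * (carry flag clear, borrow = -1) makes 5 - 3 compute as
 * 5 + (-1) - 3 = 1.
 */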
4753 
4754 static DisasJumpType op_subb32(DisasContext *s, DisasOps *o)
4755 {
4756     compute_borrow(s);
4757 
4758     /* Borrow is {0, -1}, so add to subtract. */
4759     tcg_gen_add_i64(o->out, o->in1, cc_src);
4760     tcg_gen_sub_i64(o->out, o->out, o->in2);
4761     return DISAS_NEXT;
4762 }
4763 
4764 static DisasJumpType op_subb64(DisasContext *s, DisasOps *o)
4765 {
4766     compute_borrow(s);
4767 
4768     /*
4769      * Borrow is {0, -1}, so add to subtract; replicate the
4770      * borrow input to produce 128-bit -1 for the addition.
4771      */
4772     TCGv_i64 zero = tcg_const_i64(0);
4773     tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, cc_src);
4774     tcg_gen_sub2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);
4775     tcg_temp_free_i64(zero);
4776 
4777     return DISAS_NEXT;
4778 }
4779 
4780 static DisasJumpType op_svc(DisasContext *s, DisasOps *o)
4781 {
4782     TCGv_i32 t;
4783 
4784     update_psw_addr(s);
4785     update_cc_op(s);
4786 
4787     t = tcg_const_i32(get_field(s, i1) & 0xff);
4788     tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
4789     tcg_temp_free_i32(t);
4790 
4791     t = tcg_const_i32(s->ilen);
4792     tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
4793     tcg_temp_free_i32(t);
4794 
4795     gen_exception(EXCP_SVC);
4796     return DISAS_NORETURN;
4797 }
4798 
4799 static DisasJumpType op_tam(DisasContext *s, DisasOps *o)
4800 {
4801     int cc = 0;
4802 
4803     cc |= (s->base.tb->flags & FLAG_MASK_64) ? 2 : 0;
4804     cc |= (s->base.tb->flags & FLAG_MASK_32) ? 1 : 0;
4805     gen_op_movi_cc(s, cc);
4806     return DISAS_NEXT;
4807 }
4808 
4809 static DisasJumpType op_tceb(DisasContext *s, DisasOps *o)
4810 {
4811     gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
4812     set_cc_static(s);
4813     return DISAS_NEXT;
4814 }
4815 
4816 static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o)
4817 {
4818     gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
4819     set_cc_static(s);
4820     return DISAS_NEXT;
4821 }
4822 
4823 static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o)
4824 {
4825     gen_helper_tcxb(cc_op, cpu_env, o->out, o->out2, o->in2);
4826     set_cc_static(s);
4827     return DISAS_NEXT;
4828 }
4829 
4830 #ifndef CONFIG_USER_ONLY
4831 
4832 static DisasJumpType op_testblock(DisasContext *s, DisasOps *o)
4833 {
4834     gen_helper_testblock(cc_op, cpu_env, o->in2);
4835     set_cc_static(s);
4836     return DISAS_NEXT;
4837 }
4838 
4839 static DisasJumpType op_tprot(DisasContext *s, DisasOps *o)
4840 {
4841     gen_helper_tprot(cc_op, cpu_env, o->addr1, o->in2);
4842     set_cc_static(s);
4843     return DISAS_NEXT;
4844 }
4845 
4846 #endif
4847 
4848 static DisasJumpType op_tp(DisasContext *s, DisasOps *o)
4849 {
4850     TCGv_i32 l1 = tcg_const_i32(get_field(s, l1) + 1);
4851     gen_helper_tp(cc_op, cpu_env, o->addr1, l1);
4852     tcg_temp_free_i32(l1);
4853     set_cc_static(s);
4854     return DISAS_NEXT;
4855 }
4856 
4857 static DisasJumpType op_tr(DisasContext *s, DisasOps *o)
4858 {
4859     TCGv_i32 l = tcg_const_i32(get_field(s, l1));
4860     gen_helper_tr(cpu_env, l, o->addr1, o->in2);
4861     tcg_temp_free_i32(l);
4862     set_cc_static(s);
4863     return DISAS_NEXT;
4864 }
4865 
4866 static DisasJumpType op_tre(DisasContext *s, DisasOps *o)
4867 {
4868     gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
4869     return_low128(o->out2);
4870     set_cc_static(s);
4871     return DISAS_NEXT;
4872 }
4873 
4874 static DisasJumpType op_trt(DisasContext *s, DisasOps *o)
4875 {
4876     TCGv_i32 l = tcg_const_i32(get_field(s, l1));
4877     gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
4878     tcg_temp_free_i32(l);
4879     set_cc_static(s);
4880     return DISAS_NEXT;
4881 }
4882 
4883 static DisasJumpType op_trtr(DisasContext *s, DisasOps *o)
4884 {
4885     TCGv_i32 l = tcg_const_i32(get_field(s, l1));
4886     gen_helper_trtr(cc_op, cpu_env, l, o->addr1, o->in2);
4887     tcg_temp_free_i32(l);
4888     set_cc_static(s);
4889     return DISAS_NEXT;
4890 }
4891 
4892 static DisasJumpType op_trXX(DisasContext *s, DisasOps *o)
4893 {
4894     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
4895     TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
4896     TCGv_i32 sizes = tcg_const_i32(s->insn->opc & 3);
4897     TCGv_i32 tst = tcg_temp_new_i32();
4898     int m3 = get_field(s, m3);
4899 
4900     if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
4901         m3 = 0;
4902     }
4903     if (m3 & 1) {
4904         tcg_gen_movi_i32(tst, -1);
4905     } else {
4906         tcg_gen_extrl_i64_i32(tst, regs[0]);
4907         if (s->insn->opc & 3) {
4908             tcg_gen_ext8u_i32(tst, tst);
4909         } else {
4910             tcg_gen_ext16u_i32(tst, tst);
4911         }
4912     }
4913     gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);
4914 
4915     tcg_temp_free_i32(r1);
4916     tcg_temp_free_i32(r2);
4917     tcg_temp_free_i32(sizes);
4918     tcg_temp_free_i32(tst);
4919     set_cc_static(s);
4920     return DISAS_NEXT;
4921 }
4922 
4923 static DisasJumpType op_ts(DisasContext *s, DisasOps *o)
4924 {
4925     TCGv_i32 t1 = tcg_const_i32(0xff);
4926     tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
4927     tcg_gen_extract_i32(cc_op, t1, 7, 1);
4928     tcg_temp_free_i32(t1);
4929     set_cc_static(s);
4930     return DISAS_NEXT;
4931 }
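
/*
 * Illustrative example: TEST AND SET exchanges the byte with 0xff and
 * turns its former leftmost bit into the CC, so a previous value of
 * 0x00 yields CC 0 and 0x80 yields CC 1.
 */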
4932 
4933 static DisasJumpType op_unpk(DisasContext *s, DisasOps *o)
4934 {
4935     TCGv_i32 l = tcg_const_i32(get_field(s, l1));
4936     gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
4937     tcg_temp_free_i32(l);
4938     return DISAS_NEXT;
4939 }
4940 
4941 static DisasJumpType op_unpka(DisasContext *s, DisasOps *o)
4942 {
4943     int l1 = get_field(s, l1) + 1;
4944     TCGv_i32 l;
4945 
4946     /* The length must not exceed 32 bytes.  */
4947     if (l1 > 32) {
4948         gen_program_exception(s, PGM_SPECIFICATION);
4949         return DISAS_NORETURN;
4950     }
4951     l = tcg_const_i32(l1);
4952     gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2);
4953     tcg_temp_free_i32(l);
4954     set_cc_static(s);
4955     return DISAS_NEXT;
4956 }
4957 
4958 static DisasJumpType op_unpku(DisasContext *s, DisasOps *o)
4959 {
4960     int l1 = get_field(s, l1) + 1;
4961     TCGv_i32 l;
4962 
4963     /* The length must be even and must not exceed 64 bytes.  */
4964     if ((l1 & 1) || (l1 > 64)) {
4965         gen_program_exception(s, PGM_SPECIFICATION);
4966         return DISAS_NORETURN;
4967     }
4968     l = tcg_const_i32(l1);
4969     gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2);
4970     tcg_temp_free_i32(l);
4971     set_cc_static(s);
4972     return DISAS_NEXT;
4973 }
4974 
4976 static DisasJumpType op_xc(DisasContext *s, DisasOps *o)
4977 {
4978     int d1 = get_field(s, d1);
4979     int d2 = get_field(s, d2);
4980     int b1 = get_field(s, b1);
4981     int b2 = get_field(s, b2);
4982     int l = get_field(s, l1);
4983     TCGv_i32 t32;
4984 
4985     o->addr1 = get_address(s, 0, b1, d1);
4986 
4987     /* If the addresses are identical, this is a store/memset of zero.  */
4988     if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
4989         o->in2 = tcg_const_i64(0);
4990 
4991         l++;
4992         while (l >= 8) {
4993             tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
4994             l -= 8;
4995             if (l > 0) {
4996                 tcg_gen_addi_i64(o->addr1, o->addr1, 8);
4997             }
4998         }
4999         if (l >= 4) {
5000             tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
5001             l -= 4;
5002             if (l > 0) {
5003                 tcg_gen_addi_i64(o->addr1, o->addr1, 4);
5004             }
5005         }
5006         if (l >= 2) {
5007             tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
5008             l -= 2;
5009             if (l > 0) {
5010                 tcg_gen_addi_i64(o->addr1, o->addr1, 2);
5011             }
5012         }
5013         if (l) {
5014             tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
5015         }
5016         gen_op_movi_cc(s, 0);
5017         return DISAS_NEXT;
5018     }
5019 
5020     /* But in general we'll defer to a helper.  */
5021     o->in2 = get_address(s, 0, b2, d2);
5022     t32 = tcg_const_i32(l);
5023     gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
5024     tcg_temp_free_i32(t32);
5025     set_cc_static(s);
5026     return DISAS_NEXT;
5027 }
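
/*
 * Illustrative example: an XC of a field onto itself with L1 = 10
 * (an 11-byte clear) is emitted inline as one 8-byte, one 2-byte and
 * one 1-byte store of zero.
 */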
5028 
5029 static DisasJumpType op_xor(DisasContext *s, DisasOps *o)
5030 {
5031     tcg_gen_xor_i64(o->out, o->in1, o->in2);
5032     return DISAS_NEXT;
5033 }
5034 
5035 static DisasJumpType op_xori(DisasContext *s, DisasOps *o)
5036 {
5037     int shift = s->insn->data & 0xff;
5038     int size = s->insn->data >> 8;
5039     uint64_t mask = ((1ull << size) - 1) << shift;
5040 
5041     assert(!o->g_in2);
5042     tcg_gen_shli_i64(o->in2, o->in2, shift);
5043     tcg_gen_xor_i64(o->out, o->in1, o->in2);
5044 
5045     /* Produce the CC from only the bits manipulated.  */
5046     tcg_gen_andi_i64(cc_dst, o->out, mask);
5047     set_cc_nz_u64(s, cc_dst);
5048     return DISAS_NEXT;
5049 }
5050 
5051 static DisasJumpType op_xi(DisasContext *s, DisasOps *o)
5052 {
5053     o->in1 = tcg_temp_new_i64();
5054 
5055     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
5056         tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
5057     } else {
5058         /* Perform the atomic operation in memory. */
5059         tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
5060                                      s->insn->data);
5061     }
5062 
5063     /* Also recompute for the atomic case; it is needed to set the CC. */
5064     tcg_gen_xor_i64(o->out, o->in1, o->in2);
5065 
5066     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
5067         tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
5068     }
5069     return DISAS_NEXT;
5070 }
5071 
5072 static DisasJumpType op_zero(DisasContext *s, DisasOps *o)
5073 {
5074     o->out = tcg_const_i64(0);
5075     return DISAS_NEXT;
5076 }
5077 
5078 static DisasJumpType op_zero2(DisasContext *s, DisasOps *o)
5079 {
5080     o->out = tcg_const_i64(0);
5081     o->out2 = o->out;
5082     o->g_out2 = true;
5083     return DISAS_NEXT;
5084 }
5085 
5086 #ifndef CONFIG_USER_ONLY
5087 static DisasJumpType op_clp(DisasContext *s, DisasOps *o)
5088 {
5089     TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
5090 
5091     gen_helper_clp(cpu_env, r2);
5092     tcg_temp_free_i32(r2);
5093     set_cc_static(s);
5094     return DISAS_NEXT;
5095 }
5096 
5097 static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o)
5098 {
5099     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
5100     TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
5101 
5102     gen_helper_pcilg(cpu_env, r1, r2);
5103     tcg_temp_free_i32(r1);
5104     tcg_temp_free_i32(r2);
5105     set_cc_static(s);
5106     return DISAS_NEXT;
5107 }
5108 
5109 static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o)
5110 {
5111     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
5112     TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
5113 
5114     gen_helper_pcistg(cpu_env, r1, r2);
5115     tcg_temp_free_i32(r1);
5116     tcg_temp_free_i32(r2);
5117     set_cc_static(s);
5118     return DISAS_NEXT;
5119 }
5120 
5121 static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o)
5122 {
5123     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
5124     TCGv_i32 ar = tcg_const_i32(get_field(s, b2));
5125 
5126     gen_helper_stpcifc(cpu_env, r1, o->addr1, ar);
5127     tcg_temp_free_i32(ar);
5128     tcg_temp_free_i32(r1);
5129     set_cc_static(s);
5130     return DISAS_NEXT;
5131 }
5132 
5133 static DisasJumpType op_sic(DisasContext *s, DisasOps *o)
5134 {
5135     gen_helper_sic(cpu_env, o->in1, o->in2);
5136     return DISAS_NEXT;
5137 }
5138 
5139 static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o)
5140 {
5141     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
5142     TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
5143 
5144     gen_helper_rpcit(cpu_env, r1, r2);
5145     tcg_temp_free_i32(r1);
5146     tcg_temp_free_i32(r2);
5147     set_cc_static(s);
5148     return DISAS_NEXT;
5149 }
5150 
5151 static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o)
5152 {
5153     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
5154     TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
5155     TCGv_i32 ar = tcg_const_i32(get_field(s, b2));
5156 
5157     gen_helper_pcistb(cpu_env, r1, r3, o->addr1, ar);
5158     tcg_temp_free_i32(ar);
5159     tcg_temp_free_i32(r1);
5160     tcg_temp_free_i32(r3);
5161     set_cc_static(s);
5162     return DISAS_NEXT;
5163 }
5164 
5165 static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o)
5166 {
5167     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
5168     TCGv_i32 ar = tcg_const_i32(get_field(s, b2));
5169 
5170     gen_helper_mpcifc(cpu_env, r1, o->addr1, ar);
5171     tcg_temp_free_i32(ar);
5172     tcg_temp_free_i32(r1);
5173     set_cc_static(s);
5174     return DISAS_NEXT;
5175 }
5176 #endif
5177 
5178 #include "translate_vx.c.inc"
5179 
5180 /* ====================================================================== */
5181 /* The "Cc OUTput" generators.  Given the generated output (and in some cases
5182    the original inputs), update the various cc data structures in order to
5183    be able to compute the new condition code.  */
5184 
5185 static void cout_abs32(DisasContext *s, DisasOps *o)
5186 {
5187     gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
5188 }
5189 
5190 static void cout_abs64(DisasContext *s, DisasOps *o)
5191 {
5192     gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
5193 }
5194 
5195 static void cout_adds32(DisasContext *s, DisasOps *o)
5196 {
5197     gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
5198 }
5199 
5200 static void cout_adds64(DisasContext *s, DisasOps *o)
5201 {
5202     gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
5203 }
5204 
5205 static void cout_addu32(DisasContext *s, DisasOps *o)
5206 {
5207     tcg_gen_shri_i64(cc_src, o->out, 32);
5208     tcg_gen_ext32u_i64(cc_dst, o->out);
5209     gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, cc_dst);
5210 }
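
/*
 * Illustrative example: for a 32-bit unsigned add, the 64-bit sum of
 * 0xffffffff and 1 is 0x100000000, which splits into cc_src = 1 (the
 * carry out) and cc_dst = 0 (the 32-bit result).
 */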
5211 
5212 static void cout_addu64(DisasContext *s, DisasOps *o)
5213 {
5214     gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, o->out);
5215 }
5216 
5217 static void cout_cmps32(DisasContext *s, DisasOps *o)
5218 {
5219     gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
5220 }
5221 
5222 static void cout_cmps64(DisasContext *s, DisasOps *o)
5223 {
5224     gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
5225 }
5226 
5227 static void cout_cmpu32(DisasContext *s, DisasOps *o)
5228 {
5229     gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
5230 }
5231 
5232 static void cout_cmpu64(DisasContext *s, DisasOps *o)
5233 {
5234     gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
5235 }
5236 
5237 static void cout_f32(DisasContext *s, DisasOps *o)
5238 {
5239     gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
5240 }
5241 
5242 static void cout_f64(DisasContext *s, DisasOps *o)
5243 {
5244     gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
5245 }
5246 
5247 static void cout_f128(DisasContext *s, DisasOps *o)
5248 {
5249     gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
5250 }
5251 
5252 static void cout_nabs32(DisasContext *s, DisasOps *o)
5253 {
5254     gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
5255 }
5256 
5257 static void cout_nabs64(DisasContext *s, DisasOps *o)
5258 {
5259     gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
5260 }
5261 
5262 static void cout_neg32(DisasContext *s, DisasOps *o)
5263 {
5264     gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
5265 }
5266 
5267 static void cout_neg64(DisasContext *s, DisasOps *o)
5268 {
5269     gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
5270 }
5271 
5272 static void cout_nz32(DisasContext *s, DisasOps *o)
5273 {
5274     tcg_gen_ext32u_i64(cc_dst, o->out);
5275     gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
5276 }
5277 
5278 static void cout_nz64(DisasContext *s, DisasOps *o)
5279 {
5280     gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
5281 }
5282 
5283 static void cout_s32(DisasContext *s, DisasOps *o)
5284 {
5285     gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
5286 }
5287 
5288 static void cout_s64(DisasContext *s, DisasOps *o)
5289 {
5290     gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
5291 }
5292 
5293 static void cout_subs32(DisasContext *s, DisasOps *o)
5294 {
5295     gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
5296 }
5297 
5298 static void cout_subs64(DisasContext *s, DisasOps *o)
5299 {
5300     gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
5301 }
5302 
5303 static void cout_subu32(DisasContext *s, DisasOps *o)
5304 {
5305     tcg_gen_sari_i64(cc_src, o->out, 32);
5306     tcg_gen_ext32u_i64(cc_dst, o->out);
5307     gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, cc_dst);
5308 }
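
/*
 * Illustrative example: for a 32-bit unsigned subtract, 1 - 2 yields
 * 0xffffffffffffffff; the arithmetic shift leaves borrow -1 in cc_src
 * and the zero-extension leaves 0xffffffff in cc_dst.
 */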
5309 
5310 static void cout_subu64(DisasContext *s, DisasOps *o)
5311 {
5312     gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, o->out);
5313 }
5314 
5315 static void cout_tm32(DisasContext *s, DisasOps *o)
5316 {
5317     gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
5318 }
5319 
5320 static void cout_tm64(DisasContext *s, DisasOps *o)
5321 {
5322     gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
5323 }
5324 
5325 static void cout_muls32(DisasContext *s, DisasOps *o)
5326 {
5327     gen_op_update1_cc_i64(s, CC_OP_MULS_32, o->out);
5328 }
5329 
5330 static void cout_muls64(DisasContext *s, DisasOps *o)
5331 {
5332     /* out contains "high" part, out2 contains "low" part of 128 bit result */
5333     gen_op_update2_cc_i64(s, CC_OP_MULS_64, o->out, o->out2);
5334 }
5335 
5336 /* ====================================================================== */
5337 /* The "PREParation" generators.  These initialize the DisasOps.OUT fields
5338    with the TCG register to which we will write.  Used in combination with
5339    the "wout" generators, in some cases we need a new temporary, and in
5340    some cases we can write to a TCG global.  */
5341 
5342 static void prep_new(DisasContext *s, DisasOps *o)
5343 {
5344     o->out = tcg_temp_new_i64();
5345 }
5346 #define SPEC_prep_new 0
5347 
5348 static void prep_new_P(DisasContext *s, DisasOps *o)
5349 {
5350     o->out = tcg_temp_new_i64();
5351     o->out2 = tcg_temp_new_i64();
5352 }
5353 #define SPEC_prep_new_P 0
5354 
5355 static void prep_r1(DisasContext *s, DisasOps *o)
5356 {
5357     o->out = regs[get_field(s, r1)];
5358     o->g_out = true;
5359 }
5360 #define SPEC_prep_r1 0
5361 
5362 static void prep_r1_P(DisasContext *s, DisasOps *o)
5363 {
5364     int r1 = get_field(s, r1);
5365     o->out = regs[r1];
5366     o->out2 = regs[r1 + 1];
5367     o->g_out = o->g_out2 = true;
5368 }
5369 #define SPEC_prep_r1_P SPEC_r1_even
5370 
5371 /* Whenever we need x1 in addition to other inputs, we'll load it to out/out2 */
5372 static void prep_x1(DisasContext *s, DisasOps *o)
5373 {
5374     o->out = load_freg(get_field(s, r1));
5375     o->out2 = load_freg(get_field(s, r1) + 2);
5376 }
5377 #define SPEC_prep_x1 SPEC_r1_f128
5378 
5379 /* ====================================================================== */
5380 /* The "Write OUTput" generators.  These generally perform some non-trivial
5381    copy of data to TCG globals, or to main memory.  The trivial cases are
5382    generally handled by having a "prep" generator install the TCG global
5383    as the destination of the operation.  */
5384 
5385 static void wout_r1(DisasContext *s, DisasOps *o)
5386 {
5387     store_reg(get_field(s, r1), o->out);
5388 }
5389 #define SPEC_wout_r1 0
5390 
5391 static void wout_out2_r1(DisasContext *s, DisasOps *o)
5392 {
5393     store_reg(get_field(s, r1), o->out2);
5394 }
5395 #define SPEC_wout_out2_r1 0
5396 
5397 static void wout_r1_8(DisasContext *s, DisasOps *o)
5398 {
5399     int r1 = get_field(s, r1);
5400     tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
5401 }
5402 #define SPEC_wout_r1_8 0
5403 
5404 static void wout_r1_16(DisasContext *s, DisasOps *o)
5405 {
5406     int r1 = get_field(s, r1);
5407     tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
5408 }
5409 #define SPEC_wout_r1_16 0
5410 
5411 static void wout_r1_32(DisasContext *s, DisasOps *o)
5412 {
5413     store_reg32_i64(get_field(s, r1), o->out);
5414 }
5415 #define SPEC_wout_r1_32 0
5416 
5417 static void wout_r1_32h(DisasContext *s, DisasOps *o)
5418 {
5419     store_reg32h_i64(get_field(s, r1), o->out);
5420 }
5421 #define SPEC_wout_r1_32h 0
5422 
5423 static void wout_r1_P32(DisasContext *s, DisasOps *o)
5424 {
5425     int r1 = get_field(s, r1);
5426     store_reg32_i64(r1, o->out);
5427     store_reg32_i64(r1 + 1, o->out2);
5428 }
5429 #define SPEC_wout_r1_P32 SPEC_r1_even
5430 
5431 static void wout_r1_D32(DisasContext *s, DisasOps *o)
5432 {
5433     int r1 = get_field(s, r1);
5434     store_reg32_i64(r1 + 1, o->out);
5435     tcg_gen_shri_i64(o->out, o->out, 32);
5436     store_reg32_i64(r1, o->out);
5437 }
5438 #define SPEC_wout_r1_D32 SPEC_r1_even
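/*
 * Note for wout_r1_D32 above: the 64-bit result is split across the
 * even/odd register pair, the low half going to r1 + 1 and the high
 * half to r1.
 */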
5439 
5440 static void wout_r3_P32(DisasContext *s, DisasOps *o)
5441 {
5442     int r3 = get_field(s, r3);
5443     store_reg32_i64(r3, o->out);
5444     store_reg32_i64(r3 + 1, o->out2);
5445 }
5446 #define SPEC_wout_r3_P32 SPEC_r3_even
5447 
5448 static void wout_r3_P64(DisasContext *s, DisasOps *o)
5449 {
5450     int r3 = get_field(s, r3);
5451     store_reg(r3, o->out);
5452     store_reg(r3 + 1, o->out2);
5453 }
5454 #define SPEC_wout_r3_P64 SPEC_r3_even
5455 
5456 static void wout_e1(DisasContext *s, DisasOps *o)
5457 {
5458     store_freg32_i64(get_field(s, r1), o->out);
5459 }
5460 #define SPEC_wout_e1 0
5461 
5462 static void wout_f1(DisasContext *s, DisasOps *o)
5463 {
5464     store_freg(get_field(s, r1), o->out);
5465 }
5466 #define SPEC_wout_f1 0
5467 
5468 static void wout_x1(DisasContext *s, DisasOps *o)
5469 {
5470     int f1 = get_field(s, r1);
5471     store_freg(f1, o->out);
5472     store_freg(f1 + 2, o->out2);
5473 }
5474 #define SPEC_wout_x1 SPEC_r1_f128
5475 
5476 static void wout_cond_r1r2_32(DisasContext *s, DisasOps *o)
5477 {
5478     if (get_field(s, r1) != get_field(s, r2)) {
5479         store_reg32_i64(get_field(s, r1), o->out);
5480     }
5481 }
5482 #define SPEC_wout_cond_r1r2_32 0
5483 
5484 static void wout_cond_e1e2(DisasContext *s, DisasOps *o)
5485 {
5486     if (get_field(s, r1) != get_field(s, r2)) {
5487         store_freg32_i64(get_field(s, r1), o->out);
5488     }
5489 }
5490 #define SPEC_wout_cond_e1e2 0
5491 
5492 static void wout_m1_8(DisasContext *s, DisasOps *o)
5493 {
5494     tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
5495 }
5496 #define SPEC_wout_m1_8 0
5497 
5498 static void wout_m1_16(DisasContext *s, DisasOps *o)
5499 {
5500     tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
5501 }
5502 #define SPEC_wout_m1_16 0
5503 
5504 #ifndef CONFIG_USER_ONLY
5505 static void wout_m1_16a(DisasContext *s, DisasOps *o)
5506 {
5507     tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN);
5508 }
5509 #define SPEC_wout_m1_16a 0
5510 #endif
5511 
5512 static void wout_m1_32(DisasContext *s, DisasOps *o)
5513 {
5514     tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
5515 }
5516 #define SPEC_wout_m1_32 0
5517 
5518 #ifndef CONFIG_USER_ONLY
5519 static void wout_m1_32a(DisasContext *s, DisasOps *o)
5520 {
5521     tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN);
5522 }
5523 #define SPEC_wout_m1_32a 0
5524 #endif
5525 
5526 static void wout_m1_64(DisasContext *s, DisasOps *o)
5527 {
5528     tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
5529 }
5530 #define SPEC_wout_m1_64 0
5531 
5532 #ifndef CONFIG_USER_ONLY
5533 static void wout_m1_64a(DisasContext *s, DisasOps *o)
5534 {
5535     tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
5536 }
5537 #define SPEC_wout_m1_64a 0
5538 #endif
5539 
5540 static void wout_m2_32(DisasContext *s, DisasOps *o)
5541 {
5542     tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
5543 }
5544 #define SPEC_wout_m2_32 0
5545 
5546 static void wout_in2_r1(DisasContext *s, DisasOps *o)
5547 {
5548     store_reg(get_field(s, r1), o->in2);
5549 }
5550 #define SPEC_wout_in2_r1 0
5551 
5552 static void wout_in2_r1_32(DisasContext *s, DisasOps *o)
5553 {
5554     store_reg32_i64(get_field(s, r1), o->in2);
5555 }
5556 #define SPEC_wout_in2_r1_32 0
5557 
5558 /* ====================================================================== */
5559 /* The "INput 1" generators.  These load the first operand of an insn.  */
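/*
 * Naming convention used below: plain rN loads the 64-bit GPR; _32s and
 * _32u load its low 32 bits sign- or zero-extended; _sr32 loads its high
 * 32 bits; _D32 reassembles an even/odd 32-bit pair into one value; _o
 * aliases the TCG global itself (g_in1 set, so it is never freed); and
 * m1_* load from the storage operand addressed via b1/d1.
 */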
5560 
5561 static void in1_r1(DisasContext *s, DisasOps *o)
5562 {
5563     o->in1 = load_reg(get_field(s, r1));
5564 }
5565 #define SPEC_in1_r1 0
5566 
5567 static void in1_r1_o(DisasContext *s, DisasOps *o)
5568 {
5569     o->in1 = regs[get_field(s, r1)];
5570     o->g_in1 = true;
5571 }
5572 #define SPEC_in1_r1_o 0
5573 
5574 static void in1_r1_32s(DisasContext *s, DisasOps *o)
5575 {
5576     o->in1 = tcg_temp_new_i64();
5577     tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1)]);
5578 }
5579 #define SPEC_in1_r1_32s 0
5580 
5581 static void in1_r1_32u(DisasContext *s, DisasOps *o)
5582 {
5583     o->in1 = tcg_temp_new_i64();
5584     tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1)]);
5585 }
5586 #define SPEC_in1_r1_32u 0
5587 
5588 static void in1_r1_sr32(DisasContext *s, DisasOps *o)
5589 {
5590     o->in1 = tcg_temp_new_i64();
5591     tcg_gen_shri_i64(o->in1, regs[get_field(s, r1)], 32);
5592 }
5593 #define SPEC_in1_r1_sr32 0
5594 
5595 static void in1_r1p1(DisasContext *s, DisasOps *o)
5596 {
5597     o->in1 = load_reg(get_field(s, r1) + 1);
5598 }
5599 #define SPEC_in1_r1p1 SPEC_r1_even
5600 
5601 static void in1_r1p1_o(DisasContext *s, DisasOps *o)
5602 {
5603     o->in1 = regs[get_field(s, r1) + 1];
5604     o->g_in1 = true;
5605 }
5606 #define SPEC_in1_r1p1_o SPEC_r1_even
5607 
5608 static void in1_r1p1_32s(DisasContext *s, DisasOps *o)
5609 {
5610     o->in1 = tcg_temp_new_i64();
5611     tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1) + 1]);
5612 }
5613 #define SPEC_in1_r1p1_32s SPEC_r1_even
5614 
5615 static void in1_r1p1_32u(DisasContext *s, DisasOps *o)
5616 {
5617     o->in1 = tcg_temp_new_i64();
5618     tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1) + 1]);
5619 }
5620 #define SPEC_in1_r1p1_32u SPEC_r1_even
5621 
5622 static void in1_r1_D32(DisasContext *s, DisasOps *o)
5623 {
5624     int r1 = get_field(s, r1);
5625     o->in1 = tcg_temp_new_i64();
5626     tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
5627 }
5628 #define SPEC_in1_r1_D32 SPEC_r1_even
5629 
5630 static void in1_r2(DisasContext *s, DisasOps *o)
5631 {
5632     o->in1 = load_reg(get_field(s, r2));
5633 }
5634 #define SPEC_in1_r2 0
5635 
5636 static void in1_r2_sr32(DisasContext *s, DisasOps *o)
5637 {
5638     o->in1 = tcg_temp_new_i64();
5639     tcg_gen_shri_i64(o->in1, regs[get_field(s, r2)], 32);
5640 }
5641 #define SPEC_in1_r2_sr32 0
5642 
5643 static void in1_r2_32u(DisasContext *s, DisasOps *o)
5644 {
5645     o->in1 = tcg_temp_new_i64();
5646     tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r2)]);
5647 }
5648 #define SPEC_in1_r2_32u 0
5649 
5650 static void in1_r3(DisasContext *s, DisasOps *o)
5651 {
5652     o->in1 = load_reg(get_field(s, r3));
5653 }
5654 #define SPEC_in1_r3 0
5655 
5656 static void in1_r3_o(DisasContext *s, DisasOps *o)
5657 {
5658     o->in1 = regs[get_field(s, r3)];
5659     o->g_in1 = true;
5660 }
5661 #define SPEC_in1_r3_o 0
5662 
5663 static void in1_r3_32s(DisasContext *s, DisasOps *o)
5664 {
5665     o->in1 = tcg_temp_new_i64();
5666     tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r3)]);
5667 }
5668 #define SPEC_in1_r3_32s 0
5669 
5670 static void in1_r3_32u(DisasContext *s, DisasOps *o)
5671 {
5672     o->in1 = tcg_temp_new_i64();
5673     tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r3)]);
5674 }
5675 #define SPEC_in1_r3_32u 0
5676 
5677 static void in1_r3_D32(DisasContext *s, DisasOps *o)
5678 {
5679     int r3 = get_field(s, r3);
5680     o->in1 = tcg_temp_new_i64();
5681     tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
5682 }
5683 #define SPEC_in1_r3_D32 SPEC_r3_even
5684 
5685 static void in1_e1(DisasContext *s, DisasOps *o)
5686 {
5687     o->in1 = load_freg32_i64(get_field(s, r1));
5688 }
5689 #define SPEC_in1_e1 0
5690 
5691 static void in1_f1(DisasContext *s, DisasOps *o)
5692 {
5693     o->in1 = load_freg(get_field(s, r1));
5694 }
5695 #define SPEC_in1_f1 0
5696 
5697 /* Load the high double word of an extended (128-bit) format FP number */
5698 static void in1_x2h(DisasContext *s, DisasOps *o)
5699 {
5700     o->in1 = load_freg(get_field(s, r2));
5701 }
5702 #define SPEC_in1_x2h SPEC_r2_f128
5703 
5704 static void in1_f3(DisasContext *s, DisasOps *o)
5705 {
5706     o->in1 = load_freg(get_field(s, r3));
5707 }
5708 #define SPEC_in1_f3 0
5709 
5710 static void in1_la1(DisasContext *s, DisasOps *o)
5711 {
5712     o->addr1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
5713 }
5714 #define SPEC_in1_la1 0
5715 
5716 static void in1_la2(DisasContext *s, DisasOps *o)
5717 {
5718     int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
5719     o->addr1 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
5720 }
5721 #define SPEC_in1_la2 0
5722 
5723 static void in1_m1_8u(DisasContext *s, DisasOps *o)
5724 {
5725     in1_la1(s, o);
5726     o->in1 = tcg_temp_new_i64();
5727     tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
5728 }
5729 #define SPEC_in1_m1_8u 0
5730 
5731 static void in1_m1_16s(DisasContext *s, DisasOps *o)
5732 {
5733     in1_la1(s, o);
5734     o->in1 = tcg_temp_new_i64();
5735     tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
5736 }
5737 #define SPEC_in1_m1_16s 0
5738 
5739 static void in1_m1_16u(DisasContext *s, DisasOps *o)
5740 {
5741     in1_la1(s, o);
5742     o->in1 = tcg_temp_new_i64();
5743     tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
5744 }
5745 #define SPEC_in1_m1_16u 0
5746 
5747 static void in1_m1_32s(DisasContext *s, DisasOps *o)
5748 {
5749     in1_la1(s, o);
5750     o->in1 = tcg_temp_new_i64();
5751     tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
5752 }
5753 #define SPEC_in1_m1_32s 0
5754 
5755 static void in1_m1_32u(DisasContext *s, DisasOps *o)
5756 {
5757     in1_la1(s, o);
5758     o->in1 = tcg_temp_new_i64();
5759     tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
5760 }
5761 #define SPEC_in1_m1_32u 0
5762 
5763 static void in1_m1_64(DisasContext *s, DisasOps *o)
5764 {
5765     in1_la1(s, o);
5766     o->in1 = tcg_temp_new_i64();
5767     tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
5768 }
5769 #define SPEC_in1_m1_64 0
5770 
5771 /* ====================================================================== */
5772 /* The "INput 2" generators.  These load the second operand of an insn.  */
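/*
 * The register variants follow the same naming convention as the in1
 * generators.  In addition: a2 computes the x2/b2/d2 effective address;
 * m2_* load through it; ri2 forms the PC-relative address pc + 2 * i2
 * (i2 is counted in halfwords); mri2_* load through that; and i2_*
 * materialize the immediate itself, optionally truncated or shifted.
 */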
5773 
5774 static void in2_r1_o(DisasContext *s, DisasOps *o)
5775 {
5776     o->in2 = regs[get_field(s, r1)];
5777     o->g_in2 = true;
5778 }
5779 #define SPEC_in2_r1_o 0
5780 
5781 static void in2_r1_16u(DisasContext *s, DisasOps *o)
5782 {
5783     o->in2 = tcg_temp_new_i64();
5784     tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r1)]);
5785 }
5786 #define SPEC_in2_r1_16u 0
5787 
5788 static void in2_r1_32u(DisasContext *s, DisasOps *o)
5789 {
5790     o->in2 = tcg_temp_new_i64();
5791     tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r1)]);
5792 }
5793 #define SPEC_in2_r1_32u 0
5794 
5795 static void in2_r1_D32(DisasContext *s, DisasOps *o)
5796 {
5797     int r1 = get_field(s, r1);
5798     o->in2 = tcg_temp_new_i64();
5799     tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
5800 }
5801 #define SPEC_in2_r1_D32 SPEC_r1_even
5802 
5803 static void in2_r2(DisasContext *s, DisasOps *o)
5804 {
5805     o->in2 = load_reg(get_field(s, r2));
5806 }
5807 #define SPEC_in2_r2 0
5808 
5809 static void in2_r2_o(DisasContext *s, DisasOps *o)
5810 {
5811     o->in2 = regs[get_field(s, r2)];
5812     o->g_in2 = true;
5813 }
5814 #define SPEC_in2_r2_o 0
5815 
5816 static void in2_r2_nz(DisasContext *s, DisasOps *o)
5817 {
5818     int r2 = get_field(s, r2);
5819     if (r2 != 0) {
5820         o->in2 = load_reg(r2);
5821     }
5822 }
5823 #define SPEC_in2_r2_nz 0
5824 
5825 static void in2_r2_8s(DisasContext *s, DisasOps *o)
5826 {
5827     o->in2 = tcg_temp_new_i64();
5828     tcg_gen_ext8s_i64(o->in2, regs[get_field(s, r2)]);
5829 }
5830 #define SPEC_in2_r2_8s 0
5831 
5832 static void in2_r2_8u(DisasContext *s, DisasOps *o)
5833 {
5834     o->in2 = tcg_temp_new_i64();
5835     tcg_gen_ext8u_i64(o->in2, regs[get_field(s, r2)]);
5836 }
5837 #define SPEC_in2_r2_8u 0
5838 
5839 static void in2_r2_16s(DisasContext *s, DisasOps *o)
5840 {
5841     o->in2 = tcg_temp_new_i64();
5842     tcg_gen_ext16s_i64(o->in2, regs[get_field(s, r2)]);
5843 }
5844 #define SPEC_in2_r2_16s 0
5845 
5846 static void in2_r2_16u(DisasContext *s, DisasOps *o)
5847 {
5848     o->in2 = tcg_temp_new_i64();
5849     tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r2)]);
5850 }
5851 #define SPEC_in2_r2_16u 0
5852 
5853 static void in2_r3(DisasContext *s, DisasOps *o)
5854 {
5855     o->in2 = load_reg(get_field(s, r3));
5856 }
5857 #define SPEC_in2_r3 0
5858 
5859 static void in2_r3_sr32(DisasContext *s, DisasOps *o)
5860 {
5861     o->in2 = tcg_temp_new_i64();
5862     tcg_gen_shri_i64(o->in2, regs[get_field(s, r3)], 32);
5863 }
5864 #define SPEC_in2_r3_sr32 0
5865 
5866 static void in2_r3_32u(DisasContext *s, DisasOps *o)
5867 {
5868     o->in2 = tcg_temp_new_i64();
5869     tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r3)]);
5870 }
5871 #define SPEC_in2_r3_32u 0
5872 
5873 static void in2_r2_32s(DisasContext *s, DisasOps *o)
5874 {
5875     o->in2 = tcg_temp_new_i64();
5876     tcg_gen_ext32s_i64(o->in2, regs[get_field(s, r2)]);
5877 }
5878 #define SPEC_in2_r2_32s 0
5879 
5880 static void in2_r2_32u(DisasContext *s, DisasOps *o)
5881 {
5882     o->in2 = tcg_temp_new_i64();
5883     tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r2)]);
5884 }
5885 #define SPEC_in2_r2_32u 0
5886 
5887 static void in2_r2_sr32(DisasContext *s, DisasOps *o)
5888 {
5889     o->in2 = tcg_temp_new_i64();
5890     tcg_gen_shri_i64(o->in2, regs[get_field(s, r2)], 32);
5891 }
5892 #define SPEC_in2_r2_sr32 0
5893 
5894 static void in2_e2(DisasContext *s, DisasOps *o)
5895 {
5896     o->in2 = load_freg32_i64(get_field(s, r2));
5897 }
5898 #define SPEC_in2_e2 0
5899 
5900 static void in2_f2(DisasContext *s, DisasOps *o)
5901 {
5902     o->in2 = load_freg(get_field(s, r2));
5903 }
5904 #define SPEC_in2_f2 0
5905 
5906 /* Load the low double word of an extended (128-bit) format FP number */
5907 static void in2_x2l(DisasContext *s, DisasOps *o)
5908 {
5909     o->in2 = load_freg(get_field(s, r2) + 2);
5910 }
5911 #define SPEC_in2_x2l SPEC_r2_f128
5912 
5913 static void in2_ra2(DisasContext *s, DisasOps *o)
5914 {
5915     int r2 = get_field(s, r2);
5916 
5917     /* Note: *don't* treat !r2 as 0, use the reg value. */
5918     o->in2 = tcg_temp_new_i64();
5919     gen_addi_and_wrap_i64(s, o->in2, regs[r2], 0);
5920 }
5921 #define SPEC_in2_ra2 0
5922 
5923 static void in2_a2(DisasContext *s, DisasOps *o)
5924 {
5925     int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
5926     o->in2 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
5927 }
5928 #define SPEC_in2_a2 0
5929 
5930 static void in2_ri2(DisasContext *s, DisasOps *o)
5931 {
5932     o->in2 = tcg_const_i64(s->base.pc_next + (int64_t)get_field(s, i2) * 2);
5933 }
5934 #define SPEC_in2_ri2 0
5935 
5936 static void in2_sh32(DisasContext *s, DisasOps *o)
5937 {
5938     help_l2_shift(s, o, 31);
5939 }
5940 #define SPEC_in2_sh32 0
5941 
5942 static void in2_sh64(DisasContext *s, DisasOps *o)
5943 {
5944     help_l2_shift(s, o, 63);
5945 }
5946 #define SPEC_in2_sh64 0
5947 
5948 static void in2_m2_8u(DisasContext *s, DisasOps *o)
5949 {
5950     in2_a2(s, o);
5951     tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
5952 }
5953 #define SPEC_in2_m2_8u 0
5954 
5955 static void in2_m2_16s(DisasContext *s, DisasOps *o)
5956 {
5957     in2_a2(s, o);
5958     tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
5959 }
5960 #define SPEC_in2_m2_16s 0
5961 
5962 static void in2_m2_16u(DisasContext *s, DisasOps *o)
5963 {
5964     in2_a2(s, o);
5965     tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
5966 }
5967 #define SPEC_in2_m2_16u 0
5968 
5969 static void in2_m2_32s(DisasContext *s, DisasOps *o)
5970 {
5971     in2_a2(s, o);
5972     tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
5973 }
5974 #define SPEC_in2_m2_32s 0
5975 
5976 static void in2_m2_32u(DisasContext *s, DisasOps *o)
5977 {
5978     in2_a2(s, o);
5979     tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
5980 }
5981 #define SPEC_in2_m2_32u 0
5982 
5983 #ifndef CONFIG_USER_ONLY
5984 static void in2_m2_32ua(DisasContext *s, DisasOps *o)
5985 {
5986     in2_a2(s, o);
5987     tcg_gen_qemu_ld_tl(o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN);
5988 }
5989 #define SPEC_in2_m2_32ua 0
5990 #endif
5991 
5992 static void in2_m2_64(DisasContext *s, DisasOps *o)
5993 {
5994     in2_a2(s, o);
5995     tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
5996 }
5997 #define SPEC_in2_m2_64 0
5998 
5999 static void in2_m2_64w(DisasContext *s, DisasOps *o)
6000 {
6001     in2_a2(s, o);
6002     tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
6003     gen_addi_and_wrap_i64(s, o->in2, o->in2, 0);
6004 }
6005 #define SPEC_in2_m2_64w 0
6006 
6007 #ifndef CONFIG_USER_ONLY
6008 static void in2_m2_64a(DisasContext *s, DisasOps *o)
6009 {
6010     in2_a2(s, o);
6011     tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEQ | MO_ALIGN);
6012 }
6013 #define SPEC_in2_m2_64a 0
6014 #endif
6015 
6016 static void in2_mri2_16u(DisasContext *s, DisasOps *o)
6017 {
6018     in2_ri2(s, o);
6019     tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
6020 }
6021 #define SPEC_in2_mri2_16u 0
6022 
6023 static void in2_mri2_32s(DisasContext *s, DisasOps *o)
6024 {
6025     in2_ri2(s, o);
6026     tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
6027 }
6028 #define SPEC_in2_mri2_32s 0
6029 
6030 static void in2_mri2_32u(DisasContext *s, DisasOps *o)
6031 {
6032     in2_ri2(s, o);
6033     tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
6034 }
6035 #define SPEC_in2_mri2_32u 0
6036 
6037 static void in2_mri2_64(DisasContext *s, DisasOps *o)
6038 {
6039     in2_ri2(s, o);
6040     tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
6041 }
6042 #define SPEC_in2_mri2_64 0
6043 
6044 static void in2_i2(DisasContext *s, DisasOps *o)
6045 {
6046     o->in2 = tcg_const_i64(get_field(s, i2));
6047 }
6048 #define SPEC_in2_i2 0
6049 
6050 static void in2_i2_8u(DisasContext *s, DisasOps *o)
6051 {
6052     o->in2 = tcg_const_i64((uint8_t)get_field(s, i2));
6053 }
6054 #define SPEC_in2_i2_8u 0
6055 
6056 static void in2_i2_16u(DisasContext *s, DisasOps *o)
6057 {
6058     o->in2 = tcg_const_i64((uint16_t)get_field(s, i2));
6059 }
6060 #define SPEC_in2_i2_16u 0
6061 
6062 static void in2_i2_32u(DisasContext *s, DisasOps *o)
6063 {
6064     o->in2 = tcg_const_i64((uint32_t)get_field(s, i2));
6065 }
6066 #define SPEC_in2_i2_32u 0
6067 
6068 static void in2_i2_16u_shl(DisasContext *s, DisasOps *o)
6069 {
6070     uint64_t i2 = (uint16_t)get_field(s, i2);
6071     o->in2 = tcg_const_i64(i2 << s->insn->data);
6072 }
6073 #define SPEC_in2_i2_16u_shl 0
6074 
6075 static void in2_i2_32u_shl(DisasContext *s, DisasOps *o)
6076 {
6077     uint64_t i2 = (uint32_t)get_field(s, i2);
6078     o->in2 = tcg_const_i64(i2 << s->insn->data);
6079 }
6080 #define SPEC_in2_i2_32u_shl 0
6081 
6082 #ifndef CONFIG_USER_ONLY
6083 static void in2_insn(DisasContext *s, DisasOps *o)
6084 {
6085     o->in2 = tcg_const_i64(s->fields.raw_insn);
6086 }
6087 #define SPEC_in2_insn 0
6088 #endif
6089 
6090 /* ====================================================================== */
6091 
6092 /* Find opc within the table of insns.  This is formulated as a switch
6093    statement so that (1) we get compile-time notice of cut-paste errors
6094    for duplicated opcodes, and (2) the compiler generates the binary
6095    search tree, rather than us having to post-process the table.  */
6096 
6097 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
6098     E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0)
6099 
6100 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
6101     E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0)
6102 
6103 #define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \
6104     E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL)
6105 
6106 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM,
6107 
6108 enum DisasInsnEnum {
6109 #include "insn-data.def"
6110 };
6111 
6112 #undef E
6113 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) {                   \
6114     .opc = OPC,                                                             \
6115     .flags = FL,                                                            \
6116     .fmt = FMT_##FT,                                                        \
6117     .fac = FAC_##FC,                                                        \
6118     .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W,  \
6119     .name = #NM,                                                            \
6120     .help_in1 = in1_##I1,                                                   \
6121     .help_in2 = in2_##I2,                                                   \
6122     .help_prep = prep_##P,                                                  \
6123     .help_wout = wout_##W,                                                  \
6124     .help_cout = cout_##CC,                                                 \
6125     .help_op = op_##OP,                                                     \
6126     .data = D                                                               \
6127  },
6128 
6129 /* Allow 0 to be used for NULL in the table below.  */
6130 #define in1_0  NULL
6131 #define in2_0  NULL
6132 #define prep_0  NULL
6133 #define wout_0  NULL
6134 #define cout_0  NULL
6135 #define op_0  NULL
6136 
6137 #define SPEC_in1_0 0
6138 #define SPEC_in2_0 0
6139 #define SPEC_prep_0 0
6140 #define SPEC_wout_0 0
6141 
6142 /* Give smaller names to the various facilities.  */
6143 #define FAC_Z           S390_FEAT_ZARCH
6144 #define FAC_CASS        S390_FEAT_COMPARE_AND_SWAP_AND_STORE
6145 #define FAC_DFP         S390_FEAT_DFP
6146 #define FAC_DFPR        S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* DFP-rounding */
6147 #define FAC_DO          S390_FEAT_STFLE_45 /* distinct-operands */
6148 #define FAC_EE          S390_FEAT_EXECUTE_EXT
6149 #define FAC_EI          S390_FEAT_EXTENDED_IMMEDIATE
6150 #define FAC_FPE         S390_FEAT_FLOATING_POINT_EXT
6151 #define FAC_FPSSH       S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPS-sign-handling */
6152 #define FAC_FPRGR       S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPR-GR-transfer */
6153 #define FAC_GIE         S390_FEAT_GENERAL_INSTRUCTIONS_EXT
6154 #define FAC_HFP_MA      S390_FEAT_HFP_MADDSUB
6155 #define FAC_HW          S390_FEAT_STFLE_45 /* high-word */
6156 #define FAC_IEEEE_SIM   S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* IEEE-exception-simulation */
6157 #define FAC_MIE         S390_FEAT_STFLE_49 /* misc-instruction-extensions */
6158 #define FAC_LAT         S390_FEAT_STFLE_49 /* load-and-trap */
6159 #define FAC_LOC         S390_FEAT_STFLE_45 /* load/store on condition 1 */
6160 #define FAC_LOC2        S390_FEAT_STFLE_53 /* load/store on condition 2 */
6161 #define FAC_LD          S390_FEAT_LONG_DISPLACEMENT
6162 #define FAC_PC          S390_FEAT_STFLE_45 /* population count */
6163 #define FAC_SCF         S390_FEAT_STORE_CLOCK_FAST
6164 #define FAC_SFLE        S390_FEAT_STFLE
6165 #define FAC_ILA         S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
6166 #define FAC_MVCOS       S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
6167 #define FAC_LPP         S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
6168 #define FAC_DAT_ENH     S390_FEAT_DAT_ENH
6169 #define FAC_E2          S390_FEAT_EXTENDED_TRANSLATION_2
6170 #define FAC_EH          S390_FEAT_STFLE_49 /* execution-hint */
6171 #define FAC_PPA         S390_FEAT_STFLE_49 /* processor-assist */
6172 #define FAC_LZRB        S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
6173 #define FAC_ETF3        S390_FEAT_EXTENDED_TRANSLATION_3
6174 #define FAC_MSA         S390_FEAT_MSA /* message-security-assist facility */
6175 #define FAC_MSA3        S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
6176 #define FAC_MSA4        S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
6177 #define FAC_MSA5        S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
6178 #define FAC_MSA8        S390_FEAT_MSA_EXT_8 /* msa-extension-8 facility */
6179 #define FAC_ECT         S390_FEAT_EXTRACT_CPU_TIME
6180 #define FAC_PCI         S390_FEAT_ZPCI /* z/PCI facility */
6181 #define FAC_AIS         S390_FEAT_ADAPTER_INT_SUPPRESSION
6182 #define FAC_V           S390_FEAT_VECTOR /* vector facility */
6183 #define FAC_VE          S390_FEAT_VECTOR_ENH /* vector enhancements facility 1 */
6184 #define FAC_MIE2        S390_FEAT_MISC_INSTRUCTION_EXT2 /* miscellaneous-instruction-extensions facility 2 */
6185 
6186 static const DisasInsn insn_info[] = {
6187 #include "insn-data.def"
6188 };
6189 
6190 #undef E
6191 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \
6192     case OPC: return &insn_info[insn_ ## NM];
6193 
6194 static const DisasInsn *lookup_opc(uint16_t opc)
6195 {
6196     switch (opc) {
6197 #include "insn-data.def"
6198     default:
6199         return NULL;
6200     }
6201 }
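/*
 * To illustrate the three-way expansion of insn-data.def, take an entry
 * along the lines of C(0x1800, LR, RR_a, Z, 0, r2_o, 0, cond_r1r2_32,
 * mov2, 0): the first inclusion generates the enumerator insn_LR, the
 * second a DisasInsn with help_in2 = in2_r2_o, help_wout =
 * wout_cond_r1r2_32 and help_op = op_mov2, and the third the statement
 * "case 0x1800: return &insn_info[insn_LR];" in lookup_opc() above.
 */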
6202 
6203 #undef F
6204 #undef E
6205 #undef D
6206 #undef C
6207 
6208 /* Extract a field from the insn.  The INSN should be left-aligned in
6209    the uint64_t so that we can more easily utilize the big-bit-endian
6210    definitions we extract from the Principles of Operation.  */
6211 
6212 static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
6213 {
6214     uint32_t r, m;
6215 
6216     if (f->size == 0) {
6217         return;
6218     }
6219 
6220     /* Extract the field from the insn, zero-extended.  */
6221     r = (insn << f->beg) >> (64 - f->size);
6222 
6223     /* Sign-extend, or un-swap the field as necessary.  */
6224     switch (f->type) {
6225     case 0: /* unsigned */
6226         break;
6227     case 1: /* signed */
6228         assert(f->size <= 32);
6229         m = 1u << (f->size - 1);
6230         r = (r ^ m) - m;
6231         break;
6232     case 2: /* dl+dh split, signed 20 bit. */
6233         r = ((int8_t)r << 12) | (r >> 8);
6234         break;
6235     case 3: /* MSB stored in RXB */
6236         g_assert(f->size == 4);
6237         switch (f->beg) {
6238         case 8:
6239             r |= extract64(insn, 63 - 36, 1) << 4;
6240             break;
6241         case 12:
6242             r |= extract64(insn, 63 - 37, 1) << 4;
6243             break;
6244         case 16:
6245             r |= extract64(insn, 63 - 38, 1) << 4;
6246             break;
6247         case 32:
6248             r |= extract64(insn, 63 - 39, 1) << 4;
6249             break;
6250         default:
6251             g_assert_not_reached();
6252         }
6253         break;
6254     default:
6255         abort();
6256     }
6257 
6258     /* Check that the "compressed" encoding we selected above is valid,
6259        i.e. that we haven't made two different original fields overlap.  */
6260     assert(((o->presentC >> f->indexC) & 1) == 0);
6261     o->presentC |= 1 << f->indexC;
6262     o->presentO |= 1 << f->indexO;
6263 
6264     o->c[f->indexC] = r;
6265 }
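/*
 * Worked example for the extraction above: a 4-bit field with beg=8
 * yields r = (insn << 8) >> 60, i.e. bits 8-11 (numbered from the MSB)
 * of the left-aligned insn.  In the dl+dh case the raw 20-bit field is
 * DL:DH, so r >> 8 recovers the 12-bit DL, (int8_t)r sign-extends the
 * 8-bit DH, and the reassembled value is the signed displacement DH:DL.
 * The RXB case ORs in the most-significant register bit, which the
 * vector formats keep in the RXB byte at insn bits 36-39.
 */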
6266 
6267 /* Lookup the insn at the current PC, extracting the operands into O and
6268    returning the info struct for the insn.  Returns NULL for invalid insn.  */
6269 
6270 static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s)
6271 {
6272     uint64_t insn, pc = s->base.pc_next;
6273     int op, op2, ilen;
6274     const DisasInsn *info;
6275 
6276     if (unlikely(s->ex_value)) {
6277         /* Drop the EX data now, so that it's clear on exception paths.  */
6278         TCGv_i64 zero = tcg_const_i64(0);
6279         tcg_gen_st_i64(zero, cpu_env, offsetof(CPUS390XState, ex_value));
6280         tcg_temp_free_i64(zero);
6281 
6282         /* Extract the values saved by EXECUTE.  */
6283         insn = s->ex_value & 0xffffffffffff0000ull;
6284         ilen = s->ex_value & 0xf;
6285         op = insn >> 56;
6286     } else {
6287         insn = ld_code2(env, pc);
6288         op = (insn >> 8) & 0xff;
6289         ilen = get_ilen(op);
6290         switch (ilen) {
6291         case 2:
6292             insn = insn << 48;
6293             break;
6294         case 4:
6295             insn = ld_code4(env, pc) << 32;
6296             break;
6297         case 6:
6298             insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
6299             break;
6300         default:
6301             g_assert_not_reached();
6302         }
6303     }
6304     s->pc_tmp = s->base.pc_next + ilen;
6305     s->ilen = ilen;
6306 
6307     /* We can't actually determine the insn format until we've looked up
6308        the full insn opcode, which we can't do without locating the
6309        secondary opcode.  Assume by default that OP2 is at bit 40; for
6310        those smaller insns that don't actually have a secondary opcode
6311        this will correctly result in OP2 = 0. */
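    /* For example, a RIL insn (op 0xc0) keeps its 4-bit OP2 in bits
       12-15 of the left-aligned insn, hence (insn << 12) >> 60 below. */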
6312     switch (op) {
6313     case 0x01: /* E */
6314     case 0x80: /* S */
6315     case 0x82: /* S */
6316     case 0x93: /* S */
6317     case 0xb2: /* S, RRF, RRE, IE */
6318     case 0xb3: /* RRE, RRD, RRF */
6319     case 0xb9: /* RRE, RRF */
6320     case 0xe5: /* SSE, SIL */
6321         op2 = (insn << 8) >> 56;
6322         break;
6323     case 0xa5: /* RI */
6324     case 0xa7: /* RI */
6325     case 0xc0: /* RIL */
6326     case 0xc2: /* RIL */
6327     case 0xc4: /* RIL */
6328     case 0xc6: /* RIL */
6329     case 0xc8: /* SSF */
6330     case 0xcc: /* RIL */
6331         op2 = (insn << 12) >> 60;
6332         break;
6333     case 0xc5: /* MII */
6334     case 0xc7: /* SMI */
6335     case 0xd0 ... 0xdf: /* SS */
6336     case 0xe1: /* SS */
6337     case 0xe2: /* SS */
6338     case 0xe8: /* SS */
6339     case 0xe9: /* SS */
6340     case 0xea: /* SS */
6341     case 0xee ... 0xf3: /* SS */
6342     case 0xf8 ... 0xfd: /* SS */
6343         op2 = 0;
6344         break;
6345     default:
6346         op2 = (insn << 40) >> 56;
6347         break;
6348     }
6349 
6350     memset(&s->fields, 0, sizeof(s->fields));
6351     s->fields.raw_insn = insn;
6352     s->fields.op = op;
6353     s->fields.op2 = op2;
6354 
6355     /* Lookup the instruction.  */
6356     info = lookup_opc(op << 8 | op2);
6357     s->insn = info;
6358 
6359     /* If we found it, extract the operands.  */
6360     if (info != NULL) {
6361         DisasFormat fmt = info->fmt;
6362         int i;
6363 
6364         for (i = 0; i < NUM_C_FIELD; ++i) {
6365             extract_field(&s->fields, &format_info[fmt].op[i], insn);
6366         }
6367     }
6368     return info;
6369 }
6370 
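/* Registers other than 0, 2, 4 and 6 require the AFP-register facility. */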
6371 static bool is_afp_reg(int reg)
6372 {
6373     return reg % 2 || reg > 6;
6374 }
6375 
6376 static bool is_fp_pair(int reg)
6377 {
6378     /* 0,1,4,5,8,9,12,13: to exclude the others, check that bit 1 is clear */
6379     return !(reg & 0x2);
6380 }
6381 
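/*
 * Per-insn flow, as implemented below: decode via extract_insn(), check
 * the privilege/AFP/vector flags and the specification exceptions, run
 * the helpers in the fixed order in1 -> in2 -> prep -> op -> wout ->
 * cout, then free whichever DisasOps temporaries are not TCG globals.
 */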
6382 static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
6383 {
6384     const DisasInsn *insn;
6385     DisasJumpType ret = DISAS_NEXT;
6386     DisasOps o = {};
6387     bool icount = false;
6388 
6389     /* Search for the insn in the table.  */
6390     insn = extract_insn(env, s);
6391 
6392     /* Emit insn_start now that we know the ILEN.  */
6393     tcg_gen_insn_start(s->base.pc_next, s->cc_op, s->ilen);
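    /* The three slots written here (pc, cc_op, ilen) are what
       restore_state_to_opc() at the end of this file reads back as
       data[0], data[1] and data[2]. */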
6394 
6395     /* Not found means unimplemented/illegal opcode.  */
6396     if (insn == NULL) {
6397         qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
6398                       s->fields.op, s->fields.op2);
6399         gen_illegal_opcode(s);
6400         ret = DISAS_NORETURN;
6401         goto out;
6402     }
6403 
6404 #ifndef CONFIG_USER_ONLY
6405     if (s->base.tb->flags & FLAG_MASK_PER) {
6406         TCGv_i64 addr = tcg_const_i64(s->base.pc_next);
6407         gen_helper_per_ifetch(cpu_env, addr);
6408         tcg_temp_free_i64(addr);
6409     }
6410 #endif
6411 
6412     /* process flags */
6413     if (insn->flags) {
6414         /* privileged instruction */
6415         if ((s->base.tb->flags & FLAG_MASK_PSTATE) && (insn->flags & IF_PRIV)) {
6416             gen_program_exception(s, PGM_PRIVILEGED);
6417             ret = DISAS_NORETURN;
6418             goto out;
6419         }
6420 
6421         /* if AFP is not enabled, AFP instructions and registers are forbidden */
6422         if (!(s->base.tb->flags & FLAG_MASK_AFP)) {
6423             uint8_t dxc = 0;
6424 
6425             if ((insn->flags & IF_AFP1) && is_afp_reg(get_field(s, r1))) {
6426                 dxc = 1;
6427             }
6428             if ((insn->flags & IF_AFP2) && is_afp_reg(get_field(s, r2))) {
6429                 dxc = 1;
6430             }
6431             if ((insn->flags & IF_AFP3) && is_afp_reg(get_field(s, r3))) {
6432                 dxc = 1;
6433             }
6434             if (insn->flags & IF_BFP) {
6435                 dxc = 2;
6436             }
6437             if (insn->flags & IF_DFP) {
6438                 dxc = 3;
6439             }
6440             if (insn->flags & IF_VEC) {
6441                 dxc = 0xfe;
6442             }
6443             if (dxc) {
6444                 gen_data_exception(dxc);
6445                 ret = DISAS_NORETURN;
6446                 goto out;
6447             }
6448         }
6449 
6450         /* if vector instructions are not enabled, executing them is forbidden */
6451         if (insn->flags & IF_VEC) {
6452             if (!(s->base.tb->flags & FLAG_MASK_VECTOR)) {
6453                 gen_data_exception(0xfe);
6454                 ret = DISAS_NORETURN;
6455                 goto out;
6456             }
6457         }
6458 
6459         /* input/output insns are a special case for icount mode */
6460         if (unlikely(insn->flags & IF_IO)) {
6461             icount = tb_cflags(s->base.tb) & CF_USE_ICOUNT;
6462             if (icount) {
6463                 gen_io_start();
6464             }
6465         }
6466     }
6467 
6468     /* Check for insn specification exceptions.  */
6469     if (insn->spec) {
6470         if ((insn->spec & SPEC_r1_even && get_field(s, r1) & 1) ||
6471             (insn->spec & SPEC_r2_even && get_field(s, r2) & 1) ||
6472             (insn->spec & SPEC_r3_even && get_field(s, r3) & 1) ||
6473             (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(s, r1))) ||
6474             (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(s, r2)))) {
6475             gen_program_exception(s, PGM_SPECIFICATION);
6476             ret = DISAS_NORETURN;
6477             goto out;
6478         }
6479     }
6480 
6481     /* Implement the instruction.  */
6482     if (insn->help_in1) {
6483         insn->help_in1(s, &o);
6484     }
6485     if (insn->help_in2) {
6486         insn->help_in2(s, &o);
6487     }
6488     if (insn->help_prep) {
6489         insn->help_prep(s, &o);
6490     }
6491     if (insn->help_op) {
6492         ret = insn->help_op(s, &o);
6493     }
6494     if (ret != DISAS_NORETURN) {
6495         if (insn->help_wout) {
6496             insn->help_wout(s, &o);
6497         }
6498         if (insn->help_cout) {
6499             insn->help_cout(s, &o);
6500         }
6501     }
6502 
6503     /* Free any temporaries created by the helpers.  */
6504     if (o.out && !o.g_out) {
6505         tcg_temp_free_i64(o.out);
6506     }
6507     if (o.out2 && !o.g_out2) {
6508         tcg_temp_free_i64(o.out2);
6509     }
6510     if (o.in1 && !o.g_in1) {
6511         tcg_temp_free_i64(o.in1);
6512     }
6513     if (o.in2 && !o.g_in2) {
6514         tcg_temp_free_i64(o.in2);
6515     }
6516     if (o.addr1) {
6517         tcg_temp_free_i64(o.addr1);
6518     }
6519 
6520     /* an I/O insn must be the last instruction in a TB when icount is enabled */
6521     if (unlikely(icount && ret == DISAS_NEXT)) {
6522         ret = DISAS_PC_STALE;
6523     }
6524 
6525 #ifndef CONFIG_USER_ONLY
6526     if (s->base.tb->flags & FLAG_MASK_PER) {
6527         /* An exception might be triggered; save the PSW if not already done.  */
6528         if (ret == DISAS_NEXT || ret == DISAS_PC_STALE) {
6529             tcg_gen_movi_i64(psw_addr, s->pc_tmp);
6530         }
6531 
6532         /* Call the helper to check for a possible PER exception.  */
6533         gen_helper_per_check_exception(cpu_env);
6534     }
6535 #endif
6536 
6537 out:
6538     /* Advance to the next instruction.  */
6539     s->base.pc_next = s->pc_tmp;
6540     return ret;
6541 }
6542 
6543 static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
6544 {
6545     DisasContext *dc = container_of(dcbase, DisasContext, base);
6546 
6547     /* 31-bit mode: the MSB is the AMODE bit, not part of the address */
6548     if (!(dc->base.tb->flags & FLAG_MASK_64)) {
6549         dc->base.pc_first &= 0x7fffffff;
6550         dc->base.pc_next = dc->base.pc_first;
6551     }
6552 
6553     dc->cc_op = CC_OP_DYNAMIC;
6554     dc->ex_value = dc->base.tb->cs_base;
6555     dc->do_debug = dc->base.singlestep_enabled;
6556 }
6557 
6558 static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs)
6559 {
6560 }
6561 
6562 static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
6563 {
6564 }
6565 
6566 static bool s390x_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
6567                                       const CPUBreakpoint *bp)
6568 {
6569     DisasContext *dc = container_of(dcbase, DisasContext, base);
6570 
6571     /*
6572      * Emit an insn_start to accompany the breakpoint exception.
6573      * The ILEN value is a dummy, since this does not result in
6574      * an s390x exception, but an internal qemu exception which
6575      * brings us back to interact with the gdbstub.
6576      */
6577     tcg_gen_insn_start(dc->base.pc_next, dc->cc_op, 2);
6578 
6579     dc->base.is_jmp = DISAS_PC_STALE;
6580     dc->do_debug = true;
6581     /* The address covered by the breakpoint must be included in
6582        [tb->pc, tb->pc + tb->size) in order for it to be
6583        properly cleared -- thus we increment the PC here so that
6584        the logic setting tb->size does the right thing.  */
6585     dc->base.pc_next += 2;
6586     return true;
6587 }
6588 
6589 static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
6590 {
6591     CPUS390XState *env = cs->env_ptr;
6592     DisasContext *dc = container_of(dcbase, DisasContext, base);
6593 
6594     dc->base.is_jmp = translate_one(env, dc);
6595     if (dc->base.is_jmp == DISAS_NEXT) {
6596         uint64_t page_start;
6597 
6598         page_start = dc->base.pc_first & TARGET_PAGE_MASK;
6599         if (dc->base.pc_next - page_start >= TARGET_PAGE_SIZE || dc->ex_value) {
6600             dc->base.is_jmp = DISAS_TOO_MANY;
6601         }
6602     }
6603 }
6604 
6605 static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
6606 {
6607     DisasContext *dc = container_of(dcbase, DisasContext, base);
6608 
6609     switch (dc->base.is_jmp) {
6610     case DISAS_GOTO_TB:
6611     case DISAS_NORETURN:
6612         break;
6613     case DISAS_TOO_MANY:
6614     case DISAS_PC_STALE:
6615     case DISAS_PC_STALE_NOCHAIN:
6616         update_psw_addr(dc);
6617         /* FALLTHRU */
6618     case DISAS_PC_UPDATED:
6619         /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
6620            cc op type is in env */
6621         update_cc_op(dc);
6622         /* FALLTHRU */
6623     case DISAS_PC_CC_UPDATED:
6624         /* Exit the TB, either by raising a debug exception or by return.  */
6625         if (dc->do_debug) {
6626             gen_exception(EXCP_DEBUG);
6627         } else if (use_exit_tb(dc) ||
6628                    dc->base.is_jmp == DISAS_PC_STALE_NOCHAIN) {
6629             tcg_gen_exit_tb(NULL, 0);
6630         } else {
6631             tcg_gen_lookup_and_goto_ptr();
6632         }
6633         break;
6634     default:
6635         g_assert_not_reached();
6636     }
6637 }
6638 
6639 static void s390x_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
6640 {
6641     DisasContext *dc = container_of(dcbase, DisasContext, base);
6642 
6643     if (unlikely(dc->ex_value)) {
6644         /* ??? Unfortunately log_target_disas can't use host memory.  */
6645         qemu_log("IN: EXECUTE %016" PRIx64, dc->ex_value);
6646     } else {
6647         qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
6648         log_target_disas(cs, dc->base.pc_first, dc->base.tb->size);
6649     }
6650 }
6651 
6652 static const TranslatorOps s390x_tr_ops = {
6653     .init_disas_context = s390x_tr_init_disas_context,
6654     .tb_start           = s390x_tr_tb_start,
6655     .insn_start         = s390x_tr_insn_start,
6656     .breakpoint_check   = s390x_tr_breakpoint_check,
6657     .translate_insn     = s390x_tr_translate_insn,
6658     .tb_stop            = s390x_tr_tb_stop,
6659     .disas_log          = s390x_tr_disas_log,
6660 };
6661 
6662 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
6663 {
6664     DisasContext dc;
6665 
6666     translator_loop(&s390x_tr_ops, &dc.base, cs, tb, max_insns);
6667 }
6668 
6669 void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb,
6670                           target_ulong *data)
6671 {
6672     int cc_op = data[1];
6673 
6674     env->psw.addr = data[0];
6675 
6676     /* Update the CC opcode if it is not already up-to-date.  */
6677     if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
6678         env->cc_op = cc_op;
6679     }
6680 
6681     /* Record ILEN.  */
6682     env->int_pgm_ilen = data[2];
6683 }
6684