xref: /openbmc/qemu/target/s390x/tcg/translate.c (revision c8c89a6a)
/*
 *  S/390 translation
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2010 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/* #define DEBUG_INLINE_BRANCHES */
#define S390X_DEBUG_DISAS
/* #define S390X_DEBUG_DISAS_VERBOSE */

#ifdef S390X_DEBUG_DISAS_VERBOSE
#  define LOG_DISAS(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif

#include "qemu/osdep.h"
#include "cpu.h"
#include "s390x-internal.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "qemu/log.h"
#include "qemu/host-utils.h"
#include "exec/cpu_ldst.h"
#include "exec/gen-icount.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "exec/translator.h"
#include "exec/log.h"
#include "qemu/atomic128.h"


/* Information that (most) every instruction needs to manipulate.  */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;

/*
 * Define a structure to hold the decoded fields.  We'll store each inside
 * an array indexed by an enum.  In order to conserve memory, we'll arrange
 * for fields that do not exist at the same time to overlap, thus the "C"
 * for compact.  For checking purposes there is an "O" for original index
 * as well that will be applied to availability bitmaps.
 */

enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_m5,
    FLD_O_m6,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5,
    FLD_O_v1,
    FLD_O_v2,
    FLD_O_v3,
    FLD_O_v4,
};

enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,
    FLD_C_v1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,
    FLD_C_v3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,
    FLD_C_v4 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,
    FLD_C_m5 = 4,

    FLD_C_d2 = 5,
    FLD_C_m6 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,
    FLD_C_v2 = 6,

    NUM_C_FIELD = 7
};
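
/*
 * Example of the "C" overlap: FLD_C_r1, FLD_C_m1, FLD_C_b1, FLD_C_i1 and
 * FLD_C_v1 all share slot 0, because no instruction format carries more
 * than one of those fields at once.  presentO records which of the
 * aliases was actually decoded.
 */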

struct DisasFields {
    uint64_t raw_insn;
    unsigned op:8;
    unsigned op2:8;
    unsigned presentC:16;
    unsigned int presentO;
    int c[NUM_C_FIELD];
};

struct DisasContext {
    DisasContextBase base;
    const DisasInsn *insn;
    TCGOp *insn_start;
    DisasFields fields;
    uint64_t ex_value;
    /*
     * During translate_one(), pc_tmp is used to determine the instruction
     * to be executed after base.pc_next - e.g. next sequential instruction
     * or a branch target.
     */
    uint64_t pc_tmp;
    uint32_t ilen;
    enum cc_op cc_op;
};

/* Information carried about a condition to be evaluated.  */
typedef struct {
    TCGCond cond:8;
    bool is_64;
    bool g1;
    bool g2;
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;
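
/*
 * The g1/g2 flags mark comparison operands that live in globals (e.g.
 * cc_op or a GPR); free_compare() below must not free those.
 */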

#ifdef DEBUG_INLINE_BRANCHES
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif

static void pc_to_link_info(TCGv_i64 out, DisasContext *s, uint64_t pc)
{
    TCGv_i64 tmp;

    if (s->base.tb->flags & FLAG_MASK_32) {
        if (s->base.tb->flags & FLAG_MASK_64) {
            tcg_gen_movi_i64(out, pc);
            return;
        }
        pc |= 0x80000000;
    }
    assert(!(s->base.tb->flags & FLAG_MASK_64));
    tmp = tcg_const_i64(pc);
    tcg_gen_deposit_i64(out, out, tmp, 0, 32);
    tcg_temp_free_i64(tmp);
}
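
/*
 * Illustration: in 64-bit mode the full address is stored; in 31-bit
 * mode the address is stored with the leftmost bit (the addressing-mode
 * bit, 0x80000000) set, into the low half of out only; in 24-bit mode
 * the raw address lands in the low half, the high half of out being
 * preserved in both of the latter cases.
 */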

static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;
static TCGv_i64 gbea;

static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

static char cpu_reg_names[16][4];
static TCGv_i64 regs[16];

void s390x_translate_init(void)
{
    int i;

    psw_addr = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");
    gbea = tcg_global_mem_new_i64(cpu_env,
                                  offsetof(CPUS390XState, gbea),
                                  "gbea");

    cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }
}

static inline int vec_full_reg_offset(uint8_t reg)
{
    g_assert(reg < 32);
    return offsetof(CPUS390XState, vregs[reg][0]);
}

static inline int vec_reg_offset(uint8_t reg, uint8_t enr, MemOp es)
{
    /* Convert element size (es) - e.g. MO_8 - to bytes */
    const uint8_t bytes = 1 << es;
    int offs = enr * bytes;

    /*
     * vregs[n][0] is the lowest 8 bytes and vregs[n][1] the highest 8 bytes
     * of the 16-byte vector, on both little- and big-endian systems.
     *
     * Big Endian (target/possible host)
     * B:  [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15]
     * HW: [     0][     1][     2][     3] - [     4][     5][     6][     7]
     * W:  [             0][             1] - [             2][             3]
     * DW: [                             0] - [                             1]
     *
     * Little Endian (possible host)
     * B:  [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8]
     * HW: [     3][     2][     1][     0] - [     7][     6][     5][     4]
     * W:  [             1][             0] - [             3][             2]
     * DW: [                             0] - [                             1]
     *
     * For 16-byte elements, the two 8-byte halves will not form a host
     * int128 if the host is little endian, since they're in the wrong order.
     * Some operations (e.g. xor) do not care.  For operations like addition,
     * the two 8-byte elements have to be loaded separately.  Let's force all
     * 16-byte operations to handle it in a special way.
     */
    g_assert(es <= MO_64);
#ifndef HOST_WORDS_BIGENDIAN
    offs ^= (8 - bytes);
#endif
    return offs + vec_full_reg_offset(reg);
}
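
/*
 * Worked example: for es = MO_32 and enr = 1, offs is 4 on a big-endian
 * host; on a little-endian host the xor with (8 - bytes) turns that into
 * 4 ^ 4 = 0, matching the "W" row of the diagram above.
 */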

static inline int freg64_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_64);
}

static inline int freg32_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_32);
}

static TCGv_i64 load_reg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;
}

static TCGv_i64 load_freg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld_i64(r, cpu_env, freg64_offset(reg));
    return r;
}

static TCGv_i64 load_freg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld32u_i64(r, cpu_env, freg32_offset(reg));
    return r;
}

static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_st_i64(v, cpu_env, freg64_offset(reg));
}

static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}

static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}

static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_st32_i64(v, cpu_env, freg32_offset(reg));
}

static void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}

static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->base.pc_next);
}

static void per_branch(DisasContext *s, bool to_next)
{
#ifndef CONFIG_USER_ONLY
    tcg_gen_movi_i64(gbea, s->base.pc_next);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 next_pc = to_next ? tcg_const_i64(s->pc_tmp) : psw_addr;
        gen_helper_per_branch(cpu_env, gbea, next_pc);
        if (to_next) {
            tcg_temp_free_i64(next_pc);
        }
    }
#endif
}

static void per_branch_cond(DisasContext *s, TCGCond cond,
                            TCGv_i64 arg1, TCGv_i64 arg2)
{
#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGLabel *lab = gen_new_label();
        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);

        tcg_gen_movi_i64(gbea, s->base.pc_next);
        gen_helper_per_branch(cpu_env, gbea, psw_addr);

        gen_set_label(lab);
    } else {
        TCGv_i64 pc = tcg_const_i64(s->base.pc_next);
        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
        tcg_temp_free_i64(pc);
    }
#endif
}

static void per_breaking_event(DisasContext *s)
{
    tcg_gen_movi_i64(gbea, s->base.pc_next);
}

static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}

static inline uint64_t ld_code2(CPUS390XState *env, DisasContext *s,
                                uint64_t pc)
{
    return (uint64_t)translator_lduw(env, &s->base, pc);
}

static inline uint64_t ld_code4(CPUS390XState *env, DisasContext *s,
                                uint64_t pc)
{
    return (uint64_t)(uint32_t)translator_ldl(env, &s->base, pc);
}

static int get_mem_index(DisasContext *s)
{
#ifdef CONFIG_USER_ONLY
    return MMU_USER_IDX;
#else
    if (!(s->base.tb->flags & FLAG_MASK_DAT)) {
        return MMU_REAL_IDX;
    }

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_PRIMARY_IDX;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_SECONDARY_IDX;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        return MMU_HOME_IDX;
    default:
        tcg_abort();
        break;
    }
#endif
}

static void gen_exception(int excp)
{
    TCGv_i32 tmp = tcg_const_i32(excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exception this was.  */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    tmp = tcg_const_i32(s->ilen);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* update the psw */
    update_psw_addr(s);

    /* Save off cc.  */
    update_cc_op(s);

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);
}

static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}

static inline void gen_data_exception(uint8_t dxc)
{
    TCGv_i32 tmp = tcg_const_i32(dxc);
    gen_helper_data_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

static inline void gen_trap(DisasContext *s)
{
    /* Set DXC to 0xff */
    gen_data_exception(0xff);
}

static void gen_addi_and_wrap_i64(DisasContext *s, TCGv_i64 dst, TCGv_i64 src,
                                  int64_t imm)
{
    tcg_gen_addi_i64(dst, src, imm);
    if (!(s->base.tb->flags & FLAG_MASK_64)) {
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_andi_i64(dst, dst, 0x7fffffff);
        } else {
            tcg_gen_andi_i64(dst, dst, 0x00ffffff);
        }
    }
}

static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /*
     * Note that d2 is limited to 20 bits, signed.  If we crop negative
     * displacements early we create larger immediate addends.
     */
    if (b2 && x2) {
        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
        gen_addi_and_wrap_i64(s, tmp, tmp, d2);
    } else if (b2) {
        gen_addi_and_wrap_i64(s, tmp, regs[b2], d2);
    } else if (x2) {
        gen_addi_and_wrap_i64(s, tmp, regs[x2], d2);
    } else if (!(s->base.tb->flags & FLAG_MASK_64)) {
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_movi_i64(tmp, d2 & 0x7fffffff);
        } else {
            tcg_gen_movi_i64(tmp, d2 & 0x00ffffff);
        }
    } else {
        tcg_gen_movi_i64(tmp, d2);
    }

    return tmp;
}
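
/*
 * E.g. with b2 = x2 = 0 in 24-bit mode the returned address is simply
 * d2 & 0x00ffffff; with a base and/or index register the same masking
 * is applied after the addition by gen_addi_and_wrap_i64().
 */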

static inline bool live_cc_data(DisasContext *s)
{
    return (s->cc_op != CC_OP_DYNAMIC
            && s->cc_op != CC_OP_STATIC
            && s->cc_op > 3);
}

static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}

static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}

static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}

/* CC value is in env->cc_op */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}

/* calculates cc into cc_op */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = NULL;
    TCGv_i64 dummy = NULL;

    switch (s->cc_op) {
    default:
        dummy = tcg_const_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
        local_cc_op = tcg_const_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        break;
    }

    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
    case CC_OP_LCBB:
    case CC_OP_MULS_32:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ADDU:
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA:
    case CC_OP_SUBU:
    case CC_OP_NZ_F128:
    case CC_OP_VC:
    case CC_OP_MULS_64:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    if (local_cc_op) {
        tcg_temp_free_i32(local_cc_op);
    }
    if (dummy) {
        tcg_temp_free_i64(dummy);
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}

static bool use_goto_tb(DisasContext *s, uint64_t dest)
{
    if (unlikely(s->base.tb->flags & FLAG_MASK_PER)) {
        return false;
    }
    return translator_use_goto_tb(&s->base, dest);
}

static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}

/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.  */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};
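
/*
 * E.g. after a signed compare, branch mask 8 ("equal", CC=0) maps to
 * ltgt_cond[8] = TCG_COND_EQ, and mask 8 | 4 ("less or equal") to
 * ltgt_cond[12] = TCG_COND_LE.
 */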

/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible.  */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER, TCG_COND_NEVER,      /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER,
    TCG_COND_NE, TCG_COND_NE,            /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE,
    TCG_COND_EQ, TCG_COND_EQ,            /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};

/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op.  */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU:
    case CC_OP_SUBU:
        switch (mask) {
        case 8 | 2: /* result == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* result != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* !carry (borrow) */
            cond = old_cc_op == CC_OP_ADDU ? TCG_COND_EQ : TCG_COND_NE;
            break;
        case 2 | 1: /* carry (!borrow) */
            cond = old_cc_op == CC_OP_ADDU ? TCG_COND_NE : TCG_COND_EQ;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_ADDU:
    case CC_OP_SUBU:
        c->is_64 = true;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        switch (mask) {
        case 8 | 2:
        case 4 | 1: /* result */
            c->u.s64.a = cc_dst;
            break;
        case 8 | 4:
        case 2 | 1: /* carry */
            c->u.s64.a = cc_src;
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case CC_OP_STATIC:
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask.  */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}

static void free_compare(DisasCompare *c)
{
    if (!c->g1) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.a);
        } else {
            tcg_temp_free_i32(c->u.s32.a);
        }
    }
    if (!c->g2) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.b);
        } else {
            tcg_temp_free_i32(c->u.s32.b);
        }
    }
}

/* ====================================================================== */
/* Define the insn format enumeration.  */
#define F0(N)                         FMT_##N,
#define F1(N, X1)                     F0(N)
#define F2(N, X1, X2)                 F0(N)
#define F3(N, X1, X2, X3)             F0(N)
#define F4(N, X1, X2, X3, X4)         F0(N)
#define F5(N, X1, X2, X3, X4, X5)     F0(N)
#define F6(N, X1, X2, X3, X4, X5, X6) F0(N)

typedef enum {
#include "insn-format.def"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6

/* This is the way fields are to be accessed out of DisasFields.  */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)

static bool have_field1(const DisasContext *s, enum DisasFieldIndexO c)
{
    return (s->fields.presentO >> c) & 1;
}

static int get_field1(const DisasContext *s, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(s, o));
    return s->fields.c[c];
}
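
/*
 * Typical use: get_field(s, r1) expands to
 * get_field1(s, FLD_O_r1, FLD_C_r1), asserting that the decoded format
 * actually contains an r1 field before reading its compact slot.
 */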

/* Describe the layout of each field in each format.  */
typedef struct DisasField {
    unsigned int beg:8;
    unsigned int size:8;
    unsigned int type:2;
    unsigned int indexC:6;
    enum DisasFieldIndexO indexO:8;
} DisasField;

typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;

#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
#define V(N, B)       {  B,  4, 3, FLD_C_v##N, FLD_O_v##N }
#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }

#define F0(N)                     { { } },
#define F1(N, X1)                 { { X1 } },
#define F2(N, X1, X2)             { { X1, X2 } },
#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
#define F6(N, X1, X2, X3, X4, X5, X6)       { { X1, X2, X3, X4, X5, X6 } },

static const DisasFormatInfo format_info[] = {
#include "insn-format.def"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6
#undef R
#undef M
#undef V
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L

/* Generally, we'll extract operands into this structure, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details.  */
typedef struct {
    bool g_out, g_out2, g_in1, g_in2;
    TCGv_i64 out, out2, in1, in2;
    TCGv_i64 addr1;
} DisasOps;
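
/*
 * Sketch of the usual flow: the "in1"/"in2" helpers named in the
 * instruction table load the operands into in1/in2, the operation
 * computes out (and possibly out2), and a "wout" helper writes the
 * result back to a register, or to memory at addr1.
 */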

/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>.  */

#define SPEC_r1_even    1
#define SPEC_r2_even    2
#define SPEC_r3_even    4
#define SPEC_r1_f128    8
#define SPEC_r2_f128    16

/* Return values from translate_one, indicating the state of the TB.  */

/* We are not using a goto_tb (for whatever reason), but have updated
   the PC (for whatever reason), so there's no need to do it again on
   exiting the TB.  */
#define DISAS_PC_UPDATED        DISAS_TARGET_0

/* We have emitted one or more goto_tb.  No fixup required.  */
#define DISAS_GOTO_TB           DISAS_TARGET_1

/* We have updated the PC and CC values.  */
#define DISAS_PC_CC_UPDATED     DISAS_TARGET_2

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the PC for the next instruction to be executed.  */
#define DISAS_PC_STALE          DISAS_TARGET_3

/* We are exiting the TB to the main loop.  */
#define DISAS_PC_STALE_NOCHAIN  DISAS_TARGET_4


/* Instruction flags */
#define IF_AFP1     0x0001      /* r1 is a fp reg for HFP/FPS instructions */
#define IF_AFP2     0x0002      /* r2 is a fp reg for HFP/FPS instructions */
#define IF_AFP3     0x0004      /* r3 is a fp reg for HFP/FPS instructions */
#define IF_BFP      0x0008      /* binary floating point instruction */
#define IF_DFP      0x0010      /* decimal floating point instruction */
#define IF_PRIV     0x0020      /* privileged instruction */
#define IF_VEC      0x0040      /* vector instruction */
#define IF_IO       0x0080      /* input/output instruction */

struct DisasInsn {
    unsigned opc:16;
    unsigned flags:16;
    DisasFormat fmt:8;
    unsigned fac:8;
    unsigned spec:8;

    const char *name;

    /* Pre-process arguments before HELP_OP.  */
    void (*help_in1)(DisasContext *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasOps *);

    /*
     * Post-process output after HELP_OP.
     * Note that these are not called if HELP_OP returns DISAS_NORETURN.
     */
    void (*help_wout)(DisasContext *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);

    /* Implement the operation itself.  */
    DisasJumpType (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;
};
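
/*
 * The helpers run with in1, in2 and prep before the operation and wout
 * and cout after it; e.g. a 32-bit register-register add would load
 * both GPRs, let op_add (below) produce the sum, write back the low 32
 * bits in "wout" and set the CC in "cout".
 */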

/* ====================================================================== */
/* Miscellaneous helpers, used by several operations.  */

static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->pc_tmp) {
        per_branch(s, true);
        return DISAS_NEXT;
    }
    if (use_goto_tb(s, dest)) {
        update_cc_op(s);
        per_breaking_event(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb(s->base.tb, 0);
        return DISAS_GOTO_TB;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    }
}

static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
                                 bool is_imm, int imm, TCGv_i64 cdest)
{
    DisasJumpType ret;
    uint64_t dest = s->base.pc_next + 2 * imm;
    TCGLabel *lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = DISAS_NEXT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->pc_tmp) {
            /* Branch to next.  */
            per_branch(s, true);
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (!cdest) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            per_branch(s, false);
            ret = DISAS_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->pc_tmp)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            /* Branch taken.  */
            gen_set_label(lab);
            per_breaking_event(s);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb(s->base.tb, 1);

            ret = DISAS_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            per_breaking_event(s);
            ret = DISAS_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_const_i64(s->pc_tmp);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
        } else {
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = DISAS_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}

/* ====================================================================== */
/* The operations.  These perform the bulk of the work for any insn,
   usually after the operands have been loaded and output initialized.  */

static DisasJumpType op_abs(DisasContext *s, DisasOps *o)
{
    tcg_gen_abs_i64(o->out, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return DISAS_NEXT;
}

static DisasJumpType op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return DISAS_NEXT;
}

static DisasJumpType op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_addu64(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    return DISAS_NEXT;
}

/* Compute carry into cc_src. */
static void compute_carry(DisasContext *s)
{
    switch (s->cc_op) {
    case CC_OP_ADDU:
        /* The carry value is already in cc_src (1,0). */
        break;
    case CC_OP_SUBU:
        tcg_gen_addi_i64(cc_src, cc_src, 1);
        break;
    default:
        gen_op_calc_cc(s);
        /* fall through */
    case CC_OP_STATIC:
        /* The carry flag is the msb of CC; compute into cc_src. */
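        /* (CC 0 or 1 yields carry 0; CC 2 or 3 yields carry 1.) */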
1388         tcg_gen_extu_i32_i64(cc_src, cc_op);
1389         tcg_gen_shri_i64(cc_src, cc_src, 1);
1390         break;
1391     }
1392 }
1393 
1394 static DisasJumpType op_addc32(DisasContext *s, DisasOps *o)
1395 {
1396     compute_carry(s);
1397     tcg_gen_add_i64(o->out, o->in1, o->in2);
1398     tcg_gen_add_i64(o->out, o->out, cc_src);
1399     return DISAS_NEXT;
1400 }
1401 
1402 static DisasJumpType op_addc64(DisasContext *s, DisasOps *o)
1403 {
1404     compute_carry(s);
1405 
1406     TCGv_i64 zero = tcg_const_i64(0);
1407     tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, zero);
1408     tcg_gen_add2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);
1409     tcg_temp_free_i64(zero);
1410 
1411     return DISAS_NEXT;
1412 }
1413 
1414 static DisasJumpType op_asi(DisasContext *s, DisasOps *o)
1415 {
1416     bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);
1417 
1418     o->in1 = tcg_temp_new_i64();
1419     if (non_atomic) {
1420         tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
1421     } else {
1422         /* Perform the atomic addition in memory. */
1423         tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
1424                                      s->insn->data);
1425     }
1426 
1427     /* Recompute also for atomic case: needed for setting CC. */
1428     tcg_gen_add_i64(o->out, o->in1, o->in2);
1429 
1430     if (non_atomic) {
1431         tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
1432     }
1433     return DISAS_NEXT;
1434 }
1435 
1436 static DisasJumpType op_asiu64(DisasContext *s, DisasOps *o)
1437 {
1438     bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);
1439 
1440     o->in1 = tcg_temp_new_i64();
1441     if (non_atomic) {
1442         tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
1443     } else {
1444         /* Perform the atomic addition in memory. */
1445         tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
1446                                      s->insn->data);
1447     }
1448 
1449     /* Recompute also for atomic case: needed for setting CC. */
1450     tcg_gen_movi_i64(cc_src, 0);
1451     tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
1452 
1453     if (non_atomic) {
1454         tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
1455     }
1456     return DISAS_NEXT;
1457 }
1458 
1459 static DisasJumpType op_aeb(DisasContext *s, DisasOps *o)
1460 {
1461     gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
1462     return DISAS_NEXT;
1463 }
1464 
1465 static DisasJumpType op_adb(DisasContext *s, DisasOps *o)
1466 {
1467     gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
1468     return DISAS_NEXT;
1469 }
1470 
1471 static DisasJumpType op_axb(DisasContext *s, DisasOps *o)
1472 {
1473     gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
1474     return_low128(o->out2);
1475     return DISAS_NEXT;
1476 }
1477 
1478 static DisasJumpType op_and(DisasContext *s, DisasOps *o)
1479 {
1480     tcg_gen_and_i64(o->out, o->in1, o->in2);
1481     return DISAS_NEXT;
1482 }
1483 
1484 static DisasJumpType op_andi(DisasContext *s, DisasOps *o)
1485 {
1486     int shift = s->insn->data & 0xff;
1487     int size = s->insn->data >> 8;
1488     uint64_t mask = ((1ull << size) - 1) << shift;
1489 
1490     assert(!o->g_in2);
1491     tcg_gen_shli_i64(o->in2, o->in2, shift);
1492     tcg_gen_ori_i64(o->in2, o->in2, ~mask);
1493     tcg_gen_and_i64(o->out, o->in1, o->in2);
1494 
1495     /* Produce the CC from only the bits manipulated.  */
1496     tcg_gen_andi_i64(cc_dst, o->out, mask);
1497     set_cc_nz_u64(s, cc_dst);
1498     return DISAS_NEXT;
1499 }
1500 
1501 static DisasJumpType op_ni(DisasContext *s, DisasOps *o)
1502 {
1503     o->in1 = tcg_temp_new_i64();
1504 
1505     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
1506         tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
1507     } else {
1508         /* Perform the atomic operation in memory. */
1509         tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
1510                                      s->insn->data);
1511     }
1512 
1513     /* Recompute also for atomic case: needed for setting CC. */
1514     tcg_gen_and_i64(o->out, o->in1, o->in2);
1515 
1516     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
1517         tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
1518     }
1519     return DISAS_NEXT;
1520 }
1521 
1522 static DisasJumpType op_bas(DisasContext *s, DisasOps *o)
1523 {
1524     pc_to_link_info(o->out, s, s->pc_tmp);
1525     if (o->in2) {
1526         tcg_gen_mov_i64(psw_addr, o->in2);
1527         per_branch(s, false);
1528         return DISAS_PC_UPDATED;
1529     } else {
1530         return DISAS_NEXT;
1531     }
1532 }
1533 
1534 static void save_link_info(DisasContext *s, DisasOps *o)
1535 {
1536     TCGv_i64 t;
1537 
1538     if (s->base.tb->flags & (FLAG_MASK_32 | FLAG_MASK_64)) {
1539         pc_to_link_info(o->out, s, s->pc_tmp);
1540         return;
1541     }
1542     gen_op_calc_cc(s);
1543     tcg_gen_andi_i64(o->out, o->out, 0xffffffff00000000ull);
1544     tcg_gen_ori_i64(o->out, o->out, ((s->ilen / 2) << 30) | s->pc_tmp);
1545     t = tcg_temp_new_i64();
1546     tcg_gen_shri_i64(t, psw_mask, 16);
1547     tcg_gen_andi_i64(t, t, 0x0f000000);
1548     tcg_gen_or_i64(o->out, o->out, t);
1549     tcg_gen_extu_i32_i64(t, cc_op);
1550     tcg_gen_shli_i64(t, t, 28);
1551     tcg_gen_or_i64(o->out, o->out, t);
1552     tcg_temp_free_i64(t);
1553 }
1554 
1555 static DisasJumpType op_bal(DisasContext *s, DisasOps *o)
1556 {
1557     save_link_info(s, o);
1558     if (o->in2) {
1559         tcg_gen_mov_i64(psw_addr, o->in2);
1560         per_branch(s, false);
1561         return DISAS_PC_UPDATED;
1562     } else {
1563         return DISAS_NEXT;
1564     }
1565 }
1566 
1567 static DisasJumpType op_basi(DisasContext *s, DisasOps *o)
1568 {
1569     pc_to_link_info(o->out, s, s->pc_tmp);
1570     return help_goto_direct(s, s->base.pc_next + 2 * get_field(s, i2));
1571 }
1572 
1573 static DisasJumpType op_bc(DisasContext *s, DisasOps *o)
1574 {
1575     int m1 = get_field(s, m1);
1576     bool is_imm = have_field(s, i2);
1577     int imm = is_imm ? get_field(s, i2) : 0;
1578     DisasCompare c;
1579 
1580     /* BCR with R2 = 0 causes no branching */
1581     if (have_field(s, r2) && get_field(s, r2) == 0) {
1582         if (m1 == 14) {
1583             /* Perform serialization */
1584             /* FIXME: check for fast-BCR-serialization facility */
1585             tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1586         }
1587         if (m1 == 15) {
1588             /* Perform serialization */
1589             /* FIXME: perform checkpoint-synchronisation */
1590             tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1591         }
1592         return DISAS_NEXT;
1593     }
1594 
1595     disas_jcc(s, &c, m1);
1596     return help_branch(s, &c, is_imm, imm, o->in2);
1597 }
1598 
1599 static DisasJumpType op_bct32(DisasContext *s, DisasOps *o)
1600 {
1601     int r1 = get_field(s, r1);
1602     bool is_imm = have_field(s, i2);
1603     int imm = is_imm ? get_field(s, i2) : 0;
1604     DisasCompare c;
1605     TCGv_i64 t;
1606 
1607     c.cond = TCG_COND_NE;
1608     c.is_64 = false;
1609     c.g1 = false;
1610     c.g2 = false;
1611 
1612     t = tcg_temp_new_i64();
1613     tcg_gen_subi_i64(t, regs[r1], 1);
1614     store_reg32_i64(r1, t);
1615     c.u.s32.a = tcg_temp_new_i32();
1616     c.u.s32.b = tcg_const_i32(0);
1617     tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1618     tcg_temp_free_i64(t);
1619 
1620     return help_branch(s, &c, is_imm, imm, o->in2);
1621 }
1622 
1623 static DisasJumpType op_bcth(DisasContext *s, DisasOps *o)
1624 {
1625     int r1 = get_field(s, r1);
1626     int imm = get_field(s, i2);
1627     DisasCompare c;
1628     TCGv_i64 t;
1629 
1630     c.cond = TCG_COND_NE;
1631     c.is_64 = false;
1632     c.g1 = false;
1633     c.g2 = false;
1634 
1635     t = tcg_temp_new_i64();
1636     tcg_gen_shri_i64(t, regs[r1], 32);
1637     tcg_gen_subi_i64(t, t, 1);
1638     store_reg32h_i64(r1, t);
1639     c.u.s32.a = tcg_temp_new_i32();
1640     c.u.s32.b = tcg_const_i32(0);
1641     tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1642     tcg_temp_free_i64(t);
1643 
1644     return help_branch(s, &c, 1, imm, o->in2);
1645 }
1646 
1647 static DisasJumpType op_bct64(DisasContext *s, DisasOps *o)
1648 {
1649     int r1 = get_field(s, r1);
1650     bool is_imm = have_field(s, i2);
1651     int imm = is_imm ? get_field(s, i2) : 0;
1652     DisasCompare c;
1653 
1654     c.cond = TCG_COND_NE;
1655     c.is_64 = true;
1656     c.g1 = true;
1657     c.g2 = false;
1658 
1659     tcg_gen_subi_i64(regs[r1], regs[r1], 1);
1660     c.u.s64.a = regs[r1];
1661     c.u.s64.b = tcg_const_i64(0);
1662 
1663     return help_branch(s, &c, is_imm, imm, o->in2);
1664 }
1665 
1666 static DisasJumpType op_bx32(DisasContext *s, DisasOps *o)
1667 {
1668     int r1 = get_field(s, r1);
1669     int r3 = get_field(s, r3);
1670     bool is_imm = have_field(s, i2);
1671     int imm = is_imm ? get_field(s, i2) : 0;
1672     DisasCompare c;
1673     TCGv_i64 t;
1674 
1675     c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1676     c.is_64 = false;
1677     c.g1 = false;
1678     c.g2 = false;
1679 
1680     t = tcg_temp_new_i64();
1681     tcg_gen_add_i64(t, regs[r1], regs[r3]);
1682     c.u.s32.a = tcg_temp_new_i32();
1683     c.u.s32.b = tcg_temp_new_i32();
1684     tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1685     tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
1686     store_reg32_i64(r1, t);
1687     tcg_temp_free_i64(t);
1688 
1689     return help_branch(s, &c, is_imm, imm, o->in2);
1690 }
1691 
1692 static DisasJumpType op_bx64(DisasContext *s, DisasOps *o)
1693 {
1694     int r1 = get_field(s, r1);
1695     int r3 = get_field(s, r3);
1696     bool is_imm = have_field(s, i2);
1697     int imm = is_imm ? get_field(s, i2) : 0;
1698     DisasCompare c;
1699 
1700     c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1701     c.is_64 = true;
1702 
1703     if (r1 == (r3 | 1)) {
1704         c.u.s64.b = load_reg(r3 | 1);
1705         c.g2 = false;
1706     } else {
1707         c.u.s64.b = regs[r3 | 1];
1708         c.g2 = true;
1709     }
1710 
1711     tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
1712     c.u.s64.a = regs[r1];
1713     c.g1 = true;
1714 
1715     return help_branch(s, &c, is_imm, imm, o->in2);
1716 }
1717 
1718 static DisasJumpType op_cj(DisasContext *s, DisasOps *o)
1719 {
1720     int imm, m3 = get_field(s, m3);
1721     bool is_imm;
1722     DisasCompare c;
1723 
1724     c.cond = ltgt_cond[m3];
1725     if (s->insn->data) {
1726         c.cond = tcg_unsigned_cond(c.cond);
1727     }
1728     c.is_64 = c.g1 = c.g2 = true;
1729     c.u.s64.a = o->in1;
1730     c.u.s64.b = o->in2;
1731 
1732     is_imm = have_field(s, i4);
1733     if (is_imm) {
1734         imm = get_field(s, i4);
1735     } else {
1736         imm = 0;
1737         o->out = get_address(s, 0, get_field(s, b4),
1738                              get_field(s, d4));
1739     }
1740 
1741     return help_branch(s, &c, is_imm, imm, o->out);
1742 }
1743 
1744 static DisasJumpType op_ceb(DisasContext *s, DisasOps *o)
1745 {
1746     gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
1747     set_cc_static(s);
1748     return DISAS_NEXT;
1749 }
1750 
1751 static DisasJumpType op_cdb(DisasContext *s, DisasOps *o)
1752 {
1753     gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
1754     set_cc_static(s);
1755     return DISAS_NEXT;
1756 }
1757 
1758 static DisasJumpType op_cxb(DisasContext *s, DisasOps *o)
1759 {
1760     gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
1761     set_cc_static(s);
1762     return DISAS_NEXT;
1763 }
1764 
1765 static TCGv_i32 fpinst_extract_m34(DisasContext *s, bool m3_with_fpe,
1766                                    bool m4_with_fpe)
1767 {
1768     const bool fpe = s390_has_feat(S390_FEAT_FLOATING_POINT_EXT);
1769     uint8_t m3 = get_field(s, m3);
1770     uint8_t m4 = get_field(s, m4);
1771 
1772     /* m3 field was introduced with FPE */
1773     if (!fpe && m3_with_fpe) {
1774         m3 = 0;
1775     }
1776     /* m4 field was introduced with FPE */
1777     if (!fpe && m4_with_fpe) {
1778         m4 = 0;
1779     }
1780 
1781     /* Check for valid rounding modes. Mode 3 was only introduced with FPE. */
1782     if (m3 == 2 || m3 > 7 || (!fpe && m3 == 3)) {
1783         gen_program_exception(s, PGM_SPECIFICATION);
1784         return NULL;
1785     }
1786 
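         /* Pack both mask fields into a single constant: m3 in bits 0-3
            and m4 in bits 4-7, so e.g. m3=1, m4=5 encodes as 0x51 for
            the helpers to unpack.  */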
1787     return tcg_const_i32(deposit32(m3, 4, 4, m4));
1788 }
1789 
1790 static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o)
1791 {
1792     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1793 
1794     if (!m34) {
1795         return DISAS_NORETURN;
1796     }
1797     gen_helper_cfeb(o->out, cpu_env, o->in2, m34);
1798     tcg_temp_free_i32(m34);
1799     set_cc_static(s);
1800     return DISAS_NEXT;
1801 }
1802 
1803 static DisasJumpType op_cfdb(DisasContext *s, DisasOps *o)
1804 {
1805     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1806 
1807     if (!m34) {
1808         return DISAS_NORETURN;
1809     }
1810     gen_helper_cfdb(o->out, cpu_env, o->in2, m34);
1811     tcg_temp_free_i32(m34);
1812     set_cc_static(s);
1813     return DISAS_NEXT;
1814 }
1815 
1816 static DisasJumpType op_cfxb(DisasContext *s, DisasOps *o)
1817 {
1818     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1819 
1820     if (!m34) {
1821         return DISAS_NORETURN;
1822     }
1823     gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m34);
1824     tcg_temp_free_i32(m34);
1825     set_cc_static(s);
1826     return DISAS_NEXT;
1827 }
1828 
1829 static DisasJumpType op_cgeb(DisasContext *s, DisasOps *o)
1830 {
1831     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1832 
1833     if (!m34) {
1834         return DISAS_NORETURN;
1835     }
1836     gen_helper_cgeb(o->out, cpu_env, o->in2, m34);
1837     tcg_temp_free_i32(m34);
1838     set_cc_static(s);
1839     return DISAS_NEXT;
1840 }
1841 
1842 static DisasJumpType op_cgdb(DisasContext *s, DisasOps *o)
1843 {
1844     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1845 
1846     if (!m34) {
1847         return DISAS_NORETURN;
1848     }
1849     gen_helper_cgdb(o->out, cpu_env, o->in2, m34);
1850     tcg_temp_free_i32(m34);
1851     set_cc_static(s);
1852     return DISAS_NEXT;
1853 }
1854 
1855 static DisasJumpType op_cgxb(DisasContext *s, DisasOps *o)
1856 {
1857     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1858 
1859     if (!m34) {
1860         return DISAS_NORETURN;
1861     }
1862     gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m34);
1863     tcg_temp_free_i32(m34);
1864     set_cc_static(s);
1865     return DISAS_NEXT;
1866 }
1867 
1868 static DisasJumpType op_clfeb(DisasContext *s, DisasOps *o)
1869 {
1870     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1871 
1872     if (!m34) {
1873         return DISAS_NORETURN;
1874     }
1875     gen_helper_clfeb(o->out, cpu_env, o->in2, m34);
1876     tcg_temp_free_i32(m34);
1877     set_cc_static(s);
1878     return DISAS_NEXT;
1879 }
1880 
1881 static DisasJumpType op_clfdb(DisasContext *s, DisasOps *o)
1882 {
1883     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1884 
1885     if (!m34) {
1886         return DISAS_NORETURN;
1887     }
1888     gen_helper_clfdb(o->out, cpu_env, o->in2, m34);
1889     tcg_temp_free_i32(m34);
1890     set_cc_static(s);
1891     return DISAS_NEXT;
1892 }
1893 
1894 static DisasJumpType op_clfxb(DisasContext *s, DisasOps *o)
1895 {
1896     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1897 
1898     if (!m34) {
1899         return DISAS_NORETURN;
1900     }
1901     gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m34);
1902     tcg_temp_free_i32(m34);
1903     set_cc_static(s);
1904     return DISAS_NEXT;
1905 }
1906 
1907 static DisasJumpType op_clgeb(DisasContext *s, DisasOps *o)
1908 {
1909     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1910 
1911     if (!m34) {
1912         return DISAS_NORETURN;
1913     }
1914     gen_helper_clgeb(o->out, cpu_env, o->in2, m34);
1915     tcg_temp_free_i32(m34);
1916     set_cc_static(s);
1917     return DISAS_NEXT;
1918 }
1919 
1920 static DisasJumpType op_clgdb(DisasContext *s, DisasOps *o)
1921 {
1922     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1923 
1924     if (!m34) {
1925         return DISAS_NORETURN;
1926     }
1927     gen_helper_clgdb(o->out, cpu_env, o->in2, m34);
1928     tcg_temp_free_i32(m34);
1929     set_cc_static(s);
1930     return DISAS_NEXT;
1931 }
1932 
1933 static DisasJumpType op_clgxb(DisasContext *s, DisasOps *o)
1934 {
1935     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1936 
1937     if (!m34) {
1938         return DISAS_NORETURN;
1939     }
1940     gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m34);
1941     tcg_temp_free_i32(m34);
1942     set_cc_static(s);
1943     return DISAS_NEXT;
1944 }
1945 
1946 static DisasJumpType op_cegb(DisasContext *s, DisasOps *o)
1947 {
1948     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
1949 
1950     if (!m34) {
1951         return DISAS_NORETURN;
1952     }
1953     gen_helper_cegb(o->out, cpu_env, o->in2, m34);
1954     tcg_temp_free_i32(m34);
1955     return DISAS_NEXT;
1956 }
1957 
1958 static DisasJumpType op_cdgb(DisasContext *s, DisasOps *o)
1959 {
1960     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
1961 
1962     if (!m34) {
1963         return DISAS_NORETURN;
1964     }
1965     gen_helper_cdgb(o->out, cpu_env, o->in2, m34);
1966     tcg_temp_free_i32(m34);
1967     return DISAS_NEXT;
1968 }
1969 
1970 static DisasJumpType op_cxgb(DisasContext *s, DisasOps *o)
1971 {
1972     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
1973 
1974     if (!m34) {
1975         return DISAS_NORETURN;
1976     }
1977     gen_helper_cxgb(o->out, cpu_env, o->in2, m34);
1978     tcg_temp_free_i32(m34);
1979     return_low128(o->out2);
1980     return DISAS_NEXT;
1981 }
1982 
1983 static DisasJumpType op_celgb(DisasContext *s, DisasOps *o)
1984 {
1985     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1986 
1987     if (!m34) {
1988         return DISAS_NORETURN;
1989     }
1990     gen_helper_celgb(o->out, cpu_env, o->in2, m34);
1991     tcg_temp_free_i32(m34);
1992     return DISAS_NEXT;
1993 }
1994 
1995 static DisasJumpType op_cdlgb(DisasContext *s, DisasOps *o)
1996 {
1997     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1998 
1999     if (!m34) {
2000         return DISAS_NORETURN;
2001     }
2002     gen_helper_cdlgb(o->out, cpu_env, o->in2, m34);
2003     tcg_temp_free_i32(m34);
2004     return DISAS_NEXT;
2005 }
2006 
2007 static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o)
2008 {
2009     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
2010 
2011     if (!m34) {
2012         return DISAS_NORETURN;
2013     }
2014     gen_helper_cxlgb(o->out, cpu_env, o->in2, m34);
2015     tcg_temp_free_i32(m34);
2016     return_low128(o->out2);
2017     return DISAS_NEXT;
2018 }
2019 
2020 static DisasJumpType op_cksm(DisasContext *s, DisasOps *o)
2021 {
2022     int r2 = get_field(s, r2);
2023     TCGv_i64 len = tcg_temp_new_i64();
2024 
2025     gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
2026     set_cc_static(s);
2027     return_low128(o->out);
2028 
2029     tcg_gen_add_i64(regs[r2], regs[r2], len);
2030     tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
2031     tcg_temp_free_i64(len);
2032 
2033     return DISAS_NEXT;
2034 }
2035 
2036 static DisasJumpType op_clc(DisasContext *s, DisasOps *o)
2037 {
2038     int l = get_field(s, l1);
2039     TCGv_i32 vl;
2040 
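         /* The L1 field encodes the operand length minus one, so l + 1
            is the true byte count; power-of-two sizes are inlined and
            everything else goes through the helper.  */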
2041     switch (l + 1) {
2042     case 1:
2043         tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
2044         tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
2045         break;
2046     case 2:
2047         tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
2048         tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
2049         break;
2050     case 4:
2051         tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
2052         tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
2053         break;
2054     case 8:
2055         tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
2056         tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
2057         break;
2058     default:
2059         vl = tcg_const_i32(l);
2060         gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
2061         tcg_temp_free_i32(vl);
2062         set_cc_static(s);
2063         return DISAS_NEXT;
2064     }
2065     gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
2066     return DISAS_NEXT;
2067 }
2068 
2069 static DisasJumpType op_clcl(DisasContext *s, DisasOps *o)
2070 {
2071     int r1 = get_field(s, r1);
2072     int r2 = get_field(s, r2);
2073     TCGv_i32 t1, t2;
2074 
2075     /* r1 and r2 must be even.  */
2076     if (r1 & 1 || r2 & 1) {
2077         gen_program_exception(s, PGM_SPECIFICATION);
2078         return DISAS_NORETURN;
2079     }
2080 
2081     t1 = tcg_const_i32(r1);
2082     t2 = tcg_const_i32(r2);
2083     gen_helper_clcl(cc_op, cpu_env, t1, t2);
2084     tcg_temp_free_i32(t1);
2085     tcg_temp_free_i32(t2);
2086     set_cc_static(s);
2087     return DISAS_NEXT;
2088 }
2089 
2090 static DisasJumpType op_clcle(DisasContext *s, DisasOps *o)
2091 {
2092     int r1 = get_field(s, r1);
2093     int r3 = get_field(s, r3);
2094     TCGv_i32 t1, t3;
2095 
2096     /* r1 and r3 must be even.  */
2097     if (r1 & 1 || r3 & 1) {
2098         gen_program_exception(s, PGM_SPECIFICATION);
2099         return DISAS_NORETURN;
2100     }
2101 
2102     t1 = tcg_const_i32(r1);
2103     t3 = tcg_const_i32(r3);
2104     gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
2105     tcg_temp_free_i32(t1);
2106     tcg_temp_free_i32(t3);
2107     set_cc_static(s);
2108     return DISAS_NEXT;
2109 }
2110 
2111 static DisasJumpType op_clclu(DisasContext *s, DisasOps *o)
2112 {
2113     int r1 = get_field(s, r1);
2114     int r3 = get_field(s, r3);
2115     TCGv_i32 t1, t3;
2116 
2117     /* r1 and r3 must be even.  */
2118     if (r1 & 1 || r3 & 1) {
2119         gen_program_exception(s, PGM_SPECIFICATION);
2120         return DISAS_NORETURN;
2121     }
2122 
2123     t1 = tcg_const_i32(r1);
2124     t3 = tcg_const_i32(r3);
2125     gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3);
2126     tcg_temp_free_i32(t1);
2127     tcg_temp_free_i32(t3);
2128     set_cc_static(s);
2129     return DISAS_NEXT;
2130 }
2131 
2132 static DisasJumpType op_clm(DisasContext *s, DisasOps *o)
2133 {
2134     TCGv_i32 m3 = tcg_const_i32(get_field(s, m3));
2135     TCGv_i32 t1 = tcg_temp_new_i32();
2136     tcg_gen_extrl_i64_i32(t1, o->in1);
2137     gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
2138     set_cc_static(s);
2139     tcg_temp_free_i32(t1);
2140     tcg_temp_free_i32(m3);
2141     return DISAS_NEXT;
2142 }
2143 
2144 static DisasJumpType op_clst(DisasContext *s, DisasOps *o)
2145 {
2146     gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
2147     set_cc_static(s);
2148     return_low128(o->in2);
2149     return DISAS_NEXT;
2150 }
2151 
2152 static DisasJumpType op_cps(DisasContext *s, DisasOps *o)
2153 {
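         /* COPY SIGN: take the sign bit from in1 and the magnitude
            from in2.  */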
2154     TCGv_i64 t = tcg_temp_new_i64();
2155     tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
2156     tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
2157     tcg_gen_or_i64(o->out, o->out, t);
2158     tcg_temp_free_i64(t);
2159     return DISAS_NEXT;
2160 }
2161 
2162 static DisasJumpType op_cs(DisasContext *s, DisasOps *o)
2163 {
2164     int d2 = get_field(s, d2);
2165     int b2 = get_field(s, b2);
2166     TCGv_i64 addr, cc;
2167 
2168     /* Note that in1 = R3 (new value) and
2169        in2 = (zero-extended) R1 (expected value).  */
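         /* Architecturally, CC 0 means the swap was performed and CC 1
            that the compare failed, with r1 then receiving the current
            memory value.  */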
2170 
2171     addr = get_address(s, 0, b2, d2);
2172     tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
2173                                get_mem_index(s), s->insn->data | MO_ALIGN);
2174     tcg_temp_free_i64(addr);
2175 
2176     /* Are the memory and expected values (un)equal?  Note that this setcond
2177        produces the output CC value, thus the NE sense of the test.  */
2178     cc = tcg_temp_new_i64();
2179     tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
2180     tcg_gen_extrl_i64_i32(cc_op, cc);
2181     tcg_temp_free_i64(cc);
2182     set_cc_static(s);
2183 
2184     return DISAS_NEXT;
2185 }
2186 
2187 static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o)
2188 {
2189     int r1 = get_field(s, r1);
2190     int r3 = get_field(s, r3);
2191     int d2 = get_field(s, d2);
2192     int b2 = get_field(s, b2);
2193     DisasJumpType ret = DISAS_NEXT;
2194     TCGv_i64 addr;
2195     TCGv_i32 t_r1, t_r3;
2196 
2197     /* Note that R1:R1+1 = expected value and R3:R3+1 = new value.  */
2198     addr = get_address(s, 0, b2, d2);
2199     t_r1 = tcg_const_i32(r1);
2200     t_r3 = tcg_const_i32(r3);
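         /* A 16-byte cmpxchg must be truly atomic when other CPUs run
            in parallel (CF_PARALLEL); without host cmpxchg128 support
            we punt to exit_atomic, which restarts the instruction in a
            serial context.  */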
2201     if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
2202         gen_helper_cdsg(cpu_env, addr, t_r1, t_r3);
2203     } else if (HAVE_CMPXCHG128) {
2204         gen_helper_cdsg_parallel(cpu_env, addr, t_r1, t_r3);
2205     } else {
2206         gen_helper_exit_atomic(cpu_env);
2207         ret = DISAS_NORETURN;
2208     }
2209     tcg_temp_free_i64(addr);
2210     tcg_temp_free_i32(t_r1);
2211     tcg_temp_free_i32(t_r3);
2212 
2213     set_cc_static(s);
2214     return ret;
2215 }
2216 
2217 static DisasJumpType op_csst(DisasContext *s, DisasOps *o)
2218 {
2219     int r3 = get_field(s, r3);
2220     TCGv_i32 t_r3 = tcg_const_i32(r3);
2221 
2222     if (tb_cflags(s->base.tb) & CF_PARALLEL) {
2223         gen_helper_csst_parallel(cc_op, cpu_env, t_r3, o->addr1, o->in2);
2224     } else {
2225         gen_helper_csst(cc_op, cpu_env, t_r3, o->addr1, o->in2);
2226     }
2227     tcg_temp_free_i32(t_r3);
2228 
2229     set_cc_static(s);
2230     return DISAS_NEXT;
2231 }
2232 
2233 #ifndef CONFIG_USER_ONLY
2234 static DisasJumpType op_csp(DisasContext *s, DisasOps *o)
2235 {
2236     MemOp mop = s->insn->data;
2237     TCGv_i64 addr, old, cc;
2238     TCGLabel *lab = gen_new_label();
2239 
2240     /* Note that in1 = R1 (zero-extended expected value),
2241        out = R1 (original reg), out2 = R1+1 (new value).  */
2242 
2243     addr = tcg_temp_new_i64();
2244     old = tcg_temp_new_i64();
2245     tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
2246     tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
2247                                get_mem_index(s), mop | MO_ALIGN);
2248     tcg_temp_free_i64(addr);
2249 
2250     /* Are the memory and expected values (un)equal?  */
2251     cc = tcg_temp_new_i64();
2252     tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
2253     tcg_gen_extrl_i64_i32(cc_op, cc);
2254 
2255     /* Write back the output now, before the following branch, so
2256        that we don't need local temps.  */
2257     if ((mop & MO_SIZE) == MO_32) {
2258         tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
2259     } else {
2260         tcg_gen_mov_i64(o->out, old);
2261     }
2262     tcg_temp_free_i64(old);
2263 
2264     /* If the comparison was equal, and the LSB of R2 was set,
2265        then we need to flush the TLB (for all cpus).  */
2266     tcg_gen_xori_i64(cc, cc, 1);
2267     tcg_gen_and_i64(cc, cc, o->in2);
2268     tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
2269     tcg_temp_free_i64(cc);
2270 
2271     gen_helper_purge(cpu_env);
2272     gen_set_label(lab);
2273 
2274     return DISAS_NEXT;
2275 }
2276 #endif
2277 
2278 static DisasJumpType op_cvd(DisasContext *s, DisasOps *o)
2279 {
2280     TCGv_i64 t1 = tcg_temp_new_i64();
2281     TCGv_i32 t2 = tcg_temp_new_i32();
2282     tcg_gen_extrl_i64_i32(t2, o->in1);
2283     gen_helper_cvd(t1, t2);
2284     tcg_temp_free_i32(t2);
2285     tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
2286     tcg_temp_free_i64(t1);
2287     return DISAS_NEXT;
2288 }
2289 
2290 static DisasJumpType op_ct(DisasContext *s, DisasOps *o)
2291 {
2292     int m3 = get_field(s, m3);
2293     TCGLabel *lab = gen_new_label();
2294     TCGCond c;
2295 
2296     c = tcg_invert_cond(ltgt_cond[m3]);
2297     if (s->insn->data) {
2298         c = tcg_unsigned_cond(c);
2299     }
2300     tcg_gen_brcond_i64(c, o->in1, o->in2, lab);
2301 
2302     /* Trap.  */
2303     gen_trap(s);
2304 
2305     gen_set_label(lab);
2306     return DISAS_NEXT;
2307 }
2308 
2309 static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
2310 {
2311     int m3 = get_field(s, m3);
2312     int r1 = get_field(s, r1);
2313     int r2 = get_field(s, r2);
2314     TCGv_i32 tr1, tr2, chk;
2315 
2316     /* R1 and R2 must both be even.  */
2317     if ((r1 | r2) & 1) {
2318         gen_program_exception(s, PGM_SPECIFICATION);
2319         return DISAS_NORETURN;
2320     }
2321     if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
2322         m3 = 0;
2323     }
2324 
2325     tr1 = tcg_const_i32(r1);
2326     tr2 = tcg_const_i32(r2);
2327     chk = tcg_const_i32(m3);
2328 
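         /* insn->data names the conversion pair: the digits 1, 2 and 4
            stand for UTF-8, UTF-16 and UTF-32, so e.g. 12 is CU12
            (UTF-8 to UTF-16) and 41 is CU41 (UTF-32 to UTF-8).  */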
2329     switch (s->insn->data) {
2330     case 12:
2331         gen_helper_cu12(cc_op, cpu_env, tr1, tr2, chk);
2332         break;
2333     case 14:
2334         gen_helper_cu14(cc_op, cpu_env, tr1, tr2, chk);
2335         break;
2336     case 21:
2337         gen_helper_cu21(cc_op, cpu_env, tr1, tr2, chk);
2338         break;
2339     case 24:
2340         gen_helper_cu24(cc_op, cpu_env, tr1, tr2, chk);
2341         break;
2342     case 41:
2343         gen_helper_cu41(cc_op, cpu_env, tr1, tr2, chk);
2344         break;
2345     case 42:
2346         gen_helper_cu42(cc_op, cpu_env, tr1, tr2, chk);
2347         break;
2348     default:
2349         g_assert_not_reached();
2350     }
2351 
2352     tcg_temp_free_i32(tr1);
2353     tcg_temp_free_i32(tr2);
2354     tcg_temp_free_i32(chk);
2355     set_cc_static(s);
2356     return DISAS_NEXT;
2357 }
2358 
2359 #ifndef CONFIG_USER_ONLY
2360 static DisasJumpType op_diag(DisasContext *s, DisasOps *o)
2361 {
2362     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
2363     TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
2364     TCGv_i32 func_code = tcg_const_i32(get_field(s, i2));
2365 
2366     gen_helper_diag(cpu_env, r1, r3, func_code);
2367 
2368     tcg_temp_free_i32(func_code);
2369     tcg_temp_free_i32(r3);
2370     tcg_temp_free_i32(r1);
2371     return DISAS_NEXT;
2372 }
2373 #endif
2374 
2375 static DisasJumpType op_divs32(DisasContext *s, DisasOps *o)
2376 {
2377     gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
2378     return_low128(o->out);
2379     return DISAS_NEXT;
2380 }
2381 
2382 static DisasJumpType op_divu32(DisasContext *s, DisasOps *o)
2383 {
2384     gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
2385     return_low128(o->out);
2386     return DISAS_NEXT;
2387 }
2388 
2389 static DisasJumpType op_divs64(DisasContext *s, DisasOps *o)
2390 {
2391     gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
2392     return_low128(o->out);
2393     return DISAS_NEXT;
2394 }
2395 
2396 static DisasJumpType op_divu64(DisasContext *s, DisasOps *o)
2397 {
2398     gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
2399     return_low128(o->out);
2400     return DISAS_NEXT;
2401 }
2402 
2403 static DisasJumpType op_deb(DisasContext *s, DisasOps *o)
2404 {
2405     gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
2406     return DISAS_NEXT;
2407 }
2408 
2409 static DisasJumpType op_ddb(DisasContext *s, DisasOps *o)
2410 {
2411     gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
2412     return DISAS_NEXT;
2413 }
2414 
2415 static DisasJumpType op_dxb(DisasContext *s, DisasOps *o)
2416 {
2417     gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2418     return_low128(o->out2);
2419     return DISAS_NEXT;
2420 }
2421 
2422 static DisasJumpType op_ear(DisasContext *s, DisasOps *o)
2423 {
2424     int r2 = get_field(s, r2);
2425     tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
2426     return DISAS_NEXT;
2427 }
2428 
2429 static DisasJumpType op_ecag(DisasContext *s, DisasOps *o)
2430 {
2431     /* No cache information provided.  */
2432     tcg_gen_movi_i64(o->out, -1);
2433     return DISAS_NEXT;
2434 }
2435 
2436 static DisasJumpType op_efpc(DisasContext *s, DisasOps *o)
2437 {
2438     tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
2439     return DISAS_NEXT;
2440 }
2441 
2442 static DisasJumpType op_epsw(DisasContext *s, DisasOps *o)
2443 {
2444     int r1 = get_field(s, r1);
2445     int r2 = get_field(s, r2);
2446     TCGv_i64 t = tcg_temp_new_i64();
2447 
2448     /* Note the "subsequently" in the PoO, which implies a defined result
2449        if r1 == r2.  Thus we cannot defer these writes to an output hook.  */
2450     tcg_gen_shri_i64(t, psw_mask, 32);
2451     store_reg32_i64(r1, t);
2452     if (r2 != 0) {
2453         store_reg32_i64(r2, psw_mask);
2454     }
2455 
2456     tcg_temp_free_i64(t);
2457     return DISAS_NEXT;
2458 }
2459 
2460 static DisasJumpType op_ex(DisasContext *s, DisasOps *o)
2461 {
2462     int r1 = get_field(s, r1);
2463     TCGv_i32 ilen;
2464     TCGv_i64 v1;
2465 
2466     /* Nested EXECUTE is not allowed.  */
2467     if (unlikely(s->ex_value)) {
2468         gen_program_exception(s, PGM_EXECUTE);
2469         return DISAS_NORETURN;
2470     }
2471 
2472     update_psw_addr(s);
2473     update_cc_op(s);
2474 
2475     if (r1 == 0) {
2476         v1 = tcg_const_i64(0);
2477     } else {
2478         v1 = regs[r1];
2479     }
2480 
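         /* EXECUTE ORs the low byte of v1 into bits 8-15 of the target
            instruction before it is re-dispatched, which is why the PSW
            address and cc_op were brought up to date above.  */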
2481     ilen = tcg_const_i32(s->ilen);
2482     gen_helper_ex(cpu_env, ilen, v1, o->in2);
2483     tcg_temp_free_i32(ilen);
2484 
2485     if (r1 == 0) {
2486         tcg_temp_free_i64(v1);
2487     }
2488 
2489     return DISAS_PC_CC_UPDATED;
2490 }
2491 
2492 static DisasJumpType op_fieb(DisasContext *s, DisasOps *o)
2493 {
2494     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2495 
2496     if (!m34) {
2497         return DISAS_NORETURN;
2498     }
2499     gen_helper_fieb(o->out, cpu_env, o->in2, m34);
2500     tcg_temp_free_i32(m34);
2501     return DISAS_NEXT;
2502 }
2503 
2504 static DisasJumpType op_fidb(DisasContext *s, DisasOps *o)
2505 {
2506     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2507 
2508     if (!m34) {
2509         return DISAS_NORETURN;
2510     }
2511     gen_helper_fidb(o->out, cpu_env, o->in2, m34);
2512     tcg_temp_free_i32(m34);
2513     return DISAS_NEXT;
2514 }
2515 
2516 static DisasJumpType op_fixb(DisasContext *s, DisasOps *o)
2517 {
2518     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2519 
2520     if (!m34) {
2521         return DISAS_NORETURN;
2522     }
2523     gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m34);
2524     return_low128(o->out2);
2525     tcg_temp_free_i32(m34);
2526     return DISAS_NEXT;
2527 }
2528 
2529 static DisasJumpType op_flogr(DisasContext *s, DisasOps *o)
2530 {
2531     /* We'll use the original input for cc computation, since we get to
2532        compare that against 0, which ought to be better than comparing
2533        the real output against 64.  It also lets cc_dst be a convenient
2534        temporary during our computation.  */
2535     gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);
2536 
2537     /* R1 = IN ? CLZ(IN) : 64.  */
2538     tcg_gen_clzi_i64(o->out, o->in2, 64);
2539 
2540     /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
2541        value by 64, which is undefined.  But since the shift is 64 iff the
2542        input is zero, we still get the correct result after and'ing.  */
2543     tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
2544     tcg_gen_shr_i64(o->out2, o->out2, o->out);
2545     tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
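         /* Worked example: IN = 0x0000400000000002 gives R1 = 17 (the
            CLZ), the mask above isolates bit 46, and R1+1 = 0x2, i.e.
            IN with its leftmost one bit cleared.  For IN = 0 the
            undefined shift is masked out and R1+1 = 0.  */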
2546     return DISAS_NEXT;
2547 }
2548 
2549 static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
2550 {
2551     int m3 = get_field(s, m3);
2552     int pos, len, base = s->insn->data;
2553     TCGv_i64 tmp = tcg_temp_new_i64();
2554     uint64_t ccm;
2555 
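         /* m3 selects which bytes of the 32-bit field at 'base' receive
            loaded bytes, e.g. m3 = 0110b becomes one 16-bit load at bit
            position base + 8, with ccm covering exactly those bits for
            the CC computation.  */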
2556     switch (m3) {
2557     case 0xf:
2558         /* Effectively a 32-bit load.  */
2559         tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2560         len = 32;
2561         goto one_insert;
2562 
2563     case 0xc:
2564     case 0x6:
2565     case 0x3:
2566         /* Effectively a 16-bit load.  */
2567         tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2568         len = 16;
2569         goto one_insert;
2570 
2571     case 0x8:
2572     case 0x4:
2573     case 0x2:
2574     case 0x1:
2575         /* Effectively an 8-bit load.  */
2576         tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2577         len = 8;
2578         goto one_insert;
2579 
2580     one_insert:
2581         pos = base + ctz32(m3) * 8;
2582         tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2583         ccm = ((1ull << len) - 1) << pos;
2584         break;
2585 
2586     default:
2587         /* This is going to be a sequence of loads and inserts.  */
2588         pos = base + 32 - 8;
2589         ccm = 0;
2590         while (m3) {
2591             if (m3 & 0x8) {
2592                 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2593                 tcg_gen_addi_i64(o->in2, o->in2, 1);
2594                 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2595                 ccm |= 0xff << pos;
2596             }
2597             m3 = (m3 << 1) & 0xf;
2598             pos -= 8;
2599         }
2600         break;
2601     }
2602 
2603     tcg_gen_movi_i64(tmp, ccm);
2604     gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2605     tcg_temp_free_i64(tmp);
2606     return DISAS_NEXT;
2607 }
2608 
2609 static DisasJumpType op_insi(DisasContext *s, DisasOps *o)
2610 {
2611     int shift = s->insn->data & 0xff;
2612     int size = s->insn->data >> 8;
2613     tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
2614     return DISAS_NEXT;
2615 }
2616 
2617 static DisasJumpType op_ipm(DisasContext *s, DisasOps *o)
2618 {
2619     TCGv_i64 t1, t2;
2620 
2621     gen_op_calc_cc(s);
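         /* Assemble the result byte: program mask (PSW bits 20-23) in
            bits 0-3, CC in bits 4-5, deposited into bits 24-31 of r1
            while leaving the rest of r1 intact.  */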
2622     t1 = tcg_temp_new_i64();
2623     tcg_gen_extract_i64(t1, psw_mask, 40, 4);
2624     t2 = tcg_temp_new_i64();
2625     tcg_gen_extu_i32_i64(t2, cc_op);
2626     tcg_gen_deposit_i64(t1, t1, t2, 4, 60);
2627     tcg_gen_deposit_i64(o->out, o->out, t1, 24, 8);
2628     tcg_temp_free_i64(t1);
2629     tcg_temp_free_i64(t2);
2630     return DISAS_NEXT;
2631 }
2632 
2633 #ifndef CONFIG_USER_ONLY
2634 static DisasJumpType op_idte(DisasContext *s, DisasOps *o)
2635 {
2636     TCGv_i32 m4;
2637 
2638     if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2639         m4 = tcg_const_i32(get_field(s, m4));
2640     } else {
2641         m4 = tcg_const_i32(0);
2642     }
2643     gen_helper_idte(cpu_env, o->in1, o->in2, m4);
2644     tcg_temp_free_i32(m4);
2645     return DISAS_NEXT;
2646 }
2647 
2648 static DisasJumpType op_ipte(DisasContext *s, DisasOps *o)
2649 {
2650     TCGv_i32 m4;
2651 
2652     if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2653         m4 = tcg_const_i32(get_field(s, m4));
2654     } else {
2655         m4 = tcg_const_i32(0);
2656     }
2657     gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
2658     tcg_temp_free_i32(m4);
2659     return DISAS_NEXT;
2660 }
2661 
2662 static DisasJumpType op_iske(DisasContext *s, DisasOps *o)
2663 {
2664     gen_helper_iske(o->out, cpu_env, o->in2);
2665     return DISAS_NEXT;
2666 }
2667 #endif
2668 
2669 static DisasJumpType op_msa(DisasContext *s, DisasOps *o)
2670 {
2671     int r1 = have_field(s, r1) ? get_field(s, r1) : 0;
2672     int r2 = have_field(s, r2) ? get_field(s, r2) : 0;
2673     int r3 = have_field(s, r3) ? get_field(s, r3) : 0;
2674     TCGv_i32 t_r1, t_r2, t_r3, type;
2675 
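         /* Validate the register constraints for the MSA function type;
            the cases deliberately cascade from the strictest type down,
            each level adding its own check before falling through.  */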
2676     switch (s->insn->data) {
2677     case S390_FEAT_TYPE_KMA:
2678         if (r3 == r1 || r3 == r2) {
2679             gen_program_exception(s, PGM_SPECIFICATION);
2680             return DISAS_NORETURN;
2681         }
2682         /* FALL THROUGH */
2683     case S390_FEAT_TYPE_KMCTR:
2684         if (r3 & 1 || !r3) {
2685             gen_program_exception(s, PGM_SPECIFICATION);
2686             return DISAS_NORETURN;
2687         }
2688         /* FALL THROUGH */
2689     case S390_FEAT_TYPE_PPNO:
2690     case S390_FEAT_TYPE_KMF:
2691     case S390_FEAT_TYPE_KMC:
2692     case S390_FEAT_TYPE_KMO:
2693     case S390_FEAT_TYPE_KM:
2694         if (r1 & 1 || !r1) {
2695             gen_program_exception(s, PGM_SPECIFICATION);
2696             return DISAS_NORETURN;
2697         }
2698         /* FALL THROUGH */
2699     case S390_FEAT_TYPE_KMAC:
2700     case S390_FEAT_TYPE_KIMD:
2701     case S390_FEAT_TYPE_KLMD:
2702         if (r2 & 1 || !r2) {
2703             gen_program_exception(s, PGM_SPECIFICATION);
2704             return DISAS_NORETURN;
2705         }
2706         /* FALL THROUGH */
2707     case S390_FEAT_TYPE_PCKMO:
2708     case S390_FEAT_TYPE_PCC:
2709         break;
2710     default:
2711         g_assert_not_reached();
2712     }
2713 
2714     t_r1 = tcg_const_i32(r1);
2715     t_r2 = tcg_const_i32(r2);
2716     t_r3 = tcg_const_i32(r3);
2717     type = tcg_const_i32(s->insn->data);
2718     gen_helper_msa(cc_op, cpu_env, t_r1, t_r2, t_r3, type);
2719     set_cc_static(s);
2720     tcg_temp_free_i32(t_r1);
2721     tcg_temp_free_i32(t_r2);
2722     tcg_temp_free_i32(t_r3);
2723     tcg_temp_free_i32(type);
2724     return DISAS_NEXT;
2725 }
2726 
2727 static DisasJumpType op_keb(DisasContext *s, DisasOps *o)
2728 {
2729     gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
2730     set_cc_static(s);
2731     return DISAS_NEXT;
2732 }
2733 
2734 static DisasJumpType op_kdb(DisasContext *s, DisasOps *o)
2735 {
2736     gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
2737     set_cc_static(s);
2738     return DISAS_NEXT;
2739 }
2740 
2741 static DisasJumpType op_kxb(DisasContext *s, DisasOps *o)
2742 {
2743     gen_helper_kxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
2744     set_cc_static(s);
2745     return DISAS_NEXT;
2746 }
2747 
2748 static DisasJumpType op_laa(DisasContext *s, DisasOps *o)
2749 {
2750     /* The real output is indeed the original value in memory;
2751        the atomic fetch-add returns it into in2.  */
2752     tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2753                                  s->insn->data | MO_ALIGN);
2754     /* However, we need to recompute the addition for setting CC.  */
2755     tcg_gen_add_i64(o->out, o->in1, o->in2);
2756     return DISAS_NEXT;
2757 }
2758 
2759 static DisasJumpType op_lan(DisasContext *s, DisasOps *o)
2760 {
2761     /* The real output is indeed the original value in memory;
2762        the atomic fetch-and returns it into in2.  */
2763     tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2764                                  s->insn->data | MO_ALIGN);
2765     /* However, we need to recompute the operation for setting CC.  */
2766     tcg_gen_and_i64(o->out, o->in1, o->in2);
2767     return DISAS_NEXT;
2768 }
2769 
2770 static DisasJumpType op_lao(DisasContext *s, DisasOps *o)
2771 {
2772     /* The real output is indeed the original value in memory;
2773        the atomic fetch-or returns it into in2.  */
2774     tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2775                                 s->insn->data | MO_ALIGN);
2776     /* However, we need to recompute the operation for setting CC.  */
2777     tcg_gen_or_i64(o->out, o->in1, o->in2);
2778     return DISAS_NEXT;
2779 }
2780 
2781 static DisasJumpType op_lax(DisasContext *s, DisasOps *o)
2782 {
2783     /* The real output is indeed the original value in memory;
2784        the atomic fetch-xor returns it into in2.  */
2785     tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2786                                  s->insn->data | MO_ALIGN);
2787     /* However, we need to recompute the operation for setting CC.  */
2788     tcg_gen_xor_i64(o->out, o->in1, o->in2);
2789     return DISAS_NEXT;
2790 }
2791 
2792 static DisasJumpType op_ldeb(DisasContext *s, DisasOps *o)
2793 {
2794     gen_helper_ldeb(o->out, cpu_env, o->in2);
2795     return DISAS_NEXT;
2796 }
2797 
2798 static DisasJumpType op_ledb(DisasContext *s, DisasOps *o)
2799 {
2800     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2801 
2802     if (!m34) {
2803         return DISAS_NORETURN;
2804     }
2805     gen_helper_ledb(o->out, cpu_env, o->in2, m34);
2806     tcg_temp_free_i32(m34);
2807     return DISAS_NEXT;
2808 }
2809 
2810 static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o)
2811 {
2812     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2813 
2814     if (!m34) {
2815         return DISAS_NORETURN;
2816     }
2817     gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2, m34);
2818     tcg_temp_free_i32(m34);
2819     return DISAS_NEXT;
2820 }
2821 
2822 static DisasJumpType op_lexb(DisasContext *s, DisasOps *o)
2823 {
2824     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2825 
2826     if (!m34) {
2827         return DISAS_NORETURN;
2828     }
2829     gen_helper_lexb(o->out, cpu_env, o->in1, o->in2, m34);
2830     tcg_temp_free_i32(m34);
2831     return DISAS_NEXT;
2832 }
2833 
2834 static DisasJumpType op_lxdb(DisasContext *s, DisasOps *o)
2835 {
2836     gen_helper_lxdb(o->out, cpu_env, o->in2);
2837     return_low128(o->out2);
2838     return DISAS_NEXT;
2839 }
2840 
2841 static DisasJumpType op_lxeb(DisasContext *s, DisasOps *o)
2842 {
2843     gen_helper_lxeb(o->out, cpu_env, o->in2);
2844     return_low128(o->out2);
2845     return DISAS_NEXT;
2846 }
2847 
2848 static DisasJumpType op_lde(DisasContext *s, DisasOps *o)
2849 {
2850     tcg_gen_shli_i64(o->out, o->in2, 32);
2851     return DISAS_NEXT;
2852 }
2853 
2854 static DisasJumpType op_llgt(DisasContext *s, DisasOps *o)
2855 {
2856     tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2857     return DISAS_NEXT;
2858 }
2859 
2860 static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o)
2861 {
2862     tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
2863     return DISAS_NEXT;
2864 }
2865 
2866 static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o)
2867 {
2868     tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
2869     return DISAS_NEXT;
2870 }
2871 
2872 static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o)
2873 {
2874     tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
2875     return DISAS_NEXT;
2876 }
2877 
2878 static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o)
2879 {
2880     tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
2881     return DISAS_NEXT;
2882 }
2883 
2884 static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o)
2885 {
2886     tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
2887     return DISAS_NEXT;
2888 }
2889 
2890 static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o)
2891 {
2892     tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2893     return DISAS_NEXT;
2894 }
2895 
2896 static DisasJumpType op_ld64(DisasContext *s, DisasOps *o)
2897 {
2898     tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2899     return DISAS_NEXT;
2900 }
2901 
2902 static DisasJumpType op_lat(DisasContext *s, DisasOps *o)
2903 {
2904     TCGLabel *lab = gen_new_label();
2905     store_reg32_i64(get_field(s, r1), o->in2);
2906     /* The value is stored even in case of trap. */
2907     tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2908     gen_trap(s);
2909     gen_set_label(lab);
2910     return DISAS_NEXT;
2911 }
2912 
2913 static DisasJumpType op_lgat(DisasContext *s, DisasOps *o)
2914 {
2915     TCGLabel *lab = gen_new_label();
2916     tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2917     /* The value is stored even in case of trap. */
2918     tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2919     gen_trap(s);
2920     gen_set_label(lab);
2921     return DISAS_NEXT;
2922 }
2923 
2924 static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o)
2925 {
2926     TCGLabel *lab = gen_new_label();
2927     store_reg32h_i64(get_field(s, r1), o->in2);
2928     /* The value is stored even in case of trap. */
2929     tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2930     gen_trap(s);
2931     gen_set_label(lab);
2932     return DISAS_NEXT;
2933 }
2934 
2935 static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o)
2936 {
2937     TCGLabel *lab = gen_new_label();
2938     tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2939     /* The value is stored even in case of trap. */
2940     tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2941     gen_trap(s);
2942     gen_set_label(lab);
2943     return DISAS_NEXT;
2944 }
2945 
2946 static DisasJumpType op_llgtat(DisasContext *s, DisasOps *o)
2947 {
2948     TCGLabel *lab = gen_new_label();
2949     tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2950     /* The value is stored even in case of trap. */
2951     tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2952     gen_trap(s);
2953     gen_set_label(lab);
2954     return DISAS_NEXT;
2955 }
2956 
2957 static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
2958 {
2959     DisasCompare c;
2960 
2961     disas_jcc(s, &c, get_field(s, m3));
2962 
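         /* LOAD ON CONDITION: out becomes in2 when the m3 condition
            holds and keeps in1 otherwise; the 32-bit compare is widened
            so that a single 64-bit movcond serves both cases.  */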
2963     if (c.is_64) {
2964         tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
2965                             o->in2, o->in1);
2966         free_compare(&c);
2967     } else {
2968         TCGv_i32 t32 = tcg_temp_new_i32();
2969         TCGv_i64 t, z;
2970 
2971         tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
2972         free_compare(&c);
2973 
2974         t = tcg_temp_new_i64();
2975         tcg_gen_extu_i32_i64(t, t32);
2976         tcg_temp_free_i32(t32);
2977 
2978         z = tcg_const_i64(0);
2979         tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
2980         tcg_temp_free_i64(t);
2981         tcg_temp_free_i64(z);
2982     }
2983 
2984     return DISAS_NEXT;
2985 }
2986 
2987 #ifndef CONFIG_USER_ONLY
2988 static DisasJumpType op_lctl(DisasContext *s, DisasOps *o)
2989 {
2990     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
2991     TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
2992     gen_helper_lctl(cpu_env, r1, o->in2, r3);
2993     tcg_temp_free_i32(r1);
2994     tcg_temp_free_i32(r3);
2995     /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
2996     return DISAS_PC_STALE_NOCHAIN;
2997 }
2998 
2999 static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o)
3000 {
3001     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
3002     TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
3003     gen_helper_lctlg(cpu_env, r1, o->in2, r3);
3004     tcg_temp_free_i32(r1);
3005     tcg_temp_free_i32(r3);
3006     /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
3007     return DISAS_PC_STALE_NOCHAIN;
3008 }
3009 
3010 static DisasJumpType op_lra(DisasContext *s, DisasOps *o)
3011 {
3012     gen_helper_lra(o->out, cpu_env, o->in2);
3013     set_cc_static(s);
3014     return DISAS_NEXT;
3015 }
3016 
3017 static DisasJumpType op_lpp(DisasContext *s, DisasOps *o)
3018 {
3019     tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
3020     return DISAS_NEXT;
3021 }
3022 
3023 static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o)
3024 {
3025     TCGv_i64 t1, t2;
3026 
3027     per_breaking_event(s);
3028 
3029     t1 = tcg_temp_new_i64();
3030     t2 = tcg_temp_new_i64();
3031     tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
3032                         MO_TEUL | MO_ALIGN_8);
3033     tcg_gen_addi_i64(o->in2, o->in2, 4);
3034     tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
3035     /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK.  */
3036     tcg_gen_shli_i64(t1, t1, 32);
3037     gen_helper_load_psw(cpu_env, t1, t2);
3038     tcg_temp_free_i64(t1);
3039     tcg_temp_free_i64(t2);
3040     return DISAS_NORETURN;
3041 }
3042 
3043 static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
3044 {
3045     TCGv_i64 t1, t2;
3046 
3047     per_breaking_event(s);
3048 
3049     t1 = tcg_temp_new_i64();
3050     t2 = tcg_temp_new_i64();
3051     tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
3052                         MO_TEUQ | MO_ALIGN_8);
3053     tcg_gen_addi_i64(o->in2, o->in2, 8);
3054     tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
3055     gen_helper_load_psw(cpu_env, t1, t2);
3056     tcg_temp_free_i64(t1);
3057     tcg_temp_free_i64(t2);
3058     return DISAS_NORETURN;
3059 }
3060 #endif
3061 
3062 static DisasJumpType op_lam(DisasContext *s, DisasOps *o)
3063 {
3064     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
3065     TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
3066     gen_helper_lam(cpu_env, r1, o->in2, r3);
3067     tcg_temp_free_i32(r1);
3068     tcg_temp_free_i32(r3);
3069     return DISAS_NEXT;
3070 }
3071 
3072 static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
3073 {
3074     int r1 = get_field(s, r1);
3075     int r3 = get_field(s, r3);
3076     TCGv_i64 t1, t2;
3077 
3078     /* Only one register to read. */
3079     t1 = tcg_temp_new_i64();
3080     if (unlikely(r1 == r3)) {
3081         tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3082         store_reg32_i64(r1, t1);
3083         tcg_temp_free(t1);
3084         return DISAS_NEXT;
3085     }
3086 
3087     /* First load the values of the first and last registers to trigger
3088        possible page faults. */
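         /* The register range wraps modulo 16, e.g. r1 = 14, r3 = 2
            loads registers 14, 15, 0, 1 and 2; hence the & 15
            arithmetic throughout.  */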
3089     t2 = tcg_temp_new_i64();
3090     tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3091     tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
3092     tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
3093     store_reg32_i64(r1, t1);
3094     store_reg32_i64(r3, t2);
3095 
3096     /* Only two registers to read. */
3097     if (((r1 + 1) & 15) == r3) {
3098         tcg_temp_free(t2);
3099         tcg_temp_free(t1);
3100         return DISAS_NEXT;
3101     }
3102 
3103     /* Then load the remaining registers. Page fault can't occur. */
3104     r3 = (r3 - 1) & 15;
3105     tcg_gen_movi_i64(t2, 4);
3106     while (r1 != r3) {
3107         r1 = (r1 + 1) & 15;
3108         tcg_gen_add_i64(o->in2, o->in2, t2);
3109         tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3110         store_reg32_i64(r1, t1);
3111     }
3112     tcg_temp_free(t2);
3113     tcg_temp_free(t1);
3114 
3115     return DISAS_NEXT;
3116 }
3117 
3118 static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
3119 {
3120     int r1 = get_field(s, r1);
3121     int r3 = get_field(s, r3);
3122     TCGv_i64 t1, t2;
3123 
3124     /* Only one register to read. */
3125     t1 = tcg_temp_new_i64();
3126     if (unlikely(r1 == r3)) {
3127         tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3128         store_reg32h_i64(r1, t1);
3129         tcg_temp_free(t1);
3130         return DISAS_NEXT;
3131     }
3132 
3133     /* First load the values of the first and last registers to trigger
3134        possible page faults. */
3135     t2 = tcg_temp_new_i64();
3136     tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3137     tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
3138     tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
3139     store_reg32h_i64(r1, t1);
3140     store_reg32h_i64(r3, t2);
3141 
3142     /* Only two registers to read. */
3143     if (((r1 + 1) & 15) == r3) {
3144         tcg_temp_free(t2);
3145         tcg_temp_free(t1);
3146         return DISAS_NEXT;
3147     }
3148 
3149     /* Then load the remaining registers. Page fault can't occur. */
3150     r3 = (r3 - 1) & 15;
3151     tcg_gen_movi_i64(t2, 4);
3152     while (r1 != r3) {
3153         r1 = (r1 + 1) & 15;
3154         tcg_gen_add_i64(o->in2, o->in2, t2);
3155         tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3156         store_reg32h_i64(r1, t1);
3157     }
3158     tcg_temp_free(t2);
3159     tcg_temp_free(t1);
3160 
3161     return DISAS_NEXT;
3162 }
3163 
3164 static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
3165 {
3166     int r1 = get_field(s, r1);
3167     int r3 = get_field(s, r3);
3168     TCGv_i64 t1, t2;
3169 
3170     /* Only one register to read. */
3171     if (unlikely(r1 == r3)) {
3172         tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
3173         return DISAS_NEXT;
3174     }
3175 
3176     /* First load the values of the first and last registers to trigger
3177        possible page faults. */
3178     t1 = tcg_temp_new_i64();
3179     t2 = tcg_temp_new_i64();
3180     tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
3181     tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
3182     tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
3183     tcg_gen_mov_i64(regs[r1], t1);
3184     tcg_temp_free(t2);
3185 
3186     /* Only two registers to read. */
3187     if (((r1 + 1) & 15) == r3) {
3188         tcg_temp_free(t1);
3189         return DISAS_NEXT;
3190     }
3191 
3192     /* Then load the remaining registers. Page fault can't occur. */
3193     r3 = (r3 - 1) & 15;
3194     tcg_gen_movi_i64(t1, 8);
3195     while (r1 != r3) {
3196         r1 = (r1 + 1) & 15;
3197         tcg_gen_add_i64(o->in2, o->in2, t1);
3198         tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
3199     }
3200     tcg_temp_free(t1);
3201 
3202     return DISAS_NEXT;
3203 }
3204 
3205 static DisasJumpType op_lpd(DisasContext *s, DisasOps *o)
3206 {
3207     TCGv_i64 a1, a2;
3208     MemOp mop = s->insn->data;
3209 
3210     /* In a parallel context, stop the world and single step.  */
3211     if (tb_cflags(s->base.tb) & CF_PARALLEL) {
3212         update_psw_addr(s);
3213         update_cc_op(s);
3214         gen_exception(EXCP_ATOMIC);
3215         return DISAS_NORETURN;
3216     }
3217 
3218     /* In a serial context, perform the two loads ... */
3219     a1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
3220     a2 = get_address(s, 0, get_field(s, b2), get_field(s, d2));
3221     tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
3222     tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
3223     tcg_temp_free_i64(a1);
3224     tcg_temp_free_i64(a2);
3225 
3226     /* ... and indicate that we performed them while interlocked.  */
3227     gen_op_movi_cc(s, 0);
3228     return DISAS_NEXT;
3229 }
3230 
3231 static DisasJumpType op_lpq(DisasContext *s, DisasOps *o)
3232 {
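         /* LOAD PAIR FROM QUADWORD: the 16-byte load must be atomic.
            A serial context can use the plain helper; a parallel one
            needs host 128-bit atomics or must fall back to
            exit_atomic.  */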
3233     if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
3234         gen_helper_lpq(o->out, cpu_env, o->in2);
3235     } else if (HAVE_ATOMIC128) {
3236         gen_helper_lpq_parallel(o->out, cpu_env, o->in2);
3237     } else {
3238         gen_helper_exit_atomic(cpu_env);
3239         return DISAS_NORETURN;
3240     }
3241     return_low128(o->out2);
3242     return DISAS_NEXT;
3243 }
3244 
3245 #ifndef CONFIG_USER_ONLY
3246 static DisasJumpType op_lura(DisasContext *s, DisasOps *o)
3247 {
3248     tcg_gen_qemu_ld_tl(o->out, o->in2, MMU_REAL_IDX, s->insn->data);
3249     return DISAS_NEXT;
3250 }
3251 #endif
3252 
3253 static DisasJumpType op_lzrb(DisasContext *s, DisasOps *o)
3254 {
3255     tcg_gen_andi_i64(o->out, o->in2, -256);
3256     return DISAS_NEXT;
3257 }
3258 
3259 static DisasJumpType op_lcbb(DisasContext *s, DisasOps *o)
3260 {
3261     const int64_t block_size = (1ull << (get_field(s, m3) + 6));
3262 
3263     if (get_field(s, m3) > 6) {
3264         gen_program_exception(s, PGM_SPECIFICATION);
3265         return DISAS_NORETURN;
3266     }
3267 
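         /* The or/neg pair computes block_size - (addr % block_size),
            the distance to the next block boundary; e.g. m3 = 0
            (64-byte blocks) and an address ending in 0x3c yield 4.
            The result is then capped at 16, the maximum vector load
            length.  */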
3268     tcg_gen_ori_i64(o->addr1, o->addr1, -block_size);
3269     tcg_gen_neg_i64(o->addr1, o->addr1);
3270     tcg_gen_movi_i64(o->out, 16);
3271     tcg_gen_umin_i64(o->out, o->out, o->addr1);
3272     gen_op_update1_cc_i64(s, CC_OP_LCBB, o->out);
3273     return DISAS_NEXT;
3274 }
3275 
3276 static DisasJumpType op_mc(DisasContext *s, DisasOps *o)
3277 {
3278 #if !defined(CONFIG_USER_ONLY)
3279     TCGv_i32 i2;
3280 #endif
3281     const uint16_t monitor_class = get_field(s, i2);
3282 
3283     if (monitor_class & 0xff00) {
3284         gen_program_exception(s, PGM_SPECIFICATION);
3285         return DISAS_NORETURN;
3286     }
3287 
3288 #if !defined(CONFIG_USER_ONLY)
3289     i2 = tcg_const_i32(monitor_class);
3290     gen_helper_monitor_call(cpu_env, o->addr1, i2);
3291     tcg_temp_free_i32(i2);
3292 #endif
3293     /* Defaults to a NOP. */
3294     return DISAS_NEXT;
3295 }
3296 
3297 static DisasJumpType op_mov2(DisasContext *s, DisasOps *o)
3298 {
3299     o->out = o->in2;
3300     o->g_out = o->g_in2;
3301     o->in2 = NULL;
3302     o->g_in2 = false;
3303     return DISAS_NEXT;
3304 }
3305 
3306 static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o)
3307 {
3308     int b2 = get_field(s, b2);
3309     TCGv ar1 = tcg_temp_new_i64();
3310 
3311     o->out = o->in2;
3312     o->g_out = o->g_in2;
3313     o->in2 = NULL;
3314     o->g_in2 = false;
3315 
3316     switch (s->base.tb->flags & FLAG_MASK_ASC) {
3317     case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
3318         tcg_gen_movi_i64(ar1, 0);
3319         break;
3320     case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
3321         tcg_gen_movi_i64(ar1, 1);
3322         break;
3323     case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
3324         if (b2) {
3325             tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
3326         } else {
3327             tcg_gen_movi_i64(ar1, 0);
3328         }
3329         break;
3330     case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
3331         tcg_gen_movi_i64(ar1, 2);
3332         break;
3333     }
3334 
3335     tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
3336     tcg_temp_free_i64(ar1);
3337 
3338     return DISAS_NEXT;
3339 }
3340 
3341 static DisasJumpType op_movx(DisasContext *s, DisasOps *o)
3342 {
3343     o->out = o->in1;
3344     o->out2 = o->in2;
3345     o->g_out = o->g_in1;
3346     o->g_out2 = o->g_in2;
3347     o->in1 = NULL;
3348     o->in2 = NULL;
3349     o->g_in1 = o->g_in2 = false;
3350     return DISAS_NEXT;
3351 }
3352 
3353 static DisasJumpType op_mvc(DisasContext *s, DisasOps *o)
3354 {
3355     TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3356     gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
3357     tcg_temp_free_i32(l);
3358     return DISAS_NEXT;
3359 }
3360 
3361 static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o)
3362 {
3363     TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3364     gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
3365     tcg_temp_free_i32(l);
3366     return DISAS_NEXT;
3367 }
3368 
3369 static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o)
3370 {
3371     int r1 = get_field(s, r1);
3372     int r2 = get_field(s, r2);
3373     TCGv_i32 t1, t2;
3374 
3375     /* r1 and r2 must be even.  */
3376     if (r1 & 1 || r2 & 1) {
3377         gen_program_exception(s, PGM_SPECIFICATION);
3378         return DISAS_NORETURN;
3379     }
3380 
3381     t1 = tcg_const_i32(r1);
3382     t2 = tcg_const_i32(r2);
3383     gen_helper_mvcl(cc_op, cpu_env, t1, t2);
3384     tcg_temp_free_i32(t1);
3385     tcg_temp_free_i32(t2);
3386     set_cc_static(s);
3387     return DISAS_NEXT;
3388 }
3389 
3390 static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o)
3391 {
3392     int r1 = get_field(s, r1);
3393     int r3 = get_field(s, r3);
3394     TCGv_i32 t1, t3;
3395 
3396     /* r1 and r3 must be even.  */
3397     if (r1 & 1 || r3 & 1) {
3398         gen_program_exception(s, PGM_SPECIFICATION);
3399         return DISAS_NORETURN;
3400     }
3401 
3402     t1 = tcg_const_i32(r1);
3403     t3 = tcg_const_i32(r3);
3404     gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
3405     tcg_temp_free_i32(t1);
3406     tcg_temp_free_i32(t3);
3407     set_cc_static(s);
3408     return DISAS_NEXT;
3409 }
3410 
3411 static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o)
3412 {
3413     int r1 = get_field(s, r1);
3414     int r3 = get_field(s, r3);
3415     TCGv_i32 t1, t3;
3416 
3417     /* r1 and r3 must be even.  */
3418     if (r1 & 1 || r3 & 1) {
3419         gen_program_exception(s, PGM_SPECIFICATION);
3420         return DISAS_NORETURN;
3421     }
3422 
3423     t1 = tcg_const_i32(r1);
3424     t3 = tcg_const_i32(r3);
3425     gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
3426     tcg_temp_free_i32(t1);
3427     tcg_temp_free_i32(t3);
3428     set_cc_static(s);
3429     return DISAS_NEXT;
3430 }
3431 
3432 static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o)
3433 {
3434     int r3 = get_field(s, r3);
3435     gen_helper_mvcos(cc_op, cpu_env, o->addr1, o->in2, regs[r3]);
3436     set_cc_static(s);
3437     return DISAS_NEXT;
3438 }
3439 
3440 #ifndef CONFIG_USER_ONLY
3441 static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o)
3442 {
3443     int r1 = get_field(s, l1);
3444     gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
3445     set_cc_static(s);
3446     return DISAS_NEXT;
3447 }
3448 
3449 static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o)
3450 {
3451     int r1 = get_field(s, l1);
3452     gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
3453     set_cc_static(s);
3454     return DISAS_NEXT;
3455 }
3456 #endif
3457 
3458 static DisasJumpType op_mvn(DisasContext *s, DisasOps *o)
3459 {
3460     TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3461     gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
3462     tcg_temp_free_i32(l);
3463     return DISAS_NEXT;
3464 }
3465 
3466 static DisasJumpType op_mvo(DisasContext *s, DisasOps *o)
3467 {
3468     TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3469     gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
3470     tcg_temp_free_i32(l);
3471     return DISAS_NEXT;
3472 }
3473 
3474 static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o)
3475 {
3476     TCGv_i32 t1 = tcg_const_i32(get_field(s, r1));
3477     TCGv_i32 t2 = tcg_const_i32(get_field(s, r2));
3478 
3479     gen_helper_mvpg(cc_op, cpu_env, regs[0], t1, t2);
3480     tcg_temp_free_i32(t1);
3481     tcg_temp_free_i32(t2);
3482     set_cc_static(s);
3483     return DISAS_NEXT;
3484 }
3485 
3486 static DisasJumpType op_mvst(DisasContext *s, DisasOps *o)
3487 {
3488     TCGv_i32 t1 = tcg_const_i32(get_field(s, r1));
3489     TCGv_i32 t2 = tcg_const_i32(get_field(s, r2));
3490 
3491     gen_helper_mvst(cc_op, cpu_env, t1, t2);
3492     tcg_temp_free_i32(t1);
3493     tcg_temp_free_i32(t2);
3494     set_cc_static(s);
3495     return DISAS_NEXT;
3496 }
3497 
3498 static DisasJumpType op_mvz(DisasContext *s, DisasOps *o)
3499 {
3500     TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3501     gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
3502     tcg_temp_free_i32(l);
3503     return DISAS_NEXT;
3504 }
3505 
3506 static DisasJumpType op_mul(DisasContext *s, DisasOps *o)
3507 {
3508     tcg_gen_mul_i64(o->out, o->in1, o->in2);
3509     return DISAS_NEXT;
3510 }
3511 
3512 static DisasJumpType op_mul128(DisasContext *s, DisasOps *o)
3513 {
3514     tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
3515     return DISAS_NEXT;
3516 }
3517 
3518 static DisasJumpType op_muls128(DisasContext *s, DisasOps *o)
3519 {
3520     tcg_gen_muls2_i64(o->out2, o->out, o->in1, o->in2);
3521     return DISAS_NEXT;
3522 }
3523 
3524 static DisasJumpType op_meeb(DisasContext *s, DisasOps *o)
3525 {
3526     gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
3527     return DISAS_NEXT;
3528 }
3529 
3530 static DisasJumpType op_mdeb(DisasContext *s, DisasOps *o)
3531 {
3532     gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
3533     return DISAS_NEXT;
3534 }
3535 
3536 static DisasJumpType op_mdb(DisasContext *s, DisasOps *o)
3537 {
3538     gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
3539     return DISAS_NEXT;
3540 }
3541 
3542 static DisasJumpType op_mxb(DisasContext *s, DisasOps *o)
3543 {
3544     gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
3545     return_low128(o->out2);
3546     return DISAS_NEXT;
3547 }
3548 
3549 static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o)
3550 {
3551     gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
3552     return_low128(o->out2);
3553     return DISAS_NEXT;
3554 }
3555 
3556 static DisasJumpType op_maeb(DisasContext *s, DisasOps *o)
3557 {
3558     TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
3559     gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
3560     tcg_temp_free_i64(r3);
3561     return DISAS_NEXT;
3562 }
3563 
3564 static DisasJumpType op_madb(DisasContext *s, DisasOps *o)
3565 {
3566     TCGv_i64 r3 = load_freg(get_field(s, r3));
3567     gen_helper_madb(o->out, cpu_env, o->in1, o->in2, r3);
3568     tcg_temp_free_i64(r3);
3569     return DISAS_NEXT;
3570 }
3571 
3572 static DisasJumpType op_mseb(DisasContext *s, DisasOps *o)
3573 {
3574     TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
3575     gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
3576     tcg_temp_free_i64(r3);
3577     return DISAS_NEXT;
3578 }
3579 
3580 static DisasJumpType op_msdb(DisasContext *s, DisasOps *o)
3581 {
3582     TCGv_i64 r3 = load_freg(get_field(s, r3));
3583     gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, r3);
3584     tcg_temp_free_i64(r3);
3585     return DISAS_NEXT;
3586 }
3587 
3588 static DisasJumpType op_nabs(DisasContext *s, DisasOps *o)
3589 {
3590     TCGv_i64 z, n;
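         /* LOAD NEGATIVE: pick the negation when in2 >= 0, else keep
            in2, so the result is always -|in2|.  */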
3591     z = tcg_const_i64(0);
3592     n = tcg_temp_new_i64();
3593     tcg_gen_neg_i64(n, o->in2);
3594     tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
3595     tcg_temp_free_i64(n);
3596     tcg_temp_free_i64(z);
3597     return DISAS_NEXT;
3598 }
3599 
3600 static DisasJumpType op_nabsf32(DisasContext *s, DisasOps *o)
3601 {
3602     tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
3603     return DISAS_NEXT;
3604 }
3605 
3606 static DisasJumpType op_nabsf64(DisasContext *s, DisasOps *o)
3607 {
3608     tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
3609     return DISAS_NEXT;
3610 }
3611 
3612 static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o)
3613 {
3614     tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
3615     tcg_gen_mov_i64(o->out2, o->in2);
3616     return DISAS_NEXT;
3617 }
3618 
3619 static DisasJumpType op_nc(DisasContext *s, DisasOps *o)
3620 {
3621     TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3622     gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
3623     tcg_temp_free_i32(l);
3624     set_cc_static(s);
3625     return DISAS_NEXT;
3626 }
3627 
3628 static DisasJumpType op_neg(DisasContext *s, DisasOps *o)
3629 {
3630     tcg_gen_neg_i64(o->out, o->in2);
3631     return DISAS_NEXT;
3632 }
3633 
3634 static DisasJumpType op_negf32(DisasContext *s, DisasOps *o)
3635 {
3636     tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
3637     return DISAS_NEXT;
3638 }
3639 
3640 static DisasJumpType op_negf64(DisasContext *s, DisasOps *o)
3641 {
3642     tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
3643     return DISAS_NEXT;
3644 }
3645 
3646 static DisasJumpType op_negf128(DisasContext *s, DisasOps *o)
3647 {
3648     tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
3649     tcg_gen_mov_i64(o->out2, o->in2);
3650     return DISAS_NEXT;
3651 }
3652 
3653 static DisasJumpType op_oc(DisasContext *s, DisasOps *o)
3654 {
3655     TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3656     gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
3657     tcg_temp_free_i32(l);
3658     set_cc_static(s);
3659     return DISAS_NEXT;
3660 }
3661 
3662 static DisasJumpType op_or(DisasContext *s, DisasOps *o)
3663 {
3664     tcg_gen_or_i64(o->out, o->in1, o->in2);
3665     return DISAS_NEXT;
3666 }
3667 
3668 static DisasJumpType op_ori(DisasContext *s, DisasOps *o)
3669 {
3670     int shift = s->insn->data & 0xff;
3671     int size = s->insn->data >> 8;
3672     uint64_t mask = ((1ull << size) - 1) << shift;
3673 
3674     assert(!o->g_in2);
3675     tcg_gen_shli_i64(o->in2, o->in2, shift);
3676     tcg_gen_or_i64(o->out, o->in1, o->in2);
3677 
3678     /* Produce the CC from only the bits manipulated.  */
3679     tcg_gen_andi_i64(cc_dst, o->out, mask);
3680     set_cc_nz_u64(s, cc_dst);
3681     return DISAS_NEXT;
3682 }
3683 
3684 static DisasJumpType op_oi(DisasContext *s, DisasOps *o)
3685 {
3686     o->in1 = tcg_temp_new_i64();
3687 
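         /* Without the interlocked-access facility the read-modify-write
            sequence need not be atomic, so a plain load/store pair around
            the OR suffices.  */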
3688     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
3689         tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
3690     } else {
3691         /* Perform the atomic operation in memory. */
3692         tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
3693                                     s->insn->data);
3694     }
3695 
3696     /* Recompute the result for the atomic case as well; it is needed to set the CC. */
3697     tcg_gen_or_i64(o->out, o->in1, o->in2);
3698 
3699     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
3700         tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
3701     }
3702     return DISAS_NEXT;
3703 }
3704 
3705 static DisasJumpType op_pack(DisasContext *s, DisasOps *o)
3706 {
3707     TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3708     gen_helper_pack(cpu_env, l, o->addr1, o->in2);
3709     tcg_temp_free_i32(l);
3710     return DISAS_NEXT;
3711 }
3712 
3713 static DisasJumpType op_pka(DisasContext *s, DisasOps *o)
3714 {
3715     int l2 = get_field(s, l2) + 1;
3716     TCGv_i32 l;
3717 
3718     /* The length must not exceed 32 bytes.  */
3719     if (l2 > 32) {
3720         gen_program_exception(s, PGM_SPECIFICATION);
3721         return DISAS_NORETURN;
3722     }
3723     l = tcg_const_i32(l2);
3724     gen_helper_pka(cpu_env, o->addr1, o->in2, l);
3725     tcg_temp_free_i32(l);
3726     return DISAS_NEXT;
3727 }
3728 
3729 static DisasJumpType op_pku(DisasContext *s, DisasOps *o)
3730 {
3731     int l2 = get_field(s, l2) + 1;
3732     TCGv_i32 l;
3733 
3734     /* The length must be even and must not exceed 64 bytes.  */
3735     if ((l2 & 1) || (l2 > 64)) {
3736         gen_program_exception(s, PGM_SPECIFICATION);
3737         return DISAS_NORETURN;
3738     }
3739     l = tcg_const_i32(l2);
3740     gen_helper_pku(cpu_env, o->addr1, o->in2, l);
3741     tcg_temp_free_i32(l);
3742     return DISAS_NEXT;
3743 }
3744 
3745 static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o)
3746 {
3747     gen_helper_popcnt(o->out, o->in2);
3748     return DISAS_NEXT;
3749 }
3750 
3751 #ifndef CONFIG_USER_ONLY
3752 static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o)
3753 {
3754     gen_helper_ptlb(cpu_env);
3755     return DISAS_NEXT;
3756 }
3757 #endif
3758 
3759 static DisasJumpType op_risbg(DisasContext *s, DisasOps *o)
3760 {
3761     int i3 = get_field(s, i3);
3762     int i4 = get_field(s, i4);
3763     int i5 = get_field(s, i5);
3764     int do_zero = i4 & 0x80;
3765     uint64_t mask, imask, pmask;
3766     int pos, len, rot;
3767 
3768     /* Adjust the arguments for the specific insn.  */
3769     switch (s->fields.op2) {
3770     case 0x55: /* risbg */
3771     case 0x59: /* risbgn */
3772         i3 &= 63;
3773         i4 &= 63;
3774         pmask = ~0;
3775         break;
3776     case 0x5d: /* risbhg */
3777         i3 &= 31;
3778         i4 &= 31;
3779         pmask = 0xffffffff00000000ull;
3780         break;
3781     case 0x51: /* risblg */
3782         i3 = (i3 & 31) + 32;
3783         i4 = (i4 & 31) + 32;
3784         pmask = 0x00000000ffffffffull;
3785         break;
3786     default:
3787         g_assert_not_reached();
3788     }
3789 
3790     /* MASK is the set of bits to be inserted from R2. */
3791     if (i3 <= i4) {
3792         /* [0...i3---i4...63] */
3793         mask = (-1ull >> i3) & (-1ull << (63 - i4));
3794     } else {
3795         /* [0---i4...i3---63] */
3796         mask = (-1ull >> i3) | (-1ull << (63 - i4));
3797     }
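         /* Worked example: with i3 = 40, i4 = 43 (no wrap),
            mask = (-1 >> 40) & (-1 << 20) = 0x0000000000f00000,
            i.e. bits 40-43 in the PoO's msb-first numbering.  */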
3798     /* For RISBLG/RISBHG, the wrapping is limited to the high/low doubleword. */
3799     mask &= pmask;
3800 
3801     /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
3802        insns, we need to keep the other half of the register.  */
3803     imask = ~mask | ~pmask;
3804     if (do_zero) {
3805         imask = ~pmask;
3806     }
3807 
3808     len = i4 - i3 + 1;
3809     pos = 63 - i4;
3810     rot = i5 & 63;
3811 
3812     /* In some cases we can implement this with extract.  */
3813     if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
3814         tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
3815         return DISAS_NEXT;
3816     }
3817 
3818     /* In some cases we can implement this with deposit.  */
3819     if (len > 0 && (imask == 0 || ~mask == imask)) {
3820         /* Note that we rotate the bits to be inserted to the lsb, not to
3821            the position as described in the PoO.  */
3822         rot = (rot - pos) & 63;
3823     } else {
3824         pos = -1;
3825     }
3826 
3827     /* Rotate the input as necessary.  */
3828     tcg_gen_rotli_i64(o->in2, o->in2, rot);
3829 
3830     /* Insert the selected bits into the output.  */
3831     if (pos >= 0) {
3832         if (imask == 0) {
3833             tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
3834         } else {
3835             tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
3836         }
3837     } else if (imask == 0) {
3838         tcg_gen_andi_i64(o->out, o->in2, mask);
3839     } else {
3840         tcg_gen_andi_i64(o->in2, o->in2, mask);
3841         tcg_gen_andi_i64(o->out, o->out, imask);
3842         tcg_gen_or_i64(o->out, o->out, o->in2);
3843     }
3844     return DISAS_NEXT;
3845 }
3846 
3847 static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o)
3848 {
3849     int i3 = get_field(s, i3);
3850     int i4 = get_field(s, i4);
3851     int i5 = get_field(s, i5);
3852     uint64_t mask;
3853 
3854     /* If this is a test-only form, arrange to discard the result.  */
3855     if (i3 & 0x80) {
3856         o->out = tcg_temp_new_i64();
3857         o->g_out = false;
3858     }
3859 
3860     i3 &= 63;
3861     i4 &= 63;
3862     i5 &= 63;
3863 
3864     /* MASK is the set of bits to be operated on from R2.
3865        Take care for I3/I4 wraparound.  */
3866     mask = ~0ull >> i3;
3867     if (i3 <= i4) {
3868         mask ^= ~0ull >> i4 >> 1;
3869     } else {
3870         mask |= ~(~0ull >> i4 >> 1);
3871     }
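         /* Worked example: i3 = 48, i4 = 55 gives
            mask = (~0 >> 48) ^ (~0 >> 56) = 0xff00, i.e. msb-first
            bits 48-55 of the low doubleword.  */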
3872 
3873     /* Rotate the input as necessary.  */
3874     tcg_gen_rotli_i64(o->in2, o->in2, i5);
3875 
3876     /* Operate.  */
3877     switch (s->fields.op2) {
3878     case 0x54: /* AND */
3879         tcg_gen_ori_i64(o->in2, o->in2, ~mask);
3880         tcg_gen_and_i64(o->out, o->out, o->in2);
3881         break;
3882     case 0x56: /* OR */
3883         tcg_gen_andi_i64(o->in2, o->in2, mask);
3884         tcg_gen_or_i64(o->out, o->out, o->in2);
3885         break;
3886     case 0x57: /* XOR */
3887         tcg_gen_andi_i64(o->in2, o->in2, mask);
3888         tcg_gen_xor_i64(o->out, o->out, o->in2);
3889         break;
3890     default:
3891         abort();
3892     }
3893 
3894     /* Set the CC.  */
3895     tcg_gen_andi_i64(cc_dst, o->out, mask);
3896     set_cc_nz_u64(s, cc_dst);
3897     return DISAS_NEXT;
3898 }
3899 
3900 static DisasJumpType op_rev16(DisasContext *s, DisasOps *o)
3901 {
3902     tcg_gen_bswap16_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
3903     return DISAS_NEXT;
3904 }
3905 
3906 static DisasJumpType op_rev32(DisasContext *s, DisasOps *o)
3907 {
3908     tcg_gen_bswap32_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
3909     return DISAS_NEXT;
3910 }
3911 
3912 static DisasJumpType op_rev64(DisasContext *s, DisasOps *o)
3913 {
3914     tcg_gen_bswap64_i64(o->out, o->in2);
3915     return DISAS_NEXT;
3916 }
3917 
3918 static DisasJumpType op_rll32(DisasContext *s, DisasOps *o)
3919 {
3920     TCGv_i32 t1 = tcg_temp_new_i32();
3921     TCGv_i32 t2 = tcg_temp_new_i32();
3922     TCGv_i32 to = tcg_temp_new_i32();
3923     tcg_gen_extrl_i64_i32(t1, o->in1);
3924     tcg_gen_extrl_i64_i32(t2, o->in2);
3925     tcg_gen_rotl_i32(to, t1, t2);
3926     tcg_gen_extu_i32_i64(o->out, to);
3927     tcg_temp_free_i32(t1);
3928     tcg_temp_free_i32(t2);
3929     tcg_temp_free_i32(to);
3930     return DISAS_NEXT;
3931 }
3932 
3933 static DisasJumpType op_rll64(DisasContext *s, DisasOps *o)
3934 {
3935     tcg_gen_rotl_i64(o->out, o->in1, o->in2);
3936     return DISAS_NEXT;
3937 }
3938 
3939 #ifndef CONFIG_USER_ONLY
3940 static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o)
3941 {
3942     gen_helper_rrbe(cc_op, cpu_env, o->in2);
3943     set_cc_static(s);
3944     return DISAS_NEXT;
3945 }
3946 
3947 static DisasJumpType op_sacf(DisasContext *s, DisasOps *o)
3948 {
3949     gen_helper_sacf(cpu_env, o->in2);
3950     /* Addressing mode has changed, so end the block.  */
3951     return DISAS_PC_STALE;
3952 }
3953 #endif
3954 
3955 static DisasJumpType op_sam(DisasContext *s, DisasOps *o)
3956 {
3957     int sam = s->insn->data;
3958     TCGv_i64 tsam;
3959     uint64_t mask;
3960 
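         /* insn->data encodes the new mode: 0 (SAM24), 1 (SAM31) or
            3 (SAM64), matching the PSW EA/BA bit pair deposited below.  */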
3961     switch (sam) {
3962     case 0:
3963         mask = 0xffffff;
3964         break;
3965     case 1:
3966         mask = 0x7fffffff;
3967         break;
3968     default:
3969         mask = -1;
3970         break;
3971     }
3972 
3973     /* Bizarre but true, we check the address of the current insn for the
3974        specification exception, not the next to be executed.  Thus the PoO
3975        documents that Bad Things Happen two bytes before the end.  */
3976     if (s->base.pc_next & ~mask) {
3977         gen_program_exception(s, PGM_SPECIFICATION);
3978         return DISAS_NORETURN;
3979     }
3980     s->pc_tmp &= mask;
3981 
3982     tsam = tcg_const_i64(sam);
3983     tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
3984     tcg_temp_free_i64(tsam);
3985 
3986     /* Always exit the TB, since we (may have) changed execution mode.  */
3987     return DISAS_PC_STALE;
3988 }
3989 
3990 static DisasJumpType op_sar(DisasContext *s, DisasOps *o)
3991 {
3992     int r1 = get_field(s, r1);
3993     tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
3994     return DISAS_NEXT;
3995 }
3996 
3997 static DisasJumpType op_seb(DisasContext *s, DisasOps *o)
3998 {
3999     gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
4000     return DISAS_NEXT;
4001 }
4002 
4003 static DisasJumpType op_sdb(DisasContext *s, DisasOps *o)
4004 {
4005     gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
4006     return DISAS_NEXT;
4007 }
4008 
4009 static DisasJumpType op_sxb(DisasContext *s, DisasOps *o)
4010 {
4011     gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
4012     return_low128(o->out2);
4013     return DISAS_NEXT;
4014 }
4015 
4016 static DisasJumpType op_sqeb(DisasContext *s, DisasOps *o)
4017 {
4018     gen_helper_sqeb(o->out, cpu_env, o->in2);
4019     return DISAS_NEXT;
4020 }
4021 
4022 static DisasJumpType op_sqdb(DisasContext *s, DisasOps *o)
4023 {
4024     gen_helper_sqdb(o->out, cpu_env, o->in2);
4025     return DISAS_NEXT;
4026 }
4027 
4028 static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o)
4029 {
4030     gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
4031     return_low128(o->out2);
4032     return DISAS_NEXT;
4033 }
4034 
4035 #ifndef CONFIG_USER_ONLY
4036 static DisasJumpType op_servc(DisasContext *s, DisasOps *o)
4037 {
4038     gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
4039     set_cc_static(s);
4040     return DISAS_NEXT;
4041 }
4042 
4043 static DisasJumpType op_sigp(DisasContext *s, DisasOps *o)
4044 {
4045     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
4046     TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
4047     gen_helper_sigp(cc_op, cpu_env, o->in2, r1, r3);
4048     set_cc_static(s);
4049     tcg_temp_free_i32(r1);
4050     tcg_temp_free_i32(r3);
4051     return DISAS_NEXT;
4052 }
4053 #endif
4054 
4055 static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
4056 {
4057     DisasCompare c;
4058     TCGv_i64 a, h;
4059     TCGLabel *lab;
4060     int r1;
4061 
4062     disas_jcc(s, &c, get_field(s, m3));
4063 
4064     /* We want to store when the condition is fulfilled, so branch
4065        out when it's not.  */
4066     c.cond = tcg_invert_cond(c.cond);
4067 
4068     lab = gen_new_label();
4069     if (c.is_64) {
4070         tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
4071     } else {
4072         tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
4073     }
4074     free_compare(&c);
4075 
4076     r1 = get_field(s, r1);
4077     a = get_address(s, 0, get_field(s, b2), get_field(s, d2));
4078     switch (s->insn->data) {
4079     case 1: /* STOCG */
4080         tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
4081         break;
4082     case 0: /* STOC */
4083         tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
4084         break;
4085     case 2: /* STOCFH */
4086         h = tcg_temp_new_i64();
4087         tcg_gen_shri_i64(h, regs[r1], 32);
4088         tcg_gen_qemu_st32(h, a, get_mem_index(s));
4089         tcg_temp_free_i64(h);
4090         break;
4091     default:
4092         g_assert_not_reached();
4093     }
4094     tcg_temp_free_i64(a);
4095 
4096     gen_set_label(lab);
4097     return DISAS_NEXT;
4098 }
4099 
4100 static DisasJumpType op_sla(DisasContext *s, DisasOps *o)
4101 {
4102     TCGv_i64 t;
4103     uint64_t sign = 1ull << s->insn->data;
4104     if (s->insn->data == 31) {
4105         t = tcg_temp_new_i64();
4106         tcg_gen_shli_i64(t, o->in1, 32);
4107     } else {
4108         t = o->in1;
4109     }
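         /* For the 32-bit form (insn data 31), the operand was shifted
            into the high half above so that the CC helper can check for
            overflow at the 64-bit sign position.  */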
4110     gen_op_update2_cc_i64(s, CC_OP_SLA, t, o->in2);
4111     if (s->insn->data == 31) {
4112         tcg_temp_free_i64(t);
4113     }
4114     tcg_gen_shl_i64(o->out, o->in1, o->in2);
4115     /* The arithmetic left shift is curious in that it does not affect
4116        the sign bit.  Copy that over from the source unchanged.  */
4117     tcg_gen_andi_i64(o->out, o->out, ~sign);
4118     tcg_gen_andi_i64(o->in1, o->in1, sign);
4119     tcg_gen_or_i64(o->out, o->out, o->in1);
4120     return DISAS_NEXT;
4121 }
4122 
4123 static DisasJumpType op_sll(DisasContext *s, DisasOps *o)
4124 {
4125     tcg_gen_shl_i64(o->out, o->in1, o->in2);
4126     return DISAS_NEXT;
4127 }
4128 
4129 static DisasJumpType op_sra(DisasContext *s, DisasOps *o)
4130 {
4131     tcg_gen_sar_i64(o->out, o->in1, o->in2);
4132     return DISAS_NEXT;
4133 }
4134 
4135 static DisasJumpType op_srl(DisasContext *s, DisasOps *o)
4136 {
4137     tcg_gen_shr_i64(o->out, o->in1, o->in2);
4138     return DISAS_NEXT;
4139 }
4140 
4141 static DisasJumpType op_sfpc(DisasContext *s, DisasOps *o)
4142 {
4143     gen_helper_sfpc(cpu_env, o->in2);
4144     return DISAS_NEXT;
4145 }
4146 
4147 static DisasJumpType op_sfas(DisasContext *s, DisasOps *o)
4148 {
4149     gen_helper_sfas(cpu_env, o->in2);
4150     return DISAS_NEXT;
4151 }
4152 
4153 static DisasJumpType op_srnm(DisasContext *s, DisasOps *o)
4154 {
4155     /* Bits other than 62 and 63 are ignored. Bit 29 is set to zero. */
4156     tcg_gen_andi_i64(o->addr1, o->addr1, 0x3ull);
4157     gen_helper_srnm(cpu_env, o->addr1);
4158     return DISAS_NEXT;
4159 }
4160 
4161 static DisasJumpType op_srnmb(DisasContext *s, DisasOps *o)
4162 {
4163     /* Bits 0-55 are ignored. */
4164     tcg_gen_andi_i64(o->addr1, o->addr1, 0xffull);
4165     gen_helper_srnm(cpu_env, o->addr1);
4166     return DISAS_NEXT;
4167 }
4168 
4169 static DisasJumpType op_srnmt(DisasContext *s, DisasOps *o)
4170 {
4171     TCGv_i64 tmp = tcg_temp_new_i64();
4172 
4173     /* Bits other than 61-63 are ignored. */
4174     tcg_gen_andi_i64(o->addr1, o->addr1, 0x7ull);
4175 
4176     /* No need to call a helper; we don't implement DFP. */
4177     tcg_gen_ld32u_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
4178     tcg_gen_deposit_i64(tmp, tmp, o->addr1, 4, 3);
4179     tcg_gen_st32_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
4180 
4181     tcg_temp_free_i64(tmp);
4182     return DISAS_NEXT;
4183 }
4184 
4185 static DisasJumpType op_spm(DisasContext *s, DisasOps *o)
4186 {
4187     tcg_gen_extrl_i64_i32(cc_op, o->in1);
4188     tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
4189     set_cc_static(s);
4190 
4191     tcg_gen_shri_i64(o->in1, o->in1, 24);
4192     tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
4193     return DISAS_NEXT;
4194 }
4195 
4196 static DisasJumpType op_ectg(DisasContext *s, DisasOps *o)
4197 {
4198     int b1 = get_field(s, b1);
4199     int d1 = get_field(s, d1);
4200     int b2 = get_field(s, b2);
4201     int d2 = get_field(s, d2);
4202     int r3 = get_field(s, r3);
4203     TCGv_i64 tmp = tcg_temp_new_i64();
4204 
4205     /* fetch all operands first */
4206     o->in1 = tcg_temp_new_i64();
4207     tcg_gen_addi_i64(o->in1, regs[b1], d1);
4208     o->in2 = tcg_temp_new_i64();
4209     tcg_gen_addi_i64(o->in2, regs[b2], d2);
4210     o->addr1 = tcg_temp_new_i64();
4211     gen_addi_and_wrap_i64(s, o->addr1, regs[r3], 0);
4212 
4213     /* load the third operand into r3 before modifying anything */
4214     tcg_gen_qemu_ld64(regs[r3], o->addr1, get_mem_index(s));
4215 
4216     /* subtract CPU timer from first operand and store in GR0 */
4217     gen_helper_stpt(tmp, cpu_env);
4218     tcg_gen_sub_i64(regs[0], o->in1, tmp);
4219 
4220     /* store second operand in GR1 */
4221     tcg_gen_mov_i64(regs[1], o->in2);
4222 
4223     tcg_temp_free_i64(tmp);
4224     return DISAS_NEXT;
4225 }
4226 
4227 #ifndef CONFIG_USER_ONLY
4228 static DisasJumpType op_spka(DisasContext *s, DisasOps *o)
4229 {
4230     tcg_gen_shri_i64(o->in2, o->in2, 4);
4231     tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
4232     return DISAS_NEXT;
4233 }
4234 
4235 static DisasJumpType op_sske(DisasContext *s, DisasOps *o)
4236 {
4237     gen_helper_sske(cpu_env, o->in1, o->in2);
4238     return DISAS_NEXT;
4239 }
4240 
4241 static DisasJumpType op_ssm(DisasContext *s, DisasOps *o)
4242 {
4243     tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
4244     /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
4245     return DISAS_PC_STALE_NOCHAIN;
4246 }
4247 
4248 static DisasJumpType op_stap(DisasContext *s, DisasOps *o)
4249 {
4250     tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, core_id));
4251     return DISAS_NEXT;
4252 }
4253 #endif
4254 
4255 static DisasJumpType op_stck(DisasContext *s, DisasOps *o)
4256 {
4257     gen_helper_stck(o->out, cpu_env);
4258     /* ??? We don't implement clock states.  */
4259     gen_op_movi_cc(s, 0);
4260     return DISAS_NEXT;
4261 }
4262 
4263 static DisasJumpType op_stcke(DisasContext *s, DisasOps *o)
4264 {
4265     TCGv_i64 c1 = tcg_temp_new_i64();
4266     TCGv_i64 c2 = tcg_temp_new_i64();
4267     TCGv_i64 todpr = tcg_temp_new_i64();
4268     gen_helper_stck(c1, cpu_env);
4269     /* 16-bit value stored in a uint32_t (only valid bits set) */
4270     tcg_gen_ld32u_i64(todpr, cpu_env, offsetof(CPUS390XState, todpr));
4271     /* Shift the 64-bit value into its place as a zero-extended
4272        104-bit value.  Note that "bit positions 64-103 are always
4273        non-zero so that they compare differently to STCK"; we set
4274        the least significant bit to 1.  */
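         /* After the shifts below, c1 holds the zero epoch-index byte
            plus clock bits 0-55, while c2 packs the low clock byte, the
            constant bit and the TOD programmable field.  */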
4275     tcg_gen_shli_i64(c2, c1, 56);
4276     tcg_gen_shri_i64(c1, c1, 8);
4277     tcg_gen_ori_i64(c2, c2, 0x10000);
4278     tcg_gen_or_i64(c2, c2, todpr);
4279     tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
4280     tcg_gen_addi_i64(o->in2, o->in2, 8);
4281     tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
4282     tcg_temp_free_i64(c1);
4283     tcg_temp_free_i64(c2);
4284     tcg_temp_free_i64(todpr);
4285     /* ??? We don't implement clock states.  */
4286     gen_op_movi_cc(s, 0);
4287     return DISAS_NEXT;
4288 }
4289 
4290 #ifndef CONFIG_USER_ONLY
4291 static DisasJumpType op_sck(DisasContext *s, DisasOps *o)
4292 {
4293     tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUQ | MO_ALIGN);
4294     gen_helper_sck(cc_op, cpu_env, o->in1);
4295     set_cc_static(s);
4296     return DISAS_NEXT;
4297 }
4298 
4299 static DisasJumpType op_sckc(DisasContext *s, DisasOps *o)
4300 {
4301     gen_helper_sckc(cpu_env, o->in2);
4302     return DISAS_NEXT;
4303 }
4304 
4305 static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o)
4306 {
4307     gen_helper_sckpf(cpu_env, regs[0]);
4308     return DISAS_NEXT;
4309 }
4310 
4311 static DisasJumpType op_stckc(DisasContext *s, DisasOps *o)
4312 {
4313     gen_helper_stckc(o->out, cpu_env);
4314     return DISAS_NEXT;
4315 }
4316 
4317 static DisasJumpType op_stctg(DisasContext *s, DisasOps *o)
4318 {
4319     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
4320     TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
4321     gen_helper_stctg(cpu_env, r1, o->in2, r3);
4322     tcg_temp_free_i32(r1);
4323     tcg_temp_free_i32(r3);
4324     return DISAS_NEXT;
4325 }
4326 
4327 static DisasJumpType op_stctl(DisasContext *s, DisasOps *o)
4328 {
4329     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
4330     TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
4331     gen_helper_stctl(cpu_env, r1, o->in2, r3);
4332     tcg_temp_free_i32(r1);
4333     tcg_temp_free_i32(r3);
4334     return DISAS_NEXT;
4335 }
4336 
4337 static DisasJumpType op_stidp(DisasContext *s, DisasOps *o)
4338 {
4339     tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, cpuid));
4340     return DISAS_NEXT;
4341 }
4342 
4343 static DisasJumpType op_spt(DisasContext *s, DisasOps *o)
4344 {
4345     gen_helper_spt(cpu_env, o->in2);
4346     return DISAS_NEXT;
4347 }
4348 
4349 static DisasJumpType op_stfl(DisasContext *s, DisasOps *o)
4350 {
4351     gen_helper_stfl(cpu_env);
4352     return DISAS_NEXT;
4353 }
4354 
4355 static DisasJumpType op_stpt(DisasContext *s, DisasOps *o)
4356 {
4357     gen_helper_stpt(o->out, cpu_env);
4358     return DISAS_NEXT;
4359 }
4360 
4361 static DisasJumpType op_stsi(DisasContext *s, DisasOps *o)
4362 {
4363     gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
4364     set_cc_static(s);
4365     return DISAS_NEXT;
4366 }
4367 
4368 static DisasJumpType op_spx(DisasContext *s, DisasOps *o)
4369 {
4370     gen_helper_spx(cpu_env, o->in2);
4371     return DISAS_NEXT;
4372 }
4373 
4374 static DisasJumpType op_xsch(DisasContext *s, DisasOps *o)
4375 {
4376     gen_helper_xsch(cpu_env, regs[1]);
4377     set_cc_static(s);
4378     return DISAS_NEXT;
4379 }
4380 
4381 static DisasJumpType op_csch(DisasContext *s, DisasOps *o)
4382 {
4383     gen_helper_csch(cpu_env, regs[1]);
4384     set_cc_static(s);
4385     return DISAS_NEXT;
4386 }
4387 
4388 static DisasJumpType op_hsch(DisasContext *s, DisasOps *o)
4389 {
4390     gen_helper_hsch(cpu_env, regs[1]);
4391     set_cc_static(s);
4392     return DISAS_NEXT;
4393 }
4394 
4395 static DisasJumpType op_msch(DisasContext *s, DisasOps *o)
4396 {
4397     gen_helper_msch(cpu_env, regs[1], o->in2);
4398     set_cc_static(s);
4399     return DISAS_NEXT;
4400 }
4401 
4402 static DisasJumpType op_rchp(DisasContext *s, DisasOps *o)
4403 {
4404     gen_helper_rchp(cpu_env, regs[1]);
4405     set_cc_static(s);
4406     return DISAS_NEXT;
4407 }
4408 
4409 static DisasJumpType op_rsch(DisasContext *s, DisasOps *o)
4410 {
4411     gen_helper_rsch(cpu_env, regs[1]);
4412     set_cc_static(s);
4413     return DISAS_NEXT;
4414 }
4415 
4416 static DisasJumpType op_sal(DisasContext *s, DisasOps *o)
4417 {
4418     gen_helper_sal(cpu_env, regs[1]);
4419     return DISAS_NEXT;
4420 }
4421 
4422 static DisasJumpType op_schm(DisasContext *s, DisasOps *o)
4423 {
4424     gen_helper_schm(cpu_env, regs[1], regs[2], o->in2);
4425     return DISAS_NEXT;
4426 }
4427 
4428 static DisasJumpType op_siga(DisasContext *s, DisasOps *o)
4429 {
4430     /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
4431     gen_op_movi_cc(s, 3);
4432     return DISAS_NEXT;
4433 }
4434 
4435 static DisasJumpType op_stcps(DisasContext *s, DisasOps *o)
4436 {
4437     /* The instruction is suppressed if not provided. */
4438     return DISAS_NEXT;
4439 }
4440 
4441 static DisasJumpType op_ssch(DisasContext *s, DisasOps *o)
4442 {
4443     gen_helper_ssch(cpu_env, regs[1], o->in2);
4444     set_cc_static(s);
4445     return DISAS_NEXT;
4446 }
4447 
4448 static DisasJumpType op_stsch(DisasContext *s, DisasOps *o)
4449 {
4450     gen_helper_stsch(cpu_env, regs[1], o->in2);
4451     set_cc_static(s);
4452     return DISAS_NEXT;
4453 }
4454 
4455 static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o)
4456 {
4457     gen_helper_stcrw(cpu_env, o->in2);
4458     set_cc_static(s);
4459     return DISAS_NEXT;
4460 }
4461 
4462 static DisasJumpType op_tpi(DisasContext *s, DisasOps *o)
4463 {
4464     gen_helper_tpi(cc_op, cpu_env, o->addr1);
4465     set_cc_static(s);
4466     return DISAS_NEXT;
4467 }
4468 
4469 static DisasJumpType op_tsch(DisasContext *s, DisasOps *o)
4470 {
4471     gen_helper_tsch(cpu_env, regs[1], o->in2);
4472     set_cc_static(s);
4473     return DISAS_NEXT;
4474 }
4475 
4476 static DisasJumpType op_chsc(DisasContext *s, DisasOps *o)
4477 {
4478     gen_helper_chsc(cpu_env, o->in2);
4479     set_cc_static(s);
4480     return DISAS_NEXT;
4481 }
4482 
4483 static DisasJumpType op_stpx(DisasContext *s, DisasOps *o)
4484 {
4485     tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
4486     tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
4487     return DISAS_NEXT;
4488 }
4489 
4490 static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o)
4491 {
4492     uint64_t i2 = get_field(s, i2);
4493     TCGv_i64 t;
4494 
4495     /* It is important to do what the instruction name says: STORE THEN.
4496        If we let the output hook perform the store and we then fault and
4497        restart, we'll have the wrong SYSTEM MASK in place.  */
4498     t = tcg_temp_new_i64();
4499     tcg_gen_shri_i64(t, psw_mask, 56);
4500     tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
4501     tcg_temp_free_i64(t);
4502 
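         /* Opcode 0xac is STNSM, which ANDs the immediate into the
            system mask; 0xad is STOSM, which ORs it in.  */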
4503     if (s->fields.op == 0xac) {
4504         tcg_gen_andi_i64(psw_mask, psw_mask,
4505                          (i2 << 56) | 0x00ffffffffffffffull);
4506     } else {
4507         tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
4508     }
4509 
4510     /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
4511     return DISAS_PC_STALE_NOCHAIN;
4512 }
4513 
4514 static DisasJumpType op_stura(DisasContext *s, DisasOps *o)
4515 {
4516     tcg_gen_qemu_st_tl(o->in1, o->in2, MMU_REAL_IDX, s->insn->data);
4517 
4518     if (s->base.tb->flags & FLAG_MASK_PER) {
4519         update_psw_addr(s);
4520         gen_helper_per_store_real(cpu_env);
4521     }
4522     return DISAS_NEXT;
4523 }
4524 #endif
4525 
4526 static DisasJumpType op_stfle(DisasContext *s, DisasOps *o)
4527 {
4528     gen_helper_stfle(cc_op, cpu_env, o->in2);
4529     set_cc_static(s);
4530     return DISAS_NEXT;
4531 }
4532 
4533 static DisasJumpType op_st8(DisasContext *s, DisasOps *o)
4534 {
4535     tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
4536     return DISAS_NEXT;
4537 }
4538 
4539 static DisasJumpType op_st16(DisasContext *s, DisasOps *o)
4540 {
4541     tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
4542     return DISAS_NEXT;
4543 }
4544 
4545 static DisasJumpType op_st32(DisasContext *s, DisasOps *o)
4546 {
4547     tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
4548     return DISAS_NEXT;
4549 }
4550 
4551 static DisasJumpType op_st64(DisasContext *s, DisasOps *o)
4552 {
4553     tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
4554     return DISAS_NEXT;
4555 }
4556 
4557 static DisasJumpType op_stam(DisasContext *s, DisasOps *o)
4558 {
4559     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
4560     TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
4561     gen_helper_stam(cpu_env, r1, o->in2, r3);
4562     tcg_temp_free_i32(r1);
4563     tcg_temp_free_i32(r3);
4564     return DISAS_NEXT;
4565 }
4566 
4567 static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
4568 {
4569     int m3 = get_field(s, m3);
4570     int pos, base = s->insn->data;
4571     TCGv_i64 tmp = tcg_temp_new_i64();
4572 
4573     pos = base + ctz32(m3) * 8;
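         /* E.g. m3 == 0x6 selects the middle two bytes: ctz32(0x6) == 1,
            so the shift below brings exactly those bytes into the low
            half for the 16-bit store.  */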
4574     switch (m3) {
4575     case 0xf:
4576         /* Effectively a 32-bit store.  */
4577         tcg_gen_shri_i64(tmp, o->in1, pos);
4578         tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
4579         break;
4580 
4581     case 0xc:
4582     case 0x6:
4583     case 0x3:
4584         /* Effectively a 16-bit store.  */
4585         tcg_gen_shri_i64(tmp, o->in1, pos);
4586         tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
4587         break;
4588 
4589     case 0x8:
4590     case 0x4:
4591     case 0x2:
4592     case 0x1:
4593         /* Effectively an 8-bit store.  */
4594         tcg_gen_shri_i64(tmp, o->in1, pos);
4595         tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
4596         break;
4597 
4598     default:
4599         /* This is going to be a sequence of shifts and stores.  */
4600         pos = base + 32 - 8;
4601         while (m3) {
4602             if (m3 & 0x8) {
4603                 tcg_gen_shri_i64(tmp, o->in1, pos);
4604                 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
4605                 tcg_gen_addi_i64(o->in2, o->in2, 1);
4606             }
4607             m3 = (m3 << 1) & 0xf;
4608             pos -= 8;
4609         }
4610         break;
4611     }
4612     tcg_temp_free_i64(tmp);
4613     return DISAS_NEXT;
4614 }
4615 
4616 static DisasJumpType op_stm(DisasContext *s, DisasOps *o)
4617 {
4618     int r1 = get_field(s, r1);
4619     int r3 = get_field(s, r3);
4620     int size = s->insn->data;
4621     TCGv_i64 tsize = tcg_const_i64(size);
4622 
4623     while (1) {
4624         if (size == 8) {
4625             tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
4626         } else {
4627             tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
4628         }
4629         if (r1 == r3) {
4630             break;
4631         }
4632         tcg_gen_add_i64(o->in2, o->in2, tsize);
4633         r1 = (r1 + 1) & 15;
4634     }
4635 
4636     tcg_temp_free_i64(tsize);
4637     return DISAS_NEXT;
4638 }
4639 
4640 static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)
4641 {
4642     int r1 = get_field(s, r1);
4643     int r3 = get_field(s, r3);
4644     TCGv_i64 t = tcg_temp_new_i64();
4645     TCGv_i64 t4 = tcg_const_i64(4);
4646     TCGv_i64 t32 = tcg_const_i64(32);
4647 
4648     while (1) {
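             /* Bring the high word down so that the 32-bit store writes
                bits 0-31 of the register.  */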
4649         tcg_gen_shr_i64(t, regs[r1], t32);
4650         tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
4651         if (r1 == r3) {
4652             break;
4653         }
4654         tcg_gen_add_i64(o->in2, o->in2, t4);
4655         r1 = (r1 + 1) & 15;
4656     }
4657 
4658     tcg_temp_free_i64(t);
4659     tcg_temp_free_i64(t4);
4660     tcg_temp_free_i64(t32);
4661     return DISAS_NEXT;
4662 }
4663 
4664 static DisasJumpType op_stpq(DisasContext *s, DisasOps *o)
4665 {
4666     if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
4667         gen_helper_stpq(cpu_env, o->in2, o->out2, o->out);
4668     } else if (HAVE_ATOMIC128) {
4669         gen_helper_stpq_parallel(cpu_env, o->in2, o->out2, o->out);
4670     } else {
4671         gen_helper_exit_atomic(cpu_env);
4672         return DISAS_NORETURN;
4673     }
4674     return DISAS_NEXT;
4675 }
4676 
4677 static DisasJumpType op_srst(DisasContext *s, DisasOps *o)
4678 {
4679     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
4680     TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
4681 
4682     gen_helper_srst(cpu_env, r1, r2);
4683 
4684     tcg_temp_free_i32(r1);
4685     tcg_temp_free_i32(r2);
4686     set_cc_static(s);
4687     return DISAS_NEXT;
4688 }
4689 
4690 static DisasJumpType op_srstu(DisasContext *s, DisasOps *o)
4691 {
4692     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
4693     TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
4694 
4695     gen_helper_srstu(cpu_env, r1, r2);
4696 
4697     tcg_temp_free_i32(r1);
4698     tcg_temp_free_i32(r2);
4699     set_cc_static(s);
4700     return DISAS_NEXT;
4701 }
4702 
4703 static DisasJumpType op_sub(DisasContext *s, DisasOps *o)
4704 {
4705     tcg_gen_sub_i64(o->out, o->in1, o->in2);
4706     return DISAS_NEXT;
4707 }
4708 
4709 static DisasJumpType op_subu64(DisasContext *s, DisasOps *o)
4710 {
4711     tcg_gen_movi_i64(cc_src, 0);
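         /* A 128-bit subtract with zero high halves: the resulting high
            half in cc_src is the borrow, 0 or -1.  */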
4712     tcg_gen_sub2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
4713     return DISAS_NEXT;
4714 }
4715 
4716 /* Compute borrow (0, -1) into cc_src. */
4717 static void compute_borrow(DisasContext *s)
4718 {
4719     switch (s->cc_op) {
4720     case CC_OP_SUBU:
4721         /* The borrow value is already in cc_src (0,-1). */
4722         break;
4723     default:
4724         gen_op_calc_cc(s);
4725         /* fall through */
4726     case CC_OP_STATIC:
4727         /* The carry flag is the msb of CC; compute into cc_src. */
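             /* E.g. CC 2 or 3 (carry set) yields 1; CC 0 or 1 yields 0.  */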
4728         tcg_gen_extu_i32_i64(cc_src, cc_op);
4729         tcg_gen_shri_i64(cc_src, cc_src, 1);
4730         /* fall through */
4731     case CC_OP_ADDU:
4732         /* Convert carry (1,0) to borrow (0,-1). */
4733         tcg_gen_subi_i64(cc_src, cc_src, 1);
4734         break;
4735     }
4736 }
4737 
4738 static DisasJumpType op_subb32(DisasContext *s, DisasOps *o)
4739 {
4740     compute_borrow(s);
4741 
4742     /* Borrow is {0, -1}, so add to subtract. */
4743     tcg_gen_add_i64(o->out, o->in1, cc_src);
4744     tcg_gen_sub_i64(o->out, o->out, o->in2);
4745     return DISAS_NEXT;
4746 }
4747 
4748 static DisasJumpType op_subb64(DisasContext *s, DisasOps *o)
4749 {
4750     compute_borrow(s);
4751 
4752     /*
4753      * Borrow is {0, -1}, so add to subtract; replicate the
4754      * borrow input to produce 128-bit -1 for the addition.
4755      */
4756     TCGv_i64 zero = tcg_const_i64(0);
4757     tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, cc_src);
4758     tcg_gen_sub2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);
4759     tcg_temp_free_i64(zero);
4760 
4761     return DISAS_NEXT;
4762 }
4763 
4764 static DisasJumpType op_svc(DisasContext *s, DisasOps *o)
4765 {
4766     TCGv_i32 t;
4767 
4768     update_psw_addr(s);
4769     update_cc_op(s);
4770 
4771     t = tcg_const_i32(get_field(s, i1) & 0xff);
4772     tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
4773     tcg_temp_free_i32(t);
4774 
4775     t = tcg_const_i32(s->ilen);
4776     tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
4777     tcg_temp_free_i32(t);
4778 
4779     gen_exception(EXCP_SVC);
4780     return DISAS_NORETURN;
4781 }
4782 
4783 static DisasJumpType op_tam(DisasContext *s, DisasOps *o)
4784 {
4785     int cc = 0;
4786 
4787     cc |= (s->base.tb->flags & FLAG_MASK_64) ? 2 : 0;
4788     cc |= (s->base.tb->flags & FLAG_MASK_32) ? 1 : 0;
4789     gen_op_movi_cc(s, cc);
4790     return DISAS_NEXT;
4791 }
4792 
4793 static DisasJumpType op_tceb(DisasContext *s, DisasOps *o)
4794 {
4795     gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
4796     set_cc_static(s);
4797     return DISAS_NEXT;
4798 }
4799 
4800 static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o)
4801 {
4802     gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
4803     set_cc_static(s);
4804     return DISAS_NEXT;
4805 }
4806 
4807 static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o)
4808 {
4809     gen_helper_tcxb(cc_op, cpu_env, o->out, o->out2, o->in2);
4810     set_cc_static(s);
4811     return DISAS_NEXT;
4812 }
4813 
4814 #ifndef CONFIG_USER_ONLY
4815 
4816 static DisasJumpType op_testblock(DisasContext *s, DisasOps *o)
4817 {
4818     gen_helper_testblock(cc_op, cpu_env, o->in2);
4819     set_cc_static(s);
4820     return DISAS_NEXT;
4821 }
4822 
4823 static DisasJumpType op_tprot(DisasContext *s, DisasOps *o)
4824 {
4825     gen_helper_tprot(cc_op, cpu_env, o->addr1, o->in2);
4826     set_cc_static(s);
4827     return DISAS_NEXT;
4828 }
4829 
4830 #endif
4831 
4832 static DisasJumpType op_tp(DisasContext *s, DisasOps *o)
4833 {
4834     TCGv_i32 l1 = tcg_const_i32(get_field(s, l1) + 1);
4835     gen_helper_tp(cc_op, cpu_env, o->addr1, l1);
4836     tcg_temp_free_i32(l1);
4837     set_cc_static(s);
4838     return DISAS_NEXT;
4839 }
4840 
4841 static DisasJumpType op_tr(DisasContext *s, DisasOps *o)
4842 {
4843     TCGv_i32 l = tcg_const_i32(get_field(s, l1));
4844     gen_helper_tr(cpu_env, l, o->addr1, o->in2);
4845     tcg_temp_free_i32(l);
4846     set_cc_static(s);
4847     return DISAS_NEXT;
4848 }
4849 
4850 static DisasJumpType op_tre(DisasContext *s, DisasOps *o)
4851 {
4852     gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
4853     return_low128(o->out2);
4854     set_cc_static(s);
4855     return DISAS_NEXT;
4856 }
4857 
4858 static DisasJumpType op_trt(DisasContext *s, DisasOps *o)
4859 {
4860     TCGv_i32 l = tcg_const_i32(get_field(s, l1));
4861     gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
4862     tcg_temp_free_i32(l);
4863     set_cc_static(s);
4864     return DISAS_NEXT;
4865 }
4866 
4867 static DisasJumpType op_trtr(DisasContext *s, DisasOps *o)
4868 {
4869     TCGv_i32 l = tcg_const_i32(get_field(s, l1));
4870     gen_helper_trtr(cc_op, cpu_env, l, o->addr1, o->in2);
4871     tcg_temp_free_i32(l);
4872     set_cc_static(s);
4873     return DISAS_NEXT;
4874 }
4875 
4876 static DisasJumpType op_trXX(DisasContext *s, DisasOps *o)
4877 {
4878     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
4879     TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
4880     TCGv_i32 sizes = tcg_const_i32(s->insn->opc & 3);
4881     TCGv_i32 tst = tcg_temp_new_i32();
4882     int m3 = get_field(s, m3);
4883 
4884     if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
4885         m3 = 0;
4886     }
4887     if (m3 & 1) {
4888         tcg_gen_movi_i32(tst, -1);
4889     } else {
4890         tcg_gen_extrl_i64_i32(tst, regs[0]);
4891         if (s->insn->opc & 3) {
4892             tcg_gen_ext8u_i32(tst, tst);
4893         } else {
4894             tcg_gen_ext16u_i32(tst, tst);
4895         }
4896     }
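         /* tst == -1 disables the test-character check; otherwise GR0
            supplies the test character, truncated to the width the insn
            requires.  */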
4897     gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);
4898 
4899     tcg_temp_free_i32(r1);
4900     tcg_temp_free_i32(r2);
4901     tcg_temp_free_i32(sizes);
4902     tcg_temp_free_i32(tst);
4903     set_cc_static(s);
4904     return DISAS_NEXT;
4905 }
4906 
4907 static DisasJumpType op_ts(DisasContext *s, DisasOps *o)
4908 {
4909     TCGv_i32 t1 = tcg_const_i32(0xff);
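         /* Atomically replace the byte with all ones; the old value's
            leftmost bit becomes the CC.  */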
4910     tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
4911     tcg_gen_extract_i32(cc_op, t1, 7, 1);
4912     tcg_temp_free_i32(t1);
4913     set_cc_static(s);
4914     return DISAS_NEXT;
4915 }
4916 
4917 static DisasJumpType op_unpk(DisasContext *s, DisasOps *o)
4918 {
4919     TCGv_i32 l = tcg_const_i32(get_field(s, l1));
4920     gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
4921     tcg_temp_free_i32(l);
4922     return DISAS_NEXT;
4923 }
4924 
4925 static DisasJumpType op_unpka(DisasContext *s, DisasOps *o)
4926 {
4927     int l1 = get_field(s, l1) + 1;
4928     TCGv_i32 l;
4929 
4930     /* The length must not exceed 32 bytes.  */
4931     if (l1 > 32) {
4932         gen_program_exception(s, PGM_SPECIFICATION);
4933         return DISAS_NORETURN;
4934     }
4935     l = tcg_const_i32(l1);
4936     gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2);
4937     tcg_temp_free_i32(l);
4938     set_cc_static(s);
4939     return DISAS_NEXT;
4940 }
4941 
4942 static DisasJumpType op_unpku(DisasContext *s, DisasOps *o)
4943 {
4944     int l1 = get_field(s, l1) + 1;
4945     TCGv_i32 l;
4946 
4947     /* The length must be even and must not exceed 64 bytes.  */
4948     if ((l1 & 1) || (l1 > 64)) {
4949         gen_program_exception(s, PGM_SPECIFICATION);
4950         return DISAS_NORETURN;
4951     }
4952     l = tcg_const_i32(l1);
4953     gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2);
4954     tcg_temp_free_i32(l);
4955     set_cc_static(s);
4956     return DISAS_NEXT;
4957 }
4958 
4959 
4960 static DisasJumpType op_xc(DisasContext *s, DisasOps *o)
4961 {
4962     int d1 = get_field(s, d1);
4963     int d2 = get_field(s, d2);
4964     int b1 = get_field(s, b1);
4965     int b2 = get_field(s, b2);
4966     int l = get_field(s, l1);
4967     TCGv_i32 t32;
4968 
4969     o->addr1 = get_address(s, 0, b1, d1);
4970 
4971     /* If the addresses are identical, this is a store/memset of zero.  */
4972     if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
4973         o->in2 = tcg_const_i64(0);
4974 
4975         l++;
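             /* E.g. a 19-byte clear becomes two 8-byte stores, then one
                2-byte and one 1-byte store.  */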
4976         while (l >= 8) {
4977             tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
4978             l -= 8;
4979             if (l > 0) {
4980                 tcg_gen_addi_i64(o->addr1, o->addr1, 8);
4981             }
4982         }
4983         if (l >= 4) {
4984             tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
4985             l -= 4;
4986             if (l > 0) {
4987                 tcg_gen_addi_i64(o->addr1, o->addr1, 4);
4988             }
4989         }
4990         if (l >= 2) {
4991             tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
4992             l -= 2;
4993             if (l > 0) {
4994                 tcg_gen_addi_i64(o->addr1, o->addr1, 2);
4995             }
4996         }
4997         if (l) {
4998             tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
4999         }
5000         gen_op_movi_cc(s, 0);
5001         return DISAS_NEXT;
5002     }
5003 
5004     /* But in general we'll defer to a helper.  */
5005     o->in2 = get_address(s, 0, b2, d2);
5006     t32 = tcg_const_i32(l);
5007     gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
5008     tcg_temp_free_i32(t32);
5009     set_cc_static(s);
5010     return DISAS_NEXT;
5011 }
5012 
5013 static DisasJumpType op_xor(DisasContext *s, DisasOps *o)
5014 {
5015     tcg_gen_xor_i64(o->out, o->in1, o->in2);
5016     return DISAS_NEXT;
5017 }
5018 
5019 static DisasJumpType op_xori(DisasContext *s, DisasOps *o)
5020 {
5021     int shift = s->insn->data & 0xff;
5022     int size = s->insn->data >> 8;
5023     uint64_t mask = ((1ull << size) - 1) << shift;
5024 
5025     assert(!o->g_in2);
5026     tcg_gen_shli_i64(o->in2, o->in2, shift);
5027     tcg_gen_xor_i64(o->out, o->in1, o->in2);
5028 
5029     /* Produce the CC from only the bits manipulated.  */
5030     tcg_gen_andi_i64(cc_dst, o->out, mask);
5031     set_cc_nz_u64(s, cc_dst);
5032     return DISAS_NEXT;
5033 }
5034 
5035 static DisasJumpType op_xi(DisasContext *s, DisasOps *o)
5036 {
5037     o->in1 = tcg_temp_new_i64();
5038 
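         /* Same interlocked-access strategy as op_oi above, with XOR.  */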
5039     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
5040         tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
5041     } else {
5042         /* Perform the atomic operation in memory. */
5043         tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
5044                                      s->insn->data);
5045     }
5046 
5047     /* Recompute the result for the atomic case as well; it is needed to set the CC. */
5048     tcg_gen_xor_i64(o->out, o->in1, o->in2);
5049 
5050     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
5051         tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
5052     }
5053     return DISAS_NEXT;
5054 }
5055 
5056 static DisasJumpType op_zero(DisasContext *s, DisasOps *o)
5057 {
5058     o->out = tcg_const_i64(0);
5059     return DISAS_NEXT;
5060 }
5061 
5062 static DisasJumpType op_zero2(DisasContext *s, DisasOps *o)
5063 {
5064     o->out = tcg_const_i64(0);
5065     o->out2 = o->out;
5066     o->g_out2 = true;
5067     return DISAS_NEXT;
5068 }
5069 
5070 #ifndef CONFIG_USER_ONLY
5071 static DisasJumpType op_clp(DisasContext *s, DisasOps *o)
5072 {
5073     TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
5074 
5075     gen_helper_clp(cpu_env, r2);
5076     tcg_temp_free_i32(r2);
5077     set_cc_static(s);
5078     return DISAS_NEXT;
5079 }
5080 
5081 static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o)
5082 {
5083     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
5084     TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
5085 
5086     gen_helper_pcilg(cpu_env, r1, r2);
5087     tcg_temp_free_i32(r1);
5088     tcg_temp_free_i32(r2);
5089     set_cc_static(s);
5090     return DISAS_NEXT;
5091 }
5092 
5093 static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o)
5094 {
5095     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
5096     TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
5097 
5098     gen_helper_pcistg(cpu_env, r1, r2);
5099     tcg_temp_free_i32(r1);
5100     tcg_temp_free_i32(r2);
5101     set_cc_static(s);
5102     return DISAS_NEXT;
5103 }
5104 
5105 static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o)
5106 {
5107     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
5108     TCGv_i32 ar = tcg_const_i32(get_field(s, b2));
5109 
5110     gen_helper_stpcifc(cpu_env, r1, o->addr1, ar);
5111     tcg_temp_free_i32(ar);
5112     tcg_temp_free_i32(r1);
5113     set_cc_static(s);
5114     return DISAS_NEXT;
5115 }
5116 
5117 static DisasJumpType op_sic(DisasContext *s, DisasOps *o)
5118 {
5119     gen_helper_sic(cpu_env, o->in1, o->in2);
5120     return DISAS_NEXT;
5121 }
5122 
5123 static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o)
5124 {
5125     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
5126     TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
5127 
5128     gen_helper_rpcit(cpu_env, r1, r2);
5129     tcg_temp_free_i32(r1);
5130     tcg_temp_free_i32(r2);
5131     set_cc_static(s);
5132     return DISAS_NEXT;
5133 }
5134 
5135 static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o)
5136 {
5137     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
5138     TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
5139     TCGv_i32 ar = tcg_const_i32(get_field(s, b2));
5140 
5141     gen_helper_pcistb(cpu_env, r1, r3, o->addr1, ar);
5142     tcg_temp_free_i32(ar);
5143     tcg_temp_free_i32(r1);
5144     tcg_temp_free_i32(r3);
5145     set_cc_static(s);
5146     return DISAS_NEXT;
5147 }
5148 
5149 static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o)
5150 {
5151     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
5152     TCGv_i32 ar = tcg_const_i32(get_field(s, b2));
5153 
5154     gen_helper_mpcifc(cpu_env, r1, o->addr1, ar);
5155     tcg_temp_free_i32(ar);
5156     tcg_temp_free_i32(r1);
5157     set_cc_static(s);
5158     return DISAS_NEXT;
5159 }
5160 #endif
5161 
5162 #include "translate_vx.c.inc"
5163 
5164 /* ====================================================================== */
5165 /* The "Cc OUTput" generators.  Given the generated output (and in some cases
5166    the original inputs), update the various cc data structures in order to
5167    be able to compute the new condition code.  */
5168 
5169 static void cout_abs32(DisasContext *s, DisasOps *o)
5170 {
5171     gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
5172 }
5173 
5174 static void cout_abs64(DisasContext *s, DisasOps *o)
5175 {
5176     gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
5177 }
5178 
5179 static void cout_adds32(DisasContext *s, DisasOps *o)
5180 {
5181     gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
5182 }
5183 
5184 static void cout_adds64(DisasContext *s, DisasOps *o)
5185 {
5186     gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
5187 }
5188 
5189 static void cout_addu32(DisasContext *s, DisasOps *o)
5190 {
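         /* The 32-bit addition was performed on zero-extended inputs in
            a 64-bit temp, so bit 32 of OUT is the carry-out.  */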
5191     tcg_gen_shri_i64(cc_src, o->out, 32);
5192     tcg_gen_ext32u_i64(cc_dst, o->out);
5193     gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, cc_dst);
5194 }
5195 
5196 static void cout_addu64(DisasContext *s, DisasOps *o)
5197 {
5198     gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, o->out);
5199 }
5200 
5201 static void cout_cmps32(DisasContext *s, DisasOps *o)
5202 {
5203     gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
5204 }
5205 
5206 static void cout_cmps64(DisasContext *s, DisasOps *o)
5207 {
5208     gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
5209 }
5210 
5211 static void cout_cmpu32(DisasContext *s, DisasOps *o)
5212 {
5213     gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
5214 }
5215 
5216 static void cout_cmpu64(DisasContext *s, DisasOps *o)
5217 {
5218     gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
5219 }
5220 
5221 static void cout_f32(DisasContext *s, DisasOps *o)
5222 {
5223     gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
5224 }
5225 
5226 static void cout_f64(DisasContext *s, DisasOps *o)
5227 {
5228     gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
5229 }
5230 
5231 static void cout_f128(DisasContext *s, DisasOps *o)
5232 {
5233     gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
5234 }
5235 
5236 static void cout_nabs32(DisasContext *s, DisasOps *o)
5237 {
5238     gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
5239 }
5240 
5241 static void cout_nabs64(DisasContext *s, DisasOps *o)
5242 {
5243     gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
5244 }
5245 
5246 static void cout_neg32(DisasContext *s, DisasOps *o)
5247 {
5248     gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
5249 }
5250 
5251 static void cout_neg64(DisasContext *s, DisasOps *o)
5252 {
5253     gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
5254 }
5255 
5256 static void cout_nz32(DisasContext *s, DisasOps *o)
5257 {
5258     tcg_gen_ext32u_i64(cc_dst, o->out);
5259     gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
5260 }
5261 
5262 static void cout_nz64(DisasContext *s, DisasOps *o)
5263 {
5264     gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
5265 }
5266 
5267 static void cout_s32(DisasContext *s, DisasOps *o)
5268 {
5269     gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
5270 }
5271 
5272 static void cout_s64(DisasContext *s, DisasOps *o)
5273 {
5274     gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
5275 }
5276 
5277 static void cout_subs32(DisasContext *s, DisasOps *o)
5278 {
5279     gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
5280 }
5281 
5282 static void cout_subs64(DisasContext *s, DisasOps *o)
5283 {
5284     gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
5285 }
5286 
5287 static void cout_subu32(DisasContext *s, DisasOps *o)
5288 {
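         /* The subtraction was performed in 64 bits, so on borrow the
            high half is all ones; the arithmetic shift yields the
            (0, -1) borrow directly.  */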
5289     tcg_gen_sari_i64(cc_src, o->out, 32);
5290     tcg_gen_ext32u_i64(cc_dst, o->out);
5291     gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, cc_dst);
5292 }
5293 
5294 static void cout_subu64(DisasContext *s, DisasOps *o)
5295 {
5296     gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, o->out);
5297 }
5298 
5299 static void cout_tm32(DisasContext *s, DisasOps *o)
5300 {
5301     gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
5302 }
5303 
5304 static void cout_tm64(DisasContext *s, DisasOps *o)
5305 {
5306     gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
5307 }
5308 
5309 static void cout_muls32(DisasContext *s, DisasOps *o)
5310 {
5311     gen_op_update1_cc_i64(s, CC_OP_MULS_32, o->out);
5312 }
5313 
5314 static void cout_muls64(DisasContext *s, DisasOps *o)
5315 {
5316     /* out contains "high" part, out2 contains "low" part of 128 bit result */
5317     gen_op_update2_cc_i64(s, CC_OP_MULS_64, o->out, o->out2);
5318 }
5319 
5320 /* ====================================================================== */
5321 /* The "PREParation" generators.  These initialize the DisasOps.OUT fields
5322    with the TCG register to which we will write.  Used in combination with
5323    the "wout" generators, in some cases we need a new temporary, and in
5324    some cases we can write to a TCG global.  */
5325 
5326 static void prep_new(DisasContext *s, DisasOps *o)
5327 {
5328     o->out = tcg_temp_new_i64();
5329 }
5330 #define SPEC_prep_new 0
5331 
5332 static void prep_new_P(DisasContext *s, DisasOps *o)
5333 {
5334     o->out = tcg_temp_new_i64();
5335     o->out2 = tcg_temp_new_i64();
5336 }
5337 #define SPEC_prep_new_P 0
5338 
5339 static void prep_r1(DisasContext *s, DisasOps *o)
5340 {
5341     o->out = regs[get_field(s, r1)];
5342     o->g_out = true;
5343 }
5344 #define SPEC_prep_r1 0
5345 
5346 static void prep_r1_P(DisasContext *s, DisasOps *o)
5347 {
5348     int r1 = get_field(s, r1);
5349     o->out = regs[r1];
5350     o->out2 = regs[r1 + 1];
5351     o->g_out = o->g_out2 = true;
5352 }
5353 #define SPEC_prep_r1_P SPEC_r1_even
5354 
5355 /* Whenever we need x1 in addition to other inputs, we'll load it into out/out2.  */
5356 static void prep_x1(DisasContext *s, DisasOps *o)
5357 {
5358     o->out = load_freg(get_field(s, r1));
5359     o->out2 = load_freg(get_field(s, r1) + 2);
5360 }
5361 #define SPEC_prep_x1 SPEC_r1_f128
5362 
5363 /* ====================================================================== */
5364 /* The "Write OUTput" generators.  These generally perform some non-trivial
5365    copy of data to TCG globals, or to main memory.  The trivial cases are
5366    generally handled by having a "prep" generator install the TCG global
5367    as the destination of the operation.  */
5368 
5369 static void wout_r1(DisasContext *s, DisasOps *o)
5370 {
5371     store_reg(get_field(s, r1), o->out);
5372 }
5373 #define SPEC_wout_r1 0
5374 
5375 static void wout_out2_r1(DisasContext *s, DisasOps *o)
5376 {
5377     store_reg(get_field(s, r1), o->out2);
5378 }
5379 #define SPEC_wout_out2_r1 0
5380 
5381 static void wout_r1_8(DisasContext *s, DisasOps *o)
5382 {
5383     int r1 = get_field(s, r1);
5384     tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
5385 }
5386 #define SPEC_wout_r1_8 0
5387 
5388 static void wout_r1_16(DisasContext *s, DisasOps *o)
5389 {
5390     int r1 = get_field(s, r1);
5391     tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
5392 }
5393 #define SPEC_wout_r1_16 0
5394 
5395 static void wout_r1_32(DisasContext *s, DisasOps *o)
5396 {
5397     store_reg32_i64(get_field(s, r1), o->out);
5398 }
5399 #define SPEC_wout_r1_32 0
5400 
5401 static void wout_r1_32h(DisasContext *s, DisasOps *o)
5402 {
5403     store_reg32h_i64(get_field(s, r1), o->out);
5404 }
5405 #define SPEC_wout_r1_32h 0
5406 
5407 static void wout_r1_P32(DisasContext *s, DisasOps *o)
5408 {
5409     int r1 = get_field(s, r1);
5410     store_reg32_i64(r1, o->out);
5411     store_reg32_i64(r1 + 1, o->out2);
5412 }
5413 #define SPEC_wout_r1_P32 SPEC_r1_even
5414 
5415 static void wout_r1_D32(DisasContext *s, DisasOps *o)
5416 {
5417     int r1 = get_field(s, r1);
5418     TCGv_i64 t = tcg_temp_new_i64();
5419     store_reg32_i64(r1 + 1, o->out);
5420     tcg_gen_shri_i64(t, o->out, 32);
5421     store_reg32_i64(r1, t);
5422     tcg_temp_free_i64(t);
5423 }
5424 #define SPEC_wout_r1_D32 SPEC_r1_even
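
/*
 * A worked example of the "D32" convention above: o->out holds one
 * 64-bit value that is split across an even/odd register pair.  With
 * r1 = 2 and o->out = 0x1111222233334444 (values chosen purely for
 * illustration), the low word 0x33334444 lands in the low 32 bits of
 * r3 and the high word 0x11112222 in the low 32 bits of r2.
 */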
5425 
5426 static void wout_r3_P32(DisasContext *s, DisasOps *o)
5427 {
5428     int r3 = get_field(s, r3);
5429     store_reg32_i64(r3, o->out);
5430     store_reg32_i64(r3 + 1, o->out2);
5431 }
5432 #define SPEC_wout_r3_P32 SPEC_r3_even
5433 
5434 static void wout_r3_P64(DisasContext *s, DisasOps *o)
5435 {
5436     int r3 = get_field(s, r3);
5437     store_reg(r3, o->out);
5438     store_reg(r3 + 1, o->out2);
5439 }
5440 #define SPEC_wout_r3_P64 SPEC_r3_even
5441 
5442 static void wout_e1(DisasContext *s, DisasOps *o)
5443 {
5444     store_freg32_i64(get_field(s, r1), o->out);
5445 }
5446 #define SPEC_wout_e1 0
5447 
5448 static void wout_f1(DisasContext *s, DisasOps *o)
5449 {
5450     store_freg(get_field(s, r1), o->out);
5451 }
5452 #define SPEC_wout_f1 0
5453 
5454 static void wout_x1(DisasContext *s, DisasOps *o)
5455 {
5456     int f1 = get_field(s, r1);
5457     store_freg(f1, o->out);
5458     store_freg(f1 + 2, o->out2);
5459 }
5460 #define SPEC_wout_x1 SPEC_r1_f128
5461 
5462 static void wout_cond_r1r2_32(DisasContext *s, DisasOps *o)
5463 {
5464     if (get_field(s, r1) != get_field(s, r2)) {
5465         store_reg32_i64(get_field(s, r1), o->out);
5466     }
5467 }
5468 #define SPEC_wout_cond_r1r2_32 0
5469 
5470 static void wout_cond_e1e2(DisasContext *s, DisasOps *o)
5471 {
5472     if (get_field(s, r1) != get_field(s, r2)) {
5473         store_freg32_i64(get_field(s, r1), o->out);
5474     }
5475 }
5476 #define SPEC_wout_cond_e1e2 0
5477 
5478 static void wout_m1_8(DisasContext *s, DisasOps *o)
5479 {
5480     tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
5481 }
5482 #define SPEC_wout_m1_8 0
5483 
5484 static void wout_m1_16(DisasContext *s, DisasOps *o)
5485 {
5486     tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
5487 }
5488 #define SPEC_wout_m1_16 0
5489 
5490 #ifndef CONFIG_USER_ONLY
5491 static void wout_m1_16a(DisasContext *s, DisasOps *o)
5492 {
5493     tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN);
5494 }
5495 #define SPEC_wout_m1_16a 0
5496 #endif
5497 
5498 static void wout_m1_32(DisasContext *s, DisasOps *o)
5499 {
5500     tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
5501 }
5502 #define SPEC_wout_m1_32 0
5503 
5504 #ifndef CONFIG_USER_ONLY
5505 static void wout_m1_32a(DisasContext *s, DisasOps *o)
5506 {
5507     tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN);
5508 }
5509 #define SPEC_wout_m1_32a 0
5510 #endif
5511 
5512 static void wout_m1_64(DisasContext *s, DisasOps *o)
5513 {
5514     tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
5515 }
5516 #define SPEC_wout_m1_64 0
5517 
5518 #ifndef CONFIG_USER_ONLY
5519 static void wout_m1_64a(DisasContext *s, DisasOps *o)
5520 {
5521     tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUQ | MO_ALIGN);
5522 }
5523 #define SPEC_wout_m1_64a 0
5524 #endif
5525 
5526 static void wout_m2_32(DisasContext *s, DisasOps *o)
5527 {
5528     tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
5529 }
5530 #define SPEC_wout_m2_32 0
5531 
5532 static void wout_in2_r1(DisasContext *s, DisasOps *o)
5533 {
5534     store_reg(get_field(s, r1), o->in2);
5535 }
5536 #define SPEC_wout_in2_r1 0
5537 
5538 static void wout_in2_r1_32(DisasContext *s, DisasOps *o)
5539 {
5540     store_reg32_i64(get_field(s, r1), o->in2);
5541 }
5542 #define SPEC_wout_in2_r1_32 0
5543 
5544 /* ====================================================================== */
5545 /* The "INput 1" generators.  These load the first operand to an insn.  */
5546 
5547 static void in1_r1(DisasContext *s, DisasOps *o)
5548 {
5549     o->in1 = load_reg(get_field(s, r1));
5550 }
5551 #define SPEC_in1_r1 0
5552 
5553 static void in1_r1_o(DisasContext *s, DisasOps *o)
5554 {
5555     o->in1 = regs[get_field(s, r1)];
5556     o->g_in1 = true;
5557 }
5558 #define SPEC_in1_r1_o 0
5559 
5560 static void in1_r1_32s(DisasContext *s, DisasOps *o)
5561 {
5562     o->in1 = tcg_temp_new_i64();
5563     tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1)]);
5564 }
5565 #define SPEC_in1_r1_32s 0
5566 
5567 static void in1_r1_32u(DisasContext *s, DisasOps *o)
5568 {
5569     o->in1 = tcg_temp_new_i64();
5570     tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1)]);
5571 }
5572 #define SPEC_in1_r1_32u 0
5573 
5574 static void in1_r1_sr32(DisasContext *s, DisasOps *o)
5575 {
5576     o->in1 = tcg_temp_new_i64();
5577     tcg_gen_shri_i64(o->in1, regs[get_field(s, r1)], 32);
5578 }
5579 #define SPEC_in1_r1_sr32 0
5580 
5581 static void in1_r1p1(DisasContext *s, DisasOps *o)
5582 {
5583     o->in1 = load_reg(get_field(s, r1) + 1);
5584 }
5585 #define SPEC_in1_r1p1 SPEC_r1_even
5586 
5587 static void in1_r1p1_o(DisasContext *s, DisasOps *o)
5588 {
5589     o->in1 = regs[get_field(s, r1) + 1];
5590     o->g_in1 = true;
5591 }
5592 #define SPEC_in1_r1p1_o SPEC_r1_even
5593 
5594 static void in1_r1p1_32s(DisasContext *s, DisasOps *o)
5595 {
5596     o->in1 = tcg_temp_new_i64();
5597     tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1) + 1]);
5598 }
5599 #define SPEC_in1_r1p1_32s SPEC_r1_even
5600 
5601 static void in1_r1p1_32u(DisasContext *s, DisasOps *o)
5602 {
5603     o->in1 = tcg_temp_new_i64();
5604     tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1) + 1]);
5605 }
5606 #define SPEC_in1_r1p1_32u SPEC_r1_even
5607 
5608 static void in1_r1_D32(DisasContext *s, DisasOps *o)
5609 {
5610     int r1 = get_field(s, r1);
5611     o->in1 = tcg_temp_new_i64();
5612     tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
5613 }
5614 #define SPEC_in1_r1_D32 SPEC_r1_even
5615 
5616 static void in1_r2(DisasContext *s, DisasOps *o)
5617 {
5618     o->in1 = load_reg(get_field(s, r2));
5619 }
5620 #define SPEC_in1_r2 0
5621 
5622 static void in1_r2_sr32(DisasContext *s, DisasOps *o)
5623 {
5624     o->in1 = tcg_temp_new_i64();
5625     tcg_gen_shri_i64(o->in1, regs[get_field(s, r2)], 32);
5626 }
5627 #define SPEC_in1_r2_sr32 0
5628 
5629 static void in1_r2_32u(DisasContext *s, DisasOps *o)
5630 {
5631     o->in1 = tcg_temp_new_i64();
5632     tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r2)]);
5633 }
5634 #define SPEC_in1_r2_32u 0
5635 
5636 static void in1_r3(DisasContext *s, DisasOps *o)
5637 {
5638     o->in1 = load_reg(get_field(s, r3));
5639 }
5640 #define SPEC_in1_r3 0
5641 
5642 static void in1_r3_o(DisasContext *s, DisasOps *o)
5643 {
5644     o->in1 = regs[get_field(s, r3)];
5645     o->g_in1 = true;
5646 }
5647 #define SPEC_in1_r3_o 0
5648 
5649 static void in1_r3_32s(DisasContext *s, DisasOps *o)
5650 {
5651     o->in1 = tcg_temp_new_i64();
5652     tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r3)]);
5653 }
5654 #define SPEC_in1_r3_32s 0
5655 
5656 static void in1_r3_32u(DisasContext *s, DisasOps *o)
5657 {
5658     o->in1 = tcg_temp_new_i64();
5659     tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r3)]);
5660 }
5661 #define SPEC_in1_r3_32u 0
5662 
5663 static void in1_r3_D32(DisasContext *s, DisasOps *o)
5664 {
5665     int r3 = get_field(s, r3);
5666     o->in1 = tcg_temp_new_i64();
5667     tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
5668 }
5669 #define SPEC_in1_r3_D32 SPEC_r3_even
5670 
5671 static void in1_e1(DisasContext *s, DisasOps *o)
5672 {
5673     o->in1 = load_freg32_i64(get_field(s, r1));
5674 }
5675 #define SPEC_in1_e1 0
5676 
5677 static void in1_f1(DisasContext *s, DisasOps *o)
5678 {
5679     o->in1 = load_freg(get_field(s, r1));
5680 }
5681 #define SPEC_in1_f1 0
5682 
5683 /* Load the high double word of an extended (128-bit) format FP number */
5684 static void in1_x2h(DisasContext *s, DisasOps *o)
5685 {
5686     o->in1 = load_freg(get_field(s, r2));
5687 }
5688 #define SPEC_in1_x2h SPEC_r2_f128
5689 
5690 static void in1_f3(DisasContext *s, DisasOps *o)
5691 {
5692     o->in1 = load_freg(get_field(s, r3));
5693 }
5694 #define SPEC_in1_f3 0
5695 
5696 static void in1_la1(DisasContext *s, DisasOps *o)
5697 {
5698     o->addr1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
5699 }
5700 #define SPEC_in1_la1 0
5701 
5702 static void in1_la2(DisasContext *s, DisasOps *o)
5703 {
5704     int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
5705     o->addr1 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
5706 }
5707 #define SPEC_in1_la2 0
5708 
5709 static void in1_m1_8u(DisasContext *s, DisasOps *o)
5710 {
5711     in1_la1(s, o);
5712     o->in1 = tcg_temp_new_i64();
5713     tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
5714 }
5715 #define SPEC_in1_m1_8u 0
5716 
5717 static void in1_m1_16s(DisasContext *s, DisasOps *o)
5718 {
5719     in1_la1(s, o);
5720     o->in1 = tcg_temp_new_i64();
5721     tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
5722 }
5723 #define SPEC_in1_m1_16s 0
5724 
5725 static void in1_m1_16u(DisasContext *s, DisasOps *o)
5726 {
5727     in1_la1(s, o);
5728     o->in1 = tcg_temp_new_i64();
5729     tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
5730 }
5731 #define SPEC_in1_m1_16u 0
5732 
5733 static void in1_m1_32s(DisasContext *s, DisasOps *o)
5734 {
5735     in1_la1(s, o);
5736     o->in1 = tcg_temp_new_i64();
5737     tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
5738 }
5739 #define SPEC_in1_m1_32s 0
5740 
5741 static void in1_m1_32u(DisasContext *s, DisasOps *o)
5742 {
5743     in1_la1(s, o);
5744     o->in1 = tcg_temp_new_i64();
5745     tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
5746 }
5747 #define SPEC_in1_m1_32u 0
5748 
5749 static void in1_m1_64(DisasContext *s, DisasOps *o)
5750 {
5751     in1_la1(s, o);
5752     o->in1 = tcg_temp_new_i64();
5753     tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
5754 }
5755 #define SPEC_in1_m1_64 0
5756 
5757 /* ====================================================================== */
5758 /* The "INput 2" generators.  These load the second operand to an insn.  */
5759 
5760 static void in2_r1_o(DisasContext *s, DisasOps *o)
5761 {
5762     o->in2 = regs[get_field(s, r1)];
5763     o->g_in2 = true;
5764 }
5765 #define SPEC_in2_r1_o 0
5766 
5767 static void in2_r1_16u(DisasContext *s, DisasOps *o)
5768 {
5769     o->in2 = tcg_temp_new_i64();
5770     tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r1)]);
5771 }
5772 #define SPEC_in2_r1_16u 0
5773 
5774 static void in2_r1_32u(DisasContext *s, DisasOps *o)
5775 {
5776     o->in2 = tcg_temp_new_i64();
5777     tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r1)]);
5778 }
5779 #define SPEC_in2_r1_32u 0
5780 
5781 static void in2_r1_D32(DisasContext *s, DisasOps *o)
5782 {
5783     int r1 = get_field(s, r1);
5784     o->in2 = tcg_temp_new_i64();
5785     tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
5786 }
5787 #define SPEC_in2_r1_D32 SPEC_r1_even
5788 
5789 static void in2_r2(DisasContext *s, DisasOps *o)
5790 {
5791     o->in2 = load_reg(get_field(s, r2));
5792 }
5793 #define SPEC_in2_r2 0
5794 
5795 static void in2_r2_o(DisasContext *s, DisasOps *o)
5796 {
5797     o->in2 = regs[get_field(s, r2)];
5798     o->g_in2 = true;
5799 }
5800 #define SPEC_in2_r2_o 0
5801 
5802 static void in2_r2_nz(DisasContext *s, DisasOps *o)
5803 {
5804     int r2 = get_field(s, r2);
5805     if (r2 != 0) {
5806         o->in2 = load_reg(r2);
5807     }
5808 }
5809 #define SPEC_in2_r2_nz 0
5810 
5811 static void in2_r2_8s(DisasContext *s, DisasOps *o)
5812 {
5813     o->in2 = tcg_temp_new_i64();
5814     tcg_gen_ext8s_i64(o->in2, regs[get_field(s, r2)]);
5815 }
5816 #define SPEC_in2_r2_8s 0
5817 
5818 static void in2_r2_8u(DisasContext *s, DisasOps *o)
5819 {
5820     o->in2 = tcg_temp_new_i64();
5821     tcg_gen_ext8u_i64(o->in2, regs[get_field(s, r2)]);
5822 }
5823 #define SPEC_in2_r2_8u 0
5824 
5825 static void in2_r2_16s(DisasContext *s, DisasOps *o)
5826 {
5827     o->in2 = tcg_temp_new_i64();
5828     tcg_gen_ext16s_i64(o->in2, regs[get_field(s, r2)]);
5829 }
5830 #define SPEC_in2_r2_16s 0
5831 
5832 static void in2_r2_16u(DisasContext *s, DisasOps *o)
5833 {
5834     o->in2 = tcg_temp_new_i64();
5835     tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r2)]);
5836 }
5837 #define SPEC_in2_r2_16u 0
5838 
5839 static void in2_r3(DisasContext *s, DisasOps *o)
5840 {
5841     o->in2 = load_reg(get_field(s, r3));
5842 }
5843 #define SPEC_in2_r3 0
5844 
5845 static void in2_r3_sr32(DisasContext *s, DisasOps *o)
5846 {
5847     o->in2 = tcg_temp_new_i64();
5848     tcg_gen_shri_i64(o->in2, regs[get_field(s, r3)], 32);
5849 }
5850 #define SPEC_in2_r3_sr32 0
5851 
5852 static void in2_r3_32u(DisasContext *s, DisasOps *o)
5853 {
5854     o->in2 = tcg_temp_new_i64();
5855     tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r3)]);
5856 }
5857 #define SPEC_in2_r3_32u 0
5858 
5859 static void in2_r2_32s(DisasContext *s, DisasOps *o)
5860 {
5861     o->in2 = tcg_temp_new_i64();
5862     tcg_gen_ext32s_i64(o->in2, regs[get_field(s, r2)]);
5863 }
5864 #define SPEC_in2_r2_32s 0
5865 
5866 static void in2_r2_32u(DisasContext *s, DisasOps *o)
5867 {
5868     o->in2 = tcg_temp_new_i64();
5869     tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r2)]);
5870 }
5871 #define SPEC_in2_r2_32u 0
5872 
5873 static void in2_r2_sr32(DisasContext *s, DisasOps *o)
5874 {
5875     o->in2 = tcg_temp_new_i64();
5876     tcg_gen_shri_i64(o->in2, regs[get_field(s, r2)], 32);
5877 }
5878 #define SPEC_in2_r2_sr32 0
5879 
5880 static void in2_e2(DisasContext *s, DisasOps *o)
5881 {
5882     o->in2 = load_freg32_i64(get_field(s, r2));
5883 }
5884 #define SPEC_in2_e2 0
5885 
5886 static void in2_f2(DisasContext *s, DisasOps *o)
5887 {
5888     o->in2 = load_freg(get_field(s, r2));
5889 }
5890 #define SPEC_in2_f2 0
5891 
5892 /* Load the low double word of an extended (128-bit) format FP number */
5893 static void in2_x2l(DisasContext *s, DisasOps *o)
5894 {
5895     o->in2 = load_freg(get_field(s, r2) + 2);
5896 }
5897 #define SPEC_in2_x2l SPEC_r2_f128
5898 
5899 static void in2_ra2(DisasContext *s, DisasOps *o)
5900 {
5901     int r2 = get_field(s, r2);
5902 
5903     /* Note: *don't* treat !r2 as 0, use the reg value. */
5904     o->in2 = tcg_temp_new_i64();
5905     gen_addi_and_wrap_i64(s, o->in2, regs[r2], 0);
5906 }
5907 #define SPEC_in2_ra2 0
5908 
5909 static void in2_a2(DisasContext *s, DisasOps *o)
5910 {
5911     int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
5912     o->in2 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
5913 }
5914 #define SPEC_in2_a2 0
5915 
5916 static void in2_ri2(DisasContext *s, DisasOps *o)
5917 {
5918     o->in2 = tcg_const_i64(s->base.pc_next + (int64_t)get_field(s, i2) * 2);
5919 }
5920 #define SPEC_in2_ri2 0
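
/*
 * The ri2 operand is a PC-relative address scaled by halfwords.  As a
 * worked example with assumed values: if s->base.pc_next = 0x10000 and
 * the signed immediate i2 = -4, the computed address is
 * 0x10000 + (-4 * 2) = 0xfff8.
 */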
5921 
5922 static void in2_sh(DisasContext *s, DisasOps *o)
5923 {
5924     int b2 = get_field(s, b2);
5925     int d2 = get_field(s, d2);
5926 
5927     if (b2 == 0) {
5928         o->in2 = tcg_const_i64(d2 & 0x3f);
5929     } else {
5930         o->in2 = get_address(s, 0, b2, d2);
5931         tcg_gen_andi_i64(o->in2, o->in2, 0x3f);
5932     }
5933 }
5934 #define SPEC_in2_sh 0
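
/*
 * A worked example of the shift-amount computation above, with assumed
 * values: for b2 = 3, d2 = 0x41 and regs[3] = 0x10, the effective
 * address is 0x10 + 0x41 = 0x51, and masking with 0x3f leaves a shift
 * count of 0x11 (17).  With b2 = 0, no base register is consulted and
 * the count is simply d2 & 0x3f.
 */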
5935 
5936 static void in2_m2_8u(DisasContext *s, DisasOps *o)
5937 {
5938     in2_a2(s, o);
5939     tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
5940 }
5941 #define SPEC_in2_m2_8u 0
5942 
5943 static void in2_m2_16s(DisasContext *s, DisasOps *o)
5944 {
5945     in2_a2(s, o);
5946     tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
5947 }
5948 #define SPEC_in2_m2_16s 0
5949 
5950 static void in2_m2_16u(DisasContext *s, DisasOps *o)
5951 {
5952     in2_a2(s, o);
5953     tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
5954 }
5955 #define SPEC_in2_m2_16u 0
5956 
5957 static void in2_m2_32s(DisasContext *s, DisasOps *o)
5958 {
5959     in2_a2(s, o);
5960     tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
5961 }
5962 #define SPEC_in2_m2_32s 0
5963 
5964 static void in2_m2_32u(DisasContext *s, DisasOps *o)
5965 {
5966     in2_a2(s, o);
5967     tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
5968 }
5969 #define SPEC_in2_m2_32u 0
5970 
5971 #ifndef CONFIG_USER_ONLY
5972 static void in2_m2_32ua(DisasContext *s, DisasOps *o)
5973 {
5974     in2_a2(s, o);
5975     tcg_gen_qemu_ld_tl(o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN);
5976 }
5977 #define SPEC_in2_m2_32ua 0
5978 #endif
5979 
5980 static void in2_m2_64(DisasContext *s, DisasOps *o)
5981 {
5982     in2_a2(s, o);
5983     tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
5984 }
5985 #define SPEC_in2_m2_64 0
5986 
5987 static void in2_m2_64w(DisasContext *s, DisasOps *o)
5988 {
5989     in2_a2(s, o);
5990     tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
5991     gen_addi_and_wrap_i64(s, o->in2, o->in2, 0);
5992 }
5993 #define SPEC_in2_m2_64w 0
5994 
5995 #ifndef CONFIG_USER_ONLY
5996 static void in2_m2_64a(DisasContext *s, DisasOps *o)
5997 {
5998     in2_a2(s, o);
5999     tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ | MO_ALIGN);
6000 }
6001 #define SPEC_in2_m2_64a 0
6002 #endif
6003 
6004 static void in2_mri2_16u(DisasContext *s, DisasOps *o)
6005 {
6006     in2_ri2(s, o);
6007     tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
6008 }
6009 #define SPEC_in2_mri2_16u 0
6010 
6011 static void in2_mri2_32s(DisasContext *s, DisasOps *o)
6012 {
6013     in2_ri2(s, o);
6014     tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
6015 }
6016 #define SPEC_in2_mri2_32s 0
6017 
6018 static void in2_mri2_32u(DisasContext *s, DisasOps *o)
6019 {
6020     in2_ri2(s, o);
6021     tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
6022 }
6023 #define SPEC_in2_mri2_32u 0
6024 
6025 static void in2_mri2_64(DisasContext *s, DisasOps *o)
6026 {
6027     in2_ri2(s, o);
6028     tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
6029 }
6030 #define SPEC_in2_mri2_64 0
6031 
6032 static void in2_i2(DisasContext *s, DisasOps *o)
6033 {
6034     o->in2 = tcg_const_i64(get_field(s, i2));
6035 }
6036 #define SPEC_in2_i2 0
6037 
6038 static void in2_i2_8u(DisasContext *s, DisasOps *o)
6039 {
6040     o->in2 = tcg_const_i64((uint8_t)get_field(s, i2));
6041 }
6042 #define SPEC_in2_i2_8u 0
6043 
6044 static void in2_i2_16u(DisasContext *s, DisasOps *o)
6045 {
6046     o->in2 = tcg_const_i64((uint16_t)get_field(s, i2));
6047 }
6048 #define SPEC_in2_i2_16u 0
6049 
6050 static void in2_i2_32u(DisasContext *s, DisasOps *o)
6051 {
6052     o->in2 = tcg_const_i64((uint32_t)get_field(s, i2));
6053 }
6054 #define SPEC_in2_i2_32u 0
6055 
6056 static void in2_i2_16u_shl(DisasContext *s, DisasOps *o)
6057 {
6058     uint64_t i2 = (uint16_t)get_field(s, i2);
6059     o->in2 = tcg_const_i64(i2 << s->insn->data);
6060 }
6061 #define SPEC_in2_i2_16u_shl 0
6062 
6063 static void in2_i2_32u_shl(DisasContext *s, DisasOps *o)
6064 {
6065     uint64_t i2 = (uint32_t)get_field(s, i2);
6066     o->in2 = tcg_const_i64(i2 << s->insn->data);
6067 }
6068 #define SPEC_in2_i2_32u_shl 0
6069 
6070 #ifndef CONFIG_USER_ONLY
6071 static void in2_insn(DisasContext *s, DisasOps *o)
6072 {
6073     o->in2 = tcg_const_i64(s->fields.raw_insn);
6074 }
6075 #define SPEC_in2_insn 0
6076 #endif
6077 
6078 /* ====================================================================== */
6079 
6080 /* Find opc within the table of insns.  This is formulated as a switch
6081    statement so that (1) we get compile-time notice of cut-paste errors
6082    for duplicated opcodes, and (2) the compiler generates the binary
6083    search tree, rather than us having to post-process the table.  */
6084 
6085 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
6086     E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0)
6087 
6088 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
6089     E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0)
6090 
6091 #define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \
6092     E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL)
6093 
6094 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM,
6095 
6096 enum DisasInsnEnum {
6097 #include "insn-data.def"
6098 };
6099 
6100 #undef E
6101 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) {                   \
6102     .opc = OPC,                                                             \
6103     .flags = FL,                                                            \
6104     .fmt = FMT_##FT,                                                        \
6105     .fac = FAC_##FC,                                                        \
6106     .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W,  \
6107     .name = #NM,                                                            \
6108     .help_in1 = in1_##I1,                                                   \
6109     .help_in2 = in2_##I2,                                                   \
6110     .help_prep = prep_##P,                                                  \
6111     .help_wout = wout_##W,                                                  \
6112     .help_cout = cout_##CC,                                                 \
6113     .help_op = op_##OP,                                                     \
6114     .data = D                                                               \
6115  },
6116 
6117 /* Allow 0 to be used for NULL in the table below.  */
6118 #define in1_0  NULL
6119 #define in2_0  NULL
6120 #define prep_0  NULL
6121 #define wout_0  NULL
6122 #define cout_0  NULL
6123 #define op_0  NULL
6124 
6125 #define SPEC_in1_0 0
6126 #define SPEC_in2_0 0
6127 #define SPEC_prep_0 0
6128 #define SPEC_wout_0 0
6129 
6130 /* Give smaller names to the various facilities.  */
6131 #define FAC_Z           S390_FEAT_ZARCH
6132 #define FAC_CASS        S390_FEAT_COMPARE_AND_SWAP_AND_STORE
6133 #define FAC_DFP         S390_FEAT_DFP
6134 #define FAC_DFPR        S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* DFP-rounding */
6135 #define FAC_DO          S390_FEAT_STFLE_45 /* distinct-operands */
6136 #define FAC_EE          S390_FEAT_EXECUTE_EXT
6137 #define FAC_EI          S390_FEAT_EXTENDED_IMMEDIATE
6138 #define FAC_FPE         S390_FEAT_FLOATING_POINT_EXT
6139 #define FAC_FPSSH       S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPS-sign-handling */
6140 #define FAC_FPRGR       S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPR-GR-transfer */
6141 #define FAC_GIE         S390_FEAT_GENERAL_INSTRUCTIONS_EXT
6142 #define FAC_HFP_MA      S390_FEAT_HFP_MADDSUB
6143 #define FAC_HW          S390_FEAT_STFLE_45 /* high-word */
6144 #define FAC_IEEEE_SIM   S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* IEEE-exception-simulation */
6145 #define FAC_MIE         S390_FEAT_STFLE_49 /* misc-instruction-extensions */
6146 #define FAC_LAT         S390_FEAT_STFLE_49 /* load-and-trap */
6147 #define FAC_LOC         S390_FEAT_STFLE_45 /* load/store on condition 1 */
6148 #define FAC_LOC2        S390_FEAT_STFLE_53 /* load/store on condition 2 */
6149 #define FAC_LD          S390_FEAT_LONG_DISPLACEMENT
6150 #define FAC_PC          S390_FEAT_STFLE_45 /* population count */
6151 #define FAC_SCF         S390_FEAT_STORE_CLOCK_FAST
6152 #define FAC_SFLE        S390_FEAT_STFLE
6153 #define FAC_ILA         S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
6154 #define FAC_MVCOS       S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
6155 #define FAC_LPP         S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
6156 #define FAC_DAT_ENH     S390_FEAT_DAT_ENH
6157 #define FAC_E2          S390_FEAT_EXTENDED_TRANSLATION_2
6158 #define FAC_EH          S390_FEAT_STFLE_49 /* execution-hint */
6159 #define FAC_PPA         S390_FEAT_STFLE_49 /* processor-assist */
6160 #define FAC_LZRB        S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
6161 #define FAC_ETF3        S390_FEAT_EXTENDED_TRANSLATION_3
6162 #define FAC_MSA         S390_FEAT_MSA /* message-security-assist facility */
6163 #define FAC_MSA3        S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
6164 #define FAC_MSA4        S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
6165 #define FAC_MSA5        S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
6166 #define FAC_MSA8        S390_FEAT_MSA_EXT_8 /* msa-extension-8 facility */
6167 #define FAC_ECT         S390_FEAT_EXTRACT_CPU_TIME
6168 #define FAC_PCI         S390_FEAT_ZPCI /* z/PCI facility */
6169 #define FAC_AIS         S390_FEAT_ADAPTER_INT_SUPPRESSION
6170 #define FAC_V           S390_FEAT_VECTOR /* vector facility */
6171 #define FAC_VE          S390_FEAT_VECTOR_ENH /* vector enhancements facility 1 */
6172 #define FAC_MIE2        S390_FEAT_MISC_INSTRUCTION_EXT2 /* miscellaneous-instruction-extensions facility 2 */
6173 
6174 static const DisasInsn insn_info[] = {
6175 #include "insn-data.def"
6176 };
6177 
6178 #undef E
6179 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \
6180     case OPC: return &insn_info[insn_ ## NM];
6181 
6182 static const DisasInsn *lookup_opc(uint16_t opc)
6183 {
6184     switch (opc) {
6185 #include "insn-data.def"
6186     default:
6187         return NULL;
6188     }
6189 }
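
/*
 * To illustrate the triple expansion, consider a table entry of the
 * shape below (illustrative only; see insn-data.def for the real
 * entries):
 *
 *     C(0x1a00, AR, RR_a, Z, r1, r2, new, r1_32, add, adds32)
 *
 * Included three times, it expands to:
 *   (1) enum DisasInsnEnum:  insn_AR,
 *   (2) insn_info[]:         { .opc = 0x1a00, .help_in1 = in1_r1,
 *                              .help_in2 = in2_r2, .help_prep = prep_new,
 *                              .help_wout = wout_r1_32, .help_op = op_add,
 *                              .help_cout = cout_adds32, ... },
 *   (3) lookup_opc():        case 0x1a00: return &insn_info[insn_AR];
 */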
6190 
6191 #undef F
6192 #undef E
6193 #undef D
6194 #undef C
6195 
6196 /* Extract a field from the insn.  The INSN should be left-aligned in
6197    the uint64_t so that we can more easily utilize the big-bit-endian
6198    definitions we extract from the Principles of Operation.  */
6199 
6200 static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
6201 {
6202     uint32_t r, m;
6203 
6204     if (f->size == 0) {
6205         return;
6206     }
6207 
6208     /* Zero extract the field from the insn.  */
6209     r = (insn << f->beg) >> (64 - f->size);
6210 
6211     /* Sign-extend, or un-swap the field as necessary.  */
6212     switch (f->type) {
6213     case 0: /* unsigned */
6214         break;
6215     case 1: /* signed */
6216         assert(f->size <= 32);
6217         m = 1u << (f->size - 1);
6218         r = (r ^ m) - m;
6219         break;
6220     case 2: /* dl+dh split, signed 20 bit. */
6221         r = ((int8_t)r << 12) | (r >> 8);
6222         break;
6223     case 3: /* MSB stored in RXB */
6224         g_assert(f->size == 4);
6225         switch (f->beg) {
6226         case 8:
6227             r |= extract64(insn, 63 - 36, 1) << 4;
6228             break;
6229         case 12:
6230             r |= extract64(insn, 63 - 37, 1) << 4;
6231             break;
6232         case 16:
6233             r |= extract64(insn, 63 - 38, 1) << 4;
6234             break;
6235         case 32:
6236             r |= extract64(insn, 63 - 39, 1) << 4;
6237             break;
6238         default:
6239             g_assert_not_reached();
6240         }
6241         break;
6242     default:
6243         abort();
6244     }
6245 
6246     /*
6247      * Validate that the "compressed" encoding we selected above is valid.
6248      * I.e. we haven't made two different original fields overlap.
6249      */
6250     assert(((o->presentC >> f->indexC) & 1) == 0);
6251     o->presentC |= 1 << f->indexC;
6252     o->presentO |= 1 << f->indexO;
6253 
6254     o->c[f->indexC] = r;
6255 }
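
/*
 * A worked example of the extraction above: for a 4-bit unsigned r1
 * field beginning at bit 8 of the left-aligned insn, e.g.
 * insn = 0x1a12000000000000 (AR r1,r2), we get
 *
 *     r = (insn << 8) >> (64 - 4) = 0x1.
 *
 * For the split 20-bit displacement (type 2), the field covers DL (12
 * bits) immediately followed by DH (8 bits), so r arrives as DL||DH;
 * ((int8_t)r << 12) | (r >> 8) swaps this back into DH||DL and
 * sign-extends from bit 19.
 */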
6256 
6257 /* Look up the insn at the current PC, extracting the operands into O and
6258    returning the info struct for the insn.  Returns NULL for invalid insn.  */
6259 
6260 static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s)
6261 {
6262     uint64_t insn, pc = s->base.pc_next;
6263     int op, op2, ilen;
6264     const DisasInsn *info;
6265 
6266     if (unlikely(s->ex_value)) {
6267         /* Drop the EX data now, so that it's clear on exception paths.  */
6268         TCGv_i64 zero = tcg_const_i64(0);
6269         tcg_gen_st_i64(zero, cpu_env, offsetof(CPUS390XState, ex_value));
6270         tcg_temp_free_i64(zero);
6271 
6272         /* Extract the values saved by EXECUTE.  */
6273         insn = s->ex_value & 0xffffffffffff0000ull;
6274         ilen = s->ex_value & 0xf;
6275         op = insn >> 56;
6276     } else {
6277         insn = ld_code2(env, s, pc);
6278         op = (insn >> 8) & 0xff;
6279         ilen = get_ilen(op);
6280         switch (ilen) {
6281         case 2:
6282             insn = insn << 48;
6283             break;
6284         case 4:
6285             insn = ld_code4(env, s, pc) << 32;
6286             break;
6287         case 6:
6288             insn = (insn << 48) | (ld_code4(env, s, pc + 2) << 16);
6289             break;
6290         default:
6291             g_assert_not_reached();
6292         }
6293     }
6294     s->pc_tmp = s->base.pc_next + ilen;
6295     s->ilen = ilen;
6296 
6297     /* We can't actually determine the insn format until we've looked up
6298        the full insn opcode, which we can't do without locating the
6299        secondary opcode.  Assume by default that OP2 is at bit 40; for
6300        those smaller insns that don't actually have a secondary opcode,
6301        this will correctly result in OP2 = 0. */
6302     switch (op) {
6303     case 0x01: /* E */
6304     case 0x80: /* S */
6305     case 0x82: /* S */
6306     case 0x93: /* S */
6307     case 0xb2: /* S, RRF, RRE, IE */
6308     case 0xb3: /* RRE, RRD, RRF */
6309     case 0xb9: /* RRE, RRF */
6310     case 0xe5: /* SSE, SIL */
6311         op2 = (insn << 8) >> 56;
6312         break;
6313     case 0xa5: /* RI */
6314     case 0xa7: /* RI */
6315     case 0xc0: /* RIL */
6316     case 0xc2: /* RIL */
6317     case 0xc4: /* RIL */
6318     case 0xc6: /* RIL */
6319     case 0xc8: /* SSF */
6320     case 0xcc: /* RIL */
6321         op2 = (insn << 12) >> 60;
6322         break;
6323     case 0xc5: /* MII */
6324     case 0xc7: /* SMI */
6325     case 0xd0 ... 0xdf: /* SS */
6326     case 0xe1: /* SS */
6327     case 0xe2: /* SS */
6328     case 0xe8: /* SS */
6329     case 0xe9: /* SS */
6330     case 0xea: /* SS */
6331     case 0xee ... 0xf3: /* SS */
6332     case 0xf8 ... 0xfd: /* SS */
6333         op2 = 0;
6334         break;
6335     default:
6336         op2 = (insn << 40) >> 56;
6337         break;
6338     }
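
    /*
     * Since the insn is left-aligned, each shift pair above isolates a
     * big-endian bit range: (insn << 8) >> 56 yields bits 8-15 (a full
     * second opcode byte), (insn << 12) >> 60 yields bits 12-15 (the
     * 4-bit op2 of the RI-style formats), and the default
     * (insn << 40) >> 56 yields bits 40-47.
     */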
6339 
6340     memset(&s->fields, 0, sizeof(s->fields));
6341     s->fields.raw_insn = insn;
6342     s->fields.op = op;
6343     s->fields.op2 = op2;
6344 
6345     /* Look up the instruction.  */
6346     info = lookup_opc(op << 8 | op2);
6347     s->insn = info;
6348 
6349     /* If we found it, extract the operands.  */
6350     if (info != NULL) {
6351         DisasFormat fmt = info->fmt;
6352         int i;
6353 
6354         for (i = 0; i < NUM_C_FIELD; ++i) {
6355             extract_field(&s->fields, &format_info[fmt].op[i], insn);
6356         }
6357     }
6358     return info;
6359 }
6360 
6361 static bool is_afp_reg(int reg)
6362 {
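    /* Only f0, f2, f4 and f6 exist without the AFP facility. */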
6363     return reg % 2 || reg > 6;
6364 }
6365 
6366 static bool is_fp_pair(int reg)
6367 {
6368     /* 0,1,4,5,8,9,12,13: to exclude the others, check for single bit */
6369     return !(reg & 0x2);
6370 }
6371 
6372 static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
6373 {
6374     const DisasInsn *insn;
6375     DisasJumpType ret = DISAS_NEXT;
6376     DisasOps o = {};
6377     bool icount = false;
6378 
6379     /* Search for the insn in the table.  */
6380     insn = extract_insn(env, s);
6381 
6382     /* Update insn_start now that we know the ILEN.  */
6383     tcg_set_insn_start_param(s->insn_start, 2, s->ilen);
6384 
6385     /* Not found means unimplemented/illegal opcode.  */
6386     if (insn == NULL) {
6387         qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
6388                       s->fields.op, s->fields.op2);
6389         gen_illegal_opcode(s);
6390         ret = DISAS_NORETURN;
6391         goto out;
6392     }
6393 
6394 #ifndef CONFIG_USER_ONLY
6395     if (s->base.tb->flags & FLAG_MASK_PER) {
6396         TCGv_i64 addr = tcg_const_i64(s->base.pc_next);
6397         gen_helper_per_ifetch(cpu_env, addr);
6398         tcg_temp_free_i64(addr);
6399     }
6400 #endif
6401 
6402     /* process flags */
6403     if (insn->flags) {
6404         /* privileged instruction */
6405         if ((s->base.tb->flags & FLAG_MASK_PSTATE) && (insn->flags & IF_PRIV)) {
6406             gen_program_exception(s, PGM_PRIVILEGED);
6407             ret = DISAS_NORETURN;
6408             goto out;
6409         }
6410 
6411         /* if AFP is not enabled, instructions and registers are forbidden */
6412         if (!(s->base.tb->flags & FLAG_MASK_AFP)) {
6413             uint8_t dxc = 0;
6414 
6415             if ((insn->flags & IF_AFP1) && is_afp_reg(get_field(s, r1))) {
6416                 dxc = 1;
6417             }
6418             if ((insn->flags & IF_AFP2) && is_afp_reg(get_field(s, r2))) {
6419                 dxc = 1;
6420             }
6421             if ((insn->flags & IF_AFP3) && is_afp_reg(get_field(s, r3))) {
6422                 dxc = 1;
6423             }
6424             if (insn->flags & IF_BFP) {
6425                 dxc = 2;
6426             }
6427             if (insn->flags & IF_DFP) {
6428                 dxc = 3;
6429             }
6430             if (insn->flags & IF_VEC) {
6431                 dxc = 0xfe;
6432             }
6433             if (dxc) {
6434                 gen_data_exception(dxc);
6435                 ret = DISAS_NORETURN;
6436                 goto out;
6437             }
6438         }
6439 
6440         /* if vector instructions not enabled, executing them is forbidden */
6441         if (insn->flags & IF_VEC) {
6442             if (!((s->base.tb->flags & FLAG_MASK_VECTOR))) {
6443                 gen_data_exception(0xfe);
6444                 ret = DISAS_NORETURN;
6445                 goto out;
6446             }
6447         }
6448 
6449         /* input/output is the special case for icount mode */
6450         if (unlikely(insn->flags & IF_IO)) {
6451             icount = tb_cflags(s->base.tb) & CF_USE_ICOUNT;
6452             if (icount) {
6453                 gen_io_start();
6454             }
6455         }
6456     }
6457 
6458     /* Check for insn specification exceptions.  */
6459     if (insn->spec) {
6460         if ((insn->spec & SPEC_r1_even && get_field(s, r1) & 1) ||
6461             (insn->spec & SPEC_r2_even && get_field(s, r2) & 1) ||
6462             (insn->spec & SPEC_r3_even && get_field(s, r3) & 1) ||
6463             (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(s, r1))) ||
6464             (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(s, r2)))) {
6465             gen_program_exception(s, PGM_SPECIFICATION);
6466             ret = DISAS_NORETURN;
6467             goto out;
6468         }
6469     }
6470 
6471     /* Implement the instruction.  */
6472     if (insn->help_in1) {
6473         insn->help_in1(s, &o);
6474     }
6475     if (insn->help_in2) {
6476         insn->help_in2(s, &o);
6477     }
6478     if (insn->help_prep) {
6479         insn->help_prep(s, &o);
6480     }
6481     if (insn->help_op) {
6482         ret = insn->help_op(s, &o);
6483     }
6484     if (ret != DISAS_NORETURN) {
6485         if (insn->help_wout) {
6486             insn->help_wout(s, &o);
6487         }
6488         if (insn->help_cout) {
6489             insn->help_cout(s, &o);
6490         }
6491     }
6492 
6493     /* Free any temporaries created by the helpers.  */
6494     if (o.out && !o.g_out) {
6495         tcg_temp_free_i64(o.out);
6496     }
6497     if (o.out2 && !o.g_out2) {
6498         tcg_temp_free_i64(o.out2);
6499     }
6500     if (o.in1 && !o.g_in1) {
6501         tcg_temp_free_i64(o.in1);
6502     }
6503     if (o.in2 && !o.g_in2) {
6504         tcg_temp_free_i64(o.in2);
6505     }
6506     if (o.addr1) {
6507         tcg_temp_free_i64(o.addr1);
6508     }
6509 
6510     /* io should be the last instruction in tb when icount is enabled */
6511     if (unlikely(icount && ret == DISAS_NEXT)) {
6512         ret = DISAS_PC_STALE;
6513     }
6514 
6515 #ifndef CONFIG_USER_ONLY
6516     if (s->base.tb->flags & FLAG_MASK_PER) {
6517         /* An exception might be triggered, save PSW if not already done.  */
6518         if (ret == DISAS_NEXT || ret == DISAS_PC_STALE) {
6519             tcg_gen_movi_i64(psw_addr, s->pc_tmp);
6520         }
6521 
6522         /* Call the helper to check for a possible PER exception.  */
6523         gen_helper_per_check_exception(cpu_env);
6524     }
6525 #endif
6526 
6527 out:
6528     /* Advance to the next instruction.  */
6529     s->base.pc_next = s->pc_tmp;
6530     return ret;
6531 }
6532 
6533 static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
6534 {
6535     DisasContext *dc = container_of(dcbase, DisasContext, base);
6536 
6537     /* 31-bit mode */
6538     if (!(dc->base.tb->flags & FLAG_MASK_64)) {
6539         dc->base.pc_first &= 0x7fffffff;
6540         dc->base.pc_next = dc->base.pc_first;
6541     }
6542 
6543     dc->cc_op = CC_OP_DYNAMIC;
6544     dc->ex_value = dc->base.tb->cs_base;
6545 }
6546 
6547 static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs)
6548 {
6549 }
6550 
6551 static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
6552 {
6553     DisasContext *dc = container_of(dcbase, DisasContext, base);
6554 
6555     /* Delay the set of ilen until we've read the insn. */
6556     tcg_gen_insn_start(dc->base.pc_next, dc->cc_op, 0);
6557     dc->insn_start = tcg_last_op();
6558 }
6559 
6560 static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
6561 {
6562     CPUS390XState *env = cs->env_ptr;
6563     DisasContext *dc = container_of(dcbase, DisasContext, base);
6564 
6565     dc->base.is_jmp = translate_one(env, dc);
6566     if (dc->base.is_jmp == DISAS_NEXT) {
6567         uint64_t page_start;
6568 
6569         page_start = dc->base.pc_first & TARGET_PAGE_MASK;
6570         if (dc->base.pc_next - page_start >= TARGET_PAGE_SIZE || dc->ex_value) {
6571             dc->base.is_jmp = DISAS_TOO_MANY;
6572         }
6573     }
6574 }
6575 
6576 static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
6577 {
6578     DisasContext *dc = container_of(dcbase, DisasContext, base);
6579 
6580     switch (dc->base.is_jmp) {
6581     case DISAS_GOTO_TB:
6582     case DISAS_NORETURN:
6583         break;
6584     case DISAS_TOO_MANY:
6585     case DISAS_PC_STALE:
6586     case DISAS_PC_STALE_NOCHAIN:
6587         update_psw_addr(dc);
6588         /* FALLTHRU */
6589     case DISAS_PC_UPDATED:
6590         /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
6591            cc op type is in env */
6592         update_cc_op(dc);
6593         /* FALLTHRU */
6594     case DISAS_PC_CC_UPDATED:
6595         /* Exit the TB, either by raising a debug exception or by return.  */
6596         if ((dc->base.tb->flags & FLAG_MASK_PER) ||
6597              dc->base.is_jmp == DISAS_PC_STALE_NOCHAIN) {
6598             tcg_gen_exit_tb(NULL, 0);
6599         } else {
6600             tcg_gen_lookup_and_goto_ptr();
6601         }
6602         break;
6603     default:
6604         g_assert_not_reached();
6605     }
6606 }
6607 
6608 static void s390x_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
6609 {
6610     DisasContext *dc = container_of(dcbase, DisasContext, base);
6611 
6612     if (unlikely(dc->ex_value)) {
6613         /* ??? Unfortunately log_target_disas can't use host memory.  */
6614         qemu_log("IN: EXECUTE %016" PRIx64, dc->ex_value);
6615     } else {
6616         qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
6617         log_target_disas(cs, dc->base.pc_first, dc->base.tb->size);
6618     }
6619 }
6620 
6621 static const TranslatorOps s390x_tr_ops = {
6622     .init_disas_context = s390x_tr_init_disas_context,
6623     .tb_start           = s390x_tr_tb_start,
6624     .insn_start         = s390x_tr_insn_start,
6625     .translate_insn     = s390x_tr_translate_insn,
6626     .tb_stop            = s390x_tr_tb_stop,
6627     .disas_log          = s390x_tr_disas_log,
6628 };
6629 
6630 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
6631 {
6632     DisasContext dc;
6633 
6634     translator_loop(&s390x_tr_ops, &dc.base, cs, tb, max_insns);
6635 }
6636 
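/*
 * The data[] layout below mirrors the values recorded per insn in
 * s390x_tr_insn_start() via tcg_gen_insn_start(): data[0] is the psw
 * address, data[1] the cc_op, and data[2] the ILEN that translate_one()
 * patched into insn_start once the insn had been decoded.
 */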
6637 void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb,
6638                           target_ulong *data)
6639 {
6640     int cc_op = data[1];
6641 
6642     env->psw.addr = data[0];
6643 
6644     /* Update the CC opcode if it is not already up-to-date.  */
6645     if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
6646         env->cc_op = cc_op;
6647     }
6648 
6649     /* Record ILEN.  */
6650     env->int_pgm_ilen = data[2];
6651 }
6652