xref: /openbmc/qemu/target/s390x/tcg/translate.c (revision 0b29090a)
1 /*
2  *  S/390 translation
3  *
4  *  Copyright (c) 2009 Ulrich Hecht
5  *  Copyright (c) 2010 Alexander Graf
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
24 
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 #  define LOG_DISAS(...) qemu_log(__VA_ARGS__)
27 #else
28 #  define LOG_DISAS(...) do { } while (0)
29 #endif
30 
31 #include "qemu/osdep.h"
32 #include "cpu.h"
33 #include "s390x-internal.h"
34 #include "disas/disas.h"
35 #include "exec/exec-all.h"
36 #include "tcg/tcg-op.h"
37 #include "tcg/tcg-op-gvec.h"
38 #include "qemu/log.h"
39 #include "qemu/host-utils.h"
40 #include "exec/cpu_ldst.h"
41 #include "exec/gen-icount.h"
42 #include "exec/helper-proto.h"
43 #include "exec/helper-gen.h"
44 
45 #include "exec/translator.h"
46 #include "exec/log.h"
47 #include "qemu/atomic128.h"
48 
49 
50 /* Information that (most) every instruction needs to manipulate.  */
51 typedef struct DisasContext DisasContext;
52 typedef struct DisasInsn DisasInsn;
53 typedef struct DisasFields DisasFields;
54 
55 /*
56  * Define a structure to hold the decoded fields.  We'll store each inside
57  * an array indexed by an enum.  In order to conserve memory, we'll arrange
58  * for fields that do not exist at the same time to overlap, thus the "C"
59  * for compact.  For checking purposes there is also an "O" (original)
60  * index, which is applied to the availability bitmaps.
61  */
62 
63 enum DisasFieldIndexO {
64     FLD_O_r1,
65     FLD_O_r2,
66     FLD_O_r3,
67     FLD_O_m1,
68     FLD_O_m3,
69     FLD_O_m4,
70     FLD_O_m5,
71     FLD_O_m6,
72     FLD_O_b1,
73     FLD_O_b2,
74     FLD_O_b4,
75     FLD_O_d1,
76     FLD_O_d2,
77     FLD_O_d4,
78     FLD_O_x2,
79     FLD_O_l1,
80     FLD_O_l2,
81     FLD_O_i1,
82     FLD_O_i2,
83     FLD_O_i3,
84     FLD_O_i4,
85     FLD_O_i5,
86     FLD_O_v1,
87     FLD_O_v2,
88     FLD_O_v3,
89     FLD_O_v4,
90 };
91 
92 enum DisasFieldIndexC {
93     FLD_C_r1 = 0,
94     FLD_C_m1 = 0,
95     FLD_C_b1 = 0,
96     FLD_C_i1 = 0,
97     FLD_C_v1 = 0,
98 
99     FLD_C_r2 = 1,
100     FLD_C_b2 = 1,
101     FLD_C_i2 = 1,
102 
103     FLD_C_r3 = 2,
104     FLD_C_m3 = 2,
105     FLD_C_i3 = 2,
106     FLD_C_v3 = 2,
107 
108     FLD_C_m4 = 3,
109     FLD_C_b4 = 3,
110     FLD_C_i4 = 3,
111     FLD_C_l1 = 3,
112     FLD_C_v4 = 3,
113 
114     FLD_C_i5 = 4,
115     FLD_C_d1 = 4,
116     FLD_C_m5 = 4,
117 
118     FLD_C_d2 = 5,
119     FLD_C_m6 = 5,
120 
121     FLD_C_d4 = 6,
122     FLD_C_x2 = 6,
123     FLD_C_l2 = 6,
124     FLD_C_v2 = 6,
125 
126     NUM_C_FIELD = 7
127 };
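/*
 * Example of the overlap: r1, m1, b1, i1 and v1 never occur in the same
 * instruction format, so they all share compact slot 0 above, while each
 * keeps its own FLD_O_* bit in the presentO availability mask.
 */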
128 
129 struct DisasFields {
130     uint64_t raw_insn;
131     unsigned op:8;
132     unsigned op2:8;
133     unsigned presentC:16;
134     unsigned int presentO;
135     int c[NUM_C_FIELD];
136 };
137 
138 struct DisasContext {
139     DisasContextBase base;
140     const DisasInsn *insn;
141     TCGOp *insn_start;
142     DisasFields fields;
143     uint64_t ex_value;
144     /*
145      * During translate_one(), pc_tmp is used to determine the instruction
146      * to be executed after base.pc_next - e.g. next sequential instruction
147      * or a branch target.
148      */
149     uint64_t pc_tmp;
150     uint32_t ilen;
151     enum cc_op cc_op;
152     bool exit_to_mainloop;
153 };
154 
155 /* Information carried about a condition to be evaluated.  */
156 typedef struct {
157     TCGCond cond:8;
158     bool is_64;
159     union {
160         struct { TCGv_i64 a, b; } s64;
161         struct { TCGv_i32 a, b; } s32;
162     } u;
163 } DisasCompare;
164 
165 #ifdef DEBUG_INLINE_BRANCHES
166 static uint64_t inline_branch_hit[CC_OP_MAX];
167 static uint64_t inline_branch_miss[CC_OP_MAX];
168 #endif
169 
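/*
 * Compute the link information for a branch: the full address in 64-bit
 * mode; otherwise the 24/31-bit address (with the addressing-mode bit set
 * in 31-bit mode) deposited into the low 32 bits of OUT, leaving the high
 * half intact.
 */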
170 static void pc_to_link_info(TCGv_i64 out, DisasContext *s, uint64_t pc)
171 {
172     if (s->base.tb->flags & FLAG_MASK_32) {
173         if (s->base.tb->flags & FLAG_MASK_64) {
174             tcg_gen_movi_i64(out, pc);
175             return;
176         }
177         pc |= 0x80000000;
178     }
179     assert(!(s->base.tb->flags & FLAG_MASK_64));
180     tcg_gen_deposit_i64(out, out, tcg_constant_i64(pc), 0, 32);
181 }
182 
183 static TCGv_i64 psw_addr;
184 static TCGv_i64 psw_mask;
185 static TCGv_i64 gbea;
186 
187 static TCGv_i32 cc_op;
188 static TCGv_i64 cc_src;
189 static TCGv_i64 cc_dst;
190 static TCGv_i64 cc_vr;
191 
192 static char cpu_reg_names[16][4];
193 static TCGv_i64 regs[16];
194 
195 void s390x_translate_init(void)
196 {
197     int i;
198 
199     psw_addr = tcg_global_mem_new_i64(cpu_env,
200                                       offsetof(CPUS390XState, psw.addr),
201                                       "psw_addr");
202     psw_mask = tcg_global_mem_new_i64(cpu_env,
203                                       offsetof(CPUS390XState, psw.mask),
204                                       "psw_mask");
205     gbea = tcg_global_mem_new_i64(cpu_env,
206                                   offsetof(CPUS390XState, gbea),
207                                   "gbea");
208 
209     cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
210                                    "cc_op");
211     cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
212                                     "cc_src");
213     cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
214                                     "cc_dst");
215     cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
216                                    "cc_vr");
217 
218     for (i = 0; i < 16; i++) {
219         snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
220         regs[i] = tcg_global_mem_new(cpu_env,
221                                      offsetof(CPUS390XState, regs[i]),
222                                      cpu_reg_names[i]);
223     }
224 }
225 
226 static inline int vec_full_reg_offset(uint8_t reg)
227 {
228     g_assert(reg < 32);
229     return offsetof(CPUS390XState, vregs[reg][0]);
230 }
231 
232 static inline int vec_reg_offset(uint8_t reg, uint8_t enr, MemOp es)
233 {
234     /* Convert element size (es) - e.g. MO_8 - to bytes */
235     const uint8_t bytes = 1 << es;
236     int offs = enr * bytes;
237 
238     /*
239      * vregs[n][0] holds the low 8 bytes and vregs[n][1] the high 8 bytes
240      * of the 16-byte vector, on both little- and big-endian hosts.
241      *
242      * Big Endian (target/possible host)
243      * B:  [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15]
244      * HW: [     0][     1][     2][     3] - [     4][     5][     6][     7]
245      * W:  [             0][             1] - [             2][             3]
246      * DW: [                             0] - [                             1]
247      *
248      * Little Endian (possible host)
249      * B:  [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8]
250      * HW: [     3][     2][     1][     0] - [     7][     6][     5][     4]
251      * W:  [             1][             0] - [             3][             2]
252      * DW: [                             0] - [                             1]
253      *
254      * For 16-byte elements, the two 8-byte halves will not form a host
255      * int128 if the host is little endian, since they're in the wrong order.
256      * Some operations (e.g. xor) do not care. For operations like addition,
257      * the two 8-byte elements have to be loaded separately. Let's force all
258      * 16-byte operations to handle it in a special way.
259      */
260     g_assert(es <= MO_64);
261 #if !HOST_BIG_ENDIAN
262     offs ^= (8 - bytes);
263 #endif
264     return offs + vec_full_reg_offset(reg);
265 }
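/*
 * Worked example (illustrative): enr = 1 with es = MO_32 gives offs = 4;
 * on a little-endian host the XOR with (8 - bytes) turns that into
 * offs = 0, matching word element 1 in the little-endian diagram above.
 */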
266 
267 static inline int freg64_offset(uint8_t reg)
268 {
269     g_assert(reg < 16);
270     return vec_reg_offset(reg, 0, MO_64);
271 }
272 
273 static inline int freg32_offset(uint8_t reg)
274 {
275     g_assert(reg < 16);
276     return vec_reg_offset(reg, 0, MO_32);
277 }
278 
279 static TCGv_i64 load_reg(int reg)
280 {
281     TCGv_i64 r = tcg_temp_new_i64();
282     tcg_gen_mov_i64(r, regs[reg]);
283     return r;
284 }
285 
286 static TCGv_i64 load_freg(int reg)
287 {
288     TCGv_i64 r = tcg_temp_new_i64();
289 
290     tcg_gen_ld_i64(r, cpu_env, freg64_offset(reg));
291     return r;
292 }
293 
294 static TCGv_i64 load_freg32_i64(int reg)
295 {
296     TCGv_i64 r = tcg_temp_new_i64();
297 
298     tcg_gen_ld32u_i64(r, cpu_env, freg32_offset(reg));
299     return r;
300 }
301 
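/*
 * 128-bit floating point values occupy the register pair (reg, reg + 2),
 * high doubleword in the lower-numbered register; concatenating as
 * (low, high) below yields the host int128 representation.
 */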
302 static TCGv_i128 load_freg_128(int reg)
303 {
304     TCGv_i64 h = load_freg(reg);
305     TCGv_i64 l = load_freg(reg + 2);
306     TCGv_i128 r = tcg_temp_new_i128();
307 
308     tcg_gen_concat_i64_i128(r, l, h);
309     return r;
310 }
311 
312 static void store_reg(int reg, TCGv_i64 v)
313 {
314     tcg_gen_mov_i64(regs[reg], v);
315 }
316 
317 static void store_freg(int reg, TCGv_i64 v)
318 {
319     tcg_gen_st_i64(v, cpu_env, freg64_offset(reg));
320 }
321 
322 static void store_reg32_i64(int reg, TCGv_i64 v)
323 {
324     /* 32 bit register writes keep the upper half */
325     tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
326 }
327 
328 static void store_reg32h_i64(int reg, TCGv_i64 v)
329 {
330     tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
331 }
332 
333 static void store_freg32_i64(int reg, TCGv_i64 v)
334 {
335     tcg_gen_st32_i64(v, cpu_env, freg32_offset(reg));
336 }
337 
338 static void return_low128(TCGv_i64 dest)
339 {
340     tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
341 }
342 
343 static void update_psw_addr(DisasContext *s)
344 {
345     /* psw.addr */
346     tcg_gen_movi_i64(psw_addr, s->base.pc_next);
347 }
348 
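/*
 * PER (Program Event Recording) support: gbea tracks the address of the
 * last breaking event; if PER is enabled in the TB flags, also call the
 * helper so that a branch event can be recognized.
 */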
349 static void per_branch(DisasContext *s, bool to_next)
350 {
351 #ifndef CONFIG_USER_ONLY
352     tcg_gen_movi_i64(gbea, s->base.pc_next);
353 
354     if (s->base.tb->flags & FLAG_MASK_PER) {
355         TCGv_i64 next_pc = to_next ? tcg_constant_i64(s->pc_tmp) : psw_addr;
356         gen_helper_per_branch(cpu_env, gbea, next_pc);
357     }
358 #endif
359 }
360 
361 static void per_branch_cond(DisasContext *s, TCGCond cond,
362                             TCGv_i64 arg1, TCGv_i64 arg2)
363 {
364 #ifndef CONFIG_USER_ONLY
365     if (s->base.tb->flags & FLAG_MASK_PER) {
366         TCGLabel *lab = gen_new_label();
367         tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);
368 
369         tcg_gen_movi_i64(gbea, s->base.pc_next);
370         gen_helper_per_branch(cpu_env, gbea, psw_addr);
371 
372         gen_set_label(lab);
373     } else {
374         TCGv_i64 pc = tcg_constant_i64(s->base.pc_next);
375         tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
376     }
377 #endif
378 }
379 
380 static void per_breaking_event(DisasContext *s)
381 {
382     tcg_gen_movi_i64(gbea, s->base.pc_next);
383 }
384 
385 static void update_cc_op(DisasContext *s)
386 {
387     if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
388         tcg_gen_movi_i32(cc_op, s->cc_op);
389     }
390 }
391 
392 static inline uint64_t ld_code2(CPUS390XState *env, DisasContext *s,
393                                 uint64_t pc)
394 {
395     return (uint64_t)translator_lduw(env, &s->base, pc);
396 }
397 
398 static inline uint64_t ld_code4(CPUS390XState *env, DisasContext *s,
399                                 uint64_t pc)
400 {
401     return (uint64_t)(uint32_t)translator_ldl(env, &s->base, pc);
402 }
403 
404 static int get_mem_index(DisasContext *s)
405 {
406 #ifdef CONFIG_USER_ONLY
407     return MMU_USER_IDX;
408 #else
409     if (!(s->base.tb->flags & FLAG_MASK_DAT)) {
410         return MMU_REAL_IDX;
411     }
412 
413     switch (s->base.tb->flags & FLAG_MASK_ASC) {
414     case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
415         return MMU_PRIMARY_IDX;
416     case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
417         return MMU_SECONDARY_IDX;
418     case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
419         return MMU_HOME_IDX;
420     default:
421         tcg_abort();
422         break;
423     }
424 #endif
425 }
426 
427 static void gen_exception(int excp)
428 {
429     gen_helper_exception(cpu_env, tcg_constant_i32(excp));
430 }
431 
432 static void gen_program_exception(DisasContext *s, int code)
433 {
434     /* Remember what pgm exception this was.  */
435     tcg_gen_st_i32(tcg_constant_i32(code), cpu_env,
436                    offsetof(CPUS390XState, int_pgm_code));
437 
438     tcg_gen_st_i32(tcg_constant_i32(s->ilen), cpu_env,
439                    offsetof(CPUS390XState, int_pgm_ilen));
440 
441     /* update the psw */
442     update_psw_addr(s);
443 
444     /* Save off cc.  */
445     update_cc_op(s);
446 
447     /* Trigger exception.  */
448     gen_exception(EXCP_PGM);
449 }
450 
451 static inline void gen_illegal_opcode(DisasContext *s)
452 {
453     gen_program_exception(s, PGM_OPERATION);
454 }
455 
456 static inline void gen_data_exception(uint8_t dxc)
457 {
458     gen_helper_data_exception(cpu_env, tcg_constant_i32(dxc));
459 }
460 
461 static inline void gen_trap(DisasContext *s)
462 {
463     /* Set DXC to 0xff */
464     gen_data_exception(0xff);
465 }
466 
467 static void gen_addi_and_wrap_i64(DisasContext *s, TCGv_i64 dst, TCGv_i64 src,
468                                   int64_t imm)
469 {
470     tcg_gen_addi_i64(dst, src, imm);
471     if (!(s->base.tb->flags & FLAG_MASK_64)) {
472         if (s->base.tb->flags & FLAG_MASK_32) {
473             tcg_gen_andi_i64(dst, dst, 0x7fffffff);
474         } else {
475             tcg_gen_andi_i64(dst, dst, 0x00ffffff);
476         }
477     }
478 }
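/*
 * Example (illustrative): in 24-bit mode, 0x00fffffe + 4 wraps to
 * 0x00000002; the masks above implement exactly this address-arithmetic
 * wrap for the 24- and 31-bit modes.
 */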
479 
480 static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
481 {
482     TCGv_i64 tmp = tcg_temp_new_i64();
483 
484     /*
485      * Note that d2 is limited to 20 bits, signed.  If we crop negative
486      * displacements early we create larger immediate addends.
487      */
488     if (b2 && x2) {
489         tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
490         gen_addi_and_wrap_i64(s, tmp, tmp, d2);
491     } else if (b2) {
492         gen_addi_and_wrap_i64(s, tmp, regs[b2], d2);
493     } else if (x2) {
494         gen_addi_and_wrap_i64(s, tmp, regs[x2], d2);
495     } else if (!(s->base.tb->flags & FLAG_MASK_64)) {
496         if (s->base.tb->flags & FLAG_MASK_32) {
497             tcg_gen_movi_i64(tmp, d2 & 0x7fffffff);
498         } else {
499             tcg_gen_movi_i64(tmp, d2 & 0x00ffffff);
500         }
501     } else {
502         tcg_gen_movi_i64(tmp, d2);
503     }
504 
505     return tmp;
506 }
507 
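/*
 * cc_src/cc_dst/cc_vr carry live data only for the computed cc_ops: the
 * four CC_OP_CONST* values sort first in the enum (hence the "> 3" test)
 * and encode the condition code in cc_op itself.
 */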
508 static inline bool live_cc_data(DisasContext *s)
509 {
510     return (s->cc_op != CC_OP_DYNAMIC
511             && s->cc_op != CC_OP_STATIC
512             && s->cc_op > 3);
513 }
514 
515 static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
516 {
517     if (live_cc_data(s)) {
518         tcg_gen_discard_i64(cc_src);
519         tcg_gen_discard_i64(cc_dst);
520         tcg_gen_discard_i64(cc_vr);
521     }
522     s->cc_op = CC_OP_CONST0 + val;
523 }
524 
525 static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
526 {
527     if (live_cc_data(s)) {
528         tcg_gen_discard_i64(cc_src);
529         tcg_gen_discard_i64(cc_vr);
530     }
531     tcg_gen_mov_i64(cc_dst, dst);
532     s->cc_op = op;
533 }
534 
535 static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
536                                   TCGv_i64 dst)
537 {
538     if (live_cc_data(s)) {
539         tcg_gen_discard_i64(cc_vr);
540     }
541     tcg_gen_mov_i64(cc_src, src);
542     tcg_gen_mov_i64(cc_dst, dst);
543     s->cc_op = op;
544 }
545 
546 static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
547                                   TCGv_i64 dst, TCGv_i64 vr)
548 {
549     tcg_gen_mov_i64(cc_src, src);
550     tcg_gen_mov_i64(cc_dst, dst);
551     tcg_gen_mov_i64(cc_vr, vr);
552     s->cc_op = op;
553 }
554 
555 static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
556 {
557     gen_op_update1_cc_i64(s, CC_OP_NZ, val);
558 }
559 
560 /* CC value is in env->cc_op */
561 static void set_cc_static(DisasContext *s)
562 {
563     if (live_cc_data(s)) {
564         tcg_gen_discard_i64(cc_src);
565         tcg_gen_discard_i64(cc_dst);
566         tcg_gen_discard_i64(cc_vr);
567     }
568     s->cc_op = CC_OP_STATIC;
569 }
570 
571 /* calculates cc into cc_op */
572 static void gen_op_calc_cc(DisasContext *s)
573 {
574     TCGv_i32 local_cc_op = NULL;
575     TCGv_i64 dummy = NULL;
576 
577     switch (s->cc_op) {
578     default:
579         dummy = tcg_constant_i64(0);
580         /* FALLTHRU */
581     case CC_OP_ADD_64:
582     case CC_OP_SUB_64:
583     case CC_OP_ADD_32:
584     case CC_OP_SUB_32:
585         local_cc_op = tcg_constant_i32(s->cc_op);
586         break;
587     case CC_OP_CONST0:
588     case CC_OP_CONST1:
589     case CC_OP_CONST2:
590     case CC_OP_CONST3:
591     case CC_OP_STATIC:
592     case CC_OP_DYNAMIC:
593         break;
594     }
595 
596     switch (s->cc_op) {
597     case CC_OP_CONST0:
598     case CC_OP_CONST1:
599     case CC_OP_CONST2:
600     case CC_OP_CONST3:
601         /* s->cc_op is the cc value */
602         tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
603         break;
604     case CC_OP_STATIC:
605         /* env->cc_op already is the cc value */
606         break;
607     case CC_OP_NZ:
608         tcg_gen_setcondi_i64(TCG_COND_NE, cc_dst, cc_dst, 0);
609         tcg_gen_extrl_i64_i32(cc_op, cc_dst);
610         break;
611     case CC_OP_ABS_64:
612     case CC_OP_NABS_64:
613     case CC_OP_ABS_32:
614     case CC_OP_NABS_32:
615     case CC_OP_LTGT0_32:
616     case CC_OP_LTGT0_64:
617     case CC_OP_COMP_32:
618     case CC_OP_COMP_64:
619     case CC_OP_NZ_F32:
620     case CC_OP_NZ_F64:
621     case CC_OP_FLOGR:
622     case CC_OP_LCBB:
623     case CC_OP_MULS_32:
624         /* 1 argument */
625         gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
626         break;
627     case CC_OP_ADDU:
628     case CC_OP_ICM:
629     case CC_OP_LTGT_32:
630     case CC_OP_LTGT_64:
631     case CC_OP_LTUGTU_32:
632     case CC_OP_LTUGTU_64:
633     case CC_OP_TM_32:
634     case CC_OP_TM_64:
635     case CC_OP_SLA:
636     case CC_OP_SUBU:
637     case CC_OP_NZ_F128:
638     case CC_OP_VC:
639     case CC_OP_MULS_64:
640         /* 2 arguments */
641         gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
642         break;
643     case CC_OP_ADD_64:
644     case CC_OP_SUB_64:
645     case CC_OP_ADD_32:
646     case CC_OP_SUB_32:
647         /* 3 arguments */
648         gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
649         break;
650     case CC_OP_DYNAMIC:
651         /* unknown operation - assume 3 arguments and cc_op in env */
652         gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
653         break;
654     default:
655         tcg_abort();
656     }
657 
658     /* We now have cc in cc_op as constant */
659     set_cc_static(s);
660 }
661 
662 static bool use_goto_tb(DisasContext *s, uint64_t dest)
663 {
664     if (unlikely(s->base.tb->flags & FLAG_MASK_PER)) {
665         return false;
666     }
667     return translator_use_goto_tb(&s->base, dest);
668 }
669 
670 static void account_noninline_branch(DisasContext *s, int cc_op)
671 {
672 #ifdef DEBUG_INLINE_BRANCHES
673     inline_branch_miss[cc_op]++;
674 #endif
675 }
676 
677 static void account_inline_branch(DisasContext *s, int cc_op)
678 {
679 #ifdef DEBUG_INLINE_BRANCHES
680     inline_branch_hit[cc_op]++;
681 #endif
682 }
683 
684 /* Table of mask values to comparison codes, given a comparison as input.
685    For such an input, CC=3 should not be possible.  */
686 static const TCGCond ltgt_cond[16] = {
687     TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
688     TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
689     TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
690     TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
691     TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
692     TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
693     TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
694     TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
695 };
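/*
 * Example (illustrative): mask 8 | 2 selects "CC 0 or CC 2", i.e. equal
 * or greater after a signed comparison, hence TCG_COND_GE above.
 */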
696 
697 /* Table of mask values to comparison codes, given a logic op as input.
698    For such an input, only CC=0 and CC=1 should be possible.  */
699 static const TCGCond nz_cond[16] = {
700     TCG_COND_NEVER, TCG_COND_NEVER,      /*    |    | x | x */
701     TCG_COND_NEVER, TCG_COND_NEVER,
702     TCG_COND_NE, TCG_COND_NE,            /*    | NE | x | x */
703     TCG_COND_NE, TCG_COND_NE,
704     TCG_COND_EQ, TCG_COND_EQ,            /* EQ |    | x | x */
705     TCG_COND_EQ, TCG_COND_EQ,
706     TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
707     TCG_COND_ALWAYS, TCG_COND_ALWAYS,
708 };
709 
710 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
711    details required to generate a TCG comparison.  */
712 static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
713 {
714     TCGCond cond;
715     enum cc_op old_cc_op = s->cc_op;
716 
717     if (mask == 15 || mask == 0) {
718         c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
719         c->u.s32.a = cc_op;
720         c->u.s32.b = cc_op;
721         c->is_64 = false;
722         return;
723     }
724 
725     /* Find the TCG condition for the mask + cc op.  */
726     switch (old_cc_op) {
727     case CC_OP_LTGT0_32:
728     case CC_OP_LTGT0_64:
729     case CC_OP_LTGT_32:
730     case CC_OP_LTGT_64:
731         cond = ltgt_cond[mask];
732         if (cond == TCG_COND_NEVER) {
733             goto do_dynamic;
734         }
735         account_inline_branch(s, old_cc_op);
736         break;
737 
738     case CC_OP_LTUGTU_32:
739     case CC_OP_LTUGTU_64:
740         cond = tcg_unsigned_cond(ltgt_cond[mask]);
741         if (cond == TCG_COND_NEVER) {
742             goto do_dynamic;
743         }
744         account_inline_branch(s, old_cc_op);
745         break;
746 
747     case CC_OP_NZ:
748         cond = nz_cond[mask];
749         if (cond == TCG_COND_NEVER) {
750             goto do_dynamic;
751         }
752         account_inline_branch(s, old_cc_op);
753         break;
754 
755     case CC_OP_TM_32:
756     case CC_OP_TM_64:
757         switch (mask) {
758         case 8:
759             cond = TCG_COND_EQ;
760             break;
761         case 4 | 2 | 1:
762             cond = TCG_COND_NE;
763             break;
764         default:
765             goto do_dynamic;
766         }
767         account_inline_branch(s, old_cc_op);
768         break;
769 
770     case CC_OP_ICM:
771         switch (mask) {
772         case 8:
773             cond = TCG_COND_EQ;
774             break;
775         case 4 | 2 | 1:
776         case 4 | 2:
777             cond = TCG_COND_NE;
778             break;
779         default:
780             goto do_dynamic;
781         }
782         account_inline_branch(s, old_cc_op);
783         break;
784 
785     case CC_OP_FLOGR:
786         switch (mask & 0xa) {
787         case 8: /* src == 0 -> no one bit found */
788             cond = TCG_COND_EQ;
789             break;
790         case 2: /* src != 0 -> one bit found */
791             cond = TCG_COND_NE;
792             break;
793         default:
794             goto do_dynamic;
795         }
796         account_inline_branch(s, old_cc_op);
797         break;
798 
799     case CC_OP_ADDU:
800     case CC_OP_SUBU:
801         switch (mask) {
802         case 8 | 2: /* result == 0 */
803             cond = TCG_COND_EQ;
804             break;
805         case 4 | 1: /* result != 0 */
806             cond = TCG_COND_NE;
807             break;
808         case 8 | 4: /* !carry (borrow) */
809             cond = old_cc_op == CC_OP_ADDU ? TCG_COND_EQ : TCG_COND_NE;
810             break;
811         case 2 | 1: /* carry (!borrow) */
812             cond = old_cc_op == CC_OP_ADDU ? TCG_COND_NE : TCG_COND_EQ;
813             break;
814         default:
815             goto do_dynamic;
816         }
817         account_inline_branch(s, old_cc_op);
818         break;
819 
820     default:
821     do_dynamic:
822         /* Calculate cc value.  */
823         gen_op_calc_cc(s);
824         /* FALLTHRU */
825 
826     case CC_OP_STATIC:
827         /* Jump based on CC.  We'll load up the real cond below;
828            the assignment here merely avoids a compiler warning.  */
829         account_noninline_branch(s, old_cc_op);
830         old_cc_op = CC_OP_STATIC;
831         cond = TCG_COND_NEVER;
832         break;
833     }
834 
835     /* Load up the arguments of the comparison.  */
836     c->is_64 = true;
837     switch (old_cc_op) {
838     case CC_OP_LTGT0_32:
839         c->is_64 = false;
840         c->u.s32.a = tcg_temp_new_i32();
841         tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
842         c->u.s32.b = tcg_constant_i32(0);
843         break;
844     case CC_OP_LTGT_32:
845     case CC_OP_LTUGTU_32:
846         c->is_64 = false;
847         c->u.s32.a = tcg_temp_new_i32();
848         tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
849         c->u.s32.b = tcg_temp_new_i32();
850         tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
851         break;
852 
853     case CC_OP_LTGT0_64:
854     case CC_OP_NZ:
855     case CC_OP_FLOGR:
856         c->u.s64.a = cc_dst;
857         c->u.s64.b = tcg_constant_i64(0);
858         break;
859     case CC_OP_LTGT_64:
860     case CC_OP_LTUGTU_64:
861         c->u.s64.a = cc_src;
862         c->u.s64.b = cc_dst;
863         break;
864 
865     case CC_OP_TM_32:
866     case CC_OP_TM_64:
867     case CC_OP_ICM:
868         c->u.s64.a = tcg_temp_new_i64();
869         c->u.s64.b = tcg_constant_i64(0);
870         tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
871         break;
872 
873     case CC_OP_ADDU:
874     case CC_OP_SUBU:
875         c->is_64 = true;
876         c->u.s64.b = tcg_constant_i64(0);
877         switch (mask) {
878         case 8 | 2:
879         case 4 | 1: /* result */
880             c->u.s64.a = cc_dst;
881             break;
882         case 8 | 4:
883         case 2 | 1: /* carry */
884             c->u.s64.a = cc_src;
885             break;
886         default:
887             g_assert_not_reached();
888         }
889         break;
890 
891     case CC_OP_STATIC:
892         c->is_64 = false;
893         c->u.s32.a = cc_op;
894         switch (mask) {
895         case 0x8 | 0x4 | 0x2: /* cc != 3 */
896             cond = TCG_COND_NE;
897             c->u.s32.b = tcg_constant_i32(3);
898             break;
899         case 0x8 | 0x4 | 0x1: /* cc != 2 */
900             cond = TCG_COND_NE;
901             c->u.s32.b = tcg_constant_i32(2);
902             break;
903         case 0x8 | 0x2 | 0x1: /* cc != 1 */
904             cond = TCG_COND_NE;
905             c->u.s32.b = tcg_constant_i32(1);
906             break;
907         case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
908             cond = TCG_COND_EQ;
909             c->u.s32.a = tcg_temp_new_i32();
910             c->u.s32.b = tcg_constant_i32(0);
911             tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
912             break;
913         case 0x8 | 0x4: /* cc < 2 */
914             cond = TCG_COND_LTU;
915             c->u.s32.b = tcg_constant_i32(2);
916             break;
917         case 0x8: /* cc == 0 */
918             cond = TCG_COND_EQ;
919             c->u.s32.b = tcg_constant_i32(0);
920             break;
921         case 0x4 | 0x2 | 0x1: /* cc != 0 */
922             cond = TCG_COND_NE;
923             c->u.s32.b = tcg_constant_i32(0);
924             break;
925         case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
926             cond = TCG_COND_NE;
927             c->u.s32.a = tcg_temp_new_i32();
928             c->u.s32.b = tcg_constant_i32(0);
929             tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
930             break;
931         case 0x4: /* cc == 1 */
932             cond = TCG_COND_EQ;
933             c->u.s32.b = tcg_constant_i32(1);
934             break;
935         case 0x2 | 0x1: /* cc > 1 */
936             cond = TCG_COND_GTU;
937             c->u.s32.b = tcg_constant_i32(1);
938             break;
939         case 0x2: /* cc == 2 */
940             cond = TCG_COND_EQ;
941             c->u.s32.b = tcg_constant_i32(2);
942             break;
943         case 0x1: /* cc == 3 */
944             cond = TCG_COND_EQ;
945             c->u.s32.b = tcg_constant_i32(3);
946             break;
947         default:
948             /* CC is masked by something else: (8 >> cc) & mask.  */
949             cond = TCG_COND_NE;
950             c->u.s32.a = tcg_temp_new_i32();
951             c->u.s32.b = tcg_constant_i32(0);
952             tcg_gen_shr_i32(c->u.s32.a, tcg_constant_i32(8), cc_op);
953             tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
954             break;
955         }
956         break;
957 
958     default:
959         abort();
960     }
961     c->cond = cond;
962 }
963 
964 /* ====================================================================== */
965 /* Define the insn format enumeration.  */
966 #define F0(N)                         FMT_##N,
967 #define F1(N, X1)                     F0(N)
968 #define F2(N, X1, X2)                 F0(N)
969 #define F3(N, X1, X2, X3)             F0(N)
970 #define F4(N, X1, X2, X3, X4)         F0(N)
971 #define F5(N, X1, X2, X3, X4, X5)     F0(N)
972 #define F6(N, X1, X2, X3, X4, X5, X6) F0(N)
973 
974 typedef enum {
975 #include "insn-format.h.inc"
976 } DisasFormat;
977 
978 #undef F0
979 #undef F1
980 #undef F2
981 #undef F3
982 #undef F4
983 #undef F5
984 #undef F6
985 
986 /* This is the way fields are to be accessed out of DisasFields.  */
987 #define have_field(S, F)  have_field1((S), FLD_O_##F)
988 #define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)
989 
990 static bool have_field1(const DisasContext *s, enum DisasFieldIndexO c)
991 {
992     return (s->fields.presentO >> c) & 1;
993 }
994 
995 static int get_field1(const DisasContext *s, enum DisasFieldIndexO o,
996                       enum DisasFieldIndexC c)
997 {
998     assert(have_field1(s, o));
999     return s->fields.c[c];
1000 }
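/*
 * Illustrative use: get_field(s, r1) yields the decoded r1 operand,
 * asserting via the presentO bitmap that the current format has one.
 */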
1001 
1002 /* Describe the layout of each field in each format.  */
1003 typedef struct DisasField {
1004     unsigned int beg:8;
1005     unsigned int size:8;
1006     unsigned int type:2;
1007     unsigned int indexC:6;
1008     enum DisasFieldIndexO indexO:8;
1009 } DisasField;
1010 
1011 typedef struct DisasFormatInfo {
1012     DisasField op[NUM_C_FIELD];
1013 } DisasFormatInfo;
1014 
1015 #define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
1016 #define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
1017 #define V(N, B)       {  B,  4, 3, FLD_C_v##N, FLD_O_v##N }
1018 #define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
1019                       { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1020 #define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
1021                       { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
1022                       { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1023 #define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
1024                       { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1025 #define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
1026                       { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
1027                       { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1028 #define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
1029 #define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }
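/*
 * A sketch of the "type" column, inferred from the uses above: 0 is a
 * plain unsigned field, 1 a signed immediate, 2 the split 20-bit long
 * displacement, and 3 a vector register, whose fifth bit comes from the
 * RXB byte in the z/Architecture vector encoding.
 */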
1030 
1031 #define F0(N)                     { { } },
1032 #define F1(N, X1)                 { { X1 } },
1033 #define F2(N, X1, X2)             { { X1, X2 } },
1034 #define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
1035 #define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
1036 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1037 #define F6(N, X1, X2, X3, X4, X5, X6)       { { X1, X2, X3, X4, X5, X6 } },
1038 
1039 static const DisasFormatInfo format_info[] = {
1040 #include "insn-format.h.inc"
1041 };
1042 
1043 #undef F0
1044 #undef F1
1045 #undef F2
1046 #undef F3
1047 #undef F4
1048 #undef F5
1049 #undef F6
1050 #undef R
1051 #undef M
1052 #undef V
1053 #undef BD
1054 #undef BXD
1055 #undef BDL
1056 #undef BXDL
1057 #undef I
1058 #undef L
1059 
1060 /* Generally, we'll extract operands into this structure, operate upon
1061    them, and store them back.  See the "in1", "in2", "prep", "wout" sets
1062    of routines below for more details.  */
1063 typedef struct {
1064     TCGv_i64 out, out2, in1, in2;
1065     TCGv_i64 addr1;
1066     TCGv_i128 out_128, in1_128, in2_128;
1067 } DisasOps;
1068 
1069 /* Instructions can place constraints on their operands, raising specification
1070    exceptions if they are violated.  To make this easy to automate, each "in1",
1071    "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1072    of the following, or 0.  To make this easy to document, we'll put the
1073    SPEC_<name> defines next to <name>.  */
1074 
1075 #define SPEC_r1_even    1
1076 #define SPEC_r2_even    2
1077 #define SPEC_r3_even    4
1078 #define SPEC_r1_f128    8
1079 #define SPEC_r2_f128    16
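/* For example, an instruction operating on a register pair would set
   SPEC_r1_even; decode then raises a specification exception for odd r1. */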
1080 
1081 /* Return values from translate_one, indicating the state of the TB.  */
1082 
1083 /* We are not using a goto_tb (for whatever reason), but have updated
1084    the PC (for whatever reason), so there's no need to do it again on
1085    exiting the TB.  */
1086 #define DISAS_PC_UPDATED        DISAS_TARGET_0
1087 
1088 /* We have updated the PC and CC values.  */
1089 #define DISAS_PC_CC_UPDATED     DISAS_TARGET_2
1090 
1091 
1092 /* Instruction flags */
1093 #define IF_AFP1     0x0001      /* r1 is a fp reg for HFP/FPS instructions */
1094 #define IF_AFP2     0x0002      /* r2 is a fp reg for HFP/FPS instructions */
1095 #define IF_AFP3     0x0004      /* r3 is a fp reg for HFP/FPS instructions */
1096 #define IF_BFP      0x0008      /* binary floating point instruction */
1097 #define IF_DFP      0x0010      /* decimal floating point instruction */
1098 #define IF_PRIV     0x0020      /* privileged instruction */
1099 #define IF_VEC      0x0040      /* vector instruction */
1100 #define IF_IO       0x0080      /* input/output instruction */
1101 
1102 struct DisasInsn {
1103     unsigned opc:16;
1104     unsigned flags:16;
1105     DisasFormat fmt:8;
1106     unsigned fac:8;
1107     unsigned spec:8;
1108 
1109     const char *name;
1110 
1111     /* Pre-process arguments before HELP_OP.  */
1112     void (*help_in1)(DisasContext *, DisasOps *);
1113     void (*help_in2)(DisasContext *, DisasOps *);
1114     void (*help_prep)(DisasContext *, DisasOps *);
1115 
1116     /*
1117      * Post-process output after HELP_OP.
1118      * Note that these are not called if HELP_OP returns DISAS_NORETURN.
1119      */
1120     void (*help_wout)(DisasContext *, DisasOps *);
1121     void (*help_cout)(DisasContext *, DisasOps *);
1122 
1123     /* Implement the operation itself.  */
1124     DisasJumpType (*help_op)(DisasContext *, DisasOps *);
1125 
1126     uint64_t data;
1127 };
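/*
 * Call order (a sketch): help_in1/help_in2/help_prep load and prepare
 * the operands, help_op implements the instruction itself, and then
 * help_wout writes the result back while help_cout computes the
 * condition code.
 */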
1128 
1129 /* ====================================================================== */
1130 /* Miscellaneous helpers, used by several operations.  */
1131 
1132 static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest)
1133 {
1134     if (dest == s->pc_tmp) {
1135         per_branch(s, true);
1136         return DISAS_NEXT;
1137     }
1138     if (use_goto_tb(s, dest)) {
1139         update_cc_op(s);
1140         per_breaking_event(s);
1141         tcg_gen_goto_tb(0);
1142         tcg_gen_movi_i64(psw_addr, dest);
1143         tcg_gen_exit_tb(s->base.tb, 0);
1144         return DISAS_NORETURN;
1145     } else {
1146         tcg_gen_movi_i64(psw_addr, dest);
1147         per_branch(s, false);
1148         return DISAS_PC_UPDATED;
1149     }
1150 }
1151 
1152 static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
1153                                  bool is_imm, int imm, TCGv_i64 cdest)
1154 {
1155     DisasJumpType ret;
1156     uint64_t dest = s->base.pc_next + (int64_t)imm * 2;
1157     TCGLabel *lab;
1158 
1159     /* Take care of the special cases first.  */
1160     if (c->cond == TCG_COND_NEVER) {
1161         ret = DISAS_NEXT;
1162         goto egress;
1163     }
1164     if (is_imm) {
1165         if (dest == s->pc_tmp) {
1166             /* Branch to next.  */
1167             per_branch(s, true);
1168             ret = DISAS_NEXT;
1169             goto egress;
1170         }
1171         if (c->cond == TCG_COND_ALWAYS) {
1172             ret = help_goto_direct(s, dest);
1173             goto egress;
1174         }
1175     } else {
1176         if (!cdest) {
1177             /* E.g. bcr %r0 -> no branch.  */
1178             ret = DISAS_NEXT;
1179             goto egress;
1180         }
1181         if (c->cond == TCG_COND_ALWAYS) {
1182             tcg_gen_mov_i64(psw_addr, cdest);
1183             per_branch(s, false);
1184             ret = DISAS_PC_UPDATED;
1185             goto egress;
1186         }
1187     }
1188 
1189     if (use_goto_tb(s, s->pc_tmp)) {
1190         if (is_imm && use_goto_tb(s, dest)) {
1191             /* Both exits can use goto_tb.  */
1192             update_cc_op(s);
1193 
1194             lab = gen_new_label();
1195             if (c->is_64) {
1196                 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1197             } else {
1198                 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1199             }
1200 
1201             /* Branch not taken.  */
1202             tcg_gen_goto_tb(0);
1203             tcg_gen_movi_i64(psw_addr, s->pc_tmp);
1204             tcg_gen_exit_tb(s->base.tb, 0);
1205 
1206             /* Branch taken.  */
1207             gen_set_label(lab);
1208             per_breaking_event(s);
1209             tcg_gen_goto_tb(1);
1210             tcg_gen_movi_i64(psw_addr, dest);
1211             tcg_gen_exit_tb(s->base.tb, 1);
1212 
1213             ret = DISAS_NORETURN;
1214         } else {
1215             /* Fallthru can use goto_tb, but taken branch cannot.  */
1216             /* Store taken branch destination before the brcond.  This
1217                avoids having to allocate a new local temp to hold it.
1218                We'll overwrite this in the not taken case anyway.  */
1219             if (!is_imm) {
1220                 tcg_gen_mov_i64(psw_addr, cdest);
1221             }
1222 
1223             lab = gen_new_label();
1224             if (c->is_64) {
1225                 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1226             } else {
1227                 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1228             }
1229 
1230             /* Branch not taken.  */
1231             update_cc_op(s);
1232             tcg_gen_goto_tb(0);
1233             tcg_gen_movi_i64(psw_addr, s->pc_tmp);
1234             tcg_gen_exit_tb(s->base.tb, 0);
1235 
1236             gen_set_label(lab);
1237             if (is_imm) {
1238                 tcg_gen_movi_i64(psw_addr, dest);
1239             }
1240             per_breaking_event(s);
1241             ret = DISAS_PC_UPDATED;
1242         }
1243     } else {
1244         /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
1245            Most commonly we're single-stepping or some other condition that
1246            disables all use of goto_tb.  Just update the PC and exit.  */
1247 
1248         TCGv_i64 next = tcg_constant_i64(s->pc_tmp);
1249         if (is_imm) {
1250             cdest = tcg_constant_i64(dest);
1251         }
1252 
1253         if (c->is_64) {
1254             tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
1255                                 cdest, next);
1256             per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
1257         } else {
1258             TCGv_i32 t0 = tcg_temp_new_i32();
1259             TCGv_i64 t1 = tcg_temp_new_i64();
1260             TCGv_i64 z = tcg_constant_i64(0);
1261             tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
1262             tcg_gen_extu_i32_i64(t1, t0);
1263             tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
1264             per_branch_cond(s, TCG_COND_NE, t1, z);
1265         }
1266 
1267         ret = DISAS_PC_UPDATED;
1268     }
1269 
1270  egress:
1271     return ret;
1272 }
1273 
1274 /* ====================================================================== */
1275 /* The operations.  These perform the bulk of the work for any insn,
1276    usually after the operands have been loaded and output initialized.  */
1277 
1278 static DisasJumpType op_abs(DisasContext *s, DisasOps *o)
1279 {
1280     tcg_gen_abs_i64(o->out, o->in2);
1281     return DISAS_NEXT;
1282 }
1283 
1284 static DisasJumpType op_absf32(DisasContext *s, DisasOps *o)
1285 {
1286     tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
1287     return DISAS_NEXT;
1288 }
1289 
1290 static DisasJumpType op_absf64(DisasContext *s, DisasOps *o)
1291 {
1292     tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1293     return DISAS_NEXT;
1294 }
1295 
1296 static DisasJumpType op_absf128(DisasContext *s, DisasOps *o)
1297 {
1298     tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
1299     tcg_gen_mov_i64(o->out2, o->in2);
1300     return DISAS_NEXT;
1301 }
1302 
1303 static DisasJumpType op_add(DisasContext *s, DisasOps *o)
1304 {
1305     tcg_gen_add_i64(o->out, o->in1, o->in2);
1306     return DISAS_NEXT;
1307 }
1308 
1309 static DisasJumpType op_addu64(DisasContext *s, DisasOps *o)
1310 {
1311     tcg_gen_movi_i64(cc_src, 0);
1312     tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
1313     return DISAS_NEXT;
1314 }
1315 
1316 /* Compute carry into cc_src. */
1317 static void compute_carry(DisasContext *s)
1318 {
1319     switch (s->cc_op) {
1320     case CC_OP_ADDU:
1321         /* The carry value is already in cc_src (1,0). */
1322         break;
1323     case CC_OP_SUBU:
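        /* cc_src holds 0 (no borrow) or -1 (borrow); +1 yields the carry. */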
1324         tcg_gen_addi_i64(cc_src, cc_src, 1);
1325         break;
1326     default:
1327         gen_op_calc_cc(s);
1328         /* fall through */
1329     case CC_OP_STATIC:
1330         /* The carry flag is the msb of CC; compute into cc_src. */
1331         tcg_gen_extu_i32_i64(cc_src, cc_op);
1332         tcg_gen_shri_i64(cc_src, cc_src, 1);
1333         break;
1334     }
1335 }
1336 
1337 static DisasJumpType op_addc32(DisasContext *s, DisasOps *o)
1338 {
1339     compute_carry(s);
1340     tcg_gen_add_i64(o->out, o->in1, o->in2);
1341     tcg_gen_add_i64(o->out, o->out, cc_src);
1342     return DISAS_NEXT;
1343 }
1344 
1345 static DisasJumpType op_addc64(DisasContext *s, DisasOps *o)
1346 {
1347     compute_carry(s);
1348 
1349     TCGv_i64 zero = tcg_constant_i64(0);
1350     tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, zero);
1351     tcg_gen_add2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);
1352 
1353     return DISAS_NEXT;
1354 }
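/*
 * A sketch of the above: the first add2 computes in1 + carry-in, the
 * second adds in2; each step accumulates its carry-out in cc_src, which
 * ends up holding the (0/1) carry expected by CC_OP_ADDU.
 */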
1355 
1356 static DisasJumpType op_asi(DisasContext *s, DisasOps *o)
1357 {
1358     bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);
1359 
1360     o->in1 = tcg_temp_new_i64();
1361     if (non_atomic) {
1362         tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
1363     } else {
1364         /* Perform the atomic addition in memory. */
1365         tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
1366                                      s->insn->data);
1367     }
1368 
1369     /* Recompute also for atomic case: needed for setting CC. */
1370     tcg_gen_add_i64(o->out, o->in1, o->in2);
1371 
1372     if (non_atomic) {
1373         tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
1374     }
1375     return DISAS_NEXT;
1376 }
1377 
1378 static DisasJumpType op_asiu64(DisasContext *s, DisasOps *o)
1379 {
1380     bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);
1381 
1382     o->in1 = tcg_temp_new_i64();
1383     if (non_atomic) {
1384         tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
1385     } else {
1386         /* Perform the atomic addition in memory. */
1387         tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
1388                                      s->insn->data);
1389     }
1390 
1391     /* Recompute also for atomic case: needed for setting CC. */
1392     tcg_gen_movi_i64(cc_src, 0);
1393     tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
1394 
1395     if (non_atomic) {
1396         tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
1397     }
1398     return DISAS_NEXT;
1399 }
1400 
1401 static DisasJumpType op_aeb(DisasContext *s, DisasOps *o)
1402 {
1403     gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
1404     return DISAS_NEXT;
1405 }
1406 
1407 static DisasJumpType op_adb(DisasContext *s, DisasOps *o)
1408 {
1409     gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
1410     return DISAS_NEXT;
1411 }
1412 
1413 static DisasJumpType op_axb(DisasContext *s, DisasOps *o)
1414 {
1415     gen_helper_axb(o->out_128, cpu_env, o->in1_128, o->in2_128);
1416     return DISAS_NEXT;
1417 }
1418 
1419 static DisasJumpType op_and(DisasContext *s, DisasOps *o)
1420 {
1421     tcg_gen_and_i64(o->out, o->in1, o->in2);
1422     return DISAS_NEXT;
1423 }
1424 
1425 static DisasJumpType op_andi(DisasContext *s, DisasOps *o)
1426 {
1427     int shift = s->insn->data & 0xff;
1428     int size = s->insn->data >> 8;
1429     uint64_t mask = ((1ull << size) - 1) << shift;
1430     TCGv_i64 t = tcg_temp_new_i64();
1431 
1432     tcg_gen_shli_i64(t, o->in2, shift);
1433     tcg_gen_ori_i64(t, t, ~mask);
1434     tcg_gen_and_i64(o->out, o->in1, t);
1435 
1436     /* Produce the CC from only the bits manipulated.  */
1437     tcg_gen_andi_i64(cc_dst, o->out, mask);
1438     set_cc_nz_u64(s, cc_dst);
1439     return DISAS_NEXT;
1440 }
1441 
1442 static DisasJumpType op_andc(DisasContext *s, DisasOps *o)
1443 {
1444     tcg_gen_andc_i64(o->out, o->in1, o->in2);
1445     return DISAS_NEXT;
1446 }
1447 
1448 static DisasJumpType op_orc(DisasContext *s, DisasOps *o)
1449 {
1450     tcg_gen_orc_i64(o->out, o->in1, o->in2);
1451     return DISAS_NEXT;
1452 }
1453 
1454 static DisasJumpType op_nand(DisasContext *s, DisasOps *o)
1455 {
1456     tcg_gen_nand_i64(o->out, o->in1, o->in2);
1457     return DISAS_NEXT;
1458 }
1459 
1460 static DisasJumpType op_nor(DisasContext *s, DisasOps *o)
1461 {
1462     tcg_gen_nor_i64(o->out, o->in1, o->in2);
1463     return DISAS_NEXT;
1464 }
1465 
1466 static DisasJumpType op_nxor(DisasContext *s, DisasOps *o)
1467 {
1468     tcg_gen_eqv_i64(o->out, o->in1, o->in2);
1469     return DISAS_NEXT;
1470 }
1471 
1472 static DisasJumpType op_ni(DisasContext *s, DisasOps *o)
1473 {
1474     o->in1 = tcg_temp_new_i64();
1475 
1476     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
1477         tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
1478     } else {
1479         /* Perform the atomic operation in memory. */
1480         tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
1481                                      s->insn->data);
1482     }
1483 
1484     /* Recompute also for atomic case: needed for setting CC. */
1485     tcg_gen_and_i64(o->out, o->in1, o->in2);
1486 
1487     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
1488         tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
1489     }
1490     return DISAS_NEXT;
1491 }
1492 
1493 static DisasJumpType op_bas(DisasContext *s, DisasOps *o)
1494 {
1495     pc_to_link_info(o->out, s, s->pc_tmp);
1496     if (o->in2) {
1497         tcg_gen_mov_i64(psw_addr, o->in2);
1498         per_branch(s, false);
1499         return DISAS_PC_UPDATED;
1500     } else {
1501         return DISAS_NEXT;
1502     }
1503 }
1504 
1505 static void save_link_info(DisasContext *s, DisasOps *o)
1506 {
1507     TCGv_i64 t;
1508 
1509     if (s->base.tb->flags & (FLAG_MASK_32 | FLAG_MASK_64)) {
1510         pc_to_link_info(o->out, s, s->pc_tmp);
1511         return;
1512     }
1513     gen_op_calc_cc(s);
1514     tcg_gen_andi_i64(o->out, o->out, 0xffffffff00000000ull);
1515     tcg_gen_ori_i64(o->out, o->out, ((s->ilen / 2) << 30) | s->pc_tmp);
1516     t = tcg_temp_new_i64();
1517     tcg_gen_shri_i64(t, psw_mask, 16);
1518     tcg_gen_andi_i64(t, t, 0x0f000000);
1519     tcg_gen_or_i64(o->out, o->out, t);
1520     tcg_gen_extu_i32_i64(t, cc_op);
1521     tcg_gen_shli_i64(t, t, 28);
1522     tcg_gen_or_i64(o->out, o->out, t);
1523 }
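/*
 * In 24-bit mode the link value assembled above is the classic BAL
 * format: ILC in bits 30-31, CC in bits 28-29, the program mask in bits
 * 24-27 and the 24-bit return address in bits 0-23.
 */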
1524 
1525 static DisasJumpType op_bal(DisasContext *s, DisasOps *o)
1526 {
1527     save_link_info(s, o);
1528     if (o->in2) {
1529         tcg_gen_mov_i64(psw_addr, o->in2);
1530         per_branch(s, false);
1531         return DISAS_PC_UPDATED;
1532     } else {
1533         return DISAS_NEXT;
1534     }
1535 }
1536 
1537 static DisasJumpType op_basi(DisasContext *s, DisasOps *o)
1538 {
1539     pc_to_link_info(o->out, s, s->pc_tmp);
1540     return help_goto_direct(s, s->base.pc_next + (int64_t)get_field(s, i2) * 2);
1541 }
1542 
1543 static DisasJumpType op_bc(DisasContext *s, DisasOps *o)
1544 {
1545     int m1 = get_field(s, m1);
1546     bool is_imm = have_field(s, i2);
1547     int imm = is_imm ? get_field(s, i2) : 0;
1548     DisasCompare c;
1549 
1550     /* BCR with R2 = 0 causes no branching */
1551     if (have_field(s, r2) && get_field(s, r2) == 0) {
1552         if (m1 == 14) {
1553             /* Perform serialization */
1554             /* FIXME: check for fast-BCR-serialization facility */
1555             tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1556         }
1557         if (m1 == 15) {
1558             /* Perform serialization */
1559             /* FIXME: perform checkpoint-synchronisation */
1560             tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1561         }
1562         return DISAS_NEXT;
1563     }
1564 
1565     disas_jcc(s, &c, m1);
1566     return help_branch(s, &c, is_imm, imm, o->in2);
1567 }
1568 
1569 static DisasJumpType op_bct32(DisasContext *s, DisasOps *o)
1570 {
1571     int r1 = get_field(s, r1);
1572     bool is_imm = have_field(s, i2);
1573     int imm = is_imm ? get_field(s, i2) : 0;
1574     DisasCompare c;
1575     TCGv_i64 t;
1576 
1577     c.cond = TCG_COND_NE;
1578     c.is_64 = false;
1579 
1580     t = tcg_temp_new_i64();
1581     tcg_gen_subi_i64(t, regs[r1], 1);
1582     store_reg32_i64(r1, t);
1583     c.u.s32.a = tcg_temp_new_i32();
1584     c.u.s32.b = tcg_constant_i32(0);
1585     tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1586 
1587     return help_branch(s, &c, is_imm, imm, o->in2);
1588 }
1589 
1590 static DisasJumpType op_bcth(DisasContext *s, DisasOps *o)
1591 {
1592     int r1 = get_field(s, r1);
1593     int imm = get_field(s, i2);
1594     DisasCompare c;
1595     TCGv_i64 t;
1596 
1597     c.cond = TCG_COND_NE;
1598     c.is_64 = false;
1599 
1600     t = tcg_temp_new_i64();
1601     tcg_gen_shri_i64(t, regs[r1], 32);
1602     tcg_gen_subi_i64(t, t, 1);
1603     store_reg32h_i64(r1, t);
1604     c.u.s32.a = tcg_temp_new_i32();
1605     c.u.s32.b = tcg_constant_i32(0);
1606     tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1607 
1608     return help_branch(s, &c, 1, imm, o->in2);
1609 }
1610 
1611 static DisasJumpType op_bct64(DisasContext *s, DisasOps *o)
1612 {
1613     int r1 = get_field(s, r1);
1614     bool is_imm = have_field(s, i2);
1615     int imm = is_imm ? get_field(s, i2) : 0;
1616     DisasCompare c;
1617 
1618     c.cond = TCG_COND_NE;
1619     c.is_64 = true;
1620 
1621     tcg_gen_subi_i64(regs[r1], regs[r1], 1);
1622     c.u.s64.a = regs[r1];
1623     c.u.s64.b = tcg_constant_i64(0);
1624 
1625     return help_branch(s, &c, is_imm, imm, o->in2);
1626 }
1627 
1628 static DisasJumpType op_bx32(DisasContext *s, DisasOps *o)
1629 {
1630     int r1 = get_field(s, r1);
1631     int r3 = get_field(s, r3);
1632     bool is_imm = have_field(s, i2);
1633     int imm = is_imm ? get_field(s, i2) : 0;
1634     DisasCompare c;
1635     TCGv_i64 t;
1636 
1637     c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1638     c.is_64 = false;
1639 
1640     t = tcg_temp_new_i64();
1641     tcg_gen_add_i64(t, regs[r1], regs[r3]);
1642     c.u.s32.a = tcg_temp_new_i32();
1643     c.u.s32.b = tcg_temp_new_i32();
1644     tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1645     tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
1646     store_reg32_i64(r1, t);
1647 
1648     return help_branch(s, &c, is_imm, imm, o->in2);
1649 }
1650 
1651 static DisasJumpType op_bx64(DisasContext *s, DisasOps *o)
1652 {
1653     int r1 = get_field(s, r1);
1654     int r3 = get_field(s, r3);
1655     bool is_imm = have_field(s, i2);
1656     int imm = is_imm ? get_field(s, i2) : 0;
1657     DisasCompare c;
1658 
1659     c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1660     c.is_64 = true;
1661 
1662     if (r1 == (r3 | 1)) {
1663         c.u.s64.b = load_reg(r3 | 1);
1664     } else {
1665         c.u.s64.b = regs[r3 | 1];
1666     }
1667 
1668     tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
1669     c.u.s64.a = regs[r1];
1670 
1671     return help_branch(s, &c, is_imm, imm, o->in2);
1672 }
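/*
 * In both BXH/BXLE flavours above, the increment is r3 and the comparand
 * is r3 | 1 (the odd register of the pair, or r3 itself when r3 is odd);
 * op_bx64 copies the comparand first in case r1 aliases it.
 */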
1673 
1674 static DisasJumpType op_cj(DisasContext *s, DisasOps *o)
1675 {
1676     int imm, m3 = get_field(s, m3);
1677     bool is_imm;
1678     DisasCompare c;
1679 
1680     c.cond = ltgt_cond[m3];
1681     if (s->insn->data) {
1682         c.cond = tcg_unsigned_cond(c.cond);
1683     }
1684     c.is_64 = true;
1685     c.u.s64.a = o->in1;
1686     c.u.s64.b = o->in2;
1687 
1688     is_imm = have_field(s, i4);
1689     if (is_imm) {
1690         imm = get_field(s, i4);
1691     } else {
1692         imm = 0;
1693         o->out = get_address(s, 0, get_field(s, b4),
1694                              get_field(s, d4));
1695     }
1696 
1697     return help_branch(s, &c, is_imm, imm, o->out);
1698 }
1699 
1700 static DisasJumpType op_ceb(DisasContext *s, DisasOps *o)
1701 {
1702     gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
1703     set_cc_static(s);
1704     return DISAS_NEXT;
1705 }
1706 
1707 static DisasJumpType op_cdb(DisasContext *s, DisasOps *o)
1708 {
1709     gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
1710     set_cc_static(s);
1711     return DISAS_NEXT;
1712 }
1713 
1714 static DisasJumpType op_cxb(DisasContext *s, DisasOps *o)
1715 {
1716     gen_helper_cxb(cc_op, cpu_env, o->in1_128, o->in2_128);
1717     set_cc_static(s);
1718     return DISAS_NEXT;
1719 }
1720 
1721 static TCGv_i32 fpinst_extract_m34(DisasContext *s, bool m3_with_fpe,
1722                                    bool m4_with_fpe)
1723 {
1724     const bool fpe = s390_has_feat(S390_FEAT_FLOATING_POINT_EXT);
1725     uint8_t m3 = get_field(s, m3);
1726     uint8_t m4 = get_field(s, m4);
1727 
1728     /* m3 field was introduced with FPE */
1729     if (!fpe && m3_with_fpe) {
1730         m3 = 0;
1731     }
1732     /* m4 field was introduced with FPE */
1733     if (!fpe && m4_with_fpe) {
1734         m4 = 0;
1735     }
1736 
1737     /* Check for valid rounding modes. Mode 3 was introduced later. */
1738     if (m3 == 2 || m3 > 7 || (!fpe && m3 == 3)) {
1739         gen_program_exception(s, PGM_SPECIFICATION);
1740         return NULL;
1741     }
1742 
1743     return tcg_constant_i32(deposit32(m3, 4, 4, m4));
1744 }
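/* The helper thus receives m3 in bits 0-3 and m4 in bits 4-7 of a single
   constant; e.g. m3 = 5, m4 = 1 encodes as 0x15 (illustrative). */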
1745 
1746 static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o)
1747 {
1748     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1749 
1750     if (!m34) {
1751         return DISAS_NORETURN;
1752     }
1753     gen_helper_cfeb(o->out, cpu_env, o->in2, m34);
1754     set_cc_static(s);
1755     return DISAS_NEXT;
1756 }
1757 
1758 static DisasJumpType op_cfdb(DisasContext *s, DisasOps *o)
1759 {
1760     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1761 
1762     if (!m34) {
1763         return DISAS_NORETURN;
1764     }
1765     gen_helper_cfdb(o->out, cpu_env, o->in2, m34);
1766     set_cc_static(s);
1767     return DISAS_NEXT;
1768 }
1769 
1770 static DisasJumpType op_cfxb(DisasContext *s, DisasOps *o)
1771 {
1772     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1773 
1774     if (!m34) {
1775         return DISAS_NORETURN;
1776     }
1777     gen_helper_cfxb(o->out, cpu_env, o->in2_128, m34);
1778     set_cc_static(s);
1779     return DISAS_NEXT;
1780 }
1781 
1782 static DisasJumpType op_cgeb(DisasContext *s, DisasOps *o)
1783 {
1784     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1785 
1786     if (!m34) {
1787         return DISAS_NORETURN;
1788     }
1789     gen_helper_cgeb(o->out, cpu_env, o->in2, m34);
1790     set_cc_static(s);
1791     return DISAS_NEXT;
1792 }
1793 
1794 static DisasJumpType op_cgdb(DisasContext *s, DisasOps *o)
1795 {
1796     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1797 
1798     if (!m34) {
1799         return DISAS_NORETURN;
1800     }
1801     gen_helper_cgdb(o->out, cpu_env, o->in2, m34);
1802     set_cc_static(s);
1803     return DISAS_NEXT;
1804 }
1805 
1806 static DisasJumpType op_cgxb(DisasContext *s, DisasOps *o)
1807 {
1808     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1809 
1810     if (!m34) {
1811         return DISAS_NORETURN;
1812     }
1813     gen_helper_cgxb(o->out, cpu_env, o->in2_128, m34);
1814     set_cc_static(s);
1815     return DISAS_NEXT;
1816 }
1817 
1818 static DisasJumpType op_clfeb(DisasContext *s, DisasOps *o)
1819 {
1820     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1821 
1822     if (!m34) {
1823         return DISAS_NORETURN;
1824     }
1825     gen_helper_clfeb(o->out, cpu_env, o->in2, m34);
1826     set_cc_static(s);
1827     return DISAS_NEXT;
1828 }
1829 
1830 static DisasJumpType op_clfdb(DisasContext *s, DisasOps *o)
1831 {
1832     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1833 
1834     if (!m34) {
1835         return DISAS_NORETURN;
1836     }
1837     gen_helper_clfdb(o->out, cpu_env, o->in2, m34);
1838     set_cc_static(s);
1839     return DISAS_NEXT;
1840 }
1841 
1842 static DisasJumpType op_clfxb(DisasContext *s, DisasOps *o)
1843 {
1844     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1845 
1846     if (!m34) {
1847         return DISAS_NORETURN;
1848     }
1849     gen_helper_clfxb(o->out, cpu_env, o->in2_128, m34);
1850     set_cc_static(s);
1851     return DISAS_NEXT;
1852 }
1853 
1854 static DisasJumpType op_clgeb(DisasContext *s, DisasOps *o)
1855 {
1856     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1857 
1858     if (!m34) {
1859         return DISAS_NORETURN;
1860     }
1861     gen_helper_clgeb(o->out, cpu_env, o->in2, m34);
1862     set_cc_static(s);
1863     return DISAS_NEXT;
1864 }
1865 
1866 static DisasJumpType op_clgdb(DisasContext *s, DisasOps *o)
1867 {
1868     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1869 
1870     if (!m34) {
1871         return DISAS_NORETURN;
1872     }
1873     gen_helper_clgdb(o->out, cpu_env, o->in2, m34);
1874     set_cc_static(s);
1875     return DISAS_NEXT;
1876 }
1877 
1878 static DisasJumpType op_clgxb(DisasContext *s, DisasOps *o)
1879 {
1880     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1881 
1882     if (!m34) {
1883         return DISAS_NORETURN;
1884     }
1885     gen_helper_clgxb(o->out, cpu_env, o->in2_128, m34);
1886     set_cc_static(s);
1887     return DISAS_NEXT;
1888 }
1889 
1890 static DisasJumpType op_cegb(DisasContext *s, DisasOps *o)
1891 {
1892     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
1893 
1894     if (!m34) {
1895         return DISAS_NORETURN;
1896     }
1897     gen_helper_cegb(o->out, cpu_env, o->in2, m34);
1898     return DISAS_NEXT;
1899 }
1900 
1901 static DisasJumpType op_cdgb(DisasContext *s, DisasOps *o)
1902 {
1903     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
1904 
1905     if (!m34) {
1906         return DISAS_NORETURN;
1907     }
1908     gen_helper_cdgb(o->out, cpu_env, o->in2, m34);
1909     return DISAS_NEXT;
1910 }
1911 
1912 static DisasJumpType op_cxgb(DisasContext *s, DisasOps *o)
1913 {
1914     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
1915 
1916     if (!m34) {
1917         return DISAS_NORETURN;
1918     }
1919     gen_helper_cxgb(o->out_128, cpu_env, o->in2, m34);
1920     return DISAS_NEXT;
1921 }
1922 
1923 static DisasJumpType op_celgb(DisasContext *s, DisasOps *o)
1924 {
1925     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1926 
1927     if (!m34) {
1928         return DISAS_NORETURN;
1929     }
1930     gen_helper_celgb(o->out, cpu_env, o->in2, m34);
1931     return DISAS_NEXT;
1932 }
1933 
1934 static DisasJumpType op_cdlgb(DisasContext *s, DisasOps *o)
1935 {
1936     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1937 
1938     if (!m34) {
1939         return DISAS_NORETURN;
1940     }
1941     gen_helper_cdlgb(o->out, cpu_env, o->in2, m34);
1942     return DISAS_NEXT;
1943 }
1944 
1945 static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o)
1946 {
1947     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1948 
1949     if (!m34) {
1950         return DISAS_NORETURN;
1951     }
1952     gen_helper_cxlgb(o->out_128, cpu_env, o->in2, m34);
1953     return DISAS_NEXT;
1954 }
1955 
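/*
 * CHECKSUM.  The helper returns the checksum in the low half and the
 * number of bytes consumed in the high half of a 128-bit pair; the
 * latter advances the R2 (address) / R2+1 (length) register pair.
 */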
1956 static DisasJumpType op_cksm(DisasContext *s, DisasOps *o)
1957 {
1958     int r2 = get_field(s, r2);
1959     TCGv_i128 pair = tcg_temp_new_i128();
1960     TCGv_i64 len = tcg_temp_new_i64();
1961 
1962     gen_helper_cksm(pair, cpu_env, o->in1, o->in2, regs[r2 + 1]);
1963     set_cc_static(s);
1964     tcg_gen_extr_i128_i64(o->out, len, pair);
1965 
1966     tcg_gen_add_i64(regs[r2], regs[r2], len);
1967     tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
1968 
1969     return DISAS_NEXT;
1970 }
1971 
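/*
 * COMPARE LOGICAL.  Operand lengths of 1, 2, 4 and 8 bytes are inlined
 * as two loads plus an unsigned comparison; all other lengths go
 * through the out-of-line helper.
 */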
1972 static DisasJumpType op_clc(DisasContext *s, DisasOps *o)
1973 {
1974     int l = get_field(s, l1);
1975     TCGv_i32 vl;
1976 
1977     switch (l + 1) {
1978     case 1:
1979         tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
1980         tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
1981         break;
1982     case 2:
1983         tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
1984         tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
1985         break;
1986     case 4:
1987         tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
1988         tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
1989         break;
1990     case 8:
1991         tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
1992         tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
1993         break;
1994     default:
1995         vl = tcg_constant_i32(l);
1996         gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
1997         set_cc_static(s);
1998         return DISAS_NEXT;
1999     }
2000     gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
2001     return DISAS_NEXT;
2002 }
2003 
2004 static DisasJumpType op_clcl(DisasContext *s, DisasOps *o)
2005 {
2006     int r1 = get_field(s, r1);
2007     int r2 = get_field(s, r2);
2008     TCGv_i32 t1, t2;
2009 
2010     /* r1 and r2 must be even.  */
2011     if (r1 & 1 || r2 & 1) {
2012         gen_program_exception(s, PGM_SPECIFICATION);
2013         return DISAS_NORETURN;
2014     }
2015 
2016     t1 = tcg_constant_i32(r1);
2017     t2 = tcg_constant_i32(r2);
2018     gen_helper_clcl(cc_op, cpu_env, t1, t2);
2019     set_cc_static(s);
2020     return DISAS_NEXT;
2021 }
2022 
2023 static DisasJumpType op_clcle(DisasContext *s, DisasOps *o)
2024 {
2025     int r1 = get_field(s, r1);
2026     int r3 = get_field(s, r3);
2027     TCGv_i32 t1, t3;
2028 
2029     /* r1 and r3 must be even.  */
2030     if (r1 & 1 || r3 & 1) {
2031         gen_program_exception(s, PGM_SPECIFICATION);
2032         return DISAS_NORETURN;
2033     }
2034 
2035     t1 = tcg_constant_i32(r1);
2036     t3 = tcg_constant_i32(r3);
2037     gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
2038     set_cc_static(s);
2039     return DISAS_NEXT;
2040 }
2041 
2042 static DisasJumpType op_clclu(DisasContext *s, DisasOps *o)
2043 {
2044     int r1 = get_field(s, r1);
2045     int r3 = get_field(s, r3);
2046     TCGv_i32 t1, t3;
2047 
2048     /* r1 and r3 must be even.  */
2049     if (r1 & 1 || r3 & 1) {
2050         gen_program_exception(s, PGM_SPECIFICATION);
2051         return DISAS_NORETURN;
2052     }
2053 
2054     t1 = tcg_constant_i32(r1);
2055     t3 = tcg_constant_i32(r3);
2056     gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3);
2057     set_cc_static(s);
2058     return DISAS_NEXT;
2059 }
2060 
2061 static DisasJumpType op_clm(DisasContext *s, DisasOps *o)
2062 {
2063     TCGv_i32 m3 = tcg_constant_i32(get_field(s, m3));
2064     TCGv_i32 t1 = tcg_temp_new_i32();
2065 
2066     tcg_gen_extrl_i64_i32(t1, o->in1);
2067     gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
2068     set_cc_static(s);
2069     return DISAS_NEXT;
2070 }
2071 
2072 static DisasJumpType op_clst(DisasContext *s, DisasOps *o)
2073 {
2074     TCGv_i128 pair = tcg_temp_new_i128();
2075 
2076     gen_helper_clst(pair, cpu_env, regs[0], o->in1, o->in2);
2077     tcg_gen_extr_i128_i64(o->in2, o->in1, pair);
2078 
2079     set_cc_static(s);
2080     return DISAS_NEXT;
2081 }
2082 
2083 static DisasJumpType op_cps(DisasContext *s, DisasOps *o)
2084 {
2085     TCGv_i64 t = tcg_temp_new_i64();
2086     tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
2087     tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
2088     tcg_gen_or_i64(o->out, o->out, t);
2089     return DISAS_NEXT;
2090 }
2091 
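/*
 * COMPARE AND SWAP.  Maps directly onto the TCG atomic cmpxchg; CC is
 * 0 when the exchange happened and 1 when it did not, hence the
 * not-equal sense of the setcond below.
 */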
2092 static DisasJumpType op_cs(DisasContext *s, DisasOps *o)
2093 {
2094     int d2 = get_field(s, d2);
2095     int b2 = get_field(s, b2);
2096     TCGv_i64 addr, cc;
2097 
2098     /* Note that in1 = R3 (new value) and
2099        in2 = (zero-extended) R1 (expected value).  */
2100 
2101     addr = get_address(s, 0, b2, d2);
2102     tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
2103                                get_mem_index(s), s->insn->data | MO_ALIGN);
2104 
2105     /* Are the memory and expected values (un)equal?  Note that this setcond
2106        produces the output CC value, thus the NE sense of the test.  */
2107     cc = tcg_temp_new_i64();
2108     tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
2109     tcg_gen_extrl_i64_i32(cc_op, cc);
2110     set_cc_static(s);
2111 
2112     return DISAS_NEXT;
2113 }
2114 
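/*
 * COMPARE DOUBLE AND SWAP (CDSG): a 16-byte compare-and-swap on the
 * even/odd register pairs R1:R1+1 (expected value) and R3:R3+1 (new
 * value), implemented with the 128-bit TCG cmpxchg.
 */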
2115 static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o)
2116 {
2117     int r1 = get_field(s, r1);
2118 
2119     o->out_128 = tcg_temp_new_i128();
2120     tcg_gen_concat_i64_i128(o->out_128, regs[r1 + 1], regs[r1]);
2121 
2122     /* Note out (R1:R1+1) = expected value and in2 (R3:R3+1) = new value.  */
2123     tcg_gen_atomic_cmpxchg_i128(o->out_128, o->addr1, o->out_128, o->in2_128,
2124                                 get_mem_index(s), MO_BE | MO_128 | MO_ALIGN);
2125 
2126     /*
2127      * Extract result into cc_dst:cc_src, compare vs the expected value
2128      * in the as yet unmodified input registers, then update CC_OP.
2129      */
2130     tcg_gen_extr_i128_i64(cc_src, cc_dst, o->out_128);
2131     tcg_gen_xor_i64(cc_dst, cc_dst, regs[r1]);
2132     tcg_gen_xor_i64(cc_src, cc_src, regs[r1 + 1]);
2133     tcg_gen_or_i64(cc_dst, cc_dst, cc_src);
2134     set_cc_nz_u64(s, cc_dst);
2135 
2136     return DISAS_NEXT;
2137 }
2138 
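/*
 * COMPARE AND SWAP AND STORE.  The operation touches two distinct
 * storage operands, so it is handled entirely in helpers, with
 * separate variants for the parallel and serial cases.
 */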
2139 static DisasJumpType op_csst(DisasContext *s, DisasOps *o)
2140 {
2141     int r3 = get_field(s, r3);
2142     TCGv_i32 t_r3 = tcg_constant_i32(r3);
2143 
2144     if (tb_cflags(s->base.tb) & CF_PARALLEL) {
2145         gen_helper_csst_parallel(cc_op, cpu_env, t_r3, o->addr1, o->in2);
2146     } else {
2147         gen_helper_csst(cc_op, cpu_env, t_r3, o->addr1, o->in2);
2148     }
2149 
2150     set_cc_static(s);
2151     return DISAS_NEXT;
2152 }
2153 
2154 #ifndef CONFIG_USER_ONLY
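/*
 * COMPARE AND SWAP AND PURGE.  Besides the compare-and-swap proper, a
 * successful exchange with the LSB of R2 set requires purging the TLBs
 * of all CPUs, done via the purge helper below.
 */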
2155 static DisasJumpType op_csp(DisasContext *s, DisasOps *o)
2156 {
2157     MemOp mop = s->insn->data;
2158     TCGv_i64 addr, old, cc;
2159     TCGLabel *lab = gen_new_label();
2160 
2161     /* Note that in1 = R1 (zero-extended expected value),
2162        out = R1 (original reg), out2 = R1+1 (new value).  */
2163 
2164     addr = tcg_temp_new_i64();
2165     old = tcg_temp_new_i64();
2166     tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
2167     tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
2168                                get_mem_index(s), mop | MO_ALIGN);
2169 
2170     /* Are the memory and expected values (un)equal?  */
2171     cc = tcg_temp_new_i64();
2172     tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
2173     tcg_gen_extrl_i64_i32(cc_op, cc);
2174 
2175     /* Write back the output now, so that it happens before the
2176        following branch and we don't need local temps.  */
2177     if ((mop & MO_SIZE) == MO_32) {
2178         tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
2179     } else {
2180         tcg_gen_mov_i64(o->out, old);
2181     }
2182 
2183     /* If the comparison was equal, and the LSB of R2 was set,
2184        then we need to flush the TLB (for all cpus).  */
2185     tcg_gen_xori_i64(cc, cc, 1);
2186     tcg_gen_and_i64(cc, cc, o->in2);
2187     tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
2188 
2189     gen_helper_purge(cpu_env);
2190     gen_set_label(lab);
2191 
2192     return DISAS_NEXT;
2193 }
2194 #endif
2195 
2196 static DisasJumpType op_cvd(DisasContext *s, DisasOps *o)
2197 {
2198     TCGv_i64 t1 = tcg_temp_new_i64();
2199     TCGv_i32 t2 = tcg_temp_new_i32();
2200     tcg_gen_extrl_i64_i32(t2, o->in1);
2201     gen_helper_cvd(t1, t2);
2202     tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
2203     return DISAS_NEXT;
2204 }
2205 
2206 static DisasJumpType op_ct(DisasContext *s, DisasOps *o)
2207 {
2208     int m3 = get_field(s, m3);
2209     TCGLabel *lab = gen_new_label();
2210     TCGCond c;
2211 
2212     c = tcg_invert_cond(ltgt_cond[m3]);
2213     if (s->insn->data) {
2214         c = tcg_unsigned_cond(c);
2215     }
2216     tcg_gen_brcond_i64(c, o->in1, o->in2, lab);
2217 
2218     /* Trap.  */
2219     gen_trap(s);
2220 
2221     gen_set_label(lab);
2222     return DISAS_NEXT;
2223 }
2224 
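/*
 * CONVERT UTF-8/16/32 (CU12, CU14, CU21, CU24, CU41, CU42).  In
 * insn->data, the digits 1, 2 and 4 stand for UTF-8, UTF-16 and
 * UTF-32, so e.g. 21 converts UTF-16 to UTF-8.  The m3 bit requesting
 * well-formedness checking is honored only with the ETF3 enhancement.
 */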
2225 static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
2226 {
2227     int m3 = get_field(s, m3);
2228     int r1 = get_field(s, r1);
2229     int r2 = get_field(s, r2);
2230     TCGv_i32 tr1, tr2, chk;
2231 
2232     /* R1 and R2 must both be even.  */
2233     if ((r1 | r2) & 1) {
2234         gen_program_exception(s, PGM_SPECIFICATION);
2235         return DISAS_NORETURN;
2236     }
2237     if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
2238         m3 = 0;
2239     }
2240 
2241     tr1 = tcg_constant_i32(r1);
2242     tr2 = tcg_constant_i32(r2);
2243     chk = tcg_constant_i32(m3);
2244 
2245     switch (s->insn->data) {
2246     case 12:
2247         gen_helper_cu12(cc_op, cpu_env, tr1, tr2, chk);
2248         break;
2249     case 14:
2250         gen_helper_cu14(cc_op, cpu_env, tr1, tr2, chk);
2251         break;
2252     case 21:
2253         gen_helper_cu21(cc_op, cpu_env, tr1, tr2, chk);
2254         break;
2255     case 24:
2256         gen_helper_cu24(cc_op, cpu_env, tr1, tr2, chk);
2257         break;
2258     case 41:
2259         gen_helper_cu41(cc_op, cpu_env, tr1, tr2, chk);
2260         break;
2261     case 42:
2262         gen_helper_cu42(cc_op, cpu_env, tr1, tr2, chk);
2263         break;
2264     default:
2265         g_assert_not_reached();
2266     }
2267 
2268     set_cc_static(s);
2269     return DISAS_NEXT;
2270 }
2271 
2272 #ifndef CONFIG_USER_ONLY
2273 static DisasJumpType op_diag(DisasContext *s, DisasOps *o)
2274 {
2275     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
2276     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
2277     TCGv_i32 func_code = tcg_constant_i32(get_field(s, i2));
2278 
2279     gen_helper_diag(cpu_env, r1, r3, func_code);
2280     return DISAS_NEXT;
2281 }
2282 #endif
2283 
2284 static DisasJumpType op_divs32(DisasContext *s, DisasOps *o)
2285 {
2286     gen_helper_divs32(o->out, cpu_env, o->in1, o->in2);
2287     tcg_gen_extr32_i64(o->out2, o->out, o->out);
2288     return DISAS_NEXT;
2289 }
2290 
2291 static DisasJumpType op_divu32(DisasContext *s, DisasOps *o)
2292 {
2293     gen_helper_divu32(o->out, cpu_env, o->in1, o->in2);
2294     tcg_gen_extr32_i64(o->out2, o->out, o->out);
2295     return DISAS_NEXT;
2296 }
2297 
2298 static DisasJumpType op_divs64(DisasContext *s, DisasOps *o)
2299 {
2300     TCGv_i128 t = tcg_temp_new_i128();
2301 
2302     gen_helper_divs64(t, cpu_env, o->in1, o->in2);
2303     tcg_gen_extr_i128_i64(o->out2, o->out, t);
2304     return DISAS_NEXT;
2305 }
2306 
2307 static DisasJumpType op_divu64(DisasContext *s, DisasOps *o)
2308 {
2309     TCGv_i128 t = tcg_temp_new_i128();
2310 
2311     gen_helper_divu64(t, cpu_env, o->out, o->out2, o->in2);
2312     tcg_gen_extr_i128_i64(o->out2, o->out, t);
2313     return DISAS_NEXT;
2314 }
2315 
2316 static DisasJumpType op_deb(DisasContext *s, DisasOps *o)
2317 {
2318     gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
2319     return DISAS_NEXT;
2320 }
2321 
2322 static DisasJumpType op_ddb(DisasContext *s, DisasOps *o)
2323 {
2324     gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
2325     return DISAS_NEXT;
2326 }
2327 
2328 static DisasJumpType op_dxb(DisasContext *s, DisasOps *o)
2329 {
2330     gen_helper_dxb(o->out_128, cpu_env, o->in1_128, o->in2_128);
2331     return DISAS_NEXT;
2332 }
2333 
2334 static DisasJumpType op_ear(DisasContext *s, DisasOps *o)
2335 {
2336     int r2 = get_field(s, r2);
2337     tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
2338     return DISAS_NEXT;
2339 }
2340 
2341 static DisasJumpType op_ecag(DisasContext *s, DisasOps *o)
2342 {
2343     /* No cache information provided.  */
2344     tcg_gen_movi_i64(o->out, -1);
2345     return DISAS_NEXT;
2346 }
2347 
2348 static DisasJumpType op_efpc(DisasContext *s, DisasOps *o)
2349 {
2350     tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
2351     return DISAS_NEXT;
2352 }
2353 
2354 static DisasJumpType op_epsw(DisasContext *s, DisasOps *o)
2355 {
2356     int r1 = get_field(s, r1);
2357     int r2 = get_field(s, r2);
2358     TCGv_i64 t = tcg_temp_new_i64();
2359 
2360     /* Note the "subsequently" in the PoO, which implies a defined result
2361        if r1 == r2.  Thus we cannot defer these writes to an output hook.  */
2362     tcg_gen_shri_i64(t, psw_mask, 32);
2363     store_reg32_i64(r1, t);
2364     if (r2 != 0) {
2365         store_reg32_i64(r2, psw_mask);
2366     }
2367     return DISAS_NEXT;
2368 }
2369 
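/*
 * EXECUTE.  Unless R1 is zero, bits 56-63 of R1 are ORed into the
 * second byte of the target instruction; the helper stashes the
 * modified instruction in ex_value so that translation resumes with
 * it.  Nested EXECUTE is rejected up front.
 */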
2370 static DisasJumpType op_ex(DisasContext *s, DisasOps *o)
2371 {
2372     int r1 = get_field(s, r1);
2373     TCGv_i32 ilen;
2374     TCGv_i64 v1;
2375 
2376     /* Nested EXECUTE is not allowed.  */
2377     if (unlikely(s->ex_value)) {
2378         gen_program_exception(s, PGM_EXECUTE);
2379         return DISAS_NORETURN;
2380     }
2381 
2382     update_psw_addr(s);
2383     update_cc_op(s);
2384 
2385     if (r1 == 0) {
2386         v1 = tcg_constant_i64(0);
2387     } else {
2388         v1 = regs[r1];
2389     }
2390 
2391     ilen = tcg_constant_i32(s->ilen);
2392     gen_helper_ex(cpu_env, ilen, v1, o->in2);
2393 
2394     return DISAS_PC_CC_UPDATED;
2395 }
2396 
2397 static DisasJumpType op_fieb(DisasContext *s, DisasOps *o)
2398 {
2399     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2400 
2401     if (!m34) {
2402         return DISAS_NORETURN;
2403     }
2404     gen_helper_fieb(o->out, cpu_env, o->in2, m34);
2405     return DISAS_NEXT;
2406 }
2407 
2408 static DisasJumpType op_fidb(DisasContext *s, DisasOps *o)
2409 {
2410     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2411 
2412     if (!m34) {
2413         return DISAS_NORETURN;
2414     }
2415     gen_helper_fidb(o->out, cpu_env, o->in2, m34);
2416     return DISAS_NEXT;
2417 }
2418 
2419 static DisasJumpType op_fixb(DisasContext *s, DisasOps *o)
2420 {
2421     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2422 
2423     if (!m34) {
2424         return DISAS_NORETURN;
2425     }
2426     gen_helper_fixb(o->out_128, cpu_env, o->in2_128, m34);
2427     return DISAS_NEXT;
2428 }
2429 
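/*
 * FIND LEFTMOST ONE.  R1 receives the bit number of the leftmost one
 * bit (0 = MSB), or 64 if the input is zero; R1+1 receives the input
 * with that bit cleared.  E.g. an input of 0x4000000000000000 yields
 * R1 = 1 and R1+1 = 0.
 */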
2430 static DisasJumpType op_flogr(DisasContext *s, DisasOps *o)
2431 {
2432     /* We'll use the original input for cc computation, since we get to
2433        compare that against 0, which ought to be better than comparing
2434        the real output against 64.  It also lets cc_dst be a convenient
2435        temporary during our computation.  */
2436     gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);
2437 
2438     /* R1 = IN ? CLZ(IN) : 64.  */
2439     tcg_gen_clzi_i64(o->out, o->in2, 64);
2440 
2441     /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
2442        value by 64, which is undefined.  But since the shift is 64 iff the
2443        input is zero, we still get the correct result after and'ing.  */
2444     tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
2445     tcg_gen_shr_i64(o->out2, o->out2, o->out);
2446     tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
2447     return DISAS_NEXT;
2448 }
2449 
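/*
 * INSERT CHARACTERS UNDER MASK.  Each m3 bit selects one byte lane of
 * the 32-bit field at bit offset insn->data.  A mask naming a single
 * run of 1, 2 or 4 contiguous bytes becomes one wide load; everything
 * else becomes a byte-by-byte load-and-deposit loop.  The ccm mask
 * records which result bits participate in the CC computation.
 */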
2450 static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
2451 {
2452     int m3 = get_field(s, m3);
2453     int pos, len, base = s->insn->data;
2454     TCGv_i64 tmp = tcg_temp_new_i64();
2455     uint64_t ccm;
2456 
2457     switch (m3) {
2458     case 0xf:
2459         /* Effectively a 32-bit load.  */
2460         tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2461         len = 32;
2462         goto one_insert;
2463 
2464     case 0xc:
2465     case 0x6:
2466     case 0x3:
2467         /* Effectively a 16-bit load.  */
2468         tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2469         len = 16;
2470         goto one_insert;
2471 
2472     case 0x8:
2473     case 0x4:
2474     case 0x2:
2475     case 0x1:
2476         /* Effectively an 8-bit load.  */
2477         tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2478         len = 8;
2479         goto one_insert;
2480 
2481     one_insert:
2482         pos = base + ctz32(m3) * 8;
2483         tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2484         ccm = ((1ull << len) - 1) << pos;
2485         break;
2486 
2487     default:
2488         /* This is going to be a sequence of loads and inserts.  */
2489         pos = base + 32 - 8;
2490         ccm = 0;
2491         while (m3) {
2492             if (m3 & 0x8) {
2493                 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2494                 tcg_gen_addi_i64(o->in2, o->in2, 1);
2495                 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2496                 ccm |= 0xffull << pos;
2497             }
2498             m3 = (m3 << 1) & 0xf;
2499             pos -= 8;
2500         }
2501         break;
2502     }
2503 
2504     tcg_gen_movi_i64(tmp, ccm);
2505     gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2506     return DISAS_NEXT;
2507 }
2508 
2509 static DisasJumpType op_insi(DisasContext *s, DisasOps *o)
2510 {
2511     int shift = s->insn->data & 0xff;
2512     int size = s->insn->data >> 8;
2513     tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
2514     return DISAS_NEXT;
2515 }
2516 
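/*
 * INSERT PROGRAM MASK.  Builds the byte (CC << 4) | program mask and
 * deposits it into bits 32-39 of R1 (TCG bits 24-31), leaving the
 * rest of the register intact.
 */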
2517 static DisasJumpType op_ipm(DisasContext *s, DisasOps *o)
2518 {
2519     TCGv_i64 t1, t2;
2520 
2521     gen_op_calc_cc(s);
2522     t1 = tcg_temp_new_i64();
2523     tcg_gen_extract_i64(t1, psw_mask, 40, 4);
2524     t2 = tcg_temp_new_i64();
2525     tcg_gen_extu_i32_i64(t2, cc_op);
2526     tcg_gen_deposit_i64(t1, t1, t2, 4, 60);
2527     tcg_gen_deposit_i64(o->out, o->out, t1, 24, 8);
2528     return DISAS_NEXT;
2529 }
2530 
2531 #ifndef CONFIG_USER_ONLY
2532 static DisasJumpType op_idte(DisasContext *s, DisasOps *o)
2533 {
2534     TCGv_i32 m4;
2535 
2536     if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2537         m4 = tcg_constant_i32(get_field(s, m4));
2538     } else {
2539         m4 = tcg_constant_i32(0);
2540     }
2541     gen_helper_idte(cpu_env, o->in1, o->in2, m4);
2542     return DISAS_NEXT;
2543 }
2544 
2545 static DisasJumpType op_ipte(DisasContext *s, DisasOps *o)
2546 {
2547     TCGv_i32 m4;
2548 
2549     if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2550         m4 = tcg_constant_i32(get_field(s, m4));
2551     } else {
2552         m4 = tcg_constant_i32(0);
2553     }
2554     gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
2555     return DISAS_NEXT;
2556 }
2557 
2558 static DisasJumpType op_iske(DisasContext *s, DisasOps *o)
2559 {
2560     gen_helper_iske(o->out, cpu_env, o->in2);
2561     return DISAS_NEXT;
2562 }
2563 #endif
2564 
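/*
 * Common front end for the message-security-assist instructions (KM,
 * KMC, KMAC, KIMD, PPNO, ...).  The switch validates the register
 * operands required by each function type; the cases deliberately fall
 * through so that stricter types inherit the checks of the weaker ones
 * before everything is handed to a single helper.
 */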
2565 static DisasJumpType op_msa(DisasContext *s, DisasOps *o)
2566 {
2567     int r1 = have_field(s, r1) ? get_field(s, r1) : 0;
2568     int r2 = have_field(s, r2) ? get_field(s, r2) : 0;
2569     int r3 = have_field(s, r3) ? get_field(s, r3) : 0;
2570     TCGv_i32 t_r1, t_r2, t_r3, type;
2571 
2572     switch (s->insn->data) {
2573     case S390_FEAT_TYPE_KMA:
2574         if (r3 == r1 || r3 == r2) {
2575             gen_program_exception(s, PGM_SPECIFICATION);
2576             return DISAS_NORETURN;
2577         }
2578         /* FALL THROUGH */
2579     case S390_FEAT_TYPE_KMCTR:
2580         if (r3 & 1 || !r3) {
2581             gen_program_exception(s, PGM_SPECIFICATION);
2582             return DISAS_NORETURN;
2583         }
2584         /* FALL THROUGH */
2585     case S390_FEAT_TYPE_PPNO:
2586     case S390_FEAT_TYPE_KMF:
2587     case S390_FEAT_TYPE_KMC:
2588     case S390_FEAT_TYPE_KMO:
2589     case S390_FEAT_TYPE_KM:
2590         if (r1 & 1 || !r1) {
2591             gen_program_exception(s, PGM_SPECIFICATION);
2592             return DISAS_NORETURN;
2593         }
2594         /* FALL THROUGH */
2595     case S390_FEAT_TYPE_KMAC:
2596     case S390_FEAT_TYPE_KIMD:
2597     case S390_FEAT_TYPE_KLMD:
2598         if (r2 & 1 || !r2) {
2599             gen_program_exception(s, PGM_SPECIFICATION);
2600             return DISAS_NORETURN;
2601         }
2602         /* FALL THROUGH */
2603     case S390_FEAT_TYPE_PCKMO:
2604     case S390_FEAT_TYPE_PCC:
2605         break;
2606     default:
2607         g_assert_not_reached();
2608     }
2609 
2610     t_r1 = tcg_constant_i32(r1);
2611     t_r2 = tcg_constant_i32(r2);
2612     t_r3 = tcg_constant_i32(r3);
2613     type = tcg_constant_i32(s->insn->data);
2614     gen_helper_msa(cc_op, cpu_env, t_r1, t_r2, t_r3, type);
2615     set_cc_static(s);
2616     return DISAS_NEXT;
2617 }
2618 
2619 static DisasJumpType op_keb(DisasContext *s, DisasOps *o)
2620 {
2621     gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
2622     set_cc_static(s);
2623     return DISAS_NEXT;
2624 }
2625 
2626 static DisasJumpType op_kdb(DisasContext *s, DisasOps *o)
2627 {
2628     gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
2629     set_cc_static(s);
2630     return DISAS_NEXT;
2631 }
2632 
2633 static DisasJumpType op_kxb(DisasContext *s, DisasOps *o)
2634 {
2635     gen_helper_kxb(cc_op, cpu_env, o->in1_128, o->in2_128);
2636     set_cc_static(s);
2637     return DISAS_NEXT;
2638 }
2639 
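/*
 * LOAD AND ADD/AND/OR/EXCLUSIVE OR.  The interlocked update is a TCG
 * atomic fetch-op returning the original memory value, which is the
 * instruction's R1 result; the operation is then redone non-atomically
 * purely to derive the CC.
 */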
2640 static DisasJumpType op_laa(DisasContext *s, DisasOps *o)
2641 {
2642     /* The real output is the original value in memory, which the
2643        atomic fetch-add below returns directly.  */
2644     tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2645                                  s->insn->data | MO_ALIGN);
2646     /* However, we need to recompute the addition for setting CC.  */
2647     tcg_gen_add_i64(o->out, o->in1, o->in2);
2648     return DISAS_NEXT;
2649 }
2650 
2651 static DisasJumpType op_lan(DisasContext *s, DisasOps *o)
2652 {
2653     /* The real output is the original value in memory, which the
2654        atomic fetch-and below returns directly.  */
2655     tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2656                                  s->insn->data | MO_ALIGN);
2657     /* However, we need to recompute the operation for setting CC.  */
2658     tcg_gen_and_i64(o->out, o->in1, o->in2);
2659     return DISAS_NEXT;
2660 }
2661 
2662 static DisasJumpType op_lao(DisasContext *s, DisasOps *o)
2663 {
2664     /* The real output is the original value in memory, which the
2665        atomic fetch-or below returns directly.  */
2666     tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2667                                 s->insn->data | MO_ALIGN);
2668     /* However, we need to recompute the operation for setting CC.  */
2669     tcg_gen_or_i64(o->out, o->in1, o->in2);
2670     return DISAS_NEXT;
2671 }
2672 
2673 static DisasJumpType op_lax(DisasContext *s, DisasOps *o)
2674 {
2675     /* The real output is the original value in memory, which the
2676        atomic fetch-xor below returns directly.  */
2677     tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2678                                  s->insn->data | MO_ALIGN);
2679     /* However, we need to recompute the operation for setting CC.  */
2680     tcg_gen_xor_i64(o->out, o->in1, o->in2);
2681     return DISAS_NEXT;
2682 }
2683 
2684 static DisasJumpType op_ldeb(DisasContext *s, DisasOps *o)
2685 {
2686     gen_helper_ldeb(o->out, cpu_env, o->in2);
2687     return DISAS_NEXT;
2688 }
2689 
2690 static DisasJumpType op_ledb(DisasContext *s, DisasOps *o)
2691 {
2692     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2693 
2694     if (!m34) {
2695         return DISAS_NORETURN;
2696     }
2697     gen_helper_ledb(o->out, cpu_env, o->in2, m34);
2698     return DISAS_NEXT;
2699 }
2700 
2701 static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o)
2702 {
2703     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2704 
2705     if (!m34) {
2706         return DISAS_NORETURN;
2707     }
2708     gen_helper_ldxb(o->out, cpu_env, o->in2_128, m34);
2709     return DISAS_NEXT;
2710 }
2711 
2712 static DisasJumpType op_lexb(DisasContext *s, DisasOps *o)
2713 {
2714     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2715 
2716     if (!m34) {
2717         return DISAS_NORETURN;
2718     }
2719     gen_helper_lexb(o->out, cpu_env, o->in2_128, m34);
2720     return DISAS_NEXT;
2721 }
2722 
2723 static DisasJumpType op_lxdb(DisasContext *s, DisasOps *o)
2724 {
2725     gen_helper_lxdb(o->out_128, cpu_env, o->in2);
2726     return DISAS_NEXT;
2727 }
2728 
2729 static DisasJumpType op_lxeb(DisasContext *s, DisasOps *o)
2730 {
2731     gen_helper_lxeb(o->out_128, cpu_env, o->in2);
2732     return DISAS_NEXT;
2733 }
2734 
2735 static DisasJumpType op_lde(DisasContext *s, DisasOps *o)
2736 {
2737     tcg_gen_shli_i64(o->out, o->in2, 32);
2738     return DISAS_NEXT;
2739 }
2740 
2741 static DisasJumpType op_llgt(DisasContext *s, DisasOps *o)
2742 {
2743     tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2744     return DISAS_NEXT;
2745 }
2746 
2747 static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o)
2748 {
2749     tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
2750     return DISAS_NEXT;
2751 }
2752 
2753 static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o)
2754 {
2755     tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
2756     return DISAS_NEXT;
2757 }
2758 
2759 static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o)
2760 {
2761     tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
2762     return DISAS_NEXT;
2763 }
2764 
2765 static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o)
2766 {
2767     tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
2768     return DISAS_NEXT;
2769 }
2770 
2771 static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o)
2772 {
2773     tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
2774     return DISAS_NEXT;
2775 }
2776 
2777 static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o)
2778 {
2779     tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2780     return DISAS_NEXT;
2781 }
2782 
2783 static DisasJumpType op_ld64(DisasContext *s, DisasOps *o)
2784 {
2785     tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2786     return DISAS_NEXT;
2787 }
2788 
2789 static DisasJumpType op_lat(DisasContext *s, DisasOps *o)
2790 {
2791     TCGLabel *lab = gen_new_label();
2792     store_reg32_i64(get_field(s, r1), o->in2);
2793     /* The value is stored even in case of trap. */
2794     tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2795     gen_trap(s);
2796     gen_set_label(lab);
2797     return DISAS_NEXT;
2798 }
2799 
2800 static DisasJumpType op_lgat(DisasContext *s, DisasOps *o)
2801 {
2802     TCGLabel *lab = gen_new_label();
2803     tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2804     /* The value is stored even in case of trap. */
2805     tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2806     gen_trap(s);
2807     gen_set_label(lab);
2808     return DISAS_NEXT;
2809 }
2810 
2811 static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o)
2812 {
2813     TCGLabel *lab = gen_new_label();
2814     store_reg32h_i64(get_field(s, r1), o->in2);
2815     /* The value is stored even in case of trap. */
2816     tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2817     gen_trap(s);
2818     gen_set_label(lab);
2819     return DISAS_NEXT;
2820 }
2821 
2822 static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o)
2823 {
2824     TCGLabel *lab = gen_new_label();
2825     tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2826     /* The value is stored even in case of trap. */
2827     tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2828     gen_trap(s);
2829     gen_set_label(lab);
2830     return DISAS_NEXT;
2831 }
2832 
2833 static DisasJumpType op_llgtat(DisasContext *s, DisasOps *o)
2834 {
2835     TCGLabel *lab = gen_new_label();
2836     tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2837     /* The value is stored even in case of trap. */
2838     tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2839     gen_trap(s);
2840     gen_set_label(lab);
2841     return DISAS_NEXT;
2842 }
2843 
2844 static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
2845 {
2846     DisasCompare c;
2847 
2848     if (have_field(s, m3)) {
2849         /* LOAD * ON CONDITION */
2850         disas_jcc(s, &c, get_field(s, m3));
2851     } else {
2852         /* SELECT */
2853         disas_jcc(s, &c, get_field(s, m4));
2854     }
2855 
2856     if (c.is_64) {
2857         tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
2858                             o->in2, o->in1);
2859     } else {
2860         TCGv_i32 t32 = tcg_temp_new_i32();
2861         TCGv_i64 t, z;
2862 
2863         tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
2864 
2865         t = tcg_temp_new_i64();
2866         tcg_gen_extu_i32_i64(t, t32);
2867 
2868         z = tcg_constant_i64(0);
2869         tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
2870     }
2871 
2872     return DISAS_NEXT;
2873 }
2874 
2875 #ifndef CONFIG_USER_ONLY
2876 static DisasJumpType op_lctl(DisasContext *s, DisasOps *o)
2877 {
2878     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
2879     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
2880 
2881     gen_helper_lctl(cpu_env, r1, o->in2, r3);
2882     /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
2883     s->exit_to_mainloop = true;
2884     return DISAS_TOO_MANY;
2885 }
2886 
2887 static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o)
2888 {
2889     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
2890     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
2891 
2892     gen_helper_lctlg(cpu_env, r1, o->in2, r3);
2893     /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
2894     s->exit_to_mainloop = true;
2895     return DISAS_TOO_MANY;
2896 }
2897 
2898 static DisasJumpType op_lra(DisasContext *s, DisasOps *o)
2899 {
2900     gen_helper_lra(o->out, cpu_env, o->in2);
2901     set_cc_static(s);
2902     return DISAS_NEXT;
2903 }
2904 
2905 static DisasJumpType op_lpp(DisasContext *s, DisasOps *o)
2906 {
2907     tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
2908     return DISAS_NEXT;
2909 }
2910 
2911 static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o)
2912 {
2913     TCGv_i64 t1, t2;
2914 
2915     per_breaking_event(s);
2916 
2917     t1 = tcg_temp_new_i64();
2918     t2 = tcg_temp_new_i64();
2919     tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
2920                         MO_TEUL | MO_ALIGN_8);
2921     tcg_gen_addi_i64(o->in2, o->in2, 4);
2922     tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
2923     /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK.  */
2924     tcg_gen_shli_i64(t1, t1, 32);
2925     gen_helper_load_psw(cpu_env, t1, t2);
2926     return DISAS_NORETURN;
2927 }
2928 
2929 static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
2930 {
2931     TCGv_i64 t1, t2;
2932 
2933     per_breaking_event(s);
2934 
2935     t1 = tcg_temp_new_i64();
2936     t2 = tcg_temp_new_i64();
2937     tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
2938                         MO_TEUQ | MO_ALIGN_8);
2939     tcg_gen_addi_i64(o->in2, o->in2, 8);
2940     tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
2941     gen_helper_load_psw(cpu_env, t1, t2);
2942     return DISAS_NORETURN;
2943 }
2944 #endif
2945 
2946 static DisasJumpType op_lam(DisasContext *s, DisasOps *o)
2947 {
2948     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
2949     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
2950 
2951     gen_helper_lam(cpu_env, r1, o->in2, r3);
2952     return DISAS_NEXT;
2953 }
2954 
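/*
 * LOAD MULTIPLE.  The storage operands of the first and the last
 * register are touched before any register is written, so that a
 * possible page fault is taken while the register file is still
 * unmodified; the remaining accesses lie between two addresses that
 * are then known to be mapped.
 */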
2955 static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
2956 {
2957     int r1 = get_field(s, r1);
2958     int r3 = get_field(s, r3);
2959     TCGv_i64 t1, t2;
2960 
2961     /* Only one register to read. */
2962     t1 = tcg_temp_new_i64();
2963     if (unlikely(r1 == r3)) {
2964         tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2965         store_reg32_i64(r1, t1);
2966         return DISAS_NEXT;
2967     }
2968 
2969     /* First load the values of the first and last registers to trigger
2970        possible page faults. */
2971     t2 = tcg_temp_new_i64();
2972     tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2973     tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
2974     tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
2975     store_reg32_i64(r1, t1);
2976     store_reg32_i64(r3, t2);
2977 
2978     /* Only two registers to read. */
2979     if (((r1 + 1) & 15) == r3) {
2980         return DISAS_NEXT;
2981     }
2982 
2983     /* Then load the remaining registers; a page fault can no longer occur. */
2984     r3 = (r3 - 1) & 15;
2985     tcg_gen_movi_i64(t2, 4);
2986     while (r1 != r3) {
2987         r1 = (r1 + 1) & 15;
2988         tcg_gen_add_i64(o->in2, o->in2, t2);
2989         tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2990         store_reg32_i64(r1, t1);
2991     }
2992     return DISAS_NEXT;
2993 }
2994 
2995 static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
2996 {
2997     int r1 = get_field(s, r1);
2998     int r3 = get_field(s, r3);
2999     TCGv_i64 t1, t2;
3000 
3001     /* Only one register to read. */
3002     t1 = tcg_temp_new_i64();
3003     if (unlikely(r1 == r3)) {
3004         tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3005         store_reg32h_i64(r1, t1);
3006         return DISAS_NEXT;
3007     }
3008 
3009     /* First load the values of the first and last registers to trigger
3010        possible page faults. */
3011     t2 = tcg_temp_new_i64();
3012     tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3013     tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
3014     tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
3015     store_reg32h_i64(r1, t1);
3016     store_reg32h_i64(r3, t2);
3017 
3018     /* Only two registers to read. */
3019     if (((r1 + 1) & 15) == r3) {
3020         return DISAS_NEXT;
3021     }
3022 
3023     /* Then load the remaining registers; a page fault can no longer occur. */
3024     r3 = (r3 - 1) & 15;
3025     tcg_gen_movi_i64(t2, 4);
3026     while (r1 != r3) {
3027         r1 = (r1 + 1) & 15;
3028         tcg_gen_add_i64(o->in2, o->in2, t2);
3029         tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3030         store_reg32h_i64(r1, t1);
3031     }
3032     return DISAS_NEXT;
3033 }
3034 
3035 static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
3036 {
3037     int r1 = get_field(s, r1);
3038     int r3 = get_field(s, r3);
3039     TCGv_i64 t1, t2;
3040 
3041     /* Only one register to read. */
3042     if (unlikely(r1 == r3)) {
3043         tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
3044         return DISAS_NEXT;
3045     }
3046 
3047     /* First load the values of the first and last registers to trigger
3048        possible page faults. */
3049     t1 = tcg_temp_new_i64();
3050     t2 = tcg_temp_new_i64();
3051     tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
3052     tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
3053     tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
3054     tcg_gen_mov_i64(regs[r1], t1);
3055 
3056     /* Only two registers to read. */
3057     if (((r1 + 1) & 15) == r3) {
3058         return DISAS_NEXT;
3059     }
3060 
3061     /* Then load the remaining registers; a page fault can no longer occur. */
3062     r3 = (r3 - 1) & 15;
3063     tcg_gen_movi_i64(t1, 8);
3064     while (r1 != r3) {
3065         r1 = (r1 + 1) & 15;
3066         tcg_gen_add_i64(o->in2, o->in2, t1);
3067         tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
3068     }
3069     return DISAS_NEXT;
3070 }
3071 
3072 static DisasJumpType op_lpd(DisasContext *s, DisasOps *o)
3073 {
3074     TCGv_i64 a1, a2;
3075     MemOp mop = s->insn->data;
3076 
3077     /* In a parallel context, stop the world and single step.  */
3078     if (tb_cflags(s->base.tb) & CF_PARALLEL) {
3079         update_psw_addr(s);
3080         update_cc_op(s);
3081         gen_exception(EXCP_ATOMIC);
3082         return DISAS_NORETURN;
3083     }
3084 
3085     /* In a serial context, perform the two loads ... */
3086     a1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
3087     a2 = get_address(s, 0, get_field(s, b2), get_field(s, d2));
3088     tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
3089     tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
3090 
3091     /* ... and indicate that we performed them while interlocked.  */
3092     gen_op_movi_cc(s, 0);
3093     return DISAS_NEXT;
3094 }
3095 
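/*
 * LOAD PAIR FROM QUADWORD.  Serially a plain helper suffices; in a
 * parallel context we use the host's 16-byte atomic load if there is
 * one, and otherwise punt to the stop-the-world EXCP_ATOMIC path.
 */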
3096 static DisasJumpType op_lpq(DisasContext *s, DisasOps *o)
3097 {
3098     if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
3099         gen_helper_lpq(o->out, cpu_env, o->in2);
3100     } else if (HAVE_ATOMIC128) {
3101         gen_helper_lpq_parallel(o->out, cpu_env, o->in2);
3102     } else {
3103         gen_helper_exit_atomic(cpu_env);
3104         return DISAS_NORETURN;
3105     }
3106     return_low128(o->out2);
3107     return DISAS_NEXT;
3108 }
3109 
3110 #ifndef CONFIG_USER_ONLY
3111 static DisasJumpType op_lura(DisasContext *s, DisasOps *o)
3112 {
3113     tcg_gen_qemu_ld_tl(o->out, o->in2, MMU_REAL_IDX, s->insn->data);
3114     return DISAS_NEXT;
3115 }
3116 #endif
3117 
3118 static DisasJumpType op_lzrb(DisasContext *s, DisasOps *o)
3119 {
3120     tcg_gen_andi_i64(o->out, o->in2, -256);
3121     return DISAS_NEXT;
3122 }
3123 
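/*
 * LOAD COUNT TO BLOCK BOUNDARY.  The result is
 * min(16, block_size - (addr % block_size)); the ori/neg pair below
 * computes the distance to the next boundary.  E.g. with the default
 * 64-byte block, an address ending in 0x3d yields 3.
 */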
3124 static DisasJumpType op_lcbb(DisasContext *s, DisasOps *o)
3125 {
3126     const int64_t block_size = (1ull << (get_field(s, m3) + 6));
3127 
3128     if (get_field(s, m3) > 6) {
3129         gen_program_exception(s, PGM_SPECIFICATION);
3130         return DISAS_NORETURN;
3131     }
3132 
3133     tcg_gen_ori_i64(o->addr1, o->addr1, -block_size);
3134     tcg_gen_neg_i64(o->addr1, o->addr1);
3135     tcg_gen_movi_i64(o->out, 16);
3136     tcg_gen_umin_i64(o->out, o->out, o->addr1);
3137     gen_op_update1_cc_i64(s, CC_OP_LCBB, o->out);
3138     return DISAS_NEXT;
3139 }
3140 
3141 static DisasJumpType op_mc(DisasContext *s, DisasOps *o)
3142 {
3143     const uint16_t monitor_class = get_field(s, i2);
3144 
3145     if (monitor_class & 0xff00) {
3146         gen_program_exception(s, PGM_SPECIFICATION);
3147         return DISAS_NORETURN;
3148     }
3149 
3150 #if !defined(CONFIG_USER_ONLY)
3151     gen_helper_monitor_call(cpu_env, o->addr1,
3152                             tcg_constant_i32(monitor_class));
3153 #endif
3154     /* Defaults to a NOP. */
3155     return DISAS_NEXT;
3156 }
3157 
3158 static DisasJumpType op_mov2(DisasContext *s, DisasOps *o)
3159 {
3160     o->out = o->in2;
3161     o->in2 = NULL;
3162     return DISAS_NEXT;
3163 }
3164 
3165 static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o)
3166 {
3167     int b2 = get_field(s, b2);
3168     TCGv ar1 = tcg_temp_new_i64();
3169 
3170     o->out = o->in2;
3171     o->in2 = NULL;
3172 
3173     switch (s->base.tb->flags & FLAG_MASK_ASC) {
3174     case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
3175         tcg_gen_movi_i64(ar1, 0);
3176         break;
3177     case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
3178         tcg_gen_movi_i64(ar1, 1);
3179         break;
3180     case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
3181         if (b2) {
3182             tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
3183         } else {
3184             tcg_gen_movi_i64(ar1, 0);
3185         }
3186         break;
3187     case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
3188         tcg_gen_movi_i64(ar1, 2);
3189         break;
3190     }
3191 
3192     tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
3193     return DISAS_NEXT;
3194 }
3195 
3196 static DisasJumpType op_movx(DisasContext *s, DisasOps *o)
3197 {
3198     o->out = o->in1;
3199     o->out2 = o->in2;
3200     o->in1 = NULL;
3201     o->in2 = NULL;
3202     return DISAS_NEXT;
3203 }
3204 
3205 static DisasJumpType op_mvc(DisasContext *s, DisasOps *o)
3206 {
3207     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3208 
3209     gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
3210     return DISAS_NEXT;
3211 }
3212 
3213 static DisasJumpType op_mvcrl(DisasContext *s, DisasOps *o)
3214 {
3215     gen_helper_mvcrl(cpu_env, regs[0], o->addr1, o->in2);
3216     return DISAS_NEXT;
3217 }
3218 
3219 static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o)
3220 {
3221     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3222 
3223     gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
3224     return DISAS_NEXT;
3225 }
3226 
3227 static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o)
3228 {
3229     int r1 = get_field(s, r1);
3230     int r2 = get_field(s, r2);
3231     TCGv_i32 t1, t2;
3232 
3233     /* r1 and r2 must be even.  */
3234     if (r1 & 1 || r2 & 1) {
3235         gen_program_exception(s, PGM_SPECIFICATION);
3236         return DISAS_NORETURN;
3237     }
3238 
3239     t1 = tcg_constant_i32(r1);
3240     t2 = tcg_constant_i32(r2);
3241     gen_helper_mvcl(cc_op, cpu_env, t1, t2);
3242     set_cc_static(s);
3243     return DISAS_NEXT;
3244 }
3245 
3246 static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o)
3247 {
3248     int r1 = get_field(s, r1);
3249     int r3 = get_field(s, r3);
3250     TCGv_i32 t1, t3;
3251 
3252     /* r1 and r3 must be even.  */
3253     if (r1 & 1 || r3 & 1) {
3254         gen_program_exception(s, PGM_SPECIFICATION);
3255         return DISAS_NORETURN;
3256     }
3257 
3258     t1 = tcg_constant_i32(r1);
3259     t3 = tcg_constant_i32(r3);
3260     gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
3261     set_cc_static(s);
3262     return DISAS_NEXT;
3263 }
3264 
3265 static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o)
3266 {
3267     int r1 = get_field(s, r1);
3268     int r3 = get_field(s, r3);
3269     TCGv_i32 t1, t3;
3270 
3271     /* r1 and r3 must be even.  */
3272     if (r1 & 1 || r3 & 1) {
3273         gen_program_exception(s, PGM_SPECIFICATION);
3274         return DISAS_NORETURN;
3275     }
3276 
3277     t1 = tcg_constant_i32(r1);
3278     t3 = tcg_constant_i32(r3);
3279     gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
3280     set_cc_static(s);
3281     return DISAS_NEXT;
3282 }
3283 
3284 static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o)
3285 {
3286     int r3 = get_field(s, r3);
3287     gen_helper_mvcos(cc_op, cpu_env, o->addr1, o->in2, regs[r3]);
3288     set_cc_static(s);
3289     return DISAS_NEXT;
3290 }
3291 
3292 #ifndef CONFIG_USER_ONLY
3293 static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o)
3294 {
3295     int r1 = get_field(s, l1);
3296     int r3 = get_field(s, r3);
3297     gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2, regs[r3]);
3298     set_cc_static(s);
3299     return DISAS_NEXT;
3300 }
3301 
3302 static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o)
3303 {
3304     int r1 = get_field(s, l1);
3305     int r3 = get_field(s, r3);
3306     gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2, regs[r3]);
3307     set_cc_static(s);
3308     return DISAS_NEXT;
3309 }
3310 #endif
3311 
3312 static DisasJumpType op_mvn(DisasContext *s, DisasOps *o)
3313 {
3314     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3315 
3316     gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
3317     return DISAS_NEXT;
3318 }
3319 
3320 static DisasJumpType op_mvo(DisasContext *s, DisasOps *o)
3321 {
3322     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3323 
3324     gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
3325     return DISAS_NEXT;
3326 }
3327 
3328 static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o)
3329 {
3330     TCGv_i32 t1 = tcg_constant_i32(get_field(s, r1));
3331     TCGv_i32 t2 = tcg_constant_i32(get_field(s, r2));
3332 
3333     gen_helper_mvpg(cc_op, cpu_env, regs[0], t1, t2);
3334     set_cc_static(s);
3335     return DISAS_NEXT;
3336 }
3337 
3338 static DisasJumpType op_mvst(DisasContext *s, DisasOps *o)
3339 {
3340     TCGv_i32 t1 = tcg_constant_i32(get_field(s, r1));
3341     TCGv_i32 t2 = tcg_constant_i32(get_field(s, r2));
3342 
3343     gen_helper_mvst(cc_op, cpu_env, t1, t2);
3344     set_cc_static(s);
3345     return DISAS_NEXT;
3346 }
3347 
3348 static DisasJumpType op_mvz(DisasContext *s, DisasOps *o)
3349 {
3350     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3351 
3352     gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
3353     return DISAS_NEXT;
3354 }
3355 
3356 static DisasJumpType op_mul(DisasContext *s, DisasOps *o)
3357 {
3358     tcg_gen_mul_i64(o->out, o->in1, o->in2);
3359     return DISAS_NEXT;
3360 }
3361 
3362 static DisasJumpType op_mul128(DisasContext *s, DisasOps *o)
3363 {
3364     tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
3365     return DISAS_NEXT;
3366 }
3367 
3368 static DisasJumpType op_muls128(DisasContext *s, DisasOps *o)
3369 {
3370     tcg_gen_muls2_i64(o->out2, o->out, o->in1, o->in2);
3371     return DISAS_NEXT;
3372 }
3373 
3374 static DisasJumpType op_meeb(DisasContext *s, DisasOps *o)
3375 {
3376     gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
3377     return DISAS_NEXT;
3378 }
3379 
3380 static DisasJumpType op_mdeb(DisasContext *s, DisasOps *o)
3381 {
3382     gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
3383     return DISAS_NEXT;
3384 }
3385 
3386 static DisasJumpType op_mdb(DisasContext *s, DisasOps *o)
3387 {
3388     gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
3389     return DISAS_NEXT;
3390 }
3391 
3392 static DisasJumpType op_mxb(DisasContext *s, DisasOps *o)
3393 {
3394     gen_helper_mxb(o->out_128, cpu_env, o->in1_128, o->in2_128);
3395     return DISAS_NEXT;
3396 }
3397 
3398 static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o)
3399 {
3400     gen_helper_mxdb(o->out_128, cpu_env, o->in1_128, o->in2);
3401     return DISAS_NEXT;
3402 }
3403 
3404 static DisasJumpType op_maeb(DisasContext *s, DisasOps *o)
3405 {
3406     TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
3407     gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
3408     return DISAS_NEXT;
3409 }
3410 
3411 static DisasJumpType op_madb(DisasContext *s, DisasOps *o)
3412 {
3413     TCGv_i64 r3 = load_freg(get_field(s, r3));
3414     gen_helper_madb(o->out, cpu_env, o->in1, o->in2, r3);
3415     return DISAS_NEXT;
3416 }
3417 
3418 static DisasJumpType op_mseb(DisasContext *s, DisasOps *o)
3419 {
3420     TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
3421     gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
3422     return DISAS_NEXT;
3423 }
3424 
3425 static DisasJumpType op_msdb(DisasContext *s, DisasOps *o)
3426 {
3427     TCGv_i64 r3 = load_freg(get_field(s, r3));
3428     gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, r3);
3429     return DISAS_NEXT;
3430 }
3431 
3432 static DisasJumpType op_nabs(DisasContext *s, DisasOps *o)
3433 {
3434     TCGv_i64 z = tcg_constant_i64(0);
3435     TCGv_i64 n = tcg_temp_new_i64();
3436 
3437     tcg_gen_neg_i64(n, o->in2);
3438     tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
3439     return DISAS_NEXT;
3440 }
3441 
3442 static DisasJumpType op_nabsf32(DisasContext *s, DisasOps *o)
3443 {
3444     tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
3445     return DISAS_NEXT;
3446 }
3447 
3448 static DisasJumpType op_nabsf64(DisasContext *s, DisasOps *o)
3449 {
3450     tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
3451     return DISAS_NEXT;
3452 }
3453 
3454 static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o)
3455 {
3456     tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
3457     tcg_gen_mov_i64(o->out2, o->in2);
3458     return DISAS_NEXT;
3459 }
3460 
3461 static DisasJumpType op_nc(DisasContext *s, DisasOps *o)
3462 {
3463     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3464 
3465     gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
3466     set_cc_static(s);
3467     return DISAS_NEXT;
3468 }
3469 
3470 static DisasJumpType op_neg(DisasContext *s, DisasOps *o)
3471 {
3472     tcg_gen_neg_i64(o->out, o->in2);
3473     return DISAS_NEXT;
3474 }
3475 
3476 static DisasJumpType op_negf32(DisasContext *s, DisasOps *o)
3477 {
3478     tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
3479     return DISAS_NEXT;
3480 }
3481 
3482 static DisasJumpType op_negf64(DisasContext *s, DisasOps *o)
3483 {
3484     tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
3485     return DISAS_NEXT;
3486 }
3487 
3488 static DisasJumpType op_negf128(DisasContext *s, DisasOps *o)
3489 {
3490     tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
3491     tcg_gen_mov_i64(o->out2, o->in2);
3492     return DISAS_NEXT;
3493 }
3494 
3495 static DisasJumpType op_oc(DisasContext *s, DisasOps *o)
3496 {
3497     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3498 
3499     gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
3500     set_cc_static(s);
3501     return DISAS_NEXT;
3502 }
3503 
3504 static DisasJumpType op_or(DisasContext *s, DisasOps *o)
3505 {
3506     tcg_gen_or_i64(o->out, o->in1, o->in2);
3507     return DISAS_NEXT;
3508 }
3509 
3510 static DisasJumpType op_ori(DisasContext *s, DisasOps *o)
3511 {
3512     int shift = s->insn->data & 0xff;
3513     int size = s->insn->data >> 8;
3514     uint64_t mask = ((1ull << size) - 1) << shift;
3515     TCGv_i64 t = tcg_temp_new_i64();
3516 
3517     tcg_gen_shli_i64(t, o->in2, shift);
3518     tcg_gen_or_i64(o->out, o->in1, t);
3519 
3520     /* Produce the CC from only the bits manipulated.  */
3521     tcg_gen_andi_i64(cc_dst, o->out, mask);
3522     set_cc_nz_u64(s, cc_dst);
3523     return DISAS_NEXT;
3524 }
3525 
3526 static DisasJumpType op_oi(DisasContext *s, DisasOps *o)
3527 {
3528     o->in1 = tcg_temp_new_i64();
3529 
3530     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
3531         tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
3532     } else {
3533         /* Perform the atomic operation in memory. */
3534         tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
3535                                     s->insn->data);
3536     }
3537 
3538     /* Recompute also for atomic case: needed for setting CC. */
3539     tcg_gen_or_i64(o->out, o->in1, o->in2);
3540 
3541     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
3542         tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
3543     }
3544     return DISAS_NEXT;
3545 }
3546 
3547 static DisasJumpType op_pack(DisasContext *s, DisasOps *o)
3548 {
3549     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3550 
3551     gen_helper_pack(cpu_env, l, o->addr1, o->in2);
3552     return DISAS_NEXT;
3553 }
3554 
3555 static DisasJumpType op_pka(DisasContext *s, DisasOps *o)
3556 {
3557     int l2 = get_field(s, l2) + 1;
3558     TCGv_i32 l;
3559 
3560     /* The length must not exceed 32 bytes.  */
3561     if (l2 > 32) {
3562         gen_program_exception(s, PGM_SPECIFICATION);
3563         return DISAS_NORETURN;
3564     }
3565     l = tcg_constant_i32(l2);
3566     gen_helper_pka(cpu_env, o->addr1, o->in2, l);
3567     return DISAS_NEXT;
3568 }
3569 
3570 static DisasJumpType op_pku(DisasContext *s, DisasOps *o)
3571 {
3572     int l2 = get_field(s, l2) + 1;
3573     TCGv_i32 l;
3574 
3575     /* The length must be even and must not exceed 64 bytes.  */
3576     if ((l2 & 1) || (l2 > 64)) {
3577         gen_program_exception(s, PGM_SPECIFICATION);
3578         return DISAS_NORETURN;
3579     }
3580     l = tcg_constant_i32(l2);
3581     gen_helper_pku(cpu_env, o->addr1, o->in2, l);
3582     return DISAS_NEXT;
3583 }
3584 
3585 static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o)
3586 {
3587     const uint8_t m3 = get_field(s, m3);
3588 
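         /*
          * Without m3 bit 0x8 (or without the miscellaneous-instruction-
          * extensions facility 3), POPCNT produces a separate population
          * count for each byte, which the helper implements; with both,
          * it is a single count across all 64 bits.
          */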
3589     if ((m3 & 8) && s390_has_feat(S390_FEAT_MISC_INSTRUCTION_EXT3)) {
3590         tcg_gen_ctpop_i64(o->out, o->in2);
3591     } else {
3592         gen_helper_popcnt(o->out, o->in2);
3593     }
3594     return DISAS_NEXT;
3595 }
3596 
3597 #ifndef CONFIG_USER_ONLY
3598 static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o)
3599 {
3600     gen_helper_ptlb(cpu_env);
3601     return DISAS_NEXT;
3602 }
3603 #endif
3604 
3605 static DisasJumpType op_risbg(DisasContext *s, DisasOps *o)
3606 {
3607     int i3 = get_field(s, i3);
3608     int i4 = get_field(s, i4);
3609     int i5 = get_field(s, i5);
3610     int do_zero = i4 & 0x80;
3611     uint64_t mask, imask, pmask;
3612     int pos, len, rot;
3613 
3614     /* Adjust the arguments for the specific insn.  */
3615     switch (s->fields.op2) {
3616     case 0x55: /* risbg */
3617     case 0x59: /* risbgn */
3618         i3 &= 63;
3619         i4 &= 63;
3620         pmask = ~0;
3621         break;
3622     case 0x5d: /* risbhg */
3623         i3 &= 31;
3624         i4 &= 31;
3625         pmask = 0xffffffff00000000ull;
3626         break;
3627     case 0x51: /* risblg */
3628         i3 = (i3 & 31) + 32;
3629         i4 = (i4 & 31) + 32;
3630         pmask = 0x00000000ffffffffull;
3631         break;
3632     default:
3633         g_assert_not_reached();
3634     }
3635 
3636     /* MASK is the set of bits to be inserted from R2. */
3637     if (i3 <= i4) {
3638         /* [0...i3---i4...63] */
3639         mask = (-1ull >> i3) & (-1ull << (63 - i4));
3640     } else {
3641         /* [0---i4...i3---63] */
3642         mask = (-1ull >> i3) | (-1ull << (63 - i4));
3643     }
3644     /* For RISBLG/RISBHG, the wrapping is limited to the high/low doubleword. */
3645     mask &= pmask;
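         /*
          * Worked example (values assumed for illustration): i3 = 8,
          * i4 = 15 takes the non-wrapping case and yields
          *   mask = (-1ull >> 8) & (-1ull << 48) = 0x00ff000000000000,
          * i.e. PoO bits 8-15; i3 = 60, i4 = 3 wraps around and selects
          * bits 60-63 and 0-3:
          *   mask = (-1ull >> 60) | (-1ull << 60) = 0xf00000000000000f.
          */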
3646 
3647     /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
3648        insns, we need to keep the other half of the register.  */
3649     imask = ~mask | ~pmask;
3650     if (do_zero) {
3651         imask = ~pmask;
3652     }
3653 
3654     len = i4 - i3 + 1;
3655     pos = 63 - i4;
3656     rot = i5 & 63;
3657 
3658     /* In some cases we can implement this with extract.  */
3659     if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
3660         tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
3661         return DISAS_NEXT;
3662     }
3663 
3664     /* In some cases we can implement this with deposit.  */
3665     if (len > 0 && (imask == 0 || ~mask == imask)) {
3666         /* Note that we rotate the bits to be inserted to the lsb, not to
3667            the position as described in the PoO.  */
3668         rot = (rot - pos) & 63;
3669     } else {
3670         pos = -1;
3671     }
3672 
3673     /* Rotate the input as necessary.  */
3674     tcg_gen_rotli_i64(o->in2, o->in2, rot);
3675 
3676     /* Insert the selected bits into the output.  */
3677     if (pos >= 0) {
3678         if (imask == 0) {
3679             tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
3680         } else {
3681             tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
3682         }
3683     } else if (imask == 0) {
3684         tcg_gen_andi_i64(o->out, o->in2, mask);
3685     } else {
3686         tcg_gen_andi_i64(o->in2, o->in2, mask);
3687         tcg_gen_andi_i64(o->out, o->out, imask);
3688         tcg_gen_or_i64(o->out, o->out, o->in2);
3689     }
3690     return DISAS_NEXT;
3691 }
3692 
3693 static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o)
3694 {
3695     int i3 = get_field(s, i3);
3696     int i4 = get_field(s, i4);
3697     int i5 = get_field(s, i5);
3698     uint64_t mask;
3699 
3700     /* If this is a test-only form, arrange to discard the result.  */
3701     if (i3 & 0x80) {
3702         o->out = tcg_temp_new_i64();
3703     }
3704 
3705     i3 &= 63;
3706     i4 &= 63;
3707     i5 &= 63;
3708 
3709     /* MASK is the set of bits to be operated on from R2.
3710        Take care for I3/I4 wraparound.  */
3711     mask = ~0ull >> i3;
3712     if (i3 <= i4) {
3713         mask ^= ~0ull >> i4 >> 1;
3714     } else {
3715         mask |= ~(~0ull >> i4 >> 1);
3716     }
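         /*
          * A sketch of the arithmetic, with example values assumed: for
          * i3 = 8, i4 = 15 this computes
          *   mask = (~0ull >> 8) ^ (~0ull >> 16) = 0x00ff000000000000,
          * again PoO bits 8-15 of the rotated second operand.
          */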
3717 
3718     /* Rotate the input as necessary.  */
3719     tcg_gen_rotli_i64(o->in2, o->in2, i5);
3720 
3721     /* Operate.  */
3722     switch (s->fields.op2) {
3723     case 0x54: /* AND */
3724         tcg_gen_ori_i64(o->in2, o->in2, ~mask);
3725         tcg_gen_and_i64(o->out, o->out, o->in2);
3726         break;
3727     case 0x56: /* OR */
3728         tcg_gen_andi_i64(o->in2, o->in2, mask);
3729         tcg_gen_or_i64(o->out, o->out, o->in2);
3730         break;
3731     case 0x57: /* XOR */
3732         tcg_gen_andi_i64(o->in2, o->in2, mask);
3733         tcg_gen_xor_i64(o->out, o->out, o->in2);
3734         break;
3735     default:
3736         g_assert_not_reached();
3737     }
3738 
3739     /* Set the CC.  */
3740     tcg_gen_andi_i64(cc_dst, o->out, mask);
3741     set_cc_nz_u64(s, cc_dst);
3742     return DISAS_NEXT;
3743 }
3744 
3745 static DisasJumpType op_rev16(DisasContext *s, DisasOps *o)
3746 {
3747     tcg_gen_bswap16_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
3748     return DISAS_NEXT;
3749 }
3750 
3751 static DisasJumpType op_rev32(DisasContext *s, DisasOps *o)
3752 {
3753     tcg_gen_bswap32_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
3754     return DISAS_NEXT;
3755 }
3756 
3757 static DisasJumpType op_rev64(DisasContext *s, DisasOps *o)
3758 {
3759     tcg_gen_bswap64_i64(o->out, o->in2);
3760     return DISAS_NEXT;
3761 }
3762 
3763 static DisasJumpType op_rll32(DisasContext *s, DisasOps *o)
3764 {
3765     TCGv_i32 t1 = tcg_temp_new_i32();
3766     TCGv_i32 t2 = tcg_temp_new_i32();
3767     TCGv_i32 to = tcg_temp_new_i32();
3768     tcg_gen_extrl_i64_i32(t1, o->in1);
3769     tcg_gen_extrl_i64_i32(t2, o->in2);
3770     tcg_gen_rotl_i32(to, t1, t2);
3771     tcg_gen_extu_i32_i64(o->out, to);
3772     return DISAS_NEXT;
3773 }
3774 
3775 static DisasJumpType op_rll64(DisasContext *s, DisasOps *o)
3776 {
3777     tcg_gen_rotl_i64(o->out, o->in1, o->in2);
3778     return DISAS_NEXT;
3779 }
3780 
3781 #ifndef CONFIG_USER_ONLY
3782 static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o)
3783 {
3784     gen_helper_rrbe(cc_op, cpu_env, o->in2);
3785     set_cc_static(s);
3786     return DISAS_NEXT;
3787 }
3788 
3789 static DisasJumpType op_sacf(DisasContext *s, DisasOps *o)
3790 {
3791     gen_helper_sacf(cpu_env, o->in2);
3792     /* Addressing mode has changed, so end the block.  */
3793     return DISAS_TOO_MANY;
3794 }
3795 #endif
3796 
3797 static DisasJumpType op_sam(DisasContext *s, DisasOps *o)
3798 {
3799     int sam = s->insn->data;
3800     TCGv_i64 tsam;
3801     uint64_t mask;
3802 
3803     switch (sam) {
3804     case 0: /* SAM24 */
3805         mask = 0xffffff;
3806         break;
3807     case 1: /* SAM31 */
3808         mask = 0x7fffffff;
3809         break;
3810     default: /* SAM64 */
3811         mask = -1;
3812         break;
3813     }
3814 
3815     /* Bizarre but true, we check the address of the current insn for the
3816        specification exception, not the next to be executed.  Thus the PoO
3817        documents that Bad Things Happen two bytes before the end.  */
3818     if (s->base.pc_next & ~mask) {
3819         gen_program_exception(s, PGM_SPECIFICATION);
3820         return DISAS_NORETURN;
3821     }
3822     s->pc_tmp &= mask;
3823 
3824     tsam = tcg_constant_i64(sam);
3825     tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
3826 
3827     /* Always exit the TB, since we (may have) changed execution mode.  */
3828     return DISAS_TOO_MANY;
3829 }
3830 
3831 static DisasJumpType op_sar(DisasContext *s, DisasOps *o)
3832 {
3833     int r1 = get_field(s, r1);
3834     tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
3835     return DISAS_NEXT;
3836 }
3837 
3838 static DisasJumpType op_seb(DisasContext *s, DisasOps *o)
3839 {
3840     gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
3841     return DISAS_NEXT;
3842 }
3843 
3844 static DisasJumpType op_sdb(DisasContext *s, DisasOps *o)
3845 {
3846     gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
3847     return DISAS_NEXT;
3848 }
3849 
3850 static DisasJumpType op_sxb(DisasContext *s, DisasOps *o)
3851 {
3852     gen_helper_sxb(o->out_128, cpu_env, o->in1_128, o->in2_128);
3853     return DISAS_NEXT;
3854 }
3855 
3856 static DisasJumpType op_sqeb(DisasContext *s, DisasOps *o)
3857 {
3858     gen_helper_sqeb(o->out, cpu_env, o->in2);
3859     return DISAS_NEXT;
3860 }
3861 
3862 static DisasJumpType op_sqdb(DisasContext *s, DisasOps *o)
3863 {
3864     gen_helper_sqdb(o->out, cpu_env, o->in2);
3865     return DISAS_NEXT;
3866 }
3867 
3868 static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o)
3869 {
3870     gen_helper_sqxb(o->out_128, cpu_env, o->in2_128);
3871     return DISAS_NEXT;
3872 }
3873 
3874 #ifndef CONFIG_USER_ONLY
3875 static DisasJumpType op_servc(DisasContext *s, DisasOps *o)
3876 {
3877     gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
3878     set_cc_static(s);
3879     return DISAS_NEXT;
3880 }
3881 
3882 static DisasJumpType op_sigp(DisasContext *s, DisasOps *o)
3883 {
3884     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
3885     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
3886 
3887     gen_helper_sigp(cc_op, cpu_env, o->in2, r1, r3);
3888     set_cc_static(s);
3889     return DISAS_NEXT;
3890 }
3891 #endif
3892 
3893 static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
3894 {
3895     DisasCompare c;
3896     TCGv_i64 a, h;
3897     TCGLabel *lab;
3898     int r1;
3899 
3900     disas_jcc(s, &c, get_field(s, m3));
3901 
3902     /* We want to store when the condition is fulfilled, so branch
3903        out when it's not.  */
3904     c.cond = tcg_invert_cond(c.cond);
3905 
3906     lab = gen_new_label();
3907     if (c.is_64) {
3908         tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
3909     } else {
3910         tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
3911     }
3912 
3913     r1 = get_field(s, r1);
3914     a = get_address(s, 0, get_field(s, b2), get_field(s, d2));
3915     switch (s->insn->data) {
3916     case 1: /* STOCG */
3917         tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
3918         break;
3919     case 0: /* STOC */
3920         tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
3921         break;
3922     case 2: /* STOCFH */
3923         h = tcg_temp_new_i64();
3924         tcg_gen_shri_i64(h, regs[r1], 32);
3925         tcg_gen_qemu_st32(h, a, get_mem_index(s));
3926         break;
3927     default:
3928         g_assert_not_reached();
3929     }
3930 
3931     gen_set_label(lab);
3932     return DISAS_NEXT;
3933 }
3934 
3935 static DisasJumpType op_sla(DisasContext *s, DisasOps *o)
3936 {
3937     TCGv_i64 t;
3938     uint64_t sign = 1ull << s->insn->data;
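         /*
          * For the 32-bit form (insn->data == 31), pre-shift the operand
          * into the high half so that CC_OP_SLA sees the 32-bit sign bit
          * at bit 63, just as for the 64-bit form.  (Intent inferred from
          * the shared CC_OP_SLA computation below.)
          */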
3939     if (s->insn->data == 31) {
3940         t = tcg_temp_new_i64();
3941         tcg_gen_shli_i64(t, o->in1, 32);
3942     } else {
3943         t = o->in1;
3944     }
3945     gen_op_update2_cc_i64(s, CC_OP_SLA, t, o->in2);
3946     tcg_gen_shl_i64(o->out, o->in1, o->in2);
3947     /* The arithmetic left shift is curious in that it does not affect
3948        the sign bit.  Copy that over from the source unchanged.  */
3949     tcg_gen_andi_i64(o->out, o->out, ~sign);
3950     tcg_gen_andi_i64(o->in1, o->in1, sign);
3951     tcg_gen_or_i64(o->out, o->out, o->in1);
3952     return DISAS_NEXT;
3953 }
3954 
3955 static DisasJumpType op_sll(DisasContext *s, DisasOps *o)
3956 {
3957     tcg_gen_shl_i64(o->out, o->in1, o->in2);
3958     return DISAS_NEXT;
3959 }
3960 
3961 static DisasJumpType op_sra(DisasContext *s, DisasOps *o)
3962 {
3963     tcg_gen_sar_i64(o->out, o->in1, o->in2);
3964     return DISAS_NEXT;
3965 }
3966 
3967 static DisasJumpType op_srl(DisasContext *s, DisasOps *o)
3968 {
3969     tcg_gen_shr_i64(o->out, o->in1, o->in2);
3970     return DISAS_NEXT;
3971 }
3972 
3973 static DisasJumpType op_sfpc(DisasContext *s, DisasOps *o)
3974 {
3975     gen_helper_sfpc(cpu_env, o->in2);
3976     return DISAS_NEXT;
3977 }
3978 
3979 static DisasJumpType op_sfas(DisasContext *s, DisasOps *o)
3980 {
3981     gen_helper_sfas(cpu_env, o->in2);
3982     return DISAS_NEXT;
3983 }
3984 
3985 static DisasJumpType op_srnm(DisasContext *s, DisasOps *o)
3986 {
3987     /* Bits other than 62 and 63 are ignored. Bit 29 is set to zero. */
3988     tcg_gen_andi_i64(o->addr1, o->addr1, 0x3ull);
3989     gen_helper_srnm(cpu_env, o->addr1);
3990     return DISAS_NEXT;
3991 }
3992 
3993 static DisasJumpType op_srnmb(DisasContext *s, DisasOps *o)
3994 {
3995     /* Bits 0-55 are ignored. */
3996     tcg_gen_andi_i64(o->addr1, o->addr1, 0xffull);
3997     gen_helper_srnm(cpu_env, o->addr1);
3998     return DISAS_NEXT;
3999 }
4000 
4001 static DisasJumpType op_srnmt(DisasContext *s, DisasOps *o)
4002 {
4003     TCGv_i64 tmp = tcg_temp_new_i64();
4004 
4005     /* Bits other than 61-63 are ignored. */
4006     tcg_gen_andi_i64(o->addr1, o->addr1, 0x7ull);
4007 
4008     /* No need to call a helper, since we don't implement DFP.  */
4009     tcg_gen_ld32u_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
4010     tcg_gen_deposit_i64(tmp, tmp, o->addr1, 4, 3);
4011     tcg_gen_st32_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
4012     return DISAS_NEXT;
4013 }
4014 
4015 static DisasJumpType op_spm(DisasContext *s, DisasOps *o)
4016 {
4017     tcg_gen_extrl_i64_i32(cc_op, o->in1);
4018     tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
4019     set_cc_static(s);
4020 
4021     tcg_gen_shri_i64(o->in1, o->in1, 24);
4022     tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
4023     return DISAS_NEXT;
4024 }
4025 
4026 static DisasJumpType op_ectg(DisasContext *s, DisasOps *o)
4027 {
4028     int b1 = get_field(s, b1);
4029     int d1 = get_field(s, d1);
4030     int b2 = get_field(s, b2);
4031     int d2 = get_field(s, d2);
4032     int r3 = get_field(s, r3);
4033     TCGv_i64 tmp = tcg_temp_new_i64();
4034 
4035     /* fetch all operands first */
4036     o->in1 = tcg_temp_new_i64();
4037     tcg_gen_addi_i64(o->in1, regs[b1], d1);
4038     o->in2 = tcg_temp_new_i64();
4039     tcg_gen_addi_i64(o->in2, regs[b2], d2);
4040     o->addr1 = tcg_temp_new_i64();
4041     gen_addi_and_wrap_i64(s, o->addr1, regs[r3], 0);
4042 
4043     /* load the third operand into r3 before modifying anything */
4044     tcg_gen_qemu_ld64(regs[r3], o->addr1, get_mem_index(s));
4045 
4046     /* subtract CPU timer from first operand and store in GR0 */
4047     gen_helper_stpt(tmp, cpu_env);
4048     tcg_gen_sub_i64(regs[0], o->in1, tmp);
4049 
4050     /* store second operand in GR1 */
4051     tcg_gen_mov_i64(regs[1], o->in2);
4052     return DISAS_NEXT;
4053 }
4054 
4055 #ifndef CONFIG_USER_ONLY
4056 static DisasJumpType op_spka(DisasContext *s, DisasOps *o)
4057 {
4058     tcg_gen_shri_i64(o->in2, o->in2, 4);
4059     tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
4060     return DISAS_NEXT;
4061 }
4062 
4063 static DisasJumpType op_sske(DisasContext *s, DisasOps *o)
4064 {
4065     gen_helper_sske(cpu_env, o->in1, o->in2);
4066     return DISAS_NEXT;
4067 }
4068 
4069 static DisasJumpType op_ssm(DisasContext *s, DisasOps *o)
4070 {
4071     tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
4072     /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
4073     s->exit_to_mainloop = true;
4074     return DISAS_TOO_MANY;
4075 }
4076 
4077 static DisasJumpType op_stap(DisasContext *s, DisasOps *o)
4078 {
4079     tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, core_id));
4080     return DISAS_NEXT;
4081 }
4082 #endif
4083 
4084 static DisasJumpType op_stck(DisasContext *s, DisasOps *o)
4085 {
4086     gen_helper_stck(o->out, cpu_env);
4087     /* ??? We don't implement clock states.  */
4088     gen_op_movi_cc(s, 0);
4089     return DISAS_NEXT;
4090 }
4091 
4092 static DisasJumpType op_stcke(DisasContext *s, DisasOps *o)
4093 {
4094     TCGv_i64 c1 = tcg_temp_new_i64();
4095     TCGv_i64 c2 = tcg_temp_new_i64();
4096     TCGv_i64 todpr = tcg_temp_new_i64();
4097     gen_helper_stck(c1, cpu_env);
4098     /* 16-bit value stored in a uint32_t (only valid bits set) */
4099     tcg_gen_ld32u_i64(todpr, cpu_env, offsetof(CPUS390XState, todpr));
4100     /* Shift the 64-bit value into its place as a zero-extended
4101        104-bit value.  Note that "bit positions 64-103 are always
4102        non-zero so that they compare differently to STCK"; we set
4103        the least significant bit to 1.  */
4104     tcg_gen_shli_i64(c2, c1, 56);
4105     tcg_gen_shri_i64(c1, c1, 8);
4106     tcg_gen_ori_i64(c2, c2, 0x10000);
4107     tcg_gen_or_i64(c2, c2, todpr);
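         /*
          * Resulting 16-byte operand, as laid out by the shifts above:
          * byte 0 is zero, bytes 1-8 hold the STCK value, bytes 9-12 are
          * zero, byte 13 is 0x01, and bytes 14-15 hold the TOD
          * programmable field.
          */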
4108     tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
4109     tcg_gen_addi_i64(o->in2, o->in2, 8);
4110     tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
4111     /* ??? We don't implement clock states.  */
4112     gen_op_movi_cc(s, 0);
4113     return DISAS_NEXT;
4114 }
4115 
4116 #ifndef CONFIG_USER_ONLY
4117 static DisasJumpType op_sck(DisasContext *s, DisasOps *o)
4118 {
4119     gen_helper_sck(cc_op, cpu_env, o->in2);
4120     set_cc_static(s);
4121     return DISAS_NEXT;
4122 }
4123 
4124 static DisasJumpType op_sckc(DisasContext *s, DisasOps *o)
4125 {
4126     gen_helper_sckc(cpu_env, o->in2);
4127     return DISAS_NEXT;
4128 }
4129 
4130 static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o)
4131 {
4132     gen_helper_sckpf(cpu_env, regs[0]);
4133     return DISAS_NEXT;
4134 }
4135 
4136 static DisasJumpType op_stckc(DisasContext *s, DisasOps *o)
4137 {
4138     gen_helper_stckc(o->out, cpu_env);
4139     return DISAS_NEXT;
4140 }
4141 
4142 static DisasJumpType op_stctg(DisasContext *s, DisasOps *o)
4143 {
4144     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4145     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
4146 
4147     gen_helper_stctg(cpu_env, r1, o->in2, r3);
4148     return DISAS_NEXT;
4149 }
4150 
4151 static DisasJumpType op_stctl(DisasContext *s, DisasOps *o)
4152 {
4153     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4154     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
4155 
4156     gen_helper_stctl(cpu_env, r1, o->in2, r3);
4157     return DISAS_NEXT;
4158 }
4159 
4160 static DisasJumpType op_stidp(DisasContext *s, DisasOps *o)
4161 {
4162     tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, cpuid));
4163     return DISAS_NEXT;
4164 }
4165 
4166 static DisasJumpType op_spt(DisasContext *s, DisasOps *o)
4167 {
4168     gen_helper_spt(cpu_env, o->in2);
4169     return DISAS_NEXT;
4170 }
4171 
4172 static DisasJumpType op_stfl(DisasContext *s, DisasOps *o)
4173 {
4174     gen_helper_stfl(cpu_env);
4175     return DISAS_NEXT;
4176 }
4177 
4178 static DisasJumpType op_stpt(DisasContext *s, DisasOps *o)
4179 {
4180     gen_helper_stpt(o->out, cpu_env);
4181     return DISAS_NEXT;
4182 }
4183 
4184 static DisasJumpType op_stsi(DisasContext *s, DisasOps *o)
4185 {
4186     gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
4187     set_cc_static(s);
4188     return DISAS_NEXT;
4189 }
4190 
4191 static DisasJumpType op_spx(DisasContext *s, DisasOps *o)
4192 {
4193     gen_helper_spx(cpu_env, o->in2);
4194     return DISAS_NEXT;
4195 }
4196 
4197 static DisasJumpType op_xsch(DisasContext *s, DisasOps *o)
4198 {
4199     gen_helper_xsch(cpu_env, regs[1]);
4200     set_cc_static(s);
4201     return DISAS_NEXT;
4202 }
4203 
4204 static DisasJumpType op_csch(DisasContext *s, DisasOps *o)
4205 {
4206     gen_helper_csch(cpu_env, regs[1]);
4207     set_cc_static(s);
4208     return DISAS_NEXT;
4209 }
4210 
4211 static DisasJumpType op_hsch(DisasContext *s, DisasOps *o)
4212 {
4213     gen_helper_hsch(cpu_env, regs[1]);
4214     set_cc_static(s);
4215     return DISAS_NEXT;
4216 }
4217 
4218 static DisasJumpType op_msch(DisasContext *s, DisasOps *o)
4219 {
4220     gen_helper_msch(cpu_env, regs[1], o->in2);
4221     set_cc_static(s);
4222     return DISAS_NEXT;
4223 }
4224 
4225 static DisasJumpType op_rchp(DisasContext *s, DisasOps *o)
4226 {
4227     gen_helper_rchp(cpu_env, regs[1]);
4228     set_cc_static(s);
4229     return DISAS_NEXT;
4230 }
4231 
4232 static DisasJumpType op_rsch(DisasContext *s, DisasOps *o)
4233 {
4234     gen_helper_rsch(cpu_env, regs[1]);
4235     set_cc_static(s);
4236     return DISAS_NEXT;
4237 }
4238 
4239 static DisasJumpType op_sal(DisasContext *s, DisasOps *o)
4240 {
4241     gen_helper_sal(cpu_env, regs[1]);
4242     return DISAS_NEXT;
4243 }
4244 
4245 static DisasJumpType op_schm(DisasContext *s, DisasOps *o)
4246 {
4247     gen_helper_schm(cpu_env, regs[1], regs[2], o->in2);
4248     return DISAS_NEXT;
4249 }
4250 
4251 static DisasJumpType op_siga(DisasContext *s, DisasOps *o)
4252 {
4253     /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
4254     gen_op_movi_cc(s, 3);
4255     return DISAS_NEXT;
4256 }
4257 
4258 static DisasJumpType op_stcps(DisasContext *s, DisasOps *o)
4259 {
4260     /* The instruction is suppressed if not provided. */
4261     return DISAS_NEXT;
4262 }
4263 
4264 static DisasJumpType op_ssch(DisasContext *s, DisasOps *o)
4265 {
4266     gen_helper_ssch(cpu_env, regs[1], o->in2);
4267     set_cc_static(s);
4268     return DISAS_NEXT;
4269 }
4270 
4271 static DisasJumpType op_stsch(DisasContext *s, DisasOps *o)
4272 {
4273     gen_helper_stsch(cpu_env, regs[1], o->in2);
4274     set_cc_static(s);
4275     return DISAS_NEXT;
4276 }
4277 
4278 static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o)
4279 {
4280     gen_helper_stcrw(cpu_env, o->in2);
4281     set_cc_static(s);
4282     return DISAS_NEXT;
4283 }
4284 
4285 static DisasJumpType op_tpi(DisasContext *s, DisasOps *o)
4286 {
4287     gen_helper_tpi(cc_op, cpu_env, o->addr1);
4288     set_cc_static(s);
4289     return DISAS_NEXT;
4290 }
4291 
4292 static DisasJumpType op_tsch(DisasContext *s, DisasOps *o)
4293 {
4294     gen_helper_tsch(cpu_env, regs[1], o->in2);
4295     set_cc_static(s);
4296     return DISAS_NEXT;
4297 }
4298 
4299 static DisasJumpType op_chsc(DisasContext *s, DisasOps *o)
4300 {
4301     gen_helper_chsc(cpu_env, o->in2);
4302     set_cc_static(s);
4303     return DISAS_NEXT;
4304 }
4305 
4306 static DisasJumpType op_stpx(DisasContext *s, DisasOps *o)
4307 {
4308     tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
4309     tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
4310     return DISAS_NEXT;
4311 }
4312 
4313 static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o)
4314 {
4315     uint64_t i2 = get_field(s, i2);
4316     TCGv_i64 t;
4317 
4318     /* It is important to do what the instruction name says: STORE THEN.
4319        If we let the output hook perform the store then if we fault and
4320        restart, we'll have the wrong SYSTEM MASK in place.  */
4321     t = tcg_temp_new_i64();
4322     tcg_gen_shri_i64(t, psw_mask, 56);
4323     tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
4324 
4325     if (s->fields.op == 0xac) {
4326         tcg_gen_andi_i64(psw_mask, psw_mask,
4327                          (i2 << 56) | 0x00ffffffffffffffull);
4328     } else {
4329         tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
4330     }
4331 
4332     /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
4333     s->exit_to_mainloop = true;
4334     return DISAS_TOO_MANY;
4335 }
4336 
4337 static DisasJumpType op_stura(DisasContext *s, DisasOps *o)
4338 {
4339     tcg_gen_qemu_st_tl(o->in1, o->in2, MMU_REAL_IDX, s->insn->data);
4340 
4341     if (s->base.tb->flags & FLAG_MASK_PER) {
4342         update_psw_addr(s);
4343         gen_helper_per_store_real(cpu_env);
4344     }
4345     return DISAS_NEXT;
4346 }
4347 #endif
4348 
4349 static DisasJumpType op_stfle(DisasContext *s, DisasOps *o)
4350 {
4351     gen_helper_stfle(cc_op, cpu_env, o->in2);
4352     set_cc_static(s);
4353     return DISAS_NEXT;
4354 }
4355 
4356 static DisasJumpType op_st8(DisasContext *s, DisasOps *o)
4357 {
4358     tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
4359     return DISAS_NEXT;
4360 }
4361 
4362 static DisasJumpType op_st16(DisasContext *s, DisasOps *o)
4363 {
4364     tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
4365     return DISAS_NEXT;
4366 }
4367 
4368 static DisasJumpType op_st32(DisasContext *s, DisasOps *o)
4369 {
4370     tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
4371     return DISAS_NEXT;
4372 }
4373 
4374 static DisasJumpType op_st64(DisasContext *s, DisasOps *o)
4375 {
4376     tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
4377     return DISAS_NEXT;
4378 }
4379 
4380 static DisasJumpType op_stam(DisasContext *s, DisasOps *o)
4381 {
4382     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4383     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
4384 
4385     gen_helper_stam(cpu_env, r1, o->in2, r3);
4386     return DISAS_NEXT;
4387 }
4388 
4389 static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
4390 {
4391     int m3 = get_field(s, m3);
4392     int pos, base = s->insn->data;
4393     TCGv_i64 tmp = tcg_temp_new_i64();
4394 
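         /*
          * M3 is a 4-bit mask selecting bytes of the 32-bit field at bit
          * position "base" of r1, with 0x8 naming the leftmost byte.  The
          * selected bytes go to successive bytes at the second-operand
          * address, so a contiguous mask collapses into a single store.
          */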
4395     pos = base + ctz32(m3) * 8;
4396     switch (m3) {
4397     case 0xf:
4398         /* Effectively a 32-bit store.  */
4399         tcg_gen_shri_i64(tmp, o->in1, pos);
4400         tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
4401         break;
4402 
4403     case 0xc:
4404     case 0x6:
4405     case 0x3:
4406         /* Effectively a 16-bit store.  */
4407         tcg_gen_shri_i64(tmp, o->in1, pos);
4408         tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
4409         break;
4410 
4411     case 0x8:
4412     case 0x4:
4413     case 0x2:
4414     case 0x1:
4415         /* Effectively an 8-bit store.  */
4416         tcg_gen_shri_i64(tmp, o->in1, pos);
4417         tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
4418         break;
4419 
4420     default:
4421         /* This is going to be a sequence of shifts and stores.  */
4422         pos = base + 32 - 8;
4423         while (m3) {
4424             if (m3 & 0x8) {
4425                 tcg_gen_shri_i64(tmp, o->in1, pos);
4426                 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
4427                 tcg_gen_addi_i64(o->in2, o->in2, 1);
4428             }
4429             m3 = (m3 << 1) & 0xf;
4430             pos -= 8;
4431         }
4432         break;
4433     }
4434     return DISAS_NEXT;
4435 }
4436 
4437 static DisasJumpType op_stm(DisasContext *s, DisasOps *o)
4438 {
4439     int r1 = get_field(s, r1);
4440     int r3 = get_field(s, r3);
4441     int size = s->insn->data;
4442     TCGv_i64 tsize = tcg_constant_i64(size);
4443 
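         /*
          * Store registers r1 through r3 into consecutive fields; the
          * register number wraps from 15 to 0, so e.g. STMG %r14,%r2,...
          * stores r14, r15, r0, r1 and r2.  (Example operands assumed.)
          */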
4444     while (1) {
4445         if (size == 8) {
4446             tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
4447         } else {
4448             tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
4449         }
4450         if (r1 == r3) {
4451             break;
4452         }
4453         tcg_gen_add_i64(o->in2, o->in2, tsize);
4454         r1 = (r1 + 1) & 15;
4455     }
4456 
4457     return DISAS_NEXT;
4458 }
4459 
4460 static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)
4461 {
4462     int r1 = get_field(s, r1);
4463     int r3 = get_field(s, r3);
4464     TCGv_i64 t = tcg_temp_new_i64();
4465     TCGv_i64 t4 = tcg_constant_i64(4);
4466     TCGv_i64 t32 = tcg_constant_i64(32);
4467 
4468     while (1) {
4469         tcg_gen_shl_i64(t, regs[r1], t32);
4470         tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
4471         if (r1 == r3) {
4472             break;
4473         }
4474         tcg_gen_add_i64(o->in2, o->in2, t4);
4475         r1 = (r1 + 1) & 15;
4476     }
4477     return DISAS_NEXT;
4478 }
4479 
4480 static DisasJumpType op_stpq(DisasContext *s, DisasOps *o)
4481 {
4482     if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
4483         gen_helper_stpq(cpu_env, o->in2, o->out2, o->out);
4484     } else if (HAVE_ATOMIC128) {
4485         gen_helper_stpq_parallel(cpu_env, o->in2, o->out2, o->out);
4486     } else {
4487         gen_helper_exit_atomic(cpu_env);
4488         return DISAS_NORETURN;
4489     }
4490     return DISAS_NEXT;
4491 }
4492 
4493 static DisasJumpType op_srst(DisasContext *s, DisasOps *o)
4494 {
4495     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4496     TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4497 
4498     gen_helper_srst(cpu_env, r1, r2);
4499     set_cc_static(s);
4500     return DISAS_NEXT;
4501 }
4502 
4503 static DisasJumpType op_srstu(DisasContext *s, DisasOps *o)
4504 {
4505     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4506     TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4507 
4508     gen_helper_srstu(cpu_env, r1, r2);
4509     set_cc_static(s);
4510     return DISAS_NEXT;
4511 }
4512 
4513 static DisasJumpType op_sub(DisasContext *s, DisasOps *o)
4514 {
4515     tcg_gen_sub_i64(o->out, o->in1, o->in2);
4516     return DISAS_NEXT;
4517 }
4518 
4519 static DisasJumpType op_subu64(DisasContext *s, DisasOps *o)
4520 {
4521     tcg_gen_movi_i64(cc_src, 0);
4522     tcg_gen_sub2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
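         /*
          * The high half of the 128-bit difference lands in cc_src as 0
          * or -1, which is exactly the borrow encoding CC_OP_SUBU expects.
          */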
4523     return DISAS_NEXT;
4524 }
4525 
4526 /* Compute borrow (0, -1) into cc_src. */
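     /*
      * In the logical-subtraction CC encoding, CC 2 and 3 mean "no
      * borrow", so the msb of the 2-bit CC is the carry: cc >> 1 yields
      * carry {0,1}, and subtracting 1 turns that into borrow {0,-1}.
      */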
4527 static void compute_borrow(DisasContext *s)
4528 {
4529     switch (s->cc_op) {
4530     case CC_OP_SUBU:
4531         /* The borrow value is already in cc_src (0,-1). */
4532         break;
4533     default:
4534         gen_op_calc_cc(s);
4535         /* fall through */
4536     case CC_OP_STATIC:
4537         /* The carry flag is the msb of CC; compute into cc_src. */
4538         tcg_gen_extu_i32_i64(cc_src, cc_op);
4539         tcg_gen_shri_i64(cc_src, cc_src, 1);
4540         /* fall through */
4541     case CC_OP_ADDU:
4542         /* Convert carry (1,0) to borrow (0,-1). */
4543         tcg_gen_subi_i64(cc_src, cc_src, 1);
4544         break;
4545     }
4546 }
4547 
4548 static DisasJumpType op_subb32(DisasContext *s, DisasOps *o)
4549 {
4550     compute_borrow(s);
4551 
4552     /* Borrow is {0, -1}, so add to subtract. */
4553     tcg_gen_add_i64(o->out, o->in1, cc_src);
4554     tcg_gen_sub_i64(o->out, o->out, o->in2);
4555     return DISAS_NEXT;
4556 }
4557 
4558 static DisasJumpType op_subb64(DisasContext *s, DisasOps *o)
4559 {
4560     compute_borrow(s);
4561 
4562     /*
4563      * Borrow is {0, -1}, so add to subtract; replicate the
4564      * borrow input to produce 128-bit -1 for the addition.
4565      */
4566     TCGv_i64 zero = tcg_constant_i64(0);
4567     tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, cc_src);
4568     tcg_gen_sub2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);
4569 
4570     return DISAS_NEXT;
4571 }
4572 
4573 static DisasJumpType op_svc(DisasContext *s, DisasOps *o)
4574 {
4575     TCGv_i32 t;
4576 
4577     update_psw_addr(s);
4578     update_cc_op(s);
4579 
4580     t = tcg_constant_i32(get_field(s, i1) & 0xff);
4581     tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
4582 
4583     t = tcg_constant_i32(s->ilen);
4584     tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
4585 
4586     gen_exception(EXCP_SVC);
4587     return DISAS_NORETURN;
4588 }
4589 
4590 static DisasJumpType op_tam(DisasContext *s, DisasOps *o)
4591 {
4592     int cc = 0;
4593 
4594     cc |= (s->base.tb->flags & FLAG_MASK_64) ? 2 : 0;
4595     cc |= (s->base.tb->flags & FLAG_MASK_32) ? 1 : 0;
4596     gen_op_movi_cc(s, cc);
4597     return DISAS_NEXT;
4598 }
4599 
4600 static DisasJumpType op_tceb(DisasContext *s, DisasOps *o)
4601 {
4602     gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
4603     set_cc_static(s);
4604     return DISAS_NEXT;
4605 }
4606 
4607 static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o)
4608 {
4609     gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
4610     set_cc_static(s);
4611     return DISAS_NEXT;
4612 }
4613 
4614 static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o)
4615 {
4616     gen_helper_tcxb(cc_op, cpu_env, o->in1_128, o->in2);
4617     set_cc_static(s);
4618     return DISAS_NEXT;
4619 }
4620 
4621 #ifndef CONFIG_USER_ONLY
4622 
4623 static DisasJumpType op_testblock(DisasContext *s, DisasOps *o)
4624 {
4625     gen_helper_testblock(cc_op, cpu_env, o->in2);
4626     set_cc_static(s);
4627     return DISAS_NEXT;
4628 }
4629 
4630 static DisasJumpType op_tprot(DisasContext *s, DisasOps *o)
4631 {
4632     gen_helper_tprot(cc_op, cpu_env, o->addr1, o->in2);
4633     set_cc_static(s);
4634     return DISAS_NEXT;
4635 }
4636 
4637 #endif
4638 
4639 static DisasJumpType op_tp(DisasContext *s, DisasOps *o)
4640 {
4641     TCGv_i32 l1 = tcg_constant_i32(get_field(s, l1) + 1);
4642 
4643     gen_helper_tp(cc_op, cpu_env, o->addr1, l1);
4644     set_cc_static(s);
4645     return DISAS_NEXT;
4646 }
4647 
4648 static DisasJumpType op_tr(DisasContext *s, DisasOps *o)
4649 {
4650     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
4651 
4652     gen_helper_tr(cpu_env, l, o->addr1, o->in2);
4653     set_cc_static(s);
4654     return DISAS_NEXT;
4655 }
4656 
4657 static DisasJumpType op_tre(DisasContext *s, DisasOps *o)
4658 {
4659     TCGv_i128 pair = tcg_temp_new_i128();
4660 
4661     gen_helper_tre(pair, cpu_env, o->out, o->out2, o->in2);
4662     tcg_gen_extr_i128_i64(o->out2, o->out, pair);
4663     set_cc_static(s);
4664     return DISAS_NEXT;
4665 }
4666 
4667 static DisasJumpType op_trt(DisasContext *s, DisasOps *o)
4668 {
4669     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
4670 
4671     gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
4672     set_cc_static(s);
4673     return DISAS_NEXT;
4674 }
4675 
4676 static DisasJumpType op_trtr(DisasContext *s, DisasOps *o)
4677 {
4678     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
4679 
4680     gen_helper_trtr(cc_op, cpu_env, l, o->addr1, o->in2);
4681     set_cc_static(s);
4682     return DISAS_NEXT;
4683 }
4684 
4685 static DisasJumpType op_trXX(DisasContext *s, DisasOps *o)
4686 {
4687     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4688     TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4689     TCGv_i32 sizes = tcg_constant_i32(s->insn->opc & 3);
4690     TCGv_i32 tst = tcg_temp_new_i32();
4691     int m3 = get_field(s, m3);
4692 
4693     if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
4694         m3 = 0;
4695     }
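         /*
          * If the low bit of m3 is set (and the ETF2-enhancement facility
          * is available), no test character applies (tst = -1); otherwise
          * the test character is taken from r0, one or two bytes wide
          * depending on the opcode's low bits.
          */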
4696     if (m3 & 1) {
4697         tcg_gen_movi_i32(tst, -1);
4698     } else {
4699         tcg_gen_extrl_i64_i32(tst, regs[0]);
4700         if (s->insn->opc & 3) {
4701             tcg_gen_ext8u_i32(tst, tst);
4702         } else {
4703             tcg_gen_ext16u_i32(tst, tst);
4704         }
4705     }
4706     gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);
4707 
4708     set_cc_static(s);
4709     return DISAS_NEXT;
4710 }
4711 
4712 static DisasJumpType op_ts(DisasContext *s, DisasOps *o)
4713 {
4714     TCGv_i32 t1 = tcg_constant_i32(0xff);
4715 
4716     tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
4717     tcg_gen_extract_i32(cc_op, t1, 7, 1);
4718     set_cc_static(s);
4719     return DISAS_NEXT;
4720 }
4721 
4722 static DisasJumpType op_unpk(DisasContext *s, DisasOps *o)
4723 {
4724     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
4725 
4726     gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
4727     return DISAS_NEXT;
4728 }
4729 
4730 static DisasJumpType op_unpka(DisasContext *s, DisasOps *o)
4731 {
4732     int l1 = get_field(s, l1) + 1;
4733     TCGv_i32 l;
4734 
4735     /* The length must not exceed 32 bytes.  */
4736     if (l1 > 32) {
4737         gen_program_exception(s, PGM_SPECIFICATION);
4738         return DISAS_NORETURN;
4739     }
4740     l = tcg_constant_i32(l1);
4741     gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2);
4742     set_cc_static(s);
4743     return DISAS_NEXT;
4744 }
4745 
4746 static DisasJumpType op_unpku(DisasContext *s, DisasOps *o)
4747 {
4748     int l1 = get_field(s, l1) + 1;
4749     TCGv_i32 l;
4750 
4751     /* The length must be even and must not exceed 64 bytes.  */
4752     if ((l1 & 1) || (l1 > 64)) {
4753         gen_program_exception(s, PGM_SPECIFICATION);
4754         return DISAS_NORETURN;
4755     }
4756     l = tcg_constant_i32(l1);
4757     gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2);
4758     set_cc_static(s);
4759     return DISAS_NEXT;
4760 }
4761 
4762 
4763 static DisasJumpType op_xc(DisasContext *s, DisasOps *o)
4764 {
4765     int d1 = get_field(s, d1);
4766     int d2 = get_field(s, d2);
4767     int b1 = get_field(s, b1);
4768     int b2 = get_field(s, b2);
4769     int l = get_field(s, l1);
4770     TCGv_i32 t32;
4771 
4772     o->addr1 = get_address(s, 0, b1, d1);
4773 
4774     /* If the addresses are identical, this is a store/memset of zero.  */
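         /*
          * For example (operands assumed), XC 0(13,%r2),0(%r2) clears 13
          * bytes with one 8-byte, one 4-byte and one 1-byte store; only
          * lengths up to 32 bytes are expanded inline like this.
          */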
4775     if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
4776         o->in2 = tcg_constant_i64(0);
4777 
4778         l++;
4779         while (l >= 8) {
4780             tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
4781             l -= 8;
4782             if (l > 0) {
4783                 tcg_gen_addi_i64(o->addr1, o->addr1, 8);
4784             }
4785         }
4786         if (l >= 4) {
4787             tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
4788             l -= 4;
4789             if (l > 0) {
4790                 tcg_gen_addi_i64(o->addr1, o->addr1, 4);
4791             }
4792         }
4793         if (l >= 2) {
4794             tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
4795             l -= 2;
4796             if (l > 0) {
4797                 tcg_gen_addi_i64(o->addr1, o->addr1, 2);
4798             }
4799         }
4800         if (l) {
4801             tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
4802         }
4803         gen_op_movi_cc(s, 0);
4804         return DISAS_NEXT;
4805     }
4806 
4807     /* But in general we'll defer to a helper.  */
4808     o->in2 = get_address(s, 0, b2, d2);
4809     t32 = tcg_constant_i32(l);
4810     gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
4811     set_cc_static(s);
4812     return DISAS_NEXT;
4813 }
4814 
4815 static DisasJumpType op_xor(DisasContext *s, DisasOps *o)
4816 {
4817     tcg_gen_xor_i64(o->out, o->in1, o->in2);
4818     return DISAS_NEXT;
4819 }
4820 
4821 static DisasJumpType op_xori(DisasContext *s, DisasOps *o)
4822 {
4823     int shift = s->insn->data & 0xff;
4824     int size = s->insn->data >> 8;
4825     uint64_t mask = ((1ull << size) - 1) << shift;
4826     TCGv_i64 t = tcg_temp_new_i64();
4827 
4828     tcg_gen_shli_i64(t, o->in2, shift);
4829     tcg_gen_xor_i64(o->out, o->in1, t);
4830 
4831     /* Produce the CC from only the bits manipulated.  */
4832     tcg_gen_andi_i64(cc_dst, o->out, mask);
4833     set_cc_nz_u64(s, cc_dst);
4834     return DISAS_NEXT;
4835 }
4836 
4837 static DisasJumpType op_xi(DisasContext *s, DisasOps *o)
4838 {
4839     o->in1 = tcg_temp_new_i64();
4840 
4841     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
4842         tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
4843     } else {
4844         /* Perform the atomic operation in memory. */
4845         tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
4846                                      s->insn->data);
4847     }
4848 
4849     /* Recompute also for atomic case: needed for setting CC. */
4850     tcg_gen_xor_i64(o->out, o->in1, o->in2);
4851 
4852     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
4853         tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
4854     }
4855     return DISAS_NEXT;
4856 }
4857 
4858 static DisasJumpType op_zero(DisasContext *s, DisasOps *o)
4859 {
4860     o->out = tcg_constant_i64(0);
4861     return DISAS_NEXT;
4862 }
4863 
4864 static DisasJumpType op_zero2(DisasContext *s, DisasOps *o)
4865 {
4866     o->out = tcg_constant_i64(0);
4867     o->out2 = o->out;
4868     return DISAS_NEXT;
4869 }
4870 
4871 #ifndef CONFIG_USER_ONLY
4872 static DisasJumpType op_clp(DisasContext *s, DisasOps *o)
4873 {
4874     TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4875 
4876     gen_helper_clp(cpu_env, r2);
4877     set_cc_static(s);
4878     return DISAS_NEXT;
4879 }
4880 
4881 static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o)
4882 {
4883     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4884     TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4885 
4886     gen_helper_pcilg(cpu_env, r1, r2);
4887     set_cc_static(s);
4888     return DISAS_NEXT;
4889 }
4890 
4891 static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o)
4892 {
4893     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4894     TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4895 
4896     gen_helper_pcistg(cpu_env, r1, r2);
4897     set_cc_static(s);
4898     return DISAS_NEXT;
4899 }
4900 
4901 static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o)
4902 {
4903     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4904     TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));
4905 
4906     gen_helper_stpcifc(cpu_env, r1, o->addr1, ar);
4907     set_cc_static(s);
4908     return DISAS_NEXT;
4909 }
4910 
4911 static DisasJumpType op_sic(DisasContext *s, DisasOps *o)
4912 {
4913     gen_helper_sic(cpu_env, o->in1, o->in2);
4914     return DISAS_NEXT;
4915 }
4916 
4917 static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o)
4918 {
4919     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4920     TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4921 
4922     gen_helper_rpcit(cpu_env, r1, r2);
4923     set_cc_static(s);
4924     return DISAS_NEXT;
4925 }
4926 
4927 static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o)
4928 {
4929     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4930     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
4931     TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));
4932 
4933     gen_helper_pcistb(cpu_env, r1, r3, o->addr1, ar);
4934     set_cc_static(s);
4935     return DISAS_NEXT;
4936 }
4937 
4938 static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o)
4939 {
4940     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4941     TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));
4942 
4943     gen_helper_mpcifc(cpu_env, r1, o->addr1, ar);
4944     set_cc_static(s);
4945     return DISAS_NEXT;
4946 }
4947 #endif
4948 
4949 #include "translate_vx.c.inc"
4950 
4951 /* ====================================================================== */
4952 /* The "Cc OUTput" generators.  Given the generated output (and in some cases
4953    the original inputs), update the various cc data structures in order to
4954    be able to compute the new condition code.  */
4955 
4956 static void cout_abs32(DisasContext *s, DisasOps *o)
4957 {
4958     gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
4959 }
4960 
4961 static void cout_abs64(DisasContext *s, DisasOps *o)
4962 {
4963     gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
4964 }
4965 
4966 static void cout_adds32(DisasContext *s, DisasOps *o)
4967 {
4968     gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
4969 }
4970 
4971 static void cout_adds64(DisasContext *s, DisasOps *o)
4972 {
4973     gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
4974 }
4975 
4976 static void cout_addu32(DisasContext *s, DisasOps *o)
4977 {
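         /*
          * The 32-bit logical addition was performed in 64 bits: bit 32
          * of the result is the carry-out, the low 32 bits are the sum.
          */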
4978     tcg_gen_shri_i64(cc_src, o->out, 32);
4979     tcg_gen_ext32u_i64(cc_dst, o->out);
4980     gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, cc_dst);
4981 }
4982 
4983 static void cout_addu64(DisasContext *s, DisasOps *o)
4984 {
4985     gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, o->out);
4986 }
4987 
4988 static void cout_cmps32(DisasContext *s, DisasOps *o)
4989 {
4990     gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
4991 }
4992 
4993 static void cout_cmps64(DisasContext *s, DisasOps *o)
4994 {
4995     gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
4996 }
4997 
4998 static void cout_cmpu32(DisasContext *s, DisasOps *o)
4999 {
5000     gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
5001 }
5002 
5003 static void cout_cmpu64(DisasContext *s, DisasOps *o)
5004 {
5005     gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
5006 }
5007 
5008 static void cout_f32(DisasContext *s, DisasOps *o)
5009 {
5010     gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
5011 }
5012 
5013 static void cout_f64(DisasContext *s, DisasOps *o)
5014 {
5015     gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
5016 }
5017 
5018 static void cout_f128(DisasContext *s, DisasOps *o)
5019 {
5020     gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
5021 }
5022 
5023 static void cout_nabs32(DisasContext *s, DisasOps *o)
5024 {
5025     gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
5026 }
5027 
5028 static void cout_nabs64(DisasContext *s, DisasOps *o)
5029 {
5030     gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
5031 }
5032 
5033 static void cout_neg32(DisasContext *s, DisasOps *o)
5034 {
5035     gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
5036 }
5037 
5038 static void cout_neg64(DisasContext *s, DisasOps *o)
5039 {
5040     gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
5041 }
5042 
5043 static void cout_nz32(DisasContext *s, DisasOps *o)
5044 {
5045     tcg_gen_ext32u_i64(cc_dst, o->out);
5046     gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
5047 }
5048 
5049 static void cout_nz64(DisasContext *s, DisasOps *o)
5050 {
5051     gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
5052 }
5053 
5054 static void cout_s32(DisasContext *s, DisasOps *o)
5055 {
5056     gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
5057 }
5058 
5059 static void cout_s64(DisasContext *s, DisasOps *o)
5060 {
5061     gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
5062 }
5063 
5064 static void cout_subs32(DisasContext *s, DisasOps *o)
5065 {
5066     gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
5067 }
5068 
5069 static void cout_subs64(DisasContext *s, DisasOps *o)
5070 {
5071     gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
5072 }
5073 
5074 static void cout_subu32(DisasContext *s, DisasOps *o)
5075 {
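         /*
          * The 64-bit difference of the zero-extended 32-bit operands is
          * negative exactly when the subtraction borrowed, so the
          * arithmetic shift by 32 yields the {0,-1} borrow CC_OP_SUBU
          * expects.
          */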
5076     tcg_gen_sari_i64(cc_src, o->out, 32);
5077     tcg_gen_ext32u_i64(cc_dst, o->out);
5078     gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, cc_dst);
5079 }
5080 
5081 static void cout_subu64(DisasContext *s, DisasOps *o)
5082 {
5083     gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, o->out);
5084 }
5085 
5086 static void cout_tm32(DisasContext *s, DisasOps *o)
5087 {
5088     gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
5089 }
5090 
5091 static void cout_tm64(DisasContext *s, DisasOps *o)
5092 {
5093     gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
5094 }
5095 
5096 static void cout_muls32(DisasContext *s, DisasOps *o)
5097 {
5098     gen_op_update1_cc_i64(s, CC_OP_MULS_32, o->out);
5099 }
5100 
5101 static void cout_muls64(DisasContext *s, DisasOps *o)
5102 {
5103     /* out contains "high" part, out2 contains "low" part of 128-bit result */
5104     gen_op_update2_cc_i64(s, CC_OP_MULS_64, o->out, o->out2);
5105 }
5106 
5107 /* ====================================================================== */
5108 /* The "PREParation" generators.  These initialize the DisasOps.OUT fields
5109    with the TCG register to which we will write.  Used in combination with
5110    the "wout" generators, in some cases we need a new temporary, and in
5111    some cases we can write to a TCG global.  */
5112 
5113 static void prep_new(DisasContext *s, DisasOps *o)
5114 {
5115     o->out = tcg_temp_new_i64();
5116 }
5117 #define SPEC_prep_new 0
5118 
5119 static void prep_new_P(DisasContext *s, DisasOps *o)
5120 {
5121     o->out = tcg_temp_new_i64();
5122     o->out2 = tcg_temp_new_i64();
5123 }
5124 #define SPEC_prep_new_P 0
5125 
5126 static void prep_new_x(DisasContext *s, DisasOps *o)
5127 {
5128     o->out_128 = tcg_temp_new_i128();
5129 }
5130 #define SPEC_prep_new_x 0
5131 
5132 static void prep_r1(DisasContext *s, DisasOps *o)
5133 {
5134     o->out = regs[get_field(s, r1)];
5135 }
5136 #define SPEC_prep_r1 0
5137 
5138 static void prep_r1_P(DisasContext *s, DisasOps *o)
5139 {
5140     int r1 = get_field(s, r1);
5141     o->out = regs[r1];
5142     o->out2 = regs[r1 + 1];
5143 }
5144 #define SPEC_prep_r1_P SPEC_r1_even
5145 
5146 static void prep_x1(DisasContext *s, DisasOps *o)
5147 {
5148     o->out_128 = load_freg_128(get_field(s, r1));
5149 }
5150 #define SPEC_prep_x1 SPEC_r1_f128
5151 
5152 /* ====================================================================== */
5153 /* The "Write OUTput" generators.  These generally perform some non-trivial
5154    copy of data to TCG globals, or to main memory.  The trivial cases are
5155    generally handled by having a "prep" generator install the TCG global
5156    as the destination of the operation.  */
5157 
5158 static void wout_r1(DisasContext *s, DisasOps *o)
5159 {
5160     store_reg(get_field(s, r1), o->out);
5161 }
5162 #define SPEC_wout_r1 0
5163 
5164 static void wout_out2_r1(DisasContext *s, DisasOps *o)
5165 {
5166     store_reg(get_field(s, r1), o->out2);
5167 }
5168 #define SPEC_wout_out2_r1 0
5169 
5170 static void wout_r1_8(DisasContext *s, DisasOps *o)
5171 {
5172     int r1 = get_field(s, r1);
5173     tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
5174 }
5175 #define SPEC_wout_r1_8 0
5176 
5177 static void wout_r1_16(DisasContext *s, DisasOps *o)
5178 {
5179     int r1 = get_field(s, r1);
5180     tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
5181 }
5182 #define SPEC_wout_r1_16 0
5183 
5184 static void wout_r1_32(DisasContext *s, DisasOps *o)
5185 {
5186     store_reg32_i64(get_field(s, r1), o->out);
5187 }
5188 #define SPEC_wout_r1_32 0
5189 
5190 static void wout_r1_32h(DisasContext *s, DisasOps *o)
5191 {
5192     store_reg32h_i64(get_field(s, r1), o->out);
5193 }
5194 #define SPEC_wout_r1_32h 0
5195 
5196 static void wout_r1_P32(DisasContext *s, DisasOps *o)
5197 {
5198     int r1 = get_field(s, r1);
5199     store_reg32_i64(r1, o->out);
5200     store_reg32_i64(r1 + 1, o->out2);
5201 }
5202 #define SPEC_wout_r1_P32 SPEC_r1_even
5203 
5204 static void wout_r1_D32(DisasContext *s, DisasOps *o)
5205 {
5206     int r1 = get_field(s, r1);
5207     TCGv_i64 t = tcg_temp_new_i64();
5208     store_reg32_i64(r1 + 1, o->out);
5209     tcg_gen_shri_i64(t, o->out, 32);
5210     store_reg32_i64(r1, t);
5211 }
5212 #define SPEC_wout_r1_D32 SPEC_r1_even
5213 
5214 static void wout_r1_D64(DisasContext *s, DisasOps *o)
5215 {
5216     int r1 = get_field(s, r1);
5217     tcg_gen_extr_i128_i64(regs[r1 + 1], regs[r1], o->out_128);
5218 }
5219 #define SPEC_wout_r1_D64 SPEC_r1_even
5220 
5221 static void wout_r3_P32(DisasContext *s, DisasOps *o)
5222 {
5223     int r3 = get_field(s, r3);
5224     store_reg32_i64(r3, o->out);
5225     store_reg32_i64(r3 + 1, o->out2);
5226 }
5227 #define SPEC_wout_r3_P32 SPEC_r3_even
5228 
5229 static void wout_r3_P64(DisasContext *s, DisasOps *o)
5230 {
5231     int r3 = get_field(s, r3);
5232     store_reg(r3, o->out);
5233     store_reg(r3 + 1, o->out2);
5234 }
5235 #define SPEC_wout_r3_P64 SPEC_r3_even
5236 
5237 static void wout_e1(DisasContext *s, DisasOps *o)
5238 {
5239     store_freg32_i64(get_field(s, r1), o->out);
5240 }
5241 #define SPEC_wout_e1 0
5242 
5243 static void wout_f1(DisasContext *s, DisasOps *o)
5244 {
5245     store_freg(get_field(s, r1), o->out);
5246 }
5247 #define SPEC_wout_f1 0
5248 
5249 static void wout_x1(DisasContext *s, DisasOps *o)
5250 {
5251     int f1 = get_field(s, r1);
5252 
5253     /* Split out_128 into out+out2 for cout_f128. */
5254     tcg_debug_assert(o->out == NULL);
5255     o->out = tcg_temp_new_i64();
5256     o->out2 = tcg_temp_new_i64();
5257 
5258     tcg_gen_extr_i128_i64(o->out2, o->out, o->out_128);
5259     store_freg(f1, o->out);
5260     store_freg(f1 + 2, o->out2);
5261 }
5262 #define SPEC_wout_x1 SPEC_r1_f128
5263 
5264 static void wout_x1_P(DisasContext *s, DisasOps *o)
5265 {
5266     int f1 = get_field(s, r1);
5267     store_freg(f1, o->out);
5268     store_freg(f1 + 2, o->out2);
5269 }
5270 #define SPEC_wout_x1_P SPEC_r1_f128
5271 
5272 static void wout_cond_r1r2_32(DisasContext *s, DisasOps *o)
5273 {
5274     if (get_field(s, r1) != get_field(s, r2)) {
5275         store_reg32_i64(get_field(s, r1), o->out);
5276     }
5277 }
5278 #define SPEC_wout_cond_r1r2_32 0
5279 
5280 static void wout_cond_e1e2(DisasContext *s, DisasOps *o)
5281 {
5282     if (get_field(s, r1) != get_field(s, r2)) {
5283         store_freg32_i64(get_field(s, r1), o->out);
5284     }
5285 }
5286 #define SPEC_wout_cond_e1e2 0
5287 
5288 static void wout_m1_8(DisasContext *s, DisasOps *o)
5289 {
5290     tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
5291 }
5292 #define SPEC_wout_m1_8 0
5293 
5294 static void wout_m1_16(DisasContext *s, DisasOps *o)
5295 {
5296     tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
5297 }
5298 #define SPEC_wout_m1_16 0
5299 
#ifndef CONFIG_USER_ONLY
static void wout_m1_16a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN);
}
#define SPEC_wout_m1_16a 0
#endif

static void wout_m1_32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_32 0

#ifndef CONFIG_USER_ONLY
static void wout_m1_32a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_wout_m1_32a 0
#endif

static void wout_m1_64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_64 0

#ifndef CONFIG_USER_ONLY
static void wout_m1_64a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUQ | MO_ALIGN);
}
#define SPEC_wout_m1_64a 0
#endif

static void wout_m2_32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
}
#define SPEC_wout_m2_32 0

static void wout_in2_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->in2);
}
#define SPEC_wout_in2_r1 0

static void wout_in2_r1_32(DisasContext *s, DisasOps *o)
{
    store_reg32_i64(get_field(s, r1), o->in2);
}
#define SPEC_wout_in2_r1_32 0

/* ====================================================================== */
/* The "INput 1" generators.  These load the first operand to an insn.  */

static void in1_r1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r1));
}
#define SPEC_in1_r1 0

static void in1_r1_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r1)];
}
#define SPEC_in1_r1_o 0

static void in1_r1_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1)]);
}
#define SPEC_in1_r1_32s 0

static void in1_r1_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1)]);
}
#define SPEC_in1_r1_32u 0

static void in1_r1_sr32(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(s, r1)], 32);
}
#define SPEC_in1_r1_sr32 0

static void in1_r1p1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r1) + 1);
}
#define SPEC_in1_r1p1 SPEC_r1_even

static void in1_r1p1_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r1) + 1];
}
#define SPEC_in1_r1p1_o SPEC_r1_even

static void in1_r1p1_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1) + 1]);
}
#define SPEC_in1_r1p1_32s SPEC_r1_even

static void in1_r1p1_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1) + 1]);
}
#define SPEC_in1_r1p1_32u SPEC_r1_even

static void in1_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
}
#define SPEC_in1_r1_D32 SPEC_r1_even

static void in1_r2(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r2));
}
#define SPEC_in1_r2 0

static void in1_r2_sr32(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(s, r2)], 32);
}
#define SPEC_in1_r2_sr32 0

static void in1_r2_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r2)]);
}
#define SPEC_in1_r2_32u 0

static void in1_r3(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r3));
}
#define SPEC_in1_r3 0

static void in1_r3_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r3)];
}
#define SPEC_in1_r3_o 0

static void in1_r3_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r3)]);
}
#define SPEC_in1_r3_32s 0

static void in1_r3_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r3)]);
}
#define SPEC_in1_r3_32u 0

static void in1_r3_D32(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
}
#define SPEC_in1_r3_D32 SPEC_r3_even

static void in1_r3_sr32(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(s, r3)], 32);
}
#define SPEC_in1_r3_sr32 0

static void in1_e1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(s, r1));
}
#define SPEC_in1_e1 0

static void in1_f1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r1));
}
#define SPEC_in1_f1 0

static void in1_x1(DisasContext *s, DisasOps *o)
{
    o->in1_128 = load_freg_128(get_field(s, r1));
}
#define SPEC_in1_x1 SPEC_r1_f128

/* Load the high double word of an extended (128-bit) format FP number */
static void in1_x2h(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r2));
}
#define SPEC_in1_x2h SPEC_r2_f128

static void in1_f3(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r3));
}
#define SPEC_in1_f3 0

static void in1_la1(DisasContext *s, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
}
#define SPEC_in1_la1 0

static void in1_la2(DisasContext *s, DisasOps *o)
{
    int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
}
#define SPEC_in1_la2 0

static void in1_m1_8u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_8u 0

static void in1_m1_16s(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16s 0

static void in1_m1_16u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16u 0

static void in1_m1_32s(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32s 0

static void in1_m1_32u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32u 0

static void in1_m1_64(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_64 0

/* ====================================================================== */
/* The "INput 2" generators.  These load the second operand to an insn.  */

static void in2_r1_o(DisasContext *s, DisasOps *o)
{
    o->in2 = regs[get_field(s, r1)];
}
#define SPEC_in2_r1_o 0

static void in2_r1_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r1)]);
}
#define SPEC_in2_r1_16u 0

static void in2_r1_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r1)]);
}
#define SPEC_in2_r1_32u 0

static void in2_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
}
#define SPEC_in2_r1_D32 SPEC_r1_even

static void in2_r2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_reg(get_field(s, r2));
}
#define SPEC_in2_r2 0

static void in2_r2_o(DisasContext *s, DisasOps *o)
{
    o->in2 = regs[get_field(s, r2)];
}
#define SPEC_in2_r2_o 0

static void in2_r2_nz(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);
    if (r2 != 0) {
        o->in2 = load_reg(r2);
    }
}
#define SPEC_in2_r2_nz 0

static void in2_r2_8s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_8s 0

static void in2_r2_8u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_8u 0

static void in2_r2_16s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_16s 0

static void in2_r2_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_16u 0

static void in2_r3(DisasContext *s, DisasOps *o)
{
    o->in2 = load_reg(get_field(s, r3));
}
#define SPEC_in2_r3 0

static void in2_r3_D64(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    o->in2_128 = tcg_temp_new_i128();
    tcg_gen_concat_i64_i128(o->in2_128, regs[r3 + 1], regs[r3]);
}
#define SPEC_in2_r3_D64 SPEC_r3_even

static void in2_r3_sr32(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(s, r3)], 32);
}
#define SPEC_in2_r3_sr32 0

static void in2_r3_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r3)]);
}
#define SPEC_in2_r3_32u 0

static void in2_r2_32s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_32s 0

static void in2_r2_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_32u 0

static void in2_r2_sr32(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(s, r2)], 32);
}
#define SPEC_in2_r2_sr32 0

static void in2_e2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(s, r2));
}
#define SPEC_in2_e2 0

static void in2_f2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg(get_field(s, r2));
}
#define SPEC_in2_f2 0

static void in2_x2(DisasContext *s, DisasOps *o)
{
    o->in2_128 = load_freg_128(get_field(s, r2));
}
#define SPEC_in2_x2 SPEC_r2_f128

/* Load the low double word of an extended (128-bit) format FP number */
static void in2_x2l(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg(get_field(s, r2) + 2);
}
#define SPEC_in2_x2l SPEC_r2_f128
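
/*
 * Note: extended (128-bit) FP values live in register pairs of the form
 * (r, r + 2), so in1_x2h above reads the high half from f[r2] while
 * in2_x2l reads the low half from f[r2 + 2].
 */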

static void in2_ra2(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);

    /* Note: *don't* treat !r2 as 0, use the reg value. */
    o->in2 = tcg_temp_new_i64();
    gen_addi_and_wrap_i64(s, o->in2, regs[r2], 0);
}
#define SPEC_in2_ra2 0

static void in2_a2(DisasContext *s, DisasOps *o)
{
    int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
    o->in2 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
}
#define SPEC_in2_a2 0
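
/*
 * Illustrative note: in2_a2 above forms the usual base + index +
 * displacement effective address, i.e. (x2 ? regs[x2] : 0) +
 * (b2 ? regs[b2] : 0) + d2, with get_address() applying the 24/31-bit
 * wrap when not in 64-bit mode.
 */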

static TCGv gen_ri2(DisasContext *s)
{
    return tcg_constant_i64(s->base.pc_next + (int64_t)get_field(s, i2) * 2);
}
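
/*
 * gen_ri2 yields the PC-relative second-operand address: i2 is a signed
 * halfword count, so the target is pc_next + 2 * i2.
 */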

static void in2_ri2(DisasContext *s, DisasOps *o)
{
    o->in2 = gen_ri2(s);
}
#define SPEC_in2_ri2 0

static void in2_sh(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s, b2);
    int d2 = get_field(s, d2);

    if (b2 == 0) {
        o->in2 = tcg_constant_i64(d2 & 0x3f);
    } else {
        o->in2 = get_address(s, 0, b2, d2);
        tcg_gen_andi_i64(o->in2, o->in2, 0x3f);
    }
}
#define SPEC_in2_sh 0
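
/*
 * Worked example (illustrative): for SLLG r1,r3,3(r4) we have b2 = 4 and
 * d2 = 3, so in2_sh above yields (regs[4] + 3) & 63; only the low six
 * bits of the second-operand address act as the shift count.
 */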

static void in2_m2_8u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_8u 0

static void in2_m2_16s(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16s 0

static void in2_m2_16u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16u 0

static void in2_m2_32s(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32s 0

static void in2_m2_32u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32u 0

#ifndef CONFIG_USER_ONLY
static void in2_m2_32ua(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_tl(o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_in2_m2_32ua 0
#endif

static void in2_m2_64(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_64 0

static void in2_m2_64w(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
    gen_addi_and_wrap_i64(s, o->in2, o->in2, 0);
}
#define SPEC_in2_m2_64w 0

#ifndef CONFIG_USER_ONLY
static void in2_m2_64a(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ | MO_ALIGN);
}
#define SPEC_in2_m2_64a 0
#endif

static void in2_mri2_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16u(o->in2, gen_ri2(s), get_mem_index(s));
}
#define SPEC_in2_mri2_16u 0

static void in2_mri2_32s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in2, gen_ri2(s), get_mem_index(s));
}
#define SPEC_in2_mri2_32s 0

static void in2_mri2_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(o->in2, gen_ri2(s), get_mem_index(s));
}
#define SPEC_in2_mri2_32u 0

static void in2_mri2_64(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in2, gen_ri2(s), get_mem_index(s));
}
#define SPEC_in2_mri2_64 0

static void in2_i2(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64(get_field(s, i2));
}
#define SPEC_in2_i2 0

static void in2_i2_8u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64((uint8_t)get_field(s, i2));
}
#define SPEC_in2_i2_8u 0

static void in2_i2_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64((uint16_t)get_field(s, i2));
}
#define SPEC_in2_i2_16u 0

static void in2_i2_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64((uint32_t)get_field(s, i2));
}
#define SPEC_in2_i2_32u 0

static void in2_i2_16u_shl(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(s, i2);
    o->in2 = tcg_constant_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_16u_shl 0

static void in2_i2_32u_shl(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(s, i2);
    o->in2 = tcg_constant_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_32u_shl 0

#ifndef CONFIG_USER_ONLY
static void in2_insn(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64(s->fields.raw_insn);
}
#define SPEC_in2_insn 0
#endif

/* ====================================================================== */

/* Find opc within the table of insns.  This is formulated as a switch
   statement so that (1) we get compile-time notice of cut-paste errors
   for duplicated opcodes, and (2) the compiler generates the binary
   search tree, rather than us having to post-process the table.  */

#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0)

#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0)

#define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \
    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL)

#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM,
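
/*
 * The same insn-data.h.inc is expanded three times with different
 * definitions of E(): into the enum below, into the insn_info[] table,
 * and into the lookup_opc() switch.  A hypothetical table line
 * E(0x0101, PR, ...) would thus yield insn_PR, an insn_info[] entry,
 * and "case 0x0101: return &insn_info[insn_PR];".
 */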

enum DisasInsnEnum {
#include "insn-data.h.inc"
};

#undef E
#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) {                   \
    .opc = OPC,                                                             \
    .flags = FL,                                                            \
    .fmt = FMT_##FT,                                                        \
    .fac = FAC_##FC,                                                        \
    .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W,  \
    .name = #NM,                                                            \
    .help_in1 = in1_##I1,                                                   \
    .help_in2 = in2_##I2,                                                   \
    .help_prep = prep_##P,                                                  \
    .help_wout = wout_##W,                                                  \
    .help_cout = cout_##CC,                                                 \
    .help_op = op_##OP,                                                     \
    .data = D                                                               \
 },

/* Allow 0 to be used for NULL in the table below.  */
#define in1_0  NULL
#define in2_0  NULL
#define prep_0  NULL
#define wout_0  NULL
#define cout_0  NULL
#define op_0  NULL

#define SPEC_in1_0 0
#define SPEC_in2_0 0
#define SPEC_prep_0 0
#define SPEC_wout_0 0

/* Give smaller names to the various facilities.  */
#define FAC_Z           S390_FEAT_ZARCH
#define FAC_CASS        S390_FEAT_COMPARE_AND_SWAP_AND_STORE
#define FAC_DFP         S390_FEAT_DFP
#define FAC_DFPR        S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* DFP-rounding */
#define FAC_DO          S390_FEAT_STFLE_45 /* distinct-operands */
#define FAC_EE          S390_FEAT_EXECUTE_EXT
#define FAC_EI          S390_FEAT_EXTENDED_IMMEDIATE
#define FAC_FPE         S390_FEAT_FLOATING_POINT_EXT
#define FAC_FPSSH       S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* FPS-sign-handling */
#define FAC_FPRGR       S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* FPR-GR-transfer */
#define FAC_GIE         S390_FEAT_GENERAL_INSTRUCTIONS_EXT
#define FAC_HFP_MA      S390_FEAT_HFP_MADDSUB
#define FAC_HW          S390_FEAT_STFLE_45 /* high-word */
#define FAC_IEEEE_SIM   S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* IEEE-exception-simulation */
#define FAC_MIE         S390_FEAT_STFLE_49 /* misc-instruction-extensions */
#define FAC_LAT         S390_FEAT_STFLE_49 /* load-and-trap */
#define FAC_LOC         S390_FEAT_STFLE_45 /* load/store on condition 1 */
#define FAC_LOC2        S390_FEAT_STFLE_53 /* load/store on condition 2 */
#define FAC_LD          S390_FEAT_LONG_DISPLACEMENT
#define FAC_PC          S390_FEAT_STFLE_45 /* population count */
#define FAC_SCF         S390_FEAT_STORE_CLOCK_FAST
#define FAC_SFLE        S390_FEAT_STFLE
#define FAC_ILA         S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
#define FAC_MVCOS       S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
#define FAC_LPP         S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
#define FAC_DAT_ENH     S390_FEAT_DAT_ENH
#define FAC_E2          S390_FEAT_EXTENDED_TRANSLATION_2
#define FAC_EH          S390_FEAT_STFLE_49 /* execution-hint */
#define FAC_PPA         S390_FEAT_STFLE_49 /* processor-assist */
#define FAC_LZRB        S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
#define FAC_ETF3        S390_FEAT_EXTENDED_TRANSLATION_3
#define FAC_MSA         S390_FEAT_MSA /* message-security-assist facility */
#define FAC_MSA3        S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
#define FAC_MSA4        S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
#define FAC_MSA5        S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
#define FAC_MSA8        S390_FEAT_MSA_EXT_8 /* msa-extension-8 facility */
#define FAC_ECT         S390_FEAT_EXTRACT_CPU_TIME
#define FAC_PCI         S390_FEAT_ZPCI /* z/PCI facility */
#define FAC_AIS         S390_FEAT_ADAPTER_INT_SUPPRESSION
#define FAC_V           S390_FEAT_VECTOR /* vector facility */
#define FAC_VE          S390_FEAT_VECTOR_ENH  /* vector enhancements facility 1 */
#define FAC_VE2         S390_FEAT_VECTOR_ENH2 /* vector enhancements facility 2 */
#define FAC_MIE2        S390_FEAT_MISC_INSTRUCTION_EXT2 /* miscellaneous-instruction-extensions facility 2 */
#define FAC_MIE3        S390_FEAT_MISC_INSTRUCTION_EXT3 /* miscellaneous-instruction-extensions facility 3 */

static const DisasInsn insn_info[] = {
#include "insn-data.h.inc"
};

#undef E
#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \
    case OPC: return &insn_info[insn_ ## NM];

static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.h.inc"
    default:
        return NULL;
    }
}

#undef F
#undef E
#undef D
#undef C

/* Extract a field from the insn.  The INSN should be left-aligned in
   the uint64_t so that we can more easily utilize the big-bit-endian
   definitions we extract from the Principles of Operation.  */

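/*
 * Example (illustrative): a 4-bit register field at instruction bit 8
 * has f->beg = 8 and f->size = 4, so the extraction below computes
 * r = (insn << 8) >> 60.
 */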
static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
{
    uint32_t r, m;

    if (f->size == 0) {
        return;
    }

    /* Zero extract the field from the insn.  */
    r = (insn << f->beg) >> (64 - f->size);

    /* Sign-extend, or un-swap the field as necessary.  */
    switch (f->type) {
    case 0: /* unsigned */
        break;
    case 1: /* signed */
        assert(f->size <= 32);
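        /* Sign-extend via xor/subtract: m is the sign bit of the field,
           and (r ^ m) - m propagates it through the upper bits. */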
        m = 1u << (f->size - 1);
        r = (r ^ m) - m;
        break;
    case 2: /* dl+dh split, signed 20 bit. */
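        /* The raw field holds DL (12 bits) above DH (8 bits); reassemble
           it as sign_extend(DH) << 12 | DL. */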
        r = ((int8_t)r << 12) | (r >> 8);
        break;
    case 3: /* MSB stored in RXB */
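        /* Vector register numbers are five bits wide: the 4-bit field holds
           the low bits and the RXB byte (instruction bits 36-39) supplies
           the fifth bit, one bit per operand position. */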
        g_assert(f->size == 4);
        switch (f->beg) {
        case 8:
            r |= extract64(insn, 63 - 36, 1) << 4;
            break;
        case 12:
            r |= extract64(insn, 63 - 37, 1) << 4;
            break;
        case 16:
            r |= extract64(insn, 63 - 38, 1) << 4;
            break;
        case 32:
            r |= extract64(insn, 63 - 39, 1) << 4;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    default:
        abort();
    }

    /*
     * Validate that the "compressed" encoding we selected above is valid.
     * I.e. we haven't made two different original fields overlap.
     */
    assert(((o->presentC >> f->indexC) & 1) == 0);
    o->presentC |= 1 << f->indexC;
    o->presentO |= 1 << f->indexO;

    o->c[f->indexC] = r;
}

/* Lookup the insn at the current PC, extracting the operands into O and
   returning the info struct for the insn.  Returns NULL for invalid insn.  */

static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s)
{
    uint64_t insn, pc = s->base.pc_next;
    int op, op2, ilen;
    const DisasInsn *info;

    if (unlikely(s->ex_value)) {
        /* Drop the EX data now, so that it's clear on exception paths.  */
        tcg_gen_st_i64(tcg_constant_i64(0), cpu_env,
                       offsetof(CPUS390XState, ex_value));

        /* Extract the values saved by EXECUTE.  */
        insn = s->ex_value & 0xffffffffffff0000ull;
        ilen = s->ex_value & 0xf;

        /* Register insn bytes with translator so plugins work. */
        for (int i = 0; i < ilen; i++) {
            uint8_t byte = extract64(insn, 56 - (i * 8), 8);
            translator_fake_ldb(byte, pc + i);
        }
        op = insn >> 56;
    } else {
        insn = ld_code2(env, s, pc);
        op = (insn >> 8) & 0xff;
        ilen = get_ilen(op);
        switch (ilen) {
        case 2:
            insn = insn << 48;
            break;
        case 4:
            insn = ld_code4(env, s, pc) << 32;
            break;
        case 6:
            insn = (insn << 48) | (ld_code4(env, s, pc + 2) << 16);
            break;
        default:
            g_assert_not_reached();
        }
    }
    s->pc_tmp = s->base.pc_next + ilen;
    s->ilen = ilen;

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode, which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0. */
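    /* E.g. for the 0xb2 family the secondary opcode occupies bits 8-15 of
       the left-aligned insn, hence (insn << 8) >> 56 below. */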
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE, IE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        op2 = (insn << 12) >> 60;
        break;
    case 0xc5: /* MII */
    case 0xc7: /* SMI */
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        op2 = 0;
        break;
    default:
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(&s->fields, 0, sizeof(s->fields));
    s->fields.raw_insn = insn;
    s->fields.op = op;
    s->fields.op2 = op2;

    /* Lookup the instruction.  */
    info = lookup_opc(op << 8 | op2);
    s->insn = info;

    /* If we found it, extract the operands.  */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(&s->fields, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}

static bool is_afp_reg(int reg)
{
    /* Only registers 0, 2, 4 and 6 exist without the AFP facility. */
    return reg % 2 || reg > 6;
}

static bool is_fp_pair(int reg)
{
    /* Valid pairs are 0,1,4,5,8,9,12,13, i.e. exactly those with bit 1
       clear. */
    return !(reg & 0x2);
}

static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    DisasJumpType ret = DISAS_NEXT;
    DisasOps o = {};
    bool icount = false;

    /* Search for the insn in the table.  */
    insn = extract_insn(env, s);

    /* Update insn_start now that we know the ILEN.  */
    tcg_set_insn_start_param(s->insn_start, 2, s->ilen);

    /* Not found means unimplemented/illegal opcode.  */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
                      s->fields.op, s->fields.op2);
        gen_illegal_opcode(s);
        ret = DISAS_NORETURN;
        goto out;
    }

#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 addr = tcg_constant_i64(s->base.pc_next);
        gen_helper_per_ifetch(cpu_env, addr);
    }
#endif

    /* process flags */
    if (insn->flags) {
        /* privileged instruction */
        if ((s->base.tb->flags & FLAG_MASK_PSTATE) && (insn->flags & IF_PRIV)) {
            gen_program_exception(s, PGM_PRIVILEGED);
            ret = DISAS_NORETURN;
            goto out;
        }

        /* If AFP is not enabled, use of AFP instructions or registers is
           forbidden. */
        if (!(s->base.tb->flags & FLAG_MASK_AFP)) {
            uint8_t dxc = 0;

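            /* DXC 1: AFP register, 2: BFP instruction, 3: DFP instruction,
               0xfe: vector instruction. */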
            if ((insn->flags & IF_AFP1) && is_afp_reg(get_field(s, r1))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP2) && is_afp_reg(get_field(s, r2))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP3) && is_afp_reg(get_field(s, r3))) {
                dxc = 1;
            }
            if (insn->flags & IF_BFP) {
                dxc = 2;
            }
            if (insn->flags & IF_DFP) {
                dxc = 3;
            }
            if (insn->flags & IF_VEC) {
                dxc = 0xfe;
            }
            if (dxc) {
                gen_data_exception(dxc);
                ret = DISAS_NORETURN;
                goto out;
            }
        }

        /* If vector instructions are not enabled, executing them is
           forbidden. */
        if (insn->flags & IF_VEC) {
            if (!(s->base.tb->flags & FLAG_MASK_VECTOR)) {
                gen_data_exception(0xfe);
                ret = DISAS_NORETURN;
                goto out;
            }
        }

        /* I/O instructions need special handling in icount mode. */
        if (unlikely(insn->flags & IF_IO)) {
            icount = tb_cflags(s->base.tb) & CF_USE_ICOUNT;
            if (icount) {
                gen_io_start();
            }
        }
    }

    /* Check for insn specification exceptions.  */
    if (insn->spec) {
        if ((insn->spec & SPEC_r1_even && get_field(s, r1) & 1) ||
            (insn->spec & SPEC_r2_even && get_field(s, r2) & 1) ||
            (insn->spec & SPEC_r3_even && get_field(s, r3) & 1) ||
            (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(s, r1))) ||
            (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(s, r2)))) {
            gen_program_exception(s, PGM_SPECIFICATION);
            ret = DISAS_NORETURN;
            goto out;
        }
    }

    /* Implement the instruction.  */
    if (insn->help_in1) {
        insn->help_in1(s, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (ret != DISAS_NORETURN) {
        if (insn->help_wout) {
            insn->help_wout(s, &o);
        }
        if (insn->help_cout) {
            insn->help_cout(s, &o);
        }
    }

    /* An I/O insn must be the last insn in the TB when icount is enabled. */
    if (unlikely(icount && ret == DISAS_NEXT)) {
        ret = DISAS_TOO_MANY;
    }

#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        /* An exception might be triggered, save PSW if not already done.  */
        if (ret == DISAS_NEXT || ret == DISAS_TOO_MANY) {
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
        }

        /* Call the helper to check for a possible PER exception.  */
        gen_helper_per_check_exception(cpu_env);
    }
#endif

out:
    /* Advance to the next instruction.  */
    s->base.pc_next = s->pc_tmp;
    return ret;
}

static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /* 31-bit mode */
    if (!(dc->base.tb->flags & FLAG_MASK_64)) {
        dc->base.pc_first &= 0x7fffffff;
        dc->base.pc_next = dc->base.pc_first;
    }

    dc->cc_op = CC_OP_DYNAMIC;
    dc->ex_value = dc->base.tb->cs_base;
    dc->exit_to_mainloop = (dc->base.tb->flags & FLAG_MASK_PER) || dc->ex_value;
}

static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}

static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /* Delay setting ilen until we've read the insn. */
    tcg_gen_insn_start(dc->base.pc_next, dc->cc_op, 0);
    dc->insn_start = tcg_last_op();
}

static target_ulong get_next_pc(CPUS390XState *env, DisasContext *s,
                                uint64_t pc)
{
    uint64_t insn = cpu_lduw_code(env, pc);

    return pc + get_ilen((insn >> 8) & 0xff);
}

static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    CPUS390XState *env = cs->env_ptr;
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    dc->base.is_jmp = translate_one(env, dc);
    if (dc->base.is_jmp == DISAS_NEXT) {
        if (dc->ex_value ||
            !is_same_page(dcbase, dc->base.pc_next) ||
            !is_same_page(dcbase, get_next_pc(env, dc, dc->base.pc_next))) {
            dc->base.is_jmp = DISAS_TOO_MANY;
        }
    }
}

static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    switch (dc->base.is_jmp) {
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
        update_psw_addr(dc);
        /* FALLTHRU */
    case DISAS_PC_UPDATED:
        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
           cc op type is in env */
        update_cc_op(dc);
        /* FALLTHRU */
    case DISAS_PC_CC_UPDATED:
        /* Exit the TB, either by raising a debug exception or by return.  */
        if (dc->exit_to_mainloop) {
            tcg_gen_exit_tb(NULL, 0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        break;
    default:
        g_assert_not_reached();
    }
}

static void s390x_tr_disas_log(const DisasContextBase *dcbase,
                               CPUState *cs, FILE *logfile)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (unlikely(dc->ex_value)) {
        /* ??? Unfortunately target_disas can't use host memory.  */
        fprintf(logfile, "IN: EXECUTE %016" PRIx64 "\n", dc->ex_value);
    } else {
        fprintf(logfile, "IN: %s\n", lookup_symbol(dc->base.pc_first));
        target_disas(logfile, cs, dc->base.pc_first, dc->base.tb->size);
    }
}

static const TranslatorOps s390x_tr_ops = {
    .init_disas_context = s390x_tr_init_disas_context,
    .tb_start           = s390x_tr_tb_start,
    .insn_start         = s390x_tr_insn_start,
    .translate_insn     = s390x_tr_translate_insn,
    .tb_stop            = s390x_tr_tb_stop,
    .disas_log          = s390x_tr_disas_log,
};

void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           target_ulong pc, void *host_pc)
{
    DisasContext dc;

    translator_loop(cs, tb, max_insns, pc, host_pc, &s390x_tr_ops, &dc.base);
}

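/*
 * data[] holds the values recorded via tcg_gen_insn_start() (and the ilen
 * patched in by translate_one()): data[0] is the PSW address, data[1] the
 * cc_op, and data[2] the instruction length.
 */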
void s390x_restore_state_to_opc(CPUState *cs,
                                const TranslationBlock *tb,
                                const uint64_t *data)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    int cc_op = data[1];

    env->psw.addr = data[0];

    /* Update cc_op if it is not already up-to-date.  */
    if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
        env->cc_op = cc_op;
    }

    /* Record ILEN.  */
    env->int_pgm_ilen = data[2];
}