1 /*
2 * S/390 translation
3 *
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
24
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
27 #else
28 # define LOG_DISAS(...) do { } while (0)
29 #endif
30
31 #include "qemu/osdep.h"
32 #include "cpu.h"
33 #include "s390x-internal.h"
34 #include "exec/exec-all.h"
35 #include "tcg/tcg-op.h"
36 #include "tcg/tcg-op-gvec.h"
37 #include "qemu/log.h"
38 #include "qemu/host-utils.h"
39 #include "exec/helper-proto.h"
40 #include "exec/helper-gen.h"
41
42 #include "exec/translator.h"
43 #include "exec/log.h"
44 #include "qemu/atomic128.h"
45
46 #define HELPER_H "helper.h"
47 #include "exec/helper-info.c.inc"
48 #undef HELPER_H
49
50
51 /* Information that (most) every instruction needs to manipulate. */
52 typedef struct DisasContext DisasContext;
53 typedef struct DisasInsn DisasInsn;
54 typedef struct DisasFields DisasFields;
55
56 /*
57 * Define a structure to hold the decoded fields. We'll store each inside
58 * an array indexed by an enum. In order to conserve memory, we'll arrange
59 * for fields that do not exist at the same time to overlap, thus the "C"
60 * for compact. For checking purposes there is an "O" for original index
61 * as well that will be applied to availability bitmaps.
62 */
63
64 enum DisasFieldIndexO {
65 FLD_O_r1,
66 FLD_O_r2,
67 FLD_O_r3,
68 FLD_O_m1,
69 FLD_O_m3,
70 FLD_O_m4,
71 FLD_O_m5,
72 FLD_O_m6,
73 FLD_O_b1,
74 FLD_O_b2,
75 FLD_O_b4,
76 FLD_O_d1,
77 FLD_O_d2,
78 FLD_O_d4,
79 FLD_O_x2,
80 FLD_O_l1,
81 FLD_O_l2,
82 FLD_O_i1,
83 FLD_O_i2,
84 FLD_O_i3,
85 FLD_O_i4,
86 FLD_O_i5,
87 FLD_O_v1,
88 FLD_O_v2,
89 FLD_O_v3,
90 FLD_O_v4,
91 };
92
93 enum DisasFieldIndexC {
94 FLD_C_r1 = 0,
95 FLD_C_m1 = 0,
96 FLD_C_b1 = 0,
97 FLD_C_i1 = 0,
98 FLD_C_v1 = 0,
99
100 FLD_C_r2 = 1,
101 FLD_C_b2 = 1,
102 FLD_C_i2 = 1,
103
104 FLD_C_r3 = 2,
105 FLD_C_m3 = 2,
106 FLD_C_i3 = 2,
107 FLD_C_v3 = 2,
108
109 FLD_C_m4 = 3,
110 FLD_C_b4 = 3,
111 FLD_C_i4 = 3,
112 FLD_C_l1 = 3,
113 FLD_C_v4 = 3,
114
115 FLD_C_i5 = 4,
116 FLD_C_d1 = 4,
117 FLD_C_m5 = 4,
118
119 FLD_C_d2 = 5,
120 FLD_C_m6 = 5,
121
122 FLD_C_d4 = 6,
123 FLD_C_x2 = 6,
124 FLD_C_l2 = 6,
125 FLD_C_v2 = 6,
126
127 NUM_C_FIELD = 7
128 };
129
130 struct DisasFields {
131 uint64_t raw_insn;
132 unsigned op:8;
133 unsigned op2:8;
134 unsigned presentC:16;
135 unsigned int presentO;
136 int c[NUM_C_FIELD];
137 };
138
139 struct DisasContext {
140 DisasContextBase base;
141 const DisasInsn *insn;
142 DisasFields fields;
143 uint64_t ex_value;
144 /*
145 * During translate_one(), pc_tmp is used to determine the instruction
146 * to be executed after base.pc_next - e.g. next sequential instruction
147 * or a branch target.
148 */
149 uint64_t pc_tmp;
150 uint32_t ilen;
151 enum cc_op cc_op;
152 bool exit_to_mainloop;
153 };
154
155 /* Information carried about a condition to be evaluated. */
156 typedef struct {
157 TCGCond cond:8;
158 bool is_64;
159 union {
160 struct { TCGv_i64 a, b; } s64;
161 struct { TCGv_i32 a, b; } s32;
162 } u;
163 } DisasCompare;
164
165 #ifdef DEBUG_INLINE_BRANCHES
166 static uint64_t inline_branch_hit[CC_OP_MAX];
167 static uint64_t inline_branch_miss[CC_OP_MAX];
168 #endif
169
/*
 * Write the link information for PC into OUT, honoring the current
 * addressing mode: full 64-bit address in 64-bit mode; in 31-bit mode
 * the address with the high (AMODE) bit set, deposited into the low
 * 32 bits of OUT; in 24-bit mode the plain address in the low 32 bits.
 */
static void pc_to_link_info(TCGv_i64 out, DisasContext *s, uint64_t pc)
{
    if (s->base.tb->flags & FLAG_MASK_32) {
        if (s->base.tb->flags & FLAG_MASK_64) {
            /* 64-bit mode: the whole of OUT is replaced. */
            tcg_gen_movi_i64(out, pc);
            return;
        }
        /* 31-bit mode: link info carries the addressing-mode bit. */
        pc |= 0x80000000;
    }
    assert(!(s->base.tb->flags & FLAG_MASK_64));
    /* 24/31-bit mode: only the low 32 bits of OUT are replaced. */
    tcg_gen_deposit_i64(out, out, tcg_constant_i64(pc), 0, 32);
}
182
/* TCG globals mirroring guest PSW state and the PER breaking-event address. */
static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;
static TCGv_i64 gbea;

/* TCG globals holding the deferred condition-code computation inputs. */
static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

/* The 16 general registers, plus storage for their TCG names. */
static char cpu_reg_names[16][4];
static TCGv_i64 regs[16];
194
/*
 * Create the TCG globals backing translator state: PSW address/mask,
 * the PER breaking-event address, the four cc computation fields, and
 * the 16 general registers.  Called once at CPU class init.
 */
void s390x_translate_init(void)
{
    int i;

    psw_addr = tcg_global_mem_new_i64(tcg_env,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(tcg_env,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");
    gbea = tcg_global_mem_new_i64(tcg_env,
                                  offsetof(CPUS390XState, gbea),
                                  "gbea");

    cc_op = tcg_global_mem_new_i32(tcg_env, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(tcg_env, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(tcg_env, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(tcg_env, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    /* Name each general register "r0".."r15" for TCG dumps. */
    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(tcg_env,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }
}
225
/* Byte offset of the full 16-byte vector register REG (0..31) in env. */
static inline int vec_full_reg_offset(uint8_t reg)
{
    g_assert(reg < 32);

    return offsetof(CPUS390XState, vregs[reg][0]);
}
231
/*
 * Byte offset of element ENR (of size ES, e.g. MO_8) within vector
 * register REG, independent of host endianness.
 */
static inline int vec_reg_offset(uint8_t reg, uint8_t enr, MemOp es)
{
    /* Convert element size (es) - e.g. MO_8 - to bytes */
    const uint8_t bytes = 1 << es;
    int offs = enr * bytes;

    /*
     * vregs[n][0] is the lowest 8 byte and vregs[n][1] the highest 8 byte
     * of the 16 byte vector, on both, little and big endian systems.
     *
     * Big Endian (target/possible host)
     * B:  [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15]
     * HW: [     0][     1][     2][     3] - [     4][     5][     6][     7]
     * W:  [             0][             1] - [             2][             3]
     * DW: [                             0] - [                             1]
     *
     * Little Endian (possible host)
     * B:  [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8]
     * HW: [     3][     2][     1][     0] - [     7][     6][     5][     4]
     * W:  [             1][             0] - [             3][             2]
     * DW: [                             0] - [                             1]
     *
     * For 16 byte elements, the two 8 byte halves will not form a host
     * int128 if the host is little endian, since they're in the wrong order.
     * Some operations (e.g. xor) do not care. For operations like addition,
     * the two 8 byte elements have to be loaded separately. Let's force all
     * 16 byte operations to handle it in a special way.
     */
    g_assert(es <= MO_64);
#if !HOST_BIG_ENDIAN
    /* Flip the element position within its 8-byte half on LE hosts. */
    offs ^= (8 - bytes);
#endif
    return offs + vec_full_reg_offset(reg);
}
266
/* Offset of FP register REG: the high doubleword of vector register REG. */
static inline int freg64_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_64);
}
272
/* Offset of the short (32-bit) FP register REG: word 0 of vector reg REG. */
static inline int freg32_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_32);
}
278
/* Return a fresh temporary holding a copy of general register REG. */
static TCGv_i64 load_reg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;
}
285
/* Return a fresh temporary holding the 64-bit FP register REG. */
static TCGv_i64 load_freg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld_i64(r, tcg_env, freg64_offset(reg));
    return r;
}
293
/* Return a fresh i64 temporary holding the short FP register REG,
   zero-extended from 32 bits. */
static TCGv_i64 load_freg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld32u_i64(r, tcg_env, freg32_offset(reg));
    return r;
}
301
/* Return a fresh i128 temporary holding the extended FP register pair
   REG/REG+2 (high and low halves, per the s390x FP pair convention). */
static TCGv_i128 load_freg_128(int reg)
{
    TCGv_i64 h = load_freg(reg);
    TCGv_i64 l = load_freg(reg + 2);
    TCGv_i128 r = tcg_temp_new_i128();

    tcg_gen_concat_i64_i128(r, l, h);
    return r;
}
311
/* Store V into the full 64 bits of general register REG. */
static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}
316
/* Store V into the 64-bit FP register REG. */
static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_st_i64(v, tcg_env, freg64_offset(reg));
}
321
/* Store the low 32 bits of V into register REG. */
static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}
327
/* Store the low 32 bits of V into the HIGH half of register REG,
   keeping the low half intact. */
static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}
332
/* Store the low 32 bits of V into the short FP register REG. */
static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_st32_i64(v, tcg_env, freg32_offset(reg));
}
337
/* Synchronize the guest PSW address with the current translation PC. */
static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->base.pc_next);
}
343
/* If PER branch tracing is enabled, call the helper to record a branch
   to DEST.  No-op in user-only builds (PER is a system facility). */
static void per_branch(DisasContext *s, TCGv_i64 dest)
{
#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER_BRANCH) {
        gen_helper_per_branch(tcg_env, dest, tcg_constant_i32(s->ilen));
    }
#endif
}
352
/* Record the current PC as the PER breaking-event address (gbea). */
static void per_breaking_event(DisasContext *s)
{
    tcg_gen_movi_i64(gbea, s->base.pc_next);
}
357
/* Flush the compile-time cc_op into the runtime cc_op global, unless
   it is already dynamic (in env) or static (already the cc value). */
static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}
364
/* Fetch a 2-byte instruction halfword at guest PC, zero-extended. */
static inline uint64_t ld_code2(CPUS390XState *env, DisasContext *s,
                                uint64_t pc)
{
    return (uint64_t)translator_lduw(env, &s->base, pc);
}
370
/* Fetch a 4-byte instruction word at guest PC, zero-extended. */
static inline uint64_t ld_code4(CPUS390XState *env, DisasContext *s,
                                uint64_t pc)
{
    return (uint64_t)(uint32_t)translator_ldl(env, &s->base, pc);
}
376
/*
 * Select the MMU index for data accesses: user index in user-only
 * builds; real (untranslated) when DAT is off; otherwise the index
 * matching the PSW address-space control (primary/secondary/home).
 */
static int get_mem_index(DisasContext *s)
{
#ifdef CONFIG_USER_ONLY
    return MMU_USER_IDX;
#else
    if (!(s->base.tb->flags & FLAG_MASK_DAT)) {
        return MMU_REAL_IDX;
    }

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_PRIMARY_IDX;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_SECONDARY_IDX;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        return MMU_HOME_IDX;
    default:
        /* PSW_ASC_ACCREG is not a valid TB flag value here. */
        g_assert_not_reached();
    }
#endif
}
398
/* Raise exception EXCP via the helper; does not return to the TB. */
static void gen_exception(int excp)
{
    gen_helper_exception(tcg_env, tcg_constant_i32(excp));
}
403
/* Raise program exception CODE: record code and ilen in env, sync the
   PSW address and cc, then trigger EXCP_PGM. */
static void gen_program_exception(DisasContext *s, int code)
{
    /* Remember what pgm exception this was. */
    tcg_gen_st_i32(tcg_constant_i32(code), tcg_env,
                   offsetof(CPUS390XState, int_pgm_code));

    tcg_gen_st_i32(tcg_constant_i32(s->ilen), tcg_env,
                   offsetof(CPUS390XState, int_pgm_ilen));

    /* update the psw */
    update_psw_addr(s);

    /* Save off cc. */
    update_cc_op(s);

    /* Trigger exception. */
    gen_exception(EXCP_PGM);
}
422
/* Raise an operation (illegal opcode) program exception. */
static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}
427
/* Raise a data exception with data-exception code DXC via the helper. */
static inline void gen_data_exception(uint8_t dxc)
{
    gen_helper_data_exception(tcg_env, tcg_constant_i32(dxc));
}
432
/* Raise a trap: a data exception with the reserved DXC value 0xff. */
static inline void gen_trap(DisasContext *s)
{
    /* Set DXC to 0xff */
    gen_data_exception(0xff);
}
438
/* DST = SRC + IMM, wrapped to the current addressing mode:
   31 bits in 31-bit mode, 24 bits in 24-bit mode, full in 64-bit. */
static void gen_addi_and_wrap_i64(DisasContext *s, TCGv_i64 dst, TCGv_i64 src,
                                  int64_t imm)
{
    tcg_gen_addi_i64(dst, src, imm);
    if (!(s->base.tb->flags & FLAG_MASK_64)) {
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_andi_i64(dst, dst, 0x7fffffff);
        } else {
            tcg_gen_andi_i64(dst, dst, 0x00ffffff);
        }
    }
}
451
get_address(DisasContext * s,int x2,int b2,int d2)452 static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
453 {
454 TCGv_i64 tmp = tcg_temp_new_i64();
455
456 /*
457 * Note that d2 is limited to 20 bits, signed. If we crop negative
458 * displacements early we create larger immediate addends.
459 */
460 if (b2 && x2) {
461 tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
462 gen_addi_and_wrap_i64(s, tmp, tmp, d2);
463 } else if (b2) {
464 gen_addi_and_wrap_i64(s, tmp, regs[b2], d2);
465 } else if (x2) {
466 gen_addi_and_wrap_i64(s, tmp, regs[x2], d2);
467 } else if (!(s->base.tb->flags & FLAG_MASK_64)) {
468 if (s->base.tb->flags & FLAG_MASK_32) {
469 tcg_gen_movi_i64(tmp, d2 & 0x7fffffff);
470 } else {
471 tcg_gen_movi_i64(tmp, d2 & 0x00ffffff);
472 }
473 } else {
474 tcg_gen_movi_i64(tmp, d2);
475 }
476
477 return tmp;
478 }
479
live_cc_data(DisasContext * s)480 static inline bool live_cc_data(DisasContext *s)
481 {
482 return (s->cc_op != CC_OP_DYNAMIC
483 && s->cc_op != CC_OP_STATIC
484 && s->cc_op > 3);
485 }
486
/* Set the cc to the constant VAL (0..3), discarding any live cc data. */
static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}
496
/* Record a one-operand cc computation: OP with argument DST. */
static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}
506
/* Record a two-operand cc computation: OP with arguments SRC, DST. */
static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}
517
/* Record a three-operand cc computation: OP with SRC, DST and result VR. */
static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}
526
/* Set cc from the zero/non-zero state of VAL. */
static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}
531
532 /* CC value is in env->cc_op */
/* Mark the cc as already computed and stored in the cc_op global,
   discarding any live intermediate cc data. */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}
542
543 /* calculates cc into cc_op */
/*
 * Materialize the deferred condition code into the cc_op global.
 * The first switch decides what auxiliary values the helper call
 * needs (a constant op number and/or a dummy argument); the second
 * switch emits the computation itself, grouped by argument count.
 * Afterwards the state is CC_OP_STATIC: cc_op holds the cc value.
 */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = NULL;
    TCGv_i64 dummy = NULL;

    switch (s->cc_op) {
    default:
        /* Ops taking fewer than 3 arguments get a dummy placeholder. */
        dummy = tcg_constant_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
        local_cc_op = tcg_constant_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        /* No helper call needed; handled inline below. */
        break;
    }

    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
        /* cc is 0 if cc_dst is zero, 1 otherwise; computable inline. */
        tcg_gen_setcondi_i64(TCG_COND_NE, cc_dst, cc_dst, 0);
        tcg_gen_extrl_i64_i32(cc_op, cc_dst);
        break;
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
    case CC_OP_LCBB:
    case CC_OP_MULS_32:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, tcg_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ADDU:
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA:
    case CC_OP_SUBU:
    case CC_OP_NZ_F128:
    case CC_OP_VC:
    case CC_OP_MULS_64:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, tcg_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, tcg_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, tcg_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        g_assert_not_reached();
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
633
/* Whether a direct goto_tb to DEST is permitted for this TB. */
static bool use_goto_tb(DisasContext *s, uint64_t dest)
{
    return translator_use_goto_tb(&s->base, dest);
}
638
/* Debug statistics: count a branch we could NOT inline for CC_OP. */
static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}
645
/* Debug statistics: count a branch we successfully inlined for CC_OP. */
static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}
652
653 /* Table of mask values to comparison codes, given a comparison as input.
654 For such, CC=3 should not be possible. */
/* Indexed by the 4-bit branch mask; bit 3 = EQ (cc 0), bit 2 = LT (cc 1),
   bit 1 = GT (cc 2), bit 0 = cc 3 (unreachable for comparisons). */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};
665
666 /* Table of mask values to comparison codes, given a logic op as input.
667 For such, only CC=0 and CC=1 should be possible. */
/* Indexed by the 4-bit branch mask; bit 3 = EQ (cc 0), bit 2 = NE (cc 1);
   cc 2 and cc 3 cannot occur for logic ops, so bits 1 and 0 are ignored. */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER, TCG_COND_NEVER,      /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER,
    TCG_COND_NE, TCG_COND_NE,            /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE,
    TCG_COND_EQ, TCG_COND_EQ,            /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
678
679 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
680 details required to generate a TCG comparison. */
/*
 * Interpret the 4-bit branch MASK in terms of s->cc_op and fill in C
 * with the TCG condition and operands needed to test it.  Where the
 * pending cc op admits a direct comparison the branch is "inlined";
 * otherwise the cc is materialized and compared against constants.
 */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    /* Mask 15 = unconditional, mask 0 = never; no operands needed. */
    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op. */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        /* Same table, but the comparison was unsigned. */
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        /* TEST UNDER MASK: cc 0 = all tested bits zero. */
        switch (mask) {
        case 8:
            cond = TCG_COND_TSTEQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_TSTNE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        /* INSERT CHARACTERS UNDER MASK: cc 0 = inserted field zero. */
        switch (mask) {
        case 8:
            cond = TCG_COND_TSTEQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_TSTNE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU:
    case CC_OP_SUBU:
        switch (mask) {
        case 8 | 2: /* result == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* result != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* !carry (borrow) */
            cond = old_cc_op == CC_OP_ADDU ? TCG_COND_EQ : TCG_COND_NE;
            break;
        case 2 | 1: /* carry (!borrow) */
            cond = old_cc_op == CC_OP_ADDU ? TCG_COND_NE : TCG_COND_EQ;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value. */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning. */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison. */
    c->is_64 = true;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_constant_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_constant_i64(0);
        break;

    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        break;

    case CC_OP_ADDU:
    case CC_OP_SUBU:
        c->is_64 = true;
        c->u.s64.b = tcg_constant_i64(0);
        /* The operand depends on whether result or carry was tested. */
        switch (mask) {
        case 8 | 2:
        case 4 | 1: /* result */
            c->u.s64.a = cc_dst;
            break;
        case 8 | 4:
        case 2 | 1: /* carry */
            c->u.s64.a = cc_src;
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case CC_OP_STATIC:
        /* The cc value is in cc_op; compare it against constants. */
        c->is_64 = false;
        c->u.s32.a = cc_op;

        /* Fold half of the cases using bit 3 to invert. */
        switch (mask & 8 ? mask ^ 0xf : mask) {
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(3);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(2);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(1);
            break;
        case 0x2 | 0x1: /* cc == 2 || cc == 3 => cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_constant_i32(1);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_TSTNE;
            c->u.s32.b = tcg_constant_i32(1);
            break;
        case 0x4 | 0x2: /* cc == 1 || cc == 2 => (cc - 1) <= 1 */
            cond = TCG_COND_LEU;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_constant_i32(1);
            tcg_gen_addi_i32(c->u.s32.a, cc_op, -1);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_constant_i32(0);
            break;
        default:
            /* case 0: never, handled above. */
            g_assert_not_reached();
        }
        if (mask & 8) {
            cond = tcg_invert_cond(cond);
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
906
907 /* ====================================================================== */
908 /* Define the insn format enumeration. */
909 #define F0(N) FMT_##N,
910 #define F1(N, X1) F0(N)
911 #define F2(N, X1, X2) F0(N)
912 #define F3(N, X1, X2, X3) F0(N)
913 #define F4(N, X1, X2, X3, X4) F0(N)
914 #define F5(N, X1, X2, X3, X4, X5) F0(N)
915 #define F6(N, X1, X2, X3, X4, X5, X6) F0(N)
916
917 typedef enum {
918 #include "insn-format.h.inc"
919 } DisasFormat;
920
921 #undef F0
922 #undef F1
923 #undef F2
924 #undef F3
925 #undef F4
926 #undef F5
927 #undef F6
928
929 /* This is the way fields are to be accessed out of DisasFields. */
930 #define have_field(S, F) have_field1((S), FLD_O_##F)
931 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
932
/* True if original-index field C was decoded for the current insn. */
static bool have_field1(const DisasContext *s, enum DisasFieldIndexO c)
{
    return (s->fields.presentO >> c) & 1;
}
937
/* Fetch decoded field value at compact index C; the original index O
   is only used to assert the field was actually decoded. */
static int get_field1(const DisasContext *s, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(s, o));
    return s->fields.c[c];
}
944
945 /* Describe the layout of each field in each format. */
946 typedef struct DisasField {
947 unsigned int beg:8;
948 unsigned int size:8;
949 unsigned int type:2;
950 unsigned int indexC:6;
951 enum DisasFieldIndexO indexO:8;
952 } DisasField;
953
954 typedef struct DisasFormatInfo {
955 DisasField op[NUM_C_FIELD];
956 } DisasFormatInfo;
957
958 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
959 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
960 #define V(N, B) { B, 4, 3, FLD_C_v##N, FLD_O_v##N }
961 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
962 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
963 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
964 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
965 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
966 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
967 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
968 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
969 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
970 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
971 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
972 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
973
974 #define F0(N) { { } },
975 #define F1(N, X1) { { X1 } },
976 #define F2(N, X1, X2) { { X1, X2 } },
977 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
978 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
979 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
980 #define F6(N, X1, X2, X3, X4, X5, X6) { { X1, X2, X3, X4, X5, X6 } },
981
982 static const DisasFormatInfo format_info[] = {
983 #include "insn-format.h.inc"
984 };
985
986 #undef F0
987 #undef F1
988 #undef F2
989 #undef F3
990 #undef F4
991 #undef F5
992 #undef F6
993 #undef R
994 #undef M
995 #undef V
996 #undef BD
997 #undef BXD
998 #undef BDL
999 #undef BXDL
1000 #undef I
1001 #undef L
1002
1003 /* Generally, we'll extract operands into this structures, operate upon
1004 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1005 of routines below for more details. */
1006 typedef struct {
1007 TCGv_i64 out, out2, in1, in2;
1008 TCGv_i64 addr1;
1009 TCGv_i128 out_128, in1_128, in2_128;
1010 } DisasOps;
1011
1012 /* Instructions can place constraints on their operands, raising specification
1013 exceptions if they are violated. To make this easy to automate, each "in1",
1014 "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1015 of the following, or 0. To make this easy to document, we'll put the
1016 SPEC_<name> defines next to <name>. */
1017
1018 #define SPEC_r1_even 1
1019 #define SPEC_r2_even 2
1020 #define SPEC_r3_even 4
1021 #define SPEC_r1_f128 8
1022 #define SPEC_r2_f128 16
1023
1024 /* Return values from translate_one, indicating the state of the TB. */
1025
1026 /* We are not using a goto_tb (for whatever reason), but have updated
1027 the PC (for whatever reason), so there's no need to do it again on
1028 exiting the TB. */
1029 #define DISAS_PC_UPDATED DISAS_TARGET_0
1030
1031 /* We have updated the PC and CC values. */
1032 #define DISAS_PC_CC_UPDATED DISAS_TARGET_2
1033
1034
1035 /* Instruction flags */
1036 #define IF_AFP1 0x0001 /* r1 is a fp reg for HFP/FPS instructions */
1037 #define IF_AFP2 0x0002 /* r2 is a fp reg for HFP/FPS instructions */
1038 #define IF_AFP3 0x0004 /* r3 is a fp reg for HFP/FPS instructions */
1039 #define IF_BFP 0x0008 /* binary floating point instruction */
1040 #define IF_DFP 0x0010 /* decimal floating point instruction */
1041 #define IF_PRIV 0x0020 /* privileged instruction */
1042 #define IF_VEC 0x0040 /* vector instruction */
1043 #define IF_IO 0x0080 /* input/output instruction */
1044
1045 struct DisasInsn {
1046 unsigned opc:16;
1047 unsigned flags:16;
1048 DisasFormat fmt:8;
1049 unsigned fac:8;
1050 unsigned spec:8;
1051
1052 const char *name;
1053
1054 /* Pre-process arguments before HELP_OP. */
1055 void (*help_in1)(DisasContext *, DisasOps *);
1056 void (*help_in2)(DisasContext *, DisasOps *);
1057 void (*help_prep)(DisasContext *, DisasOps *);
1058
1059 /*
1060 * Post-process output after HELP_OP.
1061 * Note that these are not called if HELP_OP returns DISAS_NORETURN.
1062 */
1063 void (*help_wout)(DisasContext *, DisasOps *);
1064 void (*help_cout)(DisasContext *, DisasOps *);
1065
1066 /* Implement the operation itself. */
1067 DisasJumpType (*help_op)(DisasContext *, DisasOps *);
1068
1069 uint64_t data;
1070 };
1071
1072 /* ====================================================================== */
1073 /* Miscellaneous helpers, used by several operations. */
1074
/*
 * Unconditional branch to the known address DEST.  Emits PER events,
 * then either falls through (branch to next insn), chains via goto_tb,
 * or updates psw_addr for an indirect TB exit.
 */
static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest)
{
    update_cc_op(s);
    per_breaking_event(s);
    per_branch(s, tcg_constant_i64(dest));

    if (dest == s->pc_tmp) {
        /* Branch to the sequentially-next insn: nothing to do. */
        return DISAS_NEXT;
    }
    if (use_goto_tb(s, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb(s->base.tb, 0);
        return DISAS_NORETURN;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        return DISAS_PC_CC_UPDATED;
    }
}
1094
/* Unconditional branch to the register-supplied address DEST. */
static DisasJumpType help_goto_indirect(DisasContext *s, TCGv_i64 dest)
{
    update_cc_op(s);
    per_breaking_event(s);
    tcg_gen_mov_i64(psw_addr, dest);
    per_branch(s, psw_addr);
    return DISAS_PC_CC_UPDATED;
}
1103
/*
 * Conditional branch on comparison C.  For IS_IMM the target is
 * base.pc_next + 2*IMM; otherwise it is the register value CDEST.
 * Degenerate conditions are reduced to fall-through or an
 * unconditional goto; the general case emits a two-way TB exit.
 */
static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
                                 bool is_imm, int imm, TCGv_i64 cdest)
{
    uint64_t dest = s->base.pc_next + (int64_t)imm * 2;
    TCGLabel *lab;

    /* Take care of the special cases first. */
    if (c->cond == TCG_COND_NEVER) {
        return DISAS_NEXT;
    }
    if (is_imm) {
        /*
         * Do not optimize a conditional branch if PER enabled, because we
         * still need a conditional call to helper_per_branch.
         */
        if (c->cond == TCG_COND_ALWAYS
            || (dest == s->pc_tmp &&
                !(s->base.tb->flags & FLAG_MASK_PER_BRANCH))) {
            return help_goto_direct(s, dest);
        }
    } else {
        if (!cdest) {
            /* E.g. bcr %r0 -> no branch.  */
            return DISAS_NEXT;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            return help_goto_indirect(s, cdest);
        }
    }

    update_cc_op(s);

    /*
     * Ensure the taken branch is fall-through of the tcg branch.
     * This keeps @cdest usage within the extended basic block,
     * which avoids an otherwise unnecessary spill to the stack.
     */
    lab = gen_new_label();
    if (c->is_64) {
        tcg_gen_brcond_i64(tcg_invert_cond(c->cond),
                           c->u.s64.a, c->u.s64.b, lab);
    } else {
        tcg_gen_brcond_i32(tcg_invert_cond(c->cond),
                           c->u.s32.a, c->u.s32.b, lab);
    }

    /* Branch taken.  */
    per_breaking_event(s);
    if (is_imm) {
        tcg_gen_movi_i64(psw_addr, dest);
    } else {
        tcg_gen_mov_i64(psw_addr, cdest);
    }
    per_branch(s, psw_addr);

    if (is_imm && use_goto_tb(s, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_exit_tb(s->base.tb, 0);
    } else {
        tcg_gen_lookup_and_goto_ptr();
    }

    gen_set_label(lab);

    /* Branch not taken.  */
    tcg_gen_movi_i64(psw_addr, s->pc_tmp);
    if (use_goto_tb(s, s->pc_tmp)) {
        tcg_gen_goto_tb(1);
        tcg_gen_exit_tb(s->base.tb, 1);
        return DISAS_NORETURN;
    }
    return DISAS_PC_CC_UPDATED;
}
1177
1178 /* ====================================================================== */
1179 /* The operations. These perform the bulk of the work for any insn,
1180 usually after the operands have been loaded and output initialized. */
1181
/* out = |in2| (64-bit absolute value). */
static DisasJumpType op_abs(DisasContext *s, DisasOps *o)
{
    tcg_gen_abs_i64(o->out, o->in2);
    return DISAS_NEXT;
}
1187
/* Clear bit 31 of in2: absolute value of a short (32-bit) FP operand. */
static DisasJumpType op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return DISAS_NEXT;
}
1193
/* Clear bit 63 of in2: absolute value of a long (64-bit) FP operand. */
static DisasJumpType op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return DISAS_NEXT;
}
1199
/*
 * Absolute value of a 128-bit FP operand held as in1 (high):in2 (low):
 * clear the sign bit in the high half, copy the low half unchanged.
 */
static DisasJumpType op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}
1206
/* out = in1 + in2 (CC, if any, is produced by the insn's cout stage). */
static DisasJumpType op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
1212
/*
 * 64-bit unsigned add: out = in1 + in2, with the carry-out left in
 * cc_src (0 or 1) for the ADDU cc computation.
 */
static DisasJumpType op_addu64(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    return DISAS_NEXT;
}
1219
1220 /* Compute carry into cc_src. */
/*
 * Compute carry into cc_src (as 0 or 1), normalizing from whatever
 * cc representation is currently live in the DisasContext.
 */
static void compute_carry(DisasContext *s)
{
    switch (s->cc_op) {
    case CC_OP_ADDU:
        /* The carry value is already in cc_src (1,0). */
        break;
    case CC_OP_SUBU:
        /* For SUBU, cc_src holds borrow as (0,-1); +1 maps it to (1,0). */
        tcg_gen_addi_i64(cc_src, cc_src, 1);
        break;
    default:
        /* Materialize the CC value first, then fall through. */
        gen_op_calc_cc(s);
        /* fall through */
    case CC_OP_STATIC:
        /* The carry flag is the msb of CC; compute into cc_src. */
        tcg_gen_extu_i32_i64(cc_src, cc_op);
        tcg_gen_shri_i64(cc_src, cc_src, 1);
        break;
    }
}
1240
/* 32-bit add-with-carry: out = in1 + in2 + carry (carry from cc_src). */
static DisasJumpType op_addc32(DisasContext *s, DisasOps *o)
{
    compute_carry(s);
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    tcg_gen_add_i64(o->out, o->out, cc_src);
    return DISAS_NEXT;
}
1248
/*
 * 64-bit add-with-carry: out = in1 + carry + in2, with the final
 * carry-out accumulated into cc_src across the two add2 steps.
 */
static DisasJumpType op_addc64(DisasContext *s, DisasOps *o)
{
    compute_carry(s);

    TCGv_i64 zero = tcg_constant_i64(0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, zero);
    tcg_gen_add2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);

    return DISAS_NEXT;
}
1259
/*
 * ADD to storage (signed): mem[addr1] += in2 (immediate), where
 * s->insn->data carries the MemOp size.  Uses an atomic fetch-add when
 * the interlocked-access facility (STFLE 45) is present; otherwise a
 * plain load/add/store sequence.
 */
static DisasJumpType op_asi(DisasContext *s, DisasOps *o)
{
    bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);

    o->in1 = tcg_temp_new_i64();
    if (non_atomic) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    if (non_atomic) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
1281
/*
 * ADD to storage (unsigned 64-bit variant of op_asi): additionally
 * leaves the carry-out in cc_src for the ADDU cc computation.
 */
static DisasJumpType op_asiu64(DisasContext *s, DisasOps *o)
{
    bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);

    o->in1 = tcg_temp_new_i64();
    if (non_atomic) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);

    if (non_atomic) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
1304
/* Short (32-bit) BFP add via helper; CC comes from the helper's cout. */
static DisasJumpType op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}
1310
/* Long (64-bit) BFP add via helper. */
static DisasJumpType op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}
1316
/* Extended (128-bit) BFP add via helper, using the 128-bit operand slots. */
static DisasJumpType op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out_128, tcg_env, o->in1_128, o->in2_128);
    return DISAS_NEXT;
}
1322
/* out = in1 & in2. */
static DisasJumpType op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
1328
/*
 * AND immediate against one SIZE-bit field of in1 at bit offset SHIFT
 * (both packed into s->insn->data).  Bits outside the field are kept by
 * OR-ing ~mask into the shifted immediate before the AND.  CC is set
 * from the masked field only.
 */
static DisasJumpType op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, o->in2, shift);
    tcg_gen_ori_i64(t, t, ~mask);
    tcg_gen_and_i64(o->out, o->in1, t);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
1345
/* out = in1 & ~in2 (AND with complement). */
static DisasJumpType op_andc(DisasContext *s, DisasOps *o)
{
    tcg_gen_andc_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
1351
/* out = in1 | ~in2 (OR with complement). */
static DisasJumpType op_orc(DisasContext *s, DisasOps *o)
{
    tcg_gen_orc_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
1357
/* out = ~(in1 & in2). */
static DisasJumpType op_nand(DisasContext *s, DisasOps *o)
{
    tcg_gen_nand_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
1363
/* out = ~(in1 | in2). */
static DisasJumpType op_nor(DisasContext *s, DisasOps *o)
{
    tcg_gen_nor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
1369
/* out = ~(in1 ^ in2) (equivalence / NOT-XOR). */
static DisasJumpType op_nxor(DisasContext *s, DisasOps *o)
{
    tcg_gen_eqv_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
1375
/*
 * AND to storage: mem[addr1] &= in2, with MemOp size in s->insn->data.
 * Atomic fetch-and when the interlocked-access-2 facility exists,
 * otherwise load/and/store.  The result is recomputed in o->out either
 * way so the cout stage can derive CC from it.
 */
static DisasJumpType op_ni(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
1396
/*
 * BRANCH AND SAVE: store the link information (next insn address) into
 * out, then branch to in2 if it is non-NULL (in2 == NULL encodes the
 * "branch register 0 = no branch" case).
 */
static DisasJumpType op_bas(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    if (o->in2) {
        return help_goto_indirect(s, o->in2);
    } else {
        return DISAS_NEXT;
    }
}
1406
/*
 * Build the BAL-style link information in o->out.  In 31/64-bit mode
 * this is the plain link address.  In 24-bit mode the high byte of the
 * low word encodes ILC (bits 30-31), CC (bits 28-29) and the program
 * mask (bits 24-27, taken from psw_mask bits 40-43), above the 24-bit
 * return address.
 */
static void save_link_info(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t;

    if (s->base.tb->flags & (FLAG_MASK_32 | FLAG_MASK_64)) {
        pc_to_link_info(o->out, s, s->pc_tmp);
        return;
    }
    /* CC must be materialized before it can be copied into the link. */
    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, 0xffffffff00000000ull);
    /* ILC (insn length in halfwords) in bits 30-31, address below. */
    tcg_gen_ori_i64(o->out, o->out, ((s->ilen / 2) << 30) | s->pc_tmp);
    t = tcg_temp_new_i64();
    /* Program mask from the PSW into bits 24-27. */
    tcg_gen_shri_i64(t, psw_mask, 16);
    tcg_gen_andi_i64(t, t, 0x0f000000);
    tcg_gen_or_i64(o->out, o->out, t);
    /* Condition code into bits 28-29. */
    tcg_gen_extu_i32_i64(t, cc_op);
    tcg_gen_shli_i64(t, t, 28);
    tcg_gen_or_i64(o->out, o->out, t);
}
1426
/*
 * BRANCH AND LINK: like op_bas but saves the full 24-bit-mode link
 * information (ILC/CC/program mask) via save_link_info.
 */
static DisasJumpType op_bal(DisasContext *s, DisasOps *o)
{
    save_link_info(s, o);
    if (o->in2) {
        return help_goto_indirect(s, o->in2);
    } else {
        return DISAS_NEXT;
    }
}
1436
/*
 * Disassemble the target of a branch.  The results are returned in a form
 * suitable for passing into help_branch():
 *
 * - bool IS_IMM reflects whether the target is fixed or computed.  Non-EXECUTEd
 *   branches, whose DisasContext *S contains the relative immediate field RI,
 *   are considered fixed.  All the other branches are considered computed.
 * - int IMM is the value of RI.
 * - TCGv_i64 CDEST is the address of the computed target.
 *
 * For a branch under EXECUTE (s->ex_value set), the relative offset is
 * applied to the EXECUTE target address loaded from env->ex_target, so
 * the destination must be computed at run time.
 */
#define disas_jdest(s, ri, is_imm, imm, cdest) do {                         \
    if (have_field(s, ri)) {                                                \
        if (unlikely(s->ex_value)) {                                        \
            cdest = tcg_temp_new_i64();                                     \
            tcg_gen_ld_i64(cdest, tcg_env, offsetof(CPUS390XState, ex_target));\
            tcg_gen_addi_i64(cdest, cdest, (int64_t)get_field(s, ri) * 2);  \
            is_imm = false;                                                 \
        } else {                                                            \
            is_imm = true;                                                  \
        }                                                                   \
    } else {                                                                \
        is_imm = false;                                                     \
    }                                                                       \
    imm = is_imm ? get_field(s, ri) : 0;                                    \
} while (false)
1462
/*
 * BRANCH RELATIVE AND SAVE: save the link info, then branch
 * unconditionally (mask 0xf) to the i2-relative target.
 */
static DisasJumpType op_basi(DisasContext *s, DisasOps *o)
{
    DisasCompare c;
    bool is_imm;
    int imm;

    pc_to_link_info(o->out, s, s->pc_tmp);

    disas_jdest(s, i2, is_imm, imm, o->in2);
    disas_jcc(s, &c, 0xf);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1475
/*
 * BRANCH ON CONDITION: branch to the i2/r2 target when the condition
 * mask m1 matches CC.  BCR with R2=0 never branches; masks 14/15 in
 * that form act as serialization points (memory barrier).
 */
static DisasJumpType op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s, m1);
    DisasCompare c;
    bool is_imm;
    int imm;

    /* BCR with R2 = 0 causes no branching */
    if (have_field(s, r2) && get_field(s, r2) == 0) {
        if (m1 == 14) {
            /* Perform serialization */
            /* FIXME: check for fast-BCR-serialization facility */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        if (m1 == 15) {
            /* Perform serialization */
            /* FIXME: perform checkpoint-synchronisation */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        return DISAS_NEXT;
    }

    disas_jdest(s, i2, is_imm, imm, o->in2);
    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1502
/*
 * BRANCH ON COUNT (32-bit): decrement the low 32 bits of r1 and branch
 * to the target while the result is non-zero.
 */
static DisasJumpType op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    DisasCompare c;
    bool is_imm;
    TCGv_i64 t;
    int imm;

    c.cond = TCG_COND_NE;
    c.is_64 = false;

    /* Decrement and write back only the low 32 bits of r1. */
    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_constant_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);

    disas_jdest(s, i2, is_imm, imm, o->in2);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1524
/*
 * BRANCH ON COUNT HIGH: decrement the high 32 bits of r1 and branch
 * while non-zero.  This form only exists with a relative immediate
 * target, hence is_imm is hard-coded to 1.
 */
static DisasJumpType op_bcth(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int imm = get_field(s, i2);
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;

    /* Operate on the high word of r1 and store it back there. */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, regs[r1], 32);
    tcg_gen_subi_i64(t, t, 1);
    store_reg32h_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_constant_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);

    return help_branch(s, &c, 1, imm, o->in2);
}
1545
/*
 * BRANCH ON COUNT (64-bit): decrement r1 in place and branch to the
 * target while the result is non-zero.
 */
static DisasJumpType op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    DisasCompare c;
    bool is_imm;
    int imm;

    c.cond = TCG_COND_NE;
    c.is_64 = true;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_constant_i64(0);

    disas_jdest(s, i2, is_imm, imm, o->in2);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1563
/*
 * BRANCH ON INDEX (32-bit, BXH/BXLE family): r1 += r3, then compare the
 * low 32 bits of r1 against the comparand in r3|1 and branch on
 * high (GT) or low-or-equal (LE), selected by s->insn->data.
 * Note that for odd r3, r3|1 == r3, i.e. increment == comparand.
 */
static DisasJumpType op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    DisasCompare c;
    bool is_imm;
    TCGv_i64 t;
    int imm;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;

    /* Compute the new index before the comparand is (possibly) read. */
    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);

    disas_jdest(s, i2, is_imm, imm, o->in2);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1587
/*
 * BRANCH ON INDEX (64-bit, BXHG/BXLEG family): as op_bx32 but on full
 * registers.  When r1 aliases the comparand register (r1 == r3|1) the
 * comparand must be copied before r1 is updated.
 */
static DisasJumpType op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    DisasCompare c;
    bool is_imm;
    int imm;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    if (r1 == (r3 | 1)) {
        /* r1 will be clobbered below; snapshot the comparand first. */
        c.u.s64.b = load_reg(r3 | 1);
    } else {
        c.u.s64.b = regs[r3 | 1];
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];

    disas_jdest(s, i2, is_imm, imm, o->in2);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1611
/*
 * COMPARE AND BRANCH / COMPARE AND TRAP-style conditional jump:
 * compare in1 against in2 with the relation selected by mask m3
 * (unsigned when s->insn->data is set), branching to the i4-relative
 * or b4/d4-computed target.
 */
static DisasJumpType op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    o->out = NULL;
    disas_jdest(s, i4, is_imm, imm, o->out);
    if (!is_imm && !o->out) {
        /* No relative field: target comes from base+displacement. */
        imm = 0;
        o->out = get_address(s, 0, get_field(s, b4),
                             get_field(s, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}
1636
/* Short BFP compare via helper; helper writes CC directly. */
static DisasJumpType op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, tcg_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
1643
/* Long BFP compare via helper; helper writes CC directly. */
static DisasJumpType op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, tcg_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
1650
/* Extended (128-bit) BFP compare via helper; helper writes CC directly. */
static DisasJumpType op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, tcg_env, o->in1_128, o->in2_128);
    set_cc_static(s);
    return DISAS_NEXT;
}
1657
fpinst_extract_m34(DisasContext * s,bool m3_with_fpe,bool m4_with_fpe)1658 static TCGv_i32 fpinst_extract_m34(DisasContext *s, bool m3_with_fpe,
1659 bool m4_with_fpe)
1660 {
1661 const bool fpe = s390_has_feat(S390_FEAT_FLOATING_POINT_EXT);
1662 uint8_t m3 = get_field(s, m3);
1663 uint8_t m4 = get_field(s, m4);
1664
1665 /* m3 field was introduced with FPE */
1666 if (!fpe && m3_with_fpe) {
1667 m3 = 0;
1668 }
1669 /* m4 field was introduced with FPE */
1670 if (!fpe && m4_with_fpe) {
1671 m4 = 0;
1672 }
1673
1674 /* Check for valid rounding modes. Mode 3 was introduced later. */
1675 if (m3 == 2 || m3 > 7 || (!fpe && m3 == 3)) {
1676 gen_program_exception(s, PGM_SPECIFICATION);
1677 return NULL;
1678 }
1679
1680 return tcg_constant_i32(deposit32(m3, 4, 4, m4));
1681 }
1682
/* Convert short BFP to 32-bit signed int (helper sets CC). */
static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    /* Invalid rounding mode: exception already raised. */
    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfeb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
1694
/* Convert long BFP to 32-bit signed int (helper sets CC). */
static DisasJumpType op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfdb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
1706
/* Convert extended BFP to 32-bit signed int (helper sets CC). */
static DisasJumpType op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfxb(o->out, tcg_env, o->in2_128, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
1718
/* Convert short BFP to 64-bit signed int (helper sets CC). */
static DisasJumpType op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgeb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
1730
/* Convert long BFP to 64-bit signed int (helper sets CC). */
static DisasJumpType op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgdb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
1742
/* Convert extended BFP to 64-bit signed int (helper sets CC). */
static DisasJumpType op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgxb(o->out, tcg_env, o->in2_128, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
1754
/* Convert short BFP to 32-bit unsigned int (helper sets CC). */
static DisasJumpType op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfeb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
1766
/* Convert long BFP to 32-bit unsigned int (helper sets CC). */
static DisasJumpType op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfdb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
1778
/* Convert extended BFP to 32-bit unsigned int (helper sets CC). */
static DisasJumpType op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfxb(o->out, tcg_env, o->in2_128, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
1790
/* Convert short BFP to 64-bit unsigned int (helper sets CC). */
static DisasJumpType op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgeb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
1802
/* Convert long BFP to 64-bit unsigned int (helper sets CC). */
static DisasJumpType op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgdb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
1814
/* Convert extended BFP to 64-bit unsigned int (helper sets CC). */
static DisasJumpType op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgxb(o->out, tcg_env, o->in2_128, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
1826
/* Convert 64-bit signed int to short BFP (no CC change). */
static DisasJumpType op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cegb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}
1837
/* Convert 64-bit signed int to long BFP (no CC change). */
static DisasJumpType op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cdgb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}
1848
/* Convert 64-bit signed int to extended BFP (no CC change). */
static DisasJumpType op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cxgb(o->out_128, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}
1859
/* Convert 64-bit unsigned int to short BFP (no CC change). */
static DisasJumpType op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_celgb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}
1870
/* Convert 64-bit unsigned int to long BFP (no CC change). */
static DisasJumpType op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cdlgb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}
1881
/* Convert 64-bit unsigned int to extended BFP (no CC change). */
static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cxlgb(o->out_128, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}
1892
/*
 * CHECKSUM: the helper returns a 128-bit pair of (new checksum,
 * number of bytes processed).  The checksum goes to out; the second
 * operand register pair (r2 = address, r2+1 = length) is advanced by
 * the processed length.
 */
static DisasJumpType op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);
    TCGv_i128 pair = tcg_temp_new_i128();
    TCGv_i64 len = tcg_temp_new_i64();

    gen_helper_cksm(pair, tcg_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    tcg_gen_extr_i128_i64(o->out, len, pair);

    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);

    return DISAS_NEXT;
}
1908
/*
 * COMPARE LOGICAL (character): compare l1+1 bytes at addr1 against
 * in2.  Power-of-two lengths up to 8 are inlined as two loads plus an
 * unsigned compare; other lengths go through the helper.
 */
static DisasJumpType op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s, l1);
    TCGv_i64 src;
    TCGv_i32 vl;
    MemOp mop;

    switch (l + 1) {
    case 1:
    case 2:
    case 4:
    case 8:
        /* ctz32(len) gives the MO_8..MO_64 size code. */
        mop = ctz32(l + 1) | MO_TE;
        /* Do not update cc_src yet: loading cc_dst may cause an exception. */
        src = tcg_temp_new_i64();
        tcg_gen_qemu_ld_tl(src, o->addr1, get_mem_index(s), mop);
        tcg_gen_qemu_ld_tl(cc_dst, o->in2, get_mem_index(s), mop);
        gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, src, cc_dst);
        return DISAS_NEXT;
    default:
        vl = tcg_constant_i32(l);
        gen_helper_clc(cc_op, tcg_env, vl, o->addr1, o->in2);
        set_cc_static(s);
        return DISAS_NEXT;
    }
}
1935
op_clcl(DisasContext * s,DisasOps * o)1936 static DisasJumpType op_clcl(DisasContext *s, DisasOps *o)
1937 {
1938 int r1 = get_field(s, r1);
1939 int r2 = get_field(s, r2);
1940 TCGv_i32 t1, t2;
1941
1942 /* r1 and r2 must be even. */
1943 if (r1 & 1 || r2 & 1) {
1944 gen_program_exception(s, PGM_SPECIFICATION);
1945 return DISAS_NORETURN;
1946 }
1947
1948 t1 = tcg_constant_i32(r1);
1949 t2 = tcg_constant_i32(r2);
1950 gen_helper_clcl(cc_op, tcg_env, t1, t2);
1951 set_cc_static(s);
1952 return DISAS_NEXT;
1953 }
1954
op_clcle(DisasContext * s,DisasOps * o)1955 static DisasJumpType op_clcle(DisasContext *s, DisasOps *o)
1956 {
1957 int r1 = get_field(s, r1);
1958 int r3 = get_field(s, r3);
1959 TCGv_i32 t1, t3;
1960
1961 /* r1 and r3 must be even. */
1962 if (r1 & 1 || r3 & 1) {
1963 gen_program_exception(s, PGM_SPECIFICATION);
1964 return DISAS_NORETURN;
1965 }
1966
1967 t1 = tcg_constant_i32(r1);
1968 t3 = tcg_constant_i32(r3);
1969 gen_helper_clcle(cc_op, tcg_env, t1, o->in2, t3);
1970 set_cc_static(s);
1971 return DISAS_NEXT;
1972 }
1973
op_clclu(DisasContext * s,DisasOps * o)1974 static DisasJumpType op_clclu(DisasContext *s, DisasOps *o)
1975 {
1976 int r1 = get_field(s, r1);
1977 int r3 = get_field(s, r3);
1978 TCGv_i32 t1, t3;
1979
1980 /* r1 and r3 must be even. */
1981 if (r1 & 1 || r3 & 1) {
1982 gen_program_exception(s, PGM_SPECIFICATION);
1983 return DISAS_NORETURN;
1984 }
1985
1986 t1 = tcg_constant_i32(r1);
1987 t3 = tcg_constant_i32(r3);
1988 gen_helper_clclu(cc_op, tcg_env, t1, o->in2, t3);
1989 set_cc_static(s);
1990 return DISAS_NEXT;
1991 }
1992
/*
 * COMPARE LOGICAL CHARACTERS UNDER MASK: compare the bytes of the low
 * word of in1 selected by mask m3 against successive bytes at in2.
 * Helper sets CC.
 */
static DisasJumpType op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_constant_i32(get_field(s, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_extrl_i64_i32(t1, o->in1);
    gen_helper_clm(cc_op, tcg_env, t1, m3, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
2003
/*
 * COMPARE LOGICAL STRING: helper compares the strings at in1/in2
 * (terminator byte in r0) and returns the updated addresses as a
 * 128-bit pair, written back into in2/in1.  Helper sets CC.
 */
static DisasJumpType op_clst(DisasContext *s, DisasOps *o)
{
    TCGv_i128 pair = tcg_temp_new_i128();

    gen_helper_clst(pair, tcg_env, regs[0], o->in1, o->in2);
    tcg_gen_extr_i128_i64(o->in2, o->in1, pair);

    set_cc_static(s);
    return DISAS_NEXT;
}
2014
/* COPY SIGN: out = sign bit of in1 combined with magnitude of in2. */
static DisasJumpType op_cps(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t = tcg_temp_new_i64();
    tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    tcg_gen_or_i64(o->out, o->out, t);
    return DISAS_NEXT;
}
2023
/*
 * COMPARE AND SWAP: atomic cmpxchg of mem[b2+d2] with expected value
 * in2 and new value in1; MemOp size comes from s->insn->data.  CC is
 * 0 on success (memory == expected), 1 on mismatch.
 */
static DisasJumpType op_cs(DisasContext *s, DisasOps *o)
{
    int d2 = get_field(s, d2);
    int b2 = get_field(s, b2);
    TCGv_i64 addr, cc;

    /* Note that in1 = R3 (new value) and
       in2 = (zero-extended) R1 (expected value). */

    addr = get_address(s, 0, b2, d2);
    tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
                               get_mem_index(s), s->insn->data | MO_ALIGN);

    /* Are the memory and expected values (un)equal?  Note that this setcond
       produces the output CC value, thus the NE sense of the test. */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
    tcg_gen_extrl_i64_i32(cc_op, cc);
    set_cc_static(s);

    return DISAS_NEXT;
}
2046
/*
 * COMPARE DOUBLE AND SWAP (128-bit): atomic cmpxchg of the 16-byte
 * field at addr1 with expected value R1:R1+1 and new value R3:R3+1.
 * CC is derived by XOR-comparing the old memory value against the
 * still-unmodified expected registers.
 */
static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);

    o->out_128 = tcg_temp_new_i128();
    tcg_gen_concat_i64_i128(o->out_128, regs[r1 + 1], regs[r1]);

    /* Note out (R1:R1+1) = expected value and in2 (R3:R3+1) = new value. */
    tcg_gen_atomic_cmpxchg_i128(o->out_128, o->addr1, o->out_128, o->in2_128,
                                get_mem_index(s), MO_BE | MO_128 | MO_ALIGN);

    /*
     * Extract result into cc_dst:cc_src, compare vs the expected value
     * in the as yet unmodified input registers, then update CC_OP.
     */
    tcg_gen_extr_i128_i64(cc_src, cc_dst, o->out_128);
    tcg_gen_xor_i64(cc_dst, cc_dst, regs[r1]);
    tcg_gen_xor_i64(cc_src, cc_src, regs[r1 + 1]);
    tcg_gen_or_i64(cc_dst, cc_dst, cc_src);
    set_cc_nz_u64(s, cc_dst);

    return DISAS_NEXT;
}
2070
/*
 * COMPARE AND SWAP AND STORE: entirely in the helper; a separate
 * serialized variant is used when other vCPUs may run in parallel.
 */
static DisasJumpType op_csst(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    TCGv_i32 t_r3 = tcg_constant_i32(r3);

    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        gen_helper_csst_parallel(cc_op, tcg_env, t_r3, o->addr1, o->in2);
    } else {
        gen_helper_csst(cc_op, tcg_env, t_r3, o->addr1, o->in2);
    }

    set_cc_static(s);
    return DISAS_NEXT;
}
2085
2086 #ifndef CONFIG_USER_ONLY
/*
 * COMPARE AND SWAP AND PURGE (privileged): cmpxchg at the aligned
 * address derived from in2; on success, if bit 0 of in2 (R2 LSB) is
 * set, purge the TLB on all CPUs via the helper.
 */
static DisasJumpType op_csp(DisasContext *s, DisasOps *o)
{
    MemOp mop = s->insn->data;
    TCGv_i64 addr, old, cc;
    TCGLabel *lab = gen_new_label();

    /* Note that in1 = R1 (zero-extended expected value),
       out = R1 (original reg), out2 = R1+1 (new value).  */

    addr = tcg_temp_new_i64();
    old = tcg_temp_new_i64();
    /* Mask off the low address bits to get a naturally aligned access. */
    tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
    tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
                               get_mem_index(s), mop | MO_ALIGN);

    /* Are the memory and expected values (un)equal?  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
    tcg_gen_extrl_i64_i32(cc_op, cc);

    /* Write back the output now, so that it happens before the
       following branch, so that we don't need local temps.  */
    if ((mop & MO_SIZE) == MO_32) {
        tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
    } else {
        tcg_gen_mov_i64(o->out, old);
    }

    /* If the comparison was equal, and the LSB of R2 was set,
       then we need to flush the TLB (for all cpus).  */
    tcg_gen_xori_i64(cc, cc, 1);
    tcg_gen_and_i64(cc, cc, o->in2);
    tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);

    gen_helper_purge(tcg_env);
    gen_set_label(lab);

    return DISAS_NEXT;
}
2126 #endif
2127
/*
 * CONVERT TO BINARY: load an 8-byte packed-decimal operand from addr1
 * and convert it into register r1 via the helper.
 */
static DisasJumpType op_cvb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t, o->addr1, get_mem_index(s), MO_TEUQ);
    gen_helper_cvb(tcg_env, tcg_constant_i32(get_field(s, r1)), t);
    return DISAS_NEXT;
}
2135
/*
 * CONVERT TO BINARY (64-bit result): load a 16-byte packed-decimal
 * operand from addr1 and convert it into out via the helper.
 */
static DisasJumpType op_cvbg(DisasContext *s, DisasOps *o)
{
    TCGv_i128 t = tcg_temp_new_i128();
    tcg_gen_qemu_ld_i128(t, o->addr1, get_mem_index(s), MO_TE | MO_128);
    gen_helper_cvbg(o->out, tcg_env, t);
    return DISAS_NEXT;
}
2143
/*
 * CONVERT TO DECIMAL: convert the low 32 bits of in1 to an 8-byte
 * packed-decimal value and store it at in2.
 */
static DisasJumpType op_cvd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i32 t2 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t2, o->in1);
    gen_helper_cvd(t1, t2);
    tcg_gen_qemu_st_i64(t1, o->in2, get_mem_index(s), MO_TEUQ);
    return DISAS_NEXT;
}
2153
/*
 * CONVERT TO DECIMAL (64-bit source): convert in1 to a 16-byte
 * packed-decimal value and store it at in2.
 */
static DisasJumpType op_cvdg(DisasContext *s, DisasOps *o)
{
    TCGv_i128 t = tcg_temp_new_i128();
    gen_helper_cvdg(t, o->in1);
    tcg_gen_qemu_st_i128(t, o->in2, get_mem_index(s), MO_TE | MO_128);
    return DISAS_NEXT;
}
2161
/*
 * COMPARE AND TRAP: compare in1 with in2 under mask m3 (unsigned when
 * s->insn->data is set) and raise a trap when the relation holds.  The
 * emitted branch skips the trap on the inverted condition.
 */
static DisasJumpType op_ct(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    TCGLabel *lab = gen_new_label();
    TCGCond c;

    c = tcg_invert_cond(ltgt_cond[m3]);
    if (s->insn->data) {
        c = tcg_unsigned_cond(c);
    }
    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);

    /* Trap.  */
    gen_trap(s);

    gen_set_label(lab);
    return DISAS_NEXT;
}
2180
op_cuXX(DisasContext * s,DisasOps * o)2181 static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
2182 {
2183 int m3 = get_field(s, m3);
2184 int r1 = get_field(s, r1);
2185 int r2 = get_field(s, r2);
2186 TCGv_i32 tr1, tr2, chk;
2187
2188 /* R1 and R2 must both be even. */
2189 if ((r1 | r2) & 1) {
2190 gen_program_exception(s, PGM_SPECIFICATION);
2191 return DISAS_NORETURN;
2192 }
2193 if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
2194 m3 = 0;
2195 }
2196
2197 tr1 = tcg_constant_i32(r1);
2198 tr2 = tcg_constant_i32(r2);
2199 chk = tcg_constant_i32(m3);
2200
2201 switch (s->insn->data) {
2202 case 12:
2203 gen_helper_cu12(cc_op, tcg_env, tr1, tr2, chk);
2204 break;
2205 case 14:
2206 gen_helper_cu14(cc_op, tcg_env, tr1, tr2, chk);
2207 break;
2208 case 21:
2209 gen_helper_cu21(cc_op, tcg_env, tr1, tr2, chk);
2210 break;
2211 case 24:
2212 gen_helper_cu24(cc_op, tcg_env, tr1, tr2, chk);
2213 break;
2214 case 41:
2215 gen_helper_cu41(cc_op, tcg_env, tr1, tr2, chk);
2216 break;
2217 case 42:
2218 gen_helper_cu42(cc_op, tcg_env, tr1, tr2, chk);
2219 break;
2220 default:
2221 g_assert_not_reached();
2222 }
2223
2224 set_cc_static(s);
2225 return DISAS_NEXT;
2226 }
2227
2228 #ifndef CONFIG_USER_ONLY
/* DIAGNOSE: hand r1, r3 and the i2 function code to the helper. */
static DisasJumpType op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
    TCGv_i32 func_code = tcg_constant_i32(get_field(s, i2));

    gen_helper_diag(tcg_env, r1, r3, func_code);
    return DISAS_NEXT;
}
2238 #endif
2239
/* Signed 32-bit divide: the helper packs both halves of the result into
   one 64-bit value, which is then split into out2 (low) and out (high). */
static DisasJumpType op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out, tcg_env, o->in1, o->in2);
    tcg_gen_extr32_i64(o->out2, o->out, o->out);
    return DISAS_NEXT;
}
2246
/* Unsigned 32-bit divide: the helper packs both halves of the result
   into one 64-bit value, split into out2 (low) and out (high). */
static DisasJumpType op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out, tcg_env, o->in1, o->in2);
    tcg_gen_extr32_i64(o->out2, o->out, o->out);
    return DISAS_NEXT;
}
2253
/* Signed 64-bit divide: the helper returns a 128-bit pair which is
   split into out2 (low 64 bits) and out (high 64 bits). */
static DisasJumpType op_divs64(DisasContext *s, DisasOps *o)
{
    TCGv_i128 t = tcg_temp_new_i128();

    gen_helper_divs64(t, tcg_env, o->in1, o->in2);
    tcg_gen_extr_i128_i64(o->out2, o->out, t);
    return DISAS_NEXT;
}
2262
/* Unsigned 128/64 divide: out/out2 carry the 128-bit dividend into the
   helper; the 128-bit result is split back into out2 (low)/out (high). */
static DisasJumpType op_divu64(DisasContext *s, DisasOps *o)
{
    TCGv_i128 t = tcg_temp_new_i128();

    gen_helper_divu64(t, tcg_env, o->out, o->out2, o->in2);
    tcg_gen_extr_i128_i64(o->out2, o->out, t);
    return DISAS_NEXT;
}
2271
/* DIVIDE (short BFP), via helper so FP exceptions are handled. */
static DisasJumpType op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}
2277
/* DIVIDE (long BFP), via helper so FP exceptions are handled. */
static DisasJumpType op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}
2283
/* DIVIDE (extended BFP), 128-bit operands, via helper. */
static DisasJumpType op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out_128, tcg_env, o->in1_128, o->in2_128);
    return DISAS_NEXT;
}
2289
/* EXTRACT ACCESS: read access register r2 into the output. */
static DisasJumpType op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);
    tcg_gen_ld32u_i64(o->out, tcg_env, offsetof(CPUS390XState, aregs[r2]));
    return DISAS_NEXT;
}
2296
/* EXTRACT CPU ATTRIBUTE: we model no cache topology, so return all-ones. */
static DisasJumpType op_ecag(DisasContext *s, DisasOps *o)
{
    /* No cache information provided. */
    tcg_gen_movi_i64(o->out, -1);
    return DISAS_NEXT;
}
2303
/* EXTRACT FPC: read the floating-point control register. */
static DisasJumpType op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, tcg_env, offsetof(CPUS390XState, fpc));
    return DISAS_NEXT;
}
2309
/* EXTRACT PSW: store the high PSW word (with the current cc deposited
   into its cc field) into r1, and the low word into r2 if r2 != 0. */
static DisasJumpType op_epsw(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t_cc = tcg_temp_new_i64();

    /* Note the "subsequently" in the PoO, which implies a defined result
       if r1 == r2. Thus we cannot defer these writes to an output hook. */
    gen_op_calc_cc(s);
    tcg_gen_extu_i32_i64(t_cc, cc_op);
    tcg_gen_shri_i64(t, psw_mask, 32);
    /* Insert the materialized cc at bits 12-13 of the high word. */
    tcg_gen_deposit_i64(t, t, t_cc, 12, 2);
    store_reg32_i64(r1, t);
    if (r2 != 0) {
        store_reg32_i64(r2, psw_mask);
    }
    return DISAS_NEXT;
}
2329
/* EXECUTE: run the helper that fetches and stages the target insn.
   The PSW/cc must be synced first since the helper changes control flow. */
static DisasJumpType op_ex(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    TCGv_i32 ilen;
    TCGv_i64 v1;

    /* Nested EXECUTE is not allowed. */
    if (unlikely(s->ex_value)) {
        gen_program_exception(s, PGM_EXECUTE);
        return DISAS_NORETURN;
    }

    update_psw_addr(s);
    update_cc_op(s);

    /* r1 == 0 means no modification of the target instruction. */
    if (r1 == 0) {
        v1 = tcg_constant_i64(0);
    } else {
        v1 = regs[r1];
    }

    ilen = tcg_constant_i32(s->ilen);
    gen_helper_ex(tcg_env, ilen, v1, o->in2);

    return DISAS_PC_CC_UPDATED;
}
2356
/* LOAD FP INTEGER (short BFP): round to integer per the m3/m4 modes.
   A NULL m34 means fpinst_extract_m34 already raised an exception. */
static DisasJumpType op_fieb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fieb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}
2367
/* LOAD FP INTEGER (long BFP): round to integer per the m3/m4 modes.
   A NULL m34 means fpinst_extract_m34 already raised an exception. */
static DisasJumpType op_fidb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fidb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}
2378
/* LOAD FP INTEGER (extended BFP): round to integer per the m3/m4 modes.
   A NULL m34 means fpinst_extract_m34 already raised an exception. */
static DisasJumpType op_fixb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fixb(o->out_128, tcg_env, o->in2_128, m34);
    return DISAS_NEXT;
}
2389
/* FIND LEFTMOST ONE: R1 gets the leading-zero count (64 for zero input),
   R1+1 gets the input with the found bit cleared. */
static DisasJumpType op_flogr(DisasContext *s, DisasOps *o)
{
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64. It also lets cc_dst be a convenient
       temporary during our computation. */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64. */
    tcg_gen_clzi_i64(o->out, o->in2, 64);

    /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
       value by 64, which is undefined. But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing. */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out);
    /* cc_dst still holds the original input here (set above). */
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
    return DISAS_NEXT;
}
2409
/* INSERT CHARACTERS UNDER MASK: load the bytes selected by m3 and
   deposit them into the destination; cc is computed from the inserted
   bits via the ccm mask.  Contiguous masks become one wide load. */
static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    /* base selects which word of the register the bytes land in
       (differs between ICM, ICMH, etc. via insn->data). */
    int pos, len, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();
    uint64_t ccm;

    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit load. */
        tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_TEUL);
        len = 32;
        goto one_insert;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit load. */
        tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_TEUW);
        len = 16;
        goto one_insert;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit load. */
        tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_UB);
        len = 8;
        goto one_insert;

    one_insert:
        pos = base + ctz32(m3) * 8;
        tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
        ccm = ((1ull << len) - 1) << pos;
        break;

    case 0:
        /* Recognize access exceptions for the first byte. */
        tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_UB);
        gen_op_movi_cc(s, 0);
        return DISAS_NEXT;

    default:
        /* This is going to be a sequence of loads and inserts. */
        pos = base + 32 - 8;
        ccm = 0;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_UB);
                tcg_gen_addi_i64(o->in2, o->in2, 1);
                tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
                ccm |= 0xffull << pos;
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }

    /* cc is computed over only the bytes that were inserted. */
    tcg_gen_movi_i64(tmp, ccm);
    gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
    return DISAS_NEXT;
}
2474
op_insi(DisasContext * s,DisasOps * o)2475 static DisasJumpType op_insi(DisasContext *s, DisasOps *o)
2476 {
2477 int shift = s->insn->data & 0xff;
2478 int size = s->insn->data >> 8;
2479 tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
2480 return DISAS_NEXT;
2481 }
2482
/* INSERT PROGRAM MASK: build the cc + program-mask byte and deposit it
   into bits 24-31 of the output register, leaving the rest intact. */
static DisasJumpType op_ipm(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    gen_op_calc_cc(s);
    t1 = tcg_temp_new_i64();
    /* Program mask: PSW bits 40-43. */
    tcg_gen_extract_i64(t1, psw_mask, 40, 4);
    t2 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(t2, cc_op);
    /* Place cc just above the program mask within the byte. */
    tcg_gen_deposit_i64(t1, t1, t2, 4, 60);
    tcg_gen_deposit_i64(o->out, o->out, t1, 24, 8);
    return DISAS_NEXT;
}
2496
2497 #ifndef CONFIG_USER_ONLY
op_idte(DisasContext * s,DisasOps * o)2498 static DisasJumpType op_idte(DisasContext *s, DisasOps *o)
2499 {
2500 TCGv_i32 m4;
2501
2502 if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2503 m4 = tcg_constant_i32(get_field(s, m4));
2504 } else {
2505 m4 = tcg_constant_i32(0);
2506 }
2507 gen_helper_idte(tcg_env, o->in1, o->in2, m4);
2508 return DISAS_NEXT;
2509 }
2510
op_ipte(DisasContext * s,DisasOps * o)2511 static DisasJumpType op_ipte(DisasContext *s, DisasOps *o)
2512 {
2513 TCGv_i32 m4;
2514
2515 if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2516 m4 = tcg_constant_i32(get_field(s, m4));
2517 } else {
2518 m4 = tcg_constant_i32(0);
2519 }
2520 gen_helper_ipte(tcg_env, o->in1, o->in2, m4);
2521 return DISAS_NEXT;
2522 }
2523
/* INSERT STORAGE KEY EXTENDED, via helper. */
static DisasJumpType op_iske(DisasContext *s, DisasOps *o)
{
    gen_helper_iske(o->out, tcg_env, o->in2);
    return DISAS_NEXT;
}
2529 #endif
2530
/* Message-security-assist instructions (KM, KMC, KIMD, ...): validate
   the register operands required by each function type (cascading
   fallthrough adds constraints as the cases get stricter), then defer
   to the msa helper with the feature type from insn->data. */
static DisasJumpType op_msa(DisasContext *s, DisasOps *o)
{
    int r1 = have_field(s, r1) ? get_field(s, r1) : 0;
    int r2 = have_field(s, r2) ? get_field(s, r2) : 0;
    int r3 = have_field(s, r3) ? get_field(s, r3) : 0;
    TCGv_i32 t_r1, t_r2, t_r3, type;

    switch (s->insn->data) {
    case S390_FEAT_TYPE_KMA:
        /* r3 must not alias either of the other operands. */
        if (r3 == r1 || r3 == r2) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_KMCTR:
        /* r3 must be a valid even register. */
        if (r3 & 1 || !r3) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_PPNO:
    case S390_FEAT_TYPE_KMF:
    case S390_FEAT_TYPE_KMC:
    case S390_FEAT_TYPE_KMO:
    case S390_FEAT_TYPE_KM:
        /* r1 must be a valid even register. */
        if (r1 & 1 || !r1) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_KMAC:
    case S390_FEAT_TYPE_KIMD:
    case S390_FEAT_TYPE_KLMD:
        /* r2 must be a valid even register. */
        if (r2 & 1 || !r2) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_PCKMO:
    case S390_FEAT_TYPE_PCC:
        break;
    default:
        g_assert_not_reached();
    };

    t_r1 = tcg_constant_i32(r1);
    t_r2 = tcg_constant_i32(r2);
    t_r3 = tcg_constant_i32(r3);
    type = tcg_constant_i32(s->insn->data);
    gen_helper_msa(cc_op, tcg_env, t_r1, t_r2, t_r3, type);
    set_cc_static(s);
    return DISAS_NEXT;
}
2584
/* COMPARE AND SIGNAL (short BFP): helper sets cc. */
static DisasJumpType op_keb(DisasContext *s, DisasOps *o)
{
    gen_helper_keb(cc_op, tcg_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
2591
/* COMPARE AND SIGNAL (long BFP): helper sets cc. */
static DisasJumpType op_kdb(DisasContext *s, DisasOps *o)
{
    gen_helper_kdb(cc_op, tcg_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
2598
/* COMPARE AND SIGNAL (extended BFP): helper sets cc. */
static DisasJumpType op_kxb(DisasContext *s, DisasOps *o)
{
    gen_helper_kxb(cc_op, tcg_env, o->in1_128, o->in2_128);
    set_cc_static(s);
    return DISAS_NEXT;
}
2605
/* Common body for LOAD AND ADD (LAA family): atomically fetch-and-add
   at the memory operand.  addu64 selects the carry-producing add used
   for the logical (unsigned) cc computation. */
static DisasJumpType help_laa(DisasContext *s, DisasOps *o, bool addu64)
{
    /* The real output is indeed the original value in memory;
       recompute the addition for the computation of CC. */
    tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the addition for setting CC. */
    if (addu64) {
        tcg_gen_movi_i64(cc_src, 0);
        tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    } else {
        tcg_gen_add_i64(o->out, o->in1, o->in2);
    }
    return DISAS_NEXT;
}
2621
/* LOAD AND ADD (signed cc variant). */
static DisasJumpType op_laa(DisasContext *s, DisasOps *o)
{
    return help_laa(s, o, false);
}
2626
/* LOAD AND ADD LOGICAL (unsigned cc variant, tracks carry). */
static DisasJumpType op_laa_addu64(DisasContext *s, DisasOps *o)
{
    return help_laa(s, o, true);
}
2631
/* LOAD AND AND: atomic fetch-and-and at the memory operand. */
static DisasJumpType op_lan(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory;
       recompute the addition for the computation of CC. */
    tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC. */
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
2642
/* LOAD AND OR: atomic fetch-and-or at the memory operand. */
static DisasJumpType op_lao(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory;
       recompute the addition for the computation of CC. */
    tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC. */
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
2653
/* LOAD AND EXCLUSIVE OR: atomic fetch-and-xor at the memory operand. */
static DisasJumpType op_lax(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory;
       recompute the addition for the computation of CC. */
    tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC. */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
2664
/* LOAD LENGTHENED short BFP -> long BFP, via helper. */
static DisasJumpType op_ldeb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldeb(o->out, tcg_env, o->in2);
    return DISAS_NEXT;
}
2670
/* LOAD ROUNDED long BFP -> short BFP, with m34 rounding control.
   A NULL m34 means fpinst_extract_m34 already raised an exception. */
static DisasJumpType op_ledb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_ledb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}
2681
/* LOAD ROUNDED extended BFP -> long BFP, with m34 rounding control. */
static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_ldxb(o->out, tcg_env, o->in2_128, m34);
    return DISAS_NEXT;
}
2692
/* LOAD ROUNDED extended BFP -> short BFP, with m34 rounding control. */
static DisasJumpType op_lexb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_lexb(o->out, tcg_env, o->in2_128, m34);
    return DISAS_NEXT;
}
2703
/* LOAD LENGTHENED long BFP -> extended BFP, via helper. */
static DisasJumpType op_lxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxdb(o->out_128, tcg_env, o->in2);
    return DISAS_NEXT;
}
2709
/* LOAD LENGTHENED short BFP -> extended BFP, via helper. */
static DisasJumpType op_lxeb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxeb(o->out_128, tcg_env, o->in2);
    return DISAS_NEXT;
}
2715
/* Place the 32-bit value into the high half of the 64-bit register
   (short FP values live in the upper word). */
static DisasJumpType op_lde(DisasContext *s, DisasOps *o)
{
    tcg_gen_shli_i64(o->out, o->in2, 32);
    return DISAS_NEXT;
}
2721
/* LOAD LOGICAL THIRTY ONE BITS: keep only bits 33-63 of the operand. */
static DisasJumpType op_llgt(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return DISAS_NEXT;
}
2727
/* Sign-extending 8-bit load. */
static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_SB);
    return DISAS_NEXT;
}
2733
/* Zero-extending 8-bit load. */
static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_UB);
    return DISAS_NEXT;
}
2739
/* Sign-extending big-endian 16-bit load. */
static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TESW);
    return DISAS_NEXT;
}
2745
/* Zero-extending big-endian 16-bit load. */
static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUW);
    return DISAS_NEXT;
}
2751
/* Sign-extending 32-bit load; insn->data may add extra MemOp flags
   (e.g. alignment requirements). */
static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_tl(o->out, o->in2, get_mem_index(s),
                       MO_TESL | s->insn->data);
    return DISAS_NEXT;
}
2758
/* Zero-extending 32-bit load; insn->data may add extra MemOp flags. */
static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_tl(o->out, o->in2, get_mem_index(s),
                       MO_TEUL | s->insn->data);
    return DISAS_NEXT;
}
2765
/* 64-bit load; insn->data may add extra MemOp flags. */
static DisasJumpType op_ld64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s),
                        MO_TEUQ | s->insn->data);
    return DISAS_NEXT;
}
2772
/* LOAD AND TRAP (32-bit): store the value, then trap if it is zero. */
static DisasJumpType op_lat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32_i64(get_field(s, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
2783
/* LOAD AND TRAP (64-bit): load the value, then trap if it is zero. */
static DisasJumpType op_lgat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUQ);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
2794
/* LOAD HIGH AND TRAP: store into the register high half, then trap if
   the value is zero. */
static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32h_i64(get_field(s, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
2805
/* LOAD LOGICAL AND TRAP (32->64): zero-extending load, then trap if the
   value is zero. */
static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();

    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUL);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
2817
/* LOAD LOGICAL THIRTY ONE BITS AND TRAP: mask to 31 bits, then trap if
   the result is zero. */
static DisasJumpType op_llgtat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
2828
/* LOAD ON CONDITION / SELECT: pick between in1 and in2 based on the
   condition decoded from m3 (LOC*) or m4 (SELECT). */
static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;

    if (have_field(s, m3)) {
        /* LOAD * ON CONDITION */
        disas_jcc(s, &c, get_field(s, m3));
    } else {
        /* SELECT */
        disas_jcc(s, &c, get_field(s, m4));
    }

    if (c.is_64) {
        tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
                            o->in2, o->in1);
    } else {
        /* 32-bit comparison: widen the setcond result so a single
           64-bit movcond can be used. */
        TCGv_i32 t32 = tcg_temp_new_i32();
        TCGv_i64 t, z;

        tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);

        t = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t, t32);

        z = tcg_constant_i64(0);
        tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
    }

    return DISAS_NEXT;
}
2859
2860 #ifndef CONFIG_USER_ONLY
/* LOAD CONTROL (32-bit): load control registers r1..r3 via helper. */
static DisasJumpType op_lctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_lctl(tcg_env, r1, o->in2, r3);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
    s->exit_to_mainloop = true;
    return DISAS_TOO_MANY;
}
2871
/* LOAD CONTROL (64-bit): load control registers r1..r3 via helper. */
static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_lctlg(tcg_env, r1, o->in2, r3);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
    s->exit_to_mainloop = true;
    return DISAS_TOO_MANY;
}
2882
/* LOAD REAL ADDRESS: translate the address via helper; sets cc. */
static DisasJumpType op_lra(DisasContext *s, DisasOps *o)
{
    gen_helper_lra(o->out, tcg_env, o->out, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
2889
/* LOAD PROGRAM PARAMETER: store the operand into env->pp. */
static DisasJumpType op_lpp(DisasContext *s, DisasOps *o)
{
    tcg_gen_st_i64(o->in2, tcg_env, offsetof(CPUS390XState, pp));
    return DISAS_NEXT;
}
2895
/* LOAD PSW: load a short (64-bit) PSW, expand it to the normal format,
   and install it.  Control flow does not return here. */
static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o)
{
    TCGv_i64 mask, addr;

    per_breaking_event(s);

    /*
     * Convert the short PSW into the normal PSW, similar to what
     * s390_cpu_load_normal() does.
     */
    mask = tcg_temp_new_i64();
    addr = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(mask, o->in2, get_mem_index(s), MO_TEUQ | MO_ALIGN_8);
    tcg_gen_andi_i64(addr, mask, PSW_MASK_SHORT_ADDR);
    tcg_gen_andi_i64(mask, mask, PSW_MASK_SHORT_CTRL);
    /* Clear the short-PSW indicator bit. */
    tcg_gen_xori_i64(mask, mask, PSW_MASK_SHORTPSW);
    gen_helper_load_psw(tcg_env, mask, addr);
    return DISAS_NORETURN;
}
2915
/* LOAD PSW EXTENDED: load the 16-byte PSW (mask then address) and
   install it.  Control flow does not return here. */
static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
                        MO_TEUQ | MO_ALIGN_8);
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_ld_i64(t2, o->in2, get_mem_index(s), MO_TEUQ);
    gen_helper_load_psw(tcg_env, t1, t2);
    return DISAS_NORETURN;
}
2931 #endif
2932
/* LOAD ACCESS MULTIPLE: load access registers r1..r3 via helper. */
static DisasJumpType op_lam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_lam(tcg_env, r1, o->in2, r3);
    return DISAS_NEXT;
}
2941
/* LOAD MULTIPLE (32-bit): load registers r1..r3 (wrapping mod 16).
   The first and last words are loaded first so that any access
   exception is raised before any register is modified mid-sequence. */
static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
        store_reg32_i64(r1, t1);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld_i64(t2, t2, get_mem_index(s), MO_TEUL);
    store_reg32_i64(r1, t1);
    store_reg32_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
        store_reg32_i64(r1, t1);
    }
    return DISAS_NEXT;
}
2981
/* LOAD MULTIPLE HIGH: like op_lm32, but targets the high halves of
   registers r1..r3.  First/last loads come first for fault ordering. */
static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
        store_reg32h_i64(r1, t1);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld_i64(t2, t2, get_mem_index(s), MO_TEUL);
    store_reg32h_i64(r1, t1);
    store_reg32h_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
        store_reg32h_i64(r1, t1);
    }
    return DISAS_NEXT;
}
3021
/* LOAD MULTIPLE (64-bit): load registers r1..r3 (wrapping mod 16).
   First/last loads come first so faults precede register updates. */
static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld_i64(regs[r1], o->in2, get_mem_index(s), MO_TEUQ);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUQ);
    tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld_i64(regs[r3], t2, get_mem_index(s), MO_TEUQ);
    tcg_gen_mov_i64(regs[r1], t1);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t1, 8);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t1);
        tcg_gen_qemu_ld_i64(regs[r1], o->in2, get_mem_index(s), MO_TEUQ);
    }
    return DISAS_NEXT;
}
3058
/* LOAD PAIR DISJOINT: load both operands "interlocked".  Under
   CF_PARALLEL we punt to the single-stepped EXCP_ATOMIC slow path. */
static DisasJumpType op_lpd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 a1, a2;
    MemOp mop = s->insn->data;

    /* In a parallel context, stop the world and single step. */
    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        update_psw_addr(s);
        update_cc_op(s);
        gen_exception(EXCP_ATOMIC);
        return DISAS_NORETURN;
    }

    /* In a serial context, perform the two loads ... */
    a1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
    a2 = get_address(s, 0, get_field(s, b2), get_field(s, d2));
    tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
    tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);

    /* ... and indicate that we performed them while interlocked. */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}
3082
/* LOAD PAIR FROM QUADWORD: single aligned 128-bit load. */
static DisasJumpType op_lpq(DisasContext *s, DisasOps *o)
{
    o->out_128 = tcg_temp_new_i128();
    tcg_gen_qemu_ld_i128(o->out_128, o->in2, get_mem_index(s),
                         MO_TE | MO_128 | MO_ALIGN);
    return DISAS_NEXT;
}
3090
3091 #ifndef CONFIG_USER_ONLY
/* LOAD USING REAL ADDRESS: load through the real-address MMU index;
   insn->data carries the MemOp for the access size. */
static DisasJumpType op_lura(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_tl(o->out, o->in2, MMU_REAL_IDX, s->insn->data);
    return DISAS_NEXT;
}
3097 #endif
3098
/* LOAD AND ZERO RIGHTMOST BYTE: clear the low 8 bits of the operand. */
static DisasJumpType op_lzrb(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, -256);
    return DISAS_NEXT;
}
3104
op_lcbb(DisasContext * s,DisasOps * o)3105 static DisasJumpType op_lcbb(DisasContext *s, DisasOps *o)
3106 {
3107 const int64_t block_size = (1ull << (get_field(s, m3) + 6));
3108
3109 if (get_field(s, m3) > 6) {
3110 gen_program_exception(s, PGM_SPECIFICATION);
3111 return DISAS_NORETURN;
3112 }
3113
3114 tcg_gen_ori_i64(o->addr1, o->addr1, -block_size);
3115 tcg_gen_neg_i64(o->addr1, o->addr1);
3116 tcg_gen_movi_i64(o->out, 16);
3117 tcg_gen_umin_i64(o->out, o->out, o->addr1);
3118 gen_op_update1_cc_i64(s, CC_OP_LCBB, o->out);
3119 return DISAS_NEXT;
3120 }
3121
/* MONITOR CALL: validate the class (must fit in 4 bits), then notify
   the monitor-call helper on system emulation; a NOP otherwise. */
static DisasJumpType op_mc(DisasContext *s, DisasOps *o)
{
    const uint8_t monitor_class = get_field(s, i2);

    if (monitor_class & 0xf0) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

#if !defined(CONFIG_USER_ONLY)
    gen_helper_monitor_call(tcg_env, o->addr1,
                            tcg_constant_i32(monitor_class));
#endif
    /* Defaults to a NOP. */
    return DISAS_NEXT;
}
3138
/* Plain move: transfer ownership of the in2 temp to the output slot. */
static DisasJumpType op_mov2(DisasContext *s, DisasOps *o)
{
    o->out = o->in2;
    o->in2 = NULL;
    return DISAS_NEXT;
}
3145
/* Move, and additionally set access register r1 according to the
   current address-space control (ASC) mode. */
static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s, b2);
    TCGv ar1 = tcg_temp_new_i64();
    int r1 = get_field(s, r1);

    /* Transfer ownership of the in2 temp to the output slot. */
    o->out = o->in2;
    o->in2 = NULL;

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 0);
        break;
    case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 1);
        break;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        /* Copy the base access register; b2 == 0 yields zero. */
        if (b2) {
            tcg_gen_ld32u_i64(ar1, tcg_env, offsetof(CPUS390XState, aregs[b2]));
        } else {
            tcg_gen_movi_i64(ar1, 0);
        }
        break;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 2);
        break;
    }

    tcg_gen_st32_i64(ar1, tcg_env, offsetof(CPUS390XState, aregs[r1]));
    return DISAS_NEXT;
}
3177
op_movx(DisasContext * s,DisasOps * o)3178 static DisasJumpType op_movx(DisasContext *s, DisasOps *o)
3179 {
3180 o->out = o->in1;
3181 o->out2 = o->in2;
3182 o->in1 = NULL;
3183 o->in2 = NULL;
3184 return DISAS_NEXT;
3185 }
3186
/* MOVE (character): storage-to-storage move; length from the L1 field. */
static DisasJumpType op_mvc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_mvc(tcg_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}

/* MOVE RIGHT TO LEFT: length supplied at runtime in r0. */
static DisasJumpType op_mvcrl(DisasContext *s, DisasOps *o)
{
    gen_helper_mvcrl(tcg_env, regs[0], o->addr1, o->in2);
    return DISAS_NEXT;
}

/* MOVE INVERSE: storage-to-storage move; length from the L1 field. */
static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_mvcin(tcg_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}
3208
/*
 * MOVE LONG.  Operands are register pairs, so r1 and r2 must be even;
 * odd registers raise a specification exception.  The helper performs
 * the move and returns the CC.
 */
static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i32 t1, t2;

    /* r1 and r2 must be even. */
    if (r1 & 1 || r2 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_constant_i32(r1);
    t2 = tcg_constant_i32(r2);
    gen_helper_mvcl(cc_op, tcg_env, t1, t2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* MOVE LONG EXTENDED.  Same even-register-pair requirement for r1/r3. */
static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even. */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_constant_i32(r1);
    t3 = tcg_constant_i32(r3);
    gen_helper_mvcle(cc_op, tcg_env, t1, o->in2, t3);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* MOVE LONG UNICODE.  Same even-register-pair requirement for r1/r3. */
static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even. */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_constant_i32(r1);
    t3 = tcg_constant_i32(r3);
    gen_helper_mvclu(cc_op, tcg_env, t1, o->in2, t3);
    set_cc_static(s);
    return DISAS_NEXT;
}
3265
/*
 * MOVE WITH OPTIONAL SPECIFICATIONS: the third operand (specification
 * bits) is taken from regs[r3]; the helper sets the CC.
 */
static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    gen_helper_mvcos(cc_op, tcg_env, o->addr1, o->in2, regs[r3]);
    set_cc_static(s);
    return DISAS_NEXT;
}
3273
3274 #ifndef CONFIG_USER_ONLY
/*
 * MOVE TO PRIMARY.  Note that in this SS-d format the true-length
 * register number is encoded in the L1 field position, hence the
 * get_field(s, l1) below.  The key operand comes from regs[r3].
 */
static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, l1);
    int r3 = get_field(s, r3);
    gen_helper_mvcp(cc_op, tcg_env, regs[r1], o->addr1, o->in2, regs[r3]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* MOVE TO SECONDARY.  Same field layout as op_mvcp above. */
static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, l1);
    int r3 = get_field(s, r3);
    gen_helper_mvcs(cc_op, tcg_env, regs[r1], o->addr1, o->in2, regs[r3]);
    set_cc_static(s);
    return DISAS_NEXT;
}
3292 #endif
3293
/* MOVE NUMERICS: storage-to-storage; length from the L1 field. */
static DisasJumpType op_mvn(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_mvn(tcg_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}

/* MOVE WITH OFFSET: storage-to-storage; length from the L1 field. */
static DisasJumpType op_mvo(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_mvo(tcg_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}

/*
 * MOVE PAGE: r0 carries the runtime specification bits; the helper
 * performs the move and returns the CC.
 */
static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 t2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_mvpg(cc_op, tcg_env, regs[0], t1, t2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* MOVE STRING: helper moves and returns the CC. */
static DisasJumpType op_mvst(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 t2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_mvst(cc_op, tcg_env, t1, t2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* MOVE ZONES: storage-to-storage; length from the L1 field. */
static DisasJumpType op_mvz(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_mvz(tcg_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}
3337
/* 64-bit multiply, low half of the product only. */
static DisasJumpType op_mul(DisasContext *s, DisasOps *o)
{
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* Unsigned 64x64 -> 128-bit multiply: out = high half, out2 = low half. */
static DisasJumpType op_mul128(DisasContext *s, DisasOps *o)
{
    tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* Signed 64x64 -> 128-bit multiply: out = high half, out2 = low half. */
static DisasJumpType op_muls128(DisasContext *s, DisasOps *o)
{
    tcg_gen_muls2_i64(o->out2, o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3355
/* BFP multiply, short (32-bit) operands, via helper. */
static DisasJumpType op_meeb(DisasContext *s, DisasOps *o)
{
    gen_helper_meeb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* BFP multiply, short operands widened to a long result, via helper. */
static DisasJumpType op_mdeb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdeb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* BFP multiply, long (64-bit) operands, via helper. */
static DisasJumpType op_mdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* BFP multiply, extended (128-bit) operands, via helper. */
static DisasJumpType op_mxb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxb(o->out_128, tcg_env, o->in1_128, o->in2_128);
    return DISAS_NEXT;
}

/* BFP multiply, long operands widened to an extended result, via helper. */
static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxdb(o->out_128, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3385
/* BFP multiply-and-add, short: the addend comes from float register r3. */
static DisasJumpType op_maeb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
    gen_helper_maeb(o->out, tcg_env, o->in1, o->in2, r3);
    return DISAS_NEXT;
}

/* BFP multiply-and-add, long: the addend comes from float register r3. */
static DisasJumpType op_madb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg(get_field(s, r3));
    gen_helper_madb(o->out, tcg_env, o->in1, o->in2, r3);
    return DISAS_NEXT;
}

/* BFP multiply-and-subtract, short. */
static DisasJumpType op_mseb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
    gen_helper_mseb(o->out, tcg_env, o->in1, o->in2, r3);
    return DISAS_NEXT;
}

/* BFP multiply-and-subtract, long. */
static DisasJumpType op_msdb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg(get_field(s, r3));
    gen_helper_msdb(o->out, tcg_env, o->in1, o->in2, r3);
    return DISAS_NEXT;
}
3413
/*
 * LOAD NEGATIVE (integer): out = -(|in2|), i.e. negate when the input
 * is >= 0, keep it otherwise.
 */
static DisasJumpType op_nabs(DisasContext *s, DisasOps *o)
{
    TCGv_i64 z = tcg_constant_i64(0);
    TCGv_i64 n = tcg_temp_new_i64();

    tcg_gen_neg_i64(n, o->in2);
    tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
    return DISAS_NEXT;
}

/* LOAD NEGATIVE, 32-bit float: force the sign bit (bit 31) on. */
static DisasJumpType op_nabsf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
    return DISAS_NEXT;
}

/* LOAD NEGATIVE, 64-bit float: force the sign bit (bit 63) on. */
static DisasJumpType op_nabsf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
    return DISAS_NEXT;
}

/* LOAD NEGATIVE, 128-bit float: sign lives in the high doubleword. */
static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}
3442
/* AND (character): storage-to-storage AND; the helper returns the CC. */
static DisasJumpType op_nc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_nc(cc_op, tcg_env, l, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3451
/* Two's-complement negation. */
static DisasJumpType op_neg(DisasContext *s, DisasOps *o)
{
    tcg_gen_neg_i64(o->out, o->in2);
    return DISAS_NEXT;
}

/* Negate a 32-bit float by flipping the sign bit (bit 31). */
static DisasJumpType op_negf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
    return DISAS_NEXT;
}

/* Negate a 64-bit float by flipping the sign bit (bit 63). */
static DisasJumpType op_negf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
    return DISAS_NEXT;
}

/* Negate a 128-bit float: the sign lives in the high doubleword. */
static DisasJumpType op_negf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}
3476
/* OR (character): storage-to-storage OR; the helper returns the CC. */
static DisasJumpType op_oc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_oc(cc_op, tcg_env, l, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* Register OR. */
static DisasJumpType op_or(DisasContext *s, DisasOps *o)
{
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3491
/*
 * OR immediate into a sub-field of the register.  The insn table packs
 * the field's bit position in the low byte of insn->data and its width
 * in the next byte; the immediate (in2) is shifted into place first.
 */
static DisasJumpType op_ori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, t);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
3507
/*
 * OR to memory.  With the interlocked-access facility the OR is done
 * atomically in memory; otherwise it is a plain load/modify/store.
 * The memory-op flags (size/endianness) come from insn->data.
 */
static DisasJumpType op_oi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                    s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
3528
/* PACK: storage-to-storage; length from the L1 field. */
static DisasJumpType op_pack(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_pack(tcg_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}

/* PACK ASCII: L2 encodes length-1; lengths above 32 bytes are invalid. */
static DisasJumpType op_pka(DisasContext *s, DisasOps *o)
{
    int l2 = get_field(s, l2) + 1;
    TCGv_i32 l;

    /* The length must not exceed 32 bytes.  */
    if (l2 > 32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_constant_i32(l2);
    gen_helper_pka(tcg_env, o->addr1, o->in2, l);
    return DISAS_NEXT;
}

/* PACK UNICODE: length must be even and no more than 64 bytes. */
static DisasJumpType op_pku(DisasContext *s, DisasOps *o)
{
    int l2 = get_field(s, l2) + 1;
    TCGv_i32 l;

    /* The length must be even and should not exceed 64 bytes.  */
    if ((l2 & 1) || (l2 > 64)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_constant_i32(l2);
    gen_helper_pku(tcg_env, o->addr1, o->in2, l);
    return DISAS_NEXT;
}
3566
/*
 * POPULATION COUNT.  With m3 bit 8 set and the MIE3 facility present,
 * count over the whole 64-bit register with a native ctpop; otherwise
 * fall back to the helper variant.
 */
static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o)
{
    const uint8_t m3 = get_field(s, m3);

    if ((m3 & 8) && s390_has_feat(S390_FEAT_MISC_INSTRUCTION_EXT3)) {
        tcg_gen_ctpop_i64(o->out, o->in2);
    } else {
        gen_helper_popcnt(o->out, o->in2);
    }
    return DISAS_NEXT;
}
3578
3579 #ifndef CONFIG_USER_ONLY
/* PURGE TLB: fully delegated to the helper. */
static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o)
{
    gen_helper_ptlb(tcg_env);
    return DISAS_NEXT;
}
3585 #endif
3586
/*
 * ROTATE THEN INSERT SELECTED BITS family (RISBG, RISBGN, RISBHG,
 * RISBLG, distinguished by op2).  R2 is rotated left by i5, then the
 * bit range i3..i4 is inserted into R1; i4 bit 0x80 requests zeroing
 * of the non-selected bits.  Where possible the operation is lowered
 * to a single extract or deposit instead of the general mask-and-or.
 */
static DisasJumpType op_risbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s, i3);
    int i4 = get_field(s, i4);
    int i5 = get_field(s, i5);
    int do_zero = i4 & 0x80;
    uint64_t mask, imask, pmask;
    int pos, len, rot;

    /* Adjust the arguments for the specific insn.  */
    switch (s->fields.op2) {
    case 0x55: /* risbg */
    case 0x59: /* risbgn */
        i3 &= 63;
        i4 &= 63;
        pmask = ~0;
        break;
    case 0x5d: /* risbhg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0xffffffff00000000ull;
        break;
    case 0x51: /* risblg */
        i3 = (i3 & 31) + 32;
        i4 = (i4 & 31) + 32;
        pmask = 0x00000000ffffffffull;
        break;
    default:
        g_assert_not_reached();
    }

    /* MASK is the set of bits to be inserted from R2.  */
    if (i3 <= i4) {
        /* [0...i3---i4...63] */
        mask = (-1ull >> i3) & (-1ull << (63 - i4));
    } else {
        /* [0---i4...i3---63] */
        mask = (-1ull >> i3) | (-1ull << (63 - i4));
    }
    /* For RISBLG/RISBHG, the wrapping is limited to the high/low doubleword. */
    mask &= pmask;

    /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
       insns, we need to keep the other half of the register.  */
    imask = ~mask | ~pmask;
    if (do_zero) {
        imask = ~pmask;
    }

    len = i4 - i3 + 1;
    pos = 63 - i4;
    rot = i5 & 63;

    /* In some cases we can implement this with extract.  */
    if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
        tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
        return DISAS_NEXT;
    }

    /* In some cases we can implement this with deposit.  */
    if (len > 0 && (imask == 0 || ~mask == imask)) {
        /* Note that we rotate the bits to be inserted to the lsb, not to
           the position as described in the PoO.  */
        rot = (rot - pos) & 63;
    } else {
        pos = -1;
    }

    /* Rotate the input as necessary.  */
    tcg_gen_rotli_i64(o->in2, o->in2, rot);

    /* Insert the selected bits into the output.  */
    if (pos >= 0) {
        if (imask == 0) {
            tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
        } else {
            tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
        }
    } else if (imask == 0) {
        tcg_gen_andi_i64(o->out, o->in2, mask);
    } else {
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_andi_i64(o->out, o->out, imask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
    }
    return DISAS_NEXT;
}
3674
op_rosbg(DisasContext * s,DisasOps * o)3675 static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o)
3676 {
3677 int i3 = get_field(s, i3);
3678 int i4 = get_field(s, i4);
3679 int i5 = get_field(s, i5);
3680 TCGv_i64 orig_out;
3681 uint64_t mask;
3682
3683 /* If this is a test-only form, arrange to discard the result. */
3684 if (i3 & 0x80) {
3685 tcg_debug_assert(o->out != NULL);
3686 orig_out = o->out;
3687 o->out = tcg_temp_new_i64();
3688 tcg_gen_mov_i64(o->out, orig_out);
3689 }
3690
3691 i3 &= 63;
3692 i4 &= 63;
3693 i5 &= 63;
3694
3695 /* MASK is the set of bits to be operated on from R2.
3696 Take care for I3/I4 wraparound. */
3697 mask = ~0ull >> i3;
3698 if (i3 <= i4) {
3699 mask ^= ~0ull >> i4 >> 1;
3700 } else {
3701 mask |= ~(~0ull >> i4 >> 1);
3702 }
3703
3704 /* Rotate the input as necessary. */
3705 tcg_gen_rotli_i64(o->in2, o->in2, i5);
3706
3707 /* Operate. */
3708 switch (s->fields.op2) {
3709 case 0x54: /* AND */
3710 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
3711 tcg_gen_and_i64(o->out, o->out, o->in2);
3712 break;
3713 case 0x56: /* OR */
3714 tcg_gen_andi_i64(o->in2, o->in2, mask);
3715 tcg_gen_or_i64(o->out, o->out, o->in2);
3716 break;
3717 case 0x57: /* XOR */
3718 tcg_gen_andi_i64(o->in2, o->in2, mask);
3719 tcg_gen_xor_i64(o->out, o->out, o->in2);
3720 break;
3721 default:
3722 abort();
3723 }
3724
3725 /* Set the CC. */
3726 tcg_gen_andi_i64(cc_dst, o->out, mask);
3727 set_cc_nz_u64(s, cc_dst);
3728 return DISAS_NEXT;
3729 }
3730
/* Byte-swap the low 16 bits, zero-extending the result. */
static DisasJumpType op_rev16(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap16_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
    return DISAS_NEXT;
}

/* Byte-swap the low 32 bits, zero-extending the result. */
static DisasJumpType op_rev32(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap32_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
    return DISAS_NEXT;
}

/* Byte-swap all 64 bits. */
static DisasJumpType op_rev64(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap64_i64(o->out, o->in2);
    return DISAS_NEXT;
}

/* ROTATE LEFT SINGLE LOGICAL, 32-bit: rotate in 32-bit space, then
   zero-extend into the 64-bit output. */
static DisasJumpType op_rll32(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();
    TCGv_i32 to = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, o->in1);
    tcg_gen_extrl_i64_i32(t2, o->in2);
    tcg_gen_rotl_i32(to, t1, t2);
    tcg_gen_extu_i32_i64(o->out, to);
    return DISAS_NEXT;
}

/* ROTATE LEFT SINGLE LOGICAL, 64-bit. */
static DisasJumpType op_rll64(DisasContext *s, DisasOps *o)
{
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3766
3767 #ifndef CONFIG_USER_ONLY
/* RESET REFERENCE BIT EXTENDED: helper does the work and returns the CC. */
static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o)
{
    gen_helper_rrbe(cc_op, tcg_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SET ADDRESS SPACE CONTROL (FAST). */
static DisasJumpType op_sacf(DisasContext *s, DisasOps *o)
{
    gen_helper_sacf(tcg_env, o->in2);
    /* Addressing mode has changed, so end the block.  */
    return DISAS_TOO_MANY;
}
3781 #endif
3782
/*
 * SET ADDRESSING MODE.  insn->data selects the new mode: 0 -> 24-bit
 * (mask 0xffffff), 1 -> 31-bit (mask 0x7fffffff), otherwise 64-bit.
 * The mode bits are deposited into psw_mask at bit 31.
 */
static DisasJumpType op_sam(DisasContext *s, DisasOps *o)
{
    int sam = s->insn->data;
    TCGv_i64 tsam;
    uint64_t mask;

    switch (sam) {
    case 0:
        mask = 0xffffff;
        break;
    case 1:
        mask = 0x7fffffff;
        break;
    default:
        mask = -1;
        break;
    }

    /* Bizarre but true, we check the address of the current insn for the
       specification exception, not the next to be executed.  Thus the PoO
       documents that Bad Things Happen two bytes before the end.  */
    if (s->base.pc_next & ~mask) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    s->pc_tmp &= mask;

    tsam = tcg_constant_i64(sam);
    tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);

    /* Always exit the TB, since we (may have) changed execution mode.  */
    return DISAS_TOO_MANY;
}
3816
/* SET ACCESS: store the low 32 bits of in2 into access register r1. */
static DisasJumpType op_sar(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_st32_i64(o->in2, tcg_env, offsetof(CPUS390XState, aregs[r1]));
    return DISAS_NEXT;
}
3823
/* BFP subtract, short (32-bit) operands, via helper. */
static DisasJumpType op_seb(DisasContext *s, DisasOps *o)
{
    gen_helper_seb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* BFP subtract, long (64-bit) operands, via helper. */
static DisasJumpType op_sdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sdb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* BFP subtract, extended (128-bit) operands, via helper. */
static DisasJumpType op_sxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sxb(o->out_128, tcg_env, o->in1_128, o->in2_128);
    return DISAS_NEXT;
}

/* BFP square root, short, via helper. */
static DisasJumpType op_sqeb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqeb(o->out, tcg_env, o->in2);
    return DISAS_NEXT;
}

/* BFP square root, long, via helper. */
static DisasJumpType op_sqdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqdb(o->out, tcg_env, o->in2);
    return DISAS_NEXT;
}

/* BFP square root, extended, via helper. */
static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqxb(o->out_128, tcg_env, o->in2_128);
    return DISAS_NEXT;
}
3859
3860 #ifndef CONFIG_USER_ONLY
/* SERVICE CALL (SCLP): helper performs the call and returns the CC. */
static DisasJumpType op_servc(DisasContext *s, DisasOps *o)
{
    gen_helper_servc(cc_op, tcg_env, o->in2, o->in1);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SIGNAL PROCESSOR: helper performs the order and returns the CC. */
static DisasJumpType op_sigp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_sigp(cc_op, tcg_env, o->in2, r1, r3);
    set_cc_static(s);
    return DISAS_NEXT;
}
3877 #endif
3878
/*
 * STORE ON CONDITION (STOC/STOCG/STOCFH, selected via insn->data).
 * The store is skipped by branching over it when the m3 condition is
 * not fulfilled.
 */
static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;
    TCGv_i64 a, h;
    TCGLabel *lab;
    int r1;

    disas_jcc(s, &c, get_field(s, m3));

    /* We want to store when the condition is fulfilled, so branch
       out when it's not */
    c.cond = tcg_invert_cond(c.cond);

    lab = gen_new_label();
    if (c.is_64) {
        tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
    } else {
        tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
    }

    r1 = get_field(s, r1);
    a = get_address(s, 0, get_field(s, b2), get_field(s, d2));
    switch (s->insn->data) {
    case 1: /* STOCG */
        tcg_gen_qemu_st_i64(regs[r1], a, get_mem_index(s), MO_TEUQ);
        break;
    case 0: /* STOC */
        tcg_gen_qemu_st_i64(regs[r1], a, get_mem_index(s), MO_TEUL);
        break;
    case 2: /* STOCFH */
        /* Store the high 32 bits of the register. */
        h = tcg_temp_new_i64();
        tcg_gen_shri_i64(h, regs[r1], 32);
        tcg_gen_qemu_st_i64(h, a, get_mem_index(s), MO_TEUL);
        break;
    default:
        g_assert_not_reached();
    }

    gen_set_label(lab);
    return DISAS_NEXT;
}
3920
/*
 * SHIFT LEFT SINGLE (arithmetic).  insn->data gives the sign-bit
 * position (31 for the 32-bit form, 63 for the 64-bit form).  The CC
 * is computed from the pre-shift value via CC_OP_SLA.
 */
static DisasJumpType op_sla(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t;
    uint64_t sign = 1ull << s->insn->data;
    if (s->insn->data == 31) {
        /* For the 32-bit form, position the operand in the high word
           so the CC computation sees the correct sign/overflow bits. */
        t = tcg_temp_new_i64();
        tcg_gen_shli_i64(t, o->in1, 32);
    } else {
        t = o->in1;
    }
    gen_op_update2_cc_i64(s, CC_OP_SLA, t, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit.  Copy that over from the source unchanged.  */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return DISAS_NEXT;
}
3940
/* Logical shift left. */
static DisasJumpType op_sll(DisasContext *s, DisasOps *o)
{
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* Arithmetic shift right. */
static DisasJumpType op_sra(DisasContext *s, DisasOps *o)
{
    tcg_gen_sar_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* Logical shift right. */
static DisasJumpType op_srl(DisasContext *s, DisasOps *o)
{
    tcg_gen_shr_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3958
/* SET FPC: load the floating-point-control register from in2. */
static DisasJumpType op_sfpc(DisasContext *s, DisasOps *o)
{
    gen_helper_sfpc(tcg_env, o->in2);
    return DISAS_NEXT;
}

/* SET FPC AND SIGNAL. */
static DisasJumpType op_sfas(DisasContext *s, DisasOps *o)
{
    gen_helper_sfas(tcg_env, o->in2);
    return DISAS_NEXT;
}

/* SET BFP ROUNDING MODE, 2-bit form. */
static DisasJumpType op_srnm(DisasContext *s, DisasOps *o)
{
    /* Bits other than 62 and 63 are ignored. Bit 29 is set to zero.  */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0x3ull);
    gen_helper_srnm(tcg_env, o->addr1);
    return DISAS_NEXT;
}

/* SET BFP ROUNDING MODE, 3-bit form. */
static DisasJumpType op_srnmb(DisasContext *s, DisasOps *o)
{
    /* Bits 0-55 are ignored.  */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0xffull);
    gen_helper_srnm(tcg_env, o->addr1);
    return DISAS_NEXT;
}

/* SET DFP ROUNDING MODE: deposit bits 61-63 into FPC bits 4-6 directly. */
static DisasJumpType op_srnmt(DisasContext *s, DisasOps *o)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Bits other than 61-63 are ignored.  */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0x7ull);

    /* No need to call a helper, we don't implement dfp */
    tcg_gen_ld32u_i64(tmp, tcg_env, offsetof(CPUS390XState, fpc));
    tcg_gen_deposit_i64(tmp, tmp, o->addr1, 4, 3);
    tcg_gen_st32_i64(tmp, tcg_env, offsetof(CPUS390XState, fpc));
    return DISAS_NEXT;
}
4000
/*
 * SET PROGRAM MASK: the CC is taken from bits 28-29 of in1 and the
 * program mask bits are deposited into psw_mask.
 */
static DisasJumpType op_spm(DisasContext *s, DisasOps *o)
{
    tcg_gen_extrl_i64_i32(cc_op, o->in1);
    tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
    set_cc_static(s);

    tcg_gen_shri_i64(o->in1, o->in1, 24);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
    return DISAS_NEXT;
}
4011
/*
 * EXTRACT CPU TIME.  Leaves the first operand minus the CPU timer in
 * r0, the second operand address in r1, and the doubleword at the
 * third-operand address in r3.
 */
static DisasJumpType op_ectg(DisasContext *s, DisasOps *o)
{
    int b1 = get_field(s, b1);
    int d1 = get_field(s, d1);
    int b2 = get_field(s, b2);
    int d2 = get_field(s, d2);
    int r3 = get_field(s, r3);
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* fetch all operands first */
    o->in1 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in1, regs[b1], d1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in2, regs[b2], d2);
    o->addr1 = tcg_temp_new_i64();
    gen_addi_and_wrap_i64(s, o->addr1, regs[r3], 0);

    /* load the third operand into r3 before modifying anything */
    tcg_gen_qemu_ld_i64(regs[r3], o->addr1, get_mem_index(s), MO_TEUQ);

    /* subtract CPU timer from first operand and store in GR0 */
    gen_helper_stpt(tmp, tcg_env);
    tcg_gen_sub_i64(regs[0], o->in1, tmp);

    /* store second operand in GR1 */
    tcg_gen_mov_i64(regs[1], o->in2);
    return DISAS_NEXT;
}
4040
4041 #ifndef CONFIG_USER_ONLY
/* SET PSW KEY FROM ADDRESS: deposit address bits into the PSW key field. */
static DisasJumpType op_spka(DisasContext *s, DisasOps *o)
{
    tcg_gen_shri_i64(o->in2, o->in2, 4);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
    return DISAS_NEXT;
}

/* SET STORAGE KEY EXTENDED: delegated to the helper. */
static DisasJumpType op_sske(DisasContext *s, DisasOps *o)
{
    gen_helper_sske(tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/*
 * Raise a specification exception at runtime if any reserved bit in
 * the (possibly just modified) PSW mask is set; otherwise fall
 * through.
 */
static void gen_check_psw_mask(DisasContext *s)
{
    TCGv_i64 reserved = tcg_temp_new_i64();
    TCGLabel *ok = gen_new_label();

    tcg_gen_andi_i64(reserved, psw_mask, PSW_MASK_RESERVED);
    tcg_gen_brcondi_i64(TCG_COND_EQ, reserved, 0, ok);
    gen_program_exception(s, PGM_SPECIFICATION);
    gen_set_label(ok);
}
4065
/*
 * SET SYSTEM MASK: replace the top byte of the PSW mask from in2,
 * validate the result, and exit to the main loop so pending
 * interrupts are reevaluated.
 */
static DisasJumpType op_ssm(DisasContext *s, DisasOps *o)
{
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);

    gen_check_psw_mask(s);

    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    s->exit_to_mainloop = true;
    return DISAS_TOO_MANY;
}

/* STORE CPU ADDRESS: read the core id from the CPU state. */
static DisasJumpType op_stap(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, tcg_env, offsetof(CPUS390XState, core_id));
    return DISAS_NEXT;
}
4082 #endif
4083
/* STORE CLOCK: helper reads the TOD clock; CC is always 0. */
static DisasJumpType op_stck(DisasContext *s, DisasOps *o)
{
    gen_helper_stck(o->out, tcg_env);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}

/* STORE CLOCK EXTENDED: 16-byte value built from the TOD clock and
   the TOD programmable register, stored at in2/in2+8. */
static DisasJumpType op_stcke(DisasContext *s, DisasOps *o)
{
    TCGv_i64 c1 = tcg_temp_new_i64();
    TCGv_i64 c2 = tcg_temp_new_i64();
    TCGv_i64 todpr = tcg_temp_new_i64();
    gen_helper_stck(c1, tcg_env);
    /* 16 bit value store in an uint32_t (only valid bits set) */
    tcg_gen_ld32u_i64(todpr, tcg_env, offsetof(CPUS390XState, todpr));
    /* Shift the 64-bit value into its place as a zero-extended
       104-bit value.  Note that "bit positions 64-103 are always
       non-zero so that they compare differently to STCK"; we set
       the least significant bit to 1.  */
    tcg_gen_shli_i64(c2, c1, 56);
    tcg_gen_shri_i64(c1, c1, 8);
    tcg_gen_ori_i64(c2, c2, 0x10000);
    tcg_gen_or_i64(c2, c2, todpr);
    tcg_gen_qemu_st_i64(c1, o->in2, get_mem_index(s), MO_TEUQ);
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_st_i64(c2, o->in2, get_mem_index(s), MO_TEUQ);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}
4115
4116 #ifndef CONFIG_USER_ONLY
/* SET CLOCK: helper sets the TOD clock and returns the CC. */
static DisasJumpType op_sck(DisasContext *s, DisasOps *o)
{
    gen_helper_sck(cc_op, tcg_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SET CLOCK COMPARATOR. */
static DisasJumpType op_sckc(DisasContext *s, DisasOps *o)
{
    gen_helper_sckc(tcg_env, o->in2);
    return DISAS_NEXT;
}

/* SET CLOCK PROGRAMMABLE FIELD: operand taken from r0. */
static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o)
{
    gen_helper_sckpf(tcg_env, regs[0]);
    return DISAS_NEXT;
}

/* STORE CLOCK COMPARATOR. */
static DisasJumpType op_stckc(DisasContext *s, DisasOps *o)
{
    gen_helper_stckc(o->out, tcg_env);
    return DISAS_NEXT;
}

/* STORE CONTROL (64-bit): store control registers r1..r3 at in2. */
static DisasJumpType op_stctg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_stctg(tcg_env, r1, o->in2, r3);
    return DISAS_NEXT;
}

/* STORE CONTROL (32-bit): store control registers r1..r3 at in2. */
static DisasJumpType op_stctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_stctl(tcg_env, r1, o->in2, r3);
    return DISAS_NEXT;
}

/* STORE CPU ID: read the cpuid from the CPU state. */
static DisasJumpType op_stidp(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld_i64(o->out, tcg_env, offsetof(CPUS390XState, cpuid));
    return DISAS_NEXT;
}
4165
/* SET CPU TIMER. */
static DisasJumpType op_spt(DisasContext *s, DisasOps *o)
{
    gen_helper_spt(tcg_env, o->in2);
    return DISAS_NEXT;
}

/* STORE FACILITY LIST: helper writes the facility bits to low memory. */
static DisasJumpType op_stfl(DisasContext *s, DisasOps *o)
{
    gen_helper_stfl(tcg_env);
    return DISAS_NEXT;
}

/* STORE CPU TIMER: helper computes the current timer value into o->out. */
static DisasJumpType op_stpt(DisasContext *s, DisasOps *o)
{
    gen_helper_stpt(o->out, tcg_env);
    return DISAS_NEXT;
}

/* STORE SYSTEM INFORMATION: implicit operands in r0/r1; helper sets CC. */
static DisasJumpType op_stsi(DisasContext *s, DisasOps *o)
{
    gen_helper_stsi(cc_op, tcg_env, o->in2, regs[0], regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SET PREFIX. */
static DisasJumpType op_spx(DisasContext *s, DisasOps *o)
{
    gen_helper_spx(tcg_env, o->in2);
    return DISAS_NEXT;
}
/*
 * Channel-subsystem instructions.  Each defers to a helper; where the
 * architecture defines a subchannel operand it is taken from r1, as
 * visible in the calls below.  Helpers that produce a CC write it to
 * cc_op, which set_cc_static() then latches.
 */

/* CANCEL SUBCHANNEL. */
static DisasJumpType op_xsch(DisasContext *s, DisasOps *o)
{
    gen_helper_xsch(tcg_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* CLEAR SUBCHANNEL. */
static DisasJumpType op_csch(DisasContext *s, DisasOps *o)
{
    gen_helper_csch(tcg_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* HALT SUBCHANNEL. */
static DisasJumpType op_hsch(DisasContext *s, DisasOps *o)
{
    gen_helper_hsch(tcg_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* MODIFY SUBCHANNEL: o->in2 addresses the SCHIB. */
static DisasJumpType op_msch(DisasContext *s, DisasOps *o)
{
    gen_helper_msch(tcg_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* RESET CHANNEL PATH. */
static DisasJumpType op_rchp(DisasContext *s, DisasOps *o)
{
    gen_helper_rchp(tcg_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* RESUME SUBCHANNEL. */
static DisasJumpType op_rsch(DisasContext *s, DisasOps *o)
{
    gen_helper_rsch(tcg_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SET ADDRESS LIMIT: no CC change. */
static DisasJumpType op_sal(DisasContext *s, DisasOps *o)
{
    gen_helper_sal(tcg_env, regs[1]);
    return DISAS_NEXT;
}

/* SET CHANNEL MONITOR: operands in r1, r2 and the effective address. */
static DisasJumpType op_schm(DisasContext *s, DisasOps *o)
{
    gen_helper_schm(tcg_env, regs[1], regs[2], o->in2);
    return DISAS_NEXT;
}
4250
/* SIGNAL ADAPTER: not implemented; report "not operational" via CC. */
static DisasJumpType op_siga(DisasContext *s, DisasOps *o)
{
    /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
    gen_op_movi_cc(s, 3);
    return DISAS_NEXT;
}

/* STORE CHANNEL PATH STATUS: not provided; architecturally a no-op here. */
static DisasJumpType op_stcps(DisasContext *s, DisasOps *o)
{
    /* The instruction is suppressed if not provided. */
    return DISAS_NEXT;
}
4263
/* START SUBCHANNEL: subchannel in r1, ORB address in o->in2. */
static DisasJumpType op_ssch(DisasContext *s, DisasOps *o)
{
    gen_helper_ssch(tcg_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* STORE SUBCHANNEL: subchannel in r1, SCHIB destination in o->in2. */
static DisasJumpType op_stsch(DisasContext *s, DisasOps *o)
{
    gen_helper_stsch(tcg_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* STORE CHANNEL REPORT WORD. */
static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o)
{
    gen_helper_stcrw(tcg_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST PENDING INTERRUPTION: helper returns CC directly. */
static DisasJumpType op_tpi(DisasContext *s, DisasOps *o)
{
    gen_helper_tpi(cc_op, tcg_env, o->addr1);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST SUBCHANNEL: subchannel in r1, IRB destination in o->in2. */
static DisasJumpType op_tsch(DisasContext *s, DisasOps *o)
{
    gen_helper_tsch(tcg_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* CHANNEL SUBSYSTEM CALL: command block addressed by o->in2. */
static DisasJumpType op_chsc(DisasContext *s, DisasOps *o)
{
    gen_helper_chsc(tcg_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4305
/*
 * STORE PREFIX: read the prefix register from the env and mask it to
 * the architected bit positions before the wout hook stores it.
 */
static DisasJumpType op_stpx(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld_i64(o->out, tcg_env, offsetof(CPUS390XState, psa));
    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
    return DISAS_NEXT;
}
4312
/*
 * STORE THEN AND/OR SYSTEM MASK (STNSM is opcode 0xac, STOSM otherwise).
 * Stores the current system-mask byte, then ANDs or ORs the immediate
 * into the top byte of the PSW mask.
 */
static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = get_field(s, i2);
    TCGv_i64 t;

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place. */
    t = tcg_temp_new_i64();
    /* The system mask is the high byte (bits 0-7) of the PSW mask. */
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st_i64(t, o->addr1, get_mem_index(s), MO_UB);

    if (s->fields.op == 0xac) {
        /* STNSM: AND the immediate into the system-mask byte only. */
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        /* STOSM: OR the immediate into the system-mask byte. */
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }

    gen_check_psw_mask(s);

    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
    s->exit_to_mainloop = true;
    return DISAS_TOO_MANY;
}
4338
/*
 * STORE USING REAL ADDRESS: bypass DAT via MMU_REAL_IDX; the access
 * size comes from the insn table (s->insn->data).
 */
static DisasJumpType op_stura(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->in1, o->in2, MMU_REAL_IDX, s->insn->data);

    if (s->base.tb->flags & FLAG_MASK_PER_STORE_REAL) {
        /* PER storage-alteration-using-real-address event is enabled:
           sync PC/CC state and raise the event in the helper. */
        update_cc_op(s);
        update_psw_addr(s);
        gen_helper_per_store_real(tcg_env, tcg_constant_i32(s->ilen));
        return DISAS_NORETURN;
    }
    return DISAS_NEXT;
}
4351 #endif
4352
/* STORE FACILITY LIST EXTENDED: helper writes the list and returns CC. */
static DisasJumpType op_stfle(DisasContext *s, DisasOps *o)
{
    gen_helper_stfle(cc_op, tcg_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* Store low 8 bits of in1 at address in2. */
static DisasJumpType op_st8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s), MO_UB);
    return DISAS_NEXT;
}

/* Store low 16 bits of in1 (big-endian) at address in2. */
static DisasJumpType op_st16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s), MO_TEUW);
    return DISAS_NEXT;
}

/* 32-bit store; insn->data may add extra MemOp flags from the table. */
static DisasJumpType op_st32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->in1, o->in2, get_mem_index(s),
                       MO_TEUL | s->insn->data);
    return DISAS_NEXT;
}

/* 64-bit store; insn->data may add extra MemOp flags from the table. */
static DisasJumpType op_st64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s),
                        MO_TEUQ | s->insn->data);
    return DISAS_NEXT;
}
4385
/* STORE ACCESS MULTIPLE: store access registers r1..r3 at o->in2. */
static DisasJumpType op_stam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_stam(tcg_env, r1, o->in2, r3);
    return DISAS_NEXT;
}
4394
/*
 * STORE CHARACTERS UNDER MASK.  The m3 mask selects which bytes of the
 * source register are stored to consecutive memory locations.  The base
 * bit offset within the register comes from the insn table in
 * s->insn->data (distinguishes the high-word variant of the insn —
 * NOTE(review): presumed 32 for STCMH, 0 otherwise; confirm in the
 * insn-data table).  Contiguous masks are emitted as a single store;
 * anything else falls back to one byte store per selected byte.
 */
static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    int pos, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Shift amount that brings the lowest selected byte to bit 0. */
    pos = base + ctz32(m3) * 8;
    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit store. */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_TEUL);
        break;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit store. */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_TEUW);
        break;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit store. */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_UB);
        break;

    default:
        /* This is going to be a sequence of shifts and stores. */
        /* Walk the mask msb-first, storing each selected byte and
           advancing the destination address one byte at a time. */
        pos = base + 32 - 8;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_shri_i64(tmp, o->in1, pos);
                tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_UB);
                tcg_gen_addi_i64(o->in2, o->in2, 1);
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }
    return DISAS_NEXT;
}
4442
/*
 * STORE MULTIPLE: unrolled at translate time — emit one store per
 * register from r1 through r3 (wrapping 15 -> 0), advancing the
 * address by the element size (4 or 8 bytes, from the insn table).
 */
static DisasJumpType op_stm(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    int size = s->insn->data;
    TCGv_i64 tsize = tcg_constant_i64(size);

    while (1) {
        tcg_gen_qemu_st_i64(regs[r1], o->in2, get_mem_index(s),
                            size == 8 ? MO_TEUQ : MO_TEUL);
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, tsize);
        r1 = (r1 + 1) & 15;
    }

    return DISAS_NEXT;
}
4462
/*
 * STORE MULTIPLE HIGH: like STORE MULTIPLE, but each stored word is the
 * high half of the 64-bit register, exposed here by shifting left 32
 * and letting the big-endian 32-bit store take the top bits.
 */
static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_constant_i64(4);
    TCGv_i64 t32 = tcg_constant_i64(32);

    while (1) {
        tcg_gen_shl_i64(t, regs[r1], t32);
        tcg_gen_qemu_st_i64(t, o->in2, get_mem_index(s), MO_TEUL);
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }
    return DISAS_NEXT;
}
4482
/*
 * STORE PAIR TO QUADWORD: build a 128-bit value from the register pair
 * (o->out is the high doubleword, o->out2 the low — concat takes
 * lo, hi) and store it with the architecturally required alignment.
 */
static DisasJumpType op_stpq(DisasContext *s, DisasOps *o)
{
    TCGv_i128 t16 = tcg_temp_new_i128();

    tcg_gen_concat_i64_i128(t16, o->out2, o->out);
    tcg_gen_qemu_st_i128(t16, o->in2, get_mem_index(s),
                         MO_TE | MO_128 | MO_ALIGN);
    return DISAS_NEXT;
}

/* SEARCH STRING: helper scans memory and sets CC. */
static DisasJumpType op_srst(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_srst(tcg_env, r1, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SEARCH STRING UNICODE: 16-bit-character variant of the above. */
static DisasJumpType op_srstu(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_srstu(tcg_env, r1, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4512
/* Plain subtraction; CC (if any) is computed by the cout hook. */
static DisasJumpType op_sub(DisasContext *s, DisasOps *o)
{
    tcg_gen_sub_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/*
 * Unsigned 64-bit subtraction tracking the borrow: sub2 with a zeroed
 * high input leaves 0 or -1 in cc_src, which CC_OP_SUBU expects.
 */
static DisasJumpType op_subu64(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_sub2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    return DISAS_NEXT;
}
4525
4526 /* Compute borrow (0, -1) into cc_src. */
/* Compute borrow (0, -1) into cc_src. */
static void compute_borrow(DisasContext *s)
{
    /* Note the deliberate fall-throughs: default materializes the CC
       and continues as CC_OP_STATIC, which extracts the carry bit and
       continues as CC_OP_ADDU to convert carry into borrow. */
    switch (s->cc_op) {
    case CC_OP_SUBU:
        /* The borrow value is already in cc_src (0,-1). */
        break;
    default:
        gen_op_calc_cc(s);
        /* fall through */
    case CC_OP_STATIC:
        /* The carry flag is the msb of CC; compute into cc_src. */
        tcg_gen_extu_i32_i64(cc_src, cc_op);
        tcg_gen_shri_i64(cc_src, cc_src, 1);
        /* fall through */
    case CC_OP_ADDU:
        /* Convert carry (1,0) to borrow (0,-1). */
        tcg_gen_subi_i64(cc_src, cc_src, 1);
        break;
    }
}
4547
/* SUBTRACT WITH BORROW, 32-bit: out = in1 + borrow - in2. */
static DisasJumpType op_subb32(DisasContext *s, DisasOps *o)
{
    compute_borrow(s);

    /* Borrow is {0, -1}, so add to subtract. */
    tcg_gen_add_i64(o->out, o->in1, cc_src);
    tcg_gen_sub_i64(o->out, o->out, o->in2);
    return DISAS_NEXT;
}

/* SUBTRACT WITH BORROW, 64-bit, tracking the new borrow in cc_src. */
static DisasJumpType op_subb64(DisasContext *s, DisasOps *o)
{
    compute_borrow(s);

    /*
     * Borrow is {0, -1}, so add to subtract; replicate the
     * borrow input to produce 128-bit -1 for the addition.
     */
    TCGv_i64 zero = tcg_constant_i64(0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, cc_src);
    tcg_gen_sub2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);

    return DISAS_NEXT;
}
4572
/*
 * SUPERVISOR CALL: record the SVC interruption code and instruction
 * length in the env, then raise EXCP_SVC.  PC and CC state must be
 * synced first since the exception leaves the TB.
 */
static DisasJumpType op_svc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t;

    update_psw_addr(s);
    update_cc_op(s);

    /* Low byte of the i1 immediate is the SVC number. */
    t = tcg_constant_i32(get_field(s, i1) & 0xff);
    tcg_gen_st_i32(t, tcg_env, offsetof(CPUS390XState, int_svc_code));

    t = tcg_constant_i32(s->ilen);
    tcg_gen_st_i32(t, tcg_env, offsetof(CPUS390XState, int_svc_ilen));

    gen_exception(EXCP_SVC);
    return DISAS_NORETURN;
}
4589
op_tam(DisasContext * s,DisasOps * o)4590 static DisasJumpType op_tam(DisasContext *s, DisasOps *o)
4591 {
4592 int cc = 0;
4593
4594 cc |= (s->base.tb->flags & FLAG_MASK_64) ? 2 : 0;
4595 cc |= (s->base.tb->flags & FLAG_MASK_32) ? 1 : 0;
4596 gen_op_movi_cc(s, cc);
4597 return DISAS_NEXT;
4598 }
4599
/* TEST DATA CLASS (short BFP): helper classifies in1 against mask in2. */
static DisasJumpType op_tceb(DisasContext *s, DisasOps *o)
{
    gen_helper_tceb(cc_op, tcg_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST DATA CLASS (long BFP). */
static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcdb(cc_op, tcg_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST DATA CLASS (extended BFP): 128-bit value in in1_128. */
static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcxb(cc_op, tcg_env, o->in1_128, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4620
4621 #ifndef CONFIG_USER_ONLY
4622
/* TEST BLOCK: helper validates/clears the addressed page, sets CC. */
static DisasJumpType op_testblock(DisasContext *s, DisasOps *o)
{
    gen_helper_testblock(cc_op, tcg_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST PROTECTION: helper probes access rights for addr1/in2, sets CC. */
static DisasJumpType op_tprot(DisasContext *s, DisasOps *o)
{
    gen_helper_tprot(cc_op, tcg_env, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4636
4637 #endif
4638
/* TEST DECIMAL: l1 field is length-1, so pass l1 + 1 bytes. */
static DisasJumpType op_tp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l1 = tcg_constant_i32(get_field(s, l1) + 1);

    gen_helper_tp(cc_op, tcg_env, o->addr1, l1);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TRANSLATE: table lookup over l+1 bytes at addr1 using table at in2. */
static DisasJumpType op_tr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_tr(tcg_env, l, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/*
 * TRANSLATE EXTENDED: helper returns the updated address/length pair
 * packed in an i128, unpacked back into the out/out2 register pair.
 */
static DisasJumpType op_tre(DisasContext *s, DisasOps *o)
{
    TCGv_i128 pair = tcg_temp_new_i128();

    gen_helper_tre(pair, tcg_env, o->out, o->out2, o->in2);
    tcg_gen_extr_i128_i64(o->out2, o->out, pair);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TRANSLATE AND TEST: forward scan; helper produces CC. */
static DisasJumpType op_trt(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_trt(cc_op, tcg_env, l, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TRANSLATE AND TEST REVERSE: as above but scanning backwards. */
static DisasJumpType op_trtr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_trtr(cc_op, tcg_env, l, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4684
/*
 * TROO/TROT/TRTO/TRTT (TRANSLATE ONE/TWO TO ONE/TWO): the low two
 * opcode bits select the source/destination character sizes, passed to
 * the helper as "sizes".  With the ETF2-enhancement facility, m3 bit 0
 * requests processing without a test character; that is signalled to
 * the helper with tst = -1.  Otherwise the test character comes from
 * r0, truncated to the destination character width.
 */
static DisasJumpType op_trXX(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
    TCGv_i32 sizes = tcg_constant_i32(s->insn->opc & 3);
    TCGv_i32 tst = tcg_temp_new_i32();
    int m3 = get_field(s, m3);

    if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
        /* Without the facility, m3 is ignored. */
        m3 = 0;
    }
    if (m3 & 1) {
        /* -1 is the "no test character" sentinel for the helper. */
        tcg_gen_movi_i32(tst, -1);
    } else {
        tcg_gen_extrl_i64_i32(tst, regs[0]);
        if (s->insn->opc & 3) {
            tcg_gen_ext8u_i32(tst, tst);
        } else {
            tcg_gen_ext16u_i32(tst, tst);
        }
    }
    gen_helper_trXX(cc_op, tcg_env, r1, r2, tst, sizes);

    set_cc_static(s);
    return DISAS_NEXT;
}
4711
/*
 * TEST AND SET: atomically exchange the byte with 0xff; CC is the old
 * leftmost bit of that byte (bit 7 of the fetched value here, since
 * extract counts from the lsb).
 */
static DisasJumpType op_ts(DisasContext *s, DisasOps *o)
{
    TCGv_i32 ff = tcg_constant_i32(0xff);
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_atomic_xchg_i32(t1, o->in2, ff, get_mem_index(s), MO_UB);
    tcg_gen_extract_i32(cc_op, t1, 7, 1);
    set_cc_static(s);
    return DISAS_NEXT;
}
4722
/* UNPACK: convert packed decimal at in2 to zoned at addr1; no CC. */
static DisasJumpType op_unpk(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_unpk(tcg_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}

/* UNPACK ASCII: l1 is length-1; lengths above 32 are a specification
   exception detected at translation time. */
static DisasJumpType op_unpka(DisasContext *s, DisasOps *o)
{
    int l1 = get_field(s, l1) + 1;
    TCGv_i32 l;

    /* The length must not exceed 32 bytes.  */
    if (l1 > 32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_constant_i32(l1);
    gen_helper_unpka(cc_op, tcg_env, o->addr1, l, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* UNPACK UNICODE: 2-byte characters, so the length must be even and
   at most 64 bytes; violations raise a specification exception. */
static DisasJumpType op_unpku(DisasContext *s, DisasOps *o)
{
    int l1 = get_field(s, l1) + 1;
    TCGv_i32 l;

    /* The length must be even and should not exceed 64 bytes.  */
    if ((l1 & 1) || (l1 > 64)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_constant_i32(l1);
    gen_helper_unpku(cc_op, tcg_env, o->addr1, l, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4762
4763
/*
 * EXCLUSIVE OR (character): XC of a field with itself zeroes it, which
 * is a very common idiom, so inline that case as direct zero stores
 * (for lengths up to 32); everything else goes through the helper.
 */
static DisasJumpType op_xc(DisasContext *s, DisasOps *o)
{
    int d1 = get_field(s, d1);
    int d2 = get_field(s, d2);
    int b1 = get_field(s, b1);
    int b2 = get_field(s, b2);
    int l = get_field(s, l1);
    TCGv_i32 t32;

    o->addr1 = get_address(s, 0, b1, d1);

    /* If the addresses are identical, this is a store/memset of zero.  */
    if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
        o->in2 = tcg_constant_i64(0);

        /* l is encoded as length-1; emit stores in descending power-of-2
           sizes, advancing the address between stores. */
        l++;
        while (l >= 8) {
            tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UQ);
            l -= 8;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 8);
            }
        }
        if (l >= 4) {
            tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UL);
            l -= 4;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 4);
            }
        }
        if (l >= 2) {
            tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UW);
            l -= 2;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 2);
            }
        }
        if (l) {
            tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UB);
        }
        /* Result of x ^ x is zero, hence CC 0. */
        gen_op_movi_cc(s, 0);
        return DISAS_NEXT;
    }

    /* But in general we'll defer to a helper.  */
    o->in2 = get_address(s, 0, b2, d2);
    t32 = tcg_constant_i32(l);
    gen_helper_xc(cc_op, tcg_env, t32, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4815
/* Plain XOR; CC (if any) is computed by the cout hook. */
static DisasJumpType op_xor(DisasContext *s, DisasOps *o)
{
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/*
 * XOR immediate against a field of the register: insn->data packs the
 * field's bit offset (low byte) and width (high bits).  CC reflects
 * only the manipulated bits.
 */
static DisasJumpType op_xori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;
    TCGv_i64 t = tcg_temp_new_i64();

    /* Move the immediate into position before XORing. */
    tcg_gen_shli_i64(t, o->in2, shift);
    tcg_gen_xor_i64(o->out, o->in1, t);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
4837
/*
 * XOR immediate into memory.  Without interlocked-access-2 this is a
 * non-atomic load/xor/store sequence; with the facility, a single
 * atomic fetch-xor.  insn->data supplies the MemOp for the access.
 */
static DisasJumpType op_xi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
4858
/* Produce a constant zero output (used by insns that clear a result). */
static DisasJumpType op_zero(DisasContext *s, DisasOps *o)
{
    o->out = tcg_constant_i64(0);
    return DISAS_NEXT;
}
4864
op_zero2(DisasContext * s,DisasOps * o)4865 static DisasJumpType op_zero2(DisasContext *s, DisasOps *o)
4866 {
4867 o->out = tcg_constant_i64(0);
4868 o->out2 = o->out;
4869 return DISAS_NEXT;
4870 }
4871
4872 #ifndef CONFIG_USER_ONLY
/*
 * zPCI instructions.  All defer to helpers; register numbers and, for
 * the memory-operand forms, the access-register number from b2 are
 * passed as constants.  Helpers set CC where applicable.
 */

/* CALL LOGICAL PROCESSOR. */
static DisasJumpType op_clp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_clp(tcg_env, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* PCI LOAD. */
static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_pcilg(tcg_env, r1, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* PCI STORE. */
static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_pcistg(tcg_env, r1, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* STORE PCI FUNCTION CONTROLS. */
static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));

    gen_helper_stpcifc(tcg_env, r1, o->addr1, ar);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SET INTERRUPTION CONTROLS: no CC change. */
static DisasJumpType op_sic(DisasContext *s, DisasOps *o)
{
    gen_helper_sic(tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* REFRESH PCI TRANSLATIONS. */
static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_rpcit(tcg_env, r1, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* PCI STORE BLOCK. */
static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
    TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));

    gen_helper_pcistb(tcg_env, r1, r3, o->addr1, ar);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* MODIFY PCI FUNCTION CONTROLS. */
static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));

    gen_helper_mpcifc(tcg_env, r1, o->addr1, ar);
    set_cc_static(s);
    return DISAS_NEXT;
}
4948 #endif
4949
4950 #include "translate_vx.c.inc"
4951
4952 /* ====================================================================== */
4953 /* The "Cc OUTput" generators. Given the generated output (and in some cases
4954 the original inputs), update the various cc data structures in order to
4955 be able to compute the new condition code. */
4956
/* CC from a 32-bit absolute-value result. */
static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}

/* CC from a 64-bit absolute-value result. */
static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}

/* CC from signed 32-bit addition: both inputs and the result are
   needed to detect overflow. */
static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}

/* CC from signed 64-bit addition. */
static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}

/* CC from unsigned 32-bit addition: split the 64-bit sum into carry
   (high half) and result (low half) for CC_OP_ADDU. */
static void cout_addu32(DisasContext *s, DisasOps *o)
{
    tcg_gen_shri_i64(cc_src, o->out, 32);
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, cc_dst);
}

/* CC from unsigned 64-bit addition: carry already tracked in cc_src. */
static void cout_addu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, o->out);
}

/* CC from signed 32-bit comparison. */
static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}

/* CC from signed 64-bit comparison. */
static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}

/* CC from unsigned 32-bit comparison. */
static void cout_cmpu32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
}

/* CC from unsigned 64-bit comparison. */
static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}
5008
/* CC from a 32-bit BFP result. */
static void cout_f32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
}

/* CC from a 64-bit BFP result. */
static void cout_f64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
}

/* CC from a 128-bit BFP result held in the out/out2 pair. */
static void cout_f128(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
}

/* CC from a 32-bit negative-absolute-value result. */
static void cout_nabs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
}

/* CC from a 64-bit negative-absolute-value result. */
static void cout_nabs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
}

/* CC from a 32-bit complement (negation) result. */
static void cout_neg32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
}

/* CC from a 64-bit complement (negation) result. */
static void cout_neg64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
}

/* CC = zero / not-zero of the low 32 bits of the result. */
static void cout_nz32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
}

/* CC = zero / not-zero of the 64-bit result. */
static void cout_nz64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
}
5054
/* CC from the sign of a 32-bit result (compare against zero). */
static void cout_s32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
}

/* CC from the sign of a 64-bit result. */
static void cout_s64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
}

/* CC from signed 32-bit subtraction: inputs plus result for overflow. */
static void cout_subs32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
}

/* CC from signed 64-bit subtraction. */
static void cout_subs64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
}

/* CC from unsigned 32-bit subtraction: arithmetic shift propagates the
   borrow (0 or -1) into cc_src, low half of the result into cc_dst. */
static void cout_subu32(DisasContext *s, DisasOps *o)
{
    tcg_gen_sari_i64(cc_src, o->out, 32);
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, cc_dst);
}

/* CC from unsigned 64-bit subtraction: borrow already in cc_src. */
static void cout_subu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, o->out);
}

/* CC from TEST UNDER MASK, 32-bit operands. */
static void cout_tm32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
}

/* CC from TEST UNDER MASK, 64-bit operands. */
static void cout_tm64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
}

/* CC from a signed 32-bit multiplication result. */
static void cout_muls32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_MULS_32, o->out);
}

/* CC from a signed 64-bit multiplication. */
static void cout_muls64(DisasContext *s, DisasOps *o)
{
    /* out contains "high" part, out2 contains "low" part of 128 bit result */
    gen_op_update2_cc_i64(s, CC_OP_MULS_64, o->out, o->out2);
}
5107
5108 /* ====================================================================== */
5109 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
5110 with the TCG register to which we will write. Used in combination with
5111 the "wout" generators, in some cases we need a new temporary, and in
5112 some cases we can write to a TCG global. */
5113
/* Allocate a fresh temporary as the output. */
static void prep_new(DisasContext *s, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}
#define SPEC_prep_new 0

/* Allocate a fresh pair of temporaries as the two output halves. */
static void prep_new_P(DisasContext *s, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}
#define SPEC_prep_new_P 0

/* Allocate a fresh 128-bit temporary as the output. */
static void prep_new_x(DisasContext *s, DisasOps *o)
{
    o->out_128 = tcg_temp_new_i128();
}
#define SPEC_prep_new_x 0

/* Write directly into general register r1. */
static void prep_r1(DisasContext *s, DisasOps *o)
{
    o->out = regs[get_field(s, r1)];
}
#define SPEC_prep_r1 0

/* Write directly into the even/odd register pair r1/r1+1; the SPEC
   macro enforces that r1 is even. */
static void prep_r1_P(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->out = regs[r1];
    o->out2 = regs[r1 + 1];
}
#define SPEC_prep_r1_P SPEC_r1_even
5146
5147 /* ====================================================================== */
5148 /* The "Write OUTput" generators. These generally perform some non-trivial
5149 copy of data to TCG globals, or to main memory. The trivial cases are
5150 generally handled by having a "prep" generator install the TCG global
5151 as the destination of the operation. */
5152
/* Store the 64-bit result into general register r1. */
static void wout_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->out);
}
#define SPEC_wout_r1 0

/* Store the secondary 64-bit result (out2) into general register r1. */
static void wout_out2_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->out2);
}
#define SPEC_wout_out2_r1 0

/* Replace only the low 8 bits of r1 with the low 8 bits of the result. */
static void wout_r1_8(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}
#define SPEC_wout_r1_8 0

/* Replace only the low 16 bits of r1 with the low 16 bits of the result. */
static void wout_r1_16(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
}
#define SPEC_wout_r1_16 0

/* Store the low 32 bits of the result into the low half of r1. */
static void wout_r1_32(DisasContext *s, DisasOps *o)
{
    store_reg32_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_r1_32 0

/* Store the low 32 bits of the result into the HIGH half of r1. */
static void wout_r1_32h(DisasContext *s, DisasOps *o)
{
    store_reg32h_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_r1_32h 0

/* Store two 32-bit results into the low halves of the pair r1/r1+1. */
static void wout_r1_P32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64(r1 + 1, o->out2);
}
#define SPEC_wout_r1_P32 SPEC_r1_even

/* Split a 64-bit result across the pair: low 32 bits -> r1+1, high -> r1. */
static void wout_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    TCGv_i64 t = tcg_temp_new_i64();
    store_reg32_i64(r1 + 1, o->out);
    tcg_gen_shri_i64(t, o->out, 32);
    store_reg32_i64(r1, t);
}
#define SPEC_wout_r1_D32 SPEC_r1_even

/* Split a 128-bit result across the pair: low 64 -> r1+1, high 64 -> r1. */
static void wout_r1_D64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_extr_i128_i64(regs[r1 + 1], regs[r1], o->out_128);
}
#define SPEC_wout_r1_D64 SPEC_r1_even
5215
/* Store two 32-bit results into the low halves of the pair r3/r3+1. */
static void wout_r3_P32(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    store_reg32_i64(r3, o->out);
    store_reg32_i64(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P32 SPEC_r3_even

/* Store two 64-bit results into the register pair r3/r3+1. */
static void wout_r3_P64(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    store_reg(r3, o->out);
    store_reg(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P64 SPEC_r3_even

/* Store a short (32-bit) FP result into float register r1. */
static void wout_e1(DisasContext *s, DisasOps *o)
{
    store_freg32_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_e1 0

/* Store a long (64-bit) FP result into float register r1. */
static void wout_f1(DisasContext *s, DisasOps *o)
{
    store_freg(get_field(s, r1), o->out);
}
#define SPEC_wout_f1 0

/* Store an extended (128-bit) FP result into the register pair f1/f1+2. */
static void wout_x1(DisasContext *s, DisasOps *o)
{
    int f1 = get_field(s, r1);

    /* Split out_128 into out+out2 for cout_f128. */
    tcg_debug_assert(o->out == NULL);
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(o->out2, o->out, o->out_128);
    store_freg(f1, o->out);
    store_freg(f1 + 2, o->out2);
}
#define SPEC_wout_x1 SPEC_r1_f128

/* As wout_x1, but the result already arrives split in out (hi) / out2 (lo). */
static void wout_x1_P(DisasContext *s, DisasOps *o)
{
    int f1 = get_field(s, r1);
    store_freg(f1, o->out);
    store_freg(f1 + 2, o->out2);
}
#define SPEC_wout_x1_P SPEC_r1_f128
5266
/* Store the 32-bit result into r1, but only when r1 and r2 differ. */
static void wout_cond_r1r2_32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);

    if (r1 == r2) {
        return;
    }
    store_reg32_i64(r1, o->out);
}
#define SPEC_wout_cond_r1r2_32 0

/* Store the short FP result into f1, but only when r1 and r2 differ. */
static void wout_cond_e1e2(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);

    if (r1 == r2) {
        return;
    }
    store_freg32_i64(r1, o->out);
}
#define SPEC_wout_cond_e1e2 0
5282
/* Store the low 8 bits of the result to memory at addr1. */
static void wout_m1_8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_UB);
}
#define SPEC_wout_m1_8 0

/* Store the low 16 bits of the result to memory at addr1. */
static void wout_m1_16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUW);
}
#define SPEC_wout_m1_16 0

#ifndef CONFIG_USER_ONLY
/* 16-bit store requiring natural alignment (system-mode only). */
static void wout_m1_16a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN);
}
#define SPEC_wout_m1_16a 0
#endif

/* Store the low 32 bits of the result to memory at addr1. */
static void wout_m1_32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUL);
}
#define SPEC_wout_m1_32 0

#ifndef CONFIG_USER_ONLY
/* 32-bit store requiring natural alignment (system-mode only). */
static void wout_m1_32a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_wout_m1_32a 0
#endif

/* Store the full 64-bit result to memory at addr1. */
static void wout_m1_64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUQ);
}
#define SPEC_wout_m1_64 0

#ifndef CONFIG_USER_ONLY
/* 64-bit store requiring natural alignment (system-mode only). */
static void wout_m1_64a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUQ | MO_ALIGN);
}
#define SPEC_wout_m1_64a 0
#endif

/* Store the low 32 bits of the result to the address held in in2. */
static void wout_m2_32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->in2, get_mem_index(s), MO_TEUL);
}
#define SPEC_wout_m2_32 0

/* Copy the (unmodified) in2 operand into general register r1. */
static void wout_in2_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->in2);
}
#define SPEC_wout_in2_r1 0

/* Copy the low 32 bits of in2 into the low half of r1. */
static void wout_in2_r1_32(DisasContext *s, DisasOps *o)
{
    store_reg32_i64(get_field(s, r1), o->in2);
}
#define SPEC_wout_in2_r1_32 0
5348
5349 /* ====================================================================== */
5350 /* The "INput 1" generators. These load the first operand to an insn. */
5351
/* in1 = a fresh copy of general register r1. */
static void in1_r1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r1));
}
#define SPEC_in1_r1 0

/* in1 = the r1 register itself ("o" = original, no copy made). */
static void in1_r1_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r1)];
}
#define SPEC_in1_r1_o 0

/* in1 = the low 32 bits of r1, sign-extended to 64 bits. */
static void in1_r1_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1)]);
}
#define SPEC_in1_r1_32s 0

/* in1 = the low 32 bits of r1, zero-extended to 64 bits. */
static void in1_r1_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1)]);
}
#define SPEC_in1_r1_32u 0

/* in1 = the high 32 bits of r1 (shifted right into the low half). */
static void in1_r1_sr32(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(s, r1)], 32);
}
#define SPEC_in1_r1_sr32 0

/* in1 = a fresh copy of the odd register of the pair (r1 + 1). */
static void in1_r1p1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r1) + 1);
}
#define SPEC_in1_r1p1 SPEC_r1_even

/* in1 = the r1+1 register itself (no copy). */
static void in1_r1p1_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r1) + 1];
}
#define SPEC_in1_r1p1_o SPEC_r1_even

/* in1 = low 32 bits of r1+1, sign-extended. */
static void in1_r1p1_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1) + 1]);
}
#define SPEC_in1_r1p1_32s SPEC_r1_even

/* in1 = low 32 bits of r1+1, zero-extended. */
static void in1_r1p1_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1) + 1]);
}
#define SPEC_in1_r1p1_32u SPEC_r1_even

/* in1 = 64-bit value assembled from the pair: r1 high half, r1+1 low half. */
static void in1_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
}
#define SPEC_in1_r1_D32 SPEC_r1_even
5418
/* in1 = a fresh copy of general register r2. */
static void in1_r2(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r2));
}
#define SPEC_in1_r2 0

/* in1 = the high 32 bits of r2 (shifted into the low half). */
static void in1_r2_sr32(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(s, r2)], 32);
}
#define SPEC_in1_r2_sr32 0

/* in1 = low 32 bits of r2, zero-extended. */
static void in1_r2_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r2)]);
}
#define SPEC_in1_r2_32u 0

/* in1 = a fresh copy of general register r3. */
static void in1_r3(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r3));
}
#define SPEC_in1_r3 0

/* in1 = the r3 register itself (no copy). */
static void in1_r3_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r3)];
}
#define SPEC_in1_r3_o 0

/* in1 = low 32 bits of r3, sign-extended. */
static void in1_r3_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r3)]);
}
#define SPEC_in1_r3_32s 0

/* in1 = low 32 bits of r3, zero-extended. */
static void in1_r3_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r3)]);
}
#define SPEC_in1_r3_32u 0

/* in1 = 64-bit value assembled from the pair: r3 high half, r3+1 low half. */
static void in1_r3_D32(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
}
#define SPEC_in1_r3_D32 SPEC_r3_even

/* in1 = the high 32 bits of r3 (shifted into the low half). */
static void in1_r3_sr32(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(s, r3)], 32);
}
#define SPEC_in1_r3_sr32 0
5479
/* in1 = short (32-bit) FP operand from float register r1. */
static void in1_e1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(s, r1));
}
#define SPEC_in1_e1 0

/* in1 = long (64-bit) FP operand from float register r1. */
static void in1_f1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r1));
}
#define SPEC_in1_f1 0

/* in1_128 = extended (128-bit) FP operand from the f1 register pair. */
static void in1_x1(DisasContext *s, DisasOps *o)
{
    o->in1_128 = load_freg_128(get_field(s, r1));
}
#define SPEC_in1_x1 SPEC_r1_f128

/* Load the high double word of an extended (128-bit) format FP number */
static void in1_x2h(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r2));
}
#define SPEC_in1_x2h SPEC_r2_f128

/* in1 = long (64-bit) FP operand from float register r3. */
static void in1_f3(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r3));
}
#define SPEC_in1_f3 0

/* addr1 = effective address from base b1 + displacement d1 (no index). */
static void in1_la1(DisasContext *s, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
}
#define SPEC_in1_la1 0

/* addr1 = effective address from x2 (if present) + b2 + d2. */
static void in1_la2(DisasContext *s, DisasOps *o)
{
    int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
}
#define SPEC_in1_la2 0
5523
/* in1 = 8-bit zero-extended load from the first-operand address (b1+d1). */
static void in1_m1_8u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_UB);
}
#define SPEC_in1_m1_8u 0

/* in1 = 16-bit sign-extended load from the first-operand address. */
static void in1_m1_16s(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TESW);
}
#define SPEC_in1_m1_16s 0

/* in1 = 16-bit zero-extended load from the first-operand address. */
static void in1_m1_16u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUW);
}
#define SPEC_in1_m1_16u 0

/* in1 = 32-bit sign-extended load from the first-operand address. */
static void in1_m1_32s(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TESL);
}
#define SPEC_in1_m1_32s 0

/* in1 = 32-bit zero-extended load from the first-operand address. */
static void in1_m1_32u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUL);
}
#define SPEC_in1_m1_32u 0

/* in1 = 64-bit load from the first-operand address. */
static void in1_m1_64(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUQ);
}
#define SPEC_in1_m1_64 0
5571
5572 /* ====================================================================== */
5573 /* The "INput 2" generators. These load the second operand to an insn. */
5574
/* in2 = the r1 register itself (no copy). */
static void in2_r1_o(DisasContext *s, DisasOps *o)
{
    o->in2 = regs[get_field(s, r1)];
}
#define SPEC_in2_r1_o 0

/* in2 = low 16 bits of r1, zero-extended. */
static void in2_r1_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r1)]);
}
#define SPEC_in2_r1_16u 0

/* in2 = low 32 bits of r1, zero-extended. */
static void in2_r1_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r1)]);
}
#define SPEC_in2_r1_32u 0

/* in2 = 64-bit value assembled from the pair: r1 high half, r1+1 low half. */
static void in2_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
}
#define SPEC_in2_r1_D32 SPEC_r1_even
5602
/* in2 = a fresh copy of general register r2. */
static void in2_r2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_reg(get_field(s, r2));
}
#define SPEC_in2_r2 0

/* in2 = the r2 register itself (no copy). */
static void in2_r2_o(DisasContext *s, DisasOps *o)
{
    o->in2 = regs[get_field(s, r2)];
}
#define SPEC_in2_r2_o 0
5614
/*
 * in2 = a copy of r2, but only when r2 is non-zero; for r2 == 0 the
 * operand is deliberately left unset (NULL) for the op to special-case.
 */
static void in2_r2_nz(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);

    if (r2 == 0) {
        return;
    }
    o->in2 = load_reg(r2);
}
#define SPEC_in2_r2_nz 0
5623
/* in2 = low 8 bits of r2, sign-extended. */
static void in2_r2_8s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_8s 0

/* in2 = low 8 bits of r2, zero-extended. */
static void in2_r2_8u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_8u 0

/* in2 = low 16 bits of r2, sign-extended. */
static void in2_r2_16s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_16s 0

/* in2 = low 16 bits of r2, zero-extended. */
static void in2_r2_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_16u 0

/* in2 = a fresh copy of general register r3. */
static void in2_r3(DisasContext *s, DisasOps *o)
{
    o->in2 = load_reg(get_field(s, r3));
}
#define SPEC_in2_r3 0
5657
/* in2_128 = 128-bit value from the pair: r3 high 64 bits, r3+1 low 64. */
static void in2_r3_D64(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    o->in2_128 = tcg_temp_new_i128();
    tcg_gen_concat_i64_i128(o->in2_128, regs[r3 + 1], regs[r3]);
}
#define SPEC_in2_r3_D64 SPEC_r3_even

/* in2 = the high 32 bits of r3 (shifted into the low half). */
static void in2_r3_sr32(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(s, r3)], 32);
}
#define SPEC_in2_r3_sr32 0

/* in2 = low 32 bits of r3, zero-extended. */
static void in2_r3_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r3)]);
}
#define SPEC_in2_r3_32u 0

/* in2 = low 32 bits of r2, sign-extended. */
static void in2_r2_32s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_32s 0

/* in2 = low 32 bits of r2, zero-extended. */
static void in2_r2_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_32u 0

/* in2 = the high 32 bits of r2 (shifted into the low half). */
static void in2_r2_sr32(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(s, r2)], 32);
}
#define SPEC_in2_r2_sr32 0
5700
/* in2 = short (32-bit) FP operand from float register r2. */
static void in2_e2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(s, r2));
}
#define SPEC_in2_e2 0

/* in2 = long (64-bit) FP operand from float register r2. */
static void in2_f2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg(get_field(s, r2));
}
#define SPEC_in2_f2 0

/* in2_128 = extended (128-bit) FP operand from the f2 register pair. */
static void in2_x2(DisasContext *s, DisasOps *o)
{
    o->in2_128 = load_freg_128(get_field(s, r2));
}
#define SPEC_in2_x2 SPEC_r2_f128

/* Load the low double word of an extended (128-bit) format FP number */
static void in2_x2l(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg(get_field(s, r2) + 2);
}
#define SPEC_in2_x2l SPEC_r2_f128

/* in2 = register r2 treated as an address (wrapped per addressing mode). */
static void in2_ra2(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);

    /* Note: *don't* treat !r2 as 0, use the reg value. */
    o->in2 = tcg_temp_new_i64();
    gen_addi_and_wrap_i64(s, o->in2, regs[r2], 0);
}
#define SPEC_in2_ra2 0
5735
/*
 * As in2_ra2, but for insns that require an even register pair; only
 * the SPEC constraint (SPEC_r2_even) differs.
 *
 * Fixed: the original used "return in2_ra2(s, o);", which returns an
 * expression from a void function -- an ISO C constraint violation
 * (C11 6.8.6.4p1) accepted only as a compiler extension.
 */
static void in2_ra2_E(DisasContext *s, DisasOps *o)
{
    in2_ra2(s, o);
}
#define SPEC_in2_ra2_E SPEC_r2_even
5741
/* in2 = effective address from x2 (if the format has one) + b2 + d2. */
static void in2_a2(DisasContext *s, DisasOps *o)
{
    int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
    o->in2 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
}
#define SPEC_in2_a2 0

/*
 * Return the branch target for a relative-immediate i2 field:
 * either the PC-relative constant pc_next + imm * 2, or, for the
 * register form recognized by disas_jdest, the register value.
 */
static TCGv gen_ri2(DisasContext *s)
{
    TCGv ri2 = NULL;
    bool is_imm;
    int imm;

    disas_jdest(s, i2, is_imm, imm, ri2);
    if (is_imm) {
        ri2 = tcg_constant_i64(s->base.pc_next + (int64_t)imm * 2);
    }

    return ri2;
}

/* in2 = the relative-immediate target address (see gen_ri2). */
static void in2_ri2(DisasContext *s, DisasOps *o)
{
    o->in2 = gen_ri2(s);
}
#define SPEC_in2_ri2 0
5768
/*
 * in2 = shift count from the b2/d2 address computation, masked to 6 bits.
 * With no base register the count is the constant d2 & 63; otherwise the
 * effective address is computed and then masked.
 */
static void in2_sh(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s, b2);
    int d2 = get_field(s, d2);

    if (b2 == 0) {
        o->in2 = tcg_constant_i64(d2 & 0x3f);
    } else {
        o->in2 = get_address(s, 0, b2, d2);
        tcg_gen_andi_i64(o->in2, o->in2, 0x3f);
    }
}
#define SPEC_in2_sh 0
5782
/* in2 = 8-bit zero-extended load from the second-operand address. */
static void in2_m2_8u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_UB);
}
#define SPEC_in2_m2_8u 0

/* in2 = 16-bit sign-extended load from the second-operand address. */
static void in2_m2_16s(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TESW);
}
#define SPEC_in2_m2_16s 0

/* in2 = 16-bit zero-extended load from the second-operand address. */
static void in2_m2_16u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUW);
}
#define SPEC_in2_m2_16u 0

/* in2 = 32-bit sign-extended load from the second-operand address. */
static void in2_m2_32s(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TESL);
}
#define SPEC_in2_m2_32s 0

/* in2 = 32-bit zero-extended load from the second-operand address. */
static void in2_m2_32u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUL);
}
#define SPEC_in2_m2_32u 0

#ifndef CONFIG_USER_ONLY
/* 32-bit zero-extended load requiring alignment (system-mode only). */
static void in2_m2_32ua(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_tl(o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_in2_m2_32ua 0
#endif

/* in2 = 64-bit load from the second-operand address. */
static void in2_m2_64(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ);
}
#define SPEC_in2_m2_64 0

/* As in2_m2_64, then wrap the loaded value per the addressing mode. */
static void in2_m2_64w(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ);
    gen_addi_and_wrap_i64(s, o->in2, o->in2, 0);
}
#define SPEC_in2_m2_64w 0

#ifndef CONFIG_USER_ONLY
/* 64-bit load requiring alignment (system-mode only). */
static void in2_m2_64a(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ | MO_ALIGN);
}
#define SPEC_in2_m2_64a 0
#endif
5850
/* in2 = 16-bit sign-extended load from the relative-immediate address. */
static void in2_mri2_16s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s), MO_TESW);
}
#define SPEC_in2_mri2_16s 0

/* in2 = 16-bit zero-extended load from the relative-immediate address. */
static void in2_mri2_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s), MO_TEUW);
}
#define SPEC_in2_mri2_16u 0

/* in2 = aligned 32-bit sign-extended load from the ri2 address. */
static void in2_mri2_32s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_tl(o->in2, gen_ri2(s), get_mem_index(s),
                       MO_TESL | MO_ALIGN);
}
#define SPEC_in2_mri2_32s 0

/* in2 = aligned 32-bit zero-extended load from the ri2 address. */
static void in2_mri2_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_tl(o->in2, gen_ri2(s), get_mem_index(s),
                       MO_TEUL | MO_ALIGN);
}
#define SPEC_in2_mri2_32u 0

/* in2 = aligned 64-bit load from the ri2 address. */
static void in2_mri2_64(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s),
                        MO_TEUQ | MO_ALIGN);
}
#define SPEC_in2_mri2_64 0
5888
/* in2 = the i2 immediate as a (sign-preserved) 64-bit constant. */
static void in2_i2(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64(get_field(s, i2));
}
#define SPEC_in2_i2 0

/* in2 = low 8 bits of the i2 immediate, zero-extended. */
static void in2_i2_8u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64((uint8_t)get_field(s, i2));
}
#define SPEC_in2_i2_8u 0

/* in2 = low 16 bits of the i2 immediate, zero-extended. */
static void in2_i2_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64((uint16_t)get_field(s, i2));
}
#define SPEC_in2_i2_16u 0

/* in2 = low 32 bits of the i2 immediate, zero-extended. */
static void in2_i2_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64((uint32_t)get_field(s, i2));
}
#define SPEC_in2_i2_32u 0

/* in2 = 16-bit immediate shifted left by the per-insn data amount. */
static void in2_i2_16u_shl(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(s, i2);
    o->in2 = tcg_constant_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_16u_shl 0

/* in2 = 32-bit immediate shifted left by the per-insn data amount. */
static void in2_i2_32u_shl(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(s, i2);
    o->in2 = tcg_constant_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_32u_shl 0

#ifndef CONFIG_USER_ONLY
/* in2 = the raw instruction text itself (system-mode only). */
static void in2_insn(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64(s->fields.raw_insn);
}
#define SPEC_in2_insn 0
#endif
5934
5935 /* ====================================================================== */
5936
5937 /* Find opc within the table of insns. This is formulated as a switch
5938 statement so that (1) we get compile-time notice of cut-paste errors
5939 for duplicated opcodes, and (2) the compiler generates the binary
5940 search tree, rather than us having to post-process the table. */
5941
5942 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
5943 E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0)
5944
5945 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
5946 E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0)
5947
5948 #define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \
5949 E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL)
5950
5951 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM,
5952
5953 enum DisasInsnEnum {
5954 #include "insn-data.h.inc"
5955 };
5956
5957 #undef E
5958 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) { \
5959 .opc = OPC, \
5960 .flags = FL, \
5961 .fmt = FMT_##FT, \
5962 .fac = FAC_##FC, \
5963 .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
5964 .name = #NM, \
5965 .help_in1 = in1_##I1, \
5966 .help_in2 = in2_##I2, \
5967 .help_prep = prep_##P, \
5968 .help_wout = wout_##W, \
5969 .help_cout = cout_##CC, \
5970 .help_op = op_##OP, \
5971 .data = D \
5972 },
5973
5974 /* Allow 0 to be used for NULL in the table below. */
5975 #define in1_0 NULL
5976 #define in2_0 NULL
5977 #define prep_0 NULL
5978 #define wout_0 NULL
5979 #define cout_0 NULL
5980 #define op_0 NULL
5981
5982 #define SPEC_in1_0 0
5983 #define SPEC_in2_0 0
5984 #define SPEC_prep_0 0
5985 #define SPEC_wout_0 0
5986
5987 /* Give smaller names to the various facilities. */
5988 #define FAC_Z S390_FEAT_ZARCH
5989 #define FAC_CASS S390_FEAT_COMPARE_AND_SWAP_AND_STORE
5990 #define FAC_DFP S390_FEAT_DFP
5991 #define FAC_DFPR S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* DFP-rounding */
5992 #define FAC_DO S390_FEAT_STFLE_45 /* distinct-operands */
5993 #define FAC_EE S390_FEAT_EXECUTE_EXT
5994 #define FAC_EI S390_FEAT_EXTENDED_IMMEDIATE
5995 #define FAC_FPE S390_FEAT_FLOATING_POINT_EXT
5996 #define FAC_FPSSH S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* FPS-sign-handling */
5997 #define FAC_FPRGR S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* FPR-GR-transfer */
5998 #define FAC_GIE S390_FEAT_GENERAL_INSTRUCTIONS_EXT
5999 #define FAC_HFP_MA S390_FEAT_HFP_MADDSUB
6000 #define FAC_HW S390_FEAT_STFLE_45 /* high-word */
6001 #define FAC_IEEEE_SIM S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* IEEE-exception-simulation */
6002 #define FAC_MIE S390_FEAT_STFLE_49 /* misc-instruction-extensions */
6003 #define FAC_LAT S390_FEAT_STFLE_49 /* load-and-trap */
6004 #define FAC_LOC S390_FEAT_STFLE_45 /* load/store on condition 1 */
6005 #define FAC_LOC2 S390_FEAT_STFLE_53 /* load/store on condition 2 */
6006 #define FAC_LD S390_FEAT_LONG_DISPLACEMENT
6007 #define FAC_PC S390_FEAT_STFLE_45 /* population count */
6008 #define FAC_SCF S390_FEAT_STORE_CLOCK_FAST
6009 #define FAC_SFLE S390_FEAT_STFLE
6010 #define FAC_ILA S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
6011 #define FAC_MVCOS S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
6012 #define FAC_LPP S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
6013 #define FAC_DAT_ENH S390_FEAT_DAT_ENH
6014 #define FAC_E2 S390_FEAT_EXTENDED_TRANSLATION_2
6015 #define FAC_EH S390_FEAT_STFLE_49 /* execution-hint */
6016 #define FAC_PPA S390_FEAT_STFLE_49 /* processor-assist */
6017 #define FAC_LZRB S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
6018 #define FAC_ETF3 S390_FEAT_EXTENDED_TRANSLATION_3
6019 #define FAC_MSA S390_FEAT_MSA /* message-security-assist facility */
6020 #define FAC_MSA3 S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
6021 #define FAC_MSA4 S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
6022 #define FAC_MSA5 S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
6023 #define FAC_MSA8 S390_FEAT_MSA_EXT_8 /* msa-extension-8 facility */
6024 #define FAC_ECT S390_FEAT_EXTRACT_CPU_TIME
6025 #define FAC_PCI S390_FEAT_ZPCI /* z/PCI facility */
6026 #define FAC_AIS S390_FEAT_ADAPTER_INT_SUPPRESSION
6027 #define FAC_V S390_FEAT_VECTOR /* vector facility */
6028 #define FAC_VE S390_FEAT_VECTOR_ENH /* vector enhancements facility 1 */
6029 #define FAC_VE2 S390_FEAT_VECTOR_ENH2 /* vector enhancements facility 2 */
6030 #define FAC_MIE2 S390_FEAT_MISC_INSTRUCTION_EXT2 /* miscellaneous-instruction-extensions facility 2 */
6031 #define FAC_MIE3 S390_FEAT_MISC_INSTRUCTION_EXT3 /* miscellaneous-instruction-extensions facility 3 */
6032
6033 static const DisasInsn insn_info[] = {
6034 #include "insn-data.h.inc"
6035 };
6036
6037 #undef E
6038 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \
6039 case OPC: return &insn_info[insn_ ## NM];
6040
/*
 * Map a (possibly extended) 16-bit opcode to its DisasInsn descriptor.
 * The switch cases are generated from insn-data.h.inc via the E macro
 * above; unknown opcodes yield NULL (illegal instruction).
 */
static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.h.inc"
    default:
        return NULL;
    }
}
6049
6050 #undef F
6051 #undef E
6052 #undef D
6053 #undef C
6054
6055 /* Extract a field from the insn. The INSN should be left-aligned in
6056 the uint64_t so that we can more easily utilize the big-bit-endian
6057 definitions we extract from the Principals of Operation. */
6058
static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
{
    uint32_t r, m;

    /* A zero-size field is a placeholder; nothing to extract. */
    if (f->size == 0) {
        return;
    }

    /* Zero extract the field from the insn. */
    /* INSN is left-aligned, so this pulls SIZE bits starting at
       big-endian bit position BEG. */
    r = (insn << f->beg) >> (64 - f->size);

    /* Sign-extend, or un-swap the field as necessary. */
    switch (f->type) {
    case 0: /* unsigned */
        break;
    case 1: /* signed */
        assert(f->size <= 32);
        /* Classic xor/subtract trick to sign-extend from f->size bits. */
        m = 1u << (f->size - 1);
        r = (r ^ m) - m;
        break;
    case 2: /* dl+dh split, signed 20 bit. */
        /* R currently holds DL (12 bits) above DH (8 bits); reassemble
           as the sign-extended 20-bit value DH:DL. */
        r = ((int8_t)r << 12) | (r >> 8);
        break;
    case 3: /* MSB stored in RXB */
        /* Vector register numbers: bit 4 of the register comes from the
           RXB byte at insn bits 36-39, selected by the field position. */
        g_assert(f->size == 4);
        switch (f->beg) {
        case 8:
            r |= extract64(insn, 63 - 36, 1) << 4;
            break;
        case 12:
            r |= extract64(insn, 63 - 37, 1) << 4;
            break;
        case 16:
            r |= extract64(insn, 63 - 38, 1) << 4;
            break;
        case 32:
            r |= extract64(insn, 63 - 39, 1) << 4;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    default:
        abort();
    }

    /*
     * Validate that the "compressed" encoding we selected above is valid.
     * I.e. we haven't made two different original fields overlap.
     */
    assert(((o->presentC >> f->indexC) & 1) == 0);
    o->presentC |= 1 << f->indexC;
    o->presentO |= 1 << f->indexO;

    o->c[f->indexC] = r;
}
6115
6116 /* Lookup the insn at the current PC, extracting the operands into O and
6117 returning the info struct for the insn. Returns NULL for invalid insn. */
6118
/* Lookup the insn at the current PC, extracting the operands into O and
   returning the info struct for the insn.  Returns NULL for invalid insn. */

static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s)
{
    uint64_t insn, pc = s->base.pc_next;
    int op, op2, ilen;
    const DisasInsn *info;

    if (unlikely(s->ex_value)) {
        uint64_t be_insn;

        /* Drop the EX data now, so that it's clear on exception paths. */
        tcg_gen_st_i64(tcg_constant_i64(0), tcg_env,
                       offsetof(CPUS390XState, ex_value));

        /* Extract the values saved by EXECUTE: the insn bytes live in the
           high 48 bits, the insn length in the low nibble. */
        insn = s->ex_value & 0xffffffffffff0000ull;
        ilen = s->ex_value & 0xf;
        op = insn >> 56;

        /* Register insn bytes with translator so plugins work. */
        be_insn = cpu_to_be64(insn);
        translator_fake_ld(&s->base, &be_insn, get_ilen(op));
    } else {
        /* Read the first halfword; its top two bits encode the total
           insn length (2, 4 or 6 bytes), per get_ilen(). */
        insn = ld_code2(env, s, pc);
        op = (insn >> 8) & 0xff;
        ilen = get_ilen(op);
        /* Left-align the full insn in the uint64_t, as extract_field
           expects. */
        switch (ilen) {
        case 2:
            insn = insn << 48;
            break;
        case 4:
            insn = ld_code4(env, s, pc) << 32;
            break;
        case 6:
            insn = (insn << 48) | (ld_code4(env, s, pc + 2) << 16);
            break;
        default:
            g_assert_not_reached();
        }
    }
    s->pc_tmp = s->base.pc_next + ilen;
    s->ilen = ilen;

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0. */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE, IE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        /* Secondary opcode is the second byte. */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        /* Secondary opcode is the low nibble of the second byte. */
        op2 = (insn << 12) >> 60;
        break;
    case 0xc5: /* MII */
    case 0xc7: /* SMI */
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        /* No secondary opcode at all. */
        op2 = 0;
        break;
    default:
        /* Default: OP2 is the byte at bit 40. */
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(&s->fields, 0, sizeof(s->fields));
    s->fields.raw_insn = insn;
    s->fields.op = op;
    s->fields.op2 = op2;

    /* Lookup the instruction. */
    info = lookup_opc(op << 8 | op2);
    s->insn = info;

    /* If we found it, extract the operands. */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(&s->fields, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
6224
/* True if REG is an additional (AFP) floating-point register, i.e.
   anything other than the base registers 0, 2, 4 and 6. */
static bool is_afp_reg(int reg)
{
    if (reg > 6) {
        return true;
    }
    return (reg & 1) != 0;
}
6229
/* True if REG can name a 128-bit floating-point register pair.
   Valid pair bases are 0,1,4,5,8,9,12,13 — exactly those with bit 1 clear. */
static bool is_fp_pair(int reg)
{
    return (reg & 0x2) == 0;
}
6235
/*
 * Decode and translate a single instruction at s->base.pc_next.
 * Generates program exceptions for illegal/privileged/forbidden insns,
 * otherwise runs the insn's in/prep/op/wout/cout helper pipeline.
 * Returns the resulting DisasJumpType; always advances pc_next past
 * the insn (or past where it would have been) before returning.
 */
static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    DisasJumpType ret = DISAS_NEXT;
    DisasOps o = {};
    bool icount = false;

    /* Search for the insn in the table. */
    insn = extract_insn(env, s);

    /* Update insn_start now that we know the ILEN. */
    tcg_set_insn_start_param(s->base.insn_start, 2, s->ilen);

    /* Not found means unimplemented/illegal opcode. */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
                      s->fields.op, s->fields.op2);
        gen_illegal_opcode(s);
        ret = DISAS_NORETURN;
        goto out;
    }

#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER_IFETCH) {
        /* With ifetch set, psw_addr and cc_op are always up-to-date. */
        gen_helper_per_ifetch(tcg_env, tcg_constant_i32(s->ilen));
    }
#endif

    /* process flags */
    if (insn->flags) {
        /* privileged instruction */
        if ((s->base.tb->flags & FLAG_MASK_PSTATE) && (insn->flags & IF_PRIV)) {
            gen_program_exception(s, PGM_PRIVILEGED);
            ret = DISAS_NORETURN;
            goto out;
        }

        /* if AFP is not enabled, instructions and registers are forbidden */
        if (!(s->base.tb->flags & FLAG_MASK_AFP)) {
            uint8_t dxc = 0;

            /* DXC 1: AFP register; 2: BFP insn; 3: DFP insn; 0xfe: vector.
               Later checks deliberately override earlier ones. */
            if ((insn->flags & IF_AFP1) && is_afp_reg(get_field(s, r1))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP2) && is_afp_reg(get_field(s, r2))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP3) && is_afp_reg(get_field(s, r3))) {
                dxc = 1;
            }
            if (insn->flags & IF_BFP) {
                dxc = 2;
            }
            if (insn->flags & IF_DFP) {
                dxc = 3;
            }
            if (insn->flags & IF_VEC) {
                dxc = 0xfe;
            }
            if (dxc) {
                gen_data_exception(dxc);
                ret = DISAS_NORETURN;
                goto out;
            }
        }

        /* if vector instructions not enabled, executing them is forbidden */
        if (insn->flags & IF_VEC) {
            if (!((s->base.tb->flags & FLAG_MASK_VECTOR))) {
                gen_data_exception(0xfe);
                ret = DISAS_NORETURN;
                goto out;
            }
        }

        /* input/output is the special case for icount mode */
        if (unlikely(insn->flags & IF_IO)) {
            icount = translator_io_start(&s->base);
        }
    }

    /* Check for insn specification exceptions. */
    if (insn->spec) {
        if ((insn->spec & SPEC_r1_even && get_field(s, r1) & 1) ||
            (insn->spec & SPEC_r2_even && get_field(s, r2) & 1) ||
            (insn->spec & SPEC_r3_even && get_field(s, r3) & 1) ||
            (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(s, r1))) ||
            (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(s, r2)))) {
            gen_program_exception(s, PGM_SPECIFICATION);
            ret = DISAS_NORETURN;
            goto out;
        }
    }

    /* Implement the instruction: load inputs, prepare outputs, run the
       operation, write outputs, then compute the condition code. */
    if (insn->help_in1) {
        insn->help_in1(s, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
        if (ret == DISAS_NORETURN) {
            goto out;
        }
    }
    if (insn->help_wout) {
        insn->help_wout(s, &o);
    }
    if (insn->help_cout) {
        insn->help_cout(s, &o);
    }

    /* io should be the last instruction in tb when icount is enabled */
    if (unlikely(icount && ret == DISAS_NEXT)) {
        ret = DISAS_TOO_MANY;
    }

#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER_IFETCH) {
        /* PER: make psw_addr and cc_op architecturally visible before
           checking for a pending PER exception. */
        switch (ret) {
        case DISAS_TOO_MANY:
            s->base.is_jmp = DISAS_PC_CC_UPDATED;
            /* fall through */
        case DISAS_NEXT:
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            break;
        default:
            break;
        }
        update_cc_op(s);
        gen_helper_per_check_exception(tcg_env);
    }
#endif

out:
    /* Advance to the next instruction. */
    s->base.pc_next = s->pc_tmp;
    return ret;
}
6381
/* TranslatorOps hook: set up the DisasContext for a new TB. */
static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    uint64_t ex = dc->base.tb->cs_base;

    dc->cc_op = CC_OP_DYNAMIC;
    /* cs_base carries the EXECUTE-supplied insn, if any; translating
       one forces an exit to the main loop afterwards. */
    dc->ex_value = ex;
    dc->exit_to_mainloop = ex;

    /* 31-bit mode */
    if (!(dc->base.tb->flags & FLAG_MASK_64)) {
        dc->base.pc_first &= 0x7fffffff;
        dc->base.pc_next = dc->base.pc_first;
    }
}
6396
/* TranslatorOps hook: no per-TB prologue code is needed on s390x. */
static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}
6400
/* TranslatorOps hook: record (pc, cc_op, ilen) at the start of each insn;
   these are the values consumed by s390x_restore_state_to_opc(). */
static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /* Delay the set of ilen until we've read the insn.  The 0 here is
       patched by translate_one via tcg_set_insn_start_param. */
    tcg_gen_insn_start(dc->base.pc_next, dc->cc_op, 0);
}
6408
get_next_pc(CPUS390XState * env,DisasContext * s,uint64_t pc)6409 static target_ulong get_next_pc(CPUS390XState *env, DisasContext *s,
6410 uint64_t pc)
6411 {
6412 uint64_t insn = translator_lduw(env, &s->base, pc);
6413
6414 return pc + get_ilen((insn >> 8) & 0xff);
6415 }
6416
/* TranslatorOps hook: translate one insn, then decide whether the TB
   must end because of EXECUTE or a page-crossing insn. */
static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUS390XState *env = cpu_env(cs);

    dc->base.is_jmp = translate_one(env, dc);
    if (dc->base.is_jmp != DISAS_NEXT) {
        return;
    }

    /* End the TB after EXECUTE, or if the next insn starts on — or
       would run onto — a different page. */
    if (dc->ex_value ||
        !is_same_page(dcbase, dc->base.pc_next) ||
        !is_same_page(dcbase, get_next_pc(env, dc, dc->base.pc_next))) {
        dc->base.is_jmp = DISAS_TOO_MANY;
    }
}
6431
/* TranslatorOps hook: emit the TB epilogue appropriate to how the TB
   ended, flushing psw_addr and cc_op back to env as needed. */
static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    switch (dc->base.is_jmp) {
    case DISAS_NORETURN:
        /* An exception was raised; nothing more to emit. */
        break;
    case DISAS_TOO_MANY:
        update_psw_addr(dc);
        /* FALLTHRU */
    case DISAS_PC_UPDATED:
        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
           cc op type is in env */
        update_cc_op(dc);
        /* FALLTHRU */
    case DISAS_PC_CC_UPDATED:
        /* Exit the TB, either by raising a debug exception or by return. */
        if (dc->exit_to_mainloop) {
            tcg_gen_exit_tb(NULL, 0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        break;
    default:
        g_assert_not_reached();
    }
}
6459
s390x_tr_disas_log(const DisasContextBase * dcbase,CPUState * cs,FILE * logfile)6460 static bool s390x_tr_disas_log(const DisasContextBase *dcbase,
6461 CPUState *cs, FILE *logfile)
6462 {
6463 DisasContext *dc = container_of(dcbase, DisasContext, base);
6464
6465 if (unlikely(dc->ex_value)) {
6466 /* The ex_value has been recorded with translator_fake_ld. */
6467 fprintf(logfile, "IN: EXECUTE\n");
6468 target_disas(logfile, cs, &dc->base);
6469 return true;
6470 }
6471 return false;
6472 }
6473
/* The set of translator hooks handed to translator_loop() below. */
static const TranslatorOps s390x_tr_ops = {
    .init_disas_context = s390x_tr_init_disas_context,
    .tb_start           = s390x_tr_tb_start,
    .insn_start         = s390x_tr_insn_start,
    .translate_insn     = s390x_tr_translate_insn,
    .tb_stop            = s390x_tr_tb_stop,
    .disas_log          = s390x_tr_disas_log,
};
6482
/* Entry point for TB translation: drive the generic translator loop
   with the s390x-specific hooks. */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           vaddr pc, void *host_pc)
{
    DisasContext dc;

    translator_loop(cs, tb, max_insns, pc, host_pc, &s390x_tr_ops, &dc.base);
}
6490
/*
 * Restore CPU state from the per-insn data recorded by insn_start:
 * data[0] = psw.addr, data[1] = cc_op, data[2] = ilen.
 */
void s390x_restore_state_to_opc(CPUState *cs,
                                const TranslationBlock *tb,
                                const uint64_t *data)
{
    CPUS390XState *env = cpu_env(cs);
    int op = data[1];

    env->psw.addr = data[0];

    /* Record ILEN. */
    env->int_pgm_ilen = data[2];

    /* DYNAMIC/STATIC mean env->cc_op is already authoritative. */
    if (op != CC_OP_DYNAMIC && op != CC_OP_STATIC) {
        env->cc_op = op;
    }
}
6508