/*
 * S/390 translation
 *
 * Copyright (c) 2009 Ulrich Hecht
 * Copyright (c) 2010 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/* #define DEBUG_INLINE_BRANCHES */
#define S390X_DEBUG_DISAS
/* #define S390X_DEBUG_DISAS_VERBOSE */

#ifdef S390X_DEBUG_DISAS_VERBOSE
#  define LOG_DISAS(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif

#include "qemu/osdep.h"
#include "cpu.h"
#include "s390x-internal.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "qemu/log.h"
#include "qemu/host-utils.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "exec/translator.h"
#include "exec/log.h"
#include "qemu/atomic128.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H


/* Information that (most) every instruction needs to manipulate.  */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;

/*
 * Define a structure to hold the decoded fields.  We'll store each inside
 * an array indexed by an enum.  In order to conserve memory, we'll arrange
 * for fields that do not exist at the same time to overlap, thus the "C"
 * for compact.  For checking purposes there is an "O" for original index
 * as well that will be applied to availability bitmaps.
 */

enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_m5,
    FLD_O_m6,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5,
    FLD_O_v1,
    FLD_O_v2,
    FLD_O_v3,
    FLD_O_v4,
};

enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,
    FLD_C_v1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,
    FLD_C_v3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,
    FLD_C_v4 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,
    FLD_C_m5 = 4,

    FLD_C_d2 = 5,
    FLD_C_m6 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,
    FLD_C_v2 = 6,

    NUM_C_FIELD = 7
};
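/*
 * For example, no instruction format carries both an r1 and an m1 field,
 * so FLD_C_r1 and FLD_C_m1 can share compact slot 0 above, while FLD_O_r1
 * and FLD_O_m1 remain distinct bits in the presentO availability bitmap.
 */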
struct DisasFields {
    uint64_t raw_insn;
    unsigned op:8;
    unsigned op2:8;
    unsigned presentC:16;
    unsigned int presentO;
    int c[NUM_C_FIELD];
};

struct DisasContext {
    DisasContextBase base;
    const DisasInsn *insn;
    TCGOp *insn_start;
    DisasFields fields;
    uint64_t ex_value;
    /*
     * During translate_one(), pc_tmp is used to determine the instruction
     * to be executed after base.pc_next - e.g. next sequential instruction
     * or a branch target.
     */
    uint64_t pc_tmp;
    uint32_t ilen;
    enum cc_op cc_op;
    bool exit_to_mainloop;
};

/* Information carried about a condition to be evaluated.  */
typedef struct {
    TCGCond cond:8;
    bool is_64;
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;

#ifdef DEBUG_INLINE_BRANCHES
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif

static void pc_to_link_info(TCGv_i64 out, DisasContext *s, uint64_t pc)
{
    if (s->base.tb->flags & FLAG_MASK_32) {
        if (s->base.tb->flags & FLAG_MASK_64) {
            tcg_gen_movi_i64(out, pc);
            return;
        }
        pc |= 0x80000000;
    }
    assert(!(s->base.tb->flags & FLAG_MASK_64));
    tcg_gen_deposit_i64(out, out, tcg_constant_i64(pc), 0, 32);
}

static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;
static TCGv_i64 gbea;

static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

static char cpu_reg_names[16][4];
static TCGv_i64 regs[16];

void s390x_translate_init(void)
{
    int i;

    psw_addr = tcg_global_mem_new_i64(tcg_env,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(tcg_env,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");
    gbea = tcg_global_mem_new_i64(tcg_env,
                                  offsetof(CPUS390XState, gbea),
                                  "gbea");

    cc_op = tcg_global_mem_new_i32(tcg_env, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(tcg_env, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(tcg_env, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(tcg_env, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(tcg_env,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }
}
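/*
 * The 16 floating-point registers overlay vector registers 0-15, each FPR
 * occupying the leftmost (most significant) 8 bytes of its vector register;
 * the freg offsets below therefore resolve to element 0 of the vreg.
 */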
static inline int vec_full_reg_offset(uint8_t reg)
{
    g_assert(reg < 32);
    return offsetof(CPUS390XState, vregs[reg][0]);
}

static inline int vec_reg_offset(uint8_t reg, uint8_t enr, MemOp es)
{
    /* Convert element size (es) - e.g. MO_8 - to bytes */
    const uint8_t bytes = 1 << es;
    int offs = enr * bytes;

    /*
     * vregs[n][0] is the lowest 8 byte and vregs[n][1] the highest 8 byte
     * of the 16 byte vector, on both little and big endian systems.
     *
     * Big Endian (target/possible host)
     * B:  [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15]
     * HW: [     0][     1][     2][     3] - [     4][     5][     6][     7]
     * W:  [             0][             1] - [             2][             3]
     * DW: [                             0] - [                             1]
     *
     * Little Endian (possible host)
     * B:  [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8]
     * HW: [     3][     2][     1][     0] - [     7][     6][     5][     4]
     * W:  [             1][             0] - [             3][             2]
     * DW: [                             0] - [                             1]
     *
     * For 16 byte elements, the two 8 byte halves will not form a host
     * int128 if the host is little endian, since they're in the wrong order.
     * Some operations (e.g. xor) do not care. For operations like addition,
     * the two 8 byte elements have to be loaded separately. Let's force all
     * 16 byte operations to handle it in a special way.
     */
    g_assert(es <= MO_64);
#if !HOST_BIG_ENDIAN
    offs ^= (8 - bytes);
#endif
    return offs + vec_full_reg_offset(reg);
}
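/*
 * Worked example: on a little-endian host, word element 0 (es = MO_32,
 * enr = 0) starts at offs = 0, and the XOR with (8 - bytes) = 4 moves it
 * to byte offset 4, matching the "W: [ 1][ 0]" row in the table above.
 */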
static inline int freg64_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_64);
}

static inline int freg32_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_32);
}

static TCGv_i64 load_reg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;
}

static TCGv_i64 load_freg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld_i64(r, tcg_env, freg64_offset(reg));
    return r;
}

static TCGv_i64 load_freg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld32u_i64(r, tcg_env, freg32_offset(reg));
    return r;
}

static TCGv_i128 load_freg_128(int reg)
{
    TCGv_i64 h = load_freg(reg);
    TCGv_i64 l = load_freg(reg + 2);
    TCGv_i128 r = tcg_temp_new_i128();

    tcg_gen_concat_i64_i128(r, l, h);
    return r;
}

static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_st_i64(v, tcg_env, freg64_offset(reg));
}

static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}

static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}

static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_st32_i64(v, tcg_env, freg32_offset(reg));
}

static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->base.pc_next);
}

static void per_branch(DisasContext *s, bool to_next)
{
#ifndef CONFIG_USER_ONLY
    tcg_gen_movi_i64(gbea, s->base.pc_next);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 next_pc = to_next ? tcg_constant_i64(s->pc_tmp) : psw_addr;
        gen_helper_per_branch(tcg_env, gbea, next_pc);
    }
#endif
}

static void per_branch_cond(DisasContext *s, TCGCond cond,
                            TCGv_i64 arg1, TCGv_i64 arg2)
{
#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGLabel *lab = gen_new_label();
        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);

        tcg_gen_movi_i64(gbea, s->base.pc_next);
        gen_helper_per_branch(tcg_env, gbea, psw_addr);

        gen_set_label(lab);
    } else {
        TCGv_i64 pc = tcg_constant_i64(s->base.pc_next);
        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
    }
#endif
}

static void per_breaking_event(DisasContext *s)
{
    tcg_gen_movi_i64(gbea, s->base.pc_next);
}

static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}

static inline uint64_t ld_code2(CPUS390XState *env, DisasContext *s,
                                uint64_t pc)
{
    return (uint64_t)translator_lduw(env, &s->base, pc);
}

static inline uint64_t ld_code4(CPUS390XState *env, DisasContext *s,
                                uint64_t pc)
{
    return (uint64_t)(uint32_t)translator_ldl(env, &s->base, pc);
}

static int get_mem_index(DisasContext *s)
{
#ifdef CONFIG_USER_ONLY
    return MMU_USER_IDX;
#else
    if (!(s->base.tb->flags & FLAG_MASK_DAT)) {
        return MMU_REAL_IDX;
    }

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_PRIMARY_IDX;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_SECONDARY_IDX;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        return MMU_HOME_IDX;
    default:
        g_assert_not_reached();
        break;
    }
#endif
}

static void gen_exception(int excp)
{
    gen_helper_exception(tcg_env, tcg_constant_i32(excp));
}

static void gen_program_exception(DisasContext *s, int code)
{
    /* Remember what pgm exception this was.  */
    tcg_gen_st_i32(tcg_constant_i32(code), tcg_env,
                   offsetof(CPUS390XState, int_pgm_code));

    tcg_gen_st_i32(tcg_constant_i32(s->ilen), tcg_env,
                   offsetof(CPUS390XState, int_pgm_ilen));

    /* update the psw */
    update_psw_addr(s);

    /* Save off cc.  */
    update_cc_op(s);

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);
}

static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}

static inline void gen_data_exception(uint8_t dxc)
{
    gen_helper_data_exception(tcg_env, tcg_constant_i32(dxc));
}

static inline void gen_trap(DisasContext *s)
{
    /* Set DXC to 0xff */
    gen_data_exception(0xff);
}

static void gen_addi_and_wrap_i64(DisasContext *s, TCGv_i64 dst, TCGv_i64 src,
                                  int64_t imm)
{
    tcg_gen_addi_i64(dst, src, imm);
    if (!(s->base.tb->flags & FLAG_MASK_64)) {
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_andi_i64(dst, dst, 0x7fffffff);
        } else {
            tcg_gen_andi_i64(dst, dst, 0x00ffffff);
        }
    }
}

static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /*
     * Note that d2 is limited to 20 bits, signed.  If we crop negative
     * displacements early we create larger immediate addends.
     */
    if (b2 && x2) {
        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
        gen_addi_and_wrap_i64(s, tmp, tmp, d2);
    } else if (b2) {
        gen_addi_and_wrap_i64(s, tmp, regs[b2], d2);
    } else if (x2) {
        gen_addi_and_wrap_i64(s, tmp, regs[x2], d2);
    } else if (!(s->base.tb->flags & FLAG_MASK_64)) {
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_movi_i64(tmp, d2 & 0x7fffffff);
        } else {
            tcg_gen_movi_i64(tmp, d2 & 0x00ffffff);
        }
    } else {
        tcg_gen_movi_i64(tmp, d2);
    }

    return tmp;
}
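/*
 * Example of the wrapping behaviour: in 24-bit mode, base 0x00fffffe plus
 * displacement 4 first yields 0x01000002, which the AND with 0x00ffffff
 * reduces to 0x000002.
 */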
static inline bool live_cc_data(DisasContext *s)
{
    return (s->cc_op != CC_OP_DYNAMIC
            && s->cc_op != CC_OP_STATIC
            && s->cc_op > 3);
}

static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}

static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}

static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}

/* CC value is in env->cc_op */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}

/* calculates cc into cc_op */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = NULL;
    TCGv_i64 dummy = NULL;

    switch (s->cc_op) {
    default:
        dummy = tcg_constant_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
        local_cc_op = tcg_constant_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        break;
    }

    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
        tcg_gen_setcondi_i64(TCG_COND_NE, cc_dst, cc_dst, 0);
        tcg_gen_extrl_i64_i32(cc_op, cc_dst);
        break;
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
    case CC_OP_LCBB:
    case CC_OP_MULS_32:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, tcg_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ADDU:
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA:
    case CC_OP_SUBU:
    case CC_OP_NZ_F128:
    case CC_OP_VC:
    case CC_OP_MULS_64:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, tcg_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, tcg_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, tcg_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        g_assert_not_reached();
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
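/*
 * Example of the lazy scheme at work: after an ADD_64, cc_src and cc_dst
 * hold the operands and cc_vr the result; the 0-3 condition code is only
 * derived from them (via the 3-argument helper call above) once some
 * consumer actually needs it.
 */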
static bool use_goto_tb(DisasContext *s, uint64_t dest)
{
    if (unlikely(s->base.tb->flags & FLAG_MASK_PER)) {
        return false;
    }
    return translator_use_goto_tb(&s->base, dest);
}

static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}

/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.  */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};

/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible.  */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    | x | x */
    TCG_COND_NEVER,  TCG_COND_NEVER,
    TCG_COND_NE,     TCG_COND_NE,        /*    | NE | x | x */
    TCG_COND_NE,     TCG_COND_NE,
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    | x | x */
    TCG_COND_EQ,     TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
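/*
 * Example: a branch mask of 8 selects CC=0 only, which after a comparison
 * means "equal", hence ltgt_cond[8] == TCG_COND_EQ.  The entries come in
 * pairs because bit 0 of the mask selects CC=3, which neither a comparison
 * nor a logic op can produce.
 */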
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op.  */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            cond = TCG_COND_TSTEQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_TSTNE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_TSTEQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_TSTNE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU:
    case CC_OP_SUBU:
        switch (mask) {
        case 8 | 2: /* result == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* result != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* !carry (borrow) */
            cond = old_cc_op == CC_OP_ADDU ? TCG_COND_EQ : TCG_COND_NE;
            break;
        case 2 | 1: /* carry (!borrow) */
            cond = old_cc_op == CC_OP_ADDU ? TCG_COND_NE : TCG_COND_EQ;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_constant_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_constant_i64(0);
        break;

    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        break;

    case CC_OP_ADDU:
    case CC_OP_SUBU:
        c->is_64 = true;
        c->u.s64.b = tcg_constant_i64(0);
        switch (mask) {
        case 8 | 2:
        case 4 | 1: /* result */
            c->u.s64.a = cc_dst;
            break;
        case 8 | 4:
        case 2 | 1: /* carry */
            c->u.s64.a = cc_src;
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case CC_OP_STATIC:
        c->is_64 = false;
        c->u.s32.a = cc_op;

        /* Fold half of the cases using bit 3 to invert. */
        switch (mask & 8 ? mask ^ 0xf : mask) {
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(3);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(2);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(1);
            break;
        case 0x2 | 0x1: /* cc == 2 || cc == 3 => cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_constant_i32(1);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_TSTNE;
            c->u.s32.b = tcg_constant_i32(1);
            break;
        case 0x4 | 0x2: /* cc == 1 || cc == 2 => (cc - 1) <= 1 */
            cond = TCG_COND_LEU;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_constant_i32(1);
            tcg_gen_addi_i32(c->u.s32.a, cc_op, -1);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_constant_i32(0);
            break;
        default:
            /* case 0: never, handled above.  */
            g_assert_not_reached();
        }
        if (mask & 8) {
            cond = tcg_invert_cond(cond);
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
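/*
 * Typical use: op_bc() below feeds its m1 mask straight in, so e.g. a
 * "branch on equal" following a compare collapses into a single inline
 * TCG_COND_EQ brcond instead of a call to the CC helper.
 */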
/* ====================================================================== */
/* Define the insn format enumeration.  */
#define F0(N)                         FMT_##N,
#define F1(N, X1)                     F0(N)
#define F2(N, X1, X2)                 F0(N)
#define F3(N, X1, X2, X3)             F0(N)
#define F4(N, X1, X2, X3, X4)         F0(N)
#define F5(N, X1, X2, X3, X4, X5)     F0(N)
#define F6(N, X1, X2, X3, X4, X5, X6) F0(N)

typedef enum {
#include "insn-format.h.inc"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6

/* This is the way fields are to be accessed out of DisasFields.  */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)

static bool have_field1(const DisasContext *s, enum DisasFieldIndexO c)
{
    return (s->fields.presentO >> c) & 1;
}

static int get_field1(const DisasContext *s, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(s, o));
    return s->fields.c[c];
}
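/*
 * E.g. get_field(s, r1) expands to get_field1(s, FLD_O_r1, FLD_C_r1): the
 * original index is only used to assert that the field is present, the
 * compact index to fetch the value from the overlapping c[] array.
 */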
/* Describe the layout of each field in each format.  */
typedef struct DisasField {
    unsigned int beg:8;
    unsigned int size:8;
    unsigned int type:2;
    unsigned int indexC:6;
    enum DisasFieldIndexO indexO:8;
} DisasField;

typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;

#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
#define V(N, B)       {  B,  4, 3, FLD_C_v##N, FLD_O_v##N }
#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }

#define F0(N)                     { { } },
#define F1(N, X1)                 { { X1 } },
#define F2(N, X1, X2)             { { X1, X2 } },
#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
#define F6(N, X1, X2, X3, X4, X5, X6) { { X1, X2, X3, X4, X5, X6 } },

static const DisasFormatInfo format_info[] = {
#include "insn-format.h.inc"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6
#undef R
#undef M
#undef V
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L

/* Generally, we'll extract operands into this structure, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details.  */
typedef struct {
    TCGv_i64 out, out2, in1, in2;
    TCGv_i64 addr1;
    TCGv_i128 out_128, in1_128, in2_128;
} DisasOps;

/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>.  */

#define SPEC_r1_even    1
#define SPEC_r2_even    2
#define SPEC_r3_even    4
#define SPEC_r1_f128    8
#define SPEC_r2_f128    16
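/*
 * For instance, an instruction whose spec mask includes SPEC_r1_even is
 * rejected with a specification exception when decoded with an odd r1,
 * as required for instructions operating on even/odd register pairs.
 */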
/* Return values from translate_one, indicating the state of the TB.  */

/* We are not using a goto_tb (for whatever reason), but have updated
   the PC (for whatever reason), so there's no need to do it again on
   exiting the TB.  */
#define DISAS_PC_UPDATED        DISAS_TARGET_0

/* We have updated the PC and CC values.  */
#define DISAS_PC_CC_UPDATED     DISAS_TARGET_2


/* Instruction flags */
#define IF_AFP1     0x0001      /* r1 is a fp reg for HFP/FPS instructions */
#define IF_AFP2     0x0002      /* r2 is a fp reg for HFP/FPS instructions */
#define IF_AFP3     0x0004      /* r3 is a fp reg for HFP/FPS instructions */
#define IF_BFP      0x0008      /* binary floating point instruction */
#define IF_DFP      0x0010      /* decimal floating point instruction */
#define IF_PRIV     0x0020      /* privileged instruction */
#define IF_VEC      0x0040      /* vector instruction */
#define IF_IO       0x0080      /* input/output instruction */

struct DisasInsn {
    unsigned opc:16;
    unsigned flags:16;
    DisasFormat fmt:8;
    unsigned fac:8;
    unsigned spec:8;

    const char *name;

    /* Pre-process arguments before HELP_OP.  */
    void (*help_in1)(DisasContext *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasOps *);

    /*
     * Post-process output after HELP_OP.
     * Note that these are not called if HELP_OP returns DISAS_NORETURN.
     */
    void (*help_wout)(DisasContext *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);

    /* Implement the operation itself.  */
    DisasJumpType (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;
};

/* ====================================================================== */
/* Miscellaneous helpers, used by several operations.  */

static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->pc_tmp) {
        per_branch(s, true);
        return DISAS_NEXT;
    }
    if (use_goto_tb(s, dest)) {
        update_cc_op(s);
        per_breaking_event(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb(s->base.tb, 0);
        return DISAS_NORETURN;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    }
}

static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
                                 bool is_imm, int imm, TCGv_i64 cdest)
{
    DisasJumpType ret;
    uint64_t dest = s->base.pc_next + (int64_t)imm * 2;
    TCGLabel *lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = DISAS_NEXT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->pc_tmp) {
            /* Branch to next.  */
            per_branch(s, true);
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (!cdest) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            per_branch(s, false);
            ret = DISAS_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->pc_tmp)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            /* Branch taken.  */
            gen_set_label(lab);
            per_breaking_event(s);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb(s->base.tb, 1);

            ret = DISAS_NORETURN;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            per_breaking_event(s);
            ret = DISAS_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_constant_i64(s->pc_tmp);
        if (is_imm) {
            cdest = tcg_constant_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
        } else {
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_constant_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
        }

        ret = DISAS_PC_UPDATED;
    }

 egress:
    return ret;
}
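/*
 * To summarize help_branch(): it emits one of three code shapes - two
 * goto_tb exits when both destinations can be chained, one goto_tb for the
 * fallthrough plus a dynamic exit for the taken branch, or a plain movcond
 * on psw_addr when goto_tb cannot be used at all.
 */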
/* ====================================================================== */
/* The operations.  These perform the bulk of the work for any insn,
   usually after the operands have been loaded and output initialized.  */

static DisasJumpType op_abs(DisasContext *s, DisasOps *o)
{
    tcg_gen_abs_i64(o->out, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return DISAS_NEXT;
}

static DisasJumpType op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return DISAS_NEXT;
}

static DisasJumpType op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_addu64(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    return DISAS_NEXT;
}

/* Compute carry into cc_src.  */
static void compute_carry(DisasContext *s)
{
    switch (s->cc_op) {
    case CC_OP_ADDU:
        /* The carry value is already in cc_src (1,0).  */
        break;
    case CC_OP_SUBU:
        tcg_gen_addi_i64(cc_src, cc_src, 1);
        break;
    default:
        gen_op_calc_cc(s);
        /* fall through */
    case CC_OP_STATIC:
        /* The carry flag is the msb of CC; compute into cc_src.  */
        tcg_gen_extu_i32_i64(cc_src, cc_op);
        tcg_gen_shri_i64(cc_src, cc_src, 1);
        break;
    }
}
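/*
 * Why the shift works: ADDU leaves a CC of 0/1 (no carry) or 2/3 (carry),
 * so bit 1 of the two-bit CC is exactly the carry; zero-extending the CC
 * and shifting right by one recovers it.
 */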
static DisasJumpType op_addc32(DisasContext *s, DisasOps *o)
{
    compute_carry(s);
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    tcg_gen_add_i64(o->out, o->out, cc_src);
    return DISAS_NEXT;
}

static DisasJumpType op_addc64(DisasContext *s, DisasOps *o)
{
    compute_carry(s);

    TCGv_i64 zero = tcg_constant_i64(0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, zero);
    tcg_gen_add2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);

    return DISAS_NEXT;
}
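/*
 * ASI and friends only need to be atomic when the interlocked-access
 * facility (STFLE bit 45) is installed; without it, the non-atomic
 * load/add/store sequence below is architecturally sufficient.
 */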
static DisasJumpType op_asi(DisasContext *s, DisasOps *o)
{
    bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);

    o->in1 = tcg_temp_new_i64();
    if (non_atomic) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    if (non_atomic) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_asiu64(DisasContext *s, DisasOps *o)
{
    bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);

    o->in1 = tcg_temp_new_i64();
    if (non_atomic) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);

    if (non_atomic) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out_128, tcg_env, o->in1_128, o->in2_128);
    return DISAS_NEXT;
}

static DisasJumpType op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, o->in2, shift);
    tcg_gen_ori_i64(t, t, ~mask);
    tcg_gen_and_i64(o->out, o->in1, t);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}

static DisasJumpType op_andc(DisasContext *s, DisasOps *o)
{
    tcg_gen_andc_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_orc(DisasContext *s, DisasOps *o)
{
    tcg_gen_orc_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_nand(DisasContext *s, DisasOps *o)
{
    tcg_gen_nand_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_nor(DisasContext *s, DisasOps *o)
{
    tcg_gen_nor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_nxor(DisasContext *s, DisasOps *o)
{
    tcg_gen_eqv_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_ni(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_bas(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}

static void save_link_info(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t;

    if (s->base.tb->flags & (FLAG_MASK_32 | FLAG_MASK_64)) {
        pc_to_link_info(o->out, s, s->pc_tmp);
        return;
    }
    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, 0xffffffff00000000ull);
    tcg_gen_ori_i64(o->out, o->out, ((s->ilen / 2) << 30) | s->pc_tmp);
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 16);
    tcg_gen_andi_i64(t, t, 0x0f000000);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_gen_extu_i32_i64(t, cc_op);
    tcg_gen_shli_i64(t, t, 28);
    tcg_gen_or_i64(o->out, o->out, t);
}

static DisasJumpType op_bal(DisasContext *s, DisasOps *o)
{
    save_link_info(s, o);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}

/*
 * Disassemble the target of a branch. The results are returned in a form
 * suitable for passing into help_branch():
 *
 * - bool IS_IMM reflects whether the target is fixed or computed. Non-EXECUTEd
 *   branches, whose DisasContext *S contains the relative immediate field RI,
 *   are considered fixed. All the other branches are considered computed.
 * - int IMM is the value of RI.
 * - TCGv_i64 CDEST is the address of the computed target.
 */
#define disas_jdest(s, ri, is_imm, imm, cdest) do {                            \
    if (have_field(s, ri)) {                                                   \
        if (unlikely(s->ex_value)) {                                           \
            cdest = tcg_temp_new_i64();                                        \
            tcg_gen_ld_i64(cdest, tcg_env, offsetof(CPUS390XState, ex_target));\
            tcg_gen_addi_i64(cdest, cdest, (int64_t)get_field(s, ri) * 2);     \
            is_imm = false;                                                    \
        } else {                                                               \
            is_imm = true;                                                     \
        }                                                                      \
    } else {                                                                   \
        is_imm = false;                                                        \
    }                                                                          \
    imm = is_imm ? get_field(s, ri) : 0;                                       \
} while (false)
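/*
 * The ex_value special case above matters because a relative branch that
 * is the target of EXECUTE must apply its offset to the address of the
 * EXECUTEd instruction, saved in env->ex_target, not to base.pc_next;
 * such a destination can only be computed at runtime.
 */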
static DisasJumpType op_basi(DisasContext *s, DisasOps *o)
{
    DisasCompare c;
    bool is_imm;
    int imm;

    pc_to_link_info(o->out, s, s->pc_tmp);

    disas_jdest(s, i2, is_imm, imm, o->in2);
    disas_jcc(s, &c, 0xf);
    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s, m1);
    DisasCompare c;
    bool is_imm;
    int imm;

    /* BCR with R2 = 0 causes no branching */
    if (have_field(s, r2) && get_field(s, r2) == 0) {
        if (m1 == 14) {
            /* Perform serialization */
            /* FIXME: check for fast-BCR-serialization facility */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        if (m1 == 15) {
            /* Perform serialization */
            /* FIXME: perform checkpoint-synchronisation */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        return DISAS_NEXT;
    }

    disas_jdest(s, i2, is_imm, imm, o->in2);
    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    DisasCompare c;
    bool is_imm;
    TCGv_i64 t;
    int imm;

    c.cond = TCG_COND_NE;
    c.is_64 = false;

    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_constant_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);

    disas_jdest(s, i2, is_imm, imm, o->in2);
    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bcth(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int imm = get_field(s, i2);
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;

    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, regs[r1], 32);
    tcg_gen_subi_i64(t, t, 1);
    store_reg32h_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_constant_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);

    return help_branch(s, &c, 1, imm, o->in2);
}

static DisasJumpType op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    DisasCompare c;
    bool is_imm;
    int imm;

    c.cond = TCG_COND_NE;
    c.is_64 = true;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_constant_i64(0);

    disas_jdest(s, i2, is_imm, imm, o->in2);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
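/*
 * The BRANCH ON INDEX ops below add r3 to r1 and compare the sum against
 * the odd register of the r3 pair (r3 | 1); insn->data selects whether
 * the branch is taken on "low or equal" or on "high".
 */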
static DisasJumpType op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    DisasCompare c;
    bool is_imm;
    TCGv_i64 t;
    int imm;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);

    disas_jdest(s, i2, is_imm, imm, o->in2);
    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    DisasCompare c;
    bool is_imm;
    int imm;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    if (r1 == (r3 | 1)) {
        c.u.s64.b = load_reg(r3 | 1);
    } else {
        c.u.s64.b = regs[r3 | 1];
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];

    disas_jdest(s, i2, is_imm, imm, o->in2);
    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    o->out = NULL;
    disas_jdest(s, i4, is_imm, imm, o->out);
    if (!is_imm && !o->out) {
        imm = 0;
        o->out = get_address(s, 0, get_field(s, b4),
                             get_field(s, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}

static DisasJumpType op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, tcg_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, tcg_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, tcg_env, o->in1_128, o->in2_128);
    set_cc_static(s);
    return DISAS_NEXT;
}

static TCGv_i32 fpinst_extract_m34(DisasContext *s, bool m3_with_fpe,
                                   bool m4_with_fpe)
{
    const bool fpe = s390_has_feat(S390_FEAT_FLOATING_POINT_EXT);
    uint8_t m3 = get_field(s, m3);
    uint8_t m4 = get_field(s, m4);

    /* m3 field was introduced with FPE */
    if (!fpe && m3_with_fpe) {
        m3 = 0;
    }
    /* m4 field was introduced with FPE */
    if (!fpe && m4_with_fpe) {
        m4 = 0;
    }

    /* Check for valid rounding modes. Mode 3 was introduced later. */
    if (m3 == 2 || m3 > 7 || (!fpe && m3 == 3)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return NULL;
    }

    return tcg_constant_i32(deposit32(m3, 4, 4, m4));
}
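/*
 * Packing convention: deposit32(m3, 4, 4, m4) yields (m4 << 4) | m3 for
 * the validated m3 values, so a helper receiving m34 can split the two
 * modifier fields back apart with a shift and a mask.
 */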
static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfeb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfdb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfxb(o->out, tcg_env, o->in2_128, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgeb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgdb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgxb(o->out, tcg_env, o->in2_128, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfeb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfdb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfxb(o->out, tcg_env, o->in2_128, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgeb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgdb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgxb(o->out, tcg_env, o->in2_128, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
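/*
 * The ops above convert BFP values to signed or logical (unsigned)
 * fixed-point; the ops below go the other way, from 64-bit fixed-point
 * values to short/long/extended BFP.
 */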
static DisasJumpType op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cegb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cdgb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cxgb(o->out_128, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

static DisasJumpType op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_celgb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cdlgb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cxlgb(o->out_128, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);
    TCGv_i128 pair = tcg_temp_new_i128();
    TCGv_i64 len = tcg_temp_new_i64();

    gen_helper_cksm(pair, tcg_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    tcg_gen_extr_i128_i64(o->out, len, pair);

    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);

    return DISAS_NEXT;
}

static DisasJumpType op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s, l1);
    TCGv_i64 src;
    TCGv_i32 vl;
    MemOp mop;

    switch (l + 1) {
    case 1:
    case 2:
    case 4:
    case 8:
        mop = ctz32(l + 1) | MO_TE;
        /* Do not update cc_src yet: loading cc_dst may cause an exception. */
        src = tcg_temp_new_i64();
        tcg_gen_qemu_ld_tl(src, o->addr1, get_mem_index(s), mop);
        tcg_gen_qemu_ld_tl(cc_dst, o->in2, get_mem_index(s), mop);
        gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, src, cc_dst);
        return DISAS_NEXT;
    default:
        vl = tcg_constant_i32(l);
        gen_helper_clc(cc_op, tcg_env, vl, o->addr1, o->in2);
        set_cc_static(s);
        return DISAS_NEXT;
    }
}
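/*
 * Note on op_clc: the L field holds the operand length minus one, so the
 * l + 1 values 1, 2, 4 and 8 are exactly the power-of-two sizes that can
 * be compared inline with a pair of loads; all other lengths go through
 * the byte-wise helper.
 */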
static DisasJumpType op_clcl(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i32 t1, t2;

    /* r1 and r2 must be even.  */
    if (r1 & 1 || r2 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_constant_i32(r1);
    t2 = tcg_constant_i32(r2);
    gen_helper_clcl(cc_op, tcg_env, t1, t2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clcle(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even.  */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_constant_i32(r1);
    t3 = tcg_constant_i32(r3);
    gen_helper_clcle(cc_op, tcg_env, t1, o->in2, t3);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clclu(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even.  */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_constant_i32(r1);
    t3 = tcg_constant_i32(r3);
    gen_helper_clclu(cc_op, tcg_env, t1, o->in2, t3);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_constant_i32(get_field(s, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_extrl_i64_i32(t1, o->in1);
    gen_helper_clm(cc_op, tcg_env, t1, m3, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clst(DisasContext *s, DisasOps *o)
{
    TCGv_i128 pair = tcg_temp_new_i128();

    gen_helper_clst(pair, tcg_env, regs[0], o->in1, o->in2);
    tcg_gen_extr_i128_i64(o->in2, o->in1, pair);

    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cps(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t = tcg_temp_new_i64();
    tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    tcg_gen_or_i64(o->out, o->out, t);
    return DISAS_NEXT;
}

static DisasJumpType op_cs(DisasContext *s, DisasOps *o)
{
    int d2 = get_field(s, d2);
    int b2 = get_field(s, b2);
    TCGv_i64 addr, cc;

    /* Note that in1 = R3 (new value) and
       in2 = (zero-extended) R1 (expected value).  */

    addr = get_address(s, 0, b2, d2);
    tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
                               get_mem_index(s), s->insn->data | MO_ALIGN);

    /* Are the memory and expected values (un)equal?  Note that this setcond
       produces the output CC value, thus the NE sense of the test.  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
    tcg_gen_extrl_i64_i32(cc_op, cc);
    set_cc_static(s);

    return DISAS_NEXT;
}
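/*
 * CC mapping for COMPARE AND SWAP: CC 0 means equal (store performed),
 * CC 1 means unequal, which is precisely what the TCG_COND_NE setcond in
 * op_cs() produces.
 */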
static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);

    o->out_128 = tcg_temp_new_i128();
    tcg_gen_concat_i64_i128(o->out_128, regs[r1 + 1], regs[r1]);

    /* Note out (R1:R1+1) = expected value and in2 (R3:R3+1) = new value. */
    tcg_gen_atomic_cmpxchg_i128(o->out_128, o->addr1, o->out_128, o->in2_128,
                                get_mem_index(s), MO_BE | MO_128 | MO_ALIGN);

    /*
     * Extract result into cc_dst:cc_src, compare vs the expected value
     * in the as yet unmodified input registers, then update CC_OP.
     */
    tcg_gen_extr_i128_i64(cc_src, cc_dst, o->out_128);
    tcg_gen_xor_i64(cc_dst, cc_dst, regs[r1]);
    tcg_gen_xor_i64(cc_src, cc_src, regs[r1 + 1]);
    tcg_gen_or_i64(cc_dst, cc_dst, cc_src);
    set_cc_nz_u64(s, cc_dst);

    return DISAS_NEXT;
}

static DisasJumpType op_csst(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    TCGv_i32 t_r3 = tcg_constant_i32(r3);

    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        gen_helper_csst_parallel(cc_op, tcg_env, t_r3, o->addr1, o->in2);
    } else {
        gen_helper_csst(cc_op, tcg_env, t_r3, o->addr1, o->in2);
    }

    set_cc_static(s);
    return DISAS_NEXT;
}

#ifndef CONFIG_USER_ONLY
static DisasJumpType op_csp(DisasContext *s, DisasOps *o)
{
    MemOp mop = s->insn->data;
    TCGv_i64 addr, old, cc;
    TCGLabel *lab = gen_new_label();

    /* Note that in1 = R1 (zero-extended expected value),
       out = R1 (original reg), out2 = R1+1 (new value). */

    addr = tcg_temp_new_i64();
    old = tcg_temp_new_i64();
    tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
    tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
                               get_mem_index(s), mop | MO_ALIGN);

    /* Are the memory and expected values (un)equal? */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
    tcg_gen_extrl_i64_i32(cc_op, cc);

    /* Write back the output now, so that it happens before the
       following branch, so that we don't need local temps. */
    if ((mop & MO_SIZE) == MO_32) {
        tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
    } else {
        tcg_gen_mov_i64(o->out, old);
    }

    /* If the comparison was equal, and the LSB of R2 was set,
       then we need to flush the TLB (for all cpus). */
    tcg_gen_xori_i64(cc, cc, 1);
    tcg_gen_and_i64(cc, cc, o->in2);
    tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);

    gen_helper_purge(tcg_env);
    gen_set_label(lab);

    return DISAS_NEXT;
}
#endif

static DisasJumpType op_cvb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t, o->addr1, get_mem_index(s), MO_TEUQ);
    gen_helper_cvb(tcg_env, tcg_constant_i32(get_field(s, r1)), t);
    return DISAS_NEXT;
}

static DisasJumpType op_cvbg(DisasContext *s, DisasOps *o)
{
    TCGv_i128 t = tcg_temp_new_i128();
    tcg_gen_qemu_ld_i128(t, o->addr1, get_mem_index(s), MO_TE | MO_128);
    gen_helper_cvbg(o->out, tcg_env, t);
    return DISAS_NEXT;
}

static DisasJumpType op_cvd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i32 t2 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t2, o->in1);
    gen_helper_cvd(t1, t2);
    tcg_gen_qemu_st_i64(t1, o->in2, get_mem_index(s), MO_TEUQ);
    return DISAS_NEXT;
}

static DisasJumpType op_cvdg(DisasContext *s, DisasOps *o)
{
    TCGv_i128 t = tcg_temp_new_i128();
    gen_helper_cvdg(t, o->in1);
    tcg_gen_qemu_st_i128(t, o->in2, get_mem_index(s), MO_TE | MO_128);
    return DISAS_NEXT;
}

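/*
 * COMPARE AND TRAP: the condition from m3 is inverted so that we can
 * branch around gen_trap() when the trap is not taken; insn->data
 * selects the logical (unsigned) variants.
 */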
static DisasJumpType op_ct(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    TCGLabel *lab = gen_new_label();
    TCGCond c;

    c = tcg_invert_cond(ltgt_cond[m3]);
    if (s->insn->data) {
        c = tcg_unsigned_cond(c);
    }
    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);

    /* Trap. */
    gen_trap(s);

    gen_set_label(lab);
    return DISAS_NEXT;
}

static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i32 tr1, tr2, chk;

    /* R1 and R2 must both be even. */
    if ((r1 | r2) & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
        m3 = 0;
    }

    tr1 = tcg_constant_i32(r1);
    tr2 = tcg_constant_i32(r2);
    chk = tcg_constant_i32(m3);

    switch (s->insn->data) {
    case 12:
        gen_helper_cu12(cc_op, tcg_env, tr1, tr2, chk);
        break;
    case 14:
        gen_helper_cu14(cc_op, tcg_env, tr1, tr2, chk);
        break;
    case 21:
        gen_helper_cu21(cc_op, tcg_env, tr1, tr2, chk);
        break;
    case 24:
        gen_helper_cu24(cc_op, tcg_env, tr1, tr2, chk);
        break;
    case 41:
        gen_helper_cu41(cc_op, tcg_env, tr1, tr2, chk);
        break;
    case 42:
        gen_helper_cu42(cc_op, tcg_env, tr1, tr2, chk);
        break;
    default:
        g_assert_not_reached();
    }

    set_cc_static(s);
    return DISAS_NEXT;
}

#ifndef CONFIG_USER_ONLY
static DisasJumpType op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
    TCGv_i32 func_code = tcg_constant_i32(get_field(s, i2));

    gen_helper_diag(tcg_env, r1, r3, func_code);
    return DISAS_NEXT;
}
#endif

static DisasJumpType op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out, tcg_env, o->in1, o->in2);
    tcg_gen_extr32_i64(o->out2, o->out, o->out);
    return DISAS_NEXT;
}

static DisasJumpType op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out, tcg_env, o->in1, o->in2);
    tcg_gen_extr32_i64(o->out2, o->out, o->out);
    return DISAS_NEXT;
}

static DisasJumpType op_divs64(DisasContext *s, DisasOps *o)
{
    TCGv_i128 t = tcg_temp_new_i128();

    gen_helper_divs64(t, tcg_env, o->in1, o->in2);
    tcg_gen_extr_i128_i64(o->out2, o->out, t);
    return DISAS_NEXT;
}

static DisasJumpType op_divu64(DisasContext *s, DisasOps *o)
{
    TCGv_i128 t = tcg_temp_new_i128();

    gen_helper_divu64(t, tcg_env, o->out, o->out2, o->in2);
    tcg_gen_extr_i128_i64(o->out2, o->out, t);
    return DISAS_NEXT;
}

static DisasJumpType op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out_128, tcg_env, o->in1_128, o->in2_128);
    return DISAS_NEXT;
}

static DisasJumpType op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);
    tcg_gen_ld32u_i64(o->out, tcg_env, offsetof(CPUS390XState, aregs[r2]));
    return DISAS_NEXT;
}

static DisasJumpType op_ecag(DisasContext *s, DisasOps *o)
{
    /* No cache information provided. */
    tcg_gen_movi_i64(o->out, -1);
    return DISAS_NEXT;
}

static DisasJumpType op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, tcg_env, offsetof(CPUS390XState, fpc));
    return DISAS_NEXT;
}

static DisasJumpType op_epsw(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t_cc = tcg_temp_new_i64();

    /* Note the "subsequently" in the PoO, which implies a defined result
       if r1 == r2.  Thus we cannot defer these writes to an output hook. */
    gen_op_calc_cc(s);
    tcg_gen_extu_i32_i64(t_cc, cc_op);
    tcg_gen_shri_i64(t, psw_mask, 32);
    tcg_gen_deposit_i64(t, t, t_cc, 12, 2);
    store_reg32_i64(r1, t);
    if (r2 != 0) {
        store_reg32_i64(r2, psw_mask);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_ex(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    TCGv_i32 ilen;
    TCGv_i64 v1;

    /* Nested EXECUTE is not allowed. */
    if (unlikely(s->ex_value)) {
        gen_program_exception(s, PGM_EXECUTE);
        return DISAS_NORETURN;
    }

    update_psw_addr(s);
    update_cc_op(s);

    if (r1 == 0) {
        v1 = tcg_constant_i64(0);
    } else {
        v1 = regs[r1];
    }

    ilen = tcg_constant_i32(s->ilen);
    gen_helper_ex(tcg_env, ilen, v1, o->in2);

    return DISAS_PC_CC_UPDATED;
}

static DisasJumpType op_fieb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fieb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

static DisasJumpType op_fidb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fidb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

static DisasJumpType op_fixb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fixb(o->out_128, tcg_env, o->in2_128, m34);
    return DISAS_NEXT;
}

static DisasJumpType op_flogr(DisasContext *s, DisasOps *o)
{
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64.  It also lets cc_dst be a convenient
       temporary during our computation. */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64. */
    tcg_gen_clzi_i64(o->out, o->in2, 64);

    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
       value by 64, which is undefined.  But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing. */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out);
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
    return DISAS_NEXT;
}

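/*
 * INSERT CHARACTERS UNDER MASK: contiguous masks collapse into a
 * single load plus deposit.  E.g. m3 == 0x6 loads one halfword and
 * deposits it at bit position base + 8 with length 16, and
 * ccm = 0xffff << pos selects just the inserted bits for the CC.
 */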
static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    int pos, len, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();
    uint64_t ccm;

    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit load. */
        tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_TEUL);
        len = 32;
        goto one_insert;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit load. */
        tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_TEUW);
        len = 16;
        goto one_insert;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit load. */
        tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_UB);
        len = 8;
        goto one_insert;

    one_insert:
        pos = base + ctz32(m3) * 8;
        tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
        ccm = ((1ull << len) - 1) << pos;
        break;

    case 0:
        /* Recognize access exceptions for the first byte. */
        tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_UB);
        gen_op_movi_cc(s, 0);
        return DISAS_NEXT;

    default:
        /* This is going to be a sequence of loads and inserts. */
        pos = base + 32 - 8;
        ccm = 0;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_UB);
                tcg_gen_addi_i64(o->in2, o->in2, 1);
                tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
                ccm |= 0xffull << pos;
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }

    tcg_gen_movi_i64(tmp, ccm);
    gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
    return DISAS_NEXT;
}

static DisasJumpType op_insi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
    return DISAS_NEXT;
}

static DisasJumpType op_ipm(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    gen_op_calc_cc(s);
    t1 = tcg_temp_new_i64();
    tcg_gen_extract_i64(t1, psw_mask, 40, 4);
    t2 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(t2, cc_op);
    tcg_gen_deposit_i64(t1, t1, t2, 4, 60);
    tcg_gen_deposit_i64(o->out, o->out, t1, 24, 8);
    return DISAS_NEXT;
}

#ifndef CONFIG_USER_ONLY
static DisasJumpType op_idte(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m4;

    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
        m4 = tcg_constant_i32(get_field(s, m4));
    } else {
        m4 = tcg_constant_i32(0);
    }
    gen_helper_idte(tcg_env, o->in1, o->in2, m4);
    return DISAS_NEXT;
}

static DisasJumpType op_ipte(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m4;

    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
        m4 = tcg_constant_i32(get_field(s, m4));
    } else {
        m4 = tcg_constant_i32(0);
    }
    gen_helper_ipte(tcg_env, o->in1, o->in2, m4);
    return DISAS_NEXT;
}

static DisasJumpType op_iske(DisasContext *s, DisasOps *o)
{
    gen_helper_iske(o->out, tcg_env, o->in2);
    return DISAS_NEXT;
}
#endif

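/*
 * All message-security-assist functions share one helper, with
 * s->insn->data carrying the function type.  The switch below applies
 * the per-type register specification rules, falling through from the
 * strictest check to the weakest.
 */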
static DisasJumpType op_msa(DisasContext *s, DisasOps *o)
{
    int r1 = have_field(s, r1) ? get_field(s, r1) : 0;
    int r2 = have_field(s, r2) ? get_field(s, r2) : 0;
    int r3 = have_field(s, r3) ? get_field(s, r3) : 0;
    TCGv_i32 t_r1, t_r2, t_r3, type;

    switch (s->insn->data) {
    case S390_FEAT_TYPE_KMA:
        if (r3 == r1 || r3 == r2) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_KMCTR:
        if (r3 & 1 || !r3) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_PPNO:
    case S390_FEAT_TYPE_KMF:
    case S390_FEAT_TYPE_KMC:
    case S390_FEAT_TYPE_KMO:
    case S390_FEAT_TYPE_KM:
        if (r1 & 1 || !r1) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_KMAC:
    case S390_FEAT_TYPE_KIMD:
    case S390_FEAT_TYPE_KLMD:
        if (r2 & 1 || !r2) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_PCKMO:
    case S390_FEAT_TYPE_PCC:
        break;
    default:
        g_assert_not_reached();
    }

    t_r1 = tcg_constant_i32(r1);
    t_r2 = tcg_constant_i32(r2);
    t_r3 = tcg_constant_i32(r3);
    type = tcg_constant_i32(s->insn->data);
    gen_helper_msa(cc_op, tcg_env, t_r1, t_r2, t_r3, type);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_keb(DisasContext *s, DisasOps *o)
{
    gen_helper_keb(cc_op, tcg_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_kdb(DisasContext *s, DisasOps *o)
{
    gen_helper_kdb(cc_op, tcg_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_kxb(DisasContext *s, DisasOps *o)
{
    gen_helper_kxb(cc_op, tcg_env, o->in1_128, o->in2_128);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType help_laa(DisasContext *s, DisasOps *o, bool addu64)
{
    /* The real output is indeed the original value in memory. */
    tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the addition for setting CC. */
    if (addu64) {
        tcg_gen_movi_i64(cc_src, 0);
        tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    } else {
        tcg_gen_add_i64(o->out, o->in1, o->in2);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_laa(DisasContext *s, DisasOps *o)
{
    return help_laa(s, o, false);
}

static DisasJumpType op_laa_addu64(DisasContext *s, DisasOps *o)
{
    return help_laa(s, o, true);
}

static DisasJumpType op_lan(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory. */
    tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC. */
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

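/* LAO and LAX follow the same pattern as LAN above. */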
static DisasJumpType op_lao(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory. */
    tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC. */
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_lax(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory. */
    tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC. */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_ldeb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldeb(o->out, tcg_env, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_ledb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_ledb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_ldxb(o->out, tcg_env, o->in2_128, m34);
    return DISAS_NEXT;
}

static DisasJumpType op_lexb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_lexb(o->out, tcg_env, o->in2_128, m34);
    return DISAS_NEXT;
}

static DisasJumpType op_lxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxdb(o->out_128, tcg_env, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_lxeb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxeb(o->out_128, tcg_env, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_lde(DisasContext *s, DisasOps *o)
{
    tcg_gen_shli_i64(o->out, o->in2, 32);
    return DISAS_NEXT;
}

static DisasJumpType op_llgt(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return DISAS_NEXT;
}

static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_SB);
    return DISAS_NEXT;
}

static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_UB);
    return DISAS_NEXT;
}

static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TESW);
    return DISAS_NEXT;
}

static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUW);
    return DISAS_NEXT;
}

static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_tl(o->out, o->in2, get_mem_index(s),
                       MO_TESL | s->insn->data);
    return DISAS_NEXT;
}

static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_tl(o->out, o->in2, get_mem_index(s),
                       MO_TEUL | s->insn->data);
    return DISAS_NEXT;
}

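/*
 * For the 32- and 64-bit loads, s->insn->data may carry extra MemOp
 * bits (e.g. alignment requirements) from the instruction table.
 */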
static DisasJumpType op_ld64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s),
                        MO_TEUQ | s->insn->data);
    return DISAS_NEXT;
}

static DisasJumpType op_lat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32_i64(get_field(s, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}

static DisasJumpType op_lgat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUQ);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}

static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32h_i64(get_field(s, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}

static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();

    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUL);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}

static DisasJumpType op_llgtat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}

static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;

    if (have_field(s, m3)) {
        /* LOAD * ON CONDITION */
        disas_jcc(s, &c, get_field(s, m3));
    } else {
        /* SELECT */
        disas_jcc(s, &c, get_field(s, m4));
    }

    if (c.is_64) {
        tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
                            o->in2, o->in1);
    } else {
        TCGv_i32 t32 = tcg_temp_new_i32();
        TCGv_i64 t, z;

        tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);

        t = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t, t32);

        z = tcg_constant_i64(0);
        tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
    }

    return DISAS_NEXT;
}

#ifndef CONFIG_USER_ONLY
static DisasJumpType op_lctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_lctl(tcg_env, r1, o->in2, r3);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
    s->exit_to_mainloop = true;
    return DISAS_TOO_MANY;
}

static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_lctlg(tcg_env, r1, o->in2, r3);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
    s->exit_to_mainloop = true;
    return DISAS_TOO_MANY;
}

static DisasJumpType op_lra(DisasContext *s, DisasOps *o)
{
    gen_helper_lra(o->out, tcg_env, o->out, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_lpp(DisasContext *s, DisasOps *o)
{
    tcg_gen_st_i64(o->in2, tcg_env, offsetof(CPUS390XState, pp));
    return DISAS_NEXT;
}

static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o)
{
    TCGv_i64 mask, addr;

    per_breaking_event(s);

    /*
     * Convert the short PSW into the normal PSW, similar to what
     * s390_cpu_load_normal() does.
     */
    mask = tcg_temp_new_i64();
    addr = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(mask, o->in2, get_mem_index(s), MO_TEUQ | MO_ALIGN_8);
    tcg_gen_andi_i64(addr, mask, PSW_MASK_SHORT_ADDR);
    tcg_gen_andi_i64(mask, mask, PSW_MASK_SHORT_CTRL);
    tcg_gen_xori_i64(mask, mask, PSW_MASK_SHORTPSW);
    gen_helper_load_psw(tcg_env, mask, addr);
    return DISAS_NORETURN;
}

static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
                        MO_TEUQ | MO_ALIGN_8);
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_ld_i64(t2, o->in2, get_mem_index(s), MO_TEUQ);
    gen_helper_load_psw(tcg_env, t1, t2);
    return DISAS_NORETURN;
}
#endif

static DisasJumpType op_lam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_lam(tcg_env, r1, o->in2, r3);
    return DISAS_NEXT;
}

static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
        store_reg32_i64(r1, t1);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld_i64(t2, t2, get_mem_index(s), MO_TEUL);
    store_reg32_i64(r1, t1);
    store_reg32_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        return DISAS_NEXT;
    }

    /* Then load the remaining registers.  Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
        store_reg32_i64(r1, t1);
    }
    return DISAS_NEXT;
}

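/*
 * LMH is identical to LM above except that it targets the high word
 * of each register; the first/last-access ordering again ensures any
 * page fault is raised before any register is modified.
 */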
static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
        store_reg32h_i64(r1, t1);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld_i64(t2, t2, get_mem_index(s), MO_TEUL);
    store_reg32h_i64(r1, t1);
    store_reg32h_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        return DISAS_NEXT;
    }

    /* Then load the remaining registers.  Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
        store_reg32h_i64(r1, t1);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld_i64(regs[r1], o->in2, get_mem_index(s), MO_TEUQ);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUQ);
    tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld_i64(regs[r3], t2, get_mem_index(s), MO_TEUQ);
    tcg_gen_mov_i64(regs[r1], t1);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        return DISAS_NEXT;
    }

    /* Then load the remaining registers.  Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t1, 8);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t1);
        tcg_gen_qemu_ld_i64(regs[r1], o->in2, get_mem_index(s), MO_TEUQ);
    }
    return DISAS_NEXT;
}

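/*
 * LOAD PAIR DISJOINT must appear interlocked against other CPUs.  We
 * cannot perform two disjoint atomic loads in one step, so under
 * CF_PARALLEL we fall back to stop-the-world single stepping via
 * EXCP_ATOMIC instead.
 */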
static DisasJumpType op_lpd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 a1, a2;
    MemOp mop = s->insn->data;

    /* In a parallel context, stop the world and single step. */
    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        update_psw_addr(s);
        update_cc_op(s);
        gen_exception(EXCP_ATOMIC);
        return DISAS_NORETURN;
    }

    /* In a serial context, perform the two loads ... */
    a1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
    a2 = get_address(s, 0, get_field(s, b2), get_field(s, d2));
    tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
    tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);

    /* ... and indicate that we performed them while interlocked. */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}

static DisasJumpType op_lpq(DisasContext *s, DisasOps *o)
{
    o->out_128 = tcg_temp_new_i128();
    tcg_gen_qemu_ld_i128(o->out_128, o->in2, get_mem_index(s),
                         MO_TE | MO_128 | MO_ALIGN);
    return DISAS_NEXT;
}

#ifndef CONFIG_USER_ONLY
static DisasJumpType op_lura(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_tl(o->out, o->in2, MMU_REAL_IDX, s->insn->data);
    return DISAS_NEXT;
}
#endif

static DisasJumpType op_lzrb(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, -256);
    return DISAS_NEXT;
}

static DisasJumpType op_lcbb(DisasContext *s, DisasOps *o)
{
    const int64_t block_size = (1ull << (get_field(s, m3) + 6));

    if (get_field(s, m3) > 6) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    tcg_gen_ori_i64(o->addr1, o->addr1, -block_size);
    tcg_gen_neg_i64(o->addr1, o->addr1);
    tcg_gen_movi_i64(o->out, 16);
    tcg_gen_umin_i64(o->out, o->out, o->addr1);
    gen_op_update1_cc_i64(s, CC_OP_LCBB, o->out);
    return DISAS_NEXT;
}

static DisasJumpType op_mc(DisasContext *s, DisasOps *o)
{
    const uint8_t monitor_class = get_field(s, i2);

    if (monitor_class & 0xf0) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

#if !defined(CONFIG_USER_ONLY)
    gen_helper_monitor_call(tcg_env, o->addr1,
                            tcg_constant_i32(monitor_class));
#endif
    /* Defaults to a NOP. */
    return DISAS_NEXT;
}

static DisasJumpType op_mov2(DisasContext *s, DisasOps *o)
{
    o->out = o->in2;
    o->in2 = NULL;
    return DISAS_NEXT;
}

static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s, b2);
    TCGv ar1 = tcg_temp_new_i64();
    int r1 = get_field(s, r1);

    o->out = o->in2;
    o->in2 = NULL;

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 0);
        break;
    case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 1);
        break;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        if (b2) {
            tcg_gen_ld32u_i64(ar1, tcg_env,
                              offsetof(CPUS390XState, aregs[b2]));
        } else {
            tcg_gen_movi_i64(ar1, 0);
        }
        break;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 2);
        break;
    }

    tcg_gen_st32_i64(ar1, tcg_env, offsetof(CPUS390XState, aregs[r1]));
    return DISAS_NEXT;
}

static DisasJumpType op_movx(DisasContext *s, DisasOps *o)
{
    o->out = o->in1;
    o->out2 = o->in2;
    o->in1 = NULL;
    o->in2 = NULL;
    return DISAS_NEXT;
}

static DisasJumpType op_mvc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_mvc(tcg_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_mvcrl(DisasContext *s, DisasOps *o)
{
    gen_helper_mvcrl(tcg_env, regs[0], o->addr1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_mvcin(tcg_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}

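/*
 * Like CLCL earlier, the MVCL family designates even-odd register
 * pairs; an odd register number raises a specification exception
 * before any helper is invoked.
 */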
static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i32 t1, t2;

    /* r1 and r2 must be even. */
    if (r1 & 1 || r2 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_constant_i32(r1);
    t2 = tcg_constant_i32(r2);
    gen_helper_mvcl(cc_op, tcg_env, t1, t2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even. */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_constant_i32(r1);
    t3 = tcg_constant_i32(r3);
    gen_helper_mvcle(cc_op, tcg_env, t1, o->in2, t3);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even. */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_constant_i32(r1);
    t3 = tcg_constant_i32(r3);
    gen_helper_mvclu(cc_op, tcg_env, t1, o->in2, t3);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    gen_helper_mvcos(cc_op, tcg_env, o->addr1, o->in2, regs[r3]);
    set_cc_static(s);
    return DISAS_NEXT;
}

#ifndef CONFIG_USER_ONLY
static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, l1);
    int r3 = get_field(s, r3);
    gen_helper_mvcp(cc_op, tcg_env, regs[r1], o->addr1, o->in2, regs[r3]);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, l1);
    int r3 = get_field(s, r3);
    gen_helper_mvcs(cc_op, tcg_env, regs[r1], o->addr1, o->in2, regs[r3]);
    set_cc_static(s);
    return DISAS_NEXT;
}
#endif

static DisasJumpType op_mvn(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_mvn(tcg_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_mvo(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_mvo(tcg_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 t2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_mvpg(cc_op, tcg_env, regs[0], t1, t2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_mvst(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 t2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_mvst(cc_op, tcg_env, t1, t2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_mvz(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_mvz(tcg_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}

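/*
 * For the widening multiplies, tcg_gen_mul[us]2_i64 produces a
 * 128-bit product with the low half in o->out2 and the high half in
 * o->out, matching the R1 (high) / R1+1 (low) register pair layout.
 */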
static DisasJumpType op_mul(DisasContext *s, DisasOps *o)
{
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_mul128(DisasContext *s, DisasOps *o)
{
    tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_muls128(DisasContext *s, DisasOps *o)
{
    tcg_gen_muls2_i64(o->out2, o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_meeb(DisasContext *s, DisasOps *o)
{
    gen_helper_meeb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_mdeb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdeb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_mdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_mxb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxb(o->out_128, tcg_env, o->in1_128, o->in2_128);
    return DISAS_NEXT;
}

static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxdb(o->out_128, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_maeb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
    gen_helper_maeb(o->out, tcg_env, o->in1, o->in2, r3);
    return DISAS_NEXT;
}

static DisasJumpType op_madb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg(get_field(s, r3));
    gen_helper_madb(o->out, tcg_env, o->in1, o->in2, r3);
    return DISAS_NEXT;
}

static DisasJumpType op_mseb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
    gen_helper_mseb(o->out, tcg_env, o->in1, o->in2, r3);
    return DISAS_NEXT;
}

static DisasJumpType op_msdb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg(get_field(s, r3));
    gen_helper_msdb(o->out, tcg_env, o->in1, o->in2, r3);
    return DISAS_NEXT;
}

static DisasJumpType op_nabs(DisasContext *s, DisasOps *o)
{
    TCGv_i64 z = tcg_constant_i64(0);
    TCGv_i64 n = tcg_temp_new_i64();

    tcg_gen_neg_i64(n, o->in2);
    tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_nabsf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
    return DISAS_NEXT;
}

static DisasJumpType op_nabsf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
    return DISAS_NEXT;
}

static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_nc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_nc(cc_op, tcg_env, l, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_neg(DisasContext *s, DisasOps *o)
{
    tcg_gen_neg_i64(o->out, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_negf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
    return DISAS_NEXT;
}

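/*
 * As with the nabs variants above, float negation is a raw sign-bit
 * flip; no FP exception is possible, so no helper is needed.
 */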
static DisasJumpType op_negf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
    return DISAS_NEXT;
}

static DisasJumpType op_negf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_oc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_oc(cc_op, tcg_env, l, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_or(DisasContext *s, DisasOps *o)
{
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_ori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, t);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}

static DisasJumpType op_oi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                    s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_pack(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_pack(tcg_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_pka(DisasContext *s, DisasOps *o)
{
    int l2 = get_field(s, l2) + 1;
    TCGv_i32 l;

    /* The length must not exceed 32 bytes. */
    if (l2 > 32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_constant_i32(l2);
    gen_helper_pka(tcg_env, o->addr1, o->in2, l);
    return DISAS_NEXT;
}

static DisasJumpType op_pku(DisasContext *s, DisasOps *o)
{
    int l2 = get_field(s, l2) + 1;
    TCGv_i32 l;

    /* The length must be even and must not exceed 64 bytes. */
    if ((l2 & 1) || (l2 > 64)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_constant_i32(l2);
    gen_helper_pku(tcg_env, o->addr1, o->in2, l);
    return DISAS_NEXT;
}

static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o)
{
    const uint8_t m3 = get_field(s, m3);

    if ((m3 & 8) && s390_has_feat(S390_FEAT_MISC_INSTRUCTION_EXT3)) {
        tcg_gen_ctpop_i64(o->out, o->in2);
    } else {
        gen_helper_popcnt(o->out, o->in2);
    }
    return DISAS_NEXT;
}

#ifndef CONFIG_USER_ONLY
static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o)
{
    gen_helper_ptlb(tcg_env);
    return DISAS_NEXT;
}
#endif

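/*
 * ROTATE THEN INSERT SELECTED BITS: i3/i4 select the inserted bit
 * range in big-endian (PoO) numbering.  E.g. i3 = 40, i4 = 47 gives
 * mask = (-1ull >> 40) & (-1ull << 16) = 0x0000000000ff0000, i.e.
 * PoO bits 40-47; i3 > i4 wraps around and selects both ends.
 */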
static DisasJumpType op_risbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s, i3);
    int i4 = get_field(s, i4);
    int i5 = get_field(s, i5);
    int do_zero = i4 & 0x80;
    uint64_t mask, imask, pmask;
    int pos, len, rot;

    /* Adjust the arguments for the specific insn. */
    switch (s->fields.op2) {
    case 0x55: /* risbg */
    case 0x59: /* risbgn */
        i3 &= 63;
        i4 &= 63;
        pmask = ~0;
        break;
    case 0x5d: /* risbhg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0xffffffff00000000ull;
        break;
    case 0x51: /* risblg */
        i3 = (i3 & 31) + 32;
        i4 = (i4 & 31) + 32;
        pmask = 0x00000000ffffffffull;
        break;
    default:
        g_assert_not_reached();
    }

    /* MASK is the set of bits to be inserted from R2. */
    if (i3 <= i4) {
        /* [0...i3---i4...63] */
        mask = (-1ull >> i3) & (-1ull << (63 - i4));
    } else {
        /* [0---i4...i3---63] */
        mask = (-1ull >> i3) | (-1ull << (63 - i4));
    }
    /* For RISBLG/RISBHG, the wrapping is limited to the high/low doubleword. */
    mask &= pmask;

    /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
       insns, we need to keep the other half of the register. */
    imask = ~mask | ~pmask;
    if (do_zero) {
        imask = ~pmask;
    }

    len = i4 - i3 + 1;
    pos = 63 - i4;
    rot = i5 & 63;

    /* In some cases we can implement this with extract. */
    if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
        tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
        return DISAS_NEXT;
    }

    /* In some cases we can implement this with deposit. */
    if (len > 0 && (imask == 0 || ~mask == imask)) {
        /* Note that we rotate the bits to be inserted to the lsb, not to
           the position as described in the PoO. */
        rot = (rot - pos) & 63;
    } else {
        pos = -1;
    }

    /* Rotate the input as necessary. */
    tcg_gen_rotli_i64(o->in2, o->in2, rot);

    /* Insert the selected bits into the output. */
    if (pos >= 0) {
        if (imask == 0) {
            tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
        } else {
            tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
        }
    } else if (imask == 0) {
        tcg_gen_andi_i64(o->out, o->in2, mask);
    } else {
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_andi_i64(o->out, o->out, imask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s, i3);
    int i4 = get_field(s, i4);
    int i5 = get_field(s, i5);
    TCGv_i64 orig_out;
    uint64_t mask;

    /* If this is a test-only form, arrange to discard the result. */
    if (i3 & 0x80) {
        tcg_debug_assert(o->out != NULL);
        orig_out = o->out;
        o->out = tcg_temp_new_i64();
        tcg_gen_mov_i64(o->out, orig_out);
    }

    i3 &= 63;
    i4 &= 63;
    i5 &= 63;

    /* MASK is the set of bits to be operated on from R2.
       Take care for I3/I4 wraparound. */
    mask = ~0ull >> i3;
    if (i3 <= i4) {
        mask ^= ~0ull >> i4 >> 1;
    } else {
        mask |= ~(~0ull >> i4 >> 1);
    }

    /* Rotate the input as necessary. */
    tcg_gen_rotli_i64(o->in2, o->in2, i5);

    /* Operate. */
    switch (s->fields.op2) {
    case 0x54: /* AND */
        tcg_gen_ori_i64(o->in2, o->in2, ~mask);
        tcg_gen_and_i64(o->out, o->out, o->in2);
        break;
    case 0x56: /* OR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
        break;
    case 0x57: /* XOR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_xor_i64(o->out, o->out, o->in2);
        break;
    default:
        abort();
    }

    /* Set the CC. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}

static DisasJumpType op_rev16(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap16_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
    return DISAS_NEXT;
}

static DisasJumpType op_rev32(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap32_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
    return DISAS_NEXT;
}

static DisasJumpType op_rev64(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap64_i64(o->out, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_rll32(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();
    TCGv_i32 to = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, o->in1);
    tcg_gen_extrl_i64_i32(t2, o->in2);
    tcg_gen_rotl_i32(to, t1, t2);
    tcg_gen_extu_i32_i64(o->out, to);
    return DISAS_NEXT;
}

static DisasJumpType op_rll64(DisasContext *s, DisasOps *o)
{
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

#ifndef CONFIG_USER_ONLY
static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o)
{
    gen_helper_rrbe(cc_op, tcg_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_sacf(DisasContext *s, DisasOps *o)
{
    gen_helper_sacf(tcg_env, o->in2);
    /* Addressing mode has changed, so end the block. */
    return DISAS_TOO_MANY;
}
#endif

static DisasJumpType op_sam(DisasContext *s, DisasOps *o)
{
    int sam = s->insn->data;
    TCGv_i64 tsam;
    uint64_t mask;

    switch (sam) {
    case 0:
        mask = 0xffffff;
        break;
    case 1:
        mask = 0x7fffffff;
        break;
    default:
        mask = -1;
        break;
    }

    /* Bizarre but true, we check the address of the current insn for the
       specification exception, not the next to be executed.  Thus the PoO
       documents that Bad Things Happen two bytes before the end. */
    if (s->base.pc_next & ~mask) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    s->pc_tmp &= mask;

    tsam = tcg_constant_i64(sam);
    tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);

    /* Always exit the TB, since we (may have) changed execution mode. */
    return DISAS_TOO_MANY;
}

static DisasJumpType op_sar(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_st32_i64(o->in2, tcg_env, offsetof(CPUS390XState, aregs[r1]));
    return DISAS_NEXT;
}

static DisasJumpType op_seb(DisasContext *s, DisasOps *o)
{
    gen_helper_seb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_sdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sdb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_sxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sxb(o->out_128, tcg_env, o->in1_128, o->in2_128);
    return DISAS_NEXT;
}

static DisasJumpType op_sqeb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqeb(o->out, tcg_env, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_sqdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqdb(o->out, tcg_env, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqxb(o->out_128, tcg_env, o->in2_128);
    return DISAS_NEXT;
}

#ifndef CONFIG_USER_ONLY
static DisasJumpType op_servc(DisasContext *s, DisasOps *o)
{
    gen_helper_servc(cc_op, tcg_env, o->in2, o->in1);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_sigp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_sigp(cc_op, tcg_env, o->in2, r1, r3);
    set_cc_static(s);
    return DISAS_NEXT;
}
#endif

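/*
 * STORE ON CONDITION: rather than emitting a conditional store, we
 * invert the condition and branch around the unconditional store when
 * it is not fulfilled; s->insn->data selects the STOC/STOCG/STOCFH
 * variant.
 */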
static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;
    TCGv_i64 a, h;
    TCGLabel *lab;
    int r1;

    disas_jcc(s, &c, get_field(s, m3));

    /* We want to store when the condition is fulfilled, so branch
       out when it's not. */
    c.cond = tcg_invert_cond(c.cond);

    lab = gen_new_label();
    if (c.is_64) {
        tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
    } else {
        tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
    }

    r1 = get_field(s, r1);
    a = get_address(s, 0, get_field(s, b2), get_field(s, d2));
    switch (s->insn->data) {
    case 1: /* STOCG */
        tcg_gen_qemu_st_i64(regs[r1], a, get_mem_index(s), MO_TEUQ);
        break;
    case 0: /* STOC */
        tcg_gen_qemu_st_i64(regs[r1], a, get_mem_index(s), MO_TEUL);
        break;
    case 2: /* STOCFH */
        h = tcg_temp_new_i64();
        tcg_gen_shri_i64(h, regs[r1], 32);
        tcg_gen_qemu_st_i64(h, a, get_mem_index(s), MO_TEUL);
        break;
    default:
        g_assert_not_reached();
    }

    gen_set_label(lab);
    return DISAS_NEXT;
}

static DisasJumpType op_sla(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t;
    uint64_t sign = 1ull << s->insn->data;
    if (s->insn->data == 31) {
        t = tcg_temp_new_i64();
        tcg_gen_shli_i64(t, o->in1, 32);
    } else {
        t = o->in1;
    }
    gen_op_update2_cc_i64(s, CC_OP_SLA, t, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit.  Copy that over from the source unchanged. */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return DISAS_NEXT;
}

static DisasJumpType op_sll(DisasContext *s, DisasOps *o)
{
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_sra(DisasContext *s, DisasOps *o)
{
    tcg_gen_sar_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_srl(DisasContext *s, DisasOps *o)
{
    tcg_gen_shr_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_sfpc(DisasContext *s, DisasOps *o)
{
    gen_helper_sfpc(tcg_env, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_sfas(DisasContext *s, DisasOps *o)
{
    gen_helper_sfas(tcg_env, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_srnm(DisasContext *s, DisasOps *o)
{
    /* Bits other than 62 and 63 are ignored.  Bit 29 is set to zero. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0x3ull);
    gen_helper_srnm(tcg_env, o->addr1);
    return DISAS_NEXT;
}

static DisasJumpType op_srnmb(DisasContext *s, DisasOps *o)
{
    /* Bits 0-55 are ignored. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0xffull);
    gen_helper_srnm(tcg_env, o->addr1);
    return DISAS_NEXT;
}

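/*
 * SRNMT sets the DFP rounding mode.  Since DFP arithmetic itself is
 * not implemented, the bits can be deposited straight into the FPC
 * image without a helper.
 */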
static DisasJumpType op_srnmt(DisasContext *s, DisasOps *o)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Bits other than 61-63 are ignored. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0x7ull);

    /* No need to call a helper, we don't implement dfp */
    tcg_gen_ld32u_i64(tmp, tcg_env, offsetof(CPUS390XState, fpc));
    tcg_gen_deposit_i64(tmp, tmp, o->addr1, 4, 3);
    tcg_gen_st32_i64(tmp, tcg_env, offsetof(CPUS390XState, fpc));
    return DISAS_NEXT;
}

static DisasJumpType op_spm(DisasContext *s, DisasOps *o)
{
    tcg_gen_extrl_i64_i32(cc_op, o->in1);
    tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
    set_cc_static(s);

    tcg_gen_shri_i64(o->in1, o->in1, 24);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
    return DISAS_NEXT;
}

static DisasJumpType op_ectg(DisasContext *s, DisasOps *o)
{
    int b1 = get_field(s, b1);
    int d1 = get_field(s, d1);
    int b2 = get_field(s, b2);
    int d2 = get_field(s, d2);
    int r3 = get_field(s, r3);
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* fetch all operands first */
    o->in1 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in1, regs[b1], d1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in2, regs[b2], d2);
    o->addr1 = tcg_temp_new_i64();
    gen_addi_and_wrap_i64(s, o->addr1, regs[r3], 0);

    /* load the third operand into r3 before modifying anything */
    tcg_gen_qemu_ld_i64(regs[r3], o->addr1, get_mem_index(s), MO_TEUQ);

    /* subtract CPU timer from first operand and store in GR0 */
    gen_helper_stpt(tmp, tcg_env);
    tcg_gen_sub_i64(regs[0], o->in1, tmp);

    /* store second operand in GR1 */
    tcg_gen_mov_i64(regs[1], o->in2);
    return DISAS_NEXT;
}

#ifndef CONFIG_USER_ONLY
static DisasJumpType op_spka(DisasContext *s, DisasOps *o)
{
    tcg_gen_shri_i64(o->in2, o->in2, 4);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
    return DISAS_NEXT;
}

static DisasJumpType op_sske(DisasContext *s, DisasOps *o)
{
    gen_helper_sske(tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static void gen_check_psw_mask(DisasContext *s)
{
    TCGv_i64 reserved = tcg_temp_new_i64();
    TCGLabel *ok = gen_new_label();

    tcg_gen_andi_i64(reserved, psw_mask, PSW_MASK_RESERVED);
    tcg_gen_brcondi_i64(TCG_COND_EQ, reserved, 0, ok);
    gen_program_exception(s, PGM_SPECIFICATION);
    gen_set_label(ok);
}

static DisasJumpType op_ssm(DisasContext *s, DisasOps *o)
{
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);

    gen_check_psw_mask(s);

    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
    s->exit_to_mainloop = true;
    return DISAS_TOO_MANY;
}

static DisasJumpType op_stap(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, tcg_env, offsetof(CPUS390XState, core_id));
    return DISAS_NEXT;
}
#endif

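/*
 * The emulated TOD clock is always in the "set" state, so STCK and
 * STCKE unconditionally report CC 0.
 */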
*/ 4160 gen_op_movi_cc(s, 0); 4161 return DISAS_NEXT; 4162 } 4163 4164 static DisasJumpType op_stcke(DisasContext *s, DisasOps *o) 4165 { 4166 TCGv_i64 c1 = tcg_temp_new_i64(); 4167 TCGv_i64 c2 = tcg_temp_new_i64(); 4168 TCGv_i64 todpr = tcg_temp_new_i64(); 4169 gen_helper_stck(c1, tcg_env); 4170 /* 16-bit value stored in a uint32_t (only valid bits set) */ 4171 tcg_gen_ld32u_i64(todpr, tcg_env, offsetof(CPUS390XState, todpr)); 4172 /* Shift the 64-bit value into its place as a zero-extended 4173 104-bit value. Note that "bit positions 64-103 are always 4174 non-zero so that they compare differently to STCK"; we set 4175 the least significant bit to 1. */ 4176 tcg_gen_shli_i64(c2, c1, 56); 4177 tcg_gen_shri_i64(c1, c1, 8); 4178 tcg_gen_ori_i64(c2, c2, 0x10000); 4179 tcg_gen_or_i64(c2, c2, todpr); 4180 tcg_gen_qemu_st_i64(c1, o->in2, get_mem_index(s), MO_TEUQ); 4181 tcg_gen_addi_i64(o->in2, o->in2, 8); 4182 tcg_gen_qemu_st_i64(c2, o->in2, get_mem_index(s), MO_TEUQ); 4183 /* ??? We don't implement clock states. */ 4184 gen_op_movi_cc(s, 0); 4185 return DISAS_NEXT; 4186 } 4187 4188 #ifndef CONFIG_USER_ONLY 4189 static DisasJumpType op_sck(DisasContext *s, DisasOps *o) 4190 { 4191 gen_helper_sck(cc_op, tcg_env, o->in2); 4192 set_cc_static(s); 4193 return DISAS_NEXT; 4194 } 4195 4196 static DisasJumpType op_sckc(DisasContext *s, DisasOps *o) 4197 { 4198 gen_helper_sckc(tcg_env, o->in2); 4199 return DISAS_NEXT; 4200 } 4201 4202 static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o) 4203 { 4204 gen_helper_sckpf(tcg_env, regs[0]); 4205 return DISAS_NEXT; 4206 } 4207 4208 static DisasJumpType op_stckc(DisasContext *s, DisasOps *o) 4209 { 4210 gen_helper_stckc(o->out, tcg_env); 4211 return DISAS_NEXT; 4212 } 4213 4214 static DisasJumpType op_stctg(DisasContext *s, DisasOps *o) 4215 { 4216 TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1)); 4217 TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3)); 4218 4219 gen_helper_stctg(tcg_env, r1, o->in2, r3); 4220 return DISAS_NEXT; 4221 } 4222 4223 static DisasJumpType op_stctl(DisasContext *s, DisasOps *o) 4224 { 4225 TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1)); 4226 TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3)); 4227 4228 gen_helper_stctl(tcg_env, r1, o->in2, r3); 4229 return DISAS_NEXT; 4230 } 4231 4232 static DisasJumpType op_stidp(DisasContext *s, DisasOps *o) 4233 { 4234 tcg_gen_ld_i64(o->out, tcg_env, offsetof(CPUS390XState, cpuid)); 4235 return DISAS_NEXT; 4236 } 4237 4238 static DisasJumpType op_spt(DisasContext *s, DisasOps *o) 4239 { 4240 gen_helper_spt(tcg_env, o->in2); 4241 return DISAS_NEXT; 4242 } 4243 4244 static DisasJumpType op_stfl(DisasContext *s, DisasOps *o) 4245 { 4246 gen_helper_stfl(tcg_env); 4247 return DISAS_NEXT; 4248 } 4249 4250 static DisasJumpType op_stpt(DisasContext *s, DisasOps *o) 4251 { 4252 gen_helper_stpt(o->out, tcg_env); 4253 return DISAS_NEXT; 4254 } 4255 4256 static DisasJumpType op_stsi(DisasContext *s, DisasOps *o) 4257 { 4258 gen_helper_stsi(cc_op, tcg_env, o->in2, regs[0], regs[1]); 4259 set_cc_static(s); 4260 return DISAS_NEXT; 4261 } 4262 4263 static DisasJumpType op_spx(DisasContext *s, DisasOps *o) 4264 { 4265 gen_helper_spx(tcg_env, o->in2); 4266 return DISAS_NEXT; 4267 } 4268 4269 static DisasJumpType op_xsch(DisasContext *s, DisasOps *o) 4270 { 4271 gen_helper_xsch(tcg_env, regs[1]); 4272 set_cc_static(s); 4273 return DISAS_NEXT; 4274 } 4275 4276 static DisasJumpType op_csch(DisasContext *s, DisasOps *o) 4277 { 4278 gen_helper_csch(tcg_env, regs[1]); 4279 set_cc_static(s); 4280 return DISAS_NEXT;
4281 } 4282 4283 static DisasJumpType op_hsch(DisasContext *s, DisasOps *o) 4284 { 4285 gen_helper_hsch(tcg_env, regs[1]); 4286 set_cc_static(s); 4287 return DISAS_NEXT; 4288 } 4289 4290 static DisasJumpType op_msch(DisasContext *s, DisasOps *o) 4291 { 4292 gen_helper_msch(tcg_env, regs[1], o->in2); 4293 set_cc_static(s); 4294 return DISAS_NEXT; 4295 } 4296 4297 static DisasJumpType op_rchp(DisasContext *s, DisasOps *o) 4298 { 4299 gen_helper_rchp(tcg_env, regs[1]); 4300 set_cc_static(s); 4301 return DISAS_NEXT; 4302 } 4303 4304 static DisasJumpType op_rsch(DisasContext *s, DisasOps *o) 4305 { 4306 gen_helper_rsch(tcg_env, regs[1]); 4307 set_cc_static(s); 4308 return DISAS_NEXT; 4309 } 4310 4311 static DisasJumpType op_sal(DisasContext *s, DisasOps *o) 4312 { 4313 gen_helper_sal(tcg_env, regs[1]); 4314 return DISAS_NEXT; 4315 } 4316 4317 static DisasJumpType op_schm(DisasContext *s, DisasOps *o) 4318 { 4319 gen_helper_schm(tcg_env, regs[1], regs[2], o->in2); 4320 return DISAS_NEXT; 4321 } 4322 4323 static DisasJumpType op_siga(DisasContext *s, DisasOps *o) 4324 { 4325 /* From KVM code: Not provided, set CC = 3 for subchannel not operational */ 4326 gen_op_movi_cc(s, 3); 4327 return DISAS_NEXT; 4328 } 4329 4330 static DisasJumpType op_stcps(DisasContext *s, DisasOps *o) 4331 { 4332 /* The instruction is suppressed if not provided. */ 4333 return DISAS_NEXT; 4334 } 4335 4336 static DisasJumpType op_ssch(DisasContext *s, DisasOps *o) 4337 { 4338 gen_helper_ssch(tcg_env, regs[1], o->in2); 4339 set_cc_static(s); 4340 return DISAS_NEXT; 4341 } 4342 4343 static DisasJumpType op_stsch(DisasContext *s, DisasOps *o) 4344 { 4345 gen_helper_stsch(tcg_env, regs[1], o->in2); 4346 set_cc_static(s); 4347 return DISAS_NEXT; 4348 } 4349 4350 static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o) 4351 { 4352 gen_helper_stcrw(tcg_env, o->in2); 4353 set_cc_static(s); 4354 return DISAS_NEXT; 4355 } 4356 4357 static DisasJumpType op_tpi(DisasContext *s, DisasOps *o) 4358 { 4359 gen_helper_tpi(cc_op, tcg_env, o->addr1); 4360 set_cc_static(s); 4361 return DISAS_NEXT; 4362 } 4363 4364 static DisasJumpType op_tsch(DisasContext *s, DisasOps *o) 4365 { 4366 gen_helper_tsch(tcg_env, regs[1], o->in2); 4367 set_cc_static(s); 4368 return DISAS_NEXT; 4369 } 4370 4371 static DisasJumpType op_chsc(DisasContext *s, DisasOps *o) 4372 { 4373 gen_helper_chsc(tcg_env, o->in2); 4374 set_cc_static(s); 4375 return DISAS_NEXT; 4376 } 4377 4378 static DisasJumpType op_stpx(DisasContext *s, DisasOps *o) 4379 { 4380 tcg_gen_ld_i64(o->out, tcg_env, offsetof(CPUS390XState, psa)); 4381 tcg_gen_andi_i64(o->out, o->out, 0x7fffe000); 4382 return DISAS_NEXT; 4383 } 4384 4385 static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o) 4386 { 4387 uint64_t i2 = get_field(s, i2); 4388 TCGv_i64 t; 4389 4390 /* It is important to do what the instruction name says: STORE THEN. 4391 If we let the output hook perform the store then if we fault and 4392 restart, we'll have the wrong SYSTEM MASK in place. */ 4393 t = tcg_temp_new_i64(); 4394 tcg_gen_shri_i64(t, psw_mask, 56); 4395 tcg_gen_qemu_st_i64(t, o->addr1, get_mem_index(s), MO_UB); 4396 4397 if (s->fields.op == 0xac) { 4398 tcg_gen_andi_i64(psw_mask, psw_mask, 4399 (i2 << 56) | 0x00ffffffffffffffull); 4400 } else { 4401 tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56); 4402 } 4403 4404 gen_check_psw_mask(s); 4405 4406 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. 
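      Setting mask bits may make an already-pending interruption
      deliverable, and only the main loop can act on that.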
*/ 4407 s->exit_to_mainloop = true; 4408 return DISAS_TOO_MANY; 4409 } 4410 4411 static DisasJumpType op_stura(DisasContext *s, DisasOps *o) 4412 { 4413 tcg_gen_qemu_st_tl(o->in1, o->in2, MMU_REAL_IDX, s->insn->data); 4414 4415 if (s->base.tb->flags & FLAG_MASK_PER) { 4416 update_psw_addr(s); 4417 gen_helper_per_store_real(tcg_env); 4418 } 4419 return DISAS_NEXT; 4420 } 4421 #endif 4422 4423 static DisasJumpType op_stfle(DisasContext *s, DisasOps *o) 4424 { 4425 gen_helper_stfle(cc_op, tcg_env, o->in2); 4426 set_cc_static(s); 4427 return DISAS_NEXT; 4428 } 4429 4430 static DisasJumpType op_st8(DisasContext *s, DisasOps *o) 4431 { 4432 tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s), MO_UB); 4433 return DISAS_NEXT; 4434 } 4435 4436 static DisasJumpType op_st16(DisasContext *s, DisasOps *o) 4437 { 4438 tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s), MO_TEUW); 4439 return DISAS_NEXT; 4440 } 4441 4442 static DisasJumpType op_st32(DisasContext *s, DisasOps *o) 4443 { 4444 tcg_gen_qemu_st_tl(o->in1, o->in2, get_mem_index(s), 4445 MO_TEUL | s->insn->data); 4446 return DISAS_NEXT; 4447 } 4448 4449 static DisasJumpType op_st64(DisasContext *s, DisasOps *o) 4450 { 4451 tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s), 4452 MO_TEUQ | s->insn->data); 4453 return DISAS_NEXT; 4454 } 4455 4456 static DisasJumpType op_stam(DisasContext *s, DisasOps *o) 4457 { 4458 TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1)); 4459 TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3)); 4460 4461 gen_helper_stam(tcg_env, r1, o->in2, r3); 4462 return DISAS_NEXT; 4463 } 4464 4465 static DisasJumpType op_stcm(DisasContext *s, DisasOps *o) 4466 { 4467 int m3 = get_field(s, m3); 4468 int pos, base = s->insn->data; 4469 TCGv_i64 tmp = tcg_temp_new_i64(); 4470 4471 pos = base + ctz32(m3) * 8; 4472 switch (m3) { 4473 case 0xf: 4474 /* Effectively a 32-bit store. */ 4475 tcg_gen_shri_i64(tmp, o->in1, pos); 4476 tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_TEUL); 4477 break; 4478 4479 case 0xc: 4480 case 0x6: 4481 case 0x3: 4482 /* Effectively a 16-bit store. */ 4483 tcg_gen_shri_i64(tmp, o->in1, pos); 4484 tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_TEUW); 4485 break; 4486 4487 case 0x8: 4488 case 0x4: 4489 case 0x2: 4490 case 0x1: 4491 /* Effectively an 8-bit store. */ 4492 tcg_gen_shri_i64(tmp, o->in1, pos); 4493 tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_UB); 4494 break; 4495 4496 default: 4497 /* This is going to be a sequence of shifts and stores. */ 4498 pos = base + 32 - 8; 4499 while (m3) { 4500 if (m3 & 0x8) { 4501 tcg_gen_shri_i64(tmp, o->in1, pos); 4502 tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_UB); 4503 tcg_gen_addi_i64(o->in2, o->in2, 1); 4504 } 4505 m3 = (m3 << 1) & 0xf; 4506 pos -= 8; 4507 } 4508 break; 4509 } 4510 return DISAS_NEXT; 4511 } 4512 4513 static DisasJumpType op_stm(DisasContext *s, DisasOps *o) 4514 { 4515 int r1 = get_field(s, r1); 4516 int r3 = get_field(s, r3); 4517 int size = s->insn->data; 4518 TCGv_i64 tsize = tcg_constant_i64(size); 4519 4520 while (1) { 4521 tcg_gen_qemu_st_i64(regs[r1], o->in2, get_mem_index(s), 4522 size == 8 ? 
MO_TEUQ : MO_TEUL); 4523 if (r1 == r3) { 4524 break; 4525 } 4526 tcg_gen_add_i64(o->in2, o->in2, tsize); 4527 r1 = (r1 + 1) & 15; 4528 } 4529 4530 return DISAS_NEXT; 4531 } 4532 4533 static DisasJumpType op_stmh(DisasContext *s, DisasOps *o) 4534 { 4535 int r1 = get_field(s, r1); 4536 int r3 = get_field(s, r3); 4537 TCGv_i64 t = tcg_temp_new_i64(); 4538 TCGv_i64 t4 = tcg_constant_i64(4); 4539 TCGv_i64 t32 = tcg_constant_i64(32); 4540 4541 while (1) { 4542 tcg_gen_shl_i64(t, regs[r1], t32); 4543 tcg_gen_qemu_st_i64(t, o->in2, get_mem_index(s), MO_TEUL); 4544 if (r1 == r3) { 4545 break; 4546 } 4547 tcg_gen_add_i64(o->in2, o->in2, t4); 4548 r1 = (r1 + 1) & 15; 4549 } 4550 return DISAS_NEXT; 4551 } 4552 4553 static DisasJumpType op_stpq(DisasContext *s, DisasOps *o) 4554 { 4555 TCGv_i128 t16 = tcg_temp_new_i128(); 4556 4557 tcg_gen_concat_i64_i128(t16, o->out2, o->out); 4558 tcg_gen_qemu_st_i128(t16, o->in2, get_mem_index(s), 4559 MO_TE | MO_128 | MO_ALIGN); 4560 return DISAS_NEXT; 4561 } 4562 4563 static DisasJumpType op_srst(DisasContext *s, DisasOps *o) 4564 { 4565 TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1)); 4566 TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2)); 4567 4568 gen_helper_srst(tcg_env, r1, r2); 4569 set_cc_static(s); 4570 return DISAS_NEXT; 4571 } 4572 4573 static DisasJumpType op_srstu(DisasContext *s, DisasOps *o) 4574 { 4575 TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1)); 4576 TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2)); 4577 4578 gen_helper_srstu(tcg_env, r1, r2); 4579 set_cc_static(s); 4580 return DISAS_NEXT; 4581 } 4582 4583 static DisasJumpType op_sub(DisasContext *s, DisasOps *o) 4584 { 4585 tcg_gen_sub_i64(o->out, o->in1, o->in2); 4586 return DISAS_NEXT; 4587 } 4588 4589 static DisasJumpType op_subu64(DisasContext *s, DisasOps *o) 4590 { 4591 tcg_gen_movi_i64(cc_src, 0); 4592 tcg_gen_sub2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src); 4593 return DISAS_NEXT; 4594 } 4595 4596 /* Compute borrow (0, -1) into cc_src. */ 4597 static void compute_borrow(DisasContext *s) 4598 { 4599 switch (s->cc_op) { 4600 case CC_OP_SUBU: 4601 /* The borrow value is already in cc_src (0,-1). */ 4602 break; 4603 default: 4604 gen_op_calc_cc(s); 4605 /* fall through */ 4606 case CC_OP_STATIC: 4607 /* The carry flag is the msb of CC; compute into cc_src. */ 4608 tcg_gen_extu_i32_i64(cc_src, cc_op); 4609 tcg_gen_shri_i64(cc_src, cc_src, 1); 4610 /* fall through */ 4611 case CC_OP_ADDU: 4612 /* Convert carry (1,0) to borrow (0,-1). */ 4613 tcg_gen_subi_i64(cc_src, cc_src, 1); 4614 break; 4615 } 4616 } 4617 4618 static DisasJumpType op_subb32(DisasContext *s, DisasOps *o) 4619 { 4620 compute_borrow(s); 4621 4622 /* Borrow is {0, -1}, so add to subtract. */ 4623 tcg_gen_add_i64(o->out, o->in1, cc_src); 4624 tcg_gen_sub_i64(o->out, o->out, o->in2); 4625 return DISAS_NEXT; 4626 } 4627 4628 static DisasJumpType op_subb64(DisasContext *s, DisasOps *o) 4629 { 4630 compute_borrow(s); 4631 4632 /* 4633 * Borrow is {0, -1}, so add to subtract; replicate the 4634 * borrow input to produce 128-bit -1 for the addition. 
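    * Concretely, with an incoming borrow (cc_src = -1) the add2 below
    * computes the 128-bit value {0:in1} - 1, and the sub2 then
    * subtracts {0:in2}: the low half of the result is in1 - in2 - 1
    * and the high half is 0 or -1, the borrow-out in exactly the form
    * that compute_borrow() consumes for the next dependent insn.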
4635 */ 4636 TCGv_i64 zero = tcg_constant_i64(0); 4637 tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, cc_src); 4638 tcg_gen_sub2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero); 4639 4640 return DISAS_NEXT; 4641 } 4642 4643 static DisasJumpType op_svc(DisasContext *s, DisasOps *o) 4644 { 4645 TCGv_i32 t; 4646 4647 update_psw_addr(s); 4648 update_cc_op(s); 4649 4650 t = tcg_constant_i32(get_field(s, i1) & 0xff); 4651 tcg_gen_st_i32(t, tcg_env, offsetof(CPUS390XState, int_svc_code)); 4652 4653 t = tcg_constant_i32(s->ilen); 4654 tcg_gen_st_i32(t, tcg_env, offsetof(CPUS390XState, int_svc_ilen)); 4655 4656 gen_exception(EXCP_SVC); 4657 return DISAS_NORETURN; 4658 } 4659 4660 static DisasJumpType op_tam(DisasContext *s, DisasOps *o) 4661 { 4662 int cc = 0; 4663 4664 cc |= (s->base.tb->flags & FLAG_MASK_64) ? 2 : 0; 4665 cc |= (s->base.tb->flags & FLAG_MASK_32) ? 1 : 0; 4666 gen_op_movi_cc(s, cc); 4667 return DISAS_NEXT; 4668 } 4669 4670 static DisasJumpType op_tceb(DisasContext *s, DisasOps *o) 4671 { 4672 gen_helper_tceb(cc_op, tcg_env, o->in1, o->in2); 4673 set_cc_static(s); 4674 return DISAS_NEXT; 4675 } 4676 4677 static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o) 4678 { 4679 gen_helper_tcdb(cc_op, tcg_env, o->in1, o->in2); 4680 set_cc_static(s); 4681 return DISAS_NEXT; 4682 } 4683 4684 static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o) 4685 { 4686 gen_helper_tcxb(cc_op, tcg_env, o->in1_128, o->in2); 4687 set_cc_static(s); 4688 return DISAS_NEXT; 4689 } 4690 4691 #ifndef CONFIG_USER_ONLY 4692 4693 static DisasJumpType op_testblock(DisasContext *s, DisasOps *o) 4694 { 4695 gen_helper_testblock(cc_op, tcg_env, o->in2); 4696 set_cc_static(s); 4697 return DISAS_NEXT; 4698 } 4699 4700 static DisasJumpType op_tprot(DisasContext *s, DisasOps *o) 4701 { 4702 gen_helper_tprot(cc_op, tcg_env, o->addr1, o->in2); 4703 set_cc_static(s); 4704 return DISAS_NEXT; 4705 } 4706 4707 #endif 4708 4709 static DisasJumpType op_tp(DisasContext *s, DisasOps *o) 4710 { 4711 TCGv_i32 l1 = tcg_constant_i32(get_field(s, l1) + 1); 4712 4713 gen_helper_tp(cc_op, tcg_env, o->addr1, l1); 4714 set_cc_static(s); 4715 return DISAS_NEXT; 4716 } 4717 4718 static DisasJumpType op_tr(DisasContext *s, DisasOps *o) 4719 { 4720 TCGv_i32 l = tcg_constant_i32(get_field(s, l1)); 4721 4722 gen_helper_tr(tcg_env, l, o->addr1, o->in2); 4723 set_cc_static(s); 4724 return DISAS_NEXT; 4725 } 4726 4727 static DisasJumpType op_tre(DisasContext *s, DisasOps *o) 4728 { 4729 TCGv_i128 pair = tcg_temp_new_i128(); 4730 4731 gen_helper_tre(pair, tcg_env, o->out, o->out2, o->in2); 4732 tcg_gen_extr_i128_i64(o->out2, o->out, pair); 4733 set_cc_static(s); 4734 return DISAS_NEXT; 4735 } 4736 4737 static DisasJumpType op_trt(DisasContext *s, DisasOps *o) 4738 { 4739 TCGv_i32 l = tcg_constant_i32(get_field(s, l1)); 4740 4741 gen_helper_trt(cc_op, tcg_env, l, o->addr1, o->in2); 4742 set_cc_static(s); 4743 return DISAS_NEXT; 4744 } 4745 4746 static DisasJumpType op_trtr(DisasContext *s, DisasOps *o) 4747 { 4748 TCGv_i32 l = tcg_constant_i32(get_field(s, l1)); 4749 4750 gen_helper_trtr(cc_op, tcg_env, l, o->addr1, o->in2); 4751 set_cc_static(s); 4752 return DISAS_NEXT; 4753 } 4754 4755 static DisasJumpType op_trXX(DisasContext *s, DisasOps *o) 4756 { 4757 TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1)); 4758 TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2)); 4759 TCGv_i32 sizes = tcg_constant_i32(s->insn->opc & 3); 4760 TCGv_i32 tst = tcg_temp_new_i32(); 4761 int m3 = get_field(s, m3); 4762 4763 if 
(!s390_has_feat(S390_FEAT_ETF2_ENH)) { 4764 m3 = 0; 4765 } 4766 if (m3 & 1) { 4767 tcg_gen_movi_i32(tst, -1); 4768 } else { 4769 tcg_gen_extrl_i64_i32(tst, regs[0]); 4770 if (s->insn->opc & 3) { 4771 tcg_gen_ext8u_i32(tst, tst); 4772 } else { 4773 tcg_gen_ext16u_i32(tst, tst); 4774 } 4775 } 4776 gen_helper_trXX(cc_op, tcg_env, r1, r2, tst, sizes); 4777 4778 set_cc_static(s); 4779 return DISAS_NEXT; 4780 } 4781 4782 static DisasJumpType op_ts(DisasContext *s, DisasOps *o) 4783 { 4784 TCGv_i32 ff = tcg_constant_i32(0xff); 4785 TCGv_i32 t1 = tcg_temp_new_i32(); 4786 4787 tcg_gen_atomic_xchg_i32(t1, o->in2, ff, get_mem_index(s), MO_UB); 4788 tcg_gen_extract_i32(cc_op, t1, 7, 1); 4789 set_cc_static(s); 4790 return DISAS_NEXT; 4791 } 4792 4793 static DisasJumpType op_unpk(DisasContext *s, DisasOps *o) 4794 { 4795 TCGv_i32 l = tcg_constant_i32(get_field(s, l1)); 4796 4797 gen_helper_unpk(tcg_env, l, o->addr1, o->in2); 4798 return DISAS_NEXT; 4799 } 4800 4801 static DisasJumpType op_unpka(DisasContext *s, DisasOps *o) 4802 { 4803 int l1 = get_field(s, l1) + 1; 4804 TCGv_i32 l; 4805 4806 /* The length must not exceed 32 bytes. */ 4807 if (l1 > 32) { 4808 gen_program_exception(s, PGM_SPECIFICATION); 4809 return DISAS_NORETURN; 4810 } 4811 l = tcg_constant_i32(l1); 4812 gen_helper_unpka(cc_op, tcg_env, o->addr1, l, o->in2); 4813 set_cc_static(s); 4814 return DISAS_NEXT; 4815 } 4816 4817 static DisasJumpType op_unpku(DisasContext *s, DisasOps *o) 4818 { 4819 int l1 = get_field(s, l1) + 1; 4820 TCGv_i32 l; 4821 4822 /* The length must be even and must not exceed 64 bytes. */ 4823 if ((l1 & 1) || (l1 > 64)) { 4824 gen_program_exception(s, PGM_SPECIFICATION); 4825 return DISAS_NORETURN; 4826 } 4827 l = tcg_constant_i32(l1); 4828 gen_helper_unpku(cc_op, tcg_env, o->addr1, l, o->in2); 4829 set_cc_static(s); 4830 return DISAS_NEXT; 4831 } 4832 4833 4834 static DisasJumpType op_xc(DisasContext *s, DisasOps *o) 4835 { 4836 int d1 = get_field(s, d1); 4837 int d2 = get_field(s, d2); 4838 int b1 = get_field(s, b1); 4839 int b2 = get_field(s, b2); 4840 int l = get_field(s, l1); 4841 TCGv_i32 t32; 4842 4843 o->addr1 = get_address(s, 0, b1, d1); 4844 4845 /* If the addresses are identical, this is a store/memset of zero. */ 4846 if (b1 == b2 && d1 == d2 && (l + 1) <= 32) { 4847 o->in2 = tcg_constant_i64(0); 4848 4849 l++; 4850 while (l >= 8) { 4851 tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UQ); 4852 l -= 8; 4853 if (l > 0) { 4854 tcg_gen_addi_i64(o->addr1, o->addr1, 8); 4855 } 4856 } 4857 if (l >= 4) { 4858 tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UL); 4859 l -= 4; 4860 if (l > 0) { 4861 tcg_gen_addi_i64(o->addr1, o->addr1, 4); 4862 } 4863 } 4864 if (l >= 2) { 4865 tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UW); 4866 l -= 2; 4867 if (l > 0) { 4868 tcg_gen_addi_i64(o->addr1, o->addr1, 2); 4869 } 4870 } 4871 if (l) { 4872 tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UB); 4873 } 4874 gen_op_movi_cc(s, 0); 4875 return DISAS_NEXT; 4876 } 4877 4878 /* But in general we'll defer to a helper.
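   XC processes its operands one byte at a time, left to right, with
   each result byte stored before the next pair is fetched, so
   destructive overlap is architecturally meaningful. Conceptually
   (sketch only, not the helper's actual implementation):

       for (i = 0; i <= l; i++) {
           dst[i] ^= src[i];    /* each store visible to later fetches */
       }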
*/ 4879 o->in2 = get_address(s, 0, b2, d2); 4880 t32 = tcg_constant_i32(l); 4881 gen_helper_xc(cc_op, tcg_env, t32, o->addr1, o->in2); 4882 set_cc_static(s); 4883 return DISAS_NEXT; 4884 } 4885 4886 static DisasJumpType op_xor(DisasContext *s, DisasOps *o) 4887 { 4888 tcg_gen_xor_i64(o->out, o->in1, o->in2); 4889 return DISAS_NEXT; 4890 } 4891 4892 static DisasJumpType op_xori(DisasContext *s, DisasOps *o) 4893 { 4894 int shift = s->insn->data & 0xff; 4895 int size = s->insn->data >> 8; 4896 uint64_t mask = ((1ull << size) - 1) << shift; 4897 TCGv_i64 t = tcg_temp_new_i64(); 4898 4899 tcg_gen_shli_i64(t, o->in2, shift); 4900 tcg_gen_xor_i64(o->out, o->in1, t); 4901 4902 /* Produce the CC from only the bits manipulated. */ 4903 tcg_gen_andi_i64(cc_dst, o->out, mask); 4904 set_cc_nz_u64(s, cc_dst); 4905 return DISAS_NEXT; 4906 } 4907 4908 static DisasJumpType op_xi(DisasContext *s, DisasOps *o) 4909 { 4910 o->in1 = tcg_temp_new_i64(); 4911 4912 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) { 4913 tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data); 4914 } else { 4915 /* Perform the atomic operation in memory. */ 4916 tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s), 4917 s->insn->data); 4918 } 4919 4920 /* Recompute also for atomic case: needed for setting CC. */ 4921 tcg_gen_xor_i64(o->out, o->in1, o->in2); 4922 4923 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) { 4924 tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data); 4925 } 4926 return DISAS_NEXT; 4927 } 4928 4929 static DisasJumpType op_zero(DisasContext *s, DisasOps *o) 4930 { 4931 o->out = tcg_constant_i64(0); 4932 return DISAS_NEXT; 4933 } 4934 4935 static DisasJumpType op_zero2(DisasContext *s, DisasOps *o) 4936 { 4937 o->out = tcg_constant_i64(0); 4938 o->out2 = o->out; 4939 return DISAS_NEXT; 4940 } 4941 4942 #ifndef CONFIG_USER_ONLY 4943 static DisasJumpType op_clp(DisasContext *s, DisasOps *o) 4944 { 4945 TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2)); 4946 4947 gen_helper_clp(tcg_env, r2); 4948 set_cc_static(s); 4949 return DISAS_NEXT; 4950 } 4951 4952 static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o) 4953 { 4954 TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1)); 4955 TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2)); 4956 4957 gen_helper_pcilg(tcg_env, r1, r2); 4958 set_cc_static(s); 4959 return DISAS_NEXT; 4960 } 4961 4962 static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o) 4963 { 4964 TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1)); 4965 TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2)); 4966 4967 gen_helper_pcistg(tcg_env, r1, r2); 4968 set_cc_static(s); 4969 return DISAS_NEXT; 4970 } 4971 4972 static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o) 4973 { 4974 TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1)); 4975 TCGv_i32 ar = tcg_constant_i32(get_field(s, b2)); 4976 4977 gen_helper_stpcifc(tcg_env, r1, o->addr1, ar); 4978 set_cc_static(s); 4979 return DISAS_NEXT; 4980 } 4981 4982 static DisasJumpType op_sic(DisasContext *s, DisasOps *o) 4983 { 4984 gen_helper_sic(tcg_env, o->in1, o->in2); 4985 return DISAS_NEXT; 4986 } 4987 4988 static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o) 4989 { 4990 TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1)); 4991 TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2)); 4992 4993 gen_helper_rpcit(tcg_env, r1, r2); 4994 set_cc_static(s); 4995 return DISAS_NEXT; 4996 } 4997 4998 static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o) 4999 { 5000 TCGv_i32 r1 = tcg_constant_i32(get_field(s, 
r1)); 5001 TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3)); 5002 TCGv_i32 ar = tcg_constant_i32(get_field(s, b2)); 5003 5004 gen_helper_pcistb(tcg_env, r1, r3, o->addr1, ar); 5005 set_cc_static(s); 5006 return DISAS_NEXT; 5007 } 5008 5009 static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o) 5010 { 5011 TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1)); 5012 TCGv_i32 ar = tcg_constant_i32(get_field(s, b2)); 5013 5014 gen_helper_mpcifc(tcg_env, r1, o->addr1, ar); 5015 set_cc_static(s); 5016 return DISAS_NEXT; 5017 } 5018 #endif 5019 5020 #include "translate_vx.c.inc" 5021 5022 /* ====================================================================== */ 5023 /* The "Cc OUTput" generators. Given the generated output (and in some cases 5024 the original inputs), update the various cc data structures in order to 5025 be able to compute the new condition code. */ 5026 5027 static void cout_abs32(DisasContext *s, DisasOps *o) 5028 { 5029 gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out); 5030 } 5031 5032 static void cout_abs64(DisasContext *s, DisasOps *o) 5033 { 5034 gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out); 5035 } 5036 5037 static void cout_adds32(DisasContext *s, DisasOps *o) 5038 { 5039 gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out); 5040 } 5041 5042 static void cout_adds64(DisasContext *s, DisasOps *o) 5043 { 5044 gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out); 5045 } 5046 5047 static void cout_addu32(DisasContext *s, DisasOps *o) 5048 { 5049 tcg_gen_shri_i64(cc_src, o->out, 32); 5050 tcg_gen_ext32u_i64(cc_dst, o->out); 5051 gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, cc_dst); 5052 } 5053 5054 static void cout_addu64(DisasContext *s, DisasOps *o) 5055 { 5056 gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, o->out); 5057 } 5058 5059 static void cout_cmps32(DisasContext *s, DisasOps *o) 5060 { 5061 gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2); 5062 } 5063 5064 static void cout_cmps64(DisasContext *s, DisasOps *o) 5065 { 5066 gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2); 5067 } 5068 5069 static void cout_cmpu32(DisasContext *s, DisasOps *o) 5070 { 5071 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2); 5072 } 5073 5074 static void cout_cmpu64(DisasContext *s, DisasOps *o) 5075 { 5076 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2); 5077 } 5078 5079 static void cout_f32(DisasContext *s, DisasOps *o) 5080 { 5081 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out); 5082 } 5083 5084 static void cout_f64(DisasContext *s, DisasOps *o) 5085 { 5086 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out); 5087 } 5088 5089 static void cout_f128(DisasContext *s, DisasOps *o) 5090 { 5091 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2); 5092 } 5093 5094 static void cout_nabs32(DisasContext *s, DisasOps *o) 5095 { 5096 gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out); 5097 } 5098 5099 static void cout_nabs64(DisasContext *s, DisasOps *o) 5100 { 5101 gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out); 5102 } 5103 5104 static void cout_neg32(DisasContext *s, DisasOps *o) 5105 { 5106 gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out); 5107 } 5108 5109 static void cout_neg64(DisasContext *s, DisasOps *o) 5110 { 5111 gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out); 5112 } 5113 5114 static void cout_nz32(DisasContext *s, DisasOps *o) 5115 { 5116 tcg_gen_ext32u_i64(cc_dst, o->out); 5117 gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst); 5118 } 5119 5120 static void cout_nz64(DisasContext *s, DisasOps *o) 5121 { 5122 gen_op_update1_cc_i64(s, CC_OP_NZ, 
o->out); 5123 } 5124 5125 static void cout_s32(DisasContext *s, DisasOps *o) 5126 { 5127 gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out); 5128 } 5129 5130 static void cout_s64(DisasContext *s, DisasOps *o) 5131 { 5132 gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out); 5133 } 5134 5135 static void cout_subs32(DisasContext *s, DisasOps *o) 5136 { 5137 gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out); 5138 } 5139 5140 static void cout_subs64(DisasContext *s, DisasOps *o) 5141 { 5142 gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out); 5143 } 5144 5145 static void cout_subu32(DisasContext *s, DisasOps *o) 5146 { 5147 tcg_gen_sari_i64(cc_src, o->out, 32); 5148 tcg_gen_ext32u_i64(cc_dst, o->out); 5149 gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, cc_dst); 5150 } 5151 5152 static void cout_subu64(DisasContext *s, DisasOps *o) 5153 { 5154 gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, o->out); 5155 } 5156 5157 static void cout_tm32(DisasContext *s, DisasOps *o) 5158 { 5159 gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2); 5160 } 5161 5162 static void cout_tm64(DisasContext *s, DisasOps *o) 5163 { 5164 gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2); 5165 } 5166 5167 static void cout_muls32(DisasContext *s, DisasOps *o) 5168 { 5169 gen_op_update1_cc_i64(s, CC_OP_MULS_32, o->out); 5170 } 5171 5172 static void cout_muls64(DisasContext *s, DisasOps *o) 5173 { 5174 /* out contains "high" part, out2 contains "low" part of 128 bit result */ 5175 gen_op_update2_cc_i64(s, CC_OP_MULS_64, o->out, o->out2); 5176 } 5177 5178 /* ====================================================================== */ 5179 /* The "PREParation" generators. These initialize the DisasOps.OUT fields 5180 with the TCG register to which we will write. Used in combination with 5181 the "wout" generators, in some cases we need a new temporary, and in 5182 some cases we can write to a TCG global. */ 5183 5184 static void prep_new(DisasContext *s, DisasOps *o) 5185 { 5186 o->out = tcg_temp_new_i64(); 5187 } 5188 #define SPEC_prep_new 0 5189 5190 static void prep_new_P(DisasContext *s, DisasOps *o) 5191 { 5192 o->out = tcg_temp_new_i64(); 5193 o->out2 = tcg_temp_new_i64(); 5194 } 5195 #define SPEC_prep_new_P 0 5196 5197 static void prep_new_x(DisasContext *s, DisasOps *o) 5198 { 5199 o->out_128 = tcg_temp_new_i128(); 5200 } 5201 #define SPEC_prep_new_x 0 5202 5203 static void prep_r1(DisasContext *s, DisasOps *o) 5204 { 5205 o->out = regs[get_field(s, r1)]; 5206 } 5207 #define SPEC_prep_r1 0 5208 5209 static void prep_r1_P(DisasContext *s, DisasOps *o) 5210 { 5211 int r1 = get_field(s, r1); 5212 o->out = regs[r1]; 5213 o->out2 = regs[r1 + 1]; 5214 } 5215 #define SPEC_prep_r1_P SPEC_r1_even 5216 5217 /* ====================================================================== */ 5218 /* The "Write OUTput" generators. These generally perform some non-trivial 5219 copy of data to TCG globals, or to main memory. The trivial cases are 5220 generally handled by having a "prep" generator install the TCG global 5221 as the destination of the operation. 
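   For example, wout_r1_32 below merges a 32-bit result into the low
   half of a general register, while the wout_m1_* family stores the
   result to the effective address previously computed into addr1.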
*/ 5222 5223 static void wout_r1(DisasContext *s, DisasOps *o) 5224 { 5225 store_reg(get_field(s, r1), o->out); 5226 } 5227 #define SPEC_wout_r1 0 5228 5229 static void wout_out2_r1(DisasContext *s, DisasOps *o) 5230 { 5231 store_reg(get_field(s, r1), o->out2); 5232 } 5233 #define SPEC_wout_out2_r1 0 5234 5235 static void wout_r1_8(DisasContext *s, DisasOps *o) 5236 { 5237 int r1 = get_field(s, r1); 5238 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8); 5239 } 5240 #define SPEC_wout_r1_8 0 5241 5242 static void wout_r1_16(DisasContext *s, DisasOps *o) 5243 { 5244 int r1 = get_field(s, r1); 5245 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16); 5246 } 5247 #define SPEC_wout_r1_16 0 5248 5249 static void wout_r1_32(DisasContext *s, DisasOps *o) 5250 { 5251 store_reg32_i64(get_field(s, r1), o->out); 5252 } 5253 #define SPEC_wout_r1_32 0 5254 5255 static void wout_r1_32h(DisasContext *s, DisasOps *o) 5256 { 5257 store_reg32h_i64(get_field(s, r1), o->out); 5258 } 5259 #define SPEC_wout_r1_32h 0 5260 5261 static void wout_r1_P32(DisasContext *s, DisasOps *o) 5262 { 5263 int r1 = get_field(s, r1); 5264 store_reg32_i64(r1, o->out); 5265 store_reg32_i64(r1 + 1, o->out2); 5266 } 5267 #define SPEC_wout_r1_P32 SPEC_r1_even 5268 5269 static void wout_r1_D32(DisasContext *s, DisasOps *o) 5270 { 5271 int r1 = get_field(s, r1); 5272 TCGv_i64 t = tcg_temp_new_i64(); 5273 store_reg32_i64(r1 + 1, o->out); 5274 tcg_gen_shri_i64(t, o->out, 32); 5275 store_reg32_i64(r1, t); 5276 } 5277 #define SPEC_wout_r1_D32 SPEC_r1_even 5278 5279 static void wout_r1_D64(DisasContext *s, DisasOps *o) 5280 { 5281 int r1 = get_field(s, r1); 5282 tcg_gen_extr_i128_i64(regs[r1 + 1], regs[r1], o->out_128); 5283 } 5284 #define SPEC_wout_r1_D64 SPEC_r1_even 5285 5286 static void wout_r3_P32(DisasContext *s, DisasOps *o) 5287 { 5288 int r3 = get_field(s, r3); 5289 store_reg32_i64(r3, o->out); 5290 store_reg32_i64(r3 + 1, o->out2); 5291 } 5292 #define SPEC_wout_r3_P32 SPEC_r3_even 5293 5294 static void wout_r3_P64(DisasContext *s, DisasOps *o) 5295 { 5296 int r3 = get_field(s, r3); 5297 store_reg(r3, o->out); 5298 store_reg(r3 + 1, o->out2); 5299 } 5300 #define SPEC_wout_r3_P64 SPEC_r3_even 5301 5302 static void wout_e1(DisasContext *s, DisasOps *o) 5303 { 5304 store_freg32_i64(get_field(s, r1), o->out); 5305 } 5306 #define SPEC_wout_e1 0 5307 5308 static void wout_f1(DisasContext *s, DisasOps *o) 5309 { 5310 store_freg(get_field(s, r1), o->out); 5311 } 5312 #define SPEC_wout_f1 0 5313 5314 static void wout_x1(DisasContext *s, DisasOps *o) 5315 { 5316 int f1 = get_field(s, r1); 5317 5318 /* Split out_128 into out+out2 for cout_f128. 
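   The extract below leaves the high doubleword in out and the low
   doubleword in out2; both are then stored to the f1/f1+2 register
   pair that makes up a 128-bit FP value.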
*/ 5319 tcg_debug_assert(o->out == NULL); 5320 o->out = tcg_temp_new_i64(); 5321 o->out2 = tcg_temp_new_i64(); 5322 5323 tcg_gen_extr_i128_i64(o->out2, o->out, o->out_128); 5324 store_freg(f1, o->out); 5325 store_freg(f1 + 2, o->out2); 5326 } 5327 #define SPEC_wout_x1 SPEC_r1_f128 5328 5329 static void wout_x1_P(DisasContext *s, DisasOps *o) 5330 { 5331 int f1 = get_field(s, r1); 5332 store_freg(f1, o->out); 5333 store_freg(f1 + 2, o->out2); 5334 } 5335 #define SPEC_wout_x1_P SPEC_r1_f128 5336 5337 static void wout_cond_r1r2_32(DisasContext *s, DisasOps *o) 5338 { 5339 if (get_field(s, r1) != get_field(s, r2)) { 5340 store_reg32_i64(get_field(s, r1), o->out); 5341 } 5342 } 5343 #define SPEC_wout_cond_r1r2_32 0 5344 5345 static void wout_cond_e1e2(DisasContext *s, DisasOps *o) 5346 { 5347 if (get_field(s, r1) != get_field(s, r2)) { 5348 store_freg32_i64(get_field(s, r1), o->out); 5349 } 5350 } 5351 #define SPEC_wout_cond_e1e2 0 5352 5353 static void wout_m1_8(DisasContext *s, DisasOps *o) 5354 { 5355 tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_UB); 5356 } 5357 #define SPEC_wout_m1_8 0 5358 5359 static void wout_m1_16(DisasContext *s, DisasOps *o) 5360 { 5361 tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUW); 5362 } 5363 #define SPEC_wout_m1_16 0 5364 5365 #ifndef CONFIG_USER_ONLY 5366 static void wout_m1_16a(DisasContext *s, DisasOps *o) 5367 { 5368 tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN); 5369 } 5370 #define SPEC_wout_m1_16a 0 5371 #endif 5372 5373 static void wout_m1_32(DisasContext *s, DisasOps *o) 5374 { 5375 tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUL); 5376 } 5377 #define SPEC_wout_m1_32 0 5378 5379 #ifndef CONFIG_USER_ONLY 5380 static void wout_m1_32a(DisasContext *s, DisasOps *o) 5381 { 5382 tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN); 5383 } 5384 #define SPEC_wout_m1_32a 0 5385 #endif 5386 5387 static void wout_m1_64(DisasContext *s, DisasOps *o) 5388 { 5389 tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUQ); 5390 } 5391 #define SPEC_wout_m1_64 0 5392 5393 #ifndef CONFIG_USER_ONLY 5394 static void wout_m1_64a(DisasContext *s, DisasOps *o) 5395 { 5396 tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUQ | MO_ALIGN); 5397 } 5398 #define SPEC_wout_m1_64a 0 5399 #endif 5400 5401 static void wout_m2_32(DisasContext *s, DisasOps *o) 5402 { 5403 tcg_gen_qemu_st_i64(o->out, o->in2, get_mem_index(s), MO_TEUL); 5404 } 5405 #define SPEC_wout_m2_32 0 5406 5407 static void wout_in2_r1(DisasContext *s, DisasOps *o) 5408 { 5409 store_reg(get_field(s, r1), o->in2); 5410 } 5411 #define SPEC_wout_in2_r1 0 5412 5413 static void wout_in2_r1_32(DisasContext *s, DisasOps *o) 5414 { 5415 store_reg32_i64(get_field(s, r1), o->in2); 5416 } 5417 #define SPEC_wout_in2_r1_32 0 5418 5419 /* ====================================================================== */ 5420 /* The "INput 1" generators. These load the first operand to an insn. 
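   Plain in1_rN loads a copy of a register into a fresh temporary,
   whereas the _o ("original") variants alias the TCG global itself
   rather than copying it. Suffixes such as _32s, _32u and _sr32
   denote sign extension, zero extension, and a right shift of the
   high word, respectively.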
*/ 5421 5422 static void in1_r1(DisasContext *s, DisasOps *o) 5423 { 5424 o->in1 = load_reg(get_field(s, r1)); 5425 } 5426 #define SPEC_in1_r1 0 5427 5428 static void in1_r1_o(DisasContext *s, DisasOps *o) 5429 { 5430 o->in1 = regs[get_field(s, r1)]; 5431 } 5432 #define SPEC_in1_r1_o 0 5433 5434 static void in1_r1_32s(DisasContext *s, DisasOps *o) 5435 { 5436 o->in1 = tcg_temp_new_i64(); 5437 tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1)]); 5438 } 5439 #define SPEC_in1_r1_32s 0 5440 5441 static void in1_r1_32u(DisasContext *s, DisasOps *o) 5442 { 5443 o->in1 = tcg_temp_new_i64(); 5444 tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1)]); 5445 } 5446 #define SPEC_in1_r1_32u 0 5447 5448 static void in1_r1_sr32(DisasContext *s, DisasOps *o) 5449 { 5450 o->in1 = tcg_temp_new_i64(); 5451 tcg_gen_shri_i64(o->in1, regs[get_field(s, r1)], 32); 5452 } 5453 #define SPEC_in1_r1_sr32 0 5454 5455 static void in1_r1p1(DisasContext *s, DisasOps *o) 5456 { 5457 o->in1 = load_reg(get_field(s, r1) + 1); 5458 } 5459 #define SPEC_in1_r1p1 SPEC_r1_even 5460 5461 static void in1_r1p1_o(DisasContext *s, DisasOps *o) 5462 { 5463 o->in1 = regs[get_field(s, r1) + 1]; 5464 } 5465 #define SPEC_in1_r1p1_o SPEC_r1_even 5466 5467 static void in1_r1p1_32s(DisasContext *s, DisasOps *o) 5468 { 5469 o->in1 = tcg_temp_new_i64(); 5470 tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1) + 1]); 5471 } 5472 #define SPEC_in1_r1p1_32s SPEC_r1_even 5473 5474 static void in1_r1p1_32u(DisasContext *s, DisasOps *o) 5475 { 5476 o->in1 = tcg_temp_new_i64(); 5477 tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1) + 1]); 5478 } 5479 #define SPEC_in1_r1p1_32u SPEC_r1_even 5480 5481 static void in1_r1_D32(DisasContext *s, DisasOps *o) 5482 { 5483 int r1 = get_field(s, r1); 5484 o->in1 = tcg_temp_new_i64(); 5485 tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]); 5486 } 5487 #define SPEC_in1_r1_D32 SPEC_r1_even 5488 5489 static void in1_r2(DisasContext *s, DisasOps *o) 5490 { 5491 o->in1 = load_reg(get_field(s, r2)); 5492 } 5493 #define SPEC_in1_r2 0 5494 5495 static void in1_r2_sr32(DisasContext *s, DisasOps *o) 5496 { 5497 o->in1 = tcg_temp_new_i64(); 5498 tcg_gen_shri_i64(o->in1, regs[get_field(s, r2)], 32); 5499 } 5500 #define SPEC_in1_r2_sr32 0 5501 5502 static void in1_r2_32u(DisasContext *s, DisasOps *o) 5503 { 5504 o->in1 = tcg_temp_new_i64(); 5505 tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r2)]); 5506 } 5507 #define SPEC_in1_r2_32u 0 5508 5509 static void in1_r3(DisasContext *s, DisasOps *o) 5510 { 5511 o->in1 = load_reg(get_field(s, r3)); 5512 } 5513 #define SPEC_in1_r3 0 5514 5515 static void in1_r3_o(DisasContext *s, DisasOps *o) 5516 { 5517 o->in1 = regs[get_field(s, r3)]; 5518 } 5519 #define SPEC_in1_r3_o 0 5520 5521 static void in1_r3_32s(DisasContext *s, DisasOps *o) 5522 { 5523 o->in1 = tcg_temp_new_i64(); 5524 tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r3)]); 5525 } 5526 #define SPEC_in1_r3_32s 0 5527 5528 static void in1_r3_32u(DisasContext *s, DisasOps *o) 5529 { 5530 o->in1 = tcg_temp_new_i64(); 5531 tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r3)]); 5532 } 5533 #define SPEC_in1_r3_32u 0 5534 5535 static void in1_r3_D32(DisasContext *s, DisasOps *o) 5536 { 5537 int r3 = get_field(s, r3); 5538 o->in1 = tcg_temp_new_i64(); 5539 tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]); 5540 } 5541 #define SPEC_in1_r3_D32 SPEC_r3_even 5542 5543 static void in1_r3_sr32(DisasContext *s, DisasOps *o) 5544 { 5545 o->in1 = tcg_temp_new_i64(); 5546 tcg_gen_shri_i64(o->in1, regs[get_field(s, r3)], 32); 5547 } 5548 #define 
SPEC_in1_r3_sr32 0 5549 5550 static void in1_e1(DisasContext *s, DisasOps *o) 5551 { 5552 o->in1 = load_freg32_i64(get_field(s, r1)); 5553 } 5554 #define SPEC_in1_e1 0 5555 5556 static void in1_f1(DisasContext *s, DisasOps *o) 5557 { 5558 o->in1 = load_freg(get_field(s, r1)); 5559 } 5560 #define SPEC_in1_f1 0 5561 5562 static void in1_x1(DisasContext *s, DisasOps *o) 5563 { 5564 o->in1_128 = load_freg_128(get_field(s, r1)); 5565 } 5566 #define SPEC_in1_x1 SPEC_r1_f128 5567 5568 /* Load the high double word of an extended (128-bit) format FP number */ 5569 static void in1_x2h(DisasContext *s, DisasOps *o) 5570 { 5571 o->in1 = load_freg(get_field(s, r2)); 5572 } 5573 #define SPEC_in1_x2h SPEC_r2_f128 5574 5575 static void in1_f3(DisasContext *s, DisasOps *o) 5576 { 5577 o->in1 = load_freg(get_field(s, r3)); 5578 } 5579 #define SPEC_in1_f3 0 5580 5581 static void in1_la1(DisasContext *s, DisasOps *o) 5582 { 5583 o->addr1 = get_address(s, 0, get_field(s, b1), get_field(s, d1)); 5584 } 5585 #define SPEC_in1_la1 0 5586 5587 static void in1_la2(DisasContext *s, DisasOps *o) 5588 { 5589 int x2 = have_field(s, x2) ? get_field(s, x2) : 0; 5590 o->addr1 = get_address(s, x2, get_field(s, b2), get_field(s, d2)); 5591 } 5592 #define SPEC_in1_la2 0 5593 5594 static void in1_m1_8u(DisasContext *s, DisasOps *o) 5595 { 5596 in1_la1(s, o); 5597 o->in1 = tcg_temp_new_i64(); 5598 tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_UB); 5599 } 5600 #define SPEC_in1_m1_8u 0 5601 5602 static void in1_m1_16s(DisasContext *s, DisasOps *o) 5603 { 5604 in1_la1(s, o); 5605 o->in1 = tcg_temp_new_i64(); 5606 tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TESW); 5607 } 5608 #define SPEC_in1_m1_16s 0 5609 5610 static void in1_m1_16u(DisasContext *s, DisasOps *o) 5611 { 5612 in1_la1(s, o); 5613 o->in1 = tcg_temp_new_i64(); 5614 tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUW); 5615 } 5616 #define SPEC_in1_m1_16u 0 5617 5618 static void in1_m1_32s(DisasContext *s, DisasOps *o) 5619 { 5620 in1_la1(s, o); 5621 o->in1 = tcg_temp_new_i64(); 5622 tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TESL); 5623 } 5624 #define SPEC_in1_m1_32s 0 5625 5626 static void in1_m1_32u(DisasContext *s, DisasOps *o) 5627 { 5628 in1_la1(s, o); 5629 o->in1 = tcg_temp_new_i64(); 5630 tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUL); 5631 } 5632 #define SPEC_in1_m1_32u 0 5633 5634 static void in1_m1_64(DisasContext *s, DisasOps *o) 5635 { 5636 in1_la1(s, o); 5637 o->in1 = tcg_temp_new_i64(); 5638 tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUQ); 5639 } 5640 #define SPEC_in1_m1_64 0 5641 5642 /* ====================================================================== */ 5643 /* The "INput 2" generators. These load the second operand to an insn. 
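   The naming conventions match those of "INput 1"; in addition, the
   m2_* forms load through the computed effective address, the mri2_*
   forms through a PC-relative address, and the i2_* forms materialize
   the immediate field as a constant.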
*/ 5644 5645 static void in2_r1_o(DisasContext *s, DisasOps *o) 5646 { 5647 o->in2 = regs[get_field(s, r1)]; 5648 } 5649 #define SPEC_in2_r1_o 0 5650 5651 static void in2_r1_16u(DisasContext *s, DisasOps *o) 5652 { 5653 o->in2 = tcg_temp_new_i64(); 5654 tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r1)]); 5655 } 5656 #define SPEC_in2_r1_16u 0 5657 5658 static void in2_r1_32u(DisasContext *s, DisasOps *o) 5659 { 5660 o->in2 = tcg_temp_new_i64(); 5661 tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r1)]); 5662 } 5663 #define SPEC_in2_r1_32u 0 5664 5665 static void in2_r1_D32(DisasContext *s, DisasOps *o) 5666 { 5667 int r1 = get_field(s, r1); 5668 o->in2 = tcg_temp_new_i64(); 5669 tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]); 5670 } 5671 #define SPEC_in2_r1_D32 SPEC_r1_even 5672 5673 static void in2_r2(DisasContext *s, DisasOps *o) 5674 { 5675 o->in2 = load_reg(get_field(s, r2)); 5676 } 5677 #define SPEC_in2_r2 0 5678 5679 static void in2_r2_o(DisasContext *s, DisasOps *o) 5680 { 5681 o->in2 = regs[get_field(s, r2)]; 5682 } 5683 #define SPEC_in2_r2_o 0 5684 5685 static void in2_r2_nz(DisasContext *s, DisasOps *o) 5686 { 5687 int r2 = get_field(s, r2); 5688 if (r2 != 0) { 5689 o->in2 = load_reg(r2); 5690 } 5691 } 5692 #define SPEC_in2_r2_nz 0 5693 5694 static void in2_r2_8s(DisasContext *s, DisasOps *o) 5695 { 5696 o->in2 = tcg_temp_new_i64(); 5697 tcg_gen_ext8s_i64(o->in2, regs[get_field(s, r2)]); 5698 } 5699 #define SPEC_in2_r2_8s 0 5700 5701 static void in2_r2_8u(DisasContext *s, DisasOps *o) 5702 { 5703 o->in2 = tcg_temp_new_i64(); 5704 tcg_gen_ext8u_i64(o->in2, regs[get_field(s, r2)]); 5705 } 5706 #define SPEC_in2_r2_8u 0 5707 5708 static void in2_r2_16s(DisasContext *s, DisasOps *o) 5709 { 5710 o->in2 = tcg_temp_new_i64(); 5711 tcg_gen_ext16s_i64(o->in2, regs[get_field(s, r2)]); 5712 } 5713 #define SPEC_in2_r2_16s 0 5714 5715 static void in2_r2_16u(DisasContext *s, DisasOps *o) 5716 { 5717 o->in2 = tcg_temp_new_i64(); 5718 tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r2)]); 5719 } 5720 #define SPEC_in2_r2_16u 0 5721 5722 static void in2_r3(DisasContext *s, DisasOps *o) 5723 { 5724 o->in2 = load_reg(get_field(s, r3)); 5725 } 5726 #define SPEC_in2_r3 0 5727 5728 static void in2_r3_D64(DisasContext *s, DisasOps *o) 5729 { 5730 int r3 = get_field(s, r3); 5731 o->in2_128 = tcg_temp_new_i128(); 5732 tcg_gen_concat_i64_i128(o->in2_128, regs[r3 + 1], regs[r3]); 5733 } 5734 #define SPEC_in2_r3_D64 SPEC_r3_even 5735 5736 static void in2_r3_sr32(DisasContext *s, DisasOps *o) 5737 { 5738 o->in2 = tcg_temp_new_i64(); 5739 tcg_gen_shri_i64(o->in2, regs[get_field(s, r3)], 32); 5740 } 5741 #define SPEC_in2_r3_sr32 0 5742 5743 static void in2_r3_32u(DisasContext *s, DisasOps *o) 5744 { 5745 o->in2 = tcg_temp_new_i64(); 5746 tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r3)]); 5747 } 5748 #define SPEC_in2_r3_32u 0 5749 5750 static void in2_r2_32s(DisasContext *s, DisasOps *o) 5751 { 5752 o->in2 = tcg_temp_new_i64(); 5753 tcg_gen_ext32s_i64(o->in2, regs[get_field(s, r2)]); 5754 } 5755 #define SPEC_in2_r2_32s 0 5756 5757 static void in2_r2_32u(DisasContext *s, DisasOps *o) 5758 { 5759 o->in2 = tcg_temp_new_i64(); 5760 tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r2)]); 5761 } 5762 #define SPEC_in2_r2_32u 0 5763 5764 static void in2_r2_sr32(DisasContext *s, DisasOps *o) 5765 { 5766 o->in2 = tcg_temp_new_i64(); 5767 tcg_gen_shri_i64(o->in2, regs[get_field(s, r2)], 32); 5768 } 5769 #define SPEC_in2_r2_sr32 0 5770 5771 static void in2_e2(DisasContext *s, DisasOps *o) 5772 { 5773 o->in2 = 
load_freg32_i64(get_field(s, r2)); 5774 } 5775 #define SPEC_in2_e2 0 5776 5777 static void in2_f2(DisasContext *s, DisasOps *o) 5778 { 5779 o->in2 = load_freg(get_field(s, r2)); 5780 } 5781 #define SPEC_in2_f2 0 5782 5783 static void in2_x2(DisasContext *s, DisasOps *o) 5784 { 5785 o->in2_128 = load_freg_128(get_field(s, r2)); 5786 } 5787 #define SPEC_in2_x2 SPEC_r2_f128 5788 5789 /* Load the low double word of an extended (128-bit) format FP number */ 5790 static void in2_x2l(DisasContext *s, DisasOps *o) 5791 { 5792 o->in2 = load_freg(get_field(s, r2) + 2); 5793 } 5794 #define SPEC_in2_x2l SPEC_r2_f128 5795 5796 static void in2_ra2(DisasContext *s, DisasOps *o) 5797 { 5798 int r2 = get_field(s, r2); 5799 5800 /* Note: *don't* treat !r2 as 0, use the reg value. */ 5801 o->in2 = tcg_temp_new_i64(); 5802 gen_addi_and_wrap_i64(s, o->in2, regs[r2], 0); 5803 } 5804 #define SPEC_in2_ra2 0 5805 5806 static void in2_ra2_E(DisasContext *s, DisasOps *o) 5807 { 5808 return in2_ra2(s, o); 5809 } 5810 #define SPEC_in2_ra2_E SPEC_r2_even 5811 5812 static void in2_a2(DisasContext *s, DisasOps *o) 5813 { 5814 int x2 = have_field(s, x2) ? get_field(s, x2) : 0; 5815 o->in2 = get_address(s, x2, get_field(s, b2), get_field(s, d2)); 5816 } 5817 #define SPEC_in2_a2 0 5818 5819 static TCGv gen_ri2(DisasContext *s) 5820 { 5821 TCGv ri2 = NULL; 5822 bool is_imm; 5823 int imm; 5824 5825 disas_jdest(s, i2, is_imm, imm, ri2); 5826 if (is_imm) { 5827 ri2 = tcg_constant_i64(s->base.pc_next + (int64_t)imm * 2); 5828 } 5829 5830 return ri2; 5831 } 5832 5833 static void in2_ri2(DisasContext *s, DisasOps *o) 5834 { 5835 o->in2 = gen_ri2(s); 5836 } 5837 #define SPEC_in2_ri2 0 5838 5839 static void in2_sh(DisasContext *s, DisasOps *o) 5840 { 5841 int b2 = get_field(s, b2); 5842 int d2 = get_field(s, d2); 5843 5844 if (b2 == 0) { 5845 o->in2 = tcg_constant_i64(d2 & 0x3f); 5846 } else { 5847 o->in2 = get_address(s, 0, b2, d2); 5848 tcg_gen_andi_i64(o->in2, o->in2, 0x3f); 5849 } 5850 } 5851 #define SPEC_in2_sh 0 5852 5853 static void in2_m2_8u(DisasContext *s, DisasOps *o) 5854 { 5855 in2_a2(s, o); 5856 tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_UB); 5857 } 5858 #define SPEC_in2_m2_8u 0 5859 5860 static void in2_m2_16s(DisasContext *s, DisasOps *o) 5861 { 5862 in2_a2(s, o); 5863 tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TESW); 5864 } 5865 #define SPEC_in2_m2_16s 0 5866 5867 static void in2_m2_16u(DisasContext *s, DisasOps *o) 5868 { 5869 in2_a2(s, o); 5870 tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUW); 5871 } 5872 #define SPEC_in2_m2_16u 0 5873 5874 static void in2_m2_32s(DisasContext *s, DisasOps *o) 5875 { 5876 in2_a2(s, o); 5877 tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TESL); 5878 } 5879 #define SPEC_in2_m2_32s 0 5880 5881 static void in2_m2_32u(DisasContext *s, DisasOps *o) 5882 { 5883 in2_a2(s, o); 5884 tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUL); 5885 } 5886 #define SPEC_in2_m2_32u 0 5887 5888 #ifndef CONFIG_USER_ONLY 5889 static void in2_m2_32ua(DisasContext *s, DisasOps *o) 5890 { 5891 in2_a2(s, o); 5892 tcg_gen_qemu_ld_tl(o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN); 5893 } 5894 #define SPEC_in2_m2_32ua 0 5895 #endif 5896 5897 static void in2_m2_64(DisasContext *s, DisasOps *o) 5898 { 5899 in2_a2(s, o); 5900 tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ); 5901 } 5902 #define SPEC_in2_m2_64 0 5903 5904 static void in2_m2_64w(DisasContext *s, DisasOps *o) 5905 { 5906 in2_a2(s, o); 5907 tcg_gen_qemu_ld_i64(o->in2, 
o->in2, get_mem_index(s), MO_TEUQ); 5908 gen_addi_and_wrap_i64(s, o->in2, o->in2, 0); 5909 } 5910 #define SPEC_in2_m2_64w 0 5911 5912 #ifndef CONFIG_USER_ONLY 5913 static void in2_m2_64a(DisasContext *s, DisasOps *o) 5914 { 5915 in2_a2(s, o); 5916 tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ | MO_ALIGN); 5917 } 5918 #define SPEC_in2_m2_64a 0 5919 #endif 5920 5921 static void in2_mri2_16s(DisasContext *s, DisasOps *o) 5922 { 5923 o->in2 = tcg_temp_new_i64(); 5924 tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s), MO_TESW); 5925 } 5926 #define SPEC_in2_mri2_16s 0 5927 5928 static void in2_mri2_16u(DisasContext *s, DisasOps *o) 5929 { 5930 o->in2 = tcg_temp_new_i64(); 5931 tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s), MO_TEUW); 5932 } 5933 #define SPEC_in2_mri2_16u 0 5934 5935 static void in2_mri2_32s(DisasContext *s, DisasOps *o) 5936 { 5937 o->in2 = tcg_temp_new_i64(); 5938 tcg_gen_qemu_ld_tl(o->in2, gen_ri2(s), get_mem_index(s), 5939 MO_TESL | MO_ALIGN); 5940 } 5941 #define SPEC_in2_mri2_32s 0 5942 5943 static void in2_mri2_32u(DisasContext *s, DisasOps *o) 5944 { 5945 o->in2 = tcg_temp_new_i64(); 5946 tcg_gen_qemu_ld_tl(o->in2, gen_ri2(s), get_mem_index(s), 5947 MO_TEUL | MO_ALIGN); 5948 } 5949 #define SPEC_in2_mri2_32u 0 5950 5951 static void in2_mri2_64(DisasContext *s, DisasOps *o) 5952 { 5953 o->in2 = tcg_temp_new_i64(); 5954 tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s), 5955 MO_TEUQ | MO_ALIGN); 5956 } 5957 #define SPEC_in2_mri2_64 0 5958 5959 static void in2_i2(DisasContext *s, DisasOps *o) 5960 { 5961 o->in2 = tcg_constant_i64(get_field(s, i2)); 5962 } 5963 #define SPEC_in2_i2 0 5964 5965 static void in2_i2_8u(DisasContext *s, DisasOps *o) 5966 { 5967 o->in2 = tcg_constant_i64((uint8_t)get_field(s, i2)); 5968 } 5969 #define SPEC_in2_i2_8u 0 5970 5971 static void in2_i2_16u(DisasContext *s, DisasOps *o) 5972 { 5973 o->in2 = tcg_constant_i64((uint16_t)get_field(s, i2)); 5974 } 5975 #define SPEC_in2_i2_16u 0 5976 5977 static void in2_i2_32u(DisasContext *s, DisasOps *o) 5978 { 5979 o->in2 = tcg_constant_i64((uint32_t)get_field(s, i2)); 5980 } 5981 #define SPEC_in2_i2_32u 0 5982 5983 static void in2_i2_16u_shl(DisasContext *s, DisasOps *o) 5984 { 5985 uint64_t i2 = (uint16_t)get_field(s, i2); 5986 o->in2 = tcg_constant_i64(i2 << s->insn->data); 5987 } 5988 #define SPEC_in2_i2_16u_shl 0 5989 5990 static void in2_i2_32u_shl(DisasContext *s, DisasOps *o) 5991 { 5992 uint64_t i2 = (uint32_t)get_field(s, i2); 5993 o->in2 = tcg_constant_i64(i2 << s->insn->data); 5994 } 5995 #define SPEC_in2_i2_32u_shl 0 5996 5997 #ifndef CONFIG_USER_ONLY 5998 static void in2_insn(DisasContext *s, DisasOps *o) 5999 { 6000 o->in2 = tcg_constant_i64(s->fields.raw_insn); 6001 } 6002 #define SPEC_in2_insn 0 6003 #endif 6004 6005 /* ====================================================================== */ 6006 6007 /* Find opc within the table of insns. This is formulated as a switch 6008 statement so that (1) we get compile-time notice of cut-paste errors 6009 for duplicated opcodes, and (2) the compiler generates the binary 6010 search tree, rather than us having to post-process the table. 
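   Each E() expansion becomes one case label of the form (entry name
   purely illustrative):

       case 0xb904: return &insn_info[insn_lgr];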
*/ 6011 6012 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \ 6013 E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0) 6014 6015 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \ 6016 E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0) 6017 6018 #define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \ 6019 E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL) 6020 6021 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM, 6022 6023 enum DisasInsnEnum { 6024 #include "insn-data.h.inc" 6025 }; 6026 6027 #undef E 6028 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) { \ 6029 .opc = OPC, \ 6030 .flags = FL, \ 6031 .fmt = FMT_##FT, \ 6032 .fac = FAC_##FC, \ 6033 .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \ 6034 .name = #NM, \ 6035 .help_in1 = in1_##I1, \ 6036 .help_in2 = in2_##I2, \ 6037 .help_prep = prep_##P, \ 6038 .help_wout = wout_##W, \ 6039 .help_cout = cout_##CC, \ 6040 .help_op = op_##OP, \ 6041 .data = D \ 6042 }, 6043 6044 /* Allow 0 to be used for NULL in the table below. */ 6045 #define in1_0 NULL 6046 #define in2_0 NULL 6047 #define prep_0 NULL 6048 #define wout_0 NULL 6049 #define cout_0 NULL 6050 #define op_0 NULL 6051 6052 #define SPEC_in1_0 0 6053 #define SPEC_in2_0 0 6054 #define SPEC_prep_0 0 6055 #define SPEC_wout_0 0 6056 6057 /* Give smaller names to the various facilities. */ 6058 #define FAC_Z S390_FEAT_ZARCH 6059 #define FAC_CASS S390_FEAT_COMPARE_AND_SWAP_AND_STORE 6060 #define FAC_DFP S390_FEAT_DFP 6061 #define FAC_DFPR S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* DFP-rounding */ 6062 #define FAC_DO S390_FEAT_STFLE_45 /* distinct-operands */ 6063 #define FAC_EE S390_FEAT_EXECUTE_EXT 6064 #define FAC_EI S390_FEAT_EXTENDED_IMMEDIATE 6065 #define FAC_FPE S390_FEAT_FLOATING_POINT_EXT 6066 #define FAC_FPSSH S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* FPS-sign-handling */ 6067 #define FAC_FPRGR S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* FPR-GR-transfer */ 6068 #define FAC_GIE S390_FEAT_GENERAL_INSTRUCTIONS_EXT 6069 #define FAC_HFP_MA S390_FEAT_HFP_MADDSUB 6070 #define FAC_HW S390_FEAT_STFLE_45 /* high-word */ 6071 #define FAC_IEEEE_SIM S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* IEEE-exception-simulation */ 6072 #define FAC_MIE S390_FEAT_STFLE_49 /* misc-instruction-extensions */ 6073 #define FAC_LAT S390_FEAT_STFLE_49 /* load-and-trap */ 6074 #define FAC_LOC S390_FEAT_STFLE_45 /* load/store on condition 1 */ 6075 #define FAC_LOC2 S390_FEAT_STFLE_53 /* load/store on condition 2 */ 6076 #define FAC_LD S390_FEAT_LONG_DISPLACEMENT 6077 #define FAC_PC S390_FEAT_STFLE_45 /* population count */ 6078 #define FAC_SCF S390_FEAT_STORE_CLOCK_FAST 6079 #define FAC_SFLE S390_FEAT_STFLE 6080 #define FAC_ILA S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */ 6081 #define FAC_MVCOS S390_FEAT_MOVE_WITH_OPTIONAL_SPEC 6082 #define FAC_LPP S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */ 6083 #define FAC_DAT_ENH S390_FEAT_DAT_ENH 6084 #define FAC_E2 S390_FEAT_EXTENDED_TRANSLATION_2 6085 #define FAC_EH S390_FEAT_STFLE_49 /* execution-hint */ 6086 #define FAC_PPA S390_FEAT_STFLE_49 /* processor-assist */ 6087 #define FAC_LZRB S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */ 6088 #define FAC_ETF3 S390_FEAT_EXTENDED_TRANSLATION_3 6089 #define FAC_MSA S390_FEAT_MSA /* message-security-assist facility */ 6090 #define FAC_MSA3 S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */ 6091 #define FAC_MSA4 S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */ 6092 #define FAC_MSA5 S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */ 6093 #define FAC_MSA8 
S390_FEAT_MSA_EXT_8 /* msa-extension-8 facility */ 6094 #define FAC_ECT S390_FEAT_EXTRACT_CPU_TIME 6095 #define FAC_PCI S390_FEAT_ZPCI /* z/PCI facility */ 6096 #define FAC_AIS S390_FEAT_ADAPTER_INT_SUPPRESSION 6097 #define FAC_V S390_FEAT_VECTOR /* vector facility */ 6098 #define FAC_VE S390_FEAT_VECTOR_ENH /* vector enhancements facility 1 */ 6099 #define FAC_VE2 S390_FEAT_VECTOR_ENH2 /* vector enhancements facility 2 */ 6100 #define FAC_MIE2 S390_FEAT_MISC_INSTRUCTION_EXT2 /* miscellaneous-instruction-extensions facility 2 */ 6101 #define FAC_MIE3 S390_FEAT_MISC_INSTRUCTION_EXT3 /* miscellaneous-instruction-extensions facility 3 */ 6102 6103 static const DisasInsn insn_info[] = { 6104 #include "insn-data.h.inc" 6105 }; 6106 6107 #undef E 6108 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \ 6109 case OPC: return &insn_info[insn_ ## NM]; 6110 6111 static const DisasInsn *lookup_opc(uint16_t opc) 6112 { 6113 switch (opc) { 6114 #include "insn-data.h.inc" 6115 default: 6116 return NULL; 6117 } 6118 } 6119 6120 #undef F 6121 #undef E 6122 #undef D 6123 #undef C 6124 6125 /* Extract a field from the insn. The INSN should be left-aligned in 6126 the uint64_t so that we can more easily utilize the big-bit-endian 6127 definitions we extract from the Principles of Operation. */ 6128 6129 static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn) 6130 { 6131 uint32_t r, m; 6132 6133 if (f->size == 0) { 6134 return; 6135 } 6136 6137 /* Zero extract the field from the insn. */ 6138 r = (insn << f->beg) >> (64 - f->size); 6139 6140 /* Sign-extend, or un-swap the field as necessary. */ 6141 switch (f->type) { 6142 case 0: /* unsigned */ 6143 break; 6144 case 1: /* signed */ 6145 assert(f->size <= 32); 6146 m = 1u << (f->size - 1); 6147 r = (r ^ m) - m; 6148 break; 6149 case 2: /* dl+dh split, signed 20 bit. */ 6150 r = ((int8_t)r << 12) | (r >> 8); 6151 break; 6152 case 3: /* MSB stored in RXB */ 6153 g_assert(f->size == 4); 6154 switch (f->beg) { 6155 case 8: 6156 r |= extract64(insn, 63 - 36, 1) << 4; 6157 break; 6158 case 12: 6159 r |= extract64(insn, 63 - 37, 1) << 4; 6160 break; 6161 case 16: 6162 r |= extract64(insn, 63 - 38, 1) << 4; 6163 break; 6164 case 32: 6165 r |= extract64(insn, 63 - 39, 1) << 4; 6166 break; 6167 default: 6168 g_assert_not_reached(); 6169 } 6170 break; 6171 default: 6172 abort(); 6173 } 6174 6175 /* 6176 * Validate that the "compressed" encoding we selected above is valid. 6177 * I.e. we haven't made two different original fields overlap. 6178 */ 6179 assert(((o->presentC >> f->indexC) & 1) == 0); 6180 o->presentC |= 1 << f->indexC; 6181 o->presentO |= 1 << f->indexO; 6182 6183 o->c[f->indexC] = r; 6184 } 6185 6186 /* Lookup the insn at the current PC, extracting the operands into O and 6187 returning the info struct for the insn. Returns NULL for invalid insn. */ 6188 6189 static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s) 6190 { 6191 uint64_t insn, pc = s->base.pc_next; 6192 int op, op2, ilen; 6193 const DisasInsn *info; 6194 6195 if (unlikely(s->ex_value)) { 6196 /* Drop the EX data now, so that it's clear on exception paths. */ 6197 tcg_gen_st_i64(tcg_constant_i64(0), tcg_env, 6198 offsetof(CPUS390XState, ex_value)); 6199 6200 /* Extract the values saved by EXECUTE. */ 6201 insn = s->ex_value & 0xffffffffffff0000ull; 6202 ilen = s->ex_value & 0xf; 6203 6204 /* Register insn bytes with translator so plugins work.
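      The target bytes of an EXECUTE never pass through the normal
      ld_code path, so feed them to the translator one at a time;
      otherwise TCG plugins would observe no memory for this insn.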

/*
 * Lookup the insn at the current PC, extracting the operands into O and
 * returning the info struct for the insn.  Returns NULL for invalid insn.
 */

static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s)
{
    uint64_t insn, pc = s->base.pc_next;
    int op, op2, ilen;
    const DisasInsn *info;

    if (unlikely(s->ex_value)) {
        /* Drop the EX data now, so that it's clear on exception paths. */
        tcg_gen_st_i64(tcg_constant_i64(0), tcg_env,
                       offsetof(CPUS390XState, ex_value));

        /* Extract the values saved by EXECUTE. */
        insn = s->ex_value & 0xffffffffffff0000ull;
        ilen = s->ex_value & 0xf;

        /* Register insn bytes with translator so plugins work. */
        for (int i = 0; i < ilen; i++) {
            uint8_t byte = extract64(insn, 56 - (i * 8), 8);
            translator_fake_ldb(byte, pc + i);
        }
        op = insn >> 56;
    } else {
        insn = ld_code2(env, s, pc);
        op = (insn >> 8) & 0xff;
        ilen = get_ilen(op);
        switch (ilen) {
        case 2:
            insn = insn << 48;
            break;
        case 4:
            insn = ld_code4(env, s, pc) << 32;
            break;
        case 6:
            insn = (insn << 48) | (ld_code4(env, s, pc + 2) << 16);
            break;
        default:
            g_assert_not_reached();
        }
    }
    s->pc_tmp = s->base.pc_next + ilen;
    s->ilen = ilen;

    /*
     * We can't actually determine the insn format until we've looked up
     * the full insn opcode, which we can't do without locating the
     * secondary opcode.  Assume by default that OP2 is at bit 40; for
     * those smaller insns that don't actually have a secondary opcode
     * this will correctly result in OP2 = 0.
     */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE, IE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        op2 = (insn << 12) >> 60;
        break;
    case 0xc5: /* MII */
    case 0xc7: /* SMI */
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        op2 = 0;
        break;
    default:
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(&s->fields, 0, sizeof(s->fields));
    s->fields.raw_insn = insn;
    s->fields.op = op;
    s->fields.op2 = op2;

    /* Lookup the instruction. */
    info = lookup_opc(op << 8 | op2);
    s->insn = info;

    /* If we found it, extract the operands. */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(&s->fields, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}

static bool is_afp_reg(int reg)
{
    return reg % 2 || reg > 6;
}

static bool is_fp_pair(int reg)
{
    /* 0,1,4,5,8,9,12,13: to exclude the others, check for single bit */
    return !(reg & 0x2);
}
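
/*
 * Illustrative note: when the AFP-register control is off, only FPRs
 * 0, 2, 4 and 6 may be used, so is_afp_reg() flags every odd-numbered
 * register and every register above 6.  128-bit FP values occupy the
 * register pairs (0,2), (1,3), (4,6), (5,7), (8,10), (9,11), (12,14)
 * and (13,15); the valid pair bases are exactly those with bit 1
 * clear, which is the single-bit test is_fp_pair() performs.
 */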

static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    DisasJumpType ret = DISAS_NEXT;
    DisasOps o = {};
    bool icount = false;

    /* Search for the insn in the table. */
    insn = extract_insn(env, s);

    /* Update insn_start now that we know the ILEN. */
    tcg_set_insn_start_param(s->insn_start, 2, s->ilen);

    /* Not found means unimplemented/illegal opcode. */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
                      s->fields.op, s->fields.op2);
        gen_illegal_opcode(s);
        ret = DISAS_NORETURN;
        goto out;
    }

#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 addr = tcg_constant_i64(s->base.pc_next);
        gen_helper_per_ifetch(tcg_env, addr);
    }
#endif

    /* process flags */
    if (insn->flags) {
        /* privileged instruction */
        if ((s->base.tb->flags & FLAG_MASK_PSTATE) && (insn->flags & IF_PRIV)) {
            gen_program_exception(s, PGM_PRIVILEGED);
            ret = DISAS_NORETURN;
            goto out;
        }

        /* if AFP is not enabled, instructions and registers are forbidden */
        if (!(s->base.tb->flags & FLAG_MASK_AFP)) {
            uint8_t dxc = 0;

            if ((insn->flags & IF_AFP1) && is_afp_reg(get_field(s, r1))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP2) && is_afp_reg(get_field(s, r2))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP3) && is_afp_reg(get_field(s, r3))) {
                dxc = 1;
            }
            if (insn->flags & IF_BFP) {
                dxc = 2;
            }
            if (insn->flags & IF_DFP) {
                dxc = 3;
            }
            if (insn->flags & IF_VEC) {
                dxc = 0xfe;
            }
            if (dxc) {
                gen_data_exception(dxc);
                ret = DISAS_NORETURN;
                goto out;
            }
        }

        /* if vector instructions not enabled, executing them is forbidden */
        if (insn->flags & IF_VEC) {
            if (!(s->base.tb->flags & FLAG_MASK_VECTOR)) {
                gen_data_exception(0xfe);
                ret = DISAS_NORETURN;
                goto out;
            }
        }

        /* input/output is the special case for icount mode */
        if (unlikely(insn->flags & IF_IO)) {
            icount = translator_io_start(&s->base);
        }
    }

    /* Check for insn specification exceptions. */
    if (insn->spec) {
        if ((insn->spec & SPEC_r1_even && get_field(s, r1) & 1) ||
            (insn->spec & SPEC_r2_even && get_field(s, r2) & 1) ||
            (insn->spec & SPEC_r3_even && get_field(s, r3) & 1) ||
            (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(s, r1))) ||
            (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(s, r2)))) {
            gen_program_exception(s, PGM_SPECIFICATION);
            ret = DISAS_NORETURN;
            goto out;
        }
    }

    /* Implement the instruction. */
    if (insn->help_in1) {
        insn->help_in1(s, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (ret != DISAS_NORETURN) {
        if (insn->help_wout) {
            insn->help_wout(s, &o);
        }
        if (insn->help_cout) {
            insn->help_cout(s, &o);
        }
    }

    /* io should be the last instruction in tb when icount is enabled */
    if (unlikely(icount && ret == DISAS_NEXT)) {
        ret = DISAS_TOO_MANY;
    }

#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        /* An exception might be triggered, save PSW if not already done. */
        if (ret == DISAS_NEXT || ret == DISAS_TOO_MANY) {
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
        }

        /* Call the helper to check for a possible PER exception. */
        gen_helper_per_check_exception(tcg_env);
    }
#endif

out:
    /* Advance to the next instruction. */
    s->base.pc_next = s->pc_tmp;
    return ret;
}
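
/*
 * Illustrative sketch only (the concrete hooks vary per entry in
 * insn-data.h.inc): for a simple arithmetic instruction the pipeline
 * above reduces to roughly
 *
 *     o.in1 = value of the first operand;    // help_in1
 *     o.in2 = value of the second operand;   // help_in2
 *     o.out = fresh temporary;               // help_prep
 *     ret   = emit the TCG op(s);            // help_op
 *     write o.out back to its destination;   // help_wout
 *     update cc_op/cc_src/cc_dst;            // help_cout
 *
 * Every stage is optional: a NULL hook (a 0 entry in insn-data.h.inc)
 * is skipped, and help_wout/help_cout are suppressed when help_op
 * returns DISAS_NORETURN, e.g. after raising an exception.
 */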

static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /* 31-bit mode */
    if (!(dc->base.tb->flags & FLAG_MASK_64)) {
        dc->base.pc_first &= 0x7fffffff;
        dc->base.pc_next = dc->base.pc_first;
    }

    dc->cc_op = CC_OP_DYNAMIC;
    dc->ex_value = dc->base.tb->cs_base;
    dc->exit_to_mainloop = (dc->base.tb->flags & FLAG_MASK_PER) || dc->ex_value;
}

static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}

static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /* Delay the set of ilen until we've read the insn. */
    tcg_gen_insn_start(dc->base.pc_next, dc->cc_op, 0);
    dc->insn_start = tcg_last_op();
}

static target_ulong get_next_pc(CPUS390XState *env, DisasContext *s,
                                uint64_t pc)
{
    uint64_t insn = cpu_lduw_code(env, pc);

    return pc + get_ilen((insn >> 8) & 0xff);
}

static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    CPUS390XState *env = cpu_env(cs);
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    dc->base.is_jmp = translate_one(env, dc);
    if (dc->base.is_jmp == DISAS_NEXT) {
        if (dc->ex_value ||
            !is_same_page(dcbase, dc->base.pc_next) ||
            !is_same_page(dcbase, get_next_pc(env, dc, dc->base.pc_next))) {
            dc->base.is_jmp = DISAS_TOO_MANY;
        }
    }
}

static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    switch (dc->base.is_jmp) {
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
        update_psw_addr(dc);
        /* FALLTHRU */
    case DISAS_PC_UPDATED:
        /*
         * Next TB starts off with CC_OP_DYNAMIC, so make sure the
         * cc op type is in env.
         */
        update_cc_op(dc);
        /* FALLTHRU */
    case DISAS_PC_CC_UPDATED:
        /* Exit the TB, either by raising a debug exception or by return. */
        if (dc->exit_to_mainloop) {
            tcg_gen_exit_tb(NULL, 0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        break;
    default:
        g_assert_not_reached();
    }
}
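
/*
 * Note: exit_to_mainloop is set in s390x_tr_init_disas_context() when
 * PER tracing is active or when translating an EXECUTE target; both
 * cases must return to the main loop instead of chaining into another
 * TB, hence tcg_gen_exit_tb() rather than tcg_gen_lookup_and_goto_ptr()
 * above.
 */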

static void s390x_tr_disas_log(const DisasContextBase *dcbase,
                               CPUState *cs, FILE *logfile)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (unlikely(dc->ex_value)) {
        /* ??? Unfortunately target_disas can't use host memory. */
        fprintf(logfile, "IN: EXECUTE %016" PRIx64 "\n", dc->ex_value);
    } else {
        fprintf(logfile, "IN: %s\n", lookup_symbol(dc->base.pc_first));
        target_disas(logfile, cs, dc->base.pc_first, dc->base.tb->size);
    }
}

static const TranslatorOps s390x_tr_ops = {
    .init_disas_context = s390x_tr_init_disas_context,
    .tb_start           = s390x_tr_tb_start,
    .insn_start         = s390x_tr_insn_start,
    .translate_insn     = s390x_tr_translate_insn,
    .tb_stop            = s390x_tr_tb_stop,
    .disas_log          = s390x_tr_disas_log,
};

void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           vaddr pc, void *host_pc)
{
    DisasContext dc;

    translator_loop(cs, tb, max_insns, pc, host_pc, &s390x_tr_ops, &dc.base);
}

void s390x_restore_state_to_opc(CPUState *cs,
                                const TranslationBlock *tb,
                                const uint64_t *data)
{
    CPUS390XState *env = cpu_env(cs);
    int cc_op = data[1];

    env->psw.addr = data[0];

    /* Update the CC opcode if it is not already up-to-date. */
    if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
        env->cc_op = cc_op;
    }

    /* Record ILEN. */
    env->int_pgm_ilen = data[2];
}
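
/*
 * Illustrative note: the data[] slots consumed above correspond, in
 * order, to the three values recorded per instruction by
 * s390x_tr_insn_start() via tcg_gen_insn_start(pc, cc_op, ilen); the
 * ilen slot (index 2) starts out as 0 and is filled in by
 * translate_one() through tcg_set_insn_start_param() once the actual
 * instruction length is known.
 */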