/*
 * Alpha emulation cpu translation for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "sysemu/cpus.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "trace-tcg.h"
#include "exec/translator.h"
#include "exec/log.h"


#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif

typedef struct DisasContext DisasContext;
struct DisasContext {
    DisasContextBase base;

#ifndef CONFIG_USER_ONLY
    uint64_t palbr;
#endif
    uint32_t tbflags;
    int mem_idx;

    /* implver and amask values for this CPU.  */
    int implver;
    int amask;

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;

    /* The set of registers active in the current context.  */
    TCGv *ir;

    /* Temporaries for $31 and $f31 as source and destination.  */
    TCGv zero;
    TCGv sink;
    /* Temporary for immediate constants.  */
    TCGv lit;
};

/* Target-specific return values from translate_one, indicating the
   state of the TB.  Note that DISAS_NEXT indicates that we are not
   exiting the TB.  */
#define DISAS_PC_UPDATED_NOCHAIN  DISAS_TARGET_0
#define DISAS_PC_UPDATED          DISAS_TARGET_1
#define DISAS_PC_STALE            DISAS_TARGET_2

/* global register indexes */
static TCGv_env cpu_env;
static TCGv cpu_std_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_value;

#ifndef CONFIG_USER_ONLY
static TCGv cpu_pal_ir[31];
#endif

#include "exec/gen-icount.h"

void alpha_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUAlphaState, V) }

    typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        DEF_VAR(pc),
        DEF_VAR(lock_addr),
        DEF_VAR(lock_value),
    };

#undef DEF_VAR
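    /* For reference, DEF_VAR(pc) above expands to
       { &cpu_pc, "pc", offsetof(CPUAlphaState, pc) },
       pairing a TCG global with its name and CPUAlphaState slot.  */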

    /* Use the symbolic register names that match the disassembler.  */
    static const char greg_names[31][4] = {
        "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
        "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
        "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
        "t10", "t11", "ra", "t12", "at", "gp", "sp"
    };
    static const char freg_names[31][4] = {
        "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
        "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
        "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
        "f24", "f25", "f26", "f27", "f28", "f29", "f30"
    };
#ifndef CONFIG_USER_ONLY
    static const char shadow_names[8][8] = {
        "pal_t7", "pal_s0", "pal_s1", "pal_s2",
        "pal_s3", "pal_s4", "pal_s5", "pal_t11"
    };
#endif

    static bool done_init = false;
    int i;

    if (done_init) {
        return;
    }
    done_init = true;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    tcg_ctx.tcg_env = cpu_env;

    for (i = 0; i < 31; i++) {
        cpu_std_ir[i] = tcg_global_mem_new_i64(cpu_env,
                                               offsetof(CPUAlphaState, ir[i]),
                                               greg_names[i]);
    }

    for (i = 0; i < 31; i++) {
        cpu_fir[i] = tcg_global_mem_new_i64(cpu_env,
                                            offsetof(CPUAlphaState, fir[i]),
                                            freg_names[i]);
    }

#ifndef CONFIG_USER_ONLY
    memcpy(cpu_pal_ir, cpu_std_ir, sizeof(cpu_pal_ir));
    for (i = 0; i < 8; i++) {
        int r = (i == 7 ? 25 : i + 8);
        cpu_pal_ir[r] = tcg_global_mem_new_i64(cpu_env,
                                               offsetof(CPUAlphaState,
                                                        shadow[i]),
                                               shadow_names[i]);
    }
#endif

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new_i64(cpu_env, v->ofs, v->name);
    }
}

static TCGv load_zero(DisasContext *ctx)
{
    if (TCGV_IS_UNUSED_I64(ctx->zero)) {
        ctx->zero = tcg_const_i64(0);
    }
    return ctx->zero;
}

static TCGv dest_sink(DisasContext *ctx)
{
    if (TCGV_IS_UNUSED_I64(ctx->sink)) {
        ctx->sink = tcg_temp_new();
    }
    return ctx->sink;
}

static void free_context_temps(DisasContext *ctx)
{
    if (!TCGV_IS_UNUSED_I64(ctx->sink)) {
        tcg_gen_discard_i64(ctx->sink);
        tcg_temp_free(ctx->sink);
        TCGV_UNUSED_I64(ctx->sink);
    }
    if (!TCGV_IS_UNUSED_I64(ctx->zero)) {
        tcg_temp_free(ctx->zero);
        TCGV_UNUSED_I64(ctx->zero);
    }
    if (!TCGV_IS_UNUSED_I64(ctx->lit)) {
        tcg_temp_free(ctx->lit);
        TCGV_UNUSED_I64(ctx->lit);
    }
}

static TCGv load_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg,
                         uint8_t lit, bool islit)
{
    if (islit) {
        ctx->lit = tcg_const_i64(lit);
        return ctx->lit;
    } else if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return dest_sink(ctx);
    }
}

static TCGv load_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return dest_sink(ctx);
    }
}
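/* env->flags is a 32-bit field and the ENV_FLAG_*_SHIFT values are bit
   offsets within it.  For example, a shift of 8 selects byte 1 on a
   little-endian host but byte 2 on a big-endian host, which is what
   the offset arithmetic below accounts for.  */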
static int get_flag_ofs(unsigned shift)
{
    int ofs = offsetof(CPUAlphaState, flags);
#ifdef HOST_WORDS_BIGENDIAN
    ofs += 3 - (shift / 8);
#else
    ofs += shift / 8;
#endif
    return ofs;
}

static void ld_flag_byte(TCGv val, unsigned shift)
{
    tcg_gen_ld8u_i64(val, cpu_env, get_flag_ofs(shift));
}

static void st_flag_byte(TCGv val, unsigned shift)
{
    tcg_gen_st8_i64(val, cpu_env, get_flag_ofs(shift));
}

static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(cpu_env, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}

static DisasJumpType gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
    gen_excp_1(exception, error_code);
    return DISAS_NORETURN;
}

static inline DisasJumpType gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}

static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld_i64(tmp, t1, flags, MO_LEQ);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LESL);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LEQ);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, bool fp,
                                bool clear)
{
    TCGv tmp, addr, va;

    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? cpu_fir[ra] : ctx->ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}

static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st_i64(tmp, t1, flags, MO_LEQ);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, bool fp,
                                 bool clear)
{
    TCGv tmp, addr, va;

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? load_fpr(ctx, ra) : load_gpr(ctx, ra));
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}
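/* Emulate a store-conditional with a compare-and-swap: the store
   succeeds only if the memory at lock_addr still holds the value
   recorded by the matching load-locked, and RA receives 1 on success
   and 0 on failure, as the architecture requires.  */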
static DisasJumpType gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                           int32_t disp16, int mem_idx,
                                           TCGMemOp op)
{
    TCGLabel *lab_fail, *lab_done;
    TCGv addr, val;

    addr = tcg_temp_new_i64();
    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
    free_context_temps(ctx);

    lab_fail = gen_new_label();
    lab_done = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);
    tcg_temp_free_i64(addr);

    val = tcg_temp_new_i64();
    tcg_gen_atomic_cmpxchg_i64(val, cpu_lock_addr, cpu_lock_value,
                               load_gpr(ctx, ra), mem_idx, op);
    free_context_temps(ctx);

    if (ra != 31) {
        tcg_gen_setcond_i64(TCG_COND_EQ, ctx->ir[ra], val, cpu_lock_value);
    }
    tcg_temp_free_i64(val);
    tcg_gen_br(lab_done);

    gen_set_label(lab_fail);
    if (ra != 31) {
        tcg_gen_movi_i64(ctx->ir[ra], 0);
    }

    gen_set_label(lab_done);
    tcg_gen_movi_i64(cpu_lock_addr, -1);
    return DISAS_NEXT;
}

static bool in_superpage(DisasContext *ctx, int64_t addr)
{
#ifndef CONFIG_USER_ONLY
    return ((ctx->tbflags & ENV_FLAG_PS_USER) == 0
            && addr >> TARGET_VIRT_ADDR_SPACE_BITS == -1
            && ((addr >> 41) & 3) == 2);
#else
    return false;
#endif
}

static bool use_exit_tb(DisasContext *ctx)
{
    return ((ctx->base.tb->cflags & CF_LAST_IO)
            || ctx->base.singlestep_enabled
            || singlestep);
}

static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    /* Suppress goto_tb in the case of single-stepping and IO.  */
    if (unlikely(use_exit_tb(ctx))) {
        return false;
    }
#ifndef CONFIG_USER_ONLY
    /* If the destination is in the superpage, the page perms can't change.  */
    if (in_superpage(ctx, dest)) {
        return true;
    }
    /* Check for the dest on the same page as the start of the TB.  */
    return ((ctx->base.tb->pc ^ dest) & TARGET_PAGE_MASK) == 0;
#else
    return true;
#endif
}
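/* Branch displacements are encoded in instruction (longword) units,
   so the effective target below is pc_next + 4 * disp.  */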
static DisasJumpType gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
{
    uint64_t dest = ctx->base.pc_next + (disp << 2);

    if (ra != 31) {
        tcg_gen_movi_i64(ctx->ir[ra], ctx->base.pc_next);
    }

    /* Notice branch-to-next; used to initialize RA with the PC.  */
    if (disp == 0) {
        return DISAS_NEXT;
    } else if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->base.tb);
        return DISAS_NORETURN;
    } else {
        tcg_gen_movi_i64(cpu_pc, dest);
        return DISAS_PC_UPDATED;
    }
}

static DisasJumpType gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                        TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->base.pc_next + (disp << 2);
    TCGLabel *lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
        tcg_gen_exit_tb((uintptr_t)ctx->base.tb);

        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->base.tb + 1);

        return DISAS_NORETURN;
    } else {
        TCGv_i64 z = tcg_const_i64(0);
        TCGv_i64 d = tcg_const_i64(dest);
        TCGv_i64 p = tcg_const_i64(ctx->base.pc_next);

        tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);

        tcg_temp_free_i64(z);
        tcg_temp_free_i64(d);
        tcg_temp_free_i64(p);
        return DISAS_PC_UPDATED;
    }
}

static DisasJumpType gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                               int32_t disp, int mask)
{
    if (mask) {
        TCGv tmp = tcg_temp_new();
        DisasJumpType ret;

        tcg_gen_andi_i64(tmp, load_gpr(ctx, ra), 1);
        ret = gen_bcond_internal(ctx, cond, tmp, disp);
        tcg_temp_free(tmp);
        return ret;
    }
    return gen_bcond_internal(ctx, cond, load_gpr(ctx, ra), disp);
}

/* Fold -0.0 for comparison with COND.  */
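/* For example, for TCG_COND_GE/LT the setcond below yields 1 whenever
   SRC is not -0.0; negating that gives an all-ones mask, so SRC passes
   through unchanged, while -0.0 itself produces a zero mask and is
   thus compared as +0.0.  */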
static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask.  */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        abort();
    }
}

static DisasJumpType gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                                int32_t disp)
{
    TCGv cmp_tmp = tcg_temp_new();
    DisasJumpType ret;

    gen_fold_mzero(cond, cmp_tmp, load_fpr(ctx, ra));
    ret = gen_bcond_internal(ctx, cond, cmp_tmp, disp);
    tcg_temp_free(cmp_tmp);
    return ret;
}

static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc)
{
    TCGv_i64 va, vb, z;

    z = load_zero(ctx);
    vb = load_fpr(ctx, rb);
    va = tcg_temp_new();
    gen_fold_mzero(cond, va, load_fpr(ctx, ra));

    tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), va, z, vb, load_fpr(ctx, rc));

    tcg_temp_free(va);
}

#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */
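/* As an example of the encoding, fn11 = 0x6AC (used by CVTST/S below)
   has QUAL_S and QUAL_I set with QUAL_RM_N rounding, i.e. software
   completion with inexact detection, rounding to nearest even.  */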
static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}

static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}

static TCGv gen_ieee_input(DisasContext *ctx, int reg, int fn11, int is_cmp)
{
    TCGv val;

    if (unlikely(reg == 31)) {
        val = load_zero(ctx);
    } else {
        val = cpu_fir[reg];
        if ((fn11 & QUAL_S) == 0) {
            if (is_cmp) {
                gen_helper_ieee_input_cmp(cpu_env, val);
            } else {
                gen_helper_ieee_input(cpu_env, val);
            }
        } else {
#ifndef CONFIG_USER_ONLY
            /* In system mode, raise exceptions for denormals like real
               hardware.  In user mode, proceed as if the OS completion
               handler is handling the denormal as per spec.  */
            gen_helper_ieee_input_s(cpu_env, val);
#endif
        }
    }
    return val;
}

static void gen_fp_exc_raise(int rc, int fn11)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */
    TCGv_i32 reg, ign;
    uint32_t ignore = 0;

    if (!(fn11 & QUAL_U)) {
        /* Note that QUAL_U == QUAL_V, so ignore either.  */
        ignore |= FPCR_UNF | FPCR_IOV;
    }
    if (!(fn11 & QUAL_I)) {
        ignore |= FPCR_INE;
    }
    ign = tcg_const_i32(ignore);

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_const_i32(rc + 32);
    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(cpu_env, ign, reg);
    } else {
        gen_helper_fp_exc_raise(cpu_env, ign, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(ign);
}
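/* CVTLQ's input holds a longword in FP register format: bits <31:30>
   of the integer live at <63:62> and bits <29:0> at <58:29>, which is
   why the shift-and-deposit sequence below suffices.  */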
static void gen_cvtlq(TCGv vc, TCGv vb)
{
    TCGv tmp = tcg_temp_new();

    /* The arithmetic right shift here, plus the sign-extended mask below
       yields a sign-extended result without an explicit ext32s_i64.  */
    tcg_gen_shri_i64(tmp, vb, 29);
    tcg_gen_sari_i64(vc, vb, 32);
    tcg_gen_deposit_i64(vc, vc, tmp, 0, 30);

    tcg_temp_free(tmp);
}

static void gen_ieee_arith2(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    vb = gen_ieee_input(ctx, rb, fn11, 0);
    helper(dest_fpr(ctx, rc), cpu_env, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)

static void gen_cvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb, vc;

    /* No need to set flushzero, since we have an integer output.  */
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);

    /* Almost all integer conversions use cropped rounding;
       special case that.  */
    if ((fn11 & QUAL_RM_MASK) == QUAL_RM_C) {
        gen_helper_cvttq_c(vc, cpu_env, vb);
    } else {
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(vc, cpu_env, vb);
    }
    gen_fp_exc_raise(rc, fn11);
}

static void gen_ieee_intcvt(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb, vc;

    gen_qual_roundmode(ctx, fn11);
    vb = load_fpr(ctx, rb);
    vc = dest_fpr(ctx, rc);

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    if (fn11 & QUAL_I) {
        helper(vc, cpu_env, vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(vc, cpu_env, vb);
    }
}

#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)

static void gen_cpy_mask(TCGv vc, TCGv va, TCGv vb, bool inv_a, uint64_t mask)
{
    TCGv vmask = tcg_const_i64(mask);
    TCGv tmp = tcg_temp_new_i64();

    if (inv_a) {
        tcg_gen_andc_i64(tmp, vmask, va);
    } else {
        tcg_gen_and_i64(tmp, va, vmask);
    }

    tcg_gen_andc_i64(vc, vb, vmask);
    tcg_gen_or_i64(vc, vc, tmp);

    tcg_temp_free(vmask);
    tcg_temp_free(tmp);
}

static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    va = gen_ieee_input(ctx, ra, fn11, 0);
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)

static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    va = gen_ieee_input(ctx, ra, fn11, 1);
    vb = gen_ieee_input(ctx, rb, fn11, 1);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)

static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1) {
            mask |= 0xffull << (i * 8);
        }
    }
    return mask;
}
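/* For example, zapnot_mask(0x01) == 0xff and
   zapnot_mask(0x0f) == 0xffffffff: bit I of LIT keeps byte I.  */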
/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
        break;
    }
}
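/* The EXT/INS/MSK families below position bytes within a quadword.
   As a concrete case, EXTBL with a literal shift of 5 computes
   pos = 40 and len = 8, extracting byte 5 of VA into the low byte
   of the result.  */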
/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (64 - lit * 8) & 0x3f;
        int len = cto32(byte_mask) * 8;
        if (pos < len) {
            tcg_gen_deposit_z_i64(vc, va, pos, len - pos);
        } else {
            tcg_gen_movi_i64(vc, 0);
        }
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, load_gpr(ctx, rb), 3);
        tcg_gen_neg_i64(tmp, tmp);
        tcg_gen_andi_i64(tmp, tmp, 0x3f);
        tcg_gen_shl_i64(vc, va, tmp);
        tcg_temp_free(tmp);
    }
    gen_zapnoti(vc, vc, byte_mask);
}

/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos + len >= 64) {
            len = 64 - pos;
        }
        tcg_gen_extract_i64(vc, va, pos, len);
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(tmp, tmp, 3);
        tcg_gen_shr_i64(vc, va, tmp);
        tcg_temp_free(tmp);
        gen_zapnoti(vc, vc, byte_mask);
    }
}

/* INSWH, INSLH, INSQH */
static void gen_ins_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = 64 - (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos < len) {
            tcg_gen_extract_i64(vc, va, pos, len - pos);
        } else {
            tcg_gen_movi_i64(vc, 0);
        }
    } else {
        TCGv tmp = tcg_temp_new();
        TCGv shift = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti(tmp, va, byte_mask);

        /* If (B & 7) == 0, we need to shift by 64 and leave a zero.  Do this
           portably by splitting the shift into two parts: shift_count-1 and 1.
           Arrange for the -1 by using ones-complement instead of
           twos-complement in the negation: ~(B * 8) & 63.  */

        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);

        tcg_gen_shr_i64(vc, tmp, shift);
        tcg_gen_shri_i64(vc, vc, 1);
        tcg_temp_free(shift);
        tcg_temp_free(tmp);
    }
}

/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos + len > 64) {
            len = 64 - pos;
        }
        tcg_gen_deposit_z_i64(vc, va, pos, len);
    } else {
        TCGv tmp = tcg_temp_new();
        TCGv shift = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti(tmp, va, byte_mask);

        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_shl_i64(vc, tmp, shift);
        tcg_temp_free(shift);
        tcg_temp_free(tmp);
    }
}

/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~(B * 8) & 63.  */

        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(vc, va, mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}

/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~(byte_mask << (lit & 7)));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(vc, va, mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
static void gen_rx(DisasContext *ctx, int ra, int set)
{
    TCGv tmp;

    if (ra != 31) {
        ld_flag_byte(ctx->ir[ra], ENV_FLAG_RX_SHIFT);
    }

    tmp = tcg_const_i64(set);
    st_flag_byte(tmp, ENV_FLAG_RX_SHIFT);
    tcg_temp_free(tmp);
}

static DisasJumpType gen_call_pal(DisasContext *ctx, int palcode)
{
    /* We're emulating OSF/1 PALcode.  Many of these are trivial access
       to internal cpu registers.  */

    /* Unprivileged PAL call */
    if (palcode >= 0x80 && palcode < 0xC0) {
        switch (palcode) {
        case 0x86:
            /* IMB */
            /* No-op inside QEMU.  */
            break;
        case 0x9E:
            /* RDUNIQUE */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, unique));
            break;
        case 0x9F:
            /* WRUNIQUE */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, unique));
            break;
        default:
            palcode &= 0xbf;
            goto do_call_pal;
        }
        return DISAS_NEXT;
    }

#ifndef CONFIG_USER_ONLY
    /* Privileged PAL code */
    if (palcode < 0x40 && (ctx->tbflags & ENV_FLAG_PS_USER) == 0) {
        switch (palcode) {
        case 0x01:
            /* CFLUSH */
            /* No-op inside QEMU.  */
            break;
        case 0x02:
            /* DRAINA */
            /* No-op inside QEMU.  */
            break;
        case 0x2D:
            /* WRVPTPTR */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, vptptr));
            break;
        case 0x31:
            /* WRVAL */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, sysval));
            break;
        case 0x32:
            /* RDVAL */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, sysval));
            break;

        case 0x35:
            /* SWPIPL */
            /* Note that we already know we're in kernel mode, so we know
               that PS only contains the 3 IPL bits.  */
            ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT);

            /* But make sure and store only the 3 IPL bits from the user.  */
            {
                TCGv tmp = tcg_temp_new();
                tcg_gen_andi_i64(tmp, ctx->ir[IR_A0], PS_INT_MASK);
                st_flag_byte(tmp, ENV_FLAG_PS_SHIFT);
                tcg_temp_free(tmp);
            }

            /* Allow interrupts to be recognized right away.  */
            tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
            return DISAS_PC_UPDATED_NOCHAIN;

        case 0x36:
            /* RDPS */
            ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT);
            break;

        case 0x38:
            /* WRUSP */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3A:
            /* RDUSP */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3C:
            /* WHAMI */
            tcg_gen_ld32s_i64(ctx->ir[IR_V0], cpu_env,
                -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
            break;

        case 0x3E:
            /* WTINT */
            {
                TCGv_i32 tmp = tcg_const_i32(1);
                tcg_gen_st_i32(tmp, cpu_env, -offsetof(AlphaCPU, env) +
                                             offsetof(CPUState, halted));
                tcg_temp_free_i32(tmp);
            }
            tcg_gen_movi_i64(ctx->ir[IR_V0], 0);
            return gen_excp(ctx, EXCP_HALTED, 0);

        default:
            palcode &= 0x3f;
            goto do_call_pal;
        }
        return DISAS_NEXT;
    }
#endif
    return gen_invalid(ctx);

 do_call_pal:
#ifdef CONFIG_USER_ONLY
    return gen_excp(ctx, EXCP_CALL_PAL, palcode);
#else
    {
        TCGv tmp = tcg_temp_new();
        uint64_t exc_addr = ctx->base.pc_next;
        uint64_t entry = ctx->palbr;

        if (ctx->tbflags & ENV_FLAG_PAL_MODE) {
            exc_addr |= 1;
        } else {
            tcg_gen_movi_i64(tmp, 1);
            st_flag_byte(tmp, ENV_FLAG_PAL_SHIFT);
        }

        tcg_gen_movi_i64(tmp, exc_addr);
        tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
        tcg_temp_free(tmp);
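        /* Privileged PAL entry points live at palbr + 0x1000 and
           unprivileged ones at palbr + 0x2000; e.g. CALL_PAL 0x83
           (callsys) enters at palbr + 0x2000 + 3 * 64.  */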
        entry += (palcode & 0x80
                  ? 0x2000 + (palcode - 0x80) * 64
                  : 0x1000 + palcode * 64);

        /* Since the destination is running in PALmode, we don't really
           need the page permissions check.  We'll see the existence of
           the page when we create the TB, and we'll flush all TBs if
           we change the PAL base register.  */
        if (!use_exit_tb(ctx)) {
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(cpu_pc, entry);
            tcg_gen_exit_tb((uintptr_t)ctx->base.tb);
            return DISAS_NORETURN;
        } else {
            tcg_gen_movi_i64(cpu_pc, entry);
            return DISAS_PC_UPDATED;
        }
    }
#endif
}

#ifndef CONFIG_USER_ONLY

#define PR_LONG         0x200000

static int cpu_pr_data(int pr)
{
    switch (pr) {
    case 2:  return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
    case 3:  return offsetof(CPUAlphaState, trap_arg0);
    case 4:  return offsetof(CPUAlphaState, trap_arg1);
    case 5:  return offsetof(CPUAlphaState, trap_arg2);
    case 6:  return offsetof(CPUAlphaState, exc_addr);
    case 7:  return offsetof(CPUAlphaState, palbr);
    case 8:  return offsetof(CPUAlphaState, ptbr);
    case 9:  return offsetof(CPUAlphaState, vptptr);
    case 10: return offsetof(CPUAlphaState, unique);
    case 11: return offsetof(CPUAlphaState, sysval);
    case 12: return offsetof(CPUAlphaState, usp);

    case 40 ... 63:
        return offsetof(CPUAlphaState, scratch[pr - 40]);

    case 251:
        return offsetof(CPUAlphaState, alarm_expire);
    }
    return 0;
}
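/* gen_mfpr/gen_mtpr below use cpu_pr_data() to map an IPR number onto
   its CPUAlphaState slot; entries tagged PR_LONG (e.g. pcc_ofs) are
   32-bit fields and are accessed with 32-bit loads and stores.  */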
static DisasJumpType gen_mfpr(DisasContext *ctx, TCGv va, int regno)
{
    void (*helper)(TCGv);
    int data;

    switch (regno) {
    case 32 ... 39:
        /* Accessing the "non-shadow" general registers.  */
        regno = regno == 39 ? 25 : regno - 32 + 8;
        tcg_gen_mov_i64(va, cpu_std_ir[regno]);
        break;

    case 250: /* WALLTIME */
        helper = gen_helper_get_walltime;
        goto do_helper;
    case 249: /* VMTIME */
        helper = gen_helper_get_vmtime;
    do_helper:
        if (use_icount) {
            gen_io_start();
            helper(va);
            gen_io_end();
            return DISAS_PC_STALE;
        } else {
            helper(va);
        }
        break;

    case 0: /* PS */
        ld_flag_byte(va, ENV_FLAG_PS_SHIFT);
        break;
    case 1: /* FEN */
        ld_flag_byte(va, ENV_FLAG_FEN_SHIFT);
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data == 0) {
            tcg_gen_movi_i64(va, 0);
        } else if (data & PR_LONG) {
            tcg_gen_ld32s_i64(va, cpu_env, data & ~PR_LONG);
        } else {
            tcg_gen_ld_i64(va, cpu_env, data);
        }
        break;
    }

    return DISAS_NEXT;
}

static DisasJumpType gen_mtpr(DisasContext *ctx, TCGv vb, int regno)
{
    int data;

    switch (regno) {
    case 255:
        /* TBIA */
        gen_helper_tbia(cpu_env);
        break;

    case 254:
        /* TBIS */
        gen_helper_tbis(cpu_env, vb);
        break;

    case 253:
        /* WAIT */
        {
            TCGv_i32 tmp = tcg_const_i32(1);
            tcg_gen_st_i32(tmp, cpu_env, -offsetof(AlphaCPU, env) +
                                         offsetof(CPUState, halted));
            tcg_temp_free_i32(tmp);
        }
        return gen_excp(ctx, EXCP_HALTED, 0);

    case 252:
        /* HALT */
        gen_helper_halt(vb);
        return DISAS_PC_STALE;

    case 251:
        /* ALARM */
        gen_helper_set_alarm(cpu_env, vb);
        break;

    case 7:
        /* PALBR */
        tcg_gen_st_i64(vb, cpu_env, offsetof(CPUAlphaState, palbr));
        /* Changing the PAL base register implies un-chaining all of the TBs
           that ended with a CALL_PAL.  Since the base register usually only
           changes during boot, flushing everything works well.  */
        gen_helper_tb_flush(cpu_env);
        return DISAS_PC_STALE;

    case 32 ... 39:
        /* Accessing the "non-shadow" general registers.  */
        regno = regno == 39 ? 25 : regno - 32 + 8;
        tcg_gen_mov_i64(cpu_std_ir[regno], vb);
        break;

    case 0: /* PS */
        st_flag_byte(vb, ENV_FLAG_PS_SHIFT);
        break;
    case 1: /* FEN */
        st_flag_byte(vb, ENV_FLAG_FEN_SHIFT);
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data != 0) {
            if (data & PR_LONG) {
                tcg_gen_st32_i64(vb, cpu_env, data & ~PR_LONG);
            } else {
                tcg_gen_st_i64(vb, cpu_env, data);
            }
        }
        break;
    }

    return DISAS_NEXT;
}
#endif /* !USER_ONLY */

#define REQUIRE_NO_LIT                          \
    do {                                        \
        if (real_islit) {                       \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_AMASK(FLAG)                     \
    do {                                        \
        if ((ctx->amask & AMASK_##FLAG) == 0) { \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_TB_FLAG(FLAG)                   \
    do {                                        \
        if ((ctx->tbflags & (FLAG)) == 0) {     \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_REG_31(WHICH)                   \
    do {                                        \
        if (WHICH != 31) {                      \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
{
    int32_t disp21, disp16, disp12 __attribute__((unused));
    uint16_t fn11;
    uint8_t opc, ra, rb, rc, fpfn, fn7, lit;
    bool islit, real_islit;
    TCGv va, vb, vc, tmp, tmp2;
    TCGv_i32 t32;
    DisasJumpType ret;

    /* Decode all instruction fields */
    opc = extract32(insn, 26, 6);
    ra = extract32(insn, 21, 5);
    rb = extract32(insn, 16, 5);
    rc = extract32(insn, 0, 5);
    real_islit = islit = extract32(insn, 12, 1);
    lit = extract32(insn, 13, 8);

    disp21 = sextract32(insn, 0, 21);
    disp16 = sextract32(insn, 0, 16);
    disp12 = sextract32(insn, 0, 12);

    fn11 = extract32(insn, 5, 11);
    fpfn = extract32(insn, 5, 6);
    fn7 = extract32(insn, 5, 7);

    if (rb == 31 && !islit) {
        islit = true;
        lit = 0;
    }

    ret = DISAS_NEXT;
    switch (opc) {
    case 0x00:
        /* CALL_PAL */
        ret = gen_call_pal(ctx, insn & 0x03ffffff);
        break;
    case 0x01:
        /* OPC01 */
        goto invalid_opc;
    case 0x02:
        /* OPC02 */
        goto invalid_opc;
    case 0x03:
        /* OPC03 */
        goto invalid_opc;
    case 0x04:
        /* OPC04 */
        goto invalid_opc;
    case 0x05:
        /* OPC05 */
        goto invalid_opc;
    case 0x06:
        /* OPC06 */
        goto invalid_opc;
    case 0x07:
        /* OPC07 */
        goto invalid_opc;
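    /* LDA/LDAH double as the canonical load-immediate idioms, e.g.
       "lda $0,123($31)" sets $0 = 123 and LDAH supplies the high
       16 bits; the rb == 31 case below handles exactly that.  */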
    case 0x09:
        /* LDAH */
        disp16 = (uint32_t)disp16 << 16;
        /* fall through */
    case 0x08:
        /* LDA */
        va = dest_gpr(ctx, ra);
        /* It's worth special-casing immediate loads.  */
        if (rb == 31) {
            tcg_gen_movi_i64(va, disp16);
        } else {
            tcg_gen_addi_i64(va, load_gpr(ctx, rb), disp16);
        }
        break;

    case 0x0A:
        /* LDBU */
        REQUIRE_AMASK(BWX);
        gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
        break;
    case 0x0B:
        /* LDQ_U */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
        break;
    case 0x0C:
        /* LDWU */
        REQUIRE_AMASK(BWX);
        gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
        break;
    case 0x0D:
        /* STW */
        REQUIRE_AMASK(BWX);
        gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
        break;
    case 0x0E:
        /* STB */
        REQUIRE_AMASK(BWX);
        gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
        break;
    case 0x0F:
        /* STQ_U */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
        break;

    case 0x10:
        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);

        if (ra == 31) {
            if (fn7 == 0x00) {
                /* Special case ADDL as SEXTL.  */
                tcg_gen_ext32s_i64(vc, vb);
                break;
            }
            if (fn7 == 0x29) {
                /* Special case SUBQ as NEGQ.  */
                tcg_gen_neg_i64(vc, vb);
                break;
            }
        }

        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* ADDL */
            tcg_gen_add_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x02:
            /* S4ADDL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_add_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x09:
            /* SUBL */
            tcg_gen_sub_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x0B:
            /* S4SUBL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_sub_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x0F:
            /* CMPBGE */
            if (ra == 31) {
                /* Special case 0 >= X as X == 0.  */
                gen_helper_cmpbe0(vc, vb);
            } else {
                gen_helper_cmpbge(vc, va, vb);
            }
            break;
        case 0x12:
            /* S8ADDL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_add_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x1B:
            /* S8SUBL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_sub_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x1D:
            /* CMPULT */
            tcg_gen_setcond_i64(TCG_COND_LTU, vc, va, vb);
            break;
        case 0x20:
            /* ADDQ */
            tcg_gen_add_i64(vc, va, vb);
            break;
        case 0x22:
            /* S4ADDQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_add_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x29:
            /* SUBQ */
            tcg_gen_sub_i64(vc, va, vb);
            break;
        case 0x2B:
            /* S4SUBQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_sub_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x2D:
            /* CMPEQ */
            tcg_gen_setcond_i64(TCG_COND_EQ, vc, va, vb);
            break;
        case 0x32:
            /* S8ADDQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_add_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x3B:
            /* S8SUBQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_sub_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x3D:
            /* CMPULE */
            tcg_gen_setcond_i64(TCG_COND_LEU, vc, va, vb);
            break;
        case 0x40:
            /* ADDL/V */
            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_add_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(cpu_env, vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x49:
            /* SUBL/V */
            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_sub_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(cpu_env, vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x4D:
            /* CMPLT */
            tcg_gen_setcond_i64(TCG_COND_LT, vc, va, vb);
            break;
        case 0x60:
            /* ADDQ/V */
            /* Signed overflow iff the operands have the same sign and
               the sign of the result differs: tmp ends up with bit 63
               set exactly in that case.  */
            tmp = tcg_temp_new();
            tmp2 = tcg_temp_new();
            tcg_gen_eqv_i64(tmp, va, vb);
            tcg_gen_mov_i64(tmp2, va);
            tcg_gen_add_i64(vc, va, vb);
            tcg_gen_xor_i64(tmp2, tmp2, vc);
            tcg_gen_and_i64(tmp, tmp, tmp2);
            tcg_gen_shri_i64(tmp, tmp, 63);
            tcg_gen_movi_i64(tmp2, 0);
            gen_helper_check_overflow(cpu_env, tmp, tmp2);
            tcg_temp_free(tmp);
            tcg_temp_free(tmp2);
            break;
        case 0x69:
            /* SUBQ/V */
            /* As for ADDQ/V, but overflow requires differing operand
               signs, hence the initial xor.  */
            tmp = tcg_temp_new();
            tmp2 = tcg_temp_new();
            tcg_gen_xor_i64(tmp, va, vb);
            tcg_gen_mov_i64(tmp2, va);
            tcg_gen_sub_i64(vc, va, vb);
            tcg_gen_xor_i64(tmp2, tmp2, vc);
            tcg_gen_and_i64(tmp, tmp, tmp2);
            tcg_gen_shri_i64(tmp, tmp, 63);
            tcg_gen_movi_i64(tmp2, 0);
            gen_helper_check_overflow(cpu_env, tmp, tmp2);
            tcg_temp_free(tmp);
            tcg_temp_free(tmp2);
            break;
        case 0x6D:
            /* CMPLE */
            tcg_gen_setcond_i64(TCG_COND_LE, vc, va, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x11:
        if (fn7 == 0x20) {
            if (rc == 31) {
                /* Special case BIS as NOP.  */
                break;
            }
            if (ra == 31) {
                /* Special case BIS as MOV.  */
                vc = dest_gpr(ctx, rc);
                if (islit) {
                    tcg_gen_movi_i64(vc, lit);
                } else {
                    tcg_gen_mov_i64(vc, load_gpr(ctx, rb));
                }
                break;
            }
        }

        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);

        if (fn7 == 0x28 && ra == 31) {
            /* Special case ORNOT as NOT.  */
            tcg_gen_not_i64(vc, vb);
            break;
        }

        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* AND */
            tcg_gen_and_i64(vc, va, vb);
            break;
        case 0x08:
            /* BIC */
            tcg_gen_andc_i64(vc, va, vb);
            break;
        case 0x14:
            /* CMOVLBS */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, va, 1);
            tcg_gen_movcond_i64(TCG_COND_NE, vc, tmp, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            tcg_temp_free(tmp);
            break;
        case 0x16:
            /* CMOVLBC */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, va, 1);
            tcg_gen_movcond_i64(TCG_COND_EQ, vc, tmp, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            tcg_temp_free(tmp);
            break;
        case 0x20:
            /* BIS */
            tcg_gen_or_i64(vc, va, vb);
            break;
        case 0x24:
            /* CMOVEQ */
            tcg_gen_movcond_i64(TCG_COND_EQ, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x26:
            /* CMOVNE */
            tcg_gen_movcond_i64(TCG_COND_NE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x28:
            /* ORNOT */
            tcg_gen_orc_i64(vc, va, vb);
            break;
        case 0x40:
            /* XOR */
            tcg_gen_xor_i64(vc, va, vb);
            break;
        case 0x44:
            /* CMOVLT */
            tcg_gen_movcond_i64(TCG_COND_LT, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x46:
            /* CMOVGE */
            tcg_gen_movcond_i64(TCG_COND_GE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x48:
            /* EQV */
            tcg_gen_eqv_i64(vc, va, vb);
            break;
        case 0x61:
            /* AMASK */
            REQUIRE_REG_31(ra);
            tcg_gen_andi_i64(vc, vb, ~ctx->amask);
            break;
        case 0x64:
            /* CMOVLE */
            tcg_gen_movcond_i64(TCG_COND_LE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x66:
            /* CMOVGT */
            tcg_gen_movcond_i64(TCG_COND_GT, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x6C:
            /* IMPLVER */
            REQUIRE_REG_31(ra);
            tcg_gen_movi_i64(vc, ctx->implver);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x12:
        vc = dest_gpr(ctx, rc);
        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x02:
            /* MSKBL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x06:
            /* EXTBL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x0B:
            /* INSBL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x12:
            /* MSKWL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x16:
            /* EXTWL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x1B:
            /* INSWL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x22:
            /* MSKLL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x26:
            /* EXTLL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x2B:
            /* INSLL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x30:
            /* ZAP */
            if (islit) {
                gen_zapnoti(vc, va, ~lit);
            } else {
                gen_helper_zap(vc, va, load_gpr(ctx, rb));
            }
            break;
        case 0x31:
            /* ZAPNOT */
            if (islit) {
                gen_zapnoti(vc, va, lit);
            } else {
                gen_helper_zapnot(vc, va, load_gpr(ctx, rb));
            }
            break;
        case 0x32:
            /* MSKQL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x34:
            /* SRL */
            if (islit) {
                tcg_gen_shri_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_shr_i64(vc, va, tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x36:
            /* EXTQL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x39:
            /* SLL */
            if (islit) {
                tcg_gen_shli_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_shl_i64(vc, va, tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x3B:
            /* INSQL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x3C:
            /* SRA */
            if (islit) {
                tcg_gen_sari_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_sar_i64(vc, va, tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x52:
            /* MSKWH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x57:
            /* INSWH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x5A:
            /* EXTWH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x62:
            /* MSKLH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x67:
            /* INSLH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x6A:
            /* EXTLH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x72:
            /* MSKQH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x77:
            /* INSQH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x7A:
            /* EXTQH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x13:
        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);
        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* MULL */
            tcg_gen_mul_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x20:
            /* MULQ */
            tcg_gen_mul_i64(vc, va, vb);
            break;
        case 0x30:
            /* UMULH */
            tmp = tcg_temp_new();
            tcg_gen_mulu2_i64(tmp, vc, va, vb);
            tcg_temp_free(tmp);
            break;
        case 0x40:
            /* MULL/V */
            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_mul_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(cpu_env, vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x60:
            /* MULQ/V */
            tmp = tcg_temp_new();
            tmp2 = tcg_temp_new();
            tcg_gen_muls2_i64(vc, tmp, va, vb);
            tcg_gen_sari_i64(tmp2, vc, 63);
            gen_helper_check_overflow(cpu_env, tmp, tmp2);
            tcg_temp_free(tmp);
            tcg_temp_free(tmp2);
            break;
        default:
            goto invalid_opc;
        }
        break;
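    /* Opcode 0x14: square roots and integer-to-FP register moves.
       ITOFT is a raw 64-bit move, while ITOFS/ITOFF repack the low
       32 bits into S/F-float register format via the memory_to_s/f
       helpers.  */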
    case 0x14:
        REQUIRE_AMASK(FIX);
        vc = dest_fpr(ctx, rc);
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x04:
            /* ITOFS */
            REQUIRE_REG_31(rb);
            t32 = tcg_temp_new_i32();
            va = load_gpr(ctx, ra);
            tcg_gen_extrl_i64_i32(t32, va);
            gen_helper_memory_to_s(vc, t32);
            tcg_temp_free_i32(t32);
            break;
        case 0x0A:
            /* SQRTF */
            REQUIRE_REG_31(ra);
            vb = load_fpr(ctx, rb);
            gen_helper_sqrtf(vc, cpu_env, vb);
            break;
        case 0x0B:
            /* SQRTS */
            REQUIRE_REG_31(ra);
            gen_sqrts(ctx, rb, rc, fn11);
            break;
        case 0x14:
            /* ITOFF */
            REQUIRE_REG_31(rb);
            t32 = tcg_temp_new_i32();
            va = load_gpr(ctx, ra);
            tcg_gen_extrl_i64_i32(t32, va);
            gen_helper_memory_to_f(vc, t32);
            tcg_temp_free_i32(t32);
            break;
        case 0x24:
            /* ITOFT */
            REQUIRE_REG_31(rb);
            va = load_gpr(ctx, ra);
            tcg_gen_mov_i64(vc, va);
            break;
        case 0x2A:
            /* SQRTG */
            REQUIRE_REG_31(ra);
            vb = load_fpr(ctx, rb);
            gen_helper_sqrtg(vc, cpu_env, vb);
            break;
        case 0x2B:
            /* SQRTT */
            REQUIRE_REG_31(ra);
            gen_sqrtt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x15:
        /* VAX floating point */
        /* XXX: rounding mode and trap are ignored (!) */
        vc = dest_fpr(ctx, rc);
        vb = load_fpr(ctx, rb);
        va = load_fpr(ctx, ra);
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDF */
            gen_helper_addf(vc, cpu_env, va, vb);
            break;
        case 0x01:
            /* SUBF */
            gen_helper_subf(vc, cpu_env, va, vb);
            break;
        case 0x02:
            /* MULF */
            gen_helper_mulf(vc, cpu_env, va, vb);
            break;
        case 0x03:
            /* DIVF */
            gen_helper_divf(vc, cpu_env, va, vb);
            break;
        case 0x1E:
            /* CVTDG -- TODO */
            REQUIRE_REG_31(ra);
            goto invalid_opc;
        case 0x20:
            /* ADDG */
            gen_helper_addg(vc, cpu_env, va, vb);
            break;
        case 0x21:
            /* SUBG */
            gen_helper_subg(vc, cpu_env, va, vb);
            break;
        case 0x22:
            /* MULG */
            gen_helper_mulg(vc, cpu_env, va, vb);
            break;
        case 0x23:
            /* DIVG */
            gen_helper_divg(vc, cpu_env, va, vb);
            break;
        case 0x25:
            /* CMPGEQ */
            gen_helper_cmpgeq(vc, cpu_env, va, vb);
            break;
        case 0x26:
            /* CMPGLT */
            gen_helper_cmpglt(vc, cpu_env, va, vb);
            break;
        case 0x27:
            /* CMPGLE */
            gen_helper_cmpgle(vc, cpu_env, va, vb);
            break;
        case 0x2C:
            /* CVTGF */
            REQUIRE_REG_31(ra);
            gen_helper_cvtgf(vc, cpu_env, vb);
            break;
        case 0x2D:
            /* CVTGD -- TODO */
            REQUIRE_REG_31(ra);
            goto invalid_opc;
        case 0x2F:
            /* CVTGQ */
            REQUIRE_REG_31(ra);
            gen_helper_cvtgq(vc, cpu_env, vb);
            break;
        case 0x3C:
            /* CVTQF */
            REQUIRE_REG_31(ra);
            gen_helper_cvtqf(vc, cpu_env, vb);
            break;
        case 0x3E:
            /* CVTQG */
            REQUIRE_REG_31(ra);
            gen_helper_cvtqg(vc, cpu_env, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;
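    /* Opcode 0x16: IEEE arithmetic.  Unlike the VAX operations above,
       these honor the fn11 qualifier bits for rounding mode,
       flush-to-zero and software completion (see the QUAL_*
       definitions).  */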
    case 0x16:
        /* IEEE floating-point */
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDS */
            gen_adds(ctx, ra, rb, rc, fn11);
            break;
        case 0x01:
            /* SUBS */
            gen_subs(ctx, ra, rb, rc, fn11);
            break;
        case 0x02:
            /* MULS */
            gen_muls(ctx, ra, rb, rc, fn11);
            break;
        case 0x03:
            /* DIVS */
            gen_divs(ctx, ra, rb, rc, fn11);
            break;
        case 0x20:
            /* ADDT */
            gen_addt(ctx, ra, rb, rc, fn11);
            break;
        case 0x21:
            /* SUBT */
            gen_subt(ctx, ra, rb, rc, fn11);
            break;
        case 0x22:
            /* MULT */
            gen_mult(ctx, ra, rb, rc, fn11);
            break;
        case 0x23:
            /* DIVT */
            gen_divt(ctx, ra, rb, rc, fn11);
            break;
        case 0x24:
            /* CMPTUN */
            gen_cmptun(ctx, ra, rb, rc, fn11);
            break;
        case 0x25:
            /* CMPTEQ */
            gen_cmpteq(ctx, ra, rb, rc, fn11);
            break;
        case 0x26:
            /* CMPTLT */
            gen_cmptlt(ctx, ra, rb, rc, fn11);
            break;
        case 0x27:
            /* CMPTLE */
            gen_cmptle(ctx, ra, rb, rc, fn11);
            break;
        case 0x2C:
            REQUIRE_REG_31(ra);
            if (fn11 == 0x2AC || fn11 == 0x6AC) {
                /* CVTST */
                gen_cvtst(ctx, rb, rc, fn11);
            } else {
                /* CVTTS */
                gen_cvtts(ctx, rb, rc, fn11);
            }
            break;
        case 0x2F:
            /* CVTTQ */
            REQUIRE_REG_31(ra);
            gen_cvttq(ctx, rb, rc, fn11);
            break;
        case 0x3C:
            /* CVTQS */
            REQUIRE_REG_31(ra);
            gen_cvtqs(ctx, rb, rc, fn11);
            break;
        case 0x3E:
            /* CVTQT */
            REQUIRE_REG_31(ra);
            gen_cvtqt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x17:
        switch (fn11) {
        case 0x010:
            /* CVTLQ */
            REQUIRE_REG_31(ra);
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            gen_cvtlq(vc, vb);
            break;
        case 0x020:
            /* CPYS */
            if (rc == 31) {
                /* Special case CPYS as FNOP.  */
            } else {
                vc = dest_fpr(ctx, rc);
                va = load_fpr(ctx, ra);
                if (ra == rb) {
                    /* Special case CPYS as FMOV.  */
                    tcg_gen_mov_i64(vc, va);
                } else {
                    vb = load_fpr(ctx, rb);
                    gen_cpy_mask(vc, va, vb, 0, 0x8000000000000000ULL);
                }
            }
            break;
        case 0x021:
            /* CPYSN */
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            va = load_fpr(ctx, ra);
            gen_cpy_mask(vc, va, vb, 1, 0x8000000000000000ULL);
            break;
        case 0x022:
            /* CPYSE */
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            va = load_fpr(ctx, ra);
            gen_cpy_mask(vc, va, vb, 0, 0xFFF0000000000000ULL);
            break;
        case 0x024:
            /* MT_FPCR */
            va = load_fpr(ctx, ra);
            gen_helper_store_fpcr(cpu_env, va);
            if (ctx->tb_rm == QUAL_RM_D) {
                /* Re-do the copy of the rounding mode to fp_status
                   the next time we use dynamic rounding.  */
                ctx->tb_rm = -1;
            }
            break;
        case 0x025:
            /* MF_FPCR */
            va = dest_fpr(ctx, ra);
            gen_helper_load_fpcr(va, cpu_env);
            break;
        case 0x02A:
            /* FCMOVEQ */
            gen_fcmov(ctx, TCG_COND_EQ, ra, rb, rc);
            break;
        case 0x02B:
            /* FCMOVNE */
            gen_fcmov(ctx, TCG_COND_NE, ra, rb, rc);
            break;
        case 0x02C:
            /* FCMOVLT */
            gen_fcmov(ctx, TCG_COND_LT, ra, rb, rc);
            break;
        case 0x02D:
            /* FCMOVGE */
            gen_fcmov(ctx, TCG_COND_GE, ra, rb, rc);
            break;
        case 0x02E:
            /* FCMOVLE */
            gen_fcmov(ctx, TCG_COND_LE, ra, rb, rc);
            break;
        case 0x02F:
            /* FCMOVGT */
            gen_fcmov(ctx, TCG_COND_GT, ra, rb, rc);
            break;
        case 0x030: /* CVTQL */
        case 0x130: /* CVTQL/V */
        case 0x530: /* CVTQL/SV */
            REQUIRE_REG_31(ra);
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            gen_helper_cvtql(vc, cpu_env, vb);
            gen_fp_exc_raise(rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;
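
    /*
     * Opcode 0x18 is the miscellaneous group, with the function encoded
     * in the 16-bit displacement field.  TRAPB and EXCB are no-ops here
     * because the translator completes each insn, including any traps,
     * before starting the next.  RPCC is the only member with a timing
     * side effect, hence the gen_io_start/gen_io_end bracketing and the
     * forced TB exit when icount is in use.
     */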
    case 0x18:
        switch ((uint16_t)disp16) {
        case 0x0000:
            /* TRAPB */
            /* No-op.  */
            break;
        case 0x0400:
            /* EXCB */
            /* No-op.  */
            break;
        case 0x4000:
            /* MB */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
            break;
        case 0x4400:
            /* WMB */
            tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
            break;
        case 0x8000:
            /* FETCH */
            /* No-op */
            break;
        case 0xA000:
            /* FETCH_M */
            /* No-op */
            break;
        case 0xC000:
            /* RPCC */
            va = dest_gpr(ctx, ra);
            if (ctx->base.tb->cflags & CF_USE_ICOUNT) {
                gen_io_start();
                gen_helper_load_pcc(va, cpu_env);
                gen_io_end();
                ret = DISAS_PC_STALE;
            } else {
                gen_helper_load_pcc(va, cpu_env);
            }
            break;
        case 0xE000:
            /* RC */
            gen_rx(ctx, ra, 0);
            break;
        case 0xE800:
            /* ECB */
            break;
        case 0xF000:
            /* RS */
            gen_rx(ctx, ra, 1);
            break;
        case 0xF800:
            /* WH64 */
            /* No-op */
            break;
        case 0xFC00:
            /* WH64EN */
            /* No-op */
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x19:
        /* HW_MFPR (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
        va = dest_gpr(ctx, ra);
        ret = gen_mfpr(ctx, va, insn & 0xffff);
        break;
#else
        goto invalid_opc;
#endif

    case 0x1A:
        /* JMP, JSR, RET, JSR_COROUTINE.  These only differ by the branch
           prediction stack action, which of course we don't implement.  */
        vb = load_gpr(ctx, rb);
        tcg_gen_andi_i64(cpu_pc, vb, ~3);
        if (ra != 31) {
            tcg_gen_movi_i64(ctx->ir[ra], ctx->base.pc_next);
        }
        ret = DISAS_PC_UPDATED;
        break;

    case 0x1B:
        /* HW_LD (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
        {
            TCGv addr = tcg_temp_new();
            vb = load_gpr(ctx, rb);
            va = dest_gpr(ctx, ra);

            tcg_gen_addi_i64(addr, vb, disp12);
            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access (hw_ldl/p) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL);
                break;
            case 0x1:
                /* Quadword physical access (hw_ldq/p) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEQ);
                break;
            case 0x2:
                /* Longword physical access with lock (hw_ldl_l/p) */
                gen_qemu_ldl_l(va, addr, MMU_PHYS_IDX);
                break;
            case 0x3:
                /* Quadword physical access with lock (hw_ldq_l/p) */
                gen_qemu_ldq_l(va, addr, MMU_PHYS_IDX);
                break;
            case 0x4:
                /* Longword virtual PTE fetch (hw_ldl/v) */
                goto invalid_opc;
            case 0x5:
                /* Quadword virtual PTE fetch (hw_ldq/v) */
                goto invalid_opc;
            case 0x6:
                /* Invalid */
                goto invalid_opc;
            case 0x7:
                /* Invalid */
                goto invalid_opc;
            case 0x8:
                /* Longword virtual access (hw_ldl) */
                goto invalid_opc;
            case 0x9:
                /* Quadword virtual access (hw_ldq) */
                goto invalid_opc;
            case 0xA:
                /* Longword virtual access with protection check (hw_ldl/w) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LESL);
                break;
            case 0xB:
                /* Quadword virtual access with protection check (hw_ldq/w) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LEQ);
                break;
            case 0xC:
                /* Longword virtual access with alt access mode (hw_ldl/a) */
                goto invalid_opc;
            case 0xD:
                /* Quadword virtual access with alt access mode (hw_ldq/a) */
                goto invalid_opc;
            case 0xE:
                /* Longword virtual access with alternate access mode and
                   protection checks (hw_ldl/wa) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LESL);
                break;
            case 0xF:
                /* Quadword virtual access with alternate access mode and
                   protection checks (hw_ldq/wa) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LEQ);
                break;
            }
            tcg_temp_free(addr);
            break;
        }
#else
        goto invalid_opc;
#endif
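
    /*
     * Opcode 0x1C: FTOIT and FTOIS are the FIX-extension moves in the
     * opposite direction from ITOFT/ITOFS above; they are checked first
     * since the remaining fn7 values decode the CIX count insns and the
     * MVI multimedia ops.
     */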
    case 0x1C:
        vc = dest_gpr(ctx, rc);
        if (fn7 == 0x70) {
            /* FTOIT */
            REQUIRE_AMASK(FIX);
            REQUIRE_REG_31(rb);
            va = load_fpr(ctx, ra);
            tcg_gen_mov_i64(vc, va);
            break;
        } else if (fn7 == 0x78) {
            /* FTOIS */
            REQUIRE_AMASK(FIX);
            REQUIRE_REG_31(rb);
            t32 = tcg_temp_new_i32();
            va = load_fpr(ctx, ra);
            gen_helper_s_to_memory(t32, va);
            tcg_gen_ext_i32_i64(vc, t32);
            tcg_temp_free_i32(t32);
            break;
        }

        vb = load_gpr_lit(ctx, rb, lit, islit);
        switch (fn7) {
        case 0x00:
            /* SEXTB */
            REQUIRE_AMASK(BWX);
            REQUIRE_REG_31(ra);
            tcg_gen_ext8s_i64(vc, vb);
            break;
        case 0x01:
            /* SEXTW */
            REQUIRE_AMASK(BWX);
            REQUIRE_REG_31(ra);
            tcg_gen_ext16s_i64(vc, vb);
            break;
        case 0x30:
            /* CTPOP */
            REQUIRE_AMASK(CIX);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            tcg_gen_ctpop_i64(vc, vb);
            break;
        case 0x31:
            /* PERR */
            REQUIRE_AMASK(MVI);
            REQUIRE_NO_LIT;
            va = load_gpr(ctx, ra);
            gen_helper_perr(vc, va, vb);
            break;
        case 0x32:
            /* CTLZ */
            REQUIRE_AMASK(CIX);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            tcg_gen_clzi_i64(vc, vb, 64);
            break;
        case 0x33:
            /* CTTZ */
            REQUIRE_AMASK(CIX);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            tcg_gen_ctzi_i64(vc, vb, 64);
            break;
        case 0x34:
            /* UNPKBW */
            REQUIRE_AMASK(MVI);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            gen_helper_unpkbw(vc, vb);
            break;
        case 0x35:
            /* UNPKBL */
            REQUIRE_AMASK(MVI);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            gen_helper_unpkbl(vc, vb);
            break;
        case 0x36:
            /* PKWB */
            REQUIRE_AMASK(MVI);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            gen_helper_pkwb(vc, vb);
            break;
        case 0x37:
            /* PKLB */
            REQUIRE_AMASK(MVI);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            gen_helper_pklb(vc, vb);
            break;
        case 0x38:
            /* MINSB8 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_minsb8(vc, va, vb);
            break;
        case 0x39:
            /* MINSW4 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_minsw4(vc, va, vb);
            break;
        case 0x3A:
            /* MINUB8 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_minub8(vc, va, vb);
            break;
        case 0x3B:
            /* MINUW4 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_minuw4(vc, va, vb);
            break;
        case 0x3C:
            /* MAXUB8 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_maxub8(vc, va, vb);
            break;
        case 0x3D:
            /* MAXUW4 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_maxuw4(vc, va, vb);
            break;
        case 0x3E:
            /* MAXSB8 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_maxsb8(vc, va, vb);
            break;
        case 0x3F:
            /* MAXSW4 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_maxsw4(vc, va, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;
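
    /*
     * Opcodes 0x1D-0x1F are PALcode-only: REQUIRE_TB_FLAG rejects them
     * when not translating in PAL mode, and user-mode builds treat them
     * as invalid outright.
     */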
    case 0x1D:
        /* HW_MTPR (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
        vb = load_gpr(ctx, rb);
        ret = gen_mtpr(ctx, vb, insn & 0xffff);
        break;
#else
        goto invalid_opc;
#endif

    case 0x1E:
        /* HW_RET (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
        if (rb == 31) {
            /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
               address from EXC_ADDR.  This turns out to be useful for our
               emulation PALcode, so continue to accept it.  */
            ctx->lit = vb = tcg_temp_new();
            tcg_gen_ld_i64(vb, cpu_env, offsetof(CPUAlphaState, exc_addr));
        } else {
            vb = load_gpr(ctx, rb);
        }
        tcg_gen_movi_i64(cpu_lock_addr, -1);
        tmp = tcg_temp_new();
        tcg_gen_movi_i64(tmp, 0);
        st_flag_byte(tmp, ENV_FLAG_RX_SHIFT);
        tcg_gen_andi_i64(tmp, vb, 1);
        st_flag_byte(tmp, ENV_FLAG_PAL_SHIFT);
        tcg_temp_free(tmp);
        tcg_gen_andi_i64(cpu_pc, vb, ~3);
        /* Allow interrupts to be recognized right away.  */
        ret = DISAS_PC_UPDATED_NOCHAIN;
        break;
#else
        goto invalid_opc;
#endif

    case 0x1F:
        /* HW_ST (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
        {
            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access */
                va = load_gpr(ctx, ra);
                vb = load_gpr(ctx, rb);
                tmp = tcg_temp_new();
                tcg_gen_addi_i64(tmp, vb, disp12);
                tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LESL);
                tcg_temp_free(tmp);
                break;
            case 0x1:
                /* Quadword physical access */
                va = load_gpr(ctx, ra);
                vb = load_gpr(ctx, rb);
                tmp = tcg_temp_new();
                tcg_gen_addi_i64(tmp, vb, disp12);
                tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LEQ);
                tcg_temp_free(tmp);
                break;
            case 0x2:
                /* Longword physical access with lock */
                ret = gen_store_conditional(ctx, ra, rb, disp12,
                                            MMU_PHYS_IDX, MO_LESL);
                break;
            case 0x3:
                /* Quadword physical access with lock */
                ret = gen_store_conditional(ctx, ra, rb, disp12,
                                            MMU_PHYS_IDX, MO_LEQ);
                break;
            case 0x4:
                /* Longword virtual access */
                goto invalid_opc;
            case 0x5:
                /* Quadword virtual access */
                goto invalid_opc;
            case 0x6:
                /* Invalid */
                goto invalid_opc;
            case 0x7:
                /* Invalid */
                goto invalid_opc;
            case 0x8:
                /* Invalid */
                goto invalid_opc;
            case 0x9:
                /* Invalid */
                goto invalid_opc;
            case 0xA:
                /* Invalid */
                goto invalid_opc;
            case 0xB:
                /* Invalid */
                goto invalid_opc;
            case 0xC:
                /* Longword virtual access with alternate access mode */
                goto invalid_opc;
            case 0xD:
                /* Quadword virtual access with alternate access mode */
                goto invalid_opc;
            case 0xE:
                /* Invalid */
                goto invalid_opc;
            case 0xF:
                /* Invalid */
                goto invalid_opc;
            }
            break;
        }
#else
        goto invalid_opc;
#endif
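
    /*
     * Memory-format loads and stores.  The trailing (1, 0) arguments
     * select the FP register file: LDF/LDG and STF/STG go through
     * helpers that rearrange the VAX F/G bit layouts, LDS/STS handle
     * the IEEE S memory format, and LDT/STT are plain 64-bit accesses.
     */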
    case 0x20:
        /* LDF */
        gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
        break;
    case 0x21:
        /* LDG */
        gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
        break;
    case 0x22:
        /* LDS */
        gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
        break;
    case 0x23:
        /* LDT */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
        break;
    case 0x24:
        /* STF */
        gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
        break;
    case 0x25:
        /* STG */
        gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
        break;
    case 0x26:
        /* STS */
        gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
        break;
    case 0x27:
        /* STT */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
        break;
    case 0x28:
        /* LDL */
        gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
        break;
    case 0x29:
        /* LDQ */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
        break;
    case 0x2A:
        /* LDL_L */
        gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2B:
        /* LDQ_L */
        gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2C:
        /* STL */
        gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
        break;
    case 0x2D:
        /* STQ */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
        break;
    case 0x2E:
        /* STL_C */
        ret = gen_store_conditional(ctx, ra, rb, disp16,
                                    ctx->mem_idx, MO_LESL);
        break;
    case 0x2F:
        /* STQ_C */
        ret = gen_store_conditional(ctx, ra, rb, disp16,
                                    ctx->mem_idx, MO_LEQ);
        break;
    case 0x30:
        /* BR */
        ret = gen_bdirect(ctx, ra, disp21);
        break;
    case 0x31: /* FBEQ */
        ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
        break;
    case 0x32: /* FBLT */
        ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
        break;
    case 0x33: /* FBLE */
        ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
        break;
    case 0x34:
        /* BSR */
        ret = gen_bdirect(ctx, ra, disp21);
        break;
    case 0x35: /* FBNE */
        ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
        break;
    case 0x36: /* FBGE */
        ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
        break;
    case 0x37: /* FBGT */
        ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
        break;
    case 0x38:
        /* BLBC */
        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
        break;
    case 0x39:
        /* BEQ */
        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
        break;
    case 0x3A:
        /* BLT */
        ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
        break;
    case 0x3B:
        /* BLE */
        ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
        break;
    case 0x3C:
        /* BLBS */
        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
        break;
    case 0x3D:
        /* BNE */
        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
        break;
    case 0x3E:
        /* BGE */
        ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
        break;
    case 0x3F:
        /* BGT */
        ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
        break;
    invalid_opc:
        ret = gen_invalid(ctx);
        break;
    }

    return ret;
}

static int alpha_tr_init_disas_context(DisasContextBase *dcbase,
                                       CPUState *cpu, int max_insns)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUAlphaState *env = cpu->env_ptr;
    int64_t bound, mask;

    ctx->tbflags = ctx->base.tb->flags;
    ctx->mem_idx = cpu_mmu_index(env, false);
    ctx->implver = env->implver;
    ctx->amask = env->amask;

#ifdef CONFIG_USER_ONLY
    ctx->ir = cpu_std_ir;
#else
    ctx->palbr = env->palbr;
    ctx->ir = (ctx->tbflags & ENV_FLAG_PAL_MODE ? cpu_pal_ir : cpu_std_ir);
#endif

    /* ??? Every TB begins with unset rounding mode, to be initialized on
       the first fp insn of the TB.  Alternatively we could define a proper
       default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
       to reset the FP_STATUS to that default at the end of any TB that
       changes the default.  We could even (gasp) dynamically figure out
       what default would be most efficient given the running program.  */
    ctx->tb_rm = -1;
    /* Similarly for flush-to-zero.  */
    ctx->tb_ftz = -1;

    TCGV_UNUSED_I64(ctx->zero);
    TCGV_UNUSED_I64(ctx->sink);
    TCGV_UNUSED_I64(ctx->lit);
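
    /*
     * A note on the bound arithmetic below: with mask = TARGET_PAGE_MASK,
     * pc_first | mask equals, as a signed value, (offset within the page)
     * minus (page size).  Negating that gives the bytes remaining on the
     * page, and dividing by 4 the number of whole insns.  E.g. with 8K
     * pages, a pc 8 bytes below the page end yields a bound of 2.  The
     * superpage case is the same computation over a 2^41-byte region.
     */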
    /* Bound the number of insns to execute to those left on the page.  */
    if (in_superpage(ctx, ctx->base.pc_first)) {
        mask = -1ULL << 41;
    } else {
        mask = TARGET_PAGE_MASK;
    }
    bound = -(ctx->base.pc_first | mask) / 4;

    return MIN(max_insns, bound);
}

static void alpha_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}

static void alpha_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    tcg_gen_insn_start(dcbase->pc_next);
}

static bool alpha_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
                                      const CPUBreakpoint *bp)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    ctx->base.is_jmp = gen_excp(ctx, EXCP_DEBUG, 0);

    /* The address covered by the breakpoint must be included in
       [tb->pc, tb->pc + tb->size) in order for it to be properly
       cleared -- thus we increment the PC here so that the logic
       setting tb->size below does the right thing.  */
    ctx->base.pc_next += 4;
    return true;
}

static void alpha_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUAlphaState *env = cpu->env_ptr;
    uint32_t insn = cpu_ldl_code(env, ctx->base.pc_next);

    ctx->base.pc_next += 4;
    ctx->base.is_jmp = translate_one(ctx, insn);

    free_context_temps(ctx);
    translator_loop_temp_check(&ctx->base);
}

static void alpha_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    switch (ctx->base.is_jmp) {
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
        if (use_goto_tb(ctx, ctx->base.pc_next)) {
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
            tcg_gen_exit_tb((uintptr_t)ctx->base.tb);
        }
        /* FALLTHRU */
    case DISAS_PC_STALE:
        tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
        /* FALLTHRU */
    case DISAS_PC_UPDATED:
        if (!use_exit_tb(ctx)) {
            tcg_gen_lookup_and_goto_ptr(cpu_pc);
            break;
        }
        /* FALLTHRU */
    case DISAS_PC_UPDATED_NOCHAIN:
        if (ctx->base.singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG, 0);
        } else {
            tcg_gen_exit_tb(0);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

static void alpha_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
{
    qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
    log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size, 1);
}

static const TranslatorOps alpha_tr_ops = {
    .init_disas_context = alpha_tr_init_disas_context,
    .tb_start           = alpha_tr_tb_start,
    .insn_start         = alpha_tr_insn_start,
    .breakpoint_check   = alpha_tr_breakpoint_check,
    .translate_insn     = alpha_tr_translate_insn,
    .tb_stop            = alpha_tr_tb_stop,
    .disas_log          = alpha_tr_disas_log,
};
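
/*
 * Entry point from the generic translator loop: all mutable per-TB
 * state lives in the on-stack DisasContext, and the TranslatorOps
 * callbacks above do the target-specific work.
 */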
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb)
{
    DisasContext dc;
    translator_loop(&alpha_tr_ops, &dc.base, cpu, tb);
}

void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->pc = data[0];
}