1 /* 2 * Alpha emulation cpu translation for qemu. 3 * 4 * Copyright (c) 2007 Jocelyn Mayer 5 * 6 * This library is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU Lesser General Public 8 * License as published by the Free Software Foundation; either 9 * version 2.1 of the License, or (at your option) any later version. 10 * 11 * This library is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * Lesser General Public License for more details. 15 * 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #include "qemu/osdep.h" 21 #include "cpu.h" 22 #include "sysemu/cpus.h" 23 #include "disas/disas.h" 24 #include "qemu/host-utils.h" 25 #include "exec/exec-all.h" 26 #include "tcg/tcg-op.h" 27 #include "exec/cpu_ldst.h" 28 #include "exec/helper-proto.h" 29 #include "exec/helper-gen.h" 30 #include "exec/translator.h" 31 #include "exec/log.h" 32 33 34 #undef ALPHA_DEBUG_DISAS 35 #define CONFIG_SOFTFLOAT_INLINE 36 37 #ifdef ALPHA_DEBUG_DISAS 38 # define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__) 39 #else 40 # define LOG_DISAS(...) do { } while (0) 41 #endif 42 43 typedef struct DisasContext DisasContext; 44 struct DisasContext { 45 DisasContextBase base; 46 47 #ifdef CONFIG_USER_ONLY 48 MemOp unalign; 49 #else 50 uint64_t palbr; 51 #endif 52 uint32_t tbflags; 53 int mem_idx; 54 55 /* implver and amask values for this CPU. */ 56 int implver; 57 int amask; 58 59 /* Current rounding mode for this TB. */ 60 int tb_rm; 61 /* Current flush-to-zero setting for this TB. */ 62 int tb_ftz; 63 64 /* The set of registers active in the current context. */ 65 TCGv *ir; 66 67 /* Temporaries for $31 and $f31 as source and destination. */ 68 TCGv zero; 69 TCGv sink; 70 }; 71 72 #ifdef CONFIG_USER_ONLY 73 #define UNALIGN(C) (C)->unalign 74 #else 75 #define UNALIGN(C) MO_ALIGN 76 #endif 77 78 /* Target-specific return values from translate_one, indicating the 79 state of the TB. Note that DISAS_NEXT indicates that we are not 80 exiting the TB. */ 81 #define DISAS_PC_UPDATED_NOCHAIN DISAS_TARGET_0 82 #define DISAS_PC_UPDATED DISAS_TARGET_1 83 #define DISAS_PC_STALE DISAS_TARGET_2 84 85 /* global register indexes */ 86 static TCGv cpu_std_ir[31]; 87 static TCGv cpu_fir[31]; 88 static TCGv cpu_pc; 89 static TCGv cpu_lock_addr; 90 static TCGv cpu_lock_value; 91 92 #ifndef CONFIG_USER_ONLY 93 static TCGv cpu_pal_ir[31]; 94 #endif 95 96 #include "exec/gen-icount.h" 97 98 void alpha_translate_init(void) 99 { 100 #define DEF_VAR(V) { &cpu_##V, #V, offsetof(CPUAlphaState, V) } 101 102 typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar; 103 static const GlobalVar vars[] = { 104 DEF_VAR(pc), 105 DEF_VAR(lock_addr), 106 DEF_VAR(lock_value), 107 }; 108 109 #undef DEF_VAR 110 111 /* Use the symbolic register names that match the disassembler. */ 112 static const char greg_names[31][4] = { 113 "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6", 114 "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp", 115 "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9", 116 "t10", "t11", "ra", "t12", "at", "gp", "sp" 117 }; 118 static const char freg_names[31][4] = { 119 "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", 120 "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15", 121 "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23", 122 "f24", "f25", "f26", "f27", "f28", "f29", "f30" 123 }; 124 #ifndef CONFIG_USER_ONLY 125 static const char shadow_names[8][8] = { 126 "pal_t7", "pal_s0", "pal_s1", "pal_s2", 127 "pal_s3", "pal_s4", "pal_s5", "pal_t11" 128 }; 129 #endif 130 131 int i; 132 133 for (i = 0; i < 31; i++) { 134 cpu_std_ir[i] = tcg_global_mem_new_i64(cpu_env, 135 offsetof(CPUAlphaState, ir[i]), 136 greg_names[i]); 137 } 138 139 for (i = 0; i < 31; i++) { 140 cpu_fir[i] = tcg_global_mem_new_i64(cpu_env, 141 offsetof(CPUAlphaState, fir[i]), 142 freg_names[i]); 143 } 144 145 #ifndef CONFIG_USER_ONLY 146 memcpy(cpu_pal_ir, cpu_std_ir, sizeof(cpu_pal_ir)); 147 for (i = 0; i < 8; i++) { 148 int r = (i == 7 ? 25 : i + 8); 149 cpu_pal_ir[r] = tcg_global_mem_new_i64(cpu_env, 150 offsetof(CPUAlphaState, 151 shadow[i]), 152 shadow_names[i]); 153 } 154 #endif 155 156 for (i = 0; i < ARRAY_SIZE(vars); ++i) { 157 const GlobalVar *v = &vars[i]; 158 *v->var = tcg_global_mem_new_i64(cpu_env, v->ofs, v->name); 159 } 160 } 161 162 static TCGv load_zero(DisasContext *ctx) 163 { 164 if (!ctx->zero) { 165 ctx->zero = tcg_constant_i64(0); 166 } 167 return ctx->zero; 168 } 169 170 static TCGv dest_sink(DisasContext *ctx) 171 { 172 if (!ctx->sink) { 173 ctx->sink = tcg_temp_new(); 174 } 175 return ctx->sink; 176 } 177 178 static void free_context_temps(DisasContext *ctx) 179 { 180 if (ctx->sink) { 181 tcg_gen_discard_i64(ctx->sink); 182 ctx->sink = NULL; 183 } 184 } 185 186 static TCGv load_gpr(DisasContext *ctx, unsigned reg) 187 { 188 if (likely(reg < 31)) { 189 return ctx->ir[reg]; 190 } else { 191 return load_zero(ctx); 192 } 193 } 194 195 static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg, 196 uint8_t lit, bool islit) 197 { 198 if (islit) { 199 return tcg_constant_i64(lit); 200 } else if (likely(reg < 31)) { 201 return ctx->ir[reg]; 202 } else { 203 return load_zero(ctx); 204 } 205 } 206 207 static TCGv dest_gpr(DisasContext *ctx, unsigned reg) 208 { 209 if (likely(reg < 31)) { 210 return ctx->ir[reg]; 211 } else { 212 return dest_sink(ctx); 213 } 214 } 215 216 static TCGv load_fpr(DisasContext *ctx, unsigned reg) 217 { 218 if (likely(reg < 31)) { 219 return cpu_fir[reg]; 220 } else { 221 return load_zero(ctx); 222 } 223 } 224 225 static TCGv dest_fpr(DisasContext *ctx, unsigned reg) 226 { 227 if (likely(reg < 31)) { 228 return cpu_fir[reg]; 229 } else { 230 return dest_sink(ctx); 231 } 232 } 233 234 static int get_flag_ofs(unsigned shift) 235 { 236 int ofs = offsetof(CPUAlphaState, flags); 237 #if HOST_BIG_ENDIAN 238 ofs += 3 - (shift / 8); 239 #else 240 ofs += shift / 8; 241 #endif 242 return ofs; 243 } 244 245 static void ld_flag_byte(TCGv val, unsigned shift) 246 { 247 tcg_gen_ld8u_i64(val, cpu_env, get_flag_ofs(shift)); 248 } 249 250 static void st_flag_byte(TCGv val, unsigned shift) 251 { 252 tcg_gen_st8_i64(val, cpu_env, get_flag_ofs(shift)); 253 } 254 255 static void gen_excp_1(int exception, int error_code) 256 { 257 TCGv_i32 tmp1, tmp2; 258 259 tmp1 = tcg_constant_i32(exception); 260 tmp2 = tcg_constant_i32(error_code); 261 gen_helper_excp(cpu_env, tmp1, tmp2); 262 } 263 264 static DisasJumpType gen_excp(DisasContext *ctx, int exception, int error_code) 265 { 266 tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next); 267 gen_excp_1(exception, error_code); 268 return DISAS_NORETURN; 269 } 270 271 static inline DisasJumpType gen_invalid(DisasContext *ctx) 272 { 273 return gen_excp(ctx, EXCP_OPCDEC, 0); 274 } 275 276 static void gen_ldf(DisasContext *ctx, TCGv dest, TCGv addr) 277 { 278 TCGv_i32 tmp32 = tcg_temp_new_i32(); 279 tcg_gen_qemu_ld_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx)); 280 gen_helper_memory_to_f(dest, tmp32); 281 } 282 283 static void gen_ldg(DisasContext *ctx, TCGv dest, TCGv addr) 284 { 285 TCGv tmp = tcg_temp_new(); 286 tcg_gen_qemu_ld_i64(tmp, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx)); 287 gen_helper_memory_to_g(dest, tmp); 288 } 289 290 static void gen_lds(DisasContext *ctx, TCGv dest, TCGv addr) 291 { 292 TCGv_i32 tmp32 = tcg_temp_new_i32(); 293 tcg_gen_qemu_ld_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx)); 294 gen_helper_memory_to_s(dest, tmp32); 295 } 296 297 static void gen_ldt(DisasContext *ctx, TCGv dest, TCGv addr) 298 { 299 tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx)); 300 } 301 302 static void gen_load_fp(DisasContext *ctx, int ra, int rb, int32_t disp16, 303 void (*func)(DisasContext *, TCGv, TCGv)) 304 { 305 /* Loads to $f31 are prefetches, which we can treat as nops. */ 306 if (likely(ra != 31)) { 307 TCGv addr = tcg_temp_new(); 308 tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16); 309 func(ctx, cpu_fir[ra], addr); 310 } 311 } 312 313 static void gen_load_int(DisasContext *ctx, int ra, int rb, int32_t disp16, 314 MemOp op, bool clear, bool locked) 315 { 316 TCGv addr, dest; 317 318 /* LDQ_U with ra $31 is UNOP. Other various loads are forms of 319 prefetches, which we can treat as nops. No worries about 320 missed exceptions here. */ 321 if (unlikely(ra == 31)) { 322 return; 323 } 324 325 addr = tcg_temp_new(); 326 tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16); 327 if (clear) { 328 tcg_gen_andi_i64(addr, addr, ~0x7); 329 } else if (!locked) { 330 op |= UNALIGN(ctx); 331 } 332 333 dest = ctx->ir[ra]; 334 tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, op); 335 336 if (locked) { 337 tcg_gen_mov_i64(cpu_lock_addr, addr); 338 tcg_gen_mov_i64(cpu_lock_value, dest); 339 } 340 } 341 342 static void gen_stf(DisasContext *ctx, TCGv src, TCGv addr) 343 { 344 TCGv_i32 tmp32 = tcg_temp_new_i32(); 345 gen_helper_f_to_memory(tmp32, addr); 346 tcg_gen_qemu_st_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx)); 347 } 348 349 static void gen_stg(DisasContext *ctx, TCGv src, TCGv addr) 350 { 351 TCGv tmp = tcg_temp_new(); 352 gen_helper_g_to_memory(tmp, src); 353 tcg_gen_qemu_st_i64(tmp, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx)); 354 } 355 356 static void gen_sts(DisasContext *ctx, TCGv src, TCGv addr) 357 { 358 TCGv_i32 tmp32 = tcg_temp_new_i32(); 359 gen_helper_s_to_memory(tmp32, src); 360 tcg_gen_qemu_st_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx)); 361 } 362 363 static void gen_stt(DisasContext *ctx, TCGv src, TCGv addr) 364 { 365 tcg_gen_qemu_st_i64(src, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx)); 366 } 367 368 static void gen_store_fp(DisasContext *ctx, int ra, int rb, int32_t disp16, 369 void (*func)(DisasContext *, TCGv, TCGv)) 370 { 371 TCGv addr = tcg_temp_new(); 372 tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16); 373 func(ctx, load_fpr(ctx, ra), addr); 374 } 375 376 static void gen_store_int(DisasContext *ctx, int ra, int rb, int32_t disp16, 377 MemOp op, bool clear) 378 { 379 TCGv addr, src; 380 381 addr = tcg_temp_new(); 382 tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16); 383 if (clear) { 384 tcg_gen_andi_i64(addr, addr, ~0x7); 385 } else { 386 op |= UNALIGN(ctx); 387 } 388 389 src = load_gpr(ctx, ra); 390 tcg_gen_qemu_st_i64(src, addr, ctx->mem_idx, op); 391 } 392 393 static DisasJumpType gen_store_conditional(DisasContext *ctx, int ra, int rb, 394 int32_t disp16, int mem_idx, 395 MemOp op) 396 { 397 TCGLabel *lab_fail, *lab_done; 398 TCGv addr, val; 399 400 addr = tcg_temp_new_i64(); 401 tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16); 402 free_context_temps(ctx); 403 404 lab_fail = gen_new_label(); 405 lab_done = gen_new_label(); 406 tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail); 407 408 val = tcg_temp_new_i64(); 409 tcg_gen_atomic_cmpxchg_i64(val, cpu_lock_addr, cpu_lock_value, 410 load_gpr(ctx, ra), mem_idx, op); 411 free_context_temps(ctx); 412 413 if (ra != 31) { 414 tcg_gen_setcond_i64(TCG_COND_EQ, ctx->ir[ra], val, cpu_lock_value); 415 } 416 tcg_gen_br(lab_done); 417 418 gen_set_label(lab_fail); 419 if (ra != 31) { 420 tcg_gen_movi_i64(ctx->ir[ra], 0); 421 } 422 423 gen_set_label(lab_done); 424 tcg_gen_movi_i64(cpu_lock_addr, -1); 425 return DISAS_NEXT; 426 } 427 428 static bool use_goto_tb(DisasContext *ctx, uint64_t dest) 429 { 430 return translator_use_goto_tb(&ctx->base, dest); 431 } 432 433 static DisasJumpType gen_bdirect(DisasContext *ctx, int ra, int32_t disp) 434 { 435 uint64_t dest = ctx->base.pc_next + (disp << 2); 436 437 if (ra != 31) { 438 tcg_gen_movi_i64(ctx->ir[ra], ctx->base.pc_next); 439 } 440 441 /* Notice branch-to-next; used to initialize RA with the PC. */ 442 if (disp == 0) { 443 return 0; 444 } else if (use_goto_tb(ctx, dest)) { 445 tcg_gen_goto_tb(0); 446 tcg_gen_movi_i64(cpu_pc, dest); 447 tcg_gen_exit_tb(ctx->base.tb, 0); 448 return DISAS_NORETURN; 449 } else { 450 tcg_gen_movi_i64(cpu_pc, dest); 451 return DISAS_PC_UPDATED; 452 } 453 } 454 455 static DisasJumpType gen_bcond_internal(DisasContext *ctx, TCGCond cond, 456 TCGv cmp, int32_t disp) 457 { 458 uint64_t dest = ctx->base.pc_next + (disp << 2); 459 TCGLabel *lab_true = gen_new_label(); 460 461 if (use_goto_tb(ctx, dest)) { 462 tcg_gen_brcondi_i64(cond, cmp, 0, lab_true); 463 464 tcg_gen_goto_tb(0); 465 tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next); 466 tcg_gen_exit_tb(ctx->base.tb, 0); 467 468 gen_set_label(lab_true); 469 tcg_gen_goto_tb(1); 470 tcg_gen_movi_i64(cpu_pc, dest); 471 tcg_gen_exit_tb(ctx->base.tb, 1); 472 473 return DISAS_NORETURN; 474 } else { 475 TCGv_i64 z = load_zero(ctx); 476 TCGv_i64 d = tcg_constant_i64(dest); 477 TCGv_i64 p = tcg_constant_i64(ctx->base.pc_next); 478 479 tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p); 480 return DISAS_PC_UPDATED; 481 } 482 } 483 484 static DisasJumpType gen_bcond(DisasContext *ctx, TCGCond cond, int ra, 485 int32_t disp, int mask) 486 { 487 if (mask) { 488 TCGv tmp = tcg_temp_new(); 489 DisasJumpType ret; 490 491 tcg_gen_andi_i64(tmp, load_gpr(ctx, ra), 1); 492 ret = gen_bcond_internal(ctx, cond, tmp, disp); 493 return ret; 494 } 495 return gen_bcond_internal(ctx, cond, load_gpr(ctx, ra), disp); 496 } 497 498 /* Fold -0.0 for comparison with COND. */ 499 500 static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src) 501 { 502 uint64_t mzero = 1ull << 63; 503 504 switch (cond) { 505 case TCG_COND_LE: 506 case TCG_COND_GT: 507 /* For <= or >, the -0.0 value directly compares the way we want. */ 508 tcg_gen_mov_i64(dest, src); 509 break; 510 511 case TCG_COND_EQ: 512 case TCG_COND_NE: 513 /* For == or !=, we can simply mask off the sign bit and compare. */ 514 tcg_gen_andi_i64(dest, src, mzero - 1); 515 break; 516 517 case TCG_COND_GE: 518 case TCG_COND_LT: 519 /* For >= or <, map -0.0 to +0.0 via comparison and mask. */ 520 tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero); 521 tcg_gen_neg_i64(dest, dest); 522 tcg_gen_and_i64(dest, dest, src); 523 break; 524 525 default: 526 abort(); 527 } 528 } 529 530 static DisasJumpType gen_fbcond(DisasContext *ctx, TCGCond cond, int ra, 531 int32_t disp) 532 { 533 TCGv cmp_tmp = tcg_temp_new(); 534 DisasJumpType ret; 535 536 gen_fold_mzero(cond, cmp_tmp, load_fpr(ctx, ra)); 537 ret = gen_bcond_internal(ctx, cond, cmp_tmp, disp); 538 return ret; 539 } 540 541 static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc) 542 { 543 TCGv_i64 va, vb, z; 544 545 z = load_zero(ctx); 546 vb = load_fpr(ctx, rb); 547 va = tcg_temp_new(); 548 gen_fold_mzero(cond, va, load_fpr(ctx, ra)); 549 550 tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), va, z, vb, load_fpr(ctx, rc)); 551 } 552 553 #define QUAL_RM_N 0x080 /* Round mode nearest even */ 554 #define QUAL_RM_C 0x000 /* Round mode chopped */ 555 #define QUAL_RM_M 0x040 /* Round mode minus infinity */ 556 #define QUAL_RM_D 0x0c0 /* Round mode dynamic */ 557 #define QUAL_RM_MASK 0x0c0 558 559 #define QUAL_U 0x100 /* Underflow enable (fp output) */ 560 #define QUAL_V 0x100 /* Overflow enable (int output) */ 561 #define QUAL_S 0x400 /* Software completion enable */ 562 #define QUAL_I 0x200 /* Inexact detection enable */ 563 564 static void gen_qual_roundmode(DisasContext *ctx, int fn11) 565 { 566 TCGv_i32 tmp; 567 568 fn11 &= QUAL_RM_MASK; 569 if (fn11 == ctx->tb_rm) { 570 return; 571 } 572 ctx->tb_rm = fn11; 573 574 tmp = tcg_temp_new_i32(); 575 switch (fn11) { 576 case QUAL_RM_N: 577 tcg_gen_movi_i32(tmp, float_round_nearest_even); 578 break; 579 case QUAL_RM_C: 580 tcg_gen_movi_i32(tmp, float_round_to_zero); 581 break; 582 case QUAL_RM_M: 583 tcg_gen_movi_i32(tmp, float_round_down); 584 break; 585 case QUAL_RM_D: 586 tcg_gen_ld8u_i32(tmp, cpu_env, 587 offsetof(CPUAlphaState, fpcr_dyn_round)); 588 break; 589 } 590 591 #if defined(CONFIG_SOFTFLOAT_INLINE) 592 /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode. 593 With CONFIG_SOFTFLOAT that expands to an out-of-line call that just 594 sets the one field. */ 595 tcg_gen_st8_i32(tmp, cpu_env, 596 offsetof(CPUAlphaState, fp_status.float_rounding_mode)); 597 #else 598 gen_helper_setroundmode(tmp); 599 #endif 600 } 601 602 static void gen_qual_flushzero(DisasContext *ctx, int fn11) 603 { 604 TCGv_i32 tmp; 605 606 fn11 &= QUAL_U; 607 if (fn11 == ctx->tb_ftz) { 608 return; 609 } 610 ctx->tb_ftz = fn11; 611 612 tmp = tcg_temp_new_i32(); 613 if (fn11) { 614 /* Underflow is enabled, use the FPCR setting. */ 615 tcg_gen_ld8u_i32(tmp, cpu_env, 616 offsetof(CPUAlphaState, fpcr_flush_to_zero)); 617 } else { 618 /* Underflow is disabled, force flush-to-zero. */ 619 tcg_gen_movi_i32(tmp, 1); 620 } 621 622 #if defined(CONFIG_SOFTFLOAT_INLINE) 623 tcg_gen_st8_i32(tmp, cpu_env, 624 offsetof(CPUAlphaState, fp_status.flush_to_zero)); 625 #else 626 gen_helper_setflushzero(tmp); 627 #endif 628 } 629 630 static TCGv gen_ieee_input(DisasContext *ctx, int reg, int fn11, int is_cmp) 631 { 632 TCGv val; 633 634 if (unlikely(reg == 31)) { 635 val = load_zero(ctx); 636 } else { 637 val = cpu_fir[reg]; 638 if ((fn11 & QUAL_S) == 0) { 639 if (is_cmp) { 640 gen_helper_ieee_input_cmp(cpu_env, val); 641 } else { 642 gen_helper_ieee_input(cpu_env, val); 643 } 644 } else { 645 #ifndef CONFIG_USER_ONLY 646 /* In system mode, raise exceptions for denormals like real 647 hardware. In user mode, proceed as if the OS completion 648 handler is handling the denormal as per spec. */ 649 gen_helper_ieee_input_s(cpu_env, val); 650 #endif 651 } 652 } 653 return val; 654 } 655 656 static void gen_fp_exc_raise(int rc, int fn11) 657 { 658 /* ??? We ought to be able to do something with imprecise exceptions. 659 E.g. notice we're still in the trap shadow of something within the 660 TB and do not generate the code to signal the exception; end the TB 661 when an exception is forced to arrive, either by consumption of a 662 register value or TRAPB or EXCB. */ 663 TCGv_i32 reg, ign; 664 uint32_t ignore = 0; 665 666 if (!(fn11 & QUAL_U)) { 667 /* Note that QUAL_U == QUAL_V, so ignore either. */ 668 ignore |= FPCR_UNF | FPCR_IOV; 669 } 670 if (!(fn11 & QUAL_I)) { 671 ignore |= FPCR_INE; 672 } 673 ign = tcg_constant_i32(ignore); 674 675 /* ??? Pass in the regno of the destination so that the helper can 676 set EXC_MASK, which contains a bitmask of destination registers 677 that have caused arithmetic traps. A simple userspace emulation 678 does not require this. We do need it for a guest kernel's entArith, 679 or if we were to do something clever with imprecise exceptions. */ 680 reg = tcg_constant_i32(rc + 32); 681 if (fn11 & QUAL_S) { 682 gen_helper_fp_exc_raise_s(cpu_env, ign, reg); 683 } else { 684 gen_helper_fp_exc_raise(cpu_env, ign, reg); 685 } 686 } 687 688 static void gen_cvtlq(TCGv vc, TCGv vb) 689 { 690 TCGv tmp = tcg_temp_new(); 691 692 /* The arithmetic right shift here, plus the sign-extended mask below 693 yields a sign-extended result without an explicit ext32s_i64. */ 694 tcg_gen_shri_i64(tmp, vb, 29); 695 tcg_gen_sari_i64(vc, vb, 32); 696 tcg_gen_deposit_i64(vc, vc, tmp, 0, 30); 697 } 698 699 static void gen_ieee_arith2(DisasContext *ctx, 700 void (*helper)(TCGv, TCGv_ptr, TCGv), 701 int rb, int rc, int fn11) 702 { 703 TCGv vb; 704 705 gen_qual_roundmode(ctx, fn11); 706 gen_qual_flushzero(ctx, fn11); 707 708 vb = gen_ieee_input(ctx, rb, fn11, 0); 709 helper(dest_fpr(ctx, rc), cpu_env, vb); 710 711 gen_fp_exc_raise(rc, fn11); 712 } 713 714 #define IEEE_ARITH2(name) \ 715 static inline void glue(gen_, name)(DisasContext *ctx, \ 716 int rb, int rc, int fn11) \ 717 { \ 718 gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11); \ 719 } 720 IEEE_ARITH2(sqrts) 721 IEEE_ARITH2(sqrtt) 722 IEEE_ARITH2(cvtst) 723 IEEE_ARITH2(cvtts) 724 725 static void gen_cvttq(DisasContext *ctx, int rb, int rc, int fn11) 726 { 727 TCGv vb, vc; 728 729 /* No need to set flushzero, since we have an integer output. */ 730 vb = gen_ieee_input(ctx, rb, fn11, 0); 731 vc = dest_fpr(ctx, rc); 732 733 /* Almost all integer conversions use cropped rounding; 734 special case that. */ 735 if ((fn11 & QUAL_RM_MASK) == QUAL_RM_C) { 736 gen_helper_cvttq_c(vc, cpu_env, vb); 737 } else { 738 gen_qual_roundmode(ctx, fn11); 739 gen_helper_cvttq(vc, cpu_env, vb); 740 } 741 gen_fp_exc_raise(rc, fn11); 742 } 743 744 static void gen_ieee_intcvt(DisasContext *ctx, 745 void (*helper)(TCGv, TCGv_ptr, TCGv), 746 int rb, int rc, int fn11) 747 { 748 TCGv vb, vc; 749 750 gen_qual_roundmode(ctx, fn11); 751 vb = load_fpr(ctx, rb); 752 vc = dest_fpr(ctx, rc); 753 754 /* The only exception that can be raised by integer conversion 755 is inexact. Thus we only need to worry about exceptions when 756 inexact handling is requested. */ 757 if (fn11 & QUAL_I) { 758 helper(vc, cpu_env, vb); 759 gen_fp_exc_raise(rc, fn11); 760 } else { 761 helper(vc, cpu_env, vb); 762 } 763 } 764 765 #define IEEE_INTCVT(name) \ 766 static inline void glue(gen_, name)(DisasContext *ctx, \ 767 int rb, int rc, int fn11) \ 768 { \ 769 gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11); \ 770 } 771 IEEE_INTCVT(cvtqs) 772 IEEE_INTCVT(cvtqt) 773 774 static void gen_cpy_mask(TCGv vc, TCGv va, TCGv vb, bool inv_a, uint64_t mask) 775 { 776 TCGv vmask = tcg_constant_i64(mask); 777 TCGv tmp = tcg_temp_new_i64(); 778 779 if (inv_a) { 780 tcg_gen_andc_i64(tmp, vmask, va); 781 } else { 782 tcg_gen_and_i64(tmp, va, vmask); 783 } 784 785 tcg_gen_andc_i64(vc, vb, vmask); 786 tcg_gen_or_i64(vc, vc, tmp); 787 } 788 789 static void gen_ieee_arith3(DisasContext *ctx, 790 void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv), 791 int ra, int rb, int rc, int fn11) 792 { 793 TCGv va, vb, vc; 794 795 gen_qual_roundmode(ctx, fn11); 796 gen_qual_flushzero(ctx, fn11); 797 798 va = gen_ieee_input(ctx, ra, fn11, 0); 799 vb = gen_ieee_input(ctx, rb, fn11, 0); 800 vc = dest_fpr(ctx, rc); 801 helper(vc, cpu_env, va, vb); 802 803 gen_fp_exc_raise(rc, fn11); 804 } 805 806 #define IEEE_ARITH3(name) \ 807 static inline void glue(gen_, name)(DisasContext *ctx, \ 808 int ra, int rb, int rc, int fn11) \ 809 { \ 810 gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11); \ 811 } 812 IEEE_ARITH3(adds) 813 IEEE_ARITH3(subs) 814 IEEE_ARITH3(muls) 815 IEEE_ARITH3(divs) 816 IEEE_ARITH3(addt) 817 IEEE_ARITH3(subt) 818 IEEE_ARITH3(mult) 819 IEEE_ARITH3(divt) 820 821 static void gen_ieee_compare(DisasContext *ctx, 822 void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv), 823 int ra, int rb, int rc, int fn11) 824 { 825 TCGv va, vb, vc; 826 827 va = gen_ieee_input(ctx, ra, fn11, 1); 828 vb = gen_ieee_input(ctx, rb, fn11, 1); 829 vc = dest_fpr(ctx, rc); 830 helper(vc, cpu_env, va, vb); 831 832 gen_fp_exc_raise(rc, fn11); 833 } 834 835 #define IEEE_CMP3(name) \ 836 static inline void glue(gen_, name)(DisasContext *ctx, \ 837 int ra, int rb, int rc, int fn11) \ 838 { \ 839 gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11); \ 840 } 841 IEEE_CMP3(cmptun) 842 IEEE_CMP3(cmpteq) 843 IEEE_CMP3(cmptlt) 844 IEEE_CMP3(cmptle) 845 846 static inline uint64_t zapnot_mask(uint8_t lit) 847 { 848 uint64_t mask = 0; 849 int i; 850 851 for (i = 0; i < 8; ++i) { 852 if ((lit >> i) & 1) { 853 mask |= 0xffull << (i * 8); 854 } 855 } 856 return mask; 857 } 858 859 /* Implement zapnot with an immediate operand, which expands to some 860 form of immediate AND. This is a basic building block in the 861 definition of many of the other byte manipulation instructions. */ 862 static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit) 863 { 864 switch (lit) { 865 case 0x00: 866 tcg_gen_movi_i64(dest, 0); 867 break; 868 case 0x01: 869 tcg_gen_ext8u_i64(dest, src); 870 break; 871 case 0x03: 872 tcg_gen_ext16u_i64(dest, src); 873 break; 874 case 0x0f: 875 tcg_gen_ext32u_i64(dest, src); 876 break; 877 case 0xff: 878 tcg_gen_mov_i64(dest, src); 879 break; 880 default: 881 tcg_gen_andi_i64(dest, src, zapnot_mask(lit)); 882 break; 883 } 884 } 885 886 /* EXTWH, EXTLH, EXTQH */ 887 static void gen_ext_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit, 888 uint8_t lit, uint8_t byte_mask) 889 { 890 if (islit) { 891 int pos = (64 - lit * 8) & 0x3f; 892 int len = cto32(byte_mask) * 8; 893 if (pos < len) { 894 tcg_gen_deposit_z_i64(vc, va, pos, len - pos); 895 } else { 896 tcg_gen_movi_i64(vc, 0); 897 } 898 } else { 899 TCGv tmp = tcg_temp_new(); 900 tcg_gen_shli_i64(tmp, load_gpr(ctx, rb), 3); 901 tcg_gen_neg_i64(tmp, tmp); 902 tcg_gen_andi_i64(tmp, tmp, 0x3f); 903 tcg_gen_shl_i64(vc, va, tmp); 904 } 905 gen_zapnoti(vc, vc, byte_mask); 906 } 907 908 /* EXTBL, EXTWL, EXTLL, EXTQL */ 909 static void gen_ext_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit, 910 uint8_t lit, uint8_t byte_mask) 911 { 912 if (islit) { 913 int pos = (lit & 7) * 8; 914 int len = cto32(byte_mask) * 8; 915 if (pos + len >= 64) { 916 len = 64 - pos; 917 } 918 tcg_gen_extract_i64(vc, va, pos, len); 919 } else { 920 TCGv tmp = tcg_temp_new(); 921 tcg_gen_andi_i64(tmp, load_gpr(ctx, rb), 7); 922 tcg_gen_shli_i64(tmp, tmp, 3); 923 tcg_gen_shr_i64(vc, va, tmp); 924 gen_zapnoti(vc, vc, byte_mask); 925 } 926 } 927 928 /* INSWH, INSLH, INSQH */ 929 static void gen_ins_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit, 930 uint8_t lit, uint8_t byte_mask) 931 { 932 if (islit) { 933 int pos = 64 - (lit & 7) * 8; 934 int len = cto32(byte_mask) * 8; 935 if (pos < len) { 936 tcg_gen_extract_i64(vc, va, pos, len - pos); 937 } else { 938 tcg_gen_movi_i64(vc, 0); 939 } 940 } else { 941 TCGv tmp = tcg_temp_new(); 942 TCGv shift = tcg_temp_new(); 943 944 /* The instruction description has us left-shift the byte mask 945 and extract bits <15:8> and apply that zap at the end. This 946 is equivalent to simply performing the zap first and shifting 947 afterward. */ 948 gen_zapnoti(tmp, va, byte_mask); 949 950 /* If (B & 7) == 0, we need to shift by 64 and leave a zero. Do this 951 portably by splitting the shift into two parts: shift_count-1 and 1. 952 Arrange for the -1 by using ones-complement instead of 953 twos-complement in the negation: ~(B * 8) & 63. */ 954 955 tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3); 956 tcg_gen_not_i64(shift, shift); 957 tcg_gen_andi_i64(shift, shift, 0x3f); 958 959 tcg_gen_shr_i64(vc, tmp, shift); 960 tcg_gen_shri_i64(vc, vc, 1); 961 } 962 } 963 964 /* INSBL, INSWL, INSLL, INSQL */ 965 static void gen_ins_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit, 966 uint8_t lit, uint8_t byte_mask) 967 { 968 if (islit) { 969 int pos = (lit & 7) * 8; 970 int len = cto32(byte_mask) * 8; 971 if (pos + len > 64) { 972 len = 64 - pos; 973 } 974 tcg_gen_deposit_z_i64(vc, va, pos, len); 975 } else { 976 TCGv tmp = tcg_temp_new(); 977 TCGv shift = tcg_temp_new(); 978 979 /* The instruction description has us left-shift the byte mask 980 and extract bits <15:8> and apply that zap at the end. This 981 is equivalent to simply performing the zap first and shifting 982 afterward. */ 983 gen_zapnoti(tmp, va, byte_mask); 984 985 tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7); 986 tcg_gen_shli_i64(shift, shift, 3); 987 tcg_gen_shl_i64(vc, tmp, shift); 988 } 989 } 990 991 /* MSKWH, MSKLH, MSKQH */ 992 static void gen_msk_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit, 993 uint8_t lit, uint8_t byte_mask) 994 { 995 if (islit) { 996 gen_zapnoti(vc, va, ~((byte_mask << (lit & 7)) >> 8)); 997 } else { 998 TCGv shift = tcg_temp_new(); 999 TCGv mask = tcg_temp_new(); 1000 1001 /* The instruction description is as above, where the byte_mask 1002 is shifted left, and then we extract bits <15:8>. This can be 1003 emulated with a right-shift on the expanded byte mask. This 1004 requires extra care because for an input <2:0> == 0 we need a 1005 shift of 64 bits in order to generate a zero. This is done by 1006 splitting the shift into two parts, the variable shift - 1 1007 followed by a constant 1 shift. The code we expand below is 1008 equivalent to ~(B * 8) & 63. */ 1009 1010 tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3); 1011 tcg_gen_not_i64(shift, shift); 1012 tcg_gen_andi_i64(shift, shift, 0x3f); 1013 tcg_gen_movi_i64(mask, zapnot_mask (byte_mask)); 1014 tcg_gen_shr_i64(mask, mask, shift); 1015 tcg_gen_shri_i64(mask, mask, 1); 1016 1017 tcg_gen_andc_i64(vc, va, mask); 1018 } 1019 } 1020 1021 /* MSKBL, MSKWL, MSKLL, MSKQL */ 1022 static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit, 1023 uint8_t lit, uint8_t byte_mask) 1024 { 1025 if (islit) { 1026 gen_zapnoti(vc, va, ~(byte_mask << (lit & 7))); 1027 } else { 1028 TCGv shift = tcg_temp_new(); 1029 TCGv mask = tcg_temp_new(); 1030 1031 tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7); 1032 tcg_gen_shli_i64(shift, shift, 3); 1033 tcg_gen_movi_i64(mask, zapnot_mask(byte_mask)); 1034 tcg_gen_shl_i64(mask, mask, shift); 1035 1036 tcg_gen_andc_i64(vc, va, mask); 1037 } 1038 } 1039 1040 static void gen_rx(DisasContext *ctx, int ra, int set) 1041 { 1042 if (ra != 31) { 1043 ld_flag_byte(ctx->ir[ra], ENV_FLAG_RX_SHIFT); 1044 } 1045 1046 st_flag_byte(tcg_constant_i64(set), ENV_FLAG_RX_SHIFT); 1047 } 1048 1049 static DisasJumpType gen_call_pal(DisasContext *ctx, int palcode) 1050 { 1051 /* We're emulating OSF/1 PALcode. Many of these are trivial access 1052 to internal cpu registers. */ 1053 1054 /* Unprivileged PAL call */ 1055 if (palcode >= 0x80 && palcode < 0xC0) { 1056 switch (palcode) { 1057 case 0x86: 1058 /* IMB */ 1059 /* No-op inside QEMU. */ 1060 break; 1061 case 0x9E: 1062 /* RDUNIQUE */ 1063 tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env, 1064 offsetof(CPUAlphaState, unique)); 1065 break; 1066 case 0x9F: 1067 /* WRUNIQUE */ 1068 tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env, 1069 offsetof(CPUAlphaState, unique)); 1070 break; 1071 default: 1072 palcode &= 0xbf; 1073 goto do_call_pal; 1074 } 1075 return DISAS_NEXT; 1076 } 1077 1078 #ifndef CONFIG_USER_ONLY 1079 /* Privileged PAL code */ 1080 if (palcode < 0x40 && (ctx->tbflags & ENV_FLAG_PS_USER) == 0) { 1081 switch (palcode) { 1082 case 0x01: 1083 /* CFLUSH */ 1084 /* No-op inside QEMU. */ 1085 break; 1086 case 0x02: 1087 /* DRAINA */ 1088 /* No-op inside QEMU. */ 1089 break; 1090 case 0x2D: 1091 /* WRVPTPTR */ 1092 tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env, 1093 offsetof(CPUAlphaState, vptptr)); 1094 break; 1095 case 0x31: 1096 /* WRVAL */ 1097 tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env, 1098 offsetof(CPUAlphaState, sysval)); 1099 break; 1100 case 0x32: 1101 /* RDVAL */ 1102 tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env, 1103 offsetof(CPUAlphaState, sysval)); 1104 break; 1105 1106 case 0x35: 1107 /* SWPIPL */ 1108 /* Note that we already know we're in kernel mode, so we know 1109 that PS only contains the 3 IPL bits. */ 1110 ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT); 1111 1112 /* But make sure and store only the 3 IPL bits from the user. */ 1113 { 1114 TCGv tmp = tcg_temp_new(); 1115 tcg_gen_andi_i64(tmp, ctx->ir[IR_A0], PS_INT_MASK); 1116 st_flag_byte(tmp, ENV_FLAG_PS_SHIFT); 1117 } 1118 1119 /* Allow interrupts to be recognized right away. */ 1120 tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next); 1121 return DISAS_PC_UPDATED_NOCHAIN; 1122 1123 case 0x36: 1124 /* RDPS */ 1125 ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT); 1126 break; 1127 1128 case 0x38: 1129 /* WRUSP */ 1130 tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env, 1131 offsetof(CPUAlphaState, usp)); 1132 break; 1133 case 0x3A: 1134 /* RDUSP */ 1135 tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env, 1136 offsetof(CPUAlphaState, usp)); 1137 break; 1138 case 0x3C: 1139 /* WHAMI */ 1140 tcg_gen_ld32s_i64(ctx->ir[IR_V0], cpu_env, 1141 -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index)); 1142 break; 1143 1144 case 0x3E: 1145 /* WTINT */ 1146 tcg_gen_st_i32(tcg_constant_i32(1), cpu_env, 1147 -offsetof(AlphaCPU, env) + 1148 offsetof(CPUState, halted)); 1149 tcg_gen_movi_i64(ctx->ir[IR_V0], 0); 1150 return gen_excp(ctx, EXCP_HALTED, 0); 1151 1152 default: 1153 palcode &= 0x3f; 1154 goto do_call_pal; 1155 } 1156 return DISAS_NEXT; 1157 } 1158 #endif 1159 return gen_invalid(ctx); 1160 1161 do_call_pal: 1162 #ifdef CONFIG_USER_ONLY 1163 return gen_excp(ctx, EXCP_CALL_PAL, palcode); 1164 #else 1165 { 1166 TCGv tmp = tcg_temp_new(); 1167 uint64_t exc_addr = ctx->base.pc_next; 1168 uint64_t entry = ctx->palbr; 1169 1170 if (ctx->tbflags & ENV_FLAG_PAL_MODE) { 1171 exc_addr |= 1; 1172 } else { 1173 tcg_gen_movi_i64(tmp, 1); 1174 st_flag_byte(tmp, ENV_FLAG_PAL_SHIFT); 1175 } 1176 1177 tcg_gen_movi_i64(tmp, exc_addr); 1178 tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr)); 1179 1180 entry += (palcode & 0x80 1181 ? 0x2000 + (palcode - 0x80) * 64 1182 : 0x1000 + palcode * 64); 1183 1184 tcg_gen_movi_i64(cpu_pc, entry); 1185 return DISAS_PC_UPDATED; 1186 } 1187 #endif 1188 } 1189 1190 #ifndef CONFIG_USER_ONLY 1191 1192 #define PR_LONG 0x200000 1193 1194 static int cpu_pr_data(int pr) 1195 { 1196 switch (pr) { 1197 case 2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG; 1198 case 3: return offsetof(CPUAlphaState, trap_arg0); 1199 case 4: return offsetof(CPUAlphaState, trap_arg1); 1200 case 5: return offsetof(CPUAlphaState, trap_arg2); 1201 case 6: return offsetof(CPUAlphaState, exc_addr); 1202 case 7: return offsetof(CPUAlphaState, palbr); 1203 case 8: return offsetof(CPUAlphaState, ptbr); 1204 case 9: return offsetof(CPUAlphaState, vptptr); 1205 case 10: return offsetof(CPUAlphaState, unique); 1206 case 11: return offsetof(CPUAlphaState, sysval); 1207 case 12: return offsetof(CPUAlphaState, usp); 1208 1209 case 40 ... 63: 1210 return offsetof(CPUAlphaState, scratch[pr - 40]); 1211 1212 case 251: 1213 return offsetof(CPUAlphaState, alarm_expire); 1214 } 1215 return 0; 1216 } 1217 1218 static DisasJumpType gen_mfpr(DisasContext *ctx, TCGv va, int regno) 1219 { 1220 void (*helper)(TCGv); 1221 int data; 1222 1223 switch (regno) { 1224 case 32 ... 39: 1225 /* Accessing the "non-shadow" general registers. */ 1226 regno = regno == 39 ? 25 : regno - 32 + 8; 1227 tcg_gen_mov_i64(va, cpu_std_ir[regno]); 1228 break; 1229 1230 case 250: /* WALLTIME */ 1231 helper = gen_helper_get_walltime; 1232 goto do_helper; 1233 case 249: /* VMTIME */ 1234 helper = gen_helper_get_vmtime; 1235 do_helper: 1236 if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { 1237 gen_io_start(); 1238 helper(va); 1239 return DISAS_PC_STALE; 1240 } else { 1241 helper(va); 1242 } 1243 break; 1244 1245 case 0: /* PS */ 1246 ld_flag_byte(va, ENV_FLAG_PS_SHIFT); 1247 break; 1248 case 1: /* FEN */ 1249 ld_flag_byte(va, ENV_FLAG_FEN_SHIFT); 1250 break; 1251 1252 default: 1253 /* The basic registers are data only, and unknown registers 1254 are read-zero, write-ignore. */ 1255 data = cpu_pr_data(regno); 1256 if (data == 0) { 1257 tcg_gen_movi_i64(va, 0); 1258 } else if (data & PR_LONG) { 1259 tcg_gen_ld32s_i64(va, cpu_env, data & ~PR_LONG); 1260 } else { 1261 tcg_gen_ld_i64(va, cpu_env, data); 1262 } 1263 break; 1264 } 1265 1266 return DISAS_NEXT; 1267 } 1268 1269 static DisasJumpType gen_mtpr(DisasContext *ctx, TCGv vb, int regno) 1270 { 1271 int data; 1272 DisasJumpType ret = DISAS_NEXT; 1273 1274 switch (regno) { 1275 case 255: 1276 /* TBIA */ 1277 gen_helper_tbia(cpu_env); 1278 break; 1279 1280 case 254: 1281 /* TBIS */ 1282 gen_helper_tbis(cpu_env, vb); 1283 break; 1284 1285 case 253: 1286 /* WAIT */ 1287 tcg_gen_st_i32(tcg_constant_i32(1), cpu_env, 1288 -offsetof(AlphaCPU, env) + offsetof(CPUState, halted)); 1289 return gen_excp(ctx, EXCP_HALTED, 0); 1290 1291 case 252: 1292 /* HALT */ 1293 gen_helper_halt(vb); 1294 return DISAS_PC_STALE; 1295 1296 case 251: 1297 /* ALARM */ 1298 if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { 1299 gen_io_start(); 1300 ret = DISAS_PC_STALE; 1301 } 1302 gen_helper_set_alarm(cpu_env, vb); 1303 break; 1304 1305 case 7: 1306 /* PALBR */ 1307 tcg_gen_st_i64(vb, cpu_env, offsetof(CPUAlphaState, palbr)); 1308 /* Changing the PAL base register implies un-chaining all of the TBs 1309 that ended with a CALL_PAL. Since the base register usually only 1310 changes during boot, flushing everything works well. */ 1311 gen_helper_tb_flush(cpu_env); 1312 return DISAS_PC_STALE; 1313 1314 case 32 ... 39: 1315 /* Accessing the "non-shadow" general registers. */ 1316 regno = regno == 39 ? 25 : regno - 32 + 8; 1317 tcg_gen_mov_i64(cpu_std_ir[regno], vb); 1318 break; 1319 1320 case 0: /* PS */ 1321 st_flag_byte(vb, ENV_FLAG_PS_SHIFT); 1322 break; 1323 case 1: /* FEN */ 1324 st_flag_byte(vb, ENV_FLAG_FEN_SHIFT); 1325 break; 1326 1327 default: 1328 /* The basic registers are data only, and unknown registers 1329 are read-zero, write-ignore. */ 1330 data = cpu_pr_data(regno); 1331 if (data != 0) { 1332 if (data & PR_LONG) { 1333 tcg_gen_st32_i64(vb, cpu_env, data & ~PR_LONG); 1334 } else { 1335 tcg_gen_st_i64(vb, cpu_env, data); 1336 } 1337 } 1338 break; 1339 } 1340 1341 return ret; 1342 } 1343 #endif /* !USER_ONLY*/ 1344 1345 #define REQUIRE_NO_LIT \ 1346 do { \ 1347 if (real_islit) { \ 1348 goto invalid_opc; \ 1349 } \ 1350 } while (0) 1351 1352 #define REQUIRE_AMASK(FLAG) \ 1353 do { \ 1354 if ((ctx->amask & AMASK_##FLAG) == 0) { \ 1355 goto invalid_opc; \ 1356 } \ 1357 } while (0) 1358 1359 #define REQUIRE_TB_FLAG(FLAG) \ 1360 do { \ 1361 if ((ctx->tbflags & (FLAG)) == 0) { \ 1362 goto invalid_opc; \ 1363 } \ 1364 } while (0) 1365 1366 #define REQUIRE_REG_31(WHICH) \ 1367 do { \ 1368 if (WHICH != 31) { \ 1369 goto invalid_opc; \ 1370 } \ 1371 } while (0) 1372 1373 #define REQUIRE_FEN \ 1374 do { \ 1375 if (!(ctx->tbflags & ENV_FLAG_FEN)) { \ 1376 goto raise_fen; \ 1377 } \ 1378 } while (0) 1379 1380 static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) 1381 { 1382 int32_t disp21, disp16, disp12 __attribute__((unused)); 1383 uint16_t fn11; 1384 uint8_t opc, ra, rb, rc, fpfn, fn7, lit; 1385 bool islit, real_islit; 1386 TCGv va, vb, vc, tmp, tmp2; 1387 TCGv_i32 t32; 1388 DisasJumpType ret; 1389 1390 /* Decode all instruction fields */ 1391 opc = extract32(insn, 26, 6); 1392 ra = extract32(insn, 21, 5); 1393 rb = extract32(insn, 16, 5); 1394 rc = extract32(insn, 0, 5); 1395 real_islit = islit = extract32(insn, 12, 1); 1396 lit = extract32(insn, 13, 8); 1397 1398 disp21 = sextract32(insn, 0, 21); 1399 disp16 = sextract32(insn, 0, 16); 1400 disp12 = sextract32(insn, 0, 12); 1401 1402 fn11 = extract32(insn, 5, 11); 1403 fpfn = extract32(insn, 5, 6); 1404 fn7 = extract32(insn, 5, 7); 1405 1406 if (rb == 31 && !islit) { 1407 islit = true; 1408 lit = 0; 1409 } 1410 1411 ret = DISAS_NEXT; 1412 switch (opc) { 1413 case 0x00: 1414 /* CALL_PAL */ 1415 ret = gen_call_pal(ctx, insn & 0x03ffffff); 1416 break; 1417 case 0x01: 1418 /* OPC01 */ 1419 goto invalid_opc; 1420 case 0x02: 1421 /* OPC02 */ 1422 goto invalid_opc; 1423 case 0x03: 1424 /* OPC03 */ 1425 goto invalid_opc; 1426 case 0x04: 1427 /* OPC04 */ 1428 goto invalid_opc; 1429 case 0x05: 1430 /* OPC05 */ 1431 goto invalid_opc; 1432 case 0x06: 1433 /* OPC06 */ 1434 goto invalid_opc; 1435 case 0x07: 1436 /* OPC07 */ 1437 goto invalid_opc; 1438 1439 case 0x09: 1440 /* LDAH */ 1441 disp16 = (uint32_t)disp16 << 16; 1442 /* fall through */ 1443 case 0x08: 1444 /* LDA */ 1445 va = dest_gpr(ctx, ra); 1446 /* It's worth special-casing immediate loads. */ 1447 if (rb == 31) { 1448 tcg_gen_movi_i64(va, disp16); 1449 } else { 1450 tcg_gen_addi_i64(va, load_gpr(ctx, rb), disp16); 1451 } 1452 break; 1453 1454 case 0x0A: 1455 /* LDBU */ 1456 REQUIRE_AMASK(BWX); 1457 gen_load_int(ctx, ra, rb, disp16, MO_UB, 0, 0); 1458 break; 1459 case 0x0B: 1460 /* LDQ_U */ 1461 gen_load_int(ctx, ra, rb, disp16, MO_LEUQ, 1, 0); 1462 break; 1463 case 0x0C: 1464 /* LDWU */ 1465 REQUIRE_AMASK(BWX); 1466 gen_load_int(ctx, ra, rb, disp16, MO_LEUW, 0, 0); 1467 break; 1468 case 0x0D: 1469 /* STW */ 1470 REQUIRE_AMASK(BWX); 1471 gen_store_int(ctx, ra, rb, disp16, MO_LEUW, 0); 1472 break; 1473 case 0x0E: 1474 /* STB */ 1475 REQUIRE_AMASK(BWX); 1476 gen_store_int(ctx, ra, rb, disp16, MO_UB, 0); 1477 break; 1478 case 0x0F: 1479 /* STQ_U */ 1480 gen_store_int(ctx, ra, rb, disp16, MO_LEUQ, 1); 1481 break; 1482 1483 case 0x10: 1484 vc = dest_gpr(ctx, rc); 1485 vb = load_gpr_lit(ctx, rb, lit, islit); 1486 1487 if (ra == 31) { 1488 if (fn7 == 0x00) { 1489 /* Special case ADDL as SEXTL. */ 1490 tcg_gen_ext32s_i64(vc, vb); 1491 break; 1492 } 1493 if (fn7 == 0x29) { 1494 /* Special case SUBQ as NEGQ. */ 1495 tcg_gen_neg_i64(vc, vb); 1496 break; 1497 } 1498 } 1499 1500 va = load_gpr(ctx, ra); 1501 switch (fn7) { 1502 case 0x00: 1503 /* ADDL */ 1504 tcg_gen_add_i64(vc, va, vb); 1505 tcg_gen_ext32s_i64(vc, vc); 1506 break; 1507 case 0x02: 1508 /* S4ADDL */ 1509 tmp = tcg_temp_new(); 1510 tcg_gen_shli_i64(tmp, va, 2); 1511 tcg_gen_add_i64(tmp, tmp, vb); 1512 tcg_gen_ext32s_i64(vc, tmp); 1513 break; 1514 case 0x09: 1515 /* SUBL */ 1516 tcg_gen_sub_i64(vc, va, vb); 1517 tcg_gen_ext32s_i64(vc, vc); 1518 break; 1519 case 0x0B: 1520 /* S4SUBL */ 1521 tmp = tcg_temp_new(); 1522 tcg_gen_shli_i64(tmp, va, 2); 1523 tcg_gen_sub_i64(tmp, tmp, vb); 1524 tcg_gen_ext32s_i64(vc, tmp); 1525 break; 1526 case 0x0F: 1527 /* CMPBGE */ 1528 if (ra == 31) { 1529 /* Special case 0 >= X as X == 0. */ 1530 gen_helper_cmpbe0(vc, vb); 1531 } else { 1532 gen_helper_cmpbge(vc, va, vb); 1533 } 1534 break; 1535 case 0x12: 1536 /* S8ADDL */ 1537 tmp = tcg_temp_new(); 1538 tcg_gen_shli_i64(tmp, va, 3); 1539 tcg_gen_add_i64(tmp, tmp, vb); 1540 tcg_gen_ext32s_i64(vc, tmp); 1541 break; 1542 case 0x1B: 1543 /* S8SUBL */ 1544 tmp = tcg_temp_new(); 1545 tcg_gen_shli_i64(tmp, va, 3); 1546 tcg_gen_sub_i64(tmp, tmp, vb); 1547 tcg_gen_ext32s_i64(vc, tmp); 1548 break; 1549 case 0x1D: 1550 /* CMPULT */ 1551 tcg_gen_setcond_i64(TCG_COND_LTU, vc, va, vb); 1552 break; 1553 case 0x20: 1554 /* ADDQ */ 1555 tcg_gen_add_i64(vc, va, vb); 1556 break; 1557 case 0x22: 1558 /* S4ADDQ */ 1559 tmp = tcg_temp_new(); 1560 tcg_gen_shli_i64(tmp, va, 2); 1561 tcg_gen_add_i64(vc, tmp, vb); 1562 break; 1563 case 0x29: 1564 /* SUBQ */ 1565 tcg_gen_sub_i64(vc, va, vb); 1566 break; 1567 case 0x2B: 1568 /* S4SUBQ */ 1569 tmp = tcg_temp_new(); 1570 tcg_gen_shli_i64(tmp, va, 2); 1571 tcg_gen_sub_i64(vc, tmp, vb); 1572 break; 1573 case 0x2D: 1574 /* CMPEQ */ 1575 tcg_gen_setcond_i64(TCG_COND_EQ, vc, va, vb); 1576 break; 1577 case 0x32: 1578 /* S8ADDQ */ 1579 tmp = tcg_temp_new(); 1580 tcg_gen_shli_i64(tmp, va, 3); 1581 tcg_gen_add_i64(vc, tmp, vb); 1582 break; 1583 case 0x3B: 1584 /* S8SUBQ */ 1585 tmp = tcg_temp_new(); 1586 tcg_gen_shli_i64(tmp, va, 3); 1587 tcg_gen_sub_i64(vc, tmp, vb); 1588 break; 1589 case 0x3D: 1590 /* CMPULE */ 1591 tcg_gen_setcond_i64(TCG_COND_LEU, vc, va, vb); 1592 break; 1593 case 0x40: 1594 /* ADDL/V */ 1595 tmp = tcg_temp_new(); 1596 tcg_gen_ext32s_i64(tmp, va); 1597 tcg_gen_ext32s_i64(vc, vb); 1598 tcg_gen_add_i64(tmp, tmp, vc); 1599 tcg_gen_ext32s_i64(vc, tmp); 1600 gen_helper_check_overflow(cpu_env, vc, tmp); 1601 break; 1602 case 0x49: 1603 /* SUBL/V */ 1604 tmp = tcg_temp_new(); 1605 tcg_gen_ext32s_i64(tmp, va); 1606 tcg_gen_ext32s_i64(vc, vb); 1607 tcg_gen_sub_i64(tmp, tmp, vc); 1608 tcg_gen_ext32s_i64(vc, tmp); 1609 gen_helper_check_overflow(cpu_env, vc, tmp); 1610 break; 1611 case 0x4D: 1612 /* CMPLT */ 1613 tcg_gen_setcond_i64(TCG_COND_LT, vc, va, vb); 1614 break; 1615 case 0x60: 1616 /* ADDQ/V */ 1617 tmp = tcg_temp_new(); 1618 tmp2 = tcg_temp_new(); 1619 tcg_gen_eqv_i64(tmp, va, vb); 1620 tcg_gen_mov_i64(tmp2, va); 1621 tcg_gen_add_i64(vc, va, vb); 1622 tcg_gen_xor_i64(tmp2, tmp2, vc); 1623 tcg_gen_and_i64(tmp, tmp, tmp2); 1624 tcg_gen_shri_i64(tmp, tmp, 63); 1625 tcg_gen_movi_i64(tmp2, 0); 1626 gen_helper_check_overflow(cpu_env, tmp, tmp2); 1627 break; 1628 case 0x69: 1629 /* SUBQ/V */ 1630 tmp = tcg_temp_new(); 1631 tmp2 = tcg_temp_new(); 1632 tcg_gen_xor_i64(tmp, va, vb); 1633 tcg_gen_mov_i64(tmp2, va); 1634 tcg_gen_sub_i64(vc, va, vb); 1635 tcg_gen_xor_i64(tmp2, tmp2, vc); 1636 tcg_gen_and_i64(tmp, tmp, tmp2); 1637 tcg_gen_shri_i64(tmp, tmp, 63); 1638 tcg_gen_movi_i64(tmp2, 0); 1639 gen_helper_check_overflow(cpu_env, tmp, tmp2); 1640 break; 1641 case 0x6D: 1642 /* CMPLE */ 1643 tcg_gen_setcond_i64(TCG_COND_LE, vc, va, vb); 1644 break; 1645 default: 1646 goto invalid_opc; 1647 } 1648 break; 1649 1650 case 0x11: 1651 if (fn7 == 0x20) { 1652 if (rc == 31) { 1653 /* Special case BIS as NOP. */ 1654 break; 1655 } 1656 if (ra == 31) { 1657 /* Special case BIS as MOV. */ 1658 vc = dest_gpr(ctx, rc); 1659 if (islit) { 1660 tcg_gen_movi_i64(vc, lit); 1661 } else { 1662 tcg_gen_mov_i64(vc, load_gpr(ctx, rb)); 1663 } 1664 break; 1665 } 1666 } 1667 1668 vc = dest_gpr(ctx, rc); 1669 vb = load_gpr_lit(ctx, rb, lit, islit); 1670 1671 if (fn7 == 0x28 && ra == 31) { 1672 /* Special case ORNOT as NOT. */ 1673 tcg_gen_not_i64(vc, vb); 1674 break; 1675 } 1676 1677 va = load_gpr(ctx, ra); 1678 switch (fn7) { 1679 case 0x00: 1680 /* AND */ 1681 tcg_gen_and_i64(vc, va, vb); 1682 break; 1683 case 0x08: 1684 /* BIC */ 1685 tcg_gen_andc_i64(vc, va, vb); 1686 break; 1687 case 0x14: 1688 /* CMOVLBS */ 1689 tmp = tcg_temp_new(); 1690 tcg_gen_andi_i64(tmp, va, 1); 1691 tcg_gen_movcond_i64(TCG_COND_NE, vc, tmp, load_zero(ctx), 1692 vb, load_gpr(ctx, rc)); 1693 break; 1694 case 0x16: 1695 /* CMOVLBC */ 1696 tmp = tcg_temp_new(); 1697 tcg_gen_andi_i64(tmp, va, 1); 1698 tcg_gen_movcond_i64(TCG_COND_EQ, vc, tmp, load_zero(ctx), 1699 vb, load_gpr(ctx, rc)); 1700 break; 1701 case 0x20: 1702 /* BIS */ 1703 tcg_gen_or_i64(vc, va, vb); 1704 break; 1705 case 0x24: 1706 /* CMOVEQ */ 1707 tcg_gen_movcond_i64(TCG_COND_EQ, vc, va, load_zero(ctx), 1708 vb, load_gpr(ctx, rc)); 1709 break; 1710 case 0x26: 1711 /* CMOVNE */ 1712 tcg_gen_movcond_i64(TCG_COND_NE, vc, va, load_zero(ctx), 1713 vb, load_gpr(ctx, rc)); 1714 break; 1715 case 0x28: 1716 /* ORNOT */ 1717 tcg_gen_orc_i64(vc, va, vb); 1718 break; 1719 case 0x40: 1720 /* XOR */ 1721 tcg_gen_xor_i64(vc, va, vb); 1722 break; 1723 case 0x44: 1724 /* CMOVLT */ 1725 tcg_gen_movcond_i64(TCG_COND_LT, vc, va, load_zero(ctx), 1726 vb, load_gpr(ctx, rc)); 1727 break; 1728 case 0x46: 1729 /* CMOVGE */ 1730 tcg_gen_movcond_i64(TCG_COND_GE, vc, va, load_zero(ctx), 1731 vb, load_gpr(ctx, rc)); 1732 break; 1733 case 0x48: 1734 /* EQV */ 1735 tcg_gen_eqv_i64(vc, va, vb); 1736 break; 1737 case 0x61: 1738 /* AMASK */ 1739 REQUIRE_REG_31(ra); 1740 tcg_gen_andi_i64(vc, vb, ~ctx->amask); 1741 break; 1742 case 0x64: 1743 /* CMOVLE */ 1744 tcg_gen_movcond_i64(TCG_COND_LE, vc, va, load_zero(ctx), 1745 vb, load_gpr(ctx, rc)); 1746 break; 1747 case 0x66: 1748 /* CMOVGT */ 1749 tcg_gen_movcond_i64(TCG_COND_GT, vc, va, load_zero(ctx), 1750 vb, load_gpr(ctx, rc)); 1751 break; 1752 case 0x6C: 1753 /* IMPLVER */ 1754 REQUIRE_REG_31(ra); 1755 tcg_gen_movi_i64(vc, ctx->implver); 1756 break; 1757 default: 1758 goto invalid_opc; 1759 } 1760 break; 1761 1762 case 0x12: 1763 vc = dest_gpr(ctx, rc); 1764 va = load_gpr(ctx, ra); 1765 switch (fn7) { 1766 case 0x02: 1767 /* MSKBL */ 1768 gen_msk_l(ctx, vc, va, rb, islit, lit, 0x01); 1769 break; 1770 case 0x06: 1771 /* EXTBL */ 1772 gen_ext_l(ctx, vc, va, rb, islit, lit, 0x01); 1773 break; 1774 case 0x0B: 1775 /* INSBL */ 1776 gen_ins_l(ctx, vc, va, rb, islit, lit, 0x01); 1777 break; 1778 case 0x12: 1779 /* MSKWL */ 1780 gen_msk_l(ctx, vc, va, rb, islit, lit, 0x03); 1781 break; 1782 case 0x16: 1783 /* EXTWL */ 1784 gen_ext_l(ctx, vc, va, rb, islit, lit, 0x03); 1785 break; 1786 case 0x1B: 1787 /* INSWL */ 1788 gen_ins_l(ctx, vc, va, rb, islit, lit, 0x03); 1789 break; 1790 case 0x22: 1791 /* MSKLL */ 1792 gen_msk_l(ctx, vc, va, rb, islit, lit, 0x0f); 1793 break; 1794 case 0x26: 1795 /* EXTLL */ 1796 gen_ext_l(ctx, vc, va, rb, islit, lit, 0x0f); 1797 break; 1798 case 0x2B: 1799 /* INSLL */ 1800 gen_ins_l(ctx, vc, va, rb, islit, lit, 0x0f); 1801 break; 1802 case 0x30: 1803 /* ZAP */ 1804 if (islit) { 1805 gen_zapnoti(vc, va, ~lit); 1806 } else { 1807 gen_helper_zap(vc, va, load_gpr(ctx, rb)); 1808 } 1809 break; 1810 case 0x31: 1811 /* ZAPNOT */ 1812 if (islit) { 1813 gen_zapnoti(vc, va, lit); 1814 } else { 1815 gen_helper_zapnot(vc, va, load_gpr(ctx, rb)); 1816 } 1817 break; 1818 case 0x32: 1819 /* MSKQL */ 1820 gen_msk_l(ctx, vc, va, rb, islit, lit, 0xff); 1821 break; 1822 case 0x34: 1823 /* SRL */ 1824 if (islit) { 1825 tcg_gen_shri_i64(vc, va, lit & 0x3f); 1826 } else { 1827 tmp = tcg_temp_new(); 1828 vb = load_gpr(ctx, rb); 1829 tcg_gen_andi_i64(tmp, vb, 0x3f); 1830 tcg_gen_shr_i64(vc, va, tmp); 1831 } 1832 break; 1833 case 0x36: 1834 /* EXTQL */ 1835 gen_ext_l(ctx, vc, va, rb, islit, lit, 0xff); 1836 break; 1837 case 0x39: 1838 /* SLL */ 1839 if (islit) { 1840 tcg_gen_shli_i64(vc, va, lit & 0x3f); 1841 } else { 1842 tmp = tcg_temp_new(); 1843 vb = load_gpr(ctx, rb); 1844 tcg_gen_andi_i64(tmp, vb, 0x3f); 1845 tcg_gen_shl_i64(vc, va, tmp); 1846 } 1847 break; 1848 case 0x3B: 1849 /* INSQL */ 1850 gen_ins_l(ctx, vc, va, rb, islit, lit, 0xff); 1851 break; 1852 case 0x3C: 1853 /* SRA */ 1854 if (islit) { 1855 tcg_gen_sari_i64(vc, va, lit & 0x3f); 1856 } else { 1857 tmp = tcg_temp_new(); 1858 vb = load_gpr(ctx, rb); 1859 tcg_gen_andi_i64(tmp, vb, 0x3f); 1860 tcg_gen_sar_i64(vc, va, tmp); 1861 } 1862 break; 1863 case 0x52: 1864 /* MSKWH */ 1865 gen_msk_h(ctx, vc, va, rb, islit, lit, 0x03); 1866 break; 1867 case 0x57: 1868 /* INSWH */ 1869 gen_ins_h(ctx, vc, va, rb, islit, lit, 0x03); 1870 break; 1871 case 0x5A: 1872 /* EXTWH */ 1873 gen_ext_h(ctx, vc, va, rb, islit, lit, 0x03); 1874 break; 1875 case 0x62: 1876 /* MSKLH */ 1877 gen_msk_h(ctx, vc, va, rb, islit, lit, 0x0f); 1878 break; 1879 case 0x67: 1880 /* INSLH */ 1881 gen_ins_h(ctx, vc, va, rb, islit, lit, 0x0f); 1882 break; 1883 case 0x6A: 1884 /* EXTLH */ 1885 gen_ext_h(ctx, vc, va, rb, islit, lit, 0x0f); 1886 break; 1887 case 0x72: 1888 /* MSKQH */ 1889 gen_msk_h(ctx, vc, va, rb, islit, lit, 0xff); 1890 break; 1891 case 0x77: 1892 /* INSQH */ 1893 gen_ins_h(ctx, vc, va, rb, islit, lit, 0xff); 1894 break; 1895 case 0x7A: 1896 /* EXTQH */ 1897 gen_ext_h(ctx, vc, va, rb, islit, lit, 0xff); 1898 break; 1899 default: 1900 goto invalid_opc; 1901 } 1902 break; 1903 1904 case 0x13: 1905 vc = dest_gpr(ctx, rc); 1906 vb = load_gpr_lit(ctx, rb, lit, islit); 1907 va = load_gpr(ctx, ra); 1908 switch (fn7) { 1909 case 0x00: 1910 /* MULL */ 1911 tcg_gen_mul_i64(vc, va, vb); 1912 tcg_gen_ext32s_i64(vc, vc); 1913 break; 1914 case 0x20: 1915 /* MULQ */ 1916 tcg_gen_mul_i64(vc, va, vb); 1917 break; 1918 case 0x30: 1919 /* UMULH */ 1920 tmp = tcg_temp_new(); 1921 tcg_gen_mulu2_i64(tmp, vc, va, vb); 1922 break; 1923 case 0x40: 1924 /* MULL/V */ 1925 tmp = tcg_temp_new(); 1926 tcg_gen_ext32s_i64(tmp, va); 1927 tcg_gen_ext32s_i64(vc, vb); 1928 tcg_gen_mul_i64(tmp, tmp, vc); 1929 tcg_gen_ext32s_i64(vc, tmp); 1930 gen_helper_check_overflow(cpu_env, vc, tmp); 1931 break; 1932 case 0x60: 1933 /* MULQ/V */ 1934 tmp = tcg_temp_new(); 1935 tmp2 = tcg_temp_new(); 1936 tcg_gen_muls2_i64(vc, tmp, va, vb); 1937 tcg_gen_sari_i64(tmp2, vc, 63); 1938 gen_helper_check_overflow(cpu_env, tmp, tmp2); 1939 break; 1940 default: 1941 goto invalid_opc; 1942 } 1943 break; 1944 1945 case 0x14: 1946 REQUIRE_AMASK(FIX); 1947 vc = dest_fpr(ctx, rc); 1948 switch (fpfn) { /* fn11 & 0x3F */ 1949 case 0x04: 1950 /* ITOFS */ 1951 REQUIRE_REG_31(rb); 1952 REQUIRE_FEN; 1953 t32 = tcg_temp_new_i32(); 1954 va = load_gpr(ctx, ra); 1955 tcg_gen_extrl_i64_i32(t32, va); 1956 gen_helper_memory_to_s(vc, t32); 1957 break; 1958 case 0x0A: 1959 /* SQRTF */ 1960 REQUIRE_REG_31(ra); 1961 REQUIRE_FEN; 1962 vb = load_fpr(ctx, rb); 1963 gen_helper_sqrtf(vc, cpu_env, vb); 1964 break; 1965 case 0x0B: 1966 /* SQRTS */ 1967 REQUIRE_REG_31(ra); 1968 REQUIRE_FEN; 1969 gen_sqrts(ctx, rb, rc, fn11); 1970 break; 1971 case 0x14: 1972 /* ITOFF */ 1973 REQUIRE_REG_31(rb); 1974 REQUIRE_FEN; 1975 t32 = tcg_temp_new_i32(); 1976 va = load_gpr(ctx, ra); 1977 tcg_gen_extrl_i64_i32(t32, va); 1978 gen_helper_memory_to_f(vc, t32); 1979 break; 1980 case 0x24: 1981 /* ITOFT */ 1982 REQUIRE_REG_31(rb); 1983 REQUIRE_FEN; 1984 va = load_gpr(ctx, ra); 1985 tcg_gen_mov_i64(vc, va); 1986 break; 1987 case 0x2A: 1988 /* SQRTG */ 1989 REQUIRE_REG_31(ra); 1990 REQUIRE_FEN; 1991 vb = load_fpr(ctx, rb); 1992 gen_helper_sqrtg(vc, cpu_env, vb); 1993 break; 1994 case 0x02B: 1995 /* SQRTT */ 1996 REQUIRE_REG_31(ra); 1997 REQUIRE_FEN; 1998 gen_sqrtt(ctx, rb, rc, fn11); 1999 break; 2000 default: 2001 goto invalid_opc; 2002 } 2003 break; 2004 2005 case 0x15: 2006 /* VAX floating point */ 2007 /* XXX: rounding mode and trap are ignored (!) */ 2008 vc = dest_fpr(ctx, rc); 2009 vb = load_fpr(ctx, rb); 2010 va = load_fpr(ctx, ra); 2011 switch (fpfn) { /* fn11 & 0x3F */ 2012 case 0x00: 2013 /* ADDF */ 2014 REQUIRE_FEN; 2015 gen_helper_addf(vc, cpu_env, va, vb); 2016 break; 2017 case 0x01: 2018 /* SUBF */ 2019 REQUIRE_FEN; 2020 gen_helper_subf(vc, cpu_env, va, vb); 2021 break; 2022 case 0x02: 2023 /* MULF */ 2024 REQUIRE_FEN; 2025 gen_helper_mulf(vc, cpu_env, va, vb); 2026 break; 2027 case 0x03: 2028 /* DIVF */ 2029 REQUIRE_FEN; 2030 gen_helper_divf(vc, cpu_env, va, vb); 2031 break; 2032 case 0x1E: 2033 /* CVTDG -- TODO */ 2034 REQUIRE_REG_31(ra); 2035 goto invalid_opc; 2036 case 0x20: 2037 /* ADDG */ 2038 REQUIRE_FEN; 2039 gen_helper_addg(vc, cpu_env, va, vb); 2040 break; 2041 case 0x21: 2042 /* SUBG */ 2043 REQUIRE_FEN; 2044 gen_helper_subg(vc, cpu_env, va, vb); 2045 break; 2046 case 0x22: 2047 /* MULG */ 2048 REQUIRE_FEN; 2049 gen_helper_mulg(vc, cpu_env, va, vb); 2050 break; 2051 case 0x23: 2052 /* DIVG */ 2053 REQUIRE_FEN; 2054 gen_helper_divg(vc, cpu_env, va, vb); 2055 break; 2056 case 0x25: 2057 /* CMPGEQ */ 2058 REQUIRE_FEN; 2059 gen_helper_cmpgeq(vc, cpu_env, va, vb); 2060 break; 2061 case 0x26: 2062 /* CMPGLT */ 2063 REQUIRE_FEN; 2064 gen_helper_cmpglt(vc, cpu_env, va, vb); 2065 break; 2066 case 0x27: 2067 /* CMPGLE */ 2068 REQUIRE_FEN; 2069 gen_helper_cmpgle(vc, cpu_env, va, vb); 2070 break; 2071 case 0x2C: 2072 /* CVTGF */ 2073 REQUIRE_REG_31(ra); 2074 REQUIRE_FEN; 2075 gen_helper_cvtgf(vc, cpu_env, vb); 2076 break; 2077 case 0x2D: 2078 /* CVTGD -- TODO */ 2079 REQUIRE_REG_31(ra); 2080 goto invalid_opc; 2081 case 0x2F: 2082 /* CVTGQ */ 2083 REQUIRE_REG_31(ra); 2084 REQUIRE_FEN; 2085 gen_helper_cvtgq(vc, cpu_env, vb); 2086 break; 2087 case 0x3C: 2088 /* CVTQF */ 2089 REQUIRE_REG_31(ra); 2090 REQUIRE_FEN; 2091 gen_helper_cvtqf(vc, cpu_env, vb); 2092 break; 2093 case 0x3E: 2094 /* CVTQG */ 2095 REQUIRE_REG_31(ra); 2096 REQUIRE_FEN; 2097 gen_helper_cvtqg(vc, cpu_env, vb); 2098 break; 2099 default: 2100 goto invalid_opc; 2101 } 2102 break; 2103 2104 case 0x16: 2105 /* IEEE floating-point */ 2106 switch (fpfn) { /* fn11 & 0x3F */ 2107 case 0x00: 2108 /* ADDS */ 2109 REQUIRE_FEN; 2110 gen_adds(ctx, ra, rb, rc, fn11); 2111 break; 2112 case 0x01: 2113 /* SUBS */ 2114 REQUIRE_FEN; 2115 gen_subs(ctx, ra, rb, rc, fn11); 2116 break; 2117 case 0x02: 2118 /* MULS */ 2119 REQUIRE_FEN; 2120 gen_muls(ctx, ra, rb, rc, fn11); 2121 break; 2122 case 0x03: 2123 /* DIVS */ 2124 REQUIRE_FEN; 2125 gen_divs(ctx, ra, rb, rc, fn11); 2126 break; 2127 case 0x20: 2128 /* ADDT */ 2129 REQUIRE_FEN; 2130 gen_addt(ctx, ra, rb, rc, fn11); 2131 break; 2132 case 0x21: 2133 /* SUBT */ 2134 REQUIRE_FEN; 2135 gen_subt(ctx, ra, rb, rc, fn11); 2136 break; 2137 case 0x22: 2138 /* MULT */ 2139 REQUIRE_FEN; 2140 gen_mult(ctx, ra, rb, rc, fn11); 2141 break; 2142 case 0x23: 2143 /* DIVT */ 2144 REQUIRE_FEN; 2145 gen_divt(ctx, ra, rb, rc, fn11); 2146 break; 2147 case 0x24: 2148 /* CMPTUN */ 2149 REQUIRE_FEN; 2150 gen_cmptun(ctx, ra, rb, rc, fn11); 2151 break; 2152 case 0x25: 2153 /* CMPTEQ */ 2154 REQUIRE_FEN; 2155 gen_cmpteq(ctx, ra, rb, rc, fn11); 2156 break; 2157 case 0x26: 2158 /* CMPTLT */ 2159 REQUIRE_FEN; 2160 gen_cmptlt(ctx, ra, rb, rc, fn11); 2161 break; 2162 case 0x27: 2163 /* CMPTLE */ 2164 REQUIRE_FEN; 2165 gen_cmptle(ctx, ra, rb, rc, fn11); 2166 break; 2167 case 0x2C: 2168 REQUIRE_REG_31(ra); 2169 REQUIRE_FEN; 2170 if (fn11 == 0x2AC || fn11 == 0x6AC) { 2171 /* CVTST */ 2172 gen_cvtst(ctx, rb, rc, fn11); 2173 } else { 2174 /* CVTTS */ 2175 gen_cvtts(ctx, rb, rc, fn11); 2176 } 2177 break; 2178 case 0x2F: 2179 /* CVTTQ */ 2180 REQUIRE_REG_31(ra); 2181 REQUIRE_FEN; 2182 gen_cvttq(ctx, rb, rc, fn11); 2183 break; 2184 case 0x3C: 2185 /* CVTQS */ 2186 REQUIRE_REG_31(ra); 2187 REQUIRE_FEN; 2188 gen_cvtqs(ctx, rb, rc, fn11); 2189 break; 2190 case 0x3E: 2191 /* CVTQT */ 2192 REQUIRE_REG_31(ra); 2193 REQUIRE_FEN; 2194 gen_cvtqt(ctx, rb, rc, fn11); 2195 break; 2196 default: 2197 goto invalid_opc; 2198 } 2199 break; 2200 2201 case 0x17: 2202 switch (fn11) { 2203 case 0x010: 2204 /* CVTLQ */ 2205 REQUIRE_REG_31(ra); 2206 REQUIRE_FEN; 2207 vc = dest_fpr(ctx, rc); 2208 vb = load_fpr(ctx, rb); 2209 gen_cvtlq(vc, vb); 2210 break; 2211 case 0x020: 2212 /* CPYS */ 2213 REQUIRE_FEN; 2214 if (rc == 31) { 2215 /* Special case CPYS as FNOP. */ 2216 } else { 2217 vc = dest_fpr(ctx, rc); 2218 va = load_fpr(ctx, ra); 2219 if (ra == rb) { 2220 /* Special case CPYS as FMOV. */ 2221 tcg_gen_mov_i64(vc, va); 2222 } else { 2223 vb = load_fpr(ctx, rb); 2224 gen_cpy_mask(vc, va, vb, 0, 0x8000000000000000ULL); 2225 } 2226 } 2227 break; 2228 case 0x021: 2229 /* CPYSN */ 2230 REQUIRE_FEN; 2231 vc = dest_fpr(ctx, rc); 2232 vb = load_fpr(ctx, rb); 2233 va = load_fpr(ctx, ra); 2234 gen_cpy_mask(vc, va, vb, 1, 0x8000000000000000ULL); 2235 break; 2236 case 0x022: 2237 /* CPYSE */ 2238 REQUIRE_FEN; 2239 vc = dest_fpr(ctx, rc); 2240 vb = load_fpr(ctx, rb); 2241 va = load_fpr(ctx, ra); 2242 gen_cpy_mask(vc, va, vb, 0, 0xFFF0000000000000ULL); 2243 break; 2244 case 0x024: 2245 /* MT_FPCR */ 2246 REQUIRE_FEN; 2247 va = load_fpr(ctx, ra); 2248 gen_helper_store_fpcr(cpu_env, va); 2249 if (ctx->tb_rm == QUAL_RM_D) { 2250 /* Re-do the copy of the rounding mode to fp_status 2251 the next time we use dynamic rounding. */ 2252 ctx->tb_rm = -1; 2253 } 2254 break; 2255 case 0x025: 2256 /* MF_FPCR */ 2257 REQUIRE_FEN; 2258 va = dest_fpr(ctx, ra); 2259 gen_helper_load_fpcr(va, cpu_env); 2260 break; 2261 case 0x02A: 2262 /* FCMOVEQ */ 2263 REQUIRE_FEN; 2264 gen_fcmov(ctx, TCG_COND_EQ, ra, rb, rc); 2265 break; 2266 case 0x02B: 2267 /* FCMOVNE */ 2268 REQUIRE_FEN; 2269 gen_fcmov(ctx, TCG_COND_NE, ra, rb, rc); 2270 break; 2271 case 0x02C: 2272 /* FCMOVLT */ 2273 REQUIRE_FEN; 2274 gen_fcmov(ctx, TCG_COND_LT, ra, rb, rc); 2275 break; 2276 case 0x02D: 2277 /* FCMOVGE */ 2278 REQUIRE_FEN; 2279 gen_fcmov(ctx, TCG_COND_GE, ra, rb, rc); 2280 break; 2281 case 0x02E: 2282 /* FCMOVLE */ 2283 REQUIRE_FEN; 2284 gen_fcmov(ctx, TCG_COND_LE, ra, rb, rc); 2285 break; 2286 case 0x02F: 2287 /* FCMOVGT */ 2288 REQUIRE_FEN; 2289 gen_fcmov(ctx, TCG_COND_GT, ra, rb, rc); 2290 break; 2291 case 0x030: /* CVTQL */ 2292 case 0x130: /* CVTQL/V */ 2293 case 0x530: /* CVTQL/SV */ 2294 REQUIRE_REG_31(ra); 2295 REQUIRE_FEN; 2296 vc = dest_fpr(ctx, rc); 2297 vb = load_fpr(ctx, rb); 2298 gen_helper_cvtql(vc, cpu_env, vb); 2299 gen_fp_exc_raise(rc, fn11); 2300 break; 2301 default: 2302 goto invalid_opc; 2303 } 2304 break; 2305 2306 case 0x18: 2307 switch ((uint16_t)disp16) { 2308 case 0x0000: 2309 /* TRAPB */ 2310 /* No-op. */ 2311 break; 2312 case 0x0400: 2313 /* EXCB */ 2314 /* No-op. */ 2315 break; 2316 case 0x4000: 2317 /* MB */ 2318 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC); 2319 break; 2320 case 0x4400: 2321 /* WMB */ 2322 tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC); 2323 break; 2324 case 0x8000: 2325 /* FETCH */ 2326 /* No-op */ 2327 break; 2328 case 0xA000: 2329 /* FETCH_M */ 2330 /* No-op */ 2331 break; 2332 case 0xC000: 2333 /* RPCC */ 2334 va = dest_gpr(ctx, ra); 2335 if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { 2336 gen_io_start(); 2337 gen_helper_load_pcc(va, cpu_env); 2338 ret = DISAS_PC_STALE; 2339 } else { 2340 gen_helper_load_pcc(va, cpu_env); 2341 } 2342 break; 2343 case 0xE000: 2344 /* RC */ 2345 gen_rx(ctx, ra, 0); 2346 break; 2347 case 0xE800: 2348 /* ECB */ 2349 break; 2350 case 0xF000: 2351 /* RS */ 2352 gen_rx(ctx, ra, 1); 2353 break; 2354 case 0xF800: 2355 /* WH64 */ 2356 /* No-op */ 2357 break; 2358 case 0xFC00: 2359 /* WH64EN */ 2360 /* No-op */ 2361 break; 2362 default: 2363 goto invalid_opc; 2364 } 2365 break; 2366 2367 case 0x19: 2368 /* HW_MFPR (PALcode) */ 2369 #ifndef CONFIG_USER_ONLY 2370 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE); 2371 va = dest_gpr(ctx, ra); 2372 ret = gen_mfpr(ctx, va, insn & 0xffff); 2373 break; 2374 #else 2375 goto invalid_opc; 2376 #endif 2377 2378 case 0x1A: 2379 /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch 2380 prediction stack action, which of course we don't implement. */ 2381 vb = load_gpr(ctx, rb); 2382 tcg_gen_andi_i64(cpu_pc, vb, ~3); 2383 if (ra != 31) { 2384 tcg_gen_movi_i64(ctx->ir[ra], ctx->base.pc_next); 2385 } 2386 ret = DISAS_PC_UPDATED; 2387 break; 2388 2389 case 0x1B: 2390 /* HW_LD (PALcode) */ 2391 #ifndef CONFIG_USER_ONLY 2392 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE); 2393 { 2394 TCGv addr = tcg_temp_new(); 2395 vb = load_gpr(ctx, rb); 2396 va = dest_gpr(ctx, ra); 2397 2398 tcg_gen_addi_i64(addr, vb, disp12); 2399 switch ((insn >> 12) & 0xF) { 2400 case 0x0: 2401 /* Longword physical access (hw_ldl/p) */ 2402 tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL | MO_ALIGN); 2403 break; 2404 case 0x1: 2405 /* Quadword physical access (hw_ldq/p) */ 2406 tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN); 2407 break; 2408 case 0x2: 2409 /* Longword physical access with lock (hw_ldl_l/p) */ 2410 tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL | MO_ALIGN); 2411 tcg_gen_mov_i64(cpu_lock_addr, addr); 2412 tcg_gen_mov_i64(cpu_lock_value, va); 2413 break; 2414 case 0x3: 2415 /* Quadword physical access with lock (hw_ldq_l/p) */ 2416 tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN); 2417 tcg_gen_mov_i64(cpu_lock_addr, addr); 2418 tcg_gen_mov_i64(cpu_lock_value, va); 2419 break; 2420 case 0x4: 2421 /* Longword virtual PTE fetch (hw_ldl/v) */ 2422 goto invalid_opc; 2423 case 0x5: 2424 /* Quadword virtual PTE fetch (hw_ldq/v) */ 2425 goto invalid_opc; 2426 break; 2427 case 0x6: 2428 /* Invalid */ 2429 goto invalid_opc; 2430 case 0x7: 2431 /* Invaliid */ 2432 goto invalid_opc; 2433 case 0x8: 2434 /* Longword virtual access (hw_ldl) */ 2435 goto invalid_opc; 2436 case 0x9: 2437 /* Quadword virtual access (hw_ldq) */ 2438 goto invalid_opc; 2439 case 0xA: 2440 /* Longword virtual access with protection check (hw_ldl/w) */ 2441 tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, 2442 MO_LESL | MO_ALIGN); 2443 break; 2444 case 0xB: 2445 /* Quadword virtual access with protection check (hw_ldq/w) */ 2446 tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, 2447 MO_LEUQ | MO_ALIGN); 2448 break; 2449 case 0xC: 2450 /* Longword virtual access with alt access mode (hw_ldl/a)*/ 2451 goto invalid_opc; 2452 case 0xD: 2453 /* Quadword virtual access with alt access mode (hw_ldq/a) */ 2454 goto invalid_opc; 2455 case 0xE: 2456 /* Longword virtual access with alternate access mode and 2457 protection checks (hw_ldl/wa) */ 2458 tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, 2459 MO_LESL | MO_ALIGN); 2460 break; 2461 case 0xF: 2462 /* Quadword virtual access with alternate access mode and 2463 protection checks (hw_ldq/wa) */ 2464 tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, 2465 MO_LEUQ | MO_ALIGN); 2466 break; 2467 } 2468 break; 2469 } 2470 #else 2471 goto invalid_opc; 2472 #endif 2473 2474 case 0x1C: 2475 vc = dest_gpr(ctx, rc); 2476 if (fn7 == 0x70) { 2477 /* FTOIT */ 2478 REQUIRE_AMASK(FIX); 2479 REQUIRE_REG_31(rb); 2480 va = load_fpr(ctx, ra); 2481 tcg_gen_mov_i64(vc, va); 2482 break; 2483 } else if (fn7 == 0x78) { 2484 /* FTOIS */ 2485 REQUIRE_AMASK(FIX); 2486 REQUIRE_REG_31(rb); 2487 t32 = tcg_temp_new_i32(); 2488 va = load_fpr(ctx, ra); 2489 gen_helper_s_to_memory(t32, va); 2490 tcg_gen_ext_i32_i64(vc, t32); 2491 break; 2492 } 2493 2494 vb = load_gpr_lit(ctx, rb, lit, islit); 2495 switch (fn7) { 2496 case 0x00: 2497 /* SEXTB */ 2498 REQUIRE_AMASK(BWX); 2499 REQUIRE_REG_31(ra); 2500 tcg_gen_ext8s_i64(vc, vb); 2501 break; 2502 case 0x01: 2503 /* SEXTW */ 2504 REQUIRE_AMASK(BWX); 2505 REQUIRE_REG_31(ra); 2506 tcg_gen_ext16s_i64(vc, vb); 2507 break; 2508 case 0x30: 2509 /* CTPOP */ 2510 REQUIRE_AMASK(CIX); 2511 REQUIRE_REG_31(ra); 2512 REQUIRE_NO_LIT; 2513 tcg_gen_ctpop_i64(vc, vb); 2514 break; 2515 case 0x31: 2516 /* PERR */ 2517 REQUIRE_AMASK(MVI); 2518 REQUIRE_NO_LIT; 2519 va = load_gpr(ctx, ra); 2520 gen_helper_perr(vc, va, vb); 2521 break; 2522 case 0x32: 2523 /* CTLZ */ 2524 REQUIRE_AMASK(CIX); 2525 REQUIRE_REG_31(ra); 2526 REQUIRE_NO_LIT; 2527 tcg_gen_clzi_i64(vc, vb, 64); 2528 break; 2529 case 0x33: 2530 /* CTTZ */ 2531 REQUIRE_AMASK(CIX); 2532 REQUIRE_REG_31(ra); 2533 REQUIRE_NO_LIT; 2534 tcg_gen_ctzi_i64(vc, vb, 64); 2535 break; 2536 case 0x34: 2537 /* UNPKBW */ 2538 REQUIRE_AMASK(MVI); 2539 REQUIRE_REG_31(ra); 2540 REQUIRE_NO_LIT; 2541 gen_helper_unpkbw(vc, vb); 2542 break; 2543 case 0x35: 2544 /* UNPKBL */ 2545 REQUIRE_AMASK(MVI); 2546 REQUIRE_REG_31(ra); 2547 REQUIRE_NO_LIT; 2548 gen_helper_unpkbl(vc, vb); 2549 break; 2550 case 0x36: 2551 /* PKWB */ 2552 REQUIRE_AMASK(MVI); 2553 REQUIRE_REG_31(ra); 2554 REQUIRE_NO_LIT; 2555 gen_helper_pkwb(vc, vb); 2556 break; 2557 case 0x37: 2558 /* PKLB */ 2559 REQUIRE_AMASK(MVI); 2560 REQUIRE_REG_31(ra); 2561 REQUIRE_NO_LIT; 2562 gen_helper_pklb(vc, vb); 2563 break; 2564 case 0x38: 2565 /* MINSB8 */ 2566 REQUIRE_AMASK(MVI); 2567 va = load_gpr(ctx, ra); 2568 gen_helper_minsb8(vc, va, vb); 2569 break; 2570 case 0x39: 2571 /* MINSW4 */ 2572 REQUIRE_AMASK(MVI); 2573 va = load_gpr(ctx, ra); 2574 gen_helper_minsw4(vc, va, vb); 2575 break; 2576 case 0x3A: 2577 /* MINUB8 */ 2578 REQUIRE_AMASK(MVI); 2579 va = load_gpr(ctx, ra); 2580 gen_helper_minub8(vc, va, vb); 2581 break; 2582 case 0x3B: 2583 /* MINUW4 */ 2584 REQUIRE_AMASK(MVI); 2585 va = load_gpr(ctx, ra); 2586 gen_helper_minuw4(vc, va, vb); 2587 break; 2588 case 0x3C: 2589 /* MAXUB8 */ 2590 REQUIRE_AMASK(MVI); 2591 va = load_gpr(ctx, ra); 2592 gen_helper_maxub8(vc, va, vb); 2593 break; 2594 case 0x3D: 2595 /* MAXUW4 */ 2596 REQUIRE_AMASK(MVI); 2597 va = load_gpr(ctx, ra); 2598 gen_helper_maxuw4(vc, va, vb); 2599 break; 2600 case 0x3E: 2601 /* MAXSB8 */ 2602 REQUIRE_AMASK(MVI); 2603 va = load_gpr(ctx, ra); 2604 gen_helper_maxsb8(vc, va, vb); 2605 break; 2606 case 0x3F: 2607 /* MAXSW4 */ 2608 REQUIRE_AMASK(MVI); 2609 va = load_gpr(ctx, ra); 2610 gen_helper_maxsw4(vc, va, vb); 2611 break; 2612 default: 2613 goto invalid_opc; 2614 } 2615 break; 2616 2617 case 0x1D: 2618 /* HW_MTPR (PALcode) */ 2619 #ifndef CONFIG_USER_ONLY 2620 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE); 2621 vb = load_gpr(ctx, rb); 2622 ret = gen_mtpr(ctx, vb, insn & 0xffff); 2623 break; 2624 #else 2625 goto invalid_opc; 2626 #endif 2627 2628 case 0x1E: 2629 /* HW_RET (PALcode) */ 2630 #ifndef CONFIG_USER_ONLY 2631 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE); 2632 if (rb == 31) { 2633 /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return 2634 address from EXC_ADDR. This turns out to be useful for our 2635 emulation PALcode, so continue to accept it. */ 2636 vb = dest_sink(ctx); 2637 tcg_gen_ld_i64(vb, cpu_env, offsetof(CPUAlphaState, exc_addr)); 2638 } else { 2639 vb = load_gpr(ctx, rb); 2640 } 2641 tcg_gen_movi_i64(cpu_lock_addr, -1); 2642 st_flag_byte(load_zero(ctx), ENV_FLAG_RX_SHIFT); 2643 tmp = tcg_temp_new(); 2644 tcg_gen_andi_i64(tmp, vb, 1); 2645 st_flag_byte(tmp, ENV_FLAG_PAL_SHIFT); 2646 tcg_gen_andi_i64(cpu_pc, vb, ~3); 2647 /* Allow interrupts to be recognized right away. */ 2648 ret = DISAS_PC_UPDATED_NOCHAIN; 2649 break; 2650 #else 2651 goto invalid_opc; 2652 #endif 2653 2654 case 0x1F: 2655 /* HW_ST (PALcode) */ 2656 #ifndef CONFIG_USER_ONLY 2657 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE); 2658 { 2659 switch ((insn >> 12) & 0xF) { 2660 case 0x0: 2661 /* Longword physical access */ 2662 va = load_gpr(ctx, ra); 2663 vb = load_gpr(ctx, rb); 2664 tmp = tcg_temp_new(); 2665 tcg_gen_addi_i64(tmp, vb, disp12); 2666 tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LESL | MO_ALIGN); 2667 break; 2668 case 0x1: 2669 /* Quadword physical access */ 2670 va = load_gpr(ctx, ra); 2671 vb = load_gpr(ctx, rb); 2672 tmp = tcg_temp_new(); 2673 tcg_gen_addi_i64(tmp, vb, disp12); 2674 tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN); 2675 break; 2676 case 0x2: 2677 /* Longword physical access with lock */ 2678 ret = gen_store_conditional(ctx, ra, rb, disp12, 2679 MMU_PHYS_IDX, MO_LESL | MO_ALIGN); 2680 break; 2681 case 0x3: 2682 /* Quadword physical access with lock */ 2683 ret = gen_store_conditional(ctx, ra, rb, disp12, 2684 MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN); 2685 break; 2686 case 0x4: 2687 /* Longword virtual access */ 2688 goto invalid_opc; 2689 case 0x5: 2690 /* Quadword virtual access */ 2691 goto invalid_opc; 2692 case 0x6: 2693 /* Invalid */ 2694 goto invalid_opc; 2695 case 0x7: 2696 /* Invalid */ 2697 goto invalid_opc; 2698 case 0x8: 2699 /* Invalid */ 2700 goto invalid_opc; 2701 case 0x9: 2702 /* Invalid */ 2703 goto invalid_opc; 2704 case 0xA: 2705 /* Invalid */ 2706 goto invalid_opc; 2707 case 0xB: 2708 /* Invalid */ 2709 goto invalid_opc; 2710 case 0xC: 2711 /* Longword virtual access with alternate access mode */ 2712 goto invalid_opc; 2713 case 0xD: 2714 /* Quadword virtual access with alternate access mode */ 2715 goto invalid_opc; 2716 case 0xE: 2717 /* Invalid */ 2718 goto invalid_opc; 2719 case 0xF: 2720 /* Invalid */ 2721 goto invalid_opc; 2722 } 2723 break; 2724 } 2725 #else 2726 goto invalid_opc; 2727 #endif 2728 case 0x20: 2729 /* LDF */ 2730 REQUIRE_FEN; 2731 gen_load_fp(ctx, ra, rb, disp16, gen_ldf); 2732 break; 2733 case 0x21: 2734 /* LDG */ 2735 REQUIRE_FEN; 2736 gen_load_fp(ctx, ra, rb, disp16, gen_ldg); 2737 break; 2738 case 0x22: 2739 /* LDS */ 2740 REQUIRE_FEN; 2741 gen_load_fp(ctx, ra, rb, disp16, gen_lds); 2742 break; 2743 case 0x23: 2744 /* LDT */ 2745 REQUIRE_FEN; 2746 gen_load_fp(ctx, ra, rb, disp16, gen_ldt); 2747 break; 2748 case 0x24: 2749 /* STF */ 2750 REQUIRE_FEN; 2751 gen_store_fp(ctx, ra, rb, disp16, gen_stf); 2752 break; 2753 case 0x25: 2754 /* STG */ 2755 REQUIRE_FEN; 2756 gen_store_fp(ctx, ra, rb, disp16, gen_stg); 2757 break; 2758 case 0x26: 2759 /* STS */ 2760 REQUIRE_FEN; 2761 gen_store_fp(ctx, ra, rb, disp16, gen_sts); 2762 break; 2763 case 0x27: 2764 /* STT */ 2765 REQUIRE_FEN; 2766 gen_store_fp(ctx, ra, rb, disp16, gen_stt); 2767 break; 2768 case 0x28: 2769 /* LDL */ 2770 gen_load_int(ctx, ra, rb, disp16, MO_LESL, 0, 0); 2771 break; 2772 case 0x29: 2773 /* LDQ */ 2774 gen_load_int(ctx, ra, rb, disp16, MO_LEUQ, 0, 0); 2775 break; 2776 case 0x2A: 2777 /* LDL_L */ 2778 gen_load_int(ctx, ra, rb, disp16, MO_LESL | MO_ALIGN, 0, 1); 2779 break; 2780 case 0x2B: 2781 /* LDQ_L */ 2782 gen_load_int(ctx, ra, rb, disp16, MO_LEUQ | MO_ALIGN, 0, 1); 2783 break; 2784 case 0x2C: 2785 /* STL */ 2786 gen_store_int(ctx, ra, rb, disp16, MO_LEUL, 0); 2787 break; 2788 case 0x2D: 2789 /* STQ */ 2790 gen_store_int(ctx, ra, rb, disp16, MO_LEUQ, 0); 2791 break; 2792 case 0x2E: 2793 /* STL_C */ 2794 ret = gen_store_conditional(ctx, ra, rb, disp16, 2795 ctx->mem_idx, MO_LESL | MO_ALIGN); 2796 break; 2797 case 0x2F: 2798 /* STQ_C */ 2799 ret = gen_store_conditional(ctx, ra, rb, disp16, 2800 ctx->mem_idx, MO_LEUQ | MO_ALIGN); 2801 break; 2802 case 0x30: 2803 /* BR */ 2804 ret = gen_bdirect(ctx, ra, disp21); 2805 break; 2806 case 0x31: /* FBEQ */ 2807 REQUIRE_FEN; 2808 ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21); 2809 break; 2810 case 0x32: /* FBLT */ 2811 REQUIRE_FEN; 2812 ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21); 2813 break; 2814 case 0x33: /* FBLE */ 2815 REQUIRE_FEN; 2816 ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21); 2817 break; 2818 case 0x34: 2819 /* BSR */ 2820 ret = gen_bdirect(ctx, ra, disp21); 2821 break; 2822 case 0x35: /* FBNE */ 2823 REQUIRE_FEN; 2824 ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21); 2825 break; 2826 case 0x36: /* FBGE */ 2827 REQUIRE_FEN; 2828 ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21); 2829 break; 2830 case 0x37: /* FBGT */ 2831 REQUIRE_FEN; 2832 ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21); 2833 break; 2834 case 0x38: 2835 /* BLBC */ 2836 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1); 2837 break; 2838 case 0x39: 2839 /* BEQ */ 2840 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0); 2841 break; 2842 case 0x3A: 2843 /* BLT */ 2844 ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0); 2845 break; 2846 case 0x3B: 2847 /* BLE */ 2848 ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0); 2849 break; 2850 case 0x3C: 2851 /* BLBS */ 2852 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1); 2853 break; 2854 case 0x3D: 2855 /* BNE */ 2856 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0); 2857 break; 2858 case 0x3E: 2859 /* BGE */ 2860 ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0); 2861 break; 2862 case 0x3F: 2863 /* BGT */ 2864 ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0); 2865 break; 2866 invalid_opc: 2867 ret = gen_invalid(ctx); 2868 break; 2869 raise_fen: 2870 ret = gen_excp(ctx, EXCP_FEN, 0); 2871 break; 2872 } 2873 2874 return ret; 2875 } 2876 2877 static void alpha_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu) 2878 { 2879 DisasContext *ctx = container_of(dcbase, DisasContext, base); 2880 CPUAlphaState *env = cpu->env_ptr; 2881 int64_t bound; 2882 2883 ctx->tbflags = ctx->base.tb->flags; 2884 ctx->mem_idx = cpu_mmu_index(env, false); 2885 ctx->implver = env->implver; 2886 ctx->amask = env->amask; 2887 2888 #ifdef CONFIG_USER_ONLY 2889 ctx->ir = cpu_std_ir; 2890 ctx->unalign = (ctx->tbflags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN); 2891 #else 2892 ctx->palbr = env->palbr; 2893 ctx->ir = (ctx->tbflags & ENV_FLAG_PAL_MODE ? cpu_pal_ir : cpu_std_ir); 2894 #endif 2895 2896 /* ??? Every TB begins with unset rounding mode, to be initialized on 2897 the first fp insn of the TB. Alternately we could define a proper 2898 default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure 2899 to reset the FP_STATUS to that default at the end of any TB that 2900 changes the default. We could even (gasp) dynamiclly figure out 2901 what default would be most efficient given the running program. */ 2902 ctx->tb_rm = -1; 2903 /* Similarly for flush-to-zero. */ 2904 ctx->tb_ftz = -1; 2905 2906 ctx->zero = NULL; 2907 ctx->sink = NULL; 2908 2909 /* Bound the number of insns to execute to those left on the page. */ 2910 bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4; 2911 ctx->base.max_insns = MIN(ctx->base.max_insns, bound); 2912 } 2913 2914 static void alpha_tr_tb_start(DisasContextBase *db, CPUState *cpu) 2915 { 2916 } 2917 2918 static void alpha_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu) 2919 { 2920 tcg_gen_insn_start(dcbase->pc_next); 2921 } 2922 2923 static void alpha_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) 2924 { 2925 DisasContext *ctx = container_of(dcbase, DisasContext, base); 2926 CPUAlphaState *env = cpu->env_ptr; 2927 uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next); 2928 2929 ctx->base.pc_next += 4; 2930 ctx->base.is_jmp = translate_one(ctx, insn); 2931 2932 free_context_temps(ctx); 2933 } 2934 2935 static void alpha_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu) 2936 { 2937 DisasContext *ctx = container_of(dcbase, DisasContext, base); 2938 2939 switch (ctx->base.is_jmp) { 2940 case DISAS_NORETURN: 2941 break; 2942 case DISAS_TOO_MANY: 2943 if (use_goto_tb(ctx, ctx->base.pc_next)) { 2944 tcg_gen_goto_tb(0); 2945 tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next); 2946 tcg_gen_exit_tb(ctx->base.tb, 0); 2947 } 2948 /* FALLTHRU */ 2949 case DISAS_PC_STALE: 2950 tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next); 2951 /* FALLTHRU */ 2952 case DISAS_PC_UPDATED: 2953 tcg_gen_lookup_and_goto_ptr(); 2954 break; 2955 case DISAS_PC_UPDATED_NOCHAIN: 2956 tcg_gen_exit_tb(NULL, 0); 2957 break; 2958 default: 2959 g_assert_not_reached(); 2960 } 2961 } 2962 2963 static void alpha_tr_disas_log(const DisasContextBase *dcbase, 2964 CPUState *cpu, FILE *logfile) 2965 { 2966 fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first)); 2967 target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size); 2968 } 2969 2970 static const TranslatorOps alpha_tr_ops = { 2971 .init_disas_context = alpha_tr_init_disas_context, 2972 .tb_start = alpha_tr_tb_start, 2973 .insn_start = alpha_tr_insn_start, 2974 .translate_insn = alpha_tr_translate_insn, 2975 .tb_stop = alpha_tr_tb_stop, 2976 .disas_log = alpha_tr_disas_log, 2977 }; 2978 2979 void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns, 2980 target_ulong pc, void *host_pc) 2981 { 2982 DisasContext dc; 2983 translator_loop(cpu, tb, max_insns, pc, host_pc, &alpha_tr_ops, &dc.base); 2984 } 2985