1 /* 2 * Alpha emulation cpu translation for qemu. 3 * 4 * Copyright (c) 2007 Jocelyn Mayer 5 * 6 * This library is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU Lesser General Public 8 * License as published by the Free Software Foundation; either 9 * version 2.1 of the License, or (at your option) any later version. 10 * 11 * This library is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * Lesser General Public License for more details. 15 * 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #include "qemu/osdep.h" 21 #include "cpu.h" 22 #include "sysemu/cpus.h" 23 #include "disas/disas.h" 24 #include "qemu/host-utils.h" 25 #include "exec/exec-all.h" 26 #include "tcg/tcg-op.h" 27 #include "exec/helper-proto.h" 28 #include "exec/helper-gen.h" 29 #include "exec/translator.h" 30 #include "exec/log.h" 31 32 #define HELPER_H "helper.h" 33 #include "exec/helper-info.c.inc" 34 #undef HELPER_H 35 36 #undef ALPHA_DEBUG_DISAS 37 #define CONFIG_SOFTFLOAT_INLINE 38 39 #ifdef ALPHA_DEBUG_DISAS 40 # define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__) 41 #else 42 # define LOG_DISAS(...) do { } while (0) 43 #endif 44 45 typedef struct DisasContext DisasContext; 46 struct DisasContext { 47 DisasContextBase base; 48 49 #ifdef CONFIG_USER_ONLY 50 MemOp unalign; 51 #else 52 uint64_t palbr; 53 #endif 54 uint32_t tbflags; 55 int mem_idx; 56 57 /* implver and amask values for this CPU. */ 58 int implver; 59 int amask; 60 61 /* Current rounding mode for this TB. */ 62 int tb_rm; 63 /* Current flush-to-zero setting for this TB. */ 64 int tb_ftz; 65 66 /* The set of registers active in the current context. */ 67 TCGv *ir; 68 69 /* Temporaries for $31 and $f31 as source and destination. */ 70 TCGv zero; 71 TCGv sink; 72 }; 73 74 #ifdef CONFIG_USER_ONLY 75 #define UNALIGN(C) (C)->unalign 76 #else 77 #define UNALIGN(C) MO_ALIGN 78 #endif 79 80 /* Target-specific return values from translate_one, indicating the 81 state of the TB. Note that DISAS_NEXT indicates that we are not 82 exiting the TB. */ 83 #define DISAS_PC_UPDATED_NOCHAIN DISAS_TARGET_0 84 #define DISAS_PC_UPDATED DISAS_TARGET_1 85 #define DISAS_PC_STALE DISAS_TARGET_2 86 87 /* global register indexes */ 88 static TCGv cpu_std_ir[31]; 89 static TCGv cpu_fir[31]; 90 static TCGv cpu_pc; 91 static TCGv cpu_lock_addr; 92 static TCGv cpu_lock_value; 93 94 #ifndef CONFIG_USER_ONLY 95 static TCGv cpu_pal_ir[31]; 96 #endif 97 98 void alpha_translate_init(void) 99 { 100 #define DEF_VAR(V) { &cpu_##V, #V, offsetof(CPUAlphaState, V) } 101 102 typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar; 103 static const GlobalVar vars[] = { 104 DEF_VAR(pc), 105 DEF_VAR(lock_addr), 106 DEF_VAR(lock_value), 107 }; 108 109 #undef DEF_VAR 110 111 /* Use the symbolic register names that match the disassembler. */ 112 static const char greg_names[31][4] = { 113 "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6", 114 "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp", 115 "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9", 116 "t10", "t11", "ra", "t12", "at", "gp", "sp" 117 }; 118 static const char freg_names[31][4] = { 119 "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", 120 "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15", 121 "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23", 122 "f24", "f25", "f26", "f27", "f28", "f29", "f30" 123 }; 124 #ifndef CONFIG_USER_ONLY 125 static const char shadow_names[8][8] = { 126 "pal_t7", "pal_s0", "pal_s1", "pal_s2", 127 "pal_s3", "pal_s4", "pal_s5", "pal_t11" 128 }; 129 #endif 130 131 int i; 132 133 for (i = 0; i < 31; i++) { 134 cpu_std_ir[i] = tcg_global_mem_new_i64(tcg_env, 135 offsetof(CPUAlphaState, ir[i]), 136 greg_names[i]); 137 } 138 139 for (i = 0; i < 31; i++) { 140 cpu_fir[i] = tcg_global_mem_new_i64(tcg_env, 141 offsetof(CPUAlphaState, fir[i]), 142 freg_names[i]); 143 } 144 145 #ifndef CONFIG_USER_ONLY 146 memcpy(cpu_pal_ir, cpu_std_ir, sizeof(cpu_pal_ir)); 147 for (i = 0; i < 8; i++) { 148 int r = (i == 7 ? 25 : i + 8); 149 cpu_pal_ir[r] = tcg_global_mem_new_i64(tcg_env, 150 offsetof(CPUAlphaState, 151 shadow[i]), 152 shadow_names[i]); 153 } 154 #endif 155 156 for (i = 0; i < ARRAY_SIZE(vars); ++i) { 157 const GlobalVar *v = &vars[i]; 158 *v->var = tcg_global_mem_new_i64(tcg_env, v->ofs, v->name); 159 } 160 } 161 162 static TCGv load_zero(DisasContext *ctx) 163 { 164 if (!ctx->zero) { 165 ctx->zero = tcg_constant_i64(0); 166 } 167 return ctx->zero; 168 } 169 170 static TCGv dest_sink(DisasContext *ctx) 171 { 172 if (!ctx->sink) { 173 ctx->sink = tcg_temp_new(); 174 } 175 return ctx->sink; 176 } 177 178 static void free_context_temps(DisasContext *ctx) 179 { 180 if (ctx->sink) { 181 tcg_gen_discard_i64(ctx->sink); 182 ctx->sink = NULL; 183 } 184 } 185 186 static TCGv load_gpr(DisasContext *ctx, unsigned reg) 187 { 188 if (likely(reg < 31)) { 189 return ctx->ir[reg]; 190 } else { 191 return load_zero(ctx); 192 } 193 } 194 195 static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg, 196 uint8_t lit, bool islit) 197 { 198 if (islit) { 199 return tcg_constant_i64(lit); 200 } else if (likely(reg < 31)) { 201 return ctx->ir[reg]; 202 } else { 203 return load_zero(ctx); 204 } 205 } 206 207 static TCGv dest_gpr(DisasContext *ctx, unsigned reg) 208 { 209 if (likely(reg < 31)) { 210 return ctx->ir[reg]; 211 } else { 212 return dest_sink(ctx); 213 } 214 } 215 216 static TCGv load_fpr(DisasContext *ctx, unsigned reg) 217 { 218 if (likely(reg < 31)) { 219 return cpu_fir[reg]; 220 } else { 221 return load_zero(ctx); 222 } 223 } 224 225 static TCGv dest_fpr(DisasContext *ctx, unsigned reg) 226 { 227 if (likely(reg < 31)) { 228 return cpu_fir[reg]; 229 } else { 230 return dest_sink(ctx); 231 } 232 } 233 234 static int get_flag_ofs(unsigned shift) 235 { 236 int ofs = offsetof(CPUAlphaState, flags); 237 #if HOST_BIG_ENDIAN 238 ofs += 3 - (shift / 8); 239 #else 240 ofs += shift / 8; 241 #endif 242 return ofs; 243 } 244 245 static void ld_flag_byte(TCGv val, unsigned shift) 246 { 247 tcg_gen_ld8u_i64(val, tcg_env, get_flag_ofs(shift)); 248 } 249 250 static void st_flag_byte(TCGv val, unsigned shift) 251 { 252 tcg_gen_st8_i64(val, tcg_env, get_flag_ofs(shift)); 253 } 254 255 static void gen_excp_1(int exception, int error_code) 256 { 257 TCGv_i32 tmp1, tmp2; 258 259 tmp1 = tcg_constant_i32(exception); 260 tmp2 = tcg_constant_i32(error_code); 261 gen_helper_excp(tcg_env, tmp1, tmp2); 262 } 263 264 static DisasJumpType gen_excp(DisasContext *ctx, int exception, int error_code) 265 { 266 tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next); 267 gen_excp_1(exception, error_code); 268 return DISAS_NORETURN; 269 } 270 271 static inline DisasJumpType gen_invalid(DisasContext *ctx) 272 { 273 return gen_excp(ctx, EXCP_OPCDEC, 0); 274 } 275 276 static void gen_ldf(DisasContext *ctx, TCGv dest, TCGv addr) 277 { 278 TCGv_i32 tmp32 = tcg_temp_new_i32(); 279 tcg_gen_qemu_ld_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx)); 280 gen_helper_memory_to_f(dest, tmp32); 281 } 282 283 static void gen_ldg(DisasContext *ctx, TCGv dest, TCGv addr) 284 { 285 TCGv tmp = tcg_temp_new(); 286 tcg_gen_qemu_ld_i64(tmp, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx)); 287 gen_helper_memory_to_g(dest, tmp); 288 } 289 290 static void gen_lds(DisasContext *ctx, TCGv dest, TCGv addr) 291 { 292 TCGv_i32 tmp32 = tcg_temp_new_i32(); 293 tcg_gen_qemu_ld_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx)); 294 gen_helper_memory_to_s(dest, tmp32); 295 } 296 297 static void gen_ldt(DisasContext *ctx, TCGv dest, TCGv addr) 298 { 299 tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx)); 300 } 301 302 static void gen_load_fp(DisasContext *ctx, int ra, int rb, int32_t disp16, 303 void (*func)(DisasContext *, TCGv, TCGv)) 304 { 305 /* Loads to $f31 are prefetches, which we can treat as nops. */ 306 if (likely(ra != 31)) { 307 TCGv addr = tcg_temp_new(); 308 tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16); 309 func(ctx, cpu_fir[ra], addr); 310 } 311 } 312 313 static void gen_load_int(DisasContext *ctx, int ra, int rb, int32_t disp16, 314 MemOp op, bool clear, bool locked) 315 { 316 TCGv addr, dest; 317 318 /* LDQ_U with ra $31 is UNOP. Other various loads are forms of 319 prefetches, which we can treat as nops. No worries about 320 missed exceptions here. */ 321 if (unlikely(ra == 31)) { 322 return; 323 } 324 325 addr = tcg_temp_new(); 326 tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16); 327 if (clear) { 328 tcg_gen_andi_i64(addr, addr, ~0x7); 329 } else if (!locked) { 330 op |= UNALIGN(ctx); 331 } 332 333 dest = ctx->ir[ra]; 334 tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, op); 335 336 if (locked) { 337 tcg_gen_mov_i64(cpu_lock_addr, addr); 338 tcg_gen_mov_i64(cpu_lock_value, dest); 339 } 340 } 341 342 static void gen_stf(DisasContext *ctx, TCGv src, TCGv addr) 343 { 344 TCGv_i32 tmp32 = tcg_temp_new_i32(); 345 gen_helper_f_to_memory(tmp32, addr); 346 tcg_gen_qemu_st_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx)); 347 } 348 349 static void gen_stg(DisasContext *ctx, TCGv src, TCGv addr) 350 { 351 TCGv tmp = tcg_temp_new(); 352 gen_helper_g_to_memory(tmp, src); 353 tcg_gen_qemu_st_i64(tmp, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx)); 354 } 355 356 static void gen_sts(DisasContext *ctx, TCGv src, TCGv addr) 357 { 358 TCGv_i32 tmp32 = tcg_temp_new_i32(); 359 gen_helper_s_to_memory(tmp32, src); 360 tcg_gen_qemu_st_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx)); 361 } 362 363 static void gen_stt(DisasContext *ctx, TCGv src, TCGv addr) 364 { 365 tcg_gen_qemu_st_i64(src, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx)); 366 } 367 368 static void gen_store_fp(DisasContext *ctx, int ra, int rb, int32_t disp16, 369 void (*func)(DisasContext *, TCGv, TCGv)) 370 { 371 TCGv addr = tcg_temp_new(); 372 tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16); 373 func(ctx, load_fpr(ctx, ra), addr); 374 } 375 376 static void gen_store_int(DisasContext *ctx, int ra, int rb, int32_t disp16, 377 MemOp op, bool clear) 378 { 379 TCGv addr, src; 380 381 addr = tcg_temp_new(); 382 tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16); 383 if (clear) { 384 tcg_gen_andi_i64(addr, addr, ~0x7); 385 } else { 386 op |= UNALIGN(ctx); 387 } 388 389 src = load_gpr(ctx, ra); 390 tcg_gen_qemu_st_i64(src, addr, ctx->mem_idx, op); 391 } 392 393 static DisasJumpType gen_store_conditional(DisasContext *ctx, int ra, int rb, 394 int32_t disp16, int mem_idx, 395 MemOp op) 396 { 397 TCGLabel *lab_fail, *lab_done; 398 TCGv addr, val; 399 400 addr = tcg_temp_new_i64(); 401 tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16); 402 free_context_temps(ctx); 403 404 lab_fail = gen_new_label(); 405 lab_done = gen_new_label(); 406 tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail); 407 408 val = tcg_temp_new_i64(); 409 tcg_gen_atomic_cmpxchg_i64(val, cpu_lock_addr, cpu_lock_value, 410 load_gpr(ctx, ra), mem_idx, op); 411 free_context_temps(ctx); 412 413 if (ra != 31) { 414 tcg_gen_setcond_i64(TCG_COND_EQ, ctx->ir[ra], val, cpu_lock_value); 415 } 416 tcg_gen_br(lab_done); 417 418 gen_set_label(lab_fail); 419 if (ra != 31) { 420 tcg_gen_movi_i64(ctx->ir[ra], 0); 421 } 422 423 gen_set_label(lab_done); 424 tcg_gen_movi_i64(cpu_lock_addr, -1); 425 return DISAS_NEXT; 426 } 427 428 static bool use_goto_tb(DisasContext *ctx, uint64_t dest) 429 { 430 return translator_use_goto_tb(&ctx->base, dest); 431 } 432 433 static DisasJumpType gen_bdirect(DisasContext *ctx, int ra, int32_t disp) 434 { 435 uint64_t dest = ctx->base.pc_next + (disp << 2); 436 437 if (ra != 31) { 438 tcg_gen_movi_i64(ctx->ir[ra], ctx->base.pc_next); 439 } 440 441 /* Notice branch-to-next; used to initialize RA with the PC. */ 442 if (disp == 0) { 443 return 0; 444 } else if (use_goto_tb(ctx, dest)) { 445 tcg_gen_goto_tb(0); 446 tcg_gen_movi_i64(cpu_pc, dest); 447 tcg_gen_exit_tb(ctx->base.tb, 0); 448 return DISAS_NORETURN; 449 } else { 450 tcg_gen_movi_i64(cpu_pc, dest); 451 return DISAS_PC_UPDATED; 452 } 453 } 454 455 static DisasJumpType gen_bcond_internal(DisasContext *ctx, TCGCond cond, 456 TCGv cmp, int32_t disp) 457 { 458 uint64_t dest = ctx->base.pc_next + (disp << 2); 459 TCGLabel *lab_true = gen_new_label(); 460 461 if (use_goto_tb(ctx, dest)) { 462 tcg_gen_brcondi_i64(cond, cmp, 0, lab_true); 463 464 tcg_gen_goto_tb(0); 465 tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next); 466 tcg_gen_exit_tb(ctx->base.tb, 0); 467 468 gen_set_label(lab_true); 469 tcg_gen_goto_tb(1); 470 tcg_gen_movi_i64(cpu_pc, dest); 471 tcg_gen_exit_tb(ctx->base.tb, 1); 472 473 return DISAS_NORETURN; 474 } else { 475 TCGv_i64 z = load_zero(ctx); 476 TCGv_i64 d = tcg_constant_i64(dest); 477 TCGv_i64 p = tcg_constant_i64(ctx->base.pc_next); 478 479 tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p); 480 return DISAS_PC_UPDATED; 481 } 482 } 483 484 static DisasJumpType gen_bcond(DisasContext *ctx, TCGCond cond, int ra, 485 int32_t disp, int mask) 486 { 487 if (mask) { 488 TCGv tmp = tcg_temp_new(); 489 DisasJumpType ret; 490 491 tcg_gen_andi_i64(tmp, load_gpr(ctx, ra), 1); 492 ret = gen_bcond_internal(ctx, cond, tmp, disp); 493 return ret; 494 } 495 return gen_bcond_internal(ctx, cond, load_gpr(ctx, ra), disp); 496 } 497 498 /* Fold -0.0 for comparison with COND. */ 499 500 static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src) 501 { 502 uint64_t mzero = 1ull << 63; 503 504 switch (cond) { 505 case TCG_COND_LE: 506 case TCG_COND_GT: 507 /* For <= or >, the -0.0 value directly compares the way we want. */ 508 tcg_gen_mov_i64(dest, src); 509 break; 510 511 case TCG_COND_EQ: 512 case TCG_COND_NE: 513 /* For == or !=, we can simply mask off the sign bit and compare. */ 514 tcg_gen_andi_i64(dest, src, mzero - 1); 515 break; 516 517 case TCG_COND_GE: 518 case TCG_COND_LT: 519 /* For >= or <, map -0.0 to +0.0. */ 520 tcg_gen_movcond_i64(TCG_COND_NE, dest, src, tcg_constant_i64(mzero), 521 src, tcg_constant_i64(0)); 522 break; 523 524 default: 525 abort(); 526 } 527 } 528 529 static DisasJumpType gen_fbcond(DisasContext *ctx, TCGCond cond, int ra, 530 int32_t disp) 531 { 532 TCGv cmp_tmp = tcg_temp_new(); 533 DisasJumpType ret; 534 535 gen_fold_mzero(cond, cmp_tmp, load_fpr(ctx, ra)); 536 ret = gen_bcond_internal(ctx, cond, cmp_tmp, disp); 537 return ret; 538 } 539 540 static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc) 541 { 542 TCGv_i64 va, vb, z; 543 544 z = load_zero(ctx); 545 vb = load_fpr(ctx, rb); 546 va = tcg_temp_new(); 547 gen_fold_mzero(cond, va, load_fpr(ctx, ra)); 548 549 tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), va, z, vb, load_fpr(ctx, rc)); 550 } 551 552 #define QUAL_RM_N 0x080 /* Round mode nearest even */ 553 #define QUAL_RM_C 0x000 /* Round mode chopped */ 554 #define QUAL_RM_M 0x040 /* Round mode minus infinity */ 555 #define QUAL_RM_D 0x0c0 /* Round mode dynamic */ 556 #define QUAL_RM_MASK 0x0c0 557 558 #define QUAL_U 0x100 /* Underflow enable (fp output) */ 559 #define QUAL_V 0x100 /* Overflow enable (int output) */ 560 #define QUAL_S 0x400 /* Software completion enable */ 561 #define QUAL_I 0x200 /* Inexact detection enable */ 562 563 static void gen_qual_roundmode(DisasContext *ctx, int fn11) 564 { 565 TCGv_i32 tmp; 566 567 fn11 &= QUAL_RM_MASK; 568 if (fn11 == ctx->tb_rm) { 569 return; 570 } 571 ctx->tb_rm = fn11; 572 573 tmp = tcg_temp_new_i32(); 574 switch (fn11) { 575 case QUAL_RM_N: 576 tcg_gen_movi_i32(tmp, float_round_nearest_even); 577 break; 578 case QUAL_RM_C: 579 tcg_gen_movi_i32(tmp, float_round_to_zero); 580 break; 581 case QUAL_RM_M: 582 tcg_gen_movi_i32(tmp, float_round_down); 583 break; 584 case QUAL_RM_D: 585 tcg_gen_ld8u_i32(tmp, tcg_env, 586 offsetof(CPUAlphaState, fpcr_dyn_round)); 587 break; 588 } 589 590 #if defined(CONFIG_SOFTFLOAT_INLINE) 591 /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode. 592 With CONFIG_SOFTFLOAT that expands to an out-of-line call that just 593 sets the one field. */ 594 tcg_gen_st8_i32(tmp, tcg_env, 595 offsetof(CPUAlphaState, fp_status.float_rounding_mode)); 596 #else 597 gen_helper_setroundmode(tmp); 598 #endif 599 } 600 601 static void gen_qual_flushzero(DisasContext *ctx, int fn11) 602 { 603 TCGv_i32 tmp; 604 605 fn11 &= QUAL_U; 606 if (fn11 == ctx->tb_ftz) { 607 return; 608 } 609 ctx->tb_ftz = fn11; 610 611 tmp = tcg_temp_new_i32(); 612 if (fn11) { 613 /* Underflow is enabled, use the FPCR setting. */ 614 tcg_gen_ld8u_i32(tmp, tcg_env, 615 offsetof(CPUAlphaState, fpcr_flush_to_zero)); 616 } else { 617 /* Underflow is disabled, force flush-to-zero. */ 618 tcg_gen_movi_i32(tmp, 1); 619 } 620 621 #if defined(CONFIG_SOFTFLOAT_INLINE) 622 tcg_gen_st8_i32(tmp, tcg_env, 623 offsetof(CPUAlphaState, fp_status.flush_to_zero)); 624 #else 625 gen_helper_setflushzero(tmp); 626 #endif 627 } 628 629 static TCGv gen_ieee_input(DisasContext *ctx, int reg, int fn11, int is_cmp) 630 { 631 TCGv val; 632 633 if (unlikely(reg == 31)) { 634 val = load_zero(ctx); 635 } else { 636 val = cpu_fir[reg]; 637 if ((fn11 & QUAL_S) == 0) { 638 if (is_cmp) { 639 gen_helper_ieee_input_cmp(tcg_env, val); 640 } else { 641 gen_helper_ieee_input(tcg_env, val); 642 } 643 } else { 644 #ifndef CONFIG_USER_ONLY 645 /* In system mode, raise exceptions for denormals like real 646 hardware. In user mode, proceed as if the OS completion 647 handler is handling the denormal as per spec. */ 648 gen_helper_ieee_input_s(tcg_env, val); 649 #endif 650 } 651 } 652 return val; 653 } 654 655 static void gen_fp_exc_raise(int rc, int fn11) 656 { 657 /* ??? We ought to be able to do something with imprecise exceptions. 658 E.g. notice we're still in the trap shadow of something within the 659 TB and do not generate the code to signal the exception; end the TB 660 when an exception is forced to arrive, either by consumption of a 661 register value or TRAPB or EXCB. */ 662 TCGv_i32 reg, ign; 663 uint32_t ignore = 0; 664 665 if (!(fn11 & QUAL_U)) { 666 /* Note that QUAL_U == QUAL_V, so ignore either. */ 667 ignore |= FPCR_UNF | FPCR_IOV; 668 } 669 if (!(fn11 & QUAL_I)) { 670 ignore |= FPCR_INE; 671 } 672 ign = tcg_constant_i32(ignore); 673 674 /* ??? Pass in the regno of the destination so that the helper can 675 set EXC_MASK, which contains a bitmask of destination registers 676 that have caused arithmetic traps. A simple userspace emulation 677 does not require this. We do need it for a guest kernel's entArith, 678 or if we were to do something clever with imprecise exceptions. */ 679 reg = tcg_constant_i32(rc + 32); 680 if (fn11 & QUAL_S) { 681 gen_helper_fp_exc_raise_s(tcg_env, ign, reg); 682 } else { 683 gen_helper_fp_exc_raise(tcg_env, ign, reg); 684 } 685 } 686 687 static void gen_cvtlq(TCGv vc, TCGv vb) 688 { 689 TCGv tmp = tcg_temp_new(); 690 691 /* The arithmetic right shift here, plus the sign-extended mask below 692 yields a sign-extended result without an explicit ext32s_i64. */ 693 tcg_gen_shri_i64(tmp, vb, 29); 694 tcg_gen_sari_i64(vc, vb, 32); 695 tcg_gen_deposit_i64(vc, vc, tmp, 0, 30); 696 } 697 698 static void gen_ieee_arith2(DisasContext *ctx, 699 void (*helper)(TCGv, TCGv_ptr, TCGv), 700 int rb, int rc, int fn11) 701 { 702 TCGv vb; 703 704 gen_qual_roundmode(ctx, fn11); 705 gen_qual_flushzero(ctx, fn11); 706 707 vb = gen_ieee_input(ctx, rb, fn11, 0); 708 helper(dest_fpr(ctx, rc), tcg_env, vb); 709 710 gen_fp_exc_raise(rc, fn11); 711 } 712 713 #define IEEE_ARITH2(name) \ 714 static inline void glue(gen_, name)(DisasContext *ctx, \ 715 int rb, int rc, int fn11) \ 716 { \ 717 gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11); \ 718 } 719 IEEE_ARITH2(sqrts) 720 IEEE_ARITH2(sqrtt) 721 IEEE_ARITH2(cvtst) 722 IEEE_ARITH2(cvtts) 723 724 static void gen_cvttq(DisasContext *ctx, int rb, int rc, int fn11) 725 { 726 TCGv vb, vc; 727 728 /* No need to set flushzero, since we have an integer output. */ 729 vb = gen_ieee_input(ctx, rb, fn11, 0); 730 vc = dest_fpr(ctx, rc); 731 732 /* Almost all integer conversions use cropped rounding; 733 special case that. */ 734 if ((fn11 & QUAL_RM_MASK) == QUAL_RM_C) { 735 gen_helper_cvttq_c(vc, tcg_env, vb); 736 } else { 737 gen_qual_roundmode(ctx, fn11); 738 gen_helper_cvttq(vc, tcg_env, vb); 739 } 740 gen_fp_exc_raise(rc, fn11); 741 } 742 743 static void gen_ieee_intcvt(DisasContext *ctx, 744 void (*helper)(TCGv, TCGv_ptr, TCGv), 745 int rb, int rc, int fn11) 746 { 747 TCGv vb, vc; 748 749 gen_qual_roundmode(ctx, fn11); 750 vb = load_fpr(ctx, rb); 751 vc = dest_fpr(ctx, rc); 752 753 /* The only exception that can be raised by integer conversion 754 is inexact. Thus we only need to worry about exceptions when 755 inexact handling is requested. */ 756 if (fn11 & QUAL_I) { 757 helper(vc, tcg_env, vb); 758 gen_fp_exc_raise(rc, fn11); 759 } else { 760 helper(vc, tcg_env, vb); 761 } 762 } 763 764 #define IEEE_INTCVT(name) \ 765 static inline void glue(gen_, name)(DisasContext *ctx, \ 766 int rb, int rc, int fn11) \ 767 { \ 768 gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11); \ 769 } 770 IEEE_INTCVT(cvtqs) 771 IEEE_INTCVT(cvtqt) 772 773 static void gen_cpy_mask(TCGv vc, TCGv va, TCGv vb, bool inv_a, uint64_t mask) 774 { 775 TCGv vmask = tcg_constant_i64(mask); 776 TCGv tmp = tcg_temp_new_i64(); 777 778 if (inv_a) { 779 tcg_gen_andc_i64(tmp, vmask, va); 780 } else { 781 tcg_gen_and_i64(tmp, va, vmask); 782 } 783 784 tcg_gen_andc_i64(vc, vb, vmask); 785 tcg_gen_or_i64(vc, vc, tmp); 786 } 787 788 static void gen_ieee_arith3(DisasContext *ctx, 789 void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv), 790 int ra, int rb, int rc, int fn11) 791 { 792 TCGv va, vb, vc; 793 794 gen_qual_roundmode(ctx, fn11); 795 gen_qual_flushzero(ctx, fn11); 796 797 va = gen_ieee_input(ctx, ra, fn11, 0); 798 vb = gen_ieee_input(ctx, rb, fn11, 0); 799 vc = dest_fpr(ctx, rc); 800 helper(vc, tcg_env, va, vb); 801 802 gen_fp_exc_raise(rc, fn11); 803 } 804 805 #define IEEE_ARITH3(name) \ 806 static inline void glue(gen_, name)(DisasContext *ctx, \ 807 int ra, int rb, int rc, int fn11) \ 808 { \ 809 gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11); \ 810 } 811 IEEE_ARITH3(adds) 812 IEEE_ARITH3(subs) 813 IEEE_ARITH3(muls) 814 IEEE_ARITH3(divs) 815 IEEE_ARITH3(addt) 816 IEEE_ARITH3(subt) 817 IEEE_ARITH3(mult) 818 IEEE_ARITH3(divt) 819 820 static void gen_ieee_compare(DisasContext *ctx, 821 void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv), 822 int ra, int rb, int rc, int fn11) 823 { 824 TCGv va, vb, vc; 825 826 va = gen_ieee_input(ctx, ra, fn11, 1); 827 vb = gen_ieee_input(ctx, rb, fn11, 1); 828 vc = dest_fpr(ctx, rc); 829 helper(vc, tcg_env, va, vb); 830 831 gen_fp_exc_raise(rc, fn11); 832 } 833 834 #define IEEE_CMP3(name) \ 835 static inline void glue(gen_, name)(DisasContext *ctx, \ 836 int ra, int rb, int rc, int fn11) \ 837 { \ 838 gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11); \ 839 } 840 IEEE_CMP3(cmptun) 841 IEEE_CMP3(cmpteq) 842 IEEE_CMP3(cmptlt) 843 IEEE_CMP3(cmptle) 844 845 static inline uint64_t zapnot_mask(uint8_t lit) 846 { 847 uint64_t mask = 0; 848 int i; 849 850 for (i = 0; i < 8; ++i) { 851 if ((lit >> i) & 1) { 852 mask |= 0xffull << (i * 8); 853 } 854 } 855 return mask; 856 } 857 858 /* Implement zapnot with an immediate operand, which expands to some 859 form of immediate AND. This is a basic building block in the 860 definition of many of the other byte manipulation instructions. */ 861 static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit) 862 { 863 switch (lit) { 864 case 0x00: 865 tcg_gen_movi_i64(dest, 0); 866 break; 867 case 0x01: 868 tcg_gen_ext8u_i64(dest, src); 869 break; 870 case 0x03: 871 tcg_gen_ext16u_i64(dest, src); 872 break; 873 case 0x0f: 874 tcg_gen_ext32u_i64(dest, src); 875 break; 876 case 0xff: 877 tcg_gen_mov_i64(dest, src); 878 break; 879 default: 880 tcg_gen_andi_i64(dest, src, zapnot_mask(lit)); 881 break; 882 } 883 } 884 885 /* EXTWH, EXTLH, EXTQH */ 886 static void gen_ext_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit, 887 uint8_t lit, uint8_t byte_mask) 888 { 889 if (islit) { 890 int pos = (64 - lit * 8) & 0x3f; 891 int len = cto32(byte_mask) * 8; 892 if (pos < len) { 893 tcg_gen_deposit_z_i64(vc, va, pos, len - pos); 894 } else { 895 tcg_gen_movi_i64(vc, 0); 896 } 897 } else { 898 TCGv tmp = tcg_temp_new(); 899 tcg_gen_shli_i64(tmp, load_gpr(ctx, rb), 3); 900 tcg_gen_neg_i64(tmp, tmp); 901 tcg_gen_andi_i64(tmp, tmp, 0x3f); 902 tcg_gen_shl_i64(vc, va, tmp); 903 } 904 gen_zapnoti(vc, vc, byte_mask); 905 } 906 907 /* EXTBL, EXTWL, EXTLL, EXTQL */ 908 static void gen_ext_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit, 909 uint8_t lit, uint8_t byte_mask) 910 { 911 if (islit) { 912 int pos = (lit & 7) * 8; 913 int len = cto32(byte_mask) * 8; 914 if (pos + len >= 64) { 915 len = 64 - pos; 916 } 917 tcg_gen_extract_i64(vc, va, pos, len); 918 } else { 919 TCGv tmp = tcg_temp_new(); 920 tcg_gen_andi_i64(tmp, load_gpr(ctx, rb), 7); 921 tcg_gen_shli_i64(tmp, tmp, 3); 922 tcg_gen_shr_i64(vc, va, tmp); 923 gen_zapnoti(vc, vc, byte_mask); 924 } 925 } 926 927 /* INSWH, INSLH, INSQH */ 928 static void gen_ins_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit, 929 uint8_t lit, uint8_t byte_mask) 930 { 931 if (islit) { 932 int pos = 64 - (lit & 7) * 8; 933 int len = cto32(byte_mask) * 8; 934 if (pos < len) { 935 tcg_gen_extract_i64(vc, va, pos, len - pos); 936 } else { 937 tcg_gen_movi_i64(vc, 0); 938 } 939 } else { 940 TCGv tmp = tcg_temp_new(); 941 TCGv shift = tcg_temp_new(); 942 943 /* The instruction description has us left-shift the byte mask 944 and extract bits <15:8> and apply that zap at the end. This 945 is equivalent to simply performing the zap first and shifting 946 afterward. */ 947 gen_zapnoti(tmp, va, byte_mask); 948 949 /* If (B & 7) == 0, we need to shift by 64 and leave a zero. Do this 950 portably by splitting the shift into two parts: shift_count-1 and 1. 951 Arrange for the -1 by using ones-complement instead of 952 twos-complement in the negation: ~(B * 8) & 63. */ 953 954 tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3); 955 tcg_gen_not_i64(shift, shift); 956 tcg_gen_andi_i64(shift, shift, 0x3f); 957 958 tcg_gen_shr_i64(vc, tmp, shift); 959 tcg_gen_shri_i64(vc, vc, 1); 960 } 961 } 962 963 /* INSBL, INSWL, INSLL, INSQL */ 964 static void gen_ins_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit, 965 uint8_t lit, uint8_t byte_mask) 966 { 967 if (islit) { 968 int pos = (lit & 7) * 8; 969 int len = cto32(byte_mask) * 8; 970 if (pos + len > 64) { 971 len = 64 - pos; 972 } 973 tcg_gen_deposit_z_i64(vc, va, pos, len); 974 } else { 975 TCGv tmp = tcg_temp_new(); 976 TCGv shift = tcg_temp_new(); 977 978 /* The instruction description has us left-shift the byte mask 979 and extract bits <15:8> and apply that zap at the end. This 980 is equivalent to simply performing the zap first and shifting 981 afterward. */ 982 gen_zapnoti(tmp, va, byte_mask); 983 984 tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7); 985 tcg_gen_shli_i64(shift, shift, 3); 986 tcg_gen_shl_i64(vc, tmp, shift); 987 } 988 } 989 990 /* MSKWH, MSKLH, MSKQH */ 991 static void gen_msk_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit, 992 uint8_t lit, uint8_t byte_mask) 993 { 994 if (islit) { 995 gen_zapnoti(vc, va, ~((byte_mask << (lit & 7)) >> 8)); 996 } else { 997 TCGv shift = tcg_temp_new(); 998 TCGv mask = tcg_temp_new(); 999 1000 /* The instruction description is as above, where the byte_mask 1001 is shifted left, and then we extract bits <15:8>. This can be 1002 emulated with a right-shift on the expanded byte mask. This 1003 requires extra care because for an input <2:0> == 0 we need a 1004 shift of 64 bits in order to generate a zero. This is done by 1005 splitting the shift into two parts, the variable shift - 1 1006 followed by a constant 1 shift. The code we expand below is 1007 equivalent to ~(B * 8) & 63. */ 1008 1009 tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3); 1010 tcg_gen_not_i64(shift, shift); 1011 tcg_gen_andi_i64(shift, shift, 0x3f); 1012 tcg_gen_movi_i64(mask, zapnot_mask (byte_mask)); 1013 tcg_gen_shr_i64(mask, mask, shift); 1014 tcg_gen_shri_i64(mask, mask, 1); 1015 1016 tcg_gen_andc_i64(vc, va, mask); 1017 } 1018 } 1019 1020 /* MSKBL, MSKWL, MSKLL, MSKQL */ 1021 static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit, 1022 uint8_t lit, uint8_t byte_mask) 1023 { 1024 if (islit) { 1025 gen_zapnoti(vc, va, ~(byte_mask << (lit & 7))); 1026 } else { 1027 TCGv shift = tcg_temp_new(); 1028 TCGv mask = tcg_temp_new(); 1029 1030 tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7); 1031 tcg_gen_shli_i64(shift, shift, 3); 1032 tcg_gen_movi_i64(mask, zapnot_mask(byte_mask)); 1033 tcg_gen_shl_i64(mask, mask, shift); 1034 1035 tcg_gen_andc_i64(vc, va, mask); 1036 } 1037 } 1038 1039 static void gen_rx(DisasContext *ctx, int ra, int set) 1040 { 1041 if (ra != 31) { 1042 ld_flag_byte(ctx->ir[ra], ENV_FLAG_RX_SHIFT); 1043 } 1044 1045 st_flag_byte(tcg_constant_i64(set), ENV_FLAG_RX_SHIFT); 1046 } 1047 1048 static DisasJumpType gen_call_pal(DisasContext *ctx, int palcode) 1049 { 1050 /* We're emulating OSF/1 PALcode. Many of these are trivial access 1051 to internal cpu registers. */ 1052 1053 /* Unprivileged PAL call */ 1054 if (palcode >= 0x80 && palcode < 0xC0) { 1055 switch (palcode) { 1056 case 0x86: 1057 /* IMB */ 1058 /* No-op inside QEMU. */ 1059 break; 1060 case 0x9E: 1061 /* RDUNIQUE */ 1062 tcg_gen_ld_i64(ctx->ir[IR_V0], tcg_env, 1063 offsetof(CPUAlphaState, unique)); 1064 break; 1065 case 0x9F: 1066 /* WRUNIQUE */ 1067 tcg_gen_st_i64(ctx->ir[IR_A0], tcg_env, 1068 offsetof(CPUAlphaState, unique)); 1069 break; 1070 default: 1071 palcode &= 0xbf; 1072 goto do_call_pal; 1073 } 1074 return DISAS_NEXT; 1075 } 1076 1077 #ifndef CONFIG_USER_ONLY 1078 /* Privileged PAL code */ 1079 if (palcode < 0x40 && (ctx->tbflags & ENV_FLAG_PS_USER) == 0) { 1080 switch (palcode) { 1081 case 0x01: 1082 /* CFLUSH */ 1083 /* No-op inside QEMU. */ 1084 break; 1085 case 0x02: 1086 /* DRAINA */ 1087 /* No-op inside QEMU. */ 1088 break; 1089 case 0x2D: 1090 /* WRVPTPTR */ 1091 tcg_gen_st_i64(ctx->ir[IR_A0], tcg_env, 1092 offsetof(CPUAlphaState, vptptr)); 1093 break; 1094 case 0x31: 1095 /* WRVAL */ 1096 tcg_gen_st_i64(ctx->ir[IR_A0], tcg_env, 1097 offsetof(CPUAlphaState, sysval)); 1098 break; 1099 case 0x32: 1100 /* RDVAL */ 1101 tcg_gen_ld_i64(ctx->ir[IR_V0], tcg_env, 1102 offsetof(CPUAlphaState, sysval)); 1103 break; 1104 1105 case 0x35: 1106 /* SWPIPL */ 1107 /* Note that we already know we're in kernel mode, so we know 1108 that PS only contains the 3 IPL bits. */ 1109 ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT); 1110 1111 /* But make sure and store only the 3 IPL bits from the user. */ 1112 { 1113 TCGv tmp = tcg_temp_new(); 1114 tcg_gen_andi_i64(tmp, ctx->ir[IR_A0], PS_INT_MASK); 1115 st_flag_byte(tmp, ENV_FLAG_PS_SHIFT); 1116 } 1117 1118 /* Allow interrupts to be recognized right away. */ 1119 tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next); 1120 return DISAS_PC_UPDATED_NOCHAIN; 1121 1122 case 0x36: 1123 /* RDPS */ 1124 ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT); 1125 break; 1126 1127 case 0x38: 1128 /* WRUSP */ 1129 tcg_gen_st_i64(ctx->ir[IR_A0], tcg_env, 1130 offsetof(CPUAlphaState, usp)); 1131 break; 1132 case 0x3A: 1133 /* RDUSP */ 1134 tcg_gen_ld_i64(ctx->ir[IR_V0], tcg_env, 1135 offsetof(CPUAlphaState, usp)); 1136 break; 1137 case 0x3C: 1138 /* WHAMI */ 1139 tcg_gen_ld32s_i64(ctx->ir[IR_V0], tcg_env, 1140 -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index)); 1141 break; 1142 1143 case 0x3E: 1144 /* WTINT */ 1145 tcg_gen_st_i32(tcg_constant_i32(1), tcg_env, 1146 -offsetof(AlphaCPU, env) + 1147 offsetof(CPUState, halted)); 1148 tcg_gen_movi_i64(ctx->ir[IR_V0], 0); 1149 return gen_excp(ctx, EXCP_HALTED, 0); 1150 1151 default: 1152 palcode &= 0x3f; 1153 goto do_call_pal; 1154 } 1155 return DISAS_NEXT; 1156 } 1157 #endif 1158 return gen_invalid(ctx); 1159 1160 do_call_pal: 1161 #ifdef CONFIG_USER_ONLY 1162 return gen_excp(ctx, EXCP_CALL_PAL, palcode); 1163 #else 1164 { 1165 TCGv tmp = tcg_temp_new(); 1166 uint64_t exc_addr = ctx->base.pc_next; 1167 uint64_t entry = ctx->palbr; 1168 1169 if (ctx->tbflags & ENV_FLAG_PAL_MODE) { 1170 exc_addr |= 1; 1171 } else { 1172 tcg_gen_movi_i64(tmp, 1); 1173 st_flag_byte(tmp, ENV_FLAG_PAL_SHIFT); 1174 } 1175 1176 tcg_gen_movi_i64(tmp, exc_addr); 1177 tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUAlphaState, exc_addr)); 1178 1179 entry += (palcode & 0x80 1180 ? 0x2000 + (palcode - 0x80) * 64 1181 : 0x1000 + palcode * 64); 1182 1183 tcg_gen_movi_i64(cpu_pc, entry); 1184 return DISAS_PC_UPDATED; 1185 } 1186 #endif 1187 } 1188 1189 #ifndef CONFIG_USER_ONLY 1190 1191 #define PR_LONG 0x200000 1192 1193 static int cpu_pr_data(int pr) 1194 { 1195 switch (pr) { 1196 case 2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG; 1197 case 3: return offsetof(CPUAlphaState, trap_arg0); 1198 case 4: return offsetof(CPUAlphaState, trap_arg1); 1199 case 5: return offsetof(CPUAlphaState, trap_arg2); 1200 case 6: return offsetof(CPUAlphaState, exc_addr); 1201 case 7: return offsetof(CPUAlphaState, palbr); 1202 case 8: return offsetof(CPUAlphaState, ptbr); 1203 case 9: return offsetof(CPUAlphaState, vptptr); 1204 case 10: return offsetof(CPUAlphaState, unique); 1205 case 11: return offsetof(CPUAlphaState, sysval); 1206 case 12: return offsetof(CPUAlphaState, usp); 1207 1208 case 40 ... 63: 1209 return offsetof(CPUAlphaState, scratch[pr - 40]); 1210 1211 case 251: 1212 return offsetof(CPUAlphaState, alarm_expire); 1213 } 1214 return 0; 1215 } 1216 1217 static DisasJumpType gen_mfpr(DisasContext *ctx, TCGv va, int regno) 1218 { 1219 void (*helper)(TCGv); 1220 int data; 1221 1222 switch (regno) { 1223 case 32 ... 39: 1224 /* Accessing the "non-shadow" general registers. */ 1225 regno = regno == 39 ? 25 : regno - 32 + 8; 1226 tcg_gen_mov_i64(va, cpu_std_ir[regno]); 1227 break; 1228 1229 case 250: /* WALLTIME */ 1230 helper = gen_helper_get_walltime; 1231 goto do_helper; 1232 case 249: /* VMTIME */ 1233 helper = gen_helper_get_vmtime; 1234 do_helper: 1235 if (translator_io_start(&ctx->base)) { 1236 helper(va); 1237 return DISAS_PC_STALE; 1238 } else { 1239 helper(va); 1240 } 1241 break; 1242 1243 case 0: /* PS */ 1244 ld_flag_byte(va, ENV_FLAG_PS_SHIFT); 1245 break; 1246 case 1: /* FEN */ 1247 ld_flag_byte(va, ENV_FLAG_FEN_SHIFT); 1248 break; 1249 1250 default: 1251 /* The basic registers are data only, and unknown registers 1252 are read-zero, write-ignore. */ 1253 data = cpu_pr_data(regno); 1254 if (data == 0) { 1255 tcg_gen_movi_i64(va, 0); 1256 } else if (data & PR_LONG) { 1257 tcg_gen_ld32s_i64(va, tcg_env, data & ~PR_LONG); 1258 } else { 1259 tcg_gen_ld_i64(va, tcg_env, data); 1260 } 1261 break; 1262 } 1263 1264 return DISAS_NEXT; 1265 } 1266 1267 static DisasJumpType gen_mtpr(DisasContext *ctx, TCGv vb, int regno) 1268 { 1269 int data; 1270 DisasJumpType ret = DISAS_NEXT; 1271 1272 switch (regno) { 1273 case 255: 1274 /* TBIA */ 1275 gen_helper_tbia(tcg_env); 1276 break; 1277 1278 case 254: 1279 /* TBIS */ 1280 gen_helper_tbis(tcg_env, vb); 1281 break; 1282 1283 case 253: 1284 /* WAIT */ 1285 tcg_gen_st_i32(tcg_constant_i32(1), tcg_env, 1286 -offsetof(AlphaCPU, env) + offsetof(CPUState, halted)); 1287 return gen_excp(ctx, EXCP_HALTED, 0); 1288 1289 case 252: 1290 /* HALT */ 1291 gen_helper_halt(vb); 1292 return DISAS_PC_STALE; 1293 1294 case 251: 1295 /* ALARM */ 1296 if (translator_io_start(&ctx->base)) { 1297 ret = DISAS_PC_STALE; 1298 } 1299 gen_helper_set_alarm(tcg_env, vb); 1300 break; 1301 1302 case 7: 1303 /* PALBR */ 1304 tcg_gen_st_i64(vb, tcg_env, offsetof(CPUAlphaState, palbr)); 1305 /* Changing the PAL base register implies un-chaining all of the TBs 1306 that ended with a CALL_PAL. Since the base register usually only 1307 changes during boot, flushing everything works well. */ 1308 gen_helper_tb_flush(tcg_env); 1309 return DISAS_PC_STALE; 1310 1311 case 32 ... 39: 1312 /* Accessing the "non-shadow" general registers. */ 1313 regno = regno == 39 ? 25 : regno - 32 + 8; 1314 tcg_gen_mov_i64(cpu_std_ir[regno], vb); 1315 break; 1316 1317 case 0: /* PS */ 1318 st_flag_byte(vb, ENV_FLAG_PS_SHIFT); 1319 break; 1320 case 1: /* FEN */ 1321 st_flag_byte(vb, ENV_FLAG_FEN_SHIFT); 1322 break; 1323 1324 default: 1325 /* The basic registers are data only, and unknown registers 1326 are read-zero, write-ignore. */ 1327 data = cpu_pr_data(regno); 1328 if (data != 0) { 1329 if (data & PR_LONG) { 1330 tcg_gen_st32_i64(vb, tcg_env, data & ~PR_LONG); 1331 } else { 1332 tcg_gen_st_i64(vb, tcg_env, data); 1333 } 1334 } 1335 break; 1336 } 1337 1338 return ret; 1339 } 1340 #endif /* !USER_ONLY*/ 1341 1342 #define REQUIRE_NO_LIT \ 1343 do { \ 1344 if (real_islit) { \ 1345 goto invalid_opc; \ 1346 } \ 1347 } while (0) 1348 1349 #define REQUIRE_AMASK(FLAG) \ 1350 do { \ 1351 if ((ctx->amask & AMASK_##FLAG) == 0) { \ 1352 goto invalid_opc; \ 1353 } \ 1354 } while (0) 1355 1356 #define REQUIRE_TB_FLAG(FLAG) \ 1357 do { \ 1358 if ((ctx->tbflags & (FLAG)) == 0) { \ 1359 goto invalid_opc; \ 1360 } \ 1361 } while (0) 1362 1363 #define REQUIRE_REG_31(WHICH) \ 1364 do { \ 1365 if (WHICH != 31) { \ 1366 goto invalid_opc; \ 1367 } \ 1368 } while (0) 1369 1370 #define REQUIRE_FEN \ 1371 do { \ 1372 if (!(ctx->tbflags & ENV_FLAG_FEN)) { \ 1373 goto raise_fen; \ 1374 } \ 1375 } while (0) 1376 1377 static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) 1378 { 1379 int32_t disp21, disp16, disp12 __attribute__((unused)); 1380 uint16_t fn11; 1381 uint8_t opc, ra, rb, rc, fpfn, fn7, lit; 1382 bool islit, real_islit; 1383 TCGv va, vb, vc, tmp, tmp2; 1384 TCGv_i32 t32; 1385 DisasJumpType ret; 1386 1387 /* Decode all instruction fields */ 1388 opc = extract32(insn, 26, 6); 1389 ra = extract32(insn, 21, 5); 1390 rb = extract32(insn, 16, 5); 1391 rc = extract32(insn, 0, 5); 1392 real_islit = islit = extract32(insn, 12, 1); 1393 lit = extract32(insn, 13, 8); 1394 1395 disp21 = sextract32(insn, 0, 21); 1396 disp16 = sextract32(insn, 0, 16); 1397 disp12 = sextract32(insn, 0, 12); 1398 1399 fn11 = extract32(insn, 5, 11); 1400 fpfn = extract32(insn, 5, 6); 1401 fn7 = extract32(insn, 5, 7); 1402 1403 if (rb == 31 && !islit) { 1404 islit = true; 1405 lit = 0; 1406 } 1407 1408 ret = DISAS_NEXT; 1409 switch (opc) { 1410 case 0x00: 1411 /* CALL_PAL */ 1412 ret = gen_call_pal(ctx, insn & 0x03ffffff); 1413 break; 1414 case 0x01: 1415 /* OPC01 */ 1416 goto invalid_opc; 1417 case 0x02: 1418 /* OPC02 */ 1419 goto invalid_opc; 1420 case 0x03: 1421 /* OPC03 */ 1422 goto invalid_opc; 1423 case 0x04: 1424 /* OPC04 */ 1425 goto invalid_opc; 1426 case 0x05: 1427 /* OPC05 */ 1428 goto invalid_opc; 1429 case 0x06: 1430 /* OPC06 */ 1431 goto invalid_opc; 1432 case 0x07: 1433 /* OPC07 */ 1434 goto invalid_opc; 1435 1436 case 0x09: 1437 /* LDAH */ 1438 disp16 = (uint32_t)disp16 << 16; 1439 /* fall through */ 1440 case 0x08: 1441 /* LDA */ 1442 va = dest_gpr(ctx, ra); 1443 /* It's worth special-casing immediate loads. */ 1444 if (rb == 31) { 1445 tcg_gen_movi_i64(va, disp16); 1446 } else { 1447 tcg_gen_addi_i64(va, load_gpr(ctx, rb), disp16); 1448 } 1449 break; 1450 1451 case 0x0A: 1452 /* LDBU */ 1453 REQUIRE_AMASK(BWX); 1454 gen_load_int(ctx, ra, rb, disp16, MO_UB, 0, 0); 1455 break; 1456 case 0x0B: 1457 /* LDQ_U */ 1458 gen_load_int(ctx, ra, rb, disp16, MO_LEUQ, 1, 0); 1459 break; 1460 case 0x0C: 1461 /* LDWU */ 1462 REQUIRE_AMASK(BWX); 1463 gen_load_int(ctx, ra, rb, disp16, MO_LEUW, 0, 0); 1464 break; 1465 case 0x0D: 1466 /* STW */ 1467 REQUIRE_AMASK(BWX); 1468 gen_store_int(ctx, ra, rb, disp16, MO_LEUW, 0); 1469 break; 1470 case 0x0E: 1471 /* STB */ 1472 REQUIRE_AMASK(BWX); 1473 gen_store_int(ctx, ra, rb, disp16, MO_UB, 0); 1474 break; 1475 case 0x0F: 1476 /* STQ_U */ 1477 gen_store_int(ctx, ra, rb, disp16, MO_LEUQ, 1); 1478 break; 1479 1480 case 0x10: 1481 vc = dest_gpr(ctx, rc); 1482 vb = load_gpr_lit(ctx, rb, lit, islit); 1483 1484 if (ra == 31) { 1485 if (fn7 == 0x00) { 1486 /* Special case ADDL as SEXTL. */ 1487 tcg_gen_ext32s_i64(vc, vb); 1488 break; 1489 } 1490 if (fn7 == 0x29) { 1491 /* Special case SUBQ as NEGQ. */ 1492 tcg_gen_neg_i64(vc, vb); 1493 break; 1494 } 1495 } 1496 1497 va = load_gpr(ctx, ra); 1498 switch (fn7) { 1499 case 0x00: 1500 /* ADDL */ 1501 tcg_gen_add_i64(vc, va, vb); 1502 tcg_gen_ext32s_i64(vc, vc); 1503 break; 1504 case 0x02: 1505 /* S4ADDL */ 1506 tmp = tcg_temp_new(); 1507 tcg_gen_shli_i64(tmp, va, 2); 1508 tcg_gen_add_i64(tmp, tmp, vb); 1509 tcg_gen_ext32s_i64(vc, tmp); 1510 break; 1511 case 0x09: 1512 /* SUBL */ 1513 tcg_gen_sub_i64(vc, va, vb); 1514 tcg_gen_ext32s_i64(vc, vc); 1515 break; 1516 case 0x0B: 1517 /* S4SUBL */ 1518 tmp = tcg_temp_new(); 1519 tcg_gen_shli_i64(tmp, va, 2); 1520 tcg_gen_sub_i64(tmp, tmp, vb); 1521 tcg_gen_ext32s_i64(vc, tmp); 1522 break; 1523 case 0x0F: 1524 /* CMPBGE */ 1525 if (ra == 31) { 1526 /* Special case 0 >= X as X == 0. */ 1527 gen_helper_cmpbe0(vc, vb); 1528 } else { 1529 gen_helper_cmpbge(vc, va, vb); 1530 } 1531 break; 1532 case 0x12: 1533 /* S8ADDL */ 1534 tmp = tcg_temp_new(); 1535 tcg_gen_shli_i64(tmp, va, 3); 1536 tcg_gen_add_i64(tmp, tmp, vb); 1537 tcg_gen_ext32s_i64(vc, tmp); 1538 break; 1539 case 0x1B: 1540 /* S8SUBL */ 1541 tmp = tcg_temp_new(); 1542 tcg_gen_shli_i64(tmp, va, 3); 1543 tcg_gen_sub_i64(tmp, tmp, vb); 1544 tcg_gen_ext32s_i64(vc, tmp); 1545 break; 1546 case 0x1D: 1547 /* CMPULT */ 1548 tcg_gen_setcond_i64(TCG_COND_LTU, vc, va, vb); 1549 break; 1550 case 0x20: 1551 /* ADDQ */ 1552 tcg_gen_add_i64(vc, va, vb); 1553 break; 1554 case 0x22: 1555 /* S4ADDQ */ 1556 tmp = tcg_temp_new(); 1557 tcg_gen_shli_i64(tmp, va, 2); 1558 tcg_gen_add_i64(vc, tmp, vb); 1559 break; 1560 case 0x29: 1561 /* SUBQ */ 1562 tcg_gen_sub_i64(vc, va, vb); 1563 break; 1564 case 0x2B: 1565 /* S4SUBQ */ 1566 tmp = tcg_temp_new(); 1567 tcg_gen_shli_i64(tmp, va, 2); 1568 tcg_gen_sub_i64(vc, tmp, vb); 1569 break; 1570 case 0x2D: 1571 /* CMPEQ */ 1572 tcg_gen_setcond_i64(TCG_COND_EQ, vc, va, vb); 1573 break; 1574 case 0x32: 1575 /* S8ADDQ */ 1576 tmp = tcg_temp_new(); 1577 tcg_gen_shli_i64(tmp, va, 3); 1578 tcg_gen_add_i64(vc, tmp, vb); 1579 break; 1580 case 0x3B: 1581 /* S8SUBQ */ 1582 tmp = tcg_temp_new(); 1583 tcg_gen_shli_i64(tmp, va, 3); 1584 tcg_gen_sub_i64(vc, tmp, vb); 1585 break; 1586 case 0x3D: 1587 /* CMPULE */ 1588 tcg_gen_setcond_i64(TCG_COND_LEU, vc, va, vb); 1589 break; 1590 case 0x40: 1591 /* ADDL/V */ 1592 tmp = tcg_temp_new(); 1593 tcg_gen_ext32s_i64(tmp, va); 1594 tcg_gen_ext32s_i64(vc, vb); 1595 tcg_gen_add_i64(tmp, tmp, vc); 1596 tcg_gen_ext32s_i64(vc, tmp); 1597 gen_helper_check_overflow(tcg_env, vc, tmp); 1598 break; 1599 case 0x49: 1600 /* SUBL/V */ 1601 tmp = tcg_temp_new(); 1602 tcg_gen_ext32s_i64(tmp, va); 1603 tcg_gen_ext32s_i64(vc, vb); 1604 tcg_gen_sub_i64(tmp, tmp, vc); 1605 tcg_gen_ext32s_i64(vc, tmp); 1606 gen_helper_check_overflow(tcg_env, vc, tmp); 1607 break; 1608 case 0x4D: 1609 /* CMPLT */ 1610 tcg_gen_setcond_i64(TCG_COND_LT, vc, va, vb); 1611 break; 1612 case 0x60: 1613 /* ADDQ/V */ 1614 tmp = tcg_temp_new(); 1615 tmp2 = tcg_temp_new(); 1616 tcg_gen_eqv_i64(tmp, va, vb); 1617 tcg_gen_mov_i64(tmp2, va); 1618 tcg_gen_add_i64(vc, va, vb); 1619 tcg_gen_xor_i64(tmp2, tmp2, vc); 1620 tcg_gen_and_i64(tmp, tmp, tmp2); 1621 tcg_gen_shri_i64(tmp, tmp, 63); 1622 tcg_gen_movi_i64(tmp2, 0); 1623 gen_helper_check_overflow(tcg_env, tmp, tmp2); 1624 break; 1625 case 0x69: 1626 /* SUBQ/V */ 1627 tmp = tcg_temp_new(); 1628 tmp2 = tcg_temp_new(); 1629 tcg_gen_xor_i64(tmp, va, vb); 1630 tcg_gen_mov_i64(tmp2, va); 1631 tcg_gen_sub_i64(vc, va, vb); 1632 tcg_gen_xor_i64(tmp2, tmp2, vc); 1633 tcg_gen_and_i64(tmp, tmp, tmp2); 1634 tcg_gen_shri_i64(tmp, tmp, 63); 1635 tcg_gen_movi_i64(tmp2, 0); 1636 gen_helper_check_overflow(tcg_env, tmp, tmp2); 1637 break; 1638 case 0x6D: 1639 /* CMPLE */ 1640 tcg_gen_setcond_i64(TCG_COND_LE, vc, va, vb); 1641 break; 1642 default: 1643 goto invalid_opc; 1644 } 1645 break; 1646 1647 case 0x11: 1648 if (fn7 == 0x20) { 1649 if (rc == 31) { 1650 /* Special case BIS as NOP. */ 1651 break; 1652 } 1653 if (ra == 31) { 1654 /* Special case BIS as MOV. */ 1655 vc = dest_gpr(ctx, rc); 1656 if (islit) { 1657 tcg_gen_movi_i64(vc, lit); 1658 } else { 1659 tcg_gen_mov_i64(vc, load_gpr(ctx, rb)); 1660 } 1661 break; 1662 } 1663 } 1664 1665 vc = dest_gpr(ctx, rc); 1666 vb = load_gpr_lit(ctx, rb, lit, islit); 1667 1668 if (fn7 == 0x28 && ra == 31) { 1669 /* Special case ORNOT as NOT. */ 1670 tcg_gen_not_i64(vc, vb); 1671 break; 1672 } 1673 1674 va = load_gpr(ctx, ra); 1675 switch (fn7) { 1676 case 0x00: 1677 /* AND */ 1678 tcg_gen_and_i64(vc, va, vb); 1679 break; 1680 case 0x08: 1681 /* BIC */ 1682 tcg_gen_andc_i64(vc, va, vb); 1683 break; 1684 case 0x14: 1685 /* CMOVLBS */ 1686 tmp = tcg_temp_new(); 1687 tcg_gen_andi_i64(tmp, va, 1); 1688 tcg_gen_movcond_i64(TCG_COND_NE, vc, tmp, load_zero(ctx), 1689 vb, load_gpr(ctx, rc)); 1690 break; 1691 case 0x16: 1692 /* CMOVLBC */ 1693 tmp = tcg_temp_new(); 1694 tcg_gen_andi_i64(tmp, va, 1); 1695 tcg_gen_movcond_i64(TCG_COND_EQ, vc, tmp, load_zero(ctx), 1696 vb, load_gpr(ctx, rc)); 1697 break; 1698 case 0x20: 1699 /* BIS */ 1700 tcg_gen_or_i64(vc, va, vb); 1701 break; 1702 case 0x24: 1703 /* CMOVEQ */ 1704 tcg_gen_movcond_i64(TCG_COND_EQ, vc, va, load_zero(ctx), 1705 vb, load_gpr(ctx, rc)); 1706 break; 1707 case 0x26: 1708 /* CMOVNE */ 1709 tcg_gen_movcond_i64(TCG_COND_NE, vc, va, load_zero(ctx), 1710 vb, load_gpr(ctx, rc)); 1711 break; 1712 case 0x28: 1713 /* ORNOT */ 1714 tcg_gen_orc_i64(vc, va, vb); 1715 break; 1716 case 0x40: 1717 /* XOR */ 1718 tcg_gen_xor_i64(vc, va, vb); 1719 break; 1720 case 0x44: 1721 /* CMOVLT */ 1722 tcg_gen_movcond_i64(TCG_COND_LT, vc, va, load_zero(ctx), 1723 vb, load_gpr(ctx, rc)); 1724 break; 1725 case 0x46: 1726 /* CMOVGE */ 1727 tcg_gen_movcond_i64(TCG_COND_GE, vc, va, load_zero(ctx), 1728 vb, load_gpr(ctx, rc)); 1729 break; 1730 case 0x48: 1731 /* EQV */ 1732 tcg_gen_eqv_i64(vc, va, vb); 1733 break; 1734 case 0x61: 1735 /* AMASK */ 1736 REQUIRE_REG_31(ra); 1737 tcg_gen_andi_i64(vc, vb, ~ctx->amask); 1738 break; 1739 case 0x64: 1740 /* CMOVLE */ 1741 tcg_gen_movcond_i64(TCG_COND_LE, vc, va, load_zero(ctx), 1742 vb, load_gpr(ctx, rc)); 1743 break; 1744 case 0x66: 1745 /* CMOVGT */ 1746 tcg_gen_movcond_i64(TCG_COND_GT, vc, va, load_zero(ctx), 1747 vb, load_gpr(ctx, rc)); 1748 break; 1749 case 0x6C: 1750 /* IMPLVER */ 1751 REQUIRE_REG_31(ra); 1752 tcg_gen_movi_i64(vc, ctx->implver); 1753 break; 1754 default: 1755 goto invalid_opc; 1756 } 1757 break; 1758 1759 case 0x12: 1760 vc = dest_gpr(ctx, rc); 1761 va = load_gpr(ctx, ra); 1762 switch (fn7) { 1763 case 0x02: 1764 /* MSKBL */ 1765 gen_msk_l(ctx, vc, va, rb, islit, lit, 0x01); 1766 break; 1767 case 0x06: 1768 /* EXTBL */ 1769 gen_ext_l(ctx, vc, va, rb, islit, lit, 0x01); 1770 break; 1771 case 0x0B: 1772 /* INSBL */ 1773 gen_ins_l(ctx, vc, va, rb, islit, lit, 0x01); 1774 break; 1775 case 0x12: 1776 /* MSKWL */ 1777 gen_msk_l(ctx, vc, va, rb, islit, lit, 0x03); 1778 break; 1779 case 0x16: 1780 /* EXTWL */ 1781 gen_ext_l(ctx, vc, va, rb, islit, lit, 0x03); 1782 break; 1783 case 0x1B: 1784 /* INSWL */ 1785 gen_ins_l(ctx, vc, va, rb, islit, lit, 0x03); 1786 break; 1787 case 0x22: 1788 /* MSKLL */ 1789 gen_msk_l(ctx, vc, va, rb, islit, lit, 0x0f); 1790 break; 1791 case 0x26: 1792 /* EXTLL */ 1793 gen_ext_l(ctx, vc, va, rb, islit, lit, 0x0f); 1794 break; 1795 case 0x2B: 1796 /* INSLL */ 1797 gen_ins_l(ctx, vc, va, rb, islit, lit, 0x0f); 1798 break; 1799 case 0x30: 1800 /* ZAP */ 1801 if (islit) { 1802 gen_zapnoti(vc, va, ~lit); 1803 } else { 1804 gen_helper_zap(vc, va, load_gpr(ctx, rb)); 1805 } 1806 break; 1807 case 0x31: 1808 /* ZAPNOT */ 1809 if (islit) { 1810 gen_zapnoti(vc, va, lit); 1811 } else { 1812 gen_helper_zapnot(vc, va, load_gpr(ctx, rb)); 1813 } 1814 break; 1815 case 0x32: 1816 /* MSKQL */ 1817 gen_msk_l(ctx, vc, va, rb, islit, lit, 0xff); 1818 break; 1819 case 0x34: 1820 /* SRL */ 1821 if (islit) { 1822 tcg_gen_shri_i64(vc, va, lit & 0x3f); 1823 } else { 1824 tmp = tcg_temp_new(); 1825 vb = load_gpr(ctx, rb); 1826 tcg_gen_andi_i64(tmp, vb, 0x3f); 1827 tcg_gen_shr_i64(vc, va, tmp); 1828 } 1829 break; 1830 case 0x36: 1831 /* EXTQL */ 1832 gen_ext_l(ctx, vc, va, rb, islit, lit, 0xff); 1833 break; 1834 case 0x39: 1835 /* SLL */ 1836 if (islit) { 1837 tcg_gen_shli_i64(vc, va, lit & 0x3f); 1838 } else { 1839 tmp = tcg_temp_new(); 1840 vb = load_gpr(ctx, rb); 1841 tcg_gen_andi_i64(tmp, vb, 0x3f); 1842 tcg_gen_shl_i64(vc, va, tmp); 1843 } 1844 break; 1845 case 0x3B: 1846 /* INSQL */ 1847 gen_ins_l(ctx, vc, va, rb, islit, lit, 0xff); 1848 break; 1849 case 0x3C: 1850 /* SRA */ 1851 if (islit) { 1852 tcg_gen_sari_i64(vc, va, lit & 0x3f); 1853 } else { 1854 tmp = tcg_temp_new(); 1855 vb = load_gpr(ctx, rb); 1856 tcg_gen_andi_i64(tmp, vb, 0x3f); 1857 tcg_gen_sar_i64(vc, va, tmp); 1858 } 1859 break; 1860 case 0x52: 1861 /* MSKWH */ 1862 gen_msk_h(ctx, vc, va, rb, islit, lit, 0x03); 1863 break; 1864 case 0x57: 1865 /* INSWH */ 1866 gen_ins_h(ctx, vc, va, rb, islit, lit, 0x03); 1867 break; 1868 case 0x5A: 1869 /* EXTWH */ 1870 gen_ext_h(ctx, vc, va, rb, islit, lit, 0x03); 1871 break; 1872 case 0x62: 1873 /* MSKLH */ 1874 gen_msk_h(ctx, vc, va, rb, islit, lit, 0x0f); 1875 break; 1876 case 0x67: 1877 /* INSLH */ 1878 gen_ins_h(ctx, vc, va, rb, islit, lit, 0x0f); 1879 break; 1880 case 0x6A: 1881 /* EXTLH */ 1882 gen_ext_h(ctx, vc, va, rb, islit, lit, 0x0f); 1883 break; 1884 case 0x72: 1885 /* MSKQH */ 1886 gen_msk_h(ctx, vc, va, rb, islit, lit, 0xff); 1887 break; 1888 case 0x77: 1889 /* INSQH */ 1890 gen_ins_h(ctx, vc, va, rb, islit, lit, 0xff); 1891 break; 1892 case 0x7A: 1893 /* EXTQH */ 1894 gen_ext_h(ctx, vc, va, rb, islit, lit, 0xff); 1895 break; 1896 default: 1897 goto invalid_opc; 1898 } 1899 break; 1900 1901 case 0x13: 1902 vc = dest_gpr(ctx, rc); 1903 vb = load_gpr_lit(ctx, rb, lit, islit); 1904 va = load_gpr(ctx, ra); 1905 switch (fn7) { 1906 case 0x00: 1907 /* MULL */ 1908 tcg_gen_mul_i64(vc, va, vb); 1909 tcg_gen_ext32s_i64(vc, vc); 1910 break; 1911 case 0x20: 1912 /* MULQ */ 1913 tcg_gen_mul_i64(vc, va, vb); 1914 break; 1915 case 0x30: 1916 /* UMULH */ 1917 tmp = tcg_temp_new(); 1918 tcg_gen_mulu2_i64(tmp, vc, va, vb); 1919 break; 1920 case 0x40: 1921 /* MULL/V */ 1922 tmp = tcg_temp_new(); 1923 tcg_gen_ext32s_i64(tmp, va); 1924 tcg_gen_ext32s_i64(vc, vb); 1925 tcg_gen_mul_i64(tmp, tmp, vc); 1926 tcg_gen_ext32s_i64(vc, tmp); 1927 gen_helper_check_overflow(tcg_env, vc, tmp); 1928 break; 1929 case 0x60: 1930 /* MULQ/V */ 1931 tmp = tcg_temp_new(); 1932 tmp2 = tcg_temp_new(); 1933 tcg_gen_muls2_i64(vc, tmp, va, vb); 1934 tcg_gen_sari_i64(tmp2, vc, 63); 1935 gen_helper_check_overflow(tcg_env, tmp, tmp2); 1936 break; 1937 default: 1938 goto invalid_opc; 1939 } 1940 break; 1941 1942 case 0x14: 1943 REQUIRE_AMASK(FIX); 1944 vc = dest_fpr(ctx, rc); 1945 switch (fpfn) { /* fn11 & 0x3F */ 1946 case 0x04: 1947 /* ITOFS */ 1948 REQUIRE_REG_31(rb); 1949 REQUIRE_FEN; 1950 t32 = tcg_temp_new_i32(); 1951 va = load_gpr(ctx, ra); 1952 tcg_gen_extrl_i64_i32(t32, va); 1953 gen_helper_memory_to_s(vc, t32); 1954 break; 1955 case 0x0A: 1956 /* SQRTF */ 1957 REQUIRE_REG_31(ra); 1958 REQUIRE_FEN; 1959 vb = load_fpr(ctx, rb); 1960 gen_helper_sqrtf(vc, tcg_env, vb); 1961 break; 1962 case 0x0B: 1963 /* SQRTS */ 1964 REQUIRE_REG_31(ra); 1965 REQUIRE_FEN; 1966 gen_sqrts(ctx, rb, rc, fn11); 1967 break; 1968 case 0x14: 1969 /* ITOFF */ 1970 REQUIRE_REG_31(rb); 1971 REQUIRE_FEN; 1972 t32 = tcg_temp_new_i32(); 1973 va = load_gpr(ctx, ra); 1974 tcg_gen_extrl_i64_i32(t32, va); 1975 gen_helper_memory_to_f(vc, t32); 1976 break; 1977 case 0x24: 1978 /* ITOFT */ 1979 REQUIRE_REG_31(rb); 1980 REQUIRE_FEN; 1981 va = load_gpr(ctx, ra); 1982 tcg_gen_mov_i64(vc, va); 1983 break; 1984 case 0x2A: 1985 /* SQRTG */ 1986 REQUIRE_REG_31(ra); 1987 REQUIRE_FEN; 1988 vb = load_fpr(ctx, rb); 1989 gen_helper_sqrtg(vc, tcg_env, vb); 1990 break; 1991 case 0x02B: 1992 /* SQRTT */ 1993 REQUIRE_REG_31(ra); 1994 REQUIRE_FEN; 1995 gen_sqrtt(ctx, rb, rc, fn11); 1996 break; 1997 default: 1998 goto invalid_opc; 1999 } 2000 break; 2001 2002 case 0x15: 2003 /* VAX floating point */ 2004 /* XXX: rounding mode and trap are ignored (!) */ 2005 vc = dest_fpr(ctx, rc); 2006 vb = load_fpr(ctx, rb); 2007 va = load_fpr(ctx, ra); 2008 switch (fpfn) { /* fn11 & 0x3F */ 2009 case 0x00: 2010 /* ADDF */ 2011 REQUIRE_FEN; 2012 gen_helper_addf(vc, tcg_env, va, vb); 2013 break; 2014 case 0x01: 2015 /* SUBF */ 2016 REQUIRE_FEN; 2017 gen_helper_subf(vc, tcg_env, va, vb); 2018 break; 2019 case 0x02: 2020 /* MULF */ 2021 REQUIRE_FEN; 2022 gen_helper_mulf(vc, tcg_env, va, vb); 2023 break; 2024 case 0x03: 2025 /* DIVF */ 2026 REQUIRE_FEN; 2027 gen_helper_divf(vc, tcg_env, va, vb); 2028 break; 2029 case 0x1E: 2030 /* CVTDG -- TODO */ 2031 REQUIRE_REG_31(ra); 2032 goto invalid_opc; 2033 case 0x20: 2034 /* ADDG */ 2035 REQUIRE_FEN; 2036 gen_helper_addg(vc, tcg_env, va, vb); 2037 break; 2038 case 0x21: 2039 /* SUBG */ 2040 REQUIRE_FEN; 2041 gen_helper_subg(vc, tcg_env, va, vb); 2042 break; 2043 case 0x22: 2044 /* MULG */ 2045 REQUIRE_FEN; 2046 gen_helper_mulg(vc, tcg_env, va, vb); 2047 break; 2048 case 0x23: 2049 /* DIVG */ 2050 REQUIRE_FEN; 2051 gen_helper_divg(vc, tcg_env, va, vb); 2052 break; 2053 case 0x25: 2054 /* CMPGEQ */ 2055 REQUIRE_FEN; 2056 gen_helper_cmpgeq(vc, tcg_env, va, vb); 2057 break; 2058 case 0x26: 2059 /* CMPGLT */ 2060 REQUIRE_FEN; 2061 gen_helper_cmpglt(vc, tcg_env, va, vb); 2062 break; 2063 case 0x27: 2064 /* CMPGLE */ 2065 REQUIRE_FEN; 2066 gen_helper_cmpgle(vc, tcg_env, va, vb); 2067 break; 2068 case 0x2C: 2069 /* CVTGF */ 2070 REQUIRE_REG_31(ra); 2071 REQUIRE_FEN; 2072 gen_helper_cvtgf(vc, tcg_env, vb); 2073 break; 2074 case 0x2D: 2075 /* CVTGD -- TODO */ 2076 REQUIRE_REG_31(ra); 2077 goto invalid_opc; 2078 case 0x2F: 2079 /* CVTGQ */ 2080 REQUIRE_REG_31(ra); 2081 REQUIRE_FEN; 2082 gen_helper_cvtgq(vc, tcg_env, vb); 2083 break; 2084 case 0x3C: 2085 /* CVTQF */ 2086 REQUIRE_REG_31(ra); 2087 REQUIRE_FEN; 2088 gen_helper_cvtqf(vc, tcg_env, vb); 2089 break; 2090 case 0x3E: 2091 /* CVTQG */ 2092 REQUIRE_REG_31(ra); 2093 REQUIRE_FEN; 2094 gen_helper_cvtqg(vc, tcg_env, vb); 2095 break; 2096 default: 2097 goto invalid_opc; 2098 } 2099 break; 2100 2101 case 0x16: 2102 /* IEEE floating-point */ 2103 switch (fpfn) { /* fn11 & 0x3F */ 2104 case 0x00: 2105 /* ADDS */ 2106 REQUIRE_FEN; 2107 gen_adds(ctx, ra, rb, rc, fn11); 2108 break; 2109 case 0x01: 2110 /* SUBS */ 2111 REQUIRE_FEN; 2112 gen_subs(ctx, ra, rb, rc, fn11); 2113 break; 2114 case 0x02: 2115 /* MULS */ 2116 REQUIRE_FEN; 2117 gen_muls(ctx, ra, rb, rc, fn11); 2118 break; 2119 case 0x03: 2120 /* DIVS */ 2121 REQUIRE_FEN; 2122 gen_divs(ctx, ra, rb, rc, fn11); 2123 break; 2124 case 0x20: 2125 /* ADDT */ 2126 REQUIRE_FEN; 2127 gen_addt(ctx, ra, rb, rc, fn11); 2128 break; 2129 case 0x21: 2130 /* SUBT */ 2131 REQUIRE_FEN; 2132 gen_subt(ctx, ra, rb, rc, fn11); 2133 break; 2134 case 0x22: 2135 /* MULT */ 2136 REQUIRE_FEN; 2137 gen_mult(ctx, ra, rb, rc, fn11); 2138 break; 2139 case 0x23: 2140 /* DIVT */ 2141 REQUIRE_FEN; 2142 gen_divt(ctx, ra, rb, rc, fn11); 2143 break; 2144 case 0x24: 2145 /* CMPTUN */ 2146 REQUIRE_FEN; 2147 gen_cmptun(ctx, ra, rb, rc, fn11); 2148 break; 2149 case 0x25: 2150 /* CMPTEQ */ 2151 REQUIRE_FEN; 2152 gen_cmpteq(ctx, ra, rb, rc, fn11); 2153 break; 2154 case 0x26: 2155 /* CMPTLT */ 2156 REQUIRE_FEN; 2157 gen_cmptlt(ctx, ra, rb, rc, fn11); 2158 break; 2159 case 0x27: 2160 /* CMPTLE */ 2161 REQUIRE_FEN; 2162 gen_cmptle(ctx, ra, rb, rc, fn11); 2163 break; 2164 case 0x2C: 2165 REQUIRE_REG_31(ra); 2166 REQUIRE_FEN; 2167 if (fn11 == 0x2AC || fn11 == 0x6AC) { 2168 /* CVTST */ 2169 gen_cvtst(ctx, rb, rc, fn11); 2170 } else { 2171 /* CVTTS */ 2172 gen_cvtts(ctx, rb, rc, fn11); 2173 } 2174 break; 2175 case 0x2F: 2176 /* CVTTQ */ 2177 REQUIRE_REG_31(ra); 2178 REQUIRE_FEN; 2179 gen_cvttq(ctx, rb, rc, fn11); 2180 break; 2181 case 0x3C: 2182 /* CVTQS */ 2183 REQUIRE_REG_31(ra); 2184 REQUIRE_FEN; 2185 gen_cvtqs(ctx, rb, rc, fn11); 2186 break; 2187 case 0x3E: 2188 /* CVTQT */ 2189 REQUIRE_REG_31(ra); 2190 REQUIRE_FEN; 2191 gen_cvtqt(ctx, rb, rc, fn11); 2192 break; 2193 default: 2194 goto invalid_opc; 2195 } 2196 break; 2197 2198 case 0x17: 2199 switch (fn11) { 2200 case 0x010: 2201 /* CVTLQ */ 2202 REQUIRE_REG_31(ra); 2203 REQUIRE_FEN; 2204 vc = dest_fpr(ctx, rc); 2205 vb = load_fpr(ctx, rb); 2206 gen_cvtlq(vc, vb); 2207 break; 2208 case 0x020: 2209 /* CPYS */ 2210 REQUIRE_FEN; 2211 if (rc == 31) { 2212 /* Special case CPYS as FNOP. */ 2213 } else { 2214 vc = dest_fpr(ctx, rc); 2215 va = load_fpr(ctx, ra); 2216 if (ra == rb) { 2217 /* Special case CPYS as FMOV. */ 2218 tcg_gen_mov_i64(vc, va); 2219 } else { 2220 vb = load_fpr(ctx, rb); 2221 gen_cpy_mask(vc, va, vb, 0, 0x8000000000000000ULL); 2222 } 2223 } 2224 break; 2225 case 0x021: 2226 /* CPYSN */ 2227 REQUIRE_FEN; 2228 vc = dest_fpr(ctx, rc); 2229 vb = load_fpr(ctx, rb); 2230 va = load_fpr(ctx, ra); 2231 gen_cpy_mask(vc, va, vb, 1, 0x8000000000000000ULL); 2232 break; 2233 case 0x022: 2234 /* CPYSE */ 2235 REQUIRE_FEN; 2236 vc = dest_fpr(ctx, rc); 2237 vb = load_fpr(ctx, rb); 2238 va = load_fpr(ctx, ra); 2239 gen_cpy_mask(vc, va, vb, 0, 0xFFF0000000000000ULL); 2240 break; 2241 case 0x024: 2242 /* MT_FPCR */ 2243 REQUIRE_FEN; 2244 va = load_fpr(ctx, ra); 2245 gen_helper_store_fpcr(tcg_env, va); 2246 if (ctx->tb_rm == QUAL_RM_D) { 2247 /* Re-do the copy of the rounding mode to fp_status 2248 the next time we use dynamic rounding. */ 2249 ctx->tb_rm = -1; 2250 } 2251 break; 2252 case 0x025: 2253 /* MF_FPCR */ 2254 REQUIRE_FEN; 2255 va = dest_fpr(ctx, ra); 2256 gen_helper_load_fpcr(va, tcg_env); 2257 break; 2258 case 0x02A: 2259 /* FCMOVEQ */ 2260 REQUIRE_FEN; 2261 gen_fcmov(ctx, TCG_COND_EQ, ra, rb, rc); 2262 break; 2263 case 0x02B: 2264 /* FCMOVNE */ 2265 REQUIRE_FEN; 2266 gen_fcmov(ctx, TCG_COND_NE, ra, rb, rc); 2267 break; 2268 case 0x02C: 2269 /* FCMOVLT */ 2270 REQUIRE_FEN; 2271 gen_fcmov(ctx, TCG_COND_LT, ra, rb, rc); 2272 break; 2273 case 0x02D: 2274 /* FCMOVGE */ 2275 REQUIRE_FEN; 2276 gen_fcmov(ctx, TCG_COND_GE, ra, rb, rc); 2277 break; 2278 case 0x02E: 2279 /* FCMOVLE */ 2280 REQUIRE_FEN; 2281 gen_fcmov(ctx, TCG_COND_LE, ra, rb, rc); 2282 break; 2283 case 0x02F: 2284 /* FCMOVGT */ 2285 REQUIRE_FEN; 2286 gen_fcmov(ctx, TCG_COND_GT, ra, rb, rc); 2287 break; 2288 case 0x030: /* CVTQL */ 2289 case 0x130: /* CVTQL/V */ 2290 case 0x530: /* CVTQL/SV */ 2291 REQUIRE_REG_31(ra); 2292 REQUIRE_FEN; 2293 vc = dest_fpr(ctx, rc); 2294 vb = load_fpr(ctx, rb); 2295 gen_helper_cvtql(vc, tcg_env, vb); 2296 gen_fp_exc_raise(rc, fn11); 2297 break; 2298 default: 2299 goto invalid_opc; 2300 } 2301 break; 2302 2303 case 0x18: 2304 switch ((uint16_t)disp16) { 2305 case 0x0000: 2306 /* TRAPB */ 2307 /* No-op. */ 2308 break; 2309 case 0x0400: 2310 /* EXCB */ 2311 /* No-op. */ 2312 break; 2313 case 0x4000: 2314 /* MB */ 2315 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC); 2316 break; 2317 case 0x4400: 2318 /* WMB */ 2319 tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC); 2320 break; 2321 case 0x8000: 2322 /* FETCH */ 2323 /* No-op */ 2324 break; 2325 case 0xA000: 2326 /* FETCH_M */ 2327 /* No-op */ 2328 break; 2329 case 0xC000: 2330 /* RPCC */ 2331 va = dest_gpr(ctx, ra); 2332 if (translator_io_start(&ctx->base)) { 2333 ret = DISAS_PC_STALE; 2334 } 2335 gen_helper_load_pcc(va, tcg_env); 2336 break; 2337 case 0xE000: 2338 /* RC */ 2339 gen_rx(ctx, ra, 0); 2340 break; 2341 case 0xE800: 2342 /* ECB */ 2343 break; 2344 case 0xF000: 2345 /* RS */ 2346 gen_rx(ctx, ra, 1); 2347 break; 2348 case 0xF800: 2349 /* WH64 */ 2350 /* No-op */ 2351 break; 2352 case 0xFC00: 2353 /* WH64EN */ 2354 /* No-op */ 2355 break; 2356 default: 2357 goto invalid_opc; 2358 } 2359 break; 2360 2361 case 0x19: 2362 /* HW_MFPR (PALcode) */ 2363 #ifndef CONFIG_USER_ONLY 2364 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE); 2365 va = dest_gpr(ctx, ra); 2366 ret = gen_mfpr(ctx, va, insn & 0xffff); 2367 break; 2368 #else 2369 goto invalid_opc; 2370 #endif 2371 2372 case 0x1A: 2373 /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch 2374 prediction stack action, which of course we don't implement. */ 2375 vb = load_gpr(ctx, rb); 2376 tcg_gen_andi_i64(cpu_pc, vb, ~3); 2377 if (ra != 31) { 2378 tcg_gen_movi_i64(ctx->ir[ra], ctx->base.pc_next); 2379 } 2380 ret = DISAS_PC_UPDATED; 2381 break; 2382 2383 case 0x1B: 2384 /* HW_LD (PALcode) */ 2385 #ifndef CONFIG_USER_ONLY 2386 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE); 2387 { 2388 TCGv addr = tcg_temp_new(); 2389 vb = load_gpr(ctx, rb); 2390 va = dest_gpr(ctx, ra); 2391 2392 tcg_gen_addi_i64(addr, vb, disp12); 2393 switch ((insn >> 12) & 0xF) { 2394 case 0x0: 2395 /* Longword physical access (hw_ldl/p) */ 2396 tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL | MO_ALIGN); 2397 break; 2398 case 0x1: 2399 /* Quadword physical access (hw_ldq/p) */ 2400 tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN); 2401 break; 2402 case 0x2: 2403 /* Longword physical access with lock (hw_ldl_l/p) */ 2404 tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL | MO_ALIGN); 2405 tcg_gen_mov_i64(cpu_lock_addr, addr); 2406 tcg_gen_mov_i64(cpu_lock_value, va); 2407 break; 2408 case 0x3: 2409 /* Quadword physical access with lock (hw_ldq_l/p) */ 2410 tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN); 2411 tcg_gen_mov_i64(cpu_lock_addr, addr); 2412 tcg_gen_mov_i64(cpu_lock_value, va); 2413 break; 2414 case 0x4: 2415 /* Longword virtual PTE fetch (hw_ldl/v) */ 2416 goto invalid_opc; 2417 case 0x5: 2418 /* Quadword virtual PTE fetch (hw_ldq/v) */ 2419 goto invalid_opc; 2420 break; 2421 case 0x6: 2422 /* Invalid */ 2423 goto invalid_opc; 2424 case 0x7: 2425 /* Invaliid */ 2426 goto invalid_opc; 2427 case 0x8: 2428 /* Longword virtual access (hw_ldl) */ 2429 goto invalid_opc; 2430 case 0x9: 2431 /* Quadword virtual access (hw_ldq) */ 2432 goto invalid_opc; 2433 case 0xA: 2434 /* Longword virtual access with protection check (hw_ldl/w) */ 2435 tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, 2436 MO_LESL | MO_ALIGN); 2437 break; 2438 case 0xB: 2439 /* Quadword virtual access with protection check (hw_ldq/w) */ 2440 tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, 2441 MO_LEUQ | MO_ALIGN); 2442 break; 2443 case 0xC: 2444 /* Longword virtual access with alt access mode (hw_ldl/a)*/ 2445 goto invalid_opc; 2446 case 0xD: 2447 /* Quadword virtual access with alt access mode (hw_ldq/a) */ 2448 goto invalid_opc; 2449 case 0xE: 2450 /* Longword virtual access with alternate access mode and 2451 protection checks (hw_ldl/wa) */ 2452 tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, 2453 MO_LESL | MO_ALIGN); 2454 break; 2455 case 0xF: 2456 /* Quadword virtual access with alternate access mode and 2457 protection checks (hw_ldq/wa) */ 2458 tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, 2459 MO_LEUQ | MO_ALIGN); 2460 break; 2461 } 2462 break; 2463 } 2464 #else 2465 goto invalid_opc; 2466 #endif 2467 2468 case 0x1C: 2469 vc = dest_gpr(ctx, rc); 2470 if (fn7 == 0x70) { 2471 /* FTOIT */ 2472 REQUIRE_AMASK(FIX); 2473 REQUIRE_REG_31(rb); 2474 va = load_fpr(ctx, ra); 2475 tcg_gen_mov_i64(vc, va); 2476 break; 2477 } else if (fn7 == 0x78) { 2478 /* FTOIS */ 2479 REQUIRE_AMASK(FIX); 2480 REQUIRE_REG_31(rb); 2481 t32 = tcg_temp_new_i32(); 2482 va = load_fpr(ctx, ra); 2483 gen_helper_s_to_memory(t32, va); 2484 tcg_gen_ext_i32_i64(vc, t32); 2485 break; 2486 } 2487 2488 vb = load_gpr_lit(ctx, rb, lit, islit); 2489 switch (fn7) { 2490 case 0x00: 2491 /* SEXTB */ 2492 REQUIRE_AMASK(BWX); 2493 REQUIRE_REG_31(ra); 2494 tcg_gen_ext8s_i64(vc, vb); 2495 break; 2496 case 0x01: 2497 /* SEXTW */ 2498 REQUIRE_AMASK(BWX); 2499 REQUIRE_REG_31(ra); 2500 tcg_gen_ext16s_i64(vc, vb); 2501 break; 2502 case 0x30: 2503 /* CTPOP */ 2504 REQUIRE_AMASK(CIX); 2505 REQUIRE_REG_31(ra); 2506 REQUIRE_NO_LIT; 2507 tcg_gen_ctpop_i64(vc, vb); 2508 break; 2509 case 0x31: 2510 /* PERR */ 2511 REQUIRE_AMASK(MVI); 2512 REQUIRE_NO_LIT; 2513 va = load_gpr(ctx, ra); 2514 gen_helper_perr(vc, va, vb); 2515 break; 2516 case 0x32: 2517 /* CTLZ */ 2518 REQUIRE_AMASK(CIX); 2519 REQUIRE_REG_31(ra); 2520 REQUIRE_NO_LIT; 2521 tcg_gen_clzi_i64(vc, vb, 64); 2522 break; 2523 case 0x33: 2524 /* CTTZ */ 2525 REQUIRE_AMASK(CIX); 2526 REQUIRE_REG_31(ra); 2527 REQUIRE_NO_LIT; 2528 tcg_gen_ctzi_i64(vc, vb, 64); 2529 break; 2530 case 0x34: 2531 /* UNPKBW */ 2532 REQUIRE_AMASK(MVI); 2533 REQUIRE_REG_31(ra); 2534 REQUIRE_NO_LIT; 2535 gen_helper_unpkbw(vc, vb); 2536 break; 2537 case 0x35: 2538 /* UNPKBL */ 2539 REQUIRE_AMASK(MVI); 2540 REQUIRE_REG_31(ra); 2541 REQUIRE_NO_LIT; 2542 gen_helper_unpkbl(vc, vb); 2543 break; 2544 case 0x36: 2545 /* PKWB */ 2546 REQUIRE_AMASK(MVI); 2547 REQUIRE_REG_31(ra); 2548 REQUIRE_NO_LIT; 2549 gen_helper_pkwb(vc, vb); 2550 break; 2551 case 0x37: 2552 /* PKLB */ 2553 REQUIRE_AMASK(MVI); 2554 REQUIRE_REG_31(ra); 2555 REQUIRE_NO_LIT; 2556 gen_helper_pklb(vc, vb); 2557 break; 2558 case 0x38: 2559 /* MINSB8 */ 2560 REQUIRE_AMASK(MVI); 2561 va = load_gpr(ctx, ra); 2562 gen_helper_minsb8(vc, va, vb); 2563 break; 2564 case 0x39: 2565 /* MINSW4 */ 2566 REQUIRE_AMASK(MVI); 2567 va = load_gpr(ctx, ra); 2568 gen_helper_minsw4(vc, va, vb); 2569 break; 2570 case 0x3A: 2571 /* MINUB8 */ 2572 REQUIRE_AMASK(MVI); 2573 va = load_gpr(ctx, ra); 2574 gen_helper_minub8(vc, va, vb); 2575 break; 2576 case 0x3B: 2577 /* MINUW4 */ 2578 REQUIRE_AMASK(MVI); 2579 va = load_gpr(ctx, ra); 2580 gen_helper_minuw4(vc, va, vb); 2581 break; 2582 case 0x3C: 2583 /* MAXUB8 */ 2584 REQUIRE_AMASK(MVI); 2585 va = load_gpr(ctx, ra); 2586 gen_helper_maxub8(vc, va, vb); 2587 break; 2588 case 0x3D: 2589 /* MAXUW4 */ 2590 REQUIRE_AMASK(MVI); 2591 va = load_gpr(ctx, ra); 2592 gen_helper_maxuw4(vc, va, vb); 2593 break; 2594 case 0x3E: 2595 /* MAXSB8 */ 2596 REQUIRE_AMASK(MVI); 2597 va = load_gpr(ctx, ra); 2598 gen_helper_maxsb8(vc, va, vb); 2599 break; 2600 case 0x3F: 2601 /* MAXSW4 */ 2602 REQUIRE_AMASK(MVI); 2603 va = load_gpr(ctx, ra); 2604 gen_helper_maxsw4(vc, va, vb); 2605 break; 2606 default: 2607 goto invalid_opc; 2608 } 2609 break; 2610 2611 case 0x1D: 2612 /* HW_MTPR (PALcode) */ 2613 #ifndef CONFIG_USER_ONLY 2614 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE); 2615 vb = load_gpr(ctx, rb); 2616 ret = gen_mtpr(ctx, vb, insn & 0xffff); 2617 break; 2618 #else 2619 goto invalid_opc; 2620 #endif 2621 2622 case 0x1E: 2623 /* HW_RET (PALcode) */ 2624 #ifndef CONFIG_USER_ONLY 2625 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE); 2626 if (rb == 31) { 2627 /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return 2628 address from EXC_ADDR. This turns out to be useful for our 2629 emulation PALcode, so continue to accept it. */ 2630 vb = dest_sink(ctx); 2631 tcg_gen_ld_i64(vb, tcg_env, offsetof(CPUAlphaState, exc_addr)); 2632 } else { 2633 vb = load_gpr(ctx, rb); 2634 } 2635 tcg_gen_movi_i64(cpu_lock_addr, -1); 2636 st_flag_byte(load_zero(ctx), ENV_FLAG_RX_SHIFT); 2637 tmp = tcg_temp_new(); 2638 tcg_gen_andi_i64(tmp, vb, 1); 2639 st_flag_byte(tmp, ENV_FLAG_PAL_SHIFT); 2640 tcg_gen_andi_i64(cpu_pc, vb, ~3); 2641 /* Allow interrupts to be recognized right away. */ 2642 ret = DISAS_PC_UPDATED_NOCHAIN; 2643 break; 2644 #else 2645 goto invalid_opc; 2646 #endif 2647 2648 case 0x1F: 2649 /* HW_ST (PALcode) */ 2650 #ifndef CONFIG_USER_ONLY 2651 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE); 2652 { 2653 switch ((insn >> 12) & 0xF) { 2654 case 0x0: 2655 /* Longword physical access */ 2656 va = load_gpr(ctx, ra); 2657 vb = load_gpr(ctx, rb); 2658 tmp = tcg_temp_new(); 2659 tcg_gen_addi_i64(tmp, vb, disp12); 2660 tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LESL | MO_ALIGN); 2661 break; 2662 case 0x1: 2663 /* Quadword physical access */ 2664 va = load_gpr(ctx, ra); 2665 vb = load_gpr(ctx, rb); 2666 tmp = tcg_temp_new(); 2667 tcg_gen_addi_i64(tmp, vb, disp12); 2668 tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN); 2669 break; 2670 case 0x2: 2671 /* Longword physical access with lock */ 2672 ret = gen_store_conditional(ctx, ra, rb, disp12, 2673 MMU_PHYS_IDX, MO_LESL | MO_ALIGN); 2674 break; 2675 case 0x3: 2676 /* Quadword physical access with lock */ 2677 ret = gen_store_conditional(ctx, ra, rb, disp12, 2678 MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN); 2679 break; 2680 case 0x4: 2681 /* Longword virtual access */ 2682 goto invalid_opc; 2683 case 0x5: 2684 /* Quadword virtual access */ 2685 goto invalid_opc; 2686 case 0x6: 2687 /* Invalid */ 2688 goto invalid_opc; 2689 case 0x7: 2690 /* Invalid */ 2691 goto invalid_opc; 2692 case 0x8: 2693 /* Invalid */ 2694 goto invalid_opc; 2695 case 0x9: 2696 /* Invalid */ 2697 goto invalid_opc; 2698 case 0xA: 2699 /* Invalid */ 2700 goto invalid_opc; 2701 case 0xB: 2702 /* Invalid */ 2703 goto invalid_opc; 2704 case 0xC: 2705 /* Longword virtual access with alternate access mode */ 2706 goto invalid_opc; 2707 case 0xD: 2708 /* Quadword virtual access with alternate access mode */ 2709 goto invalid_opc; 2710 case 0xE: 2711 /* Invalid */ 2712 goto invalid_opc; 2713 case 0xF: 2714 /* Invalid */ 2715 goto invalid_opc; 2716 } 2717 break; 2718 } 2719 #else 2720 goto invalid_opc; 2721 #endif 2722 case 0x20: 2723 /* LDF */ 2724 REQUIRE_FEN; 2725 gen_load_fp(ctx, ra, rb, disp16, gen_ldf); 2726 break; 2727 case 0x21: 2728 /* LDG */ 2729 REQUIRE_FEN; 2730 gen_load_fp(ctx, ra, rb, disp16, gen_ldg); 2731 break; 2732 case 0x22: 2733 /* LDS */ 2734 REQUIRE_FEN; 2735 gen_load_fp(ctx, ra, rb, disp16, gen_lds); 2736 break; 2737 case 0x23: 2738 /* LDT */ 2739 REQUIRE_FEN; 2740 gen_load_fp(ctx, ra, rb, disp16, gen_ldt); 2741 break; 2742 case 0x24: 2743 /* STF */ 2744 REQUIRE_FEN; 2745 gen_store_fp(ctx, ra, rb, disp16, gen_stf); 2746 break; 2747 case 0x25: 2748 /* STG */ 2749 REQUIRE_FEN; 2750 gen_store_fp(ctx, ra, rb, disp16, gen_stg); 2751 break; 2752 case 0x26: 2753 /* STS */ 2754 REQUIRE_FEN; 2755 gen_store_fp(ctx, ra, rb, disp16, gen_sts); 2756 break; 2757 case 0x27: 2758 /* STT */ 2759 REQUIRE_FEN; 2760 gen_store_fp(ctx, ra, rb, disp16, gen_stt); 2761 break; 2762 case 0x28: 2763 /* LDL */ 2764 gen_load_int(ctx, ra, rb, disp16, MO_LESL, 0, 0); 2765 break; 2766 case 0x29: 2767 /* LDQ */ 2768 gen_load_int(ctx, ra, rb, disp16, MO_LEUQ, 0, 0); 2769 break; 2770 case 0x2A: 2771 /* LDL_L */ 2772 gen_load_int(ctx, ra, rb, disp16, MO_LESL | MO_ALIGN, 0, 1); 2773 break; 2774 case 0x2B: 2775 /* LDQ_L */ 2776 gen_load_int(ctx, ra, rb, disp16, MO_LEUQ | MO_ALIGN, 0, 1); 2777 break; 2778 case 0x2C: 2779 /* STL */ 2780 gen_store_int(ctx, ra, rb, disp16, MO_LEUL, 0); 2781 break; 2782 case 0x2D: 2783 /* STQ */ 2784 gen_store_int(ctx, ra, rb, disp16, MO_LEUQ, 0); 2785 break; 2786 case 0x2E: 2787 /* STL_C */ 2788 ret = gen_store_conditional(ctx, ra, rb, disp16, 2789 ctx->mem_idx, MO_LESL | MO_ALIGN); 2790 break; 2791 case 0x2F: 2792 /* STQ_C */ 2793 ret = gen_store_conditional(ctx, ra, rb, disp16, 2794 ctx->mem_idx, MO_LEUQ | MO_ALIGN); 2795 break; 2796 case 0x30: 2797 /* BR */ 2798 ret = gen_bdirect(ctx, ra, disp21); 2799 break; 2800 case 0x31: /* FBEQ */ 2801 REQUIRE_FEN; 2802 ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21); 2803 break; 2804 case 0x32: /* FBLT */ 2805 REQUIRE_FEN; 2806 ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21); 2807 break; 2808 case 0x33: /* FBLE */ 2809 REQUIRE_FEN; 2810 ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21); 2811 break; 2812 case 0x34: 2813 /* BSR */ 2814 ret = gen_bdirect(ctx, ra, disp21); 2815 break; 2816 case 0x35: /* FBNE */ 2817 REQUIRE_FEN; 2818 ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21); 2819 break; 2820 case 0x36: /* FBGE */ 2821 REQUIRE_FEN; 2822 ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21); 2823 break; 2824 case 0x37: /* FBGT */ 2825 REQUIRE_FEN; 2826 ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21); 2827 break; 2828 case 0x38: 2829 /* BLBC */ 2830 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1); 2831 break; 2832 case 0x39: 2833 /* BEQ */ 2834 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0); 2835 break; 2836 case 0x3A: 2837 /* BLT */ 2838 ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0); 2839 break; 2840 case 0x3B: 2841 /* BLE */ 2842 ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0); 2843 break; 2844 case 0x3C: 2845 /* BLBS */ 2846 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1); 2847 break; 2848 case 0x3D: 2849 /* BNE */ 2850 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0); 2851 break; 2852 case 0x3E: 2853 /* BGE */ 2854 ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0); 2855 break; 2856 case 0x3F: 2857 /* BGT */ 2858 ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0); 2859 break; 2860 invalid_opc: 2861 ret = gen_invalid(ctx); 2862 break; 2863 raise_fen: 2864 ret = gen_excp(ctx, EXCP_FEN, 0); 2865 break; 2866 } 2867 2868 return ret; 2869 } 2870 2871 static void alpha_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu) 2872 { 2873 DisasContext *ctx = container_of(dcbase, DisasContext, base); 2874 CPUAlphaState *env = cpu_env(cpu); 2875 int64_t bound; 2876 2877 ctx->tbflags = ctx->base.tb->flags; 2878 ctx->mem_idx = cpu_mmu_index(env, false); 2879 ctx->implver = env->implver; 2880 ctx->amask = env->amask; 2881 2882 #ifdef CONFIG_USER_ONLY 2883 ctx->ir = cpu_std_ir; 2884 ctx->unalign = (ctx->tbflags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN); 2885 #else 2886 ctx->palbr = env->palbr; 2887 ctx->ir = (ctx->tbflags & ENV_FLAG_PAL_MODE ? cpu_pal_ir : cpu_std_ir); 2888 #endif 2889 2890 /* ??? Every TB begins with unset rounding mode, to be initialized on 2891 the first fp insn of the TB. Alternately we could define a proper 2892 default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure 2893 to reset the FP_STATUS to that default at the end of any TB that 2894 changes the default. We could even (gasp) dynamically figure out 2895 what default would be most efficient given the running program. */ 2896 ctx->tb_rm = -1; 2897 /* Similarly for flush-to-zero. */ 2898 ctx->tb_ftz = -1; 2899 2900 ctx->zero = NULL; 2901 ctx->sink = NULL; 2902 2903 /* Bound the number of insns to execute to those left on the page. */ 2904 bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4; 2905 ctx->base.max_insns = MIN(ctx->base.max_insns, bound); 2906 } 2907 2908 static void alpha_tr_tb_start(DisasContextBase *db, CPUState *cpu) 2909 { 2910 } 2911 2912 static void alpha_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu) 2913 { 2914 tcg_gen_insn_start(dcbase->pc_next); 2915 } 2916 2917 static void alpha_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) 2918 { 2919 DisasContext *ctx = container_of(dcbase, DisasContext, base); 2920 CPUAlphaState *env = cpu_env(cpu); 2921 uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next); 2922 2923 ctx->base.pc_next += 4; 2924 ctx->base.is_jmp = translate_one(ctx, insn); 2925 2926 free_context_temps(ctx); 2927 } 2928 2929 static void alpha_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu) 2930 { 2931 DisasContext *ctx = container_of(dcbase, DisasContext, base); 2932 2933 switch (ctx->base.is_jmp) { 2934 case DISAS_NORETURN: 2935 break; 2936 case DISAS_TOO_MANY: 2937 if (use_goto_tb(ctx, ctx->base.pc_next)) { 2938 tcg_gen_goto_tb(0); 2939 tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next); 2940 tcg_gen_exit_tb(ctx->base.tb, 0); 2941 } 2942 /* FALLTHRU */ 2943 case DISAS_PC_STALE: 2944 tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next); 2945 /* FALLTHRU */ 2946 case DISAS_PC_UPDATED: 2947 tcg_gen_lookup_and_goto_ptr(); 2948 break; 2949 case DISAS_PC_UPDATED_NOCHAIN: 2950 tcg_gen_exit_tb(NULL, 0); 2951 break; 2952 default: 2953 g_assert_not_reached(); 2954 } 2955 } 2956 2957 static void alpha_tr_disas_log(const DisasContextBase *dcbase, 2958 CPUState *cpu, FILE *logfile) 2959 { 2960 fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first)); 2961 target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size); 2962 } 2963 2964 static const TranslatorOps alpha_tr_ops = { 2965 .init_disas_context = alpha_tr_init_disas_context, 2966 .tb_start = alpha_tr_tb_start, 2967 .insn_start = alpha_tr_insn_start, 2968 .translate_insn = alpha_tr_translate_insn, 2969 .tb_stop = alpha_tr_tb_stop, 2970 .disas_log = alpha_tr_disas_log, 2971 }; 2972 2973 void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns, 2974 target_ulong pc, void *host_pc) 2975 { 2976 DisasContext dc; 2977 translator_loop(cpu, tb, max_insns, pc, host_pc, &alpha_tr_ops, &dc.base); 2978 } 2979