1 /* 2 * Alpha emulation cpu translation for qemu. 3 * 4 * Copyright (c) 2007 Jocelyn Mayer 5 * 6 * This library is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU Lesser General Public 8 * License as published by the Free Software Foundation; either 9 * version 2.1 of the License, or (at your option) any later version. 10 * 11 * This library is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * Lesser General Public License for more details. 15 * 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #include "qemu/osdep.h" 21 #include "cpu.h" 22 #include "system/cpus.h" 23 #include "qemu/host-utils.h" 24 #include "exec/exec-all.h" 25 #include "tcg/tcg-op.h" 26 #include "exec/helper-proto.h" 27 #include "exec/helper-gen.h" 28 #include "exec/translator.h" 29 #include "exec/translation-block.h" 30 #include "exec/target_page.h" 31 #include "exec/log.h" 32 33 #define HELPER_H "helper.h" 34 #include "exec/helper-info.c.inc" 35 #undef HELPER_H 36 37 #undef ALPHA_DEBUG_DISAS 38 #define CONFIG_SOFTFLOAT_INLINE 39 40 #ifdef ALPHA_DEBUG_DISAS 41 # define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__) 42 #else 43 # define LOG_DISAS(...) do { } while (0) 44 #endif 45 46 typedef struct DisasContext DisasContext; 47 struct DisasContext { 48 DisasContextBase base; 49 50 #ifdef CONFIG_USER_ONLY 51 MemOp unalign; 52 #else 53 uint64_t palbr; 54 #endif 55 uint32_t tbflags; 56 int mem_idx; 57 58 /* True if generating pc-relative code. */ 59 bool pcrel; 60 61 /* implver and amask values for this CPU. */ 62 int implver; 63 int amask; 64 65 /* Current rounding mode for this TB. */ 66 int tb_rm; 67 /* Current flush-to-zero setting for this TB. */ 68 int tb_ftz; 69 70 /* The set of registers active in the current context. */ 71 TCGv *ir; 72 73 /* Temporaries for $31 and $f31 as source and destination. */ 74 TCGv zero; 75 TCGv sink; 76 }; 77 78 #ifdef CONFIG_USER_ONLY 79 #define UNALIGN(C) (C)->unalign 80 #else 81 #define UNALIGN(C) MO_ALIGN 82 #endif 83 84 /* Target-specific return values from translate_one, indicating the 85 state of the TB. Note that DISAS_NEXT indicates that we are not 86 exiting the TB. */ 87 #define DISAS_PC_UPDATED_NOCHAIN DISAS_TARGET_0 88 #define DISAS_PC_UPDATED DISAS_TARGET_1 89 #define DISAS_PC_STALE DISAS_TARGET_2 90 91 /* global register indexes */ 92 static TCGv cpu_std_ir[31]; 93 static TCGv cpu_fir[31]; 94 static TCGv cpu_pc; 95 static TCGv cpu_lock_addr; 96 static TCGv cpu_lock_value; 97 98 #ifndef CONFIG_USER_ONLY 99 static TCGv cpu_pal_ir[31]; 100 #endif 101 102 void alpha_translate_init(void) 103 { 104 #define DEF_VAR(V) { &cpu_##V, #V, offsetof(CPUAlphaState, V) } 105 106 typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar; 107 static const GlobalVar vars[] = { 108 DEF_VAR(pc), 109 DEF_VAR(lock_addr), 110 DEF_VAR(lock_value), 111 }; 112 113 #undef DEF_VAR 114 115 /* Use the symbolic register names that match the disassembler. */ 116 static const char greg_names[31][4] = { 117 "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6", 118 "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp", 119 "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9", 120 "t10", "t11", "ra", "t12", "at", "gp", "sp" 121 }; 122 static const char freg_names[31][4] = { 123 "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", 124 "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15", 125 "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23", 126 "f24", "f25", "f26", "f27", "f28", "f29", "f30" 127 }; 128 #ifndef CONFIG_USER_ONLY 129 static const char shadow_names[8][8] = { 130 "pal_t7", "pal_s0", "pal_s1", "pal_s2", 131 "pal_s3", "pal_s4", "pal_s5", "pal_t11" 132 }; 133 #endif 134 135 int i; 136 137 for (i = 0; i < 31; i++) { 138 cpu_std_ir[i] = tcg_global_mem_new_i64(tcg_env, 139 offsetof(CPUAlphaState, ir[i]), 140 greg_names[i]); 141 } 142 143 for (i = 0; i < 31; i++) { 144 cpu_fir[i] = tcg_global_mem_new_i64(tcg_env, 145 offsetof(CPUAlphaState, fir[i]), 146 freg_names[i]); 147 } 148 149 #ifndef CONFIG_USER_ONLY 150 memcpy(cpu_pal_ir, cpu_std_ir, sizeof(cpu_pal_ir)); 151 for (i = 0; i < 8; i++) { 152 int r = (i == 7 ? 25 : i + 8); 153 cpu_pal_ir[r] = tcg_global_mem_new_i64(tcg_env, 154 offsetof(CPUAlphaState, 155 shadow[i]), 156 shadow_names[i]); 157 } 158 #endif 159 160 for (i = 0; i < ARRAY_SIZE(vars); ++i) { 161 const GlobalVar *v = &vars[i]; 162 *v->var = tcg_global_mem_new_i64(tcg_env, v->ofs, v->name); 163 } 164 } 165 166 static TCGv load_zero(DisasContext *ctx) 167 { 168 if (!ctx->zero) { 169 ctx->zero = tcg_constant_i64(0); 170 } 171 return ctx->zero; 172 } 173 174 static TCGv dest_sink(DisasContext *ctx) 175 { 176 if (!ctx->sink) { 177 ctx->sink = tcg_temp_new(); 178 } 179 return ctx->sink; 180 } 181 182 static void free_context_temps(DisasContext *ctx) 183 { 184 if (ctx->sink) { 185 tcg_gen_discard_i64(ctx->sink); 186 ctx->sink = NULL; 187 } 188 } 189 190 static TCGv load_gpr(DisasContext *ctx, unsigned reg) 191 { 192 if (likely(reg < 31)) { 193 return ctx->ir[reg]; 194 } else { 195 return load_zero(ctx); 196 } 197 } 198 199 static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg, 200 uint8_t lit, bool islit) 201 { 202 if (islit) { 203 return tcg_constant_i64(lit); 204 } else if (likely(reg < 31)) { 205 return ctx->ir[reg]; 206 } else { 207 return load_zero(ctx); 208 } 209 } 210 211 static TCGv dest_gpr(DisasContext *ctx, unsigned reg) 212 { 213 if (likely(reg < 31)) { 214 return ctx->ir[reg]; 215 } else { 216 return dest_sink(ctx); 217 } 218 } 219 220 static TCGv load_fpr(DisasContext *ctx, unsigned reg) 221 { 222 if (likely(reg < 31)) { 223 return cpu_fir[reg]; 224 } else { 225 return load_zero(ctx); 226 } 227 } 228 229 static TCGv dest_fpr(DisasContext *ctx, unsigned reg) 230 { 231 if (likely(reg < 31)) { 232 return cpu_fir[reg]; 233 } else { 234 return dest_sink(ctx); 235 } 236 } 237 238 static int get_flag_ofs(unsigned shift) 239 { 240 int ofs = offsetof(CPUAlphaState, flags); 241 #if HOST_BIG_ENDIAN 242 ofs += 3 - (shift / 8); 243 #else 244 ofs += shift / 8; 245 #endif 246 return ofs; 247 } 248 249 static void ld_flag_byte(TCGv val, unsigned shift) 250 { 251 tcg_gen_ld8u_i64(val, tcg_env, get_flag_ofs(shift)); 252 } 253 254 static void st_flag_byte(TCGv val, unsigned shift) 255 { 256 tcg_gen_st8_i64(val, tcg_env, get_flag_ofs(shift)); 257 } 258 259 static void gen_pc_disp(DisasContext *ctx, TCGv dest, int32_t disp) 260 { 261 uint64_t addr = ctx->base.pc_next + disp; 262 if (ctx->pcrel) { 263 tcg_gen_addi_i64(dest, cpu_pc, addr - ctx->base.pc_first); 264 } else { 265 tcg_gen_movi_i64(dest, addr); 266 } 267 } 268 269 static void gen_excp_1(int exception, int error_code) 270 { 271 TCGv_i32 tmp1, tmp2; 272 273 tmp1 = tcg_constant_i32(exception); 274 tmp2 = tcg_constant_i32(error_code); 275 gen_helper_excp(tcg_env, tmp1, tmp2); 276 } 277 278 static DisasJumpType gen_excp(DisasContext *ctx, int exception, int error_code) 279 { 280 gen_pc_disp(ctx, cpu_pc, 0); 281 gen_excp_1(exception, error_code); 282 return DISAS_NORETURN; 283 } 284 285 static inline DisasJumpType gen_invalid(DisasContext *ctx) 286 { 287 return gen_excp(ctx, EXCP_OPCDEC, 0); 288 } 289 290 static void gen_ldf(DisasContext *ctx, TCGv dest, TCGv addr) 291 { 292 TCGv_i32 tmp32 = tcg_temp_new_i32(); 293 tcg_gen_qemu_ld_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx)); 294 gen_helper_memory_to_f(dest, tmp32); 295 } 296 297 static void gen_ldg(DisasContext *ctx, TCGv dest, TCGv addr) 298 { 299 TCGv tmp = tcg_temp_new(); 300 tcg_gen_qemu_ld_i64(tmp, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx)); 301 gen_helper_memory_to_g(dest, tmp); 302 } 303 304 static void gen_lds(DisasContext *ctx, TCGv dest, TCGv addr) 305 { 306 TCGv_i32 tmp32 = tcg_temp_new_i32(); 307 tcg_gen_qemu_ld_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx)); 308 gen_helper_memory_to_s(dest, tmp32); 309 } 310 311 static void gen_ldt(DisasContext *ctx, TCGv dest, TCGv addr) 312 { 313 tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx)); 314 } 315 316 static void gen_load_fp(DisasContext *ctx, int ra, int rb, int32_t disp16, 317 void (*func)(DisasContext *, TCGv, TCGv)) 318 { 319 /* Loads to $f31 are prefetches, which we can treat as nops. */ 320 if (likely(ra != 31)) { 321 TCGv addr = tcg_temp_new(); 322 tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16); 323 func(ctx, cpu_fir[ra], addr); 324 } 325 } 326 327 static void gen_load_int(DisasContext *ctx, int ra, int rb, int32_t disp16, 328 MemOp op, bool clear, bool locked) 329 { 330 TCGv addr, dest; 331 332 /* LDQ_U with ra $31 is UNOP. Other various loads are forms of 333 prefetches, which we can treat as nops. No worries about 334 missed exceptions here. */ 335 if (unlikely(ra == 31)) { 336 return; 337 } 338 339 addr = tcg_temp_new(); 340 tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16); 341 if (clear) { 342 tcg_gen_andi_i64(addr, addr, ~0x7); 343 } else if (!locked) { 344 op |= UNALIGN(ctx); 345 } 346 347 dest = ctx->ir[ra]; 348 tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, op); 349 350 if (locked) { 351 tcg_gen_mov_i64(cpu_lock_addr, addr); 352 tcg_gen_mov_i64(cpu_lock_value, dest); 353 } 354 } 355 356 static void gen_stf(DisasContext *ctx, TCGv src, TCGv addr) 357 { 358 TCGv_i32 tmp32 = tcg_temp_new_i32(); 359 gen_helper_f_to_memory(tmp32, addr); 360 tcg_gen_qemu_st_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx)); 361 } 362 363 static void gen_stg(DisasContext *ctx, TCGv src, TCGv addr) 364 { 365 TCGv tmp = tcg_temp_new(); 366 gen_helper_g_to_memory(tmp, src); 367 tcg_gen_qemu_st_i64(tmp, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx)); 368 } 369 370 static void gen_sts(DisasContext *ctx, TCGv src, TCGv addr) 371 { 372 TCGv_i32 tmp32 = tcg_temp_new_i32(); 373 gen_helper_s_to_memory(tmp32, src); 374 tcg_gen_qemu_st_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx)); 375 } 376 377 static void gen_stt(DisasContext *ctx, TCGv src, TCGv addr) 378 { 379 tcg_gen_qemu_st_i64(src, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx)); 380 } 381 382 static void gen_store_fp(DisasContext *ctx, int ra, int rb, int32_t disp16, 383 void (*func)(DisasContext *, TCGv, TCGv)) 384 { 385 TCGv addr = tcg_temp_new(); 386 tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16); 387 func(ctx, load_fpr(ctx, ra), addr); 388 } 389 390 static void gen_store_int(DisasContext *ctx, int ra, int rb, int32_t disp16, 391 MemOp op, bool clear) 392 { 393 TCGv addr, src; 394 395 addr = tcg_temp_new(); 396 tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16); 397 if (clear) { 398 tcg_gen_andi_i64(addr, addr, ~0x7); 399 } else { 400 op |= UNALIGN(ctx); 401 } 402 403 src = load_gpr(ctx, ra); 404 tcg_gen_qemu_st_i64(src, addr, ctx->mem_idx, op); 405 } 406 407 static DisasJumpType gen_store_conditional(DisasContext *ctx, int ra, int rb, 408 int32_t disp16, int mem_idx, 409 MemOp op) 410 { 411 TCGLabel *lab_fail, *lab_done; 412 TCGv addr, val; 413 414 addr = tcg_temp_new_i64(); 415 tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16); 416 free_context_temps(ctx); 417 418 lab_fail = gen_new_label(); 419 lab_done = gen_new_label(); 420 tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail); 421 422 val = tcg_temp_new_i64(); 423 tcg_gen_atomic_cmpxchg_i64(val, cpu_lock_addr, cpu_lock_value, 424 load_gpr(ctx, ra), mem_idx, op); 425 free_context_temps(ctx); 426 427 if (ra != 31) { 428 tcg_gen_setcond_i64(TCG_COND_EQ, ctx->ir[ra], val, cpu_lock_value); 429 } 430 tcg_gen_br(lab_done); 431 432 gen_set_label(lab_fail); 433 if (ra != 31) { 434 tcg_gen_movi_i64(ctx->ir[ra], 0); 435 } 436 437 gen_set_label(lab_done); 438 tcg_gen_movi_i64(cpu_lock_addr, -1); 439 return DISAS_NEXT; 440 } 441 442 static void gen_goto_tb(DisasContext *ctx, int idx, int32_t disp) 443 { 444 if (translator_use_goto_tb(&ctx->base, ctx->base.pc_next + disp)) { 445 /* With PCREL, PC must always be up-to-date. */ 446 if (ctx->pcrel) { 447 gen_pc_disp(ctx, cpu_pc, disp); 448 tcg_gen_goto_tb(idx); 449 } else { 450 tcg_gen_goto_tb(idx); 451 gen_pc_disp(ctx, cpu_pc, disp); 452 } 453 tcg_gen_exit_tb(ctx->base.tb, idx); 454 } else { 455 gen_pc_disp(ctx, cpu_pc, disp); 456 tcg_gen_lookup_and_goto_ptr(); 457 } 458 } 459 460 static DisasJumpType gen_bdirect(DisasContext *ctx, int ra, int32_t disp) 461 { 462 if (ra != 31) { 463 gen_pc_disp(ctx, ctx->ir[ra], 0); 464 } 465 466 /* Notice branch-to-next; used to initialize RA with the PC. */ 467 if (disp == 0) { 468 return DISAS_NEXT; 469 } 470 gen_goto_tb(ctx, 0, disp); 471 return DISAS_NORETURN; 472 } 473 474 static DisasJumpType gen_bcond_internal(DisasContext *ctx, TCGCond cond, 475 TCGv cmp, uint64_t imm, int32_t disp) 476 { 477 TCGLabel *lab_true = gen_new_label(); 478 479 tcg_gen_brcondi_i64(cond, cmp, imm, lab_true); 480 gen_goto_tb(ctx, 0, 0); 481 gen_set_label(lab_true); 482 gen_goto_tb(ctx, 1, disp); 483 484 return DISAS_NORETURN; 485 } 486 487 static DisasJumpType gen_bcond(DisasContext *ctx, TCGCond cond, int ra, 488 int32_t disp) 489 { 490 return gen_bcond_internal(ctx, cond, load_gpr(ctx, ra), 491 is_tst_cond(cond), disp); 492 } 493 494 /* Fold -0.0 for comparison with COND. */ 495 496 static TCGv_i64 gen_fold_mzero(TCGCond *pcond, uint64_t *pimm, TCGv_i64 src) 497 { 498 TCGv_i64 tmp; 499 500 *pimm = 0; 501 switch (*pcond) { 502 case TCG_COND_LE: 503 case TCG_COND_GT: 504 /* For <= or >, the -0.0 value directly compares the way we want. */ 505 return src; 506 507 case TCG_COND_EQ: 508 case TCG_COND_NE: 509 /* For == or !=, we can compare without the sign bit. */ 510 *pcond = *pcond == TCG_COND_EQ ? TCG_COND_TSTEQ : TCG_COND_TSTNE; 511 *pimm = INT64_MAX; 512 return src; 513 514 case TCG_COND_GE: 515 case TCG_COND_LT: 516 /* For >= or <, map -0.0 to +0.0. */ 517 tmp = tcg_temp_new_i64(); 518 tcg_gen_movcond_i64(TCG_COND_EQ, tmp, 519 src, tcg_constant_i64(INT64_MIN), 520 tcg_constant_i64(0), src); 521 return tmp; 522 523 default: 524 g_assert_not_reached(); 525 } 526 } 527 528 static DisasJumpType gen_fbcond(DisasContext *ctx, TCGCond cond, int ra, 529 int32_t disp) 530 { 531 uint64_t imm; 532 TCGv_i64 tmp = gen_fold_mzero(&cond, &imm, load_fpr(ctx, ra)); 533 return gen_bcond_internal(ctx, cond, tmp, imm, disp); 534 } 535 536 static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc) 537 { 538 uint64_t imm; 539 TCGv_i64 tmp = gen_fold_mzero(&cond, &imm, load_fpr(ctx, ra)); 540 tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), 541 tmp, tcg_constant_i64(imm), 542 load_fpr(ctx, rb), load_fpr(ctx, rc)); 543 } 544 545 #define QUAL_RM_N 0x080 /* Round mode nearest even */ 546 #define QUAL_RM_C 0x000 /* Round mode chopped */ 547 #define QUAL_RM_M 0x040 /* Round mode minus infinity */ 548 #define QUAL_RM_D 0x0c0 /* Round mode dynamic */ 549 #define QUAL_RM_MASK 0x0c0 550 551 #define QUAL_U 0x100 /* Underflow enable (fp output) */ 552 #define QUAL_V 0x100 /* Overflow enable (int output) */ 553 #define QUAL_S 0x400 /* Software completion enable */ 554 #define QUAL_I 0x200 /* Inexact detection enable */ 555 556 static void gen_qual_roundmode(DisasContext *ctx, int fn11) 557 { 558 TCGv_i32 tmp; 559 560 fn11 &= QUAL_RM_MASK; 561 if (fn11 == ctx->tb_rm) { 562 return; 563 } 564 ctx->tb_rm = fn11; 565 566 tmp = tcg_temp_new_i32(); 567 switch (fn11) { 568 case QUAL_RM_N: 569 tcg_gen_movi_i32(tmp, float_round_nearest_even); 570 break; 571 case QUAL_RM_C: 572 tcg_gen_movi_i32(tmp, float_round_to_zero); 573 break; 574 case QUAL_RM_M: 575 tcg_gen_movi_i32(tmp, float_round_down); 576 break; 577 case QUAL_RM_D: 578 tcg_gen_ld8u_i32(tmp, tcg_env, 579 offsetof(CPUAlphaState, fpcr_dyn_round)); 580 break; 581 } 582 583 #if defined(CONFIG_SOFTFLOAT_INLINE) 584 /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode. 585 With CONFIG_SOFTFLOAT that expands to an out-of-line call that just 586 sets the one field. */ 587 tcg_gen_st8_i32(tmp, tcg_env, 588 offsetof(CPUAlphaState, fp_status.float_rounding_mode)); 589 #else 590 gen_helper_setroundmode(tmp); 591 #endif 592 } 593 594 static void gen_qual_flushzero(DisasContext *ctx, int fn11) 595 { 596 TCGv_i32 tmp; 597 598 fn11 &= QUAL_U; 599 if (fn11 == ctx->tb_ftz) { 600 return; 601 } 602 ctx->tb_ftz = fn11; 603 604 tmp = tcg_temp_new_i32(); 605 if (fn11) { 606 /* Underflow is enabled, use the FPCR setting. */ 607 tcg_gen_ld8u_i32(tmp, tcg_env, 608 offsetof(CPUAlphaState, fpcr_flush_to_zero)); 609 } else { 610 /* Underflow is disabled, force flush-to-zero. */ 611 tcg_gen_movi_i32(tmp, 1); 612 } 613 614 #if defined(CONFIG_SOFTFLOAT_INLINE) 615 tcg_gen_st8_i32(tmp, tcg_env, 616 offsetof(CPUAlphaState, fp_status.flush_to_zero)); 617 #else 618 gen_helper_setflushzero(tmp); 619 #endif 620 } 621 622 static TCGv gen_ieee_input(DisasContext *ctx, int reg, int fn11, int is_cmp) 623 { 624 TCGv val; 625 626 if (unlikely(reg == 31)) { 627 val = load_zero(ctx); 628 } else { 629 val = cpu_fir[reg]; 630 if ((fn11 & QUAL_S) == 0) { 631 if (is_cmp) { 632 gen_helper_ieee_input_cmp(tcg_env, val); 633 } else { 634 gen_helper_ieee_input(tcg_env, val); 635 } 636 } else { 637 #ifndef CONFIG_USER_ONLY 638 /* In system mode, raise exceptions for denormals like real 639 hardware. In user mode, proceed as if the OS completion 640 handler is handling the denormal as per spec. */ 641 gen_helper_ieee_input_s(tcg_env, val); 642 #endif 643 } 644 } 645 return val; 646 } 647 648 static void gen_fp_exc_raise(int rc, int fn11) 649 { 650 /* ??? We ought to be able to do something with imprecise exceptions. 651 E.g. notice we're still in the trap shadow of something within the 652 TB and do not generate the code to signal the exception; end the TB 653 when an exception is forced to arrive, either by consumption of a 654 register value or TRAPB or EXCB. */ 655 TCGv_i32 reg, ign; 656 uint32_t ignore = 0; 657 658 if (!(fn11 & QUAL_U)) { 659 /* Note that QUAL_U == QUAL_V, so ignore either. */ 660 ignore |= FPCR_UNF | FPCR_IOV; 661 } 662 if (!(fn11 & QUAL_I)) { 663 ignore |= FPCR_INE; 664 } 665 ign = tcg_constant_i32(ignore); 666 667 /* ??? Pass in the regno of the destination so that the helper can 668 set EXC_MASK, which contains a bitmask of destination registers 669 that have caused arithmetic traps. A simple userspace emulation 670 does not require this. We do need it for a guest kernel's entArith, 671 or if we were to do something clever with imprecise exceptions. */ 672 reg = tcg_constant_i32(rc + 32); 673 if (fn11 & QUAL_S) { 674 gen_helper_fp_exc_raise_s(tcg_env, ign, reg); 675 } else { 676 gen_helper_fp_exc_raise(tcg_env, ign, reg); 677 } 678 } 679 680 static void gen_cvtlq(TCGv vc, TCGv vb) 681 { 682 TCGv tmp = tcg_temp_new(); 683 684 /* The arithmetic right shift here, plus the sign-extended mask below 685 yields a sign-extended result without an explicit ext32s_i64. */ 686 tcg_gen_shri_i64(tmp, vb, 29); 687 tcg_gen_sari_i64(vc, vb, 32); 688 tcg_gen_deposit_i64(vc, vc, tmp, 0, 30); 689 } 690 691 static void gen_ieee_arith2(DisasContext *ctx, 692 void (*helper)(TCGv, TCGv_ptr, TCGv), 693 int rb, int rc, int fn11) 694 { 695 TCGv vb; 696 697 gen_qual_roundmode(ctx, fn11); 698 gen_qual_flushzero(ctx, fn11); 699 700 vb = gen_ieee_input(ctx, rb, fn11, 0); 701 helper(dest_fpr(ctx, rc), tcg_env, vb); 702 703 gen_fp_exc_raise(rc, fn11); 704 } 705 706 #define IEEE_ARITH2(name) \ 707 static inline void glue(gen_, name)(DisasContext *ctx, \ 708 int rb, int rc, int fn11) \ 709 { \ 710 gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11); \ 711 } 712 IEEE_ARITH2(sqrts) 713 IEEE_ARITH2(sqrtt) 714 IEEE_ARITH2(cvtst) 715 IEEE_ARITH2(cvtts) 716 717 static void gen_cvttq(DisasContext *ctx, int rb, int rc, int fn11) 718 { 719 TCGv vb, vc; 720 721 /* No need to set flushzero, since we have an integer output. */ 722 vb = gen_ieee_input(ctx, rb, fn11, 0); 723 vc = dest_fpr(ctx, rc); 724 725 /* Almost all integer conversions use cropped rounding; 726 special case that. */ 727 if ((fn11 & QUAL_RM_MASK) == QUAL_RM_C) { 728 gen_helper_cvttq_c(vc, tcg_env, vb); 729 } else { 730 gen_qual_roundmode(ctx, fn11); 731 gen_helper_cvttq(vc, tcg_env, vb); 732 } 733 gen_fp_exc_raise(rc, fn11); 734 } 735 736 static void gen_ieee_intcvt(DisasContext *ctx, 737 void (*helper)(TCGv, TCGv_ptr, TCGv), 738 int rb, int rc, int fn11) 739 { 740 TCGv vb, vc; 741 742 gen_qual_roundmode(ctx, fn11); 743 vb = load_fpr(ctx, rb); 744 vc = dest_fpr(ctx, rc); 745 746 /* The only exception that can be raised by integer conversion 747 is inexact. Thus we only need to worry about exceptions when 748 inexact handling is requested. */ 749 if (fn11 & QUAL_I) { 750 helper(vc, tcg_env, vb); 751 gen_fp_exc_raise(rc, fn11); 752 } else { 753 helper(vc, tcg_env, vb); 754 } 755 } 756 757 #define IEEE_INTCVT(name) \ 758 static inline void glue(gen_, name)(DisasContext *ctx, \ 759 int rb, int rc, int fn11) \ 760 { \ 761 gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11); \ 762 } 763 IEEE_INTCVT(cvtqs) 764 IEEE_INTCVT(cvtqt) 765 766 static void gen_cpy_mask(TCGv vc, TCGv va, TCGv vb, bool inv_a, uint64_t mask) 767 { 768 TCGv vmask = tcg_constant_i64(mask); 769 TCGv tmp = tcg_temp_new_i64(); 770 771 if (inv_a) { 772 tcg_gen_andc_i64(tmp, vmask, va); 773 } else { 774 tcg_gen_and_i64(tmp, va, vmask); 775 } 776 777 tcg_gen_andc_i64(vc, vb, vmask); 778 tcg_gen_or_i64(vc, vc, tmp); 779 } 780 781 static void gen_ieee_arith3(DisasContext *ctx, 782 void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv), 783 int ra, int rb, int rc, int fn11) 784 { 785 TCGv va, vb, vc; 786 787 gen_qual_roundmode(ctx, fn11); 788 gen_qual_flushzero(ctx, fn11); 789 790 va = gen_ieee_input(ctx, ra, fn11, 0); 791 vb = gen_ieee_input(ctx, rb, fn11, 0); 792 vc = dest_fpr(ctx, rc); 793 helper(vc, tcg_env, va, vb); 794 795 gen_fp_exc_raise(rc, fn11); 796 } 797 798 #define IEEE_ARITH3(name) \ 799 static inline void glue(gen_, name)(DisasContext *ctx, \ 800 int ra, int rb, int rc, int fn11) \ 801 { \ 802 gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11); \ 803 } 804 IEEE_ARITH3(adds) 805 IEEE_ARITH3(subs) 806 IEEE_ARITH3(muls) 807 IEEE_ARITH3(divs) 808 IEEE_ARITH3(addt) 809 IEEE_ARITH3(subt) 810 IEEE_ARITH3(mult) 811 IEEE_ARITH3(divt) 812 813 static void gen_ieee_compare(DisasContext *ctx, 814 void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv), 815 int ra, int rb, int rc, int fn11) 816 { 817 TCGv va, vb, vc; 818 819 va = gen_ieee_input(ctx, ra, fn11, 1); 820 vb = gen_ieee_input(ctx, rb, fn11, 1); 821 vc = dest_fpr(ctx, rc); 822 helper(vc, tcg_env, va, vb); 823 824 gen_fp_exc_raise(rc, fn11); 825 } 826 827 #define IEEE_CMP3(name) \ 828 static inline void glue(gen_, name)(DisasContext *ctx, \ 829 int ra, int rb, int rc, int fn11) \ 830 { \ 831 gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11); \ 832 } 833 IEEE_CMP3(cmptun) 834 IEEE_CMP3(cmpteq) 835 IEEE_CMP3(cmptlt) 836 IEEE_CMP3(cmptle) 837 838 static inline uint64_t zapnot_mask(uint8_t lit) 839 { 840 uint64_t mask = 0; 841 int i; 842 843 for (i = 0; i < 8; ++i) { 844 if ((lit >> i) & 1) { 845 mask |= 0xffull << (i * 8); 846 } 847 } 848 return mask; 849 } 850 851 /* Implement zapnot with an immediate operand, which expands to some 852 form of immediate AND. This is a basic building block in the 853 definition of many of the other byte manipulation instructions. */ 854 static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit) 855 { 856 switch (lit) { 857 case 0x00: 858 tcg_gen_movi_i64(dest, 0); 859 break; 860 case 0x01: 861 tcg_gen_ext8u_i64(dest, src); 862 break; 863 case 0x03: 864 tcg_gen_ext16u_i64(dest, src); 865 break; 866 case 0x0f: 867 tcg_gen_ext32u_i64(dest, src); 868 break; 869 case 0xff: 870 tcg_gen_mov_i64(dest, src); 871 break; 872 default: 873 tcg_gen_andi_i64(dest, src, zapnot_mask(lit)); 874 break; 875 } 876 } 877 878 /* EXTWH, EXTLH, EXTQH */ 879 static void gen_ext_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit, 880 uint8_t lit, uint8_t byte_mask) 881 { 882 if (islit) { 883 int pos = (64 - lit * 8) & 0x3f; 884 int len = cto32(byte_mask) * 8; 885 if (pos < len) { 886 tcg_gen_deposit_z_i64(vc, va, pos, len - pos); 887 } else { 888 tcg_gen_movi_i64(vc, 0); 889 } 890 } else { 891 TCGv tmp = tcg_temp_new(); 892 tcg_gen_shli_i64(tmp, load_gpr(ctx, rb), 3); 893 tcg_gen_neg_i64(tmp, tmp); 894 tcg_gen_andi_i64(tmp, tmp, 0x3f); 895 tcg_gen_shl_i64(vc, va, tmp); 896 } 897 gen_zapnoti(vc, vc, byte_mask); 898 } 899 900 /* EXTBL, EXTWL, EXTLL, EXTQL */ 901 static void gen_ext_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit, 902 uint8_t lit, uint8_t byte_mask) 903 { 904 if (islit) { 905 int pos = (lit & 7) * 8; 906 int len = cto32(byte_mask) * 8; 907 if (pos + len >= 64) { 908 len = 64 - pos; 909 } 910 tcg_gen_extract_i64(vc, va, pos, len); 911 } else { 912 TCGv tmp = tcg_temp_new(); 913 tcg_gen_andi_i64(tmp, load_gpr(ctx, rb), 7); 914 tcg_gen_shli_i64(tmp, tmp, 3); 915 tcg_gen_shr_i64(vc, va, tmp); 916 gen_zapnoti(vc, vc, byte_mask); 917 } 918 } 919 920 /* INSWH, INSLH, INSQH */ 921 static void gen_ins_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit, 922 uint8_t lit, uint8_t byte_mask) 923 { 924 if (islit) { 925 int pos = 64 - (lit & 7) * 8; 926 int len = cto32(byte_mask) * 8; 927 if (pos < len) { 928 tcg_gen_extract_i64(vc, va, pos, len - pos); 929 } else { 930 tcg_gen_movi_i64(vc, 0); 931 } 932 } else { 933 TCGv tmp = tcg_temp_new(); 934 TCGv shift = tcg_temp_new(); 935 936 /* The instruction description has us left-shift the byte mask 937 and extract bits <15:8> and apply that zap at the end. This 938 is equivalent to simply performing the zap first and shifting 939 afterward. */ 940 gen_zapnoti(tmp, va, byte_mask); 941 942 /* If (B & 7) == 0, we need to shift by 64 and leave a zero. Do this 943 portably by splitting the shift into two parts: shift_count-1 and 1. 944 Arrange for the -1 by using ones-complement instead of 945 twos-complement in the negation: ~(B * 8) & 63. */ 946 947 tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3); 948 tcg_gen_not_i64(shift, shift); 949 tcg_gen_andi_i64(shift, shift, 0x3f); 950 951 tcg_gen_shr_i64(vc, tmp, shift); 952 tcg_gen_shri_i64(vc, vc, 1); 953 } 954 } 955 956 /* INSBL, INSWL, INSLL, INSQL */ 957 static void gen_ins_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit, 958 uint8_t lit, uint8_t byte_mask) 959 { 960 if (islit) { 961 int pos = (lit & 7) * 8; 962 int len = cto32(byte_mask) * 8; 963 if (pos + len > 64) { 964 len = 64 - pos; 965 } 966 tcg_gen_deposit_z_i64(vc, va, pos, len); 967 } else { 968 TCGv tmp = tcg_temp_new(); 969 TCGv shift = tcg_temp_new(); 970 971 /* The instruction description has us left-shift the byte mask 972 and extract bits <15:8> and apply that zap at the end. This 973 is equivalent to simply performing the zap first and shifting 974 afterward. */ 975 gen_zapnoti(tmp, va, byte_mask); 976 977 tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7); 978 tcg_gen_shli_i64(shift, shift, 3); 979 tcg_gen_shl_i64(vc, tmp, shift); 980 } 981 } 982 983 /* MSKWH, MSKLH, MSKQH */ 984 static void gen_msk_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit, 985 uint8_t lit, uint8_t byte_mask) 986 { 987 if (islit) { 988 gen_zapnoti(vc, va, ~((byte_mask << (lit & 7)) >> 8)); 989 } else { 990 TCGv shift = tcg_temp_new(); 991 TCGv mask = tcg_temp_new(); 992 993 /* The instruction description is as above, where the byte_mask 994 is shifted left, and then we extract bits <15:8>. This can be 995 emulated with a right-shift on the expanded byte mask. This 996 requires extra care because for an input <2:0> == 0 we need a 997 shift of 64 bits in order to generate a zero. This is done by 998 splitting the shift into two parts, the variable shift - 1 999 followed by a constant 1 shift. The code we expand below is 1000 equivalent to ~(B * 8) & 63. */ 1001 1002 tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3); 1003 tcg_gen_not_i64(shift, shift); 1004 tcg_gen_andi_i64(shift, shift, 0x3f); 1005 tcg_gen_movi_i64(mask, zapnot_mask (byte_mask)); 1006 tcg_gen_shr_i64(mask, mask, shift); 1007 tcg_gen_shri_i64(mask, mask, 1); 1008 1009 tcg_gen_andc_i64(vc, va, mask); 1010 } 1011 } 1012 1013 /* MSKBL, MSKWL, MSKLL, MSKQL */ 1014 static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit, 1015 uint8_t lit, uint8_t byte_mask) 1016 { 1017 if (islit) { 1018 gen_zapnoti(vc, va, ~(byte_mask << (lit & 7))); 1019 } else { 1020 TCGv shift = tcg_temp_new(); 1021 TCGv mask = tcg_temp_new(); 1022 1023 tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7); 1024 tcg_gen_shli_i64(shift, shift, 3); 1025 tcg_gen_movi_i64(mask, zapnot_mask(byte_mask)); 1026 tcg_gen_shl_i64(mask, mask, shift); 1027 1028 tcg_gen_andc_i64(vc, va, mask); 1029 } 1030 } 1031 1032 static void gen_rx(DisasContext *ctx, int ra, int set) 1033 { 1034 if (ra != 31) { 1035 ld_flag_byte(ctx->ir[ra], ENV_FLAG_RX_SHIFT); 1036 } 1037 1038 st_flag_byte(tcg_constant_i64(set), ENV_FLAG_RX_SHIFT); 1039 } 1040 1041 static DisasJumpType gen_call_pal(DisasContext *ctx, int palcode) 1042 { 1043 /* We're emulating OSF/1 PALcode. Many of these are trivial access 1044 to internal cpu registers. */ 1045 1046 /* Unprivileged PAL call */ 1047 if (palcode >= 0x80 && palcode < 0xC0) { 1048 switch (palcode) { 1049 case 0x86: 1050 /* IMB */ 1051 /* No-op inside QEMU. */ 1052 break; 1053 case 0x9E: 1054 /* RDUNIQUE */ 1055 tcg_gen_ld_i64(ctx->ir[IR_V0], tcg_env, 1056 offsetof(CPUAlphaState, unique)); 1057 break; 1058 case 0x9F: 1059 /* WRUNIQUE */ 1060 tcg_gen_st_i64(ctx->ir[IR_A0], tcg_env, 1061 offsetof(CPUAlphaState, unique)); 1062 break; 1063 default: 1064 palcode &= 0xbf; 1065 goto do_call_pal; 1066 } 1067 return DISAS_NEXT; 1068 } 1069 1070 #ifndef CONFIG_USER_ONLY 1071 /* Privileged PAL code */ 1072 if (palcode < 0x40 && (ctx->tbflags & ENV_FLAG_PS_USER) == 0) { 1073 switch (palcode) { 1074 case 0x01: 1075 /* CFLUSH */ 1076 /* No-op inside QEMU. */ 1077 break; 1078 case 0x02: 1079 /* DRAINA */ 1080 /* No-op inside QEMU. */ 1081 break; 1082 case 0x2D: 1083 /* WRVPTPTR */ 1084 tcg_gen_st_i64(ctx->ir[IR_A0], tcg_env, 1085 offsetof(CPUAlphaState, vptptr)); 1086 break; 1087 case 0x31: 1088 /* WRVAL */ 1089 tcg_gen_st_i64(ctx->ir[IR_A0], tcg_env, 1090 offsetof(CPUAlphaState, sysval)); 1091 break; 1092 case 0x32: 1093 /* RDVAL */ 1094 tcg_gen_ld_i64(ctx->ir[IR_V0], tcg_env, 1095 offsetof(CPUAlphaState, sysval)); 1096 break; 1097 1098 case 0x35: 1099 /* SWPIPL */ 1100 /* Note that we already know we're in kernel mode, so we know 1101 that PS only contains the 3 IPL bits. */ 1102 ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT); 1103 1104 /* But make sure and store only the 3 IPL bits from the user. */ 1105 { 1106 TCGv tmp = tcg_temp_new(); 1107 tcg_gen_andi_i64(tmp, ctx->ir[IR_A0], PS_INT_MASK); 1108 st_flag_byte(tmp, ENV_FLAG_PS_SHIFT); 1109 } 1110 1111 /* Allow interrupts to be recognized right away. */ 1112 gen_pc_disp(ctx, cpu_pc, 0); 1113 return DISAS_PC_UPDATED_NOCHAIN; 1114 1115 case 0x36: 1116 /* RDPS */ 1117 ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT); 1118 break; 1119 1120 case 0x38: 1121 /* WRUSP */ 1122 tcg_gen_st_i64(ctx->ir[IR_A0], tcg_env, 1123 offsetof(CPUAlphaState, usp)); 1124 break; 1125 case 0x3A: 1126 /* RDUSP */ 1127 tcg_gen_ld_i64(ctx->ir[IR_V0], tcg_env, 1128 offsetof(CPUAlphaState, usp)); 1129 break; 1130 case 0x3C: 1131 /* WHAMI */ 1132 tcg_gen_ld32s_i64(ctx->ir[IR_V0], tcg_env, 1133 -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index)); 1134 break; 1135 1136 case 0x3E: 1137 /* WTINT */ 1138 tcg_gen_st_i32(tcg_constant_i32(1), tcg_env, 1139 -offsetof(AlphaCPU, env) + 1140 offsetof(CPUState, halted)); 1141 tcg_gen_movi_i64(ctx->ir[IR_V0], 0); 1142 return gen_excp(ctx, EXCP_HALTED, 0); 1143 1144 default: 1145 palcode &= 0x3f; 1146 goto do_call_pal; 1147 } 1148 return DISAS_NEXT; 1149 } 1150 #endif 1151 return gen_invalid(ctx); 1152 1153 do_call_pal: 1154 #ifdef CONFIG_USER_ONLY 1155 return gen_excp(ctx, EXCP_CALL_PAL, palcode); 1156 #else 1157 { 1158 TCGv tmp = tcg_temp_new(); 1159 uint64_t entry; 1160 1161 gen_pc_disp(ctx, tmp, 0); 1162 if (ctx->tbflags & ENV_FLAG_PAL_MODE) { 1163 tcg_gen_ori_i64(tmp, tmp, 1); 1164 } else { 1165 st_flag_byte(tcg_constant_i64(1), ENV_FLAG_PAL_SHIFT); 1166 } 1167 tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUAlphaState, exc_addr)); 1168 1169 entry = ctx->palbr; 1170 entry += (palcode & 0x80 1171 ? 0x2000 + (palcode - 0x80) * 64 1172 : 0x1000 + palcode * 64); 1173 1174 tcg_gen_movi_i64(cpu_pc, entry); 1175 return DISAS_PC_UPDATED; 1176 } 1177 #endif 1178 } 1179 1180 #ifndef CONFIG_USER_ONLY 1181 1182 #define PR_LONG 0x200000 1183 1184 static int cpu_pr_data(int pr) 1185 { 1186 switch (pr) { 1187 case 2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG; 1188 case 3: return offsetof(CPUAlphaState, trap_arg0); 1189 case 4: return offsetof(CPUAlphaState, trap_arg1); 1190 case 5: return offsetof(CPUAlphaState, trap_arg2); 1191 case 6: return offsetof(CPUAlphaState, exc_addr); 1192 case 7: return offsetof(CPUAlphaState, palbr); 1193 case 8: return offsetof(CPUAlphaState, ptbr); 1194 case 9: return offsetof(CPUAlphaState, vptptr); 1195 case 10: return offsetof(CPUAlphaState, unique); 1196 case 11: return offsetof(CPUAlphaState, sysval); 1197 case 12: return offsetof(CPUAlphaState, usp); 1198 1199 case 40 ... 63: 1200 return offsetof(CPUAlphaState, scratch[pr - 40]); 1201 1202 case 251: 1203 return offsetof(CPUAlphaState, alarm_expire); 1204 } 1205 return 0; 1206 } 1207 1208 static DisasJumpType gen_mfpr(DisasContext *ctx, TCGv va, int regno) 1209 { 1210 void (*helper)(TCGv); 1211 int data; 1212 1213 switch (regno) { 1214 case 32 ... 39: 1215 /* Accessing the "non-shadow" general registers. */ 1216 regno = regno == 39 ? 25 : regno - 32 + 8; 1217 tcg_gen_mov_i64(va, cpu_std_ir[regno]); 1218 break; 1219 1220 case 250: /* WALLTIME */ 1221 helper = gen_helper_get_walltime; 1222 goto do_helper; 1223 case 249: /* VMTIME */ 1224 helper = gen_helper_get_vmtime; 1225 do_helper: 1226 if (translator_io_start(&ctx->base)) { 1227 helper(va); 1228 return DISAS_PC_STALE; 1229 } else { 1230 helper(va); 1231 } 1232 break; 1233 1234 case 0: /* PS */ 1235 ld_flag_byte(va, ENV_FLAG_PS_SHIFT); 1236 break; 1237 case 1: /* FEN */ 1238 ld_flag_byte(va, ENV_FLAG_FEN_SHIFT); 1239 break; 1240 1241 default: 1242 /* The basic registers are data only, and unknown registers 1243 are read-zero, write-ignore. */ 1244 data = cpu_pr_data(regno); 1245 if (data == 0) { 1246 tcg_gen_movi_i64(va, 0); 1247 } else if (data & PR_LONG) { 1248 tcg_gen_ld32s_i64(va, tcg_env, data & ~PR_LONG); 1249 } else { 1250 tcg_gen_ld_i64(va, tcg_env, data); 1251 } 1252 break; 1253 } 1254 1255 return DISAS_NEXT; 1256 } 1257 1258 static DisasJumpType gen_mtpr(DisasContext *ctx, TCGv vb, int regno) 1259 { 1260 int data; 1261 DisasJumpType ret = DISAS_NEXT; 1262 1263 switch (regno) { 1264 case 255: 1265 /* TBIA */ 1266 gen_helper_tbia(tcg_env); 1267 break; 1268 1269 case 254: 1270 /* TBIS */ 1271 gen_helper_tbis(tcg_env, vb); 1272 break; 1273 1274 case 253: 1275 /* WAIT */ 1276 tcg_gen_st_i32(tcg_constant_i32(1), tcg_env, 1277 -offsetof(AlphaCPU, env) + offsetof(CPUState, halted)); 1278 return gen_excp(ctx, EXCP_HALTED, 0); 1279 1280 case 252: 1281 /* HALT */ 1282 gen_helper_halt(vb); 1283 return DISAS_PC_STALE; 1284 1285 case 251: 1286 /* ALARM */ 1287 if (translator_io_start(&ctx->base)) { 1288 ret = DISAS_PC_STALE; 1289 } 1290 gen_helper_set_alarm(tcg_env, vb); 1291 break; 1292 1293 case 7: 1294 /* PALBR */ 1295 tcg_gen_st_i64(vb, tcg_env, offsetof(CPUAlphaState, palbr)); 1296 /* Changing the PAL base register implies un-chaining all of the TBs 1297 that ended with a CALL_PAL. Since the base register usually only 1298 changes during boot, flushing everything works well. */ 1299 gen_helper_tb_flush(tcg_env); 1300 return DISAS_PC_STALE; 1301 1302 case 32 ... 39: 1303 /* Accessing the "non-shadow" general registers. */ 1304 regno = regno == 39 ? 25 : regno - 32 + 8; 1305 tcg_gen_mov_i64(cpu_std_ir[regno], vb); 1306 break; 1307 1308 case 0: /* PS */ 1309 st_flag_byte(vb, ENV_FLAG_PS_SHIFT); 1310 break; 1311 case 1: /* FEN */ 1312 st_flag_byte(vb, ENV_FLAG_FEN_SHIFT); 1313 break; 1314 1315 default: 1316 /* The basic registers are data only, and unknown registers 1317 are read-zero, write-ignore. */ 1318 data = cpu_pr_data(regno); 1319 if (data != 0) { 1320 if (data & PR_LONG) { 1321 tcg_gen_st32_i64(vb, tcg_env, data & ~PR_LONG); 1322 } else { 1323 tcg_gen_st_i64(vb, tcg_env, data); 1324 } 1325 } 1326 break; 1327 } 1328 1329 return ret; 1330 } 1331 #endif /* !USER_ONLY*/ 1332 1333 #define REQUIRE_NO_LIT \ 1334 do { \ 1335 if (real_islit) { \ 1336 goto invalid_opc; \ 1337 } \ 1338 } while (0) 1339 1340 #define REQUIRE_AMASK(FLAG) \ 1341 do { \ 1342 if ((ctx->amask & AMASK_##FLAG) == 0) { \ 1343 goto invalid_opc; \ 1344 } \ 1345 } while (0) 1346 1347 #define REQUIRE_TB_FLAG(FLAG) \ 1348 do { \ 1349 if ((ctx->tbflags & (FLAG)) == 0) { \ 1350 goto invalid_opc; \ 1351 } \ 1352 } while (0) 1353 1354 #define REQUIRE_REG_31(WHICH) \ 1355 do { \ 1356 if (WHICH != 31) { \ 1357 goto invalid_opc; \ 1358 } \ 1359 } while (0) 1360 1361 #define REQUIRE_FEN \ 1362 do { \ 1363 if (!(ctx->tbflags & ENV_FLAG_FEN)) { \ 1364 goto raise_fen; \ 1365 } \ 1366 } while (0) 1367 1368 static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) 1369 { 1370 int32_t disp21, disp16, disp12 __attribute__((unused)); 1371 uint16_t fn11; 1372 uint8_t opc, ra, rb, rc, fpfn, fn7, lit; 1373 bool islit, real_islit; 1374 TCGv va, vb, vc, tmp, tmp2; 1375 TCGv_i32 t32; 1376 DisasJumpType ret; 1377 1378 /* Decode all instruction fields */ 1379 opc = extract32(insn, 26, 6); 1380 ra = extract32(insn, 21, 5); 1381 rb = extract32(insn, 16, 5); 1382 rc = extract32(insn, 0, 5); 1383 real_islit = islit = extract32(insn, 12, 1); 1384 lit = extract32(insn, 13, 8); 1385 1386 disp21 = sextract32(insn, 0, 21) * 4; 1387 disp16 = sextract32(insn, 0, 16); 1388 disp12 = sextract32(insn, 0, 12); 1389 1390 fn11 = extract32(insn, 5, 11); 1391 fpfn = extract32(insn, 5, 6); 1392 fn7 = extract32(insn, 5, 7); 1393 1394 if (rb == 31 && !islit) { 1395 islit = true; 1396 lit = 0; 1397 } 1398 1399 ret = DISAS_NEXT; 1400 switch (opc) { 1401 case 0x00: 1402 /* CALL_PAL */ 1403 ret = gen_call_pal(ctx, insn & 0x03ffffff); 1404 break; 1405 case 0x01: 1406 /* OPC01 */ 1407 goto invalid_opc; 1408 case 0x02: 1409 /* OPC02 */ 1410 goto invalid_opc; 1411 case 0x03: 1412 /* OPC03 */ 1413 goto invalid_opc; 1414 case 0x04: 1415 /* OPC04 */ 1416 goto invalid_opc; 1417 case 0x05: 1418 /* OPC05 */ 1419 goto invalid_opc; 1420 case 0x06: 1421 /* OPC06 */ 1422 goto invalid_opc; 1423 case 0x07: 1424 /* OPC07 */ 1425 goto invalid_opc; 1426 1427 case 0x09: 1428 /* LDAH */ 1429 disp16 = (uint32_t)disp16 << 16; 1430 /* fall through */ 1431 case 0x08: 1432 /* LDA */ 1433 va = dest_gpr(ctx, ra); 1434 /* It's worth special-casing immediate loads. */ 1435 if (rb == 31) { 1436 tcg_gen_movi_i64(va, disp16); 1437 } else { 1438 tcg_gen_addi_i64(va, load_gpr(ctx, rb), disp16); 1439 } 1440 break; 1441 1442 case 0x0A: 1443 /* LDBU */ 1444 REQUIRE_AMASK(BWX); 1445 gen_load_int(ctx, ra, rb, disp16, MO_UB, 0, 0); 1446 break; 1447 case 0x0B: 1448 /* LDQ_U */ 1449 gen_load_int(ctx, ra, rb, disp16, MO_LEUQ, 1, 0); 1450 break; 1451 case 0x0C: 1452 /* LDWU */ 1453 REQUIRE_AMASK(BWX); 1454 gen_load_int(ctx, ra, rb, disp16, MO_LEUW, 0, 0); 1455 break; 1456 case 0x0D: 1457 /* STW */ 1458 REQUIRE_AMASK(BWX); 1459 gen_store_int(ctx, ra, rb, disp16, MO_LEUW, 0); 1460 break; 1461 case 0x0E: 1462 /* STB */ 1463 REQUIRE_AMASK(BWX); 1464 gen_store_int(ctx, ra, rb, disp16, MO_UB, 0); 1465 break; 1466 case 0x0F: 1467 /* STQ_U */ 1468 gen_store_int(ctx, ra, rb, disp16, MO_LEUQ, 1); 1469 break; 1470 1471 case 0x10: 1472 vc = dest_gpr(ctx, rc); 1473 vb = load_gpr_lit(ctx, rb, lit, islit); 1474 1475 if (ra == 31) { 1476 if (fn7 == 0x00) { 1477 /* Special case ADDL as SEXTL. */ 1478 tcg_gen_ext32s_i64(vc, vb); 1479 break; 1480 } 1481 if (fn7 == 0x29) { 1482 /* Special case SUBQ as NEGQ. */ 1483 tcg_gen_neg_i64(vc, vb); 1484 break; 1485 } 1486 } 1487 1488 va = load_gpr(ctx, ra); 1489 switch (fn7) { 1490 case 0x00: 1491 /* ADDL */ 1492 tcg_gen_add_i64(vc, va, vb); 1493 tcg_gen_ext32s_i64(vc, vc); 1494 break; 1495 case 0x02: 1496 /* S4ADDL */ 1497 tmp = tcg_temp_new(); 1498 tcg_gen_shli_i64(tmp, va, 2); 1499 tcg_gen_add_i64(tmp, tmp, vb); 1500 tcg_gen_ext32s_i64(vc, tmp); 1501 break; 1502 case 0x09: 1503 /* SUBL */ 1504 tcg_gen_sub_i64(vc, va, vb); 1505 tcg_gen_ext32s_i64(vc, vc); 1506 break; 1507 case 0x0B: 1508 /* S4SUBL */ 1509 tmp = tcg_temp_new(); 1510 tcg_gen_shli_i64(tmp, va, 2); 1511 tcg_gen_sub_i64(tmp, tmp, vb); 1512 tcg_gen_ext32s_i64(vc, tmp); 1513 break; 1514 case 0x0F: 1515 /* CMPBGE */ 1516 if (ra == 31) { 1517 /* Special case 0 >= X as X == 0. */ 1518 gen_helper_cmpbe0(vc, vb); 1519 } else { 1520 gen_helper_cmpbge(vc, va, vb); 1521 } 1522 break; 1523 case 0x12: 1524 /* S8ADDL */ 1525 tmp = tcg_temp_new(); 1526 tcg_gen_shli_i64(tmp, va, 3); 1527 tcg_gen_add_i64(tmp, tmp, vb); 1528 tcg_gen_ext32s_i64(vc, tmp); 1529 break; 1530 case 0x1B: 1531 /* S8SUBL */ 1532 tmp = tcg_temp_new(); 1533 tcg_gen_shli_i64(tmp, va, 3); 1534 tcg_gen_sub_i64(tmp, tmp, vb); 1535 tcg_gen_ext32s_i64(vc, tmp); 1536 break; 1537 case 0x1D: 1538 /* CMPULT */ 1539 tcg_gen_setcond_i64(TCG_COND_LTU, vc, va, vb); 1540 break; 1541 case 0x20: 1542 /* ADDQ */ 1543 tcg_gen_add_i64(vc, va, vb); 1544 break; 1545 case 0x22: 1546 /* S4ADDQ */ 1547 tmp = tcg_temp_new(); 1548 tcg_gen_shli_i64(tmp, va, 2); 1549 tcg_gen_add_i64(vc, tmp, vb); 1550 break; 1551 case 0x29: 1552 /* SUBQ */ 1553 tcg_gen_sub_i64(vc, va, vb); 1554 break; 1555 case 0x2B: 1556 /* S4SUBQ */ 1557 tmp = tcg_temp_new(); 1558 tcg_gen_shli_i64(tmp, va, 2); 1559 tcg_gen_sub_i64(vc, tmp, vb); 1560 break; 1561 case 0x2D: 1562 /* CMPEQ */ 1563 tcg_gen_setcond_i64(TCG_COND_EQ, vc, va, vb); 1564 break; 1565 case 0x32: 1566 /* S8ADDQ */ 1567 tmp = tcg_temp_new(); 1568 tcg_gen_shli_i64(tmp, va, 3); 1569 tcg_gen_add_i64(vc, tmp, vb); 1570 break; 1571 case 0x3B: 1572 /* S8SUBQ */ 1573 tmp = tcg_temp_new(); 1574 tcg_gen_shli_i64(tmp, va, 3); 1575 tcg_gen_sub_i64(vc, tmp, vb); 1576 break; 1577 case 0x3D: 1578 /* CMPULE */ 1579 tcg_gen_setcond_i64(TCG_COND_LEU, vc, va, vb); 1580 break; 1581 case 0x40: 1582 /* ADDL/V */ 1583 tmp = tcg_temp_new(); 1584 tcg_gen_ext32s_i64(tmp, va); 1585 tcg_gen_ext32s_i64(vc, vb); 1586 tcg_gen_add_i64(tmp, tmp, vc); 1587 tcg_gen_ext32s_i64(vc, tmp); 1588 gen_helper_check_overflow(tcg_env, vc, tmp); 1589 break; 1590 case 0x49: 1591 /* SUBL/V */ 1592 tmp = tcg_temp_new(); 1593 tcg_gen_ext32s_i64(tmp, va); 1594 tcg_gen_ext32s_i64(vc, vb); 1595 tcg_gen_sub_i64(tmp, tmp, vc); 1596 tcg_gen_ext32s_i64(vc, tmp); 1597 gen_helper_check_overflow(tcg_env, vc, tmp); 1598 break; 1599 case 0x4D: 1600 /* CMPLT */ 1601 tcg_gen_setcond_i64(TCG_COND_LT, vc, va, vb); 1602 break; 1603 case 0x60: 1604 /* ADDQ/V */ 1605 tmp = tcg_temp_new(); 1606 tmp2 = tcg_temp_new(); 1607 tcg_gen_eqv_i64(tmp, va, vb); 1608 tcg_gen_mov_i64(tmp2, va); 1609 tcg_gen_add_i64(vc, va, vb); 1610 tcg_gen_xor_i64(tmp2, tmp2, vc); 1611 tcg_gen_and_i64(tmp, tmp, tmp2); 1612 tcg_gen_shri_i64(tmp, tmp, 63); 1613 tcg_gen_movi_i64(tmp2, 0); 1614 gen_helper_check_overflow(tcg_env, tmp, tmp2); 1615 break; 1616 case 0x69: 1617 /* SUBQ/V */ 1618 tmp = tcg_temp_new(); 1619 tmp2 = tcg_temp_new(); 1620 tcg_gen_xor_i64(tmp, va, vb); 1621 tcg_gen_mov_i64(tmp2, va); 1622 tcg_gen_sub_i64(vc, va, vb); 1623 tcg_gen_xor_i64(tmp2, tmp2, vc); 1624 tcg_gen_and_i64(tmp, tmp, tmp2); 1625 tcg_gen_shri_i64(tmp, tmp, 63); 1626 tcg_gen_movi_i64(tmp2, 0); 1627 gen_helper_check_overflow(tcg_env, tmp, tmp2); 1628 break; 1629 case 0x6D: 1630 /* CMPLE */ 1631 tcg_gen_setcond_i64(TCG_COND_LE, vc, va, vb); 1632 break; 1633 default: 1634 goto invalid_opc; 1635 } 1636 break; 1637 1638 case 0x11: 1639 if (fn7 == 0x20) { 1640 if (rc == 31) { 1641 /* Special case BIS as NOP. */ 1642 break; 1643 } 1644 if (ra == 31) { 1645 /* Special case BIS as MOV. */ 1646 vc = dest_gpr(ctx, rc); 1647 if (islit) { 1648 tcg_gen_movi_i64(vc, lit); 1649 } else { 1650 tcg_gen_mov_i64(vc, load_gpr(ctx, rb)); 1651 } 1652 break; 1653 } 1654 } 1655 1656 vc = dest_gpr(ctx, rc); 1657 vb = load_gpr_lit(ctx, rb, lit, islit); 1658 1659 if (fn7 == 0x28 && ra == 31) { 1660 /* Special case ORNOT as NOT. */ 1661 tcg_gen_not_i64(vc, vb); 1662 break; 1663 } 1664 1665 va = load_gpr(ctx, ra); 1666 switch (fn7) { 1667 case 0x00: 1668 /* AND */ 1669 tcg_gen_and_i64(vc, va, vb); 1670 break; 1671 case 0x08: 1672 /* BIC */ 1673 tcg_gen_andc_i64(vc, va, vb); 1674 break; 1675 case 0x14: 1676 /* CMOVLBS */ 1677 tcg_gen_movcond_i64(TCG_COND_TSTNE, vc, va, tcg_constant_i64(1), 1678 vb, load_gpr(ctx, rc)); 1679 break; 1680 case 0x16: 1681 /* CMOVLBC */ 1682 tcg_gen_movcond_i64(TCG_COND_TSTEQ, vc, va, tcg_constant_i64(1), 1683 vb, load_gpr(ctx, rc)); 1684 break; 1685 case 0x20: 1686 /* BIS */ 1687 tcg_gen_or_i64(vc, va, vb); 1688 break; 1689 case 0x24: 1690 /* CMOVEQ */ 1691 tcg_gen_movcond_i64(TCG_COND_EQ, vc, va, load_zero(ctx), 1692 vb, load_gpr(ctx, rc)); 1693 break; 1694 case 0x26: 1695 /* CMOVNE */ 1696 tcg_gen_movcond_i64(TCG_COND_NE, vc, va, load_zero(ctx), 1697 vb, load_gpr(ctx, rc)); 1698 break; 1699 case 0x28: 1700 /* ORNOT */ 1701 tcg_gen_orc_i64(vc, va, vb); 1702 break; 1703 case 0x40: 1704 /* XOR */ 1705 tcg_gen_xor_i64(vc, va, vb); 1706 break; 1707 case 0x44: 1708 /* CMOVLT */ 1709 tcg_gen_movcond_i64(TCG_COND_LT, vc, va, load_zero(ctx), 1710 vb, load_gpr(ctx, rc)); 1711 break; 1712 case 0x46: 1713 /* CMOVGE */ 1714 tcg_gen_movcond_i64(TCG_COND_GE, vc, va, load_zero(ctx), 1715 vb, load_gpr(ctx, rc)); 1716 break; 1717 case 0x48: 1718 /* EQV */ 1719 tcg_gen_eqv_i64(vc, va, vb); 1720 break; 1721 case 0x61: 1722 /* AMASK */ 1723 REQUIRE_REG_31(ra); 1724 tcg_gen_andi_i64(vc, vb, ~ctx->amask); 1725 break; 1726 case 0x64: 1727 /* CMOVLE */ 1728 tcg_gen_movcond_i64(TCG_COND_LE, vc, va, load_zero(ctx), 1729 vb, load_gpr(ctx, rc)); 1730 break; 1731 case 0x66: 1732 /* CMOVGT */ 1733 tcg_gen_movcond_i64(TCG_COND_GT, vc, va, load_zero(ctx), 1734 vb, load_gpr(ctx, rc)); 1735 break; 1736 case 0x6C: 1737 /* IMPLVER */ 1738 REQUIRE_REG_31(ra); 1739 tcg_gen_movi_i64(vc, ctx->implver); 1740 break; 1741 default: 1742 goto invalid_opc; 1743 } 1744 break; 1745 1746 case 0x12: 1747 vc = dest_gpr(ctx, rc); 1748 va = load_gpr(ctx, ra); 1749 switch (fn7) { 1750 case 0x02: 1751 /* MSKBL */ 1752 gen_msk_l(ctx, vc, va, rb, islit, lit, 0x01); 1753 break; 1754 case 0x06: 1755 /* EXTBL */ 1756 gen_ext_l(ctx, vc, va, rb, islit, lit, 0x01); 1757 break; 1758 case 0x0B: 1759 /* INSBL */ 1760 gen_ins_l(ctx, vc, va, rb, islit, lit, 0x01); 1761 break; 1762 case 0x12: 1763 /* MSKWL */ 1764 gen_msk_l(ctx, vc, va, rb, islit, lit, 0x03); 1765 break; 1766 case 0x16: 1767 /* EXTWL */ 1768 gen_ext_l(ctx, vc, va, rb, islit, lit, 0x03); 1769 break; 1770 case 0x1B: 1771 /* INSWL */ 1772 gen_ins_l(ctx, vc, va, rb, islit, lit, 0x03); 1773 break; 1774 case 0x22: 1775 /* MSKLL */ 1776 gen_msk_l(ctx, vc, va, rb, islit, lit, 0x0f); 1777 break; 1778 case 0x26: 1779 /* EXTLL */ 1780 gen_ext_l(ctx, vc, va, rb, islit, lit, 0x0f); 1781 break; 1782 case 0x2B: 1783 /* INSLL */ 1784 gen_ins_l(ctx, vc, va, rb, islit, lit, 0x0f); 1785 break; 1786 case 0x30: 1787 /* ZAP */ 1788 if (islit) { 1789 gen_zapnoti(vc, va, ~lit); 1790 } else { 1791 gen_helper_zap(vc, va, load_gpr(ctx, rb)); 1792 } 1793 break; 1794 case 0x31: 1795 /* ZAPNOT */ 1796 if (islit) { 1797 gen_zapnoti(vc, va, lit); 1798 } else { 1799 gen_helper_zapnot(vc, va, load_gpr(ctx, rb)); 1800 } 1801 break; 1802 case 0x32: 1803 /* MSKQL */ 1804 gen_msk_l(ctx, vc, va, rb, islit, lit, 0xff); 1805 break; 1806 case 0x34: 1807 /* SRL */ 1808 if (islit) { 1809 tcg_gen_shri_i64(vc, va, lit & 0x3f); 1810 } else { 1811 tmp = tcg_temp_new(); 1812 vb = load_gpr(ctx, rb); 1813 tcg_gen_andi_i64(tmp, vb, 0x3f); 1814 tcg_gen_shr_i64(vc, va, tmp); 1815 } 1816 break; 1817 case 0x36: 1818 /* EXTQL */ 1819 gen_ext_l(ctx, vc, va, rb, islit, lit, 0xff); 1820 break; 1821 case 0x39: 1822 /* SLL */ 1823 if (islit) { 1824 tcg_gen_shli_i64(vc, va, lit & 0x3f); 1825 } else { 1826 tmp = tcg_temp_new(); 1827 vb = load_gpr(ctx, rb); 1828 tcg_gen_andi_i64(tmp, vb, 0x3f); 1829 tcg_gen_shl_i64(vc, va, tmp); 1830 } 1831 break; 1832 case 0x3B: 1833 /* INSQL */ 1834 gen_ins_l(ctx, vc, va, rb, islit, lit, 0xff); 1835 break; 1836 case 0x3C: 1837 /* SRA */ 1838 if (islit) { 1839 tcg_gen_sari_i64(vc, va, lit & 0x3f); 1840 } else { 1841 tmp = tcg_temp_new(); 1842 vb = load_gpr(ctx, rb); 1843 tcg_gen_andi_i64(tmp, vb, 0x3f); 1844 tcg_gen_sar_i64(vc, va, tmp); 1845 } 1846 break; 1847 case 0x52: 1848 /* MSKWH */ 1849 gen_msk_h(ctx, vc, va, rb, islit, lit, 0x03); 1850 break; 1851 case 0x57: 1852 /* INSWH */ 1853 gen_ins_h(ctx, vc, va, rb, islit, lit, 0x03); 1854 break; 1855 case 0x5A: 1856 /* EXTWH */ 1857 gen_ext_h(ctx, vc, va, rb, islit, lit, 0x03); 1858 break; 1859 case 0x62: 1860 /* MSKLH */ 1861 gen_msk_h(ctx, vc, va, rb, islit, lit, 0x0f); 1862 break; 1863 case 0x67: 1864 /* INSLH */ 1865 gen_ins_h(ctx, vc, va, rb, islit, lit, 0x0f); 1866 break; 1867 case 0x6A: 1868 /* EXTLH */ 1869 gen_ext_h(ctx, vc, va, rb, islit, lit, 0x0f); 1870 break; 1871 case 0x72: 1872 /* MSKQH */ 1873 gen_msk_h(ctx, vc, va, rb, islit, lit, 0xff); 1874 break; 1875 case 0x77: 1876 /* INSQH */ 1877 gen_ins_h(ctx, vc, va, rb, islit, lit, 0xff); 1878 break; 1879 case 0x7A: 1880 /* EXTQH */ 1881 gen_ext_h(ctx, vc, va, rb, islit, lit, 0xff); 1882 break; 1883 default: 1884 goto invalid_opc; 1885 } 1886 break; 1887 1888 case 0x13: 1889 vc = dest_gpr(ctx, rc); 1890 vb = load_gpr_lit(ctx, rb, lit, islit); 1891 va = load_gpr(ctx, ra); 1892 switch (fn7) { 1893 case 0x00: 1894 /* MULL */ 1895 tcg_gen_mul_i64(vc, va, vb); 1896 tcg_gen_ext32s_i64(vc, vc); 1897 break; 1898 case 0x20: 1899 /* MULQ */ 1900 tcg_gen_mul_i64(vc, va, vb); 1901 break; 1902 case 0x30: 1903 /* UMULH */ 1904 tmp = tcg_temp_new(); 1905 tcg_gen_mulu2_i64(tmp, vc, va, vb); 1906 break; 1907 case 0x40: 1908 /* MULL/V */ 1909 tmp = tcg_temp_new(); 1910 tcg_gen_ext32s_i64(tmp, va); 1911 tcg_gen_ext32s_i64(vc, vb); 1912 tcg_gen_mul_i64(tmp, tmp, vc); 1913 tcg_gen_ext32s_i64(vc, tmp); 1914 gen_helper_check_overflow(tcg_env, vc, tmp); 1915 break; 1916 case 0x60: 1917 /* MULQ/V */ 1918 tmp = tcg_temp_new(); 1919 tmp2 = tcg_temp_new(); 1920 tcg_gen_muls2_i64(vc, tmp, va, vb); 1921 tcg_gen_sari_i64(tmp2, vc, 63); 1922 gen_helper_check_overflow(tcg_env, tmp, tmp2); 1923 break; 1924 default: 1925 goto invalid_opc; 1926 } 1927 break; 1928 1929 case 0x14: 1930 REQUIRE_AMASK(FIX); 1931 vc = dest_fpr(ctx, rc); 1932 switch (fpfn) { /* fn11 & 0x3F */ 1933 case 0x04: 1934 /* ITOFS */ 1935 REQUIRE_REG_31(rb); 1936 REQUIRE_FEN; 1937 t32 = tcg_temp_new_i32(); 1938 va = load_gpr(ctx, ra); 1939 tcg_gen_extrl_i64_i32(t32, va); 1940 gen_helper_memory_to_s(vc, t32); 1941 break; 1942 case 0x0A: 1943 /* SQRTF */ 1944 REQUIRE_REG_31(ra); 1945 REQUIRE_FEN; 1946 vb = load_fpr(ctx, rb); 1947 gen_helper_sqrtf(vc, tcg_env, vb); 1948 break; 1949 case 0x0B: 1950 /* SQRTS */ 1951 REQUIRE_REG_31(ra); 1952 REQUIRE_FEN; 1953 gen_sqrts(ctx, rb, rc, fn11); 1954 break; 1955 case 0x14: 1956 /* ITOFF */ 1957 REQUIRE_REG_31(rb); 1958 REQUIRE_FEN; 1959 t32 = tcg_temp_new_i32(); 1960 va = load_gpr(ctx, ra); 1961 tcg_gen_extrl_i64_i32(t32, va); 1962 gen_helper_memory_to_f(vc, t32); 1963 break; 1964 case 0x24: 1965 /* ITOFT */ 1966 REQUIRE_REG_31(rb); 1967 REQUIRE_FEN; 1968 va = load_gpr(ctx, ra); 1969 tcg_gen_mov_i64(vc, va); 1970 break; 1971 case 0x2A: 1972 /* SQRTG */ 1973 REQUIRE_REG_31(ra); 1974 REQUIRE_FEN; 1975 vb = load_fpr(ctx, rb); 1976 gen_helper_sqrtg(vc, tcg_env, vb); 1977 break; 1978 case 0x02B: 1979 /* SQRTT */ 1980 REQUIRE_REG_31(ra); 1981 REQUIRE_FEN; 1982 gen_sqrtt(ctx, rb, rc, fn11); 1983 break; 1984 default: 1985 goto invalid_opc; 1986 } 1987 break; 1988 1989 case 0x15: 1990 /* VAX floating point */ 1991 /* XXX: rounding mode and trap are ignored (!) */ 1992 vc = dest_fpr(ctx, rc); 1993 vb = load_fpr(ctx, rb); 1994 va = load_fpr(ctx, ra); 1995 switch (fpfn) { /* fn11 & 0x3F */ 1996 case 0x00: 1997 /* ADDF */ 1998 REQUIRE_FEN; 1999 gen_helper_addf(vc, tcg_env, va, vb); 2000 break; 2001 case 0x01: 2002 /* SUBF */ 2003 REQUIRE_FEN; 2004 gen_helper_subf(vc, tcg_env, va, vb); 2005 break; 2006 case 0x02: 2007 /* MULF */ 2008 REQUIRE_FEN; 2009 gen_helper_mulf(vc, tcg_env, va, vb); 2010 break; 2011 case 0x03: 2012 /* DIVF */ 2013 REQUIRE_FEN; 2014 gen_helper_divf(vc, tcg_env, va, vb); 2015 break; 2016 case 0x1E: 2017 /* CVTDG -- TODO */ 2018 REQUIRE_REG_31(ra); 2019 goto invalid_opc; 2020 case 0x20: 2021 /* ADDG */ 2022 REQUIRE_FEN; 2023 gen_helper_addg(vc, tcg_env, va, vb); 2024 break; 2025 case 0x21: 2026 /* SUBG */ 2027 REQUIRE_FEN; 2028 gen_helper_subg(vc, tcg_env, va, vb); 2029 break; 2030 case 0x22: 2031 /* MULG */ 2032 REQUIRE_FEN; 2033 gen_helper_mulg(vc, tcg_env, va, vb); 2034 break; 2035 case 0x23: 2036 /* DIVG */ 2037 REQUIRE_FEN; 2038 gen_helper_divg(vc, tcg_env, va, vb); 2039 break; 2040 case 0x25: 2041 /* CMPGEQ */ 2042 REQUIRE_FEN; 2043 gen_helper_cmpgeq(vc, tcg_env, va, vb); 2044 break; 2045 case 0x26: 2046 /* CMPGLT */ 2047 REQUIRE_FEN; 2048 gen_helper_cmpglt(vc, tcg_env, va, vb); 2049 break; 2050 case 0x27: 2051 /* CMPGLE */ 2052 REQUIRE_FEN; 2053 gen_helper_cmpgle(vc, tcg_env, va, vb); 2054 break; 2055 case 0x2C: 2056 /* CVTGF */ 2057 REQUIRE_REG_31(ra); 2058 REQUIRE_FEN; 2059 gen_helper_cvtgf(vc, tcg_env, vb); 2060 break; 2061 case 0x2D: 2062 /* CVTGD -- TODO */ 2063 REQUIRE_REG_31(ra); 2064 goto invalid_opc; 2065 case 0x2F: 2066 /* CVTGQ */ 2067 REQUIRE_REG_31(ra); 2068 REQUIRE_FEN; 2069 gen_helper_cvtgq(vc, tcg_env, vb); 2070 break; 2071 case 0x3C: 2072 /* CVTQF */ 2073 REQUIRE_REG_31(ra); 2074 REQUIRE_FEN; 2075 gen_helper_cvtqf(vc, tcg_env, vb); 2076 break; 2077 case 0x3E: 2078 /* CVTQG */ 2079 REQUIRE_REG_31(ra); 2080 REQUIRE_FEN; 2081 gen_helper_cvtqg(vc, tcg_env, vb); 2082 break; 2083 default: 2084 goto invalid_opc; 2085 } 2086 break; 2087 2088 case 0x16: 2089 /* IEEE floating-point */ 2090 switch (fpfn) { /* fn11 & 0x3F */ 2091 case 0x00: 2092 /* ADDS */ 2093 REQUIRE_FEN; 2094 gen_adds(ctx, ra, rb, rc, fn11); 2095 break; 2096 case 0x01: 2097 /* SUBS */ 2098 REQUIRE_FEN; 2099 gen_subs(ctx, ra, rb, rc, fn11); 2100 break; 2101 case 0x02: 2102 /* MULS */ 2103 REQUIRE_FEN; 2104 gen_muls(ctx, ra, rb, rc, fn11); 2105 break; 2106 case 0x03: 2107 /* DIVS */ 2108 REQUIRE_FEN; 2109 gen_divs(ctx, ra, rb, rc, fn11); 2110 break; 2111 case 0x20: 2112 /* ADDT */ 2113 REQUIRE_FEN; 2114 gen_addt(ctx, ra, rb, rc, fn11); 2115 break; 2116 case 0x21: 2117 /* SUBT */ 2118 REQUIRE_FEN; 2119 gen_subt(ctx, ra, rb, rc, fn11); 2120 break; 2121 case 0x22: 2122 /* MULT */ 2123 REQUIRE_FEN; 2124 gen_mult(ctx, ra, rb, rc, fn11); 2125 break; 2126 case 0x23: 2127 /* DIVT */ 2128 REQUIRE_FEN; 2129 gen_divt(ctx, ra, rb, rc, fn11); 2130 break; 2131 case 0x24: 2132 /* CMPTUN */ 2133 REQUIRE_FEN; 2134 gen_cmptun(ctx, ra, rb, rc, fn11); 2135 break; 2136 case 0x25: 2137 /* CMPTEQ */ 2138 REQUIRE_FEN; 2139 gen_cmpteq(ctx, ra, rb, rc, fn11); 2140 break; 2141 case 0x26: 2142 /* CMPTLT */ 2143 REQUIRE_FEN; 2144 gen_cmptlt(ctx, ra, rb, rc, fn11); 2145 break; 2146 case 0x27: 2147 /* CMPTLE */ 2148 REQUIRE_FEN; 2149 gen_cmptle(ctx, ra, rb, rc, fn11); 2150 break; 2151 case 0x2C: 2152 REQUIRE_REG_31(ra); 2153 REQUIRE_FEN; 2154 if (fn11 == 0x2AC || fn11 == 0x6AC) { 2155 /* CVTST */ 2156 gen_cvtst(ctx, rb, rc, fn11); 2157 } else { 2158 /* CVTTS */ 2159 gen_cvtts(ctx, rb, rc, fn11); 2160 } 2161 break; 2162 case 0x2F: 2163 /* CVTTQ */ 2164 REQUIRE_REG_31(ra); 2165 REQUIRE_FEN; 2166 gen_cvttq(ctx, rb, rc, fn11); 2167 break; 2168 case 0x3C: 2169 /* CVTQS */ 2170 REQUIRE_REG_31(ra); 2171 REQUIRE_FEN; 2172 gen_cvtqs(ctx, rb, rc, fn11); 2173 break; 2174 case 0x3E: 2175 /* CVTQT */ 2176 REQUIRE_REG_31(ra); 2177 REQUIRE_FEN; 2178 gen_cvtqt(ctx, rb, rc, fn11); 2179 break; 2180 default: 2181 goto invalid_opc; 2182 } 2183 break; 2184 2185 case 0x17: 2186 switch (fn11) { 2187 case 0x010: 2188 /* CVTLQ */ 2189 REQUIRE_REG_31(ra); 2190 REQUIRE_FEN; 2191 vc = dest_fpr(ctx, rc); 2192 vb = load_fpr(ctx, rb); 2193 gen_cvtlq(vc, vb); 2194 break; 2195 case 0x020: 2196 /* CPYS */ 2197 REQUIRE_FEN; 2198 if (rc == 31) { 2199 /* Special case CPYS as FNOP. */ 2200 } else { 2201 vc = dest_fpr(ctx, rc); 2202 va = load_fpr(ctx, ra); 2203 if (ra == rb) { 2204 /* Special case CPYS as FMOV. */ 2205 tcg_gen_mov_i64(vc, va); 2206 } else { 2207 vb = load_fpr(ctx, rb); 2208 gen_cpy_mask(vc, va, vb, 0, 0x8000000000000000ULL); 2209 } 2210 } 2211 break; 2212 case 0x021: 2213 /* CPYSN */ 2214 REQUIRE_FEN; 2215 vc = dest_fpr(ctx, rc); 2216 vb = load_fpr(ctx, rb); 2217 va = load_fpr(ctx, ra); 2218 gen_cpy_mask(vc, va, vb, 1, 0x8000000000000000ULL); 2219 break; 2220 case 0x022: 2221 /* CPYSE */ 2222 REQUIRE_FEN; 2223 vc = dest_fpr(ctx, rc); 2224 vb = load_fpr(ctx, rb); 2225 va = load_fpr(ctx, ra); 2226 gen_cpy_mask(vc, va, vb, 0, 0xFFF0000000000000ULL); 2227 break; 2228 case 0x024: 2229 /* MT_FPCR */ 2230 REQUIRE_FEN; 2231 va = load_fpr(ctx, ra); 2232 gen_helper_store_fpcr(tcg_env, va); 2233 if (ctx->tb_rm == QUAL_RM_D) { 2234 /* Re-do the copy of the rounding mode to fp_status 2235 the next time we use dynamic rounding. */ 2236 ctx->tb_rm = -1; 2237 } 2238 break; 2239 case 0x025: 2240 /* MF_FPCR */ 2241 REQUIRE_FEN; 2242 va = dest_fpr(ctx, ra); 2243 gen_helper_load_fpcr(va, tcg_env); 2244 break; 2245 case 0x02A: 2246 /* FCMOVEQ */ 2247 REQUIRE_FEN; 2248 gen_fcmov(ctx, TCG_COND_EQ, ra, rb, rc); 2249 break; 2250 case 0x02B: 2251 /* FCMOVNE */ 2252 REQUIRE_FEN; 2253 gen_fcmov(ctx, TCG_COND_NE, ra, rb, rc); 2254 break; 2255 case 0x02C: 2256 /* FCMOVLT */ 2257 REQUIRE_FEN; 2258 gen_fcmov(ctx, TCG_COND_LT, ra, rb, rc); 2259 break; 2260 case 0x02D: 2261 /* FCMOVGE */ 2262 REQUIRE_FEN; 2263 gen_fcmov(ctx, TCG_COND_GE, ra, rb, rc); 2264 break; 2265 case 0x02E: 2266 /* FCMOVLE */ 2267 REQUIRE_FEN; 2268 gen_fcmov(ctx, TCG_COND_LE, ra, rb, rc); 2269 break; 2270 case 0x02F: 2271 /* FCMOVGT */ 2272 REQUIRE_FEN; 2273 gen_fcmov(ctx, TCG_COND_GT, ra, rb, rc); 2274 break; 2275 case 0x030: /* CVTQL */ 2276 case 0x130: /* CVTQL/V */ 2277 case 0x530: /* CVTQL/SV */ 2278 REQUIRE_REG_31(ra); 2279 REQUIRE_FEN; 2280 vc = dest_fpr(ctx, rc); 2281 vb = load_fpr(ctx, rb); 2282 gen_helper_cvtql(vc, tcg_env, vb); 2283 gen_fp_exc_raise(rc, fn11); 2284 break; 2285 default: 2286 goto invalid_opc; 2287 } 2288 break; 2289 2290 case 0x18: 2291 switch ((uint16_t)disp16) { 2292 case 0x0000: 2293 /* TRAPB */ 2294 /* No-op. */ 2295 break; 2296 case 0x0400: 2297 /* EXCB */ 2298 /* No-op. */ 2299 break; 2300 case 0x4000: 2301 /* MB */ 2302 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC); 2303 break; 2304 case 0x4400: 2305 /* WMB */ 2306 tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC); 2307 break; 2308 case 0x8000: 2309 /* FETCH */ 2310 /* No-op */ 2311 break; 2312 case 0xA000: 2313 /* FETCH_M */ 2314 /* No-op */ 2315 break; 2316 case 0xC000: 2317 /* RPCC */ 2318 va = dest_gpr(ctx, ra); 2319 if (translator_io_start(&ctx->base)) { 2320 ret = DISAS_PC_STALE; 2321 } 2322 gen_helper_load_pcc(va, tcg_env); 2323 break; 2324 case 0xE000: 2325 /* RC */ 2326 gen_rx(ctx, ra, 0); 2327 break; 2328 case 0xE800: 2329 /* ECB */ 2330 break; 2331 case 0xF000: 2332 /* RS */ 2333 gen_rx(ctx, ra, 1); 2334 break; 2335 case 0xF800: 2336 /* WH64 */ 2337 /* No-op */ 2338 break; 2339 case 0xFC00: 2340 /* WH64EN */ 2341 /* No-op */ 2342 break; 2343 default: 2344 goto invalid_opc; 2345 } 2346 break; 2347 2348 case 0x19: 2349 /* HW_MFPR (PALcode) */ 2350 #ifndef CONFIG_USER_ONLY 2351 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE); 2352 va = dest_gpr(ctx, ra); 2353 ret = gen_mfpr(ctx, va, insn & 0xffff); 2354 break; 2355 #else 2356 goto invalid_opc; 2357 #endif 2358 2359 case 0x1A: 2360 /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch 2361 prediction stack action, which of course we don't implement. */ 2362 vb = load_gpr(ctx, rb); 2363 if (ra != 31) { 2364 tmp = tcg_temp_new(); 2365 tcg_gen_andi_i64(tmp, vb, ~3); 2366 gen_pc_disp(ctx, ctx->ir[ra], 0); 2367 tcg_gen_mov_i64(cpu_pc, tmp); 2368 } else { 2369 tcg_gen_andi_i64(cpu_pc, vb, ~3); 2370 } 2371 ret = DISAS_PC_UPDATED; 2372 break; 2373 2374 case 0x1B: 2375 /* HW_LD (PALcode) */ 2376 #ifndef CONFIG_USER_ONLY 2377 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE); 2378 { 2379 TCGv addr = tcg_temp_new(); 2380 vb = load_gpr(ctx, rb); 2381 va = dest_gpr(ctx, ra); 2382 2383 tcg_gen_addi_i64(addr, vb, disp12); 2384 switch ((insn >> 12) & 0xF) { 2385 case 0x0: 2386 /* Longword physical access (hw_ldl/p) */ 2387 tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL | MO_ALIGN); 2388 break; 2389 case 0x1: 2390 /* Quadword physical access (hw_ldq/p) */ 2391 tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN); 2392 break; 2393 case 0x2: 2394 /* Longword physical access with lock (hw_ldl_l/p) */ 2395 tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL | MO_ALIGN); 2396 tcg_gen_mov_i64(cpu_lock_addr, addr); 2397 tcg_gen_mov_i64(cpu_lock_value, va); 2398 break; 2399 case 0x3: 2400 /* Quadword physical access with lock (hw_ldq_l/p) */ 2401 tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN); 2402 tcg_gen_mov_i64(cpu_lock_addr, addr); 2403 tcg_gen_mov_i64(cpu_lock_value, va); 2404 break; 2405 case 0x4: 2406 /* Longword virtual PTE fetch (hw_ldl/v) */ 2407 goto invalid_opc; 2408 case 0x5: 2409 /* Quadword virtual PTE fetch (hw_ldq/v) */ 2410 goto invalid_opc; 2411 break; 2412 case 0x6: 2413 /* Invalid */ 2414 goto invalid_opc; 2415 case 0x7: 2416 /* Invaliid */ 2417 goto invalid_opc; 2418 case 0x8: 2419 /* Longword virtual access (hw_ldl) */ 2420 goto invalid_opc; 2421 case 0x9: 2422 /* Quadword virtual access (hw_ldq) */ 2423 goto invalid_opc; 2424 case 0xA: 2425 /* Longword virtual access with protection check (hw_ldl/w) */ 2426 tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, 2427 MO_LESL | MO_ALIGN); 2428 break; 2429 case 0xB: 2430 /* Quadword virtual access with protection check (hw_ldq/w) */ 2431 tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, 2432 MO_LEUQ | MO_ALIGN); 2433 break; 2434 case 0xC: 2435 /* Longword virtual access with alt access mode (hw_ldl/a)*/ 2436 goto invalid_opc; 2437 case 0xD: 2438 /* Quadword virtual access with alt access mode (hw_ldq/a) */ 2439 goto invalid_opc; 2440 case 0xE: 2441 /* Longword virtual access with alternate access mode and 2442 protection checks (hw_ldl/wa) */ 2443 tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, 2444 MO_LESL | MO_ALIGN); 2445 break; 2446 case 0xF: 2447 /* Quadword virtual access with alternate access mode and 2448 protection checks (hw_ldq/wa) */ 2449 tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, 2450 MO_LEUQ | MO_ALIGN); 2451 break; 2452 } 2453 break; 2454 } 2455 #else 2456 goto invalid_opc; 2457 #endif 2458 2459 case 0x1C: 2460 vc = dest_gpr(ctx, rc); 2461 if (fn7 == 0x70) { 2462 /* FTOIT */ 2463 REQUIRE_AMASK(FIX); 2464 REQUIRE_REG_31(rb); 2465 va = load_fpr(ctx, ra); 2466 tcg_gen_mov_i64(vc, va); 2467 break; 2468 } else if (fn7 == 0x78) { 2469 /* FTOIS */ 2470 REQUIRE_AMASK(FIX); 2471 REQUIRE_REG_31(rb); 2472 t32 = tcg_temp_new_i32(); 2473 va = load_fpr(ctx, ra); 2474 gen_helper_s_to_memory(t32, va); 2475 tcg_gen_ext_i32_i64(vc, t32); 2476 break; 2477 } 2478 2479 vb = load_gpr_lit(ctx, rb, lit, islit); 2480 switch (fn7) { 2481 case 0x00: 2482 /* SEXTB */ 2483 REQUIRE_AMASK(BWX); 2484 REQUIRE_REG_31(ra); 2485 tcg_gen_ext8s_i64(vc, vb); 2486 break; 2487 case 0x01: 2488 /* SEXTW */ 2489 REQUIRE_AMASK(BWX); 2490 REQUIRE_REG_31(ra); 2491 tcg_gen_ext16s_i64(vc, vb); 2492 break; 2493 case 0x30: 2494 /* CTPOP */ 2495 REQUIRE_AMASK(CIX); 2496 REQUIRE_REG_31(ra); 2497 REQUIRE_NO_LIT; 2498 tcg_gen_ctpop_i64(vc, vb); 2499 break; 2500 case 0x31: 2501 /* PERR */ 2502 REQUIRE_AMASK(MVI); 2503 REQUIRE_NO_LIT; 2504 va = load_gpr(ctx, ra); 2505 gen_helper_perr(vc, va, vb); 2506 break; 2507 case 0x32: 2508 /* CTLZ */ 2509 REQUIRE_AMASK(CIX); 2510 REQUIRE_REG_31(ra); 2511 REQUIRE_NO_LIT; 2512 tcg_gen_clzi_i64(vc, vb, 64); 2513 break; 2514 case 0x33: 2515 /* CTTZ */ 2516 REQUIRE_AMASK(CIX); 2517 REQUIRE_REG_31(ra); 2518 REQUIRE_NO_LIT; 2519 tcg_gen_ctzi_i64(vc, vb, 64); 2520 break; 2521 case 0x34: 2522 /* UNPKBW */ 2523 REQUIRE_AMASK(MVI); 2524 REQUIRE_REG_31(ra); 2525 REQUIRE_NO_LIT; 2526 gen_helper_unpkbw(vc, vb); 2527 break; 2528 case 0x35: 2529 /* UNPKBL */ 2530 REQUIRE_AMASK(MVI); 2531 REQUIRE_REG_31(ra); 2532 REQUIRE_NO_LIT; 2533 gen_helper_unpkbl(vc, vb); 2534 break; 2535 case 0x36: 2536 /* PKWB */ 2537 REQUIRE_AMASK(MVI); 2538 REQUIRE_REG_31(ra); 2539 REQUIRE_NO_LIT; 2540 gen_helper_pkwb(vc, vb); 2541 break; 2542 case 0x37: 2543 /* PKLB */ 2544 REQUIRE_AMASK(MVI); 2545 REQUIRE_REG_31(ra); 2546 REQUIRE_NO_LIT; 2547 gen_helper_pklb(vc, vb); 2548 break; 2549 case 0x38: 2550 /* MINSB8 */ 2551 REQUIRE_AMASK(MVI); 2552 va = load_gpr(ctx, ra); 2553 gen_helper_minsb8(vc, va, vb); 2554 break; 2555 case 0x39: 2556 /* MINSW4 */ 2557 REQUIRE_AMASK(MVI); 2558 va = load_gpr(ctx, ra); 2559 gen_helper_minsw4(vc, va, vb); 2560 break; 2561 case 0x3A: 2562 /* MINUB8 */ 2563 REQUIRE_AMASK(MVI); 2564 va = load_gpr(ctx, ra); 2565 gen_helper_minub8(vc, va, vb); 2566 break; 2567 case 0x3B: 2568 /* MINUW4 */ 2569 REQUIRE_AMASK(MVI); 2570 va = load_gpr(ctx, ra); 2571 gen_helper_minuw4(vc, va, vb); 2572 break; 2573 case 0x3C: 2574 /* MAXUB8 */ 2575 REQUIRE_AMASK(MVI); 2576 va = load_gpr(ctx, ra); 2577 gen_helper_maxub8(vc, va, vb); 2578 break; 2579 case 0x3D: 2580 /* MAXUW4 */ 2581 REQUIRE_AMASK(MVI); 2582 va = load_gpr(ctx, ra); 2583 gen_helper_maxuw4(vc, va, vb); 2584 break; 2585 case 0x3E: 2586 /* MAXSB8 */ 2587 REQUIRE_AMASK(MVI); 2588 va = load_gpr(ctx, ra); 2589 gen_helper_maxsb8(vc, va, vb); 2590 break; 2591 case 0x3F: 2592 /* MAXSW4 */ 2593 REQUIRE_AMASK(MVI); 2594 va = load_gpr(ctx, ra); 2595 gen_helper_maxsw4(vc, va, vb); 2596 break; 2597 default: 2598 goto invalid_opc; 2599 } 2600 break; 2601 2602 case 0x1D: 2603 /* HW_MTPR (PALcode) */ 2604 #ifndef CONFIG_USER_ONLY 2605 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE); 2606 vb = load_gpr(ctx, rb); 2607 ret = gen_mtpr(ctx, vb, insn & 0xffff); 2608 break; 2609 #else 2610 goto invalid_opc; 2611 #endif 2612 2613 case 0x1E: 2614 /* HW_RET (PALcode) */ 2615 #ifndef CONFIG_USER_ONLY 2616 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE); 2617 if (rb == 31) { 2618 /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return 2619 address from EXC_ADDR. This turns out to be useful for our 2620 emulation PALcode, so continue to accept it. */ 2621 vb = dest_sink(ctx); 2622 tcg_gen_ld_i64(vb, tcg_env, offsetof(CPUAlphaState, exc_addr)); 2623 } else { 2624 vb = load_gpr(ctx, rb); 2625 } 2626 tcg_gen_movi_i64(cpu_lock_addr, -1); 2627 st_flag_byte(load_zero(ctx), ENV_FLAG_RX_SHIFT); 2628 tmp = tcg_temp_new(); 2629 tcg_gen_andi_i64(tmp, vb, 1); 2630 st_flag_byte(tmp, ENV_FLAG_PAL_SHIFT); 2631 tcg_gen_andi_i64(cpu_pc, vb, ~3); 2632 /* Allow interrupts to be recognized right away. */ 2633 ret = DISAS_PC_UPDATED_NOCHAIN; 2634 break; 2635 #else 2636 goto invalid_opc; 2637 #endif 2638 2639 case 0x1F: 2640 /* HW_ST (PALcode) */ 2641 #ifndef CONFIG_USER_ONLY 2642 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE); 2643 { 2644 switch ((insn >> 12) & 0xF) { 2645 case 0x0: 2646 /* Longword physical access */ 2647 va = load_gpr(ctx, ra); 2648 vb = load_gpr(ctx, rb); 2649 tmp = tcg_temp_new(); 2650 tcg_gen_addi_i64(tmp, vb, disp12); 2651 tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LESL | MO_ALIGN); 2652 break; 2653 case 0x1: 2654 /* Quadword physical access */ 2655 va = load_gpr(ctx, ra); 2656 vb = load_gpr(ctx, rb); 2657 tmp = tcg_temp_new(); 2658 tcg_gen_addi_i64(tmp, vb, disp12); 2659 tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN); 2660 break; 2661 case 0x2: 2662 /* Longword physical access with lock */ 2663 ret = gen_store_conditional(ctx, ra, rb, disp12, 2664 MMU_PHYS_IDX, MO_LESL | MO_ALIGN); 2665 break; 2666 case 0x3: 2667 /* Quadword physical access with lock */ 2668 ret = gen_store_conditional(ctx, ra, rb, disp12, 2669 MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN); 2670 break; 2671 case 0x4: 2672 /* Longword virtual access */ 2673 goto invalid_opc; 2674 case 0x5: 2675 /* Quadword virtual access */ 2676 goto invalid_opc; 2677 case 0x6: 2678 /* Invalid */ 2679 goto invalid_opc; 2680 case 0x7: 2681 /* Invalid */ 2682 goto invalid_opc; 2683 case 0x8: 2684 /* Invalid */ 2685 goto invalid_opc; 2686 case 0x9: 2687 /* Invalid */ 2688 goto invalid_opc; 2689 case 0xA: 2690 /* Invalid */ 2691 goto invalid_opc; 2692 case 0xB: 2693 /* Invalid */ 2694 goto invalid_opc; 2695 case 0xC: 2696 /* Longword virtual access with alternate access mode */ 2697 goto invalid_opc; 2698 case 0xD: 2699 /* Quadword virtual access with alternate access mode */ 2700 goto invalid_opc; 2701 case 0xE: 2702 /* Invalid */ 2703 goto invalid_opc; 2704 case 0xF: 2705 /* Invalid */ 2706 goto invalid_opc; 2707 } 2708 break; 2709 } 2710 #else 2711 goto invalid_opc; 2712 #endif 2713 case 0x20: 2714 /* LDF */ 2715 REQUIRE_FEN; 2716 gen_load_fp(ctx, ra, rb, disp16, gen_ldf); 2717 break; 2718 case 0x21: 2719 /* LDG */ 2720 REQUIRE_FEN; 2721 gen_load_fp(ctx, ra, rb, disp16, gen_ldg); 2722 break; 2723 case 0x22: 2724 /* LDS */ 2725 REQUIRE_FEN; 2726 gen_load_fp(ctx, ra, rb, disp16, gen_lds); 2727 break; 2728 case 0x23: 2729 /* LDT */ 2730 REQUIRE_FEN; 2731 gen_load_fp(ctx, ra, rb, disp16, gen_ldt); 2732 break; 2733 case 0x24: 2734 /* STF */ 2735 REQUIRE_FEN; 2736 gen_store_fp(ctx, ra, rb, disp16, gen_stf); 2737 break; 2738 case 0x25: 2739 /* STG */ 2740 REQUIRE_FEN; 2741 gen_store_fp(ctx, ra, rb, disp16, gen_stg); 2742 break; 2743 case 0x26: 2744 /* STS */ 2745 REQUIRE_FEN; 2746 gen_store_fp(ctx, ra, rb, disp16, gen_sts); 2747 break; 2748 case 0x27: 2749 /* STT */ 2750 REQUIRE_FEN; 2751 gen_store_fp(ctx, ra, rb, disp16, gen_stt); 2752 break; 2753 case 0x28: 2754 /* LDL */ 2755 gen_load_int(ctx, ra, rb, disp16, MO_LESL, 0, 0); 2756 break; 2757 case 0x29: 2758 /* LDQ */ 2759 gen_load_int(ctx, ra, rb, disp16, MO_LEUQ, 0, 0); 2760 break; 2761 case 0x2A: 2762 /* LDL_L */ 2763 gen_load_int(ctx, ra, rb, disp16, MO_LESL | MO_ALIGN, 0, 1); 2764 break; 2765 case 0x2B: 2766 /* LDQ_L */ 2767 gen_load_int(ctx, ra, rb, disp16, MO_LEUQ | MO_ALIGN, 0, 1); 2768 break; 2769 case 0x2C: 2770 /* STL */ 2771 gen_store_int(ctx, ra, rb, disp16, MO_LEUL, 0); 2772 break; 2773 case 0x2D: 2774 /* STQ */ 2775 gen_store_int(ctx, ra, rb, disp16, MO_LEUQ, 0); 2776 break; 2777 case 0x2E: 2778 /* STL_C */ 2779 ret = gen_store_conditional(ctx, ra, rb, disp16, 2780 ctx->mem_idx, MO_LESL | MO_ALIGN); 2781 break; 2782 case 0x2F: 2783 /* STQ_C */ 2784 ret = gen_store_conditional(ctx, ra, rb, disp16, 2785 ctx->mem_idx, MO_LEUQ | MO_ALIGN); 2786 break; 2787 case 0x30: 2788 /* BR */ 2789 ret = gen_bdirect(ctx, ra, disp21); 2790 break; 2791 case 0x31: /* FBEQ */ 2792 REQUIRE_FEN; 2793 ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21); 2794 break; 2795 case 0x32: /* FBLT */ 2796 REQUIRE_FEN; 2797 ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21); 2798 break; 2799 case 0x33: /* FBLE */ 2800 REQUIRE_FEN; 2801 ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21); 2802 break; 2803 case 0x34: 2804 /* BSR */ 2805 ret = gen_bdirect(ctx, ra, disp21); 2806 break; 2807 case 0x35: /* FBNE */ 2808 REQUIRE_FEN; 2809 ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21); 2810 break; 2811 case 0x36: /* FBGE */ 2812 REQUIRE_FEN; 2813 ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21); 2814 break; 2815 case 0x37: /* FBGT */ 2816 REQUIRE_FEN; 2817 ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21); 2818 break; 2819 case 0x38: 2820 /* BLBC */ 2821 ret = gen_bcond(ctx, TCG_COND_TSTEQ, ra, disp21); 2822 break; 2823 case 0x39: 2824 /* BEQ */ 2825 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21); 2826 break; 2827 case 0x3A: 2828 /* BLT */ 2829 ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21); 2830 break; 2831 case 0x3B: 2832 /* BLE */ 2833 ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21); 2834 break; 2835 case 0x3C: 2836 /* BLBS */ 2837 ret = gen_bcond(ctx, TCG_COND_TSTNE, ra, disp21); 2838 break; 2839 case 0x3D: 2840 /* BNE */ 2841 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21); 2842 break; 2843 case 0x3E: 2844 /* BGE */ 2845 ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21); 2846 break; 2847 case 0x3F: 2848 /* BGT */ 2849 ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21); 2850 break; 2851 invalid_opc: 2852 ret = gen_invalid(ctx); 2853 break; 2854 raise_fen: 2855 ret = gen_excp(ctx, EXCP_FEN, 0); 2856 break; 2857 } 2858 2859 return ret; 2860 } 2861 2862 static void alpha_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu) 2863 { 2864 DisasContext *ctx = container_of(dcbase, DisasContext, base); 2865 CPUAlphaState *env = cpu_env(cpu); 2866 int64_t bound; 2867 2868 ctx->tbflags = ctx->base.tb->flags; 2869 ctx->mem_idx = alpha_env_mmu_index(env); 2870 ctx->pcrel = ctx->base.tb->cflags & CF_PCREL; 2871 ctx->implver = env->implver; 2872 ctx->amask = env->amask; 2873 2874 #ifdef CONFIG_USER_ONLY 2875 ctx->ir = cpu_std_ir; 2876 ctx->unalign = (ctx->tbflags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN); 2877 #else 2878 ctx->palbr = env->palbr; 2879 ctx->ir = (ctx->tbflags & ENV_FLAG_PAL_MODE ? cpu_pal_ir : cpu_std_ir); 2880 #endif 2881 2882 /* ??? Every TB begins with unset rounding mode, to be initialized on 2883 the first fp insn of the TB. Alternately we could define a proper 2884 default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure 2885 to reset the FP_STATUS to that default at the end of any TB that 2886 changes the default. We could even (gasp) dynamically figure out 2887 what default would be most efficient given the running program. */ 2888 ctx->tb_rm = -1; 2889 /* Similarly for flush-to-zero. */ 2890 ctx->tb_ftz = -1; 2891 2892 ctx->zero = NULL; 2893 ctx->sink = NULL; 2894 2895 /* Bound the number of insns to execute to those left on the page. */ 2896 bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4; 2897 ctx->base.max_insns = MIN(ctx->base.max_insns, bound); 2898 } 2899 2900 static void alpha_tr_tb_start(DisasContextBase *db, CPUState *cpu) 2901 { 2902 } 2903 2904 static void alpha_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu) 2905 { 2906 DisasContext *ctx = container_of(dcbase, DisasContext, base); 2907 2908 if (ctx->pcrel) { 2909 tcg_gen_insn_start(dcbase->pc_next & ~TARGET_PAGE_MASK); 2910 } else { 2911 tcg_gen_insn_start(dcbase->pc_next); 2912 } 2913 } 2914 2915 static void alpha_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) 2916 { 2917 DisasContext *ctx = container_of(dcbase, DisasContext, base); 2918 uint32_t insn = translator_ldl(cpu_env(cpu), &ctx->base, 2919 ctx->base.pc_next); 2920 2921 ctx->base.pc_next += 4; 2922 ctx->base.is_jmp = translate_one(ctx, insn); 2923 2924 free_context_temps(ctx); 2925 } 2926 2927 static void alpha_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu) 2928 { 2929 DisasContext *ctx = container_of(dcbase, DisasContext, base); 2930 2931 switch (ctx->base.is_jmp) { 2932 case DISAS_NORETURN: 2933 break; 2934 case DISAS_TOO_MANY: 2935 gen_goto_tb(ctx, 0, 0); 2936 break; 2937 case DISAS_PC_STALE: 2938 gen_pc_disp(ctx, cpu_pc, 0); 2939 /* FALLTHRU */ 2940 case DISAS_PC_UPDATED: 2941 tcg_gen_lookup_and_goto_ptr(); 2942 break; 2943 case DISAS_PC_UPDATED_NOCHAIN: 2944 tcg_gen_exit_tb(NULL, 0); 2945 break; 2946 default: 2947 g_assert_not_reached(); 2948 } 2949 } 2950 2951 static const TranslatorOps alpha_tr_ops = { 2952 .init_disas_context = alpha_tr_init_disas_context, 2953 .tb_start = alpha_tr_tb_start, 2954 .insn_start = alpha_tr_insn_start, 2955 .translate_insn = alpha_tr_translate_insn, 2956 .tb_stop = alpha_tr_tb_stop, 2957 }; 2958 2959 void alpha_translate_code(CPUState *cpu, TranslationBlock *tb, 2960 int *max_insns, vaddr pc, void *host_pc) 2961 { 2962 DisasContext dc; 2963 translator_loop(cpu, tb, max_insns, pc, host_pc, &alpha_tr_ops, &dc.base); 2964 } 2965