1 /* 2 * Alpha emulation cpu translation for qemu. 3 * 4 * Copyright (c) 2007 Jocelyn Mayer 5 * 6 * This library is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU Lesser General Public 8 * License as published by the Free Software Foundation; either 9 * version 2.1 of the License, or (at your option) any later version. 10 * 11 * This library is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * Lesser General Public License for more details. 15 * 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #include "qemu/osdep.h" 21 #include "cpu.h" 22 #include "sysemu/cpus.h" 23 #include "disas/disas.h" 24 #include "qemu/host-utils.h" 25 #include "exec/exec-all.h" 26 #include "tcg/tcg-op.h" 27 #include "exec/helper-proto.h" 28 #include "exec/helper-gen.h" 29 #include "exec/translator.h" 30 #include "exec/log.h" 31 32 #define HELPER_H "helper.h" 33 #include "exec/helper-info.c.inc" 34 #undef HELPER_H 35 36 #undef ALPHA_DEBUG_DISAS 37 #define CONFIG_SOFTFLOAT_INLINE 38 39 #ifdef ALPHA_DEBUG_DISAS 40 # define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__) 41 #else 42 # define LOG_DISAS(...) do { } while (0) 43 #endif 44 45 typedef struct DisasContext DisasContext; 46 struct DisasContext { 47 DisasContextBase base; 48 49 #ifdef CONFIG_USER_ONLY 50 MemOp unalign; 51 #else 52 uint64_t palbr; 53 #endif 54 uint32_t tbflags; 55 int mem_idx; 56 57 /* True if generating pc-relative code. */ 58 bool pcrel; 59 60 /* implver and amask values for this CPU. */ 61 int implver; 62 int amask; 63 64 /* Current rounding mode for this TB. */ 65 int tb_rm; 66 /* Current flush-to-zero setting for this TB. */ 67 int tb_ftz; 68 69 /* The set of registers active in the current context. 
*/ 70 TCGv *ir; 71 72 /* Temporaries for $31 and $f31 as source and destination. */ 73 TCGv zero; 74 TCGv sink; 75 }; 76 77 #ifdef CONFIG_USER_ONLY 78 #define UNALIGN(C) (C)->unalign 79 #else 80 #define UNALIGN(C) MO_ALIGN 81 #endif 82 83 /* Target-specific return values from translate_one, indicating the 84 state of the TB. Note that DISAS_NEXT indicates that we are not 85 exiting the TB. */ 86 #define DISAS_PC_UPDATED_NOCHAIN DISAS_TARGET_0 87 #define DISAS_PC_UPDATED DISAS_TARGET_1 88 #define DISAS_PC_STALE DISAS_TARGET_2 89 90 /* global register indexes */ 91 static TCGv cpu_std_ir[31]; 92 static TCGv cpu_fir[31]; 93 static TCGv cpu_pc; 94 static TCGv cpu_lock_addr; 95 static TCGv cpu_lock_value; 96 97 #ifndef CONFIG_USER_ONLY 98 static TCGv cpu_pal_ir[31]; 99 #endif 100 101 void alpha_translate_init(void) 102 { 103 #define DEF_VAR(V) { &cpu_##V, #V, offsetof(CPUAlphaState, V) } 104 105 typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar; 106 static const GlobalVar vars[] = { 107 DEF_VAR(pc), 108 DEF_VAR(lock_addr), 109 DEF_VAR(lock_value), 110 }; 111 112 #undef DEF_VAR 113 114 /* Use the symbolic register names that match the disassembler. 
*/ 115 static const char greg_names[31][4] = { 116 "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6", 117 "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp", 118 "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9", 119 "t10", "t11", "ra", "t12", "at", "gp", "sp" 120 }; 121 static const char freg_names[31][4] = { 122 "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", 123 "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15", 124 "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23", 125 "f24", "f25", "f26", "f27", "f28", "f29", "f30" 126 }; 127 #ifndef CONFIG_USER_ONLY 128 static const char shadow_names[8][8] = { 129 "pal_t7", "pal_s0", "pal_s1", "pal_s2", 130 "pal_s3", "pal_s4", "pal_s5", "pal_t11" 131 }; 132 #endif 133 134 int i; 135 136 for (i = 0; i < 31; i++) { 137 cpu_std_ir[i] = tcg_global_mem_new_i64(tcg_env, 138 offsetof(CPUAlphaState, ir[i]), 139 greg_names[i]); 140 } 141 142 for (i = 0; i < 31; i++) { 143 cpu_fir[i] = tcg_global_mem_new_i64(tcg_env, 144 offsetof(CPUAlphaState, fir[i]), 145 freg_names[i]); 146 } 147 148 #ifndef CONFIG_USER_ONLY 149 memcpy(cpu_pal_ir, cpu_std_ir, sizeof(cpu_pal_ir)); 150 for (i = 0; i < 8; i++) { 151 int r = (i == 7 ? 
25 : i + 8); 152 cpu_pal_ir[r] = tcg_global_mem_new_i64(tcg_env, 153 offsetof(CPUAlphaState, 154 shadow[i]), 155 shadow_names[i]); 156 } 157 #endif 158 159 for (i = 0; i < ARRAY_SIZE(vars); ++i) { 160 const GlobalVar *v = &vars[i]; 161 *v->var = tcg_global_mem_new_i64(tcg_env, v->ofs, v->name); 162 } 163 } 164 165 static TCGv load_zero(DisasContext *ctx) 166 { 167 if (!ctx->zero) { 168 ctx->zero = tcg_constant_i64(0); 169 } 170 return ctx->zero; 171 } 172 173 static TCGv dest_sink(DisasContext *ctx) 174 { 175 if (!ctx->sink) { 176 ctx->sink = tcg_temp_new(); 177 } 178 return ctx->sink; 179 } 180 181 static void free_context_temps(DisasContext *ctx) 182 { 183 if (ctx->sink) { 184 tcg_gen_discard_i64(ctx->sink); 185 ctx->sink = NULL; 186 } 187 } 188 189 static TCGv load_gpr(DisasContext *ctx, unsigned reg) 190 { 191 if (likely(reg < 31)) { 192 return ctx->ir[reg]; 193 } else { 194 return load_zero(ctx); 195 } 196 } 197 198 static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg, 199 uint8_t lit, bool islit) 200 { 201 if (islit) { 202 return tcg_constant_i64(lit); 203 } else if (likely(reg < 31)) { 204 return ctx->ir[reg]; 205 } else { 206 return load_zero(ctx); 207 } 208 } 209 210 static TCGv dest_gpr(DisasContext *ctx, unsigned reg) 211 { 212 if (likely(reg < 31)) { 213 return ctx->ir[reg]; 214 } else { 215 return dest_sink(ctx); 216 } 217 } 218 219 static TCGv load_fpr(DisasContext *ctx, unsigned reg) 220 { 221 if (likely(reg < 31)) { 222 return cpu_fir[reg]; 223 } else { 224 return load_zero(ctx); 225 } 226 } 227 228 static TCGv dest_fpr(DisasContext *ctx, unsigned reg) 229 { 230 if (likely(reg < 31)) { 231 return cpu_fir[reg]; 232 } else { 233 return dest_sink(ctx); 234 } 235 } 236 237 static int get_flag_ofs(unsigned shift) 238 { 239 int ofs = offsetof(CPUAlphaState, flags); 240 #if HOST_BIG_ENDIAN 241 ofs += 3 - (shift / 8); 242 #else 243 ofs += shift / 8; 244 #endif 245 return ofs; 246 } 247 248 static void ld_flag_byte(TCGv val, unsigned shift) 249 { 250 
tcg_gen_ld8u_i64(val, tcg_env, get_flag_ofs(shift)); 251 } 252 253 static void st_flag_byte(TCGv val, unsigned shift) 254 { 255 tcg_gen_st8_i64(val, tcg_env, get_flag_ofs(shift)); 256 } 257 258 static void gen_pc_disp(DisasContext *ctx, TCGv dest, int32_t disp) 259 { 260 uint64_t addr = ctx->base.pc_next + disp; 261 if (ctx->pcrel) { 262 tcg_gen_addi_i64(dest, cpu_pc, addr - ctx->base.pc_first); 263 } else { 264 tcg_gen_movi_i64(dest, addr); 265 } 266 } 267 268 static void gen_excp_1(int exception, int error_code) 269 { 270 TCGv_i32 tmp1, tmp2; 271 272 tmp1 = tcg_constant_i32(exception); 273 tmp2 = tcg_constant_i32(error_code); 274 gen_helper_excp(tcg_env, tmp1, tmp2); 275 } 276 277 static DisasJumpType gen_excp(DisasContext *ctx, int exception, int error_code) 278 { 279 gen_pc_disp(ctx, cpu_pc, 0); 280 gen_excp_1(exception, error_code); 281 return DISAS_NORETURN; 282 } 283 284 static inline DisasJumpType gen_invalid(DisasContext *ctx) 285 { 286 return gen_excp(ctx, EXCP_OPCDEC, 0); 287 } 288 289 static void gen_ldf(DisasContext *ctx, TCGv dest, TCGv addr) 290 { 291 TCGv_i32 tmp32 = tcg_temp_new_i32(); 292 tcg_gen_qemu_ld_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx)); 293 gen_helper_memory_to_f(dest, tmp32); 294 } 295 296 static void gen_ldg(DisasContext *ctx, TCGv dest, TCGv addr) 297 { 298 TCGv tmp = tcg_temp_new(); 299 tcg_gen_qemu_ld_i64(tmp, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx)); 300 gen_helper_memory_to_g(dest, tmp); 301 } 302 303 static void gen_lds(DisasContext *ctx, TCGv dest, TCGv addr) 304 { 305 TCGv_i32 tmp32 = tcg_temp_new_i32(); 306 tcg_gen_qemu_ld_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx)); 307 gen_helper_memory_to_s(dest, tmp32); 308 } 309 310 static void gen_ldt(DisasContext *ctx, TCGv dest, TCGv addr) 311 { 312 tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx)); 313 } 314 315 static void gen_load_fp(DisasContext *ctx, int ra, int rb, int32_t disp16, 316 void (*func)(DisasContext *, TCGv, TCGv)) 317 { 
318 /* Loads to $f31 are prefetches, which we can treat as nops. */ 319 if (likely(ra != 31)) { 320 TCGv addr = tcg_temp_new(); 321 tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16); 322 func(ctx, cpu_fir[ra], addr); 323 } 324 } 325 326 static void gen_load_int(DisasContext *ctx, int ra, int rb, int32_t disp16, 327 MemOp op, bool clear, bool locked) 328 { 329 TCGv addr, dest; 330 331 /* LDQ_U with ra $31 is UNOP. Other various loads are forms of 332 prefetches, which we can treat as nops. No worries about 333 missed exceptions here. */ 334 if (unlikely(ra == 31)) { 335 return; 336 } 337 338 addr = tcg_temp_new(); 339 tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16); 340 if (clear) { 341 tcg_gen_andi_i64(addr, addr, ~0x7); 342 } else if (!locked) { 343 op |= UNALIGN(ctx); 344 } 345 346 dest = ctx->ir[ra]; 347 tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, op); 348 349 if (locked) { 350 tcg_gen_mov_i64(cpu_lock_addr, addr); 351 tcg_gen_mov_i64(cpu_lock_value, dest); 352 } 353 } 354 355 static void gen_stf(DisasContext *ctx, TCGv src, TCGv addr) 356 { 357 TCGv_i32 tmp32 = tcg_temp_new_i32(); 358 gen_helper_f_to_memory(tmp32, addr); 359 tcg_gen_qemu_st_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx)); 360 } 361 362 static void gen_stg(DisasContext *ctx, TCGv src, TCGv addr) 363 { 364 TCGv tmp = tcg_temp_new(); 365 gen_helper_g_to_memory(tmp, src); 366 tcg_gen_qemu_st_i64(tmp, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx)); 367 } 368 369 static void gen_sts(DisasContext *ctx, TCGv src, TCGv addr) 370 { 371 TCGv_i32 tmp32 = tcg_temp_new_i32(); 372 gen_helper_s_to_memory(tmp32, src); 373 tcg_gen_qemu_st_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx)); 374 } 375 376 static void gen_stt(DisasContext *ctx, TCGv src, TCGv addr) 377 { 378 tcg_gen_qemu_st_i64(src, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx)); 379 } 380 381 static void gen_store_fp(DisasContext *ctx, int ra, int rb, int32_t disp16, 382 void (*func)(DisasContext *, TCGv, TCGv)) 383 { 384 TCGv addr = 
tcg_temp_new(); 385 tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16); 386 func(ctx, load_fpr(ctx, ra), addr); 387 } 388 389 static void gen_store_int(DisasContext *ctx, int ra, int rb, int32_t disp16, 390 MemOp op, bool clear) 391 { 392 TCGv addr, src; 393 394 addr = tcg_temp_new(); 395 tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16); 396 if (clear) { 397 tcg_gen_andi_i64(addr, addr, ~0x7); 398 } else { 399 op |= UNALIGN(ctx); 400 } 401 402 src = load_gpr(ctx, ra); 403 tcg_gen_qemu_st_i64(src, addr, ctx->mem_idx, op); 404 } 405 406 static DisasJumpType gen_store_conditional(DisasContext *ctx, int ra, int rb, 407 int32_t disp16, int mem_idx, 408 MemOp op) 409 { 410 TCGLabel *lab_fail, *lab_done; 411 TCGv addr, val; 412 413 addr = tcg_temp_new_i64(); 414 tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16); 415 free_context_temps(ctx); 416 417 lab_fail = gen_new_label(); 418 lab_done = gen_new_label(); 419 tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail); 420 421 val = tcg_temp_new_i64(); 422 tcg_gen_atomic_cmpxchg_i64(val, cpu_lock_addr, cpu_lock_value, 423 load_gpr(ctx, ra), mem_idx, op); 424 free_context_temps(ctx); 425 426 if (ra != 31) { 427 tcg_gen_setcond_i64(TCG_COND_EQ, ctx->ir[ra], val, cpu_lock_value); 428 } 429 tcg_gen_br(lab_done); 430 431 gen_set_label(lab_fail); 432 if (ra != 31) { 433 tcg_gen_movi_i64(ctx->ir[ra], 0); 434 } 435 436 gen_set_label(lab_done); 437 tcg_gen_movi_i64(cpu_lock_addr, -1); 438 return DISAS_NEXT; 439 } 440 441 static void gen_goto_tb(DisasContext *ctx, int idx, int32_t disp) 442 { 443 if (translator_use_goto_tb(&ctx->base, ctx->base.pc_next + disp)) { 444 /* With PCREL, PC must always be up-to-date. 
*/ 445 if (ctx->pcrel) { 446 gen_pc_disp(ctx, cpu_pc, disp); 447 tcg_gen_goto_tb(idx); 448 } else { 449 tcg_gen_goto_tb(idx); 450 gen_pc_disp(ctx, cpu_pc, disp); 451 } 452 tcg_gen_exit_tb(ctx->base.tb, idx); 453 } else { 454 gen_pc_disp(ctx, cpu_pc, disp); 455 tcg_gen_lookup_and_goto_ptr(); 456 } 457 } 458 459 static DisasJumpType gen_bdirect(DisasContext *ctx, int ra, int32_t disp) 460 { 461 if (ra != 31) { 462 gen_pc_disp(ctx, ctx->ir[ra], 0); 463 } 464 465 /* Notice branch-to-next; used to initialize RA with the PC. */ 466 if (disp == 0) { 467 return DISAS_NEXT; 468 } 469 gen_goto_tb(ctx, 0, disp); 470 return DISAS_NORETURN; 471 } 472 473 static DisasJumpType gen_bcond_internal(DisasContext *ctx, TCGCond cond, 474 TCGv cmp, uint64_t imm, int32_t disp) 475 { 476 TCGLabel *lab_true = gen_new_label(); 477 478 tcg_gen_brcondi_i64(cond, cmp, imm, lab_true); 479 gen_goto_tb(ctx, 0, 0); 480 gen_set_label(lab_true); 481 gen_goto_tb(ctx, 1, disp); 482 483 return DISAS_NORETURN; 484 } 485 486 static DisasJumpType gen_bcond(DisasContext *ctx, TCGCond cond, int ra, 487 int32_t disp) 488 { 489 return gen_bcond_internal(ctx, cond, load_gpr(ctx, ra), 490 is_tst_cond(cond), disp); 491 } 492 493 /* Fold -0.0 for comparison with COND. */ 494 495 static TCGv_i64 gen_fold_mzero(TCGCond *pcond, uint64_t *pimm, TCGv_i64 src) 496 { 497 TCGv_i64 tmp; 498 499 *pimm = 0; 500 switch (*pcond) { 501 case TCG_COND_LE: 502 case TCG_COND_GT: 503 /* For <= or >, the -0.0 value directly compares the way we want. */ 504 return src; 505 506 case TCG_COND_EQ: 507 case TCG_COND_NE: 508 /* For == or !=, we can compare without the sign bit. */ 509 *pcond = *pcond == TCG_COND_EQ ? TCG_COND_TSTEQ : TCG_COND_TSTNE; 510 *pimm = INT64_MAX; 511 return src; 512 513 case TCG_COND_GE: 514 case TCG_COND_LT: 515 /* For >= or <, map -0.0 to +0.0. 
*/ 516 tmp = tcg_temp_new_i64(); 517 tcg_gen_movcond_i64(TCG_COND_EQ, tmp, 518 src, tcg_constant_i64(INT64_MIN), 519 tcg_constant_i64(0), src); 520 return tmp; 521 522 default: 523 g_assert_not_reached(); 524 } 525 } 526 527 static DisasJumpType gen_fbcond(DisasContext *ctx, TCGCond cond, int ra, 528 int32_t disp) 529 { 530 uint64_t imm; 531 TCGv_i64 tmp = gen_fold_mzero(&cond, &imm, load_fpr(ctx, ra)); 532 return gen_bcond_internal(ctx, cond, tmp, imm, disp); 533 } 534 535 static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc) 536 { 537 uint64_t imm; 538 TCGv_i64 tmp = gen_fold_mzero(&cond, &imm, load_fpr(ctx, ra)); 539 tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), 540 tmp, tcg_constant_i64(imm), 541 load_fpr(ctx, rb), load_fpr(ctx, rc)); 542 } 543 544 #define QUAL_RM_N 0x080 /* Round mode nearest even */ 545 #define QUAL_RM_C 0x000 /* Round mode chopped */ 546 #define QUAL_RM_M 0x040 /* Round mode minus infinity */ 547 #define QUAL_RM_D 0x0c0 /* Round mode dynamic */ 548 #define QUAL_RM_MASK 0x0c0 549 550 #define QUAL_U 0x100 /* Underflow enable (fp output) */ 551 #define QUAL_V 0x100 /* Overflow enable (int output) */ 552 #define QUAL_S 0x400 /* Software completion enable */ 553 #define QUAL_I 0x200 /* Inexact detection enable */ 554 555 static void gen_qual_roundmode(DisasContext *ctx, int fn11) 556 { 557 TCGv_i32 tmp; 558 559 fn11 &= QUAL_RM_MASK; 560 if (fn11 == ctx->tb_rm) { 561 return; 562 } 563 ctx->tb_rm = fn11; 564 565 tmp = tcg_temp_new_i32(); 566 switch (fn11) { 567 case QUAL_RM_N: 568 tcg_gen_movi_i32(tmp, float_round_nearest_even); 569 break; 570 case QUAL_RM_C: 571 tcg_gen_movi_i32(tmp, float_round_to_zero); 572 break; 573 case QUAL_RM_M: 574 tcg_gen_movi_i32(tmp, float_round_down); 575 break; 576 case QUAL_RM_D: 577 tcg_gen_ld8u_i32(tmp, tcg_env, 578 offsetof(CPUAlphaState, fpcr_dyn_round)); 579 break; 580 } 581 582 #if defined(CONFIG_SOFTFLOAT_INLINE) 583 /* ??? 
The "fpu/softfloat.h" interface is to call set_float_rounding_mode. 584 With CONFIG_SOFTFLOAT that expands to an out-of-line call that just 585 sets the one field. */ 586 tcg_gen_st8_i32(tmp, tcg_env, 587 offsetof(CPUAlphaState, fp_status.float_rounding_mode)); 588 #else 589 gen_helper_setroundmode(tmp); 590 #endif 591 } 592 593 static void gen_qual_flushzero(DisasContext *ctx, int fn11) 594 { 595 TCGv_i32 tmp; 596 597 fn11 &= QUAL_U; 598 if (fn11 == ctx->tb_ftz) { 599 return; 600 } 601 ctx->tb_ftz = fn11; 602 603 tmp = tcg_temp_new_i32(); 604 if (fn11) { 605 /* Underflow is enabled, use the FPCR setting. */ 606 tcg_gen_ld8u_i32(tmp, tcg_env, 607 offsetof(CPUAlphaState, fpcr_flush_to_zero)); 608 } else { 609 /* Underflow is disabled, force flush-to-zero. */ 610 tcg_gen_movi_i32(tmp, 1); 611 } 612 613 #if defined(CONFIG_SOFTFLOAT_INLINE) 614 tcg_gen_st8_i32(tmp, tcg_env, 615 offsetof(CPUAlphaState, fp_status.flush_to_zero)); 616 #else 617 gen_helper_setflushzero(tmp); 618 #endif 619 } 620 621 static TCGv gen_ieee_input(DisasContext *ctx, int reg, int fn11, int is_cmp) 622 { 623 TCGv val; 624 625 if (unlikely(reg == 31)) { 626 val = load_zero(ctx); 627 } else { 628 val = cpu_fir[reg]; 629 if ((fn11 & QUAL_S) == 0) { 630 if (is_cmp) { 631 gen_helper_ieee_input_cmp(tcg_env, val); 632 } else { 633 gen_helper_ieee_input(tcg_env, val); 634 } 635 } else { 636 #ifndef CONFIG_USER_ONLY 637 /* In system mode, raise exceptions for denormals like real 638 hardware. In user mode, proceed as if the OS completion 639 handler is handling the denormal as per spec. */ 640 gen_helper_ieee_input_s(tcg_env, val); 641 #endif 642 } 643 } 644 return val; 645 } 646 647 static void gen_fp_exc_raise(int rc, int fn11) 648 { 649 /* ??? We ought to be able to do something with imprecise exceptions. 650 E.g. 
notice we're still in the trap shadow of something within the 651 TB and do not generate the code to signal the exception; end the TB 652 when an exception is forced to arrive, either by consumption of a 653 register value or TRAPB or EXCB. */ 654 TCGv_i32 reg, ign; 655 uint32_t ignore = 0; 656 657 if (!(fn11 & QUAL_U)) { 658 /* Note that QUAL_U == QUAL_V, so ignore either. */ 659 ignore |= FPCR_UNF | FPCR_IOV; 660 } 661 if (!(fn11 & QUAL_I)) { 662 ignore |= FPCR_INE; 663 } 664 ign = tcg_constant_i32(ignore); 665 666 /* ??? Pass in the regno of the destination so that the helper can 667 set EXC_MASK, which contains a bitmask of destination registers 668 that have caused arithmetic traps. A simple userspace emulation 669 does not require this. We do need it for a guest kernel's entArith, 670 or if we were to do something clever with imprecise exceptions. */ 671 reg = tcg_constant_i32(rc + 32); 672 if (fn11 & QUAL_S) { 673 gen_helper_fp_exc_raise_s(tcg_env, ign, reg); 674 } else { 675 gen_helper_fp_exc_raise(tcg_env, ign, reg); 676 } 677 } 678 679 static void gen_cvtlq(TCGv vc, TCGv vb) 680 { 681 TCGv tmp = tcg_temp_new(); 682 683 /* The arithmetic right shift here, plus the sign-extended mask below 684 yields a sign-extended result without an explicit ext32s_i64. 
*/ 685 tcg_gen_shri_i64(tmp, vb, 29); 686 tcg_gen_sari_i64(vc, vb, 32); 687 tcg_gen_deposit_i64(vc, vc, tmp, 0, 30); 688 } 689 690 static void gen_ieee_arith2(DisasContext *ctx, 691 void (*helper)(TCGv, TCGv_ptr, TCGv), 692 int rb, int rc, int fn11) 693 { 694 TCGv vb; 695 696 gen_qual_roundmode(ctx, fn11); 697 gen_qual_flushzero(ctx, fn11); 698 699 vb = gen_ieee_input(ctx, rb, fn11, 0); 700 helper(dest_fpr(ctx, rc), tcg_env, vb); 701 702 gen_fp_exc_raise(rc, fn11); 703 } 704 705 #define IEEE_ARITH2(name) \ 706 static inline void glue(gen_, name)(DisasContext *ctx, \ 707 int rb, int rc, int fn11) \ 708 { \ 709 gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11); \ 710 } 711 IEEE_ARITH2(sqrts) 712 IEEE_ARITH2(sqrtt) 713 IEEE_ARITH2(cvtst) 714 IEEE_ARITH2(cvtts) 715 716 static void gen_cvttq(DisasContext *ctx, int rb, int rc, int fn11) 717 { 718 TCGv vb, vc; 719 720 /* No need to set flushzero, since we have an integer output. */ 721 vb = gen_ieee_input(ctx, rb, fn11, 0); 722 vc = dest_fpr(ctx, rc); 723 724 /* Almost all integer conversions use cropped rounding; 725 special case that. */ 726 if ((fn11 & QUAL_RM_MASK) == QUAL_RM_C) { 727 gen_helper_cvttq_c(vc, tcg_env, vb); 728 } else { 729 gen_qual_roundmode(ctx, fn11); 730 gen_helper_cvttq(vc, tcg_env, vb); 731 } 732 gen_fp_exc_raise(rc, fn11); 733 } 734 735 static void gen_ieee_intcvt(DisasContext *ctx, 736 void (*helper)(TCGv, TCGv_ptr, TCGv), 737 int rb, int rc, int fn11) 738 { 739 TCGv vb, vc; 740 741 gen_qual_roundmode(ctx, fn11); 742 vb = load_fpr(ctx, rb); 743 vc = dest_fpr(ctx, rc); 744 745 /* The only exception that can be raised by integer conversion 746 is inexact. Thus we only need to worry about exceptions when 747 inexact handling is requested. 
*/ 748 if (fn11 & QUAL_I) { 749 helper(vc, tcg_env, vb); 750 gen_fp_exc_raise(rc, fn11); 751 } else { 752 helper(vc, tcg_env, vb); 753 } 754 } 755 756 #define IEEE_INTCVT(name) \ 757 static inline void glue(gen_, name)(DisasContext *ctx, \ 758 int rb, int rc, int fn11) \ 759 { \ 760 gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11); \ 761 } 762 IEEE_INTCVT(cvtqs) 763 IEEE_INTCVT(cvtqt) 764 765 static void gen_cpy_mask(TCGv vc, TCGv va, TCGv vb, bool inv_a, uint64_t mask) 766 { 767 TCGv vmask = tcg_constant_i64(mask); 768 TCGv tmp = tcg_temp_new_i64(); 769 770 if (inv_a) { 771 tcg_gen_andc_i64(tmp, vmask, va); 772 } else { 773 tcg_gen_and_i64(tmp, va, vmask); 774 } 775 776 tcg_gen_andc_i64(vc, vb, vmask); 777 tcg_gen_or_i64(vc, vc, tmp); 778 } 779 780 static void gen_ieee_arith3(DisasContext *ctx, 781 void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv), 782 int ra, int rb, int rc, int fn11) 783 { 784 TCGv va, vb, vc; 785 786 gen_qual_roundmode(ctx, fn11); 787 gen_qual_flushzero(ctx, fn11); 788 789 va = gen_ieee_input(ctx, ra, fn11, 0); 790 vb = gen_ieee_input(ctx, rb, fn11, 0); 791 vc = dest_fpr(ctx, rc); 792 helper(vc, tcg_env, va, vb); 793 794 gen_fp_exc_raise(rc, fn11); 795 } 796 797 #define IEEE_ARITH3(name) \ 798 static inline void glue(gen_, name)(DisasContext *ctx, \ 799 int ra, int rb, int rc, int fn11) \ 800 { \ 801 gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11); \ 802 } 803 IEEE_ARITH3(adds) 804 IEEE_ARITH3(subs) 805 IEEE_ARITH3(muls) 806 IEEE_ARITH3(divs) 807 IEEE_ARITH3(addt) 808 IEEE_ARITH3(subt) 809 IEEE_ARITH3(mult) 810 IEEE_ARITH3(divt) 811 812 static void gen_ieee_compare(DisasContext *ctx, 813 void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv), 814 int ra, int rb, int rc, int fn11) 815 { 816 TCGv va, vb, vc; 817 818 va = gen_ieee_input(ctx, ra, fn11, 1); 819 vb = gen_ieee_input(ctx, rb, fn11, 1); 820 vc = dest_fpr(ctx, rc); 821 helper(vc, tcg_env, va, vb); 822 823 gen_fp_exc_raise(rc, fn11); 824 } 825 826 #define IEEE_CMP3(name) \ 827 static inline 
void glue(gen_, name)(DisasContext *ctx, \ 828 int ra, int rb, int rc, int fn11) \ 829 { \ 830 gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11); \ 831 } 832 IEEE_CMP3(cmptun) 833 IEEE_CMP3(cmpteq) 834 IEEE_CMP3(cmptlt) 835 IEEE_CMP3(cmptle) 836 837 static inline uint64_t zapnot_mask(uint8_t lit) 838 { 839 uint64_t mask = 0; 840 int i; 841 842 for (i = 0; i < 8; ++i) { 843 if ((lit >> i) & 1) { 844 mask |= 0xffull << (i * 8); 845 } 846 } 847 return mask; 848 } 849 850 /* Implement zapnot with an immediate operand, which expands to some 851 form of immediate AND. This is a basic building block in the 852 definition of many of the other byte manipulation instructions. */ 853 static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit) 854 { 855 switch (lit) { 856 case 0x00: 857 tcg_gen_movi_i64(dest, 0); 858 break; 859 case 0x01: 860 tcg_gen_ext8u_i64(dest, src); 861 break; 862 case 0x03: 863 tcg_gen_ext16u_i64(dest, src); 864 break; 865 case 0x0f: 866 tcg_gen_ext32u_i64(dest, src); 867 break; 868 case 0xff: 869 tcg_gen_mov_i64(dest, src); 870 break; 871 default: 872 tcg_gen_andi_i64(dest, src, zapnot_mask(lit)); 873 break; 874 } 875 } 876 877 /* EXTWH, EXTLH, EXTQH */ 878 static void gen_ext_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit, 879 uint8_t lit, uint8_t byte_mask) 880 { 881 if (islit) { 882 int pos = (64 - lit * 8) & 0x3f; 883 int len = cto32(byte_mask) * 8; 884 if (pos < len) { 885 tcg_gen_deposit_z_i64(vc, va, pos, len - pos); 886 } else { 887 tcg_gen_movi_i64(vc, 0); 888 } 889 } else { 890 TCGv tmp = tcg_temp_new(); 891 tcg_gen_shli_i64(tmp, load_gpr(ctx, rb), 3); 892 tcg_gen_neg_i64(tmp, tmp); 893 tcg_gen_andi_i64(tmp, tmp, 0x3f); 894 tcg_gen_shl_i64(vc, va, tmp); 895 } 896 gen_zapnoti(vc, vc, byte_mask); 897 } 898 899 /* EXTBL, EXTWL, EXTLL, EXTQL */ 900 static void gen_ext_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit, 901 uint8_t lit, uint8_t byte_mask) 902 { 903 if (islit) { 904 int pos = (lit & 7) * 8; 905 int len = 
cto32(byte_mask) * 8; 906 if (pos + len >= 64) { 907 len = 64 - pos; 908 } 909 tcg_gen_extract_i64(vc, va, pos, len); 910 } else { 911 TCGv tmp = tcg_temp_new(); 912 tcg_gen_andi_i64(tmp, load_gpr(ctx, rb), 7); 913 tcg_gen_shli_i64(tmp, tmp, 3); 914 tcg_gen_shr_i64(vc, va, tmp); 915 gen_zapnoti(vc, vc, byte_mask); 916 } 917 } 918 919 /* INSWH, INSLH, INSQH */ 920 static void gen_ins_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit, 921 uint8_t lit, uint8_t byte_mask) 922 { 923 if (islit) { 924 int pos = 64 - (lit & 7) * 8; 925 int len = cto32(byte_mask) * 8; 926 if (pos < len) { 927 tcg_gen_extract_i64(vc, va, pos, len - pos); 928 } else { 929 tcg_gen_movi_i64(vc, 0); 930 } 931 } else { 932 TCGv tmp = tcg_temp_new(); 933 TCGv shift = tcg_temp_new(); 934 935 /* The instruction description has us left-shift the byte mask 936 and extract bits <15:8> and apply that zap at the end. This 937 is equivalent to simply performing the zap first and shifting 938 afterward. */ 939 gen_zapnoti(tmp, va, byte_mask); 940 941 /* If (B & 7) == 0, we need to shift by 64 and leave a zero. Do this 942 portably by splitting the shift into two parts: shift_count-1 and 1. 943 Arrange for the -1 by using ones-complement instead of 944 twos-complement in the negation: ~(B * 8) & 63. 
*/ 945 946 tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3); 947 tcg_gen_not_i64(shift, shift); 948 tcg_gen_andi_i64(shift, shift, 0x3f); 949 950 tcg_gen_shr_i64(vc, tmp, shift); 951 tcg_gen_shri_i64(vc, vc, 1); 952 } 953 } 954 955 /* INSBL, INSWL, INSLL, INSQL */ 956 static void gen_ins_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit, 957 uint8_t lit, uint8_t byte_mask) 958 { 959 if (islit) { 960 int pos = (lit & 7) * 8; 961 int len = cto32(byte_mask) * 8; 962 if (pos + len > 64) { 963 len = 64 - pos; 964 } 965 tcg_gen_deposit_z_i64(vc, va, pos, len); 966 } else { 967 TCGv tmp = tcg_temp_new(); 968 TCGv shift = tcg_temp_new(); 969 970 /* The instruction description has us left-shift the byte mask 971 and extract bits <15:8> and apply that zap at the end. This 972 is equivalent to simply performing the zap first and shifting 973 afterward. */ 974 gen_zapnoti(tmp, va, byte_mask); 975 976 tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7); 977 tcg_gen_shli_i64(shift, shift, 3); 978 tcg_gen_shl_i64(vc, tmp, shift); 979 } 980 } 981 982 /* MSKWH, MSKLH, MSKQH */ 983 static void gen_msk_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit, 984 uint8_t lit, uint8_t byte_mask) 985 { 986 if (islit) { 987 gen_zapnoti(vc, va, ~((byte_mask << (lit & 7)) >> 8)); 988 } else { 989 TCGv shift = tcg_temp_new(); 990 TCGv mask = tcg_temp_new(); 991 992 /* The instruction description is as above, where the byte_mask 993 is shifted left, and then we extract bits <15:8>. This can be 994 emulated with a right-shift on the expanded byte mask. This 995 requires extra care because for an input <2:0> == 0 we need a 996 shift of 64 bits in order to generate a zero. This is done by 997 splitting the shift into two parts, the variable shift - 1 998 followed by a constant 1 shift. The code we expand below is 999 equivalent to ~(B * 8) & 63. 
*/ 1000 1001 tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3); 1002 tcg_gen_not_i64(shift, shift); 1003 tcg_gen_andi_i64(shift, shift, 0x3f); 1004 tcg_gen_movi_i64(mask, zapnot_mask (byte_mask)); 1005 tcg_gen_shr_i64(mask, mask, shift); 1006 tcg_gen_shri_i64(mask, mask, 1); 1007 1008 tcg_gen_andc_i64(vc, va, mask); 1009 } 1010 } 1011 1012 /* MSKBL, MSKWL, MSKLL, MSKQL */ 1013 static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit, 1014 uint8_t lit, uint8_t byte_mask) 1015 { 1016 if (islit) { 1017 gen_zapnoti(vc, va, ~(byte_mask << (lit & 7))); 1018 } else { 1019 TCGv shift = tcg_temp_new(); 1020 TCGv mask = tcg_temp_new(); 1021 1022 tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7); 1023 tcg_gen_shli_i64(shift, shift, 3); 1024 tcg_gen_movi_i64(mask, zapnot_mask(byte_mask)); 1025 tcg_gen_shl_i64(mask, mask, shift); 1026 1027 tcg_gen_andc_i64(vc, va, mask); 1028 } 1029 } 1030 1031 static void gen_rx(DisasContext *ctx, int ra, int set) 1032 { 1033 if (ra != 31) { 1034 ld_flag_byte(ctx->ir[ra], ENV_FLAG_RX_SHIFT); 1035 } 1036 1037 st_flag_byte(tcg_constant_i64(set), ENV_FLAG_RX_SHIFT); 1038 } 1039 1040 static DisasJumpType gen_call_pal(DisasContext *ctx, int palcode) 1041 { 1042 /* We're emulating OSF/1 PALcode. Many of these are trivial access 1043 to internal cpu registers. */ 1044 1045 /* Unprivileged PAL call */ 1046 if (palcode >= 0x80 && palcode < 0xC0) { 1047 switch (palcode) { 1048 case 0x86: 1049 /* IMB */ 1050 /* No-op inside QEMU. 
*/ 1051 break; 1052 case 0x9E: 1053 /* RDUNIQUE */ 1054 tcg_gen_ld_i64(ctx->ir[IR_V0], tcg_env, 1055 offsetof(CPUAlphaState, unique)); 1056 break; 1057 case 0x9F: 1058 /* WRUNIQUE */ 1059 tcg_gen_st_i64(ctx->ir[IR_A0], tcg_env, 1060 offsetof(CPUAlphaState, unique)); 1061 break; 1062 default: 1063 palcode &= 0xbf; 1064 goto do_call_pal; 1065 } 1066 return DISAS_NEXT; 1067 } 1068 1069 #ifndef CONFIG_USER_ONLY 1070 /* Privileged PAL code */ 1071 if (palcode < 0x40 && (ctx->tbflags & ENV_FLAG_PS_USER) == 0) { 1072 switch (palcode) { 1073 case 0x01: 1074 /* CFLUSH */ 1075 /* No-op inside QEMU. */ 1076 break; 1077 case 0x02: 1078 /* DRAINA */ 1079 /* No-op inside QEMU. */ 1080 break; 1081 case 0x2D: 1082 /* WRVPTPTR */ 1083 tcg_gen_st_i64(ctx->ir[IR_A0], tcg_env, 1084 offsetof(CPUAlphaState, vptptr)); 1085 break; 1086 case 0x31: 1087 /* WRVAL */ 1088 tcg_gen_st_i64(ctx->ir[IR_A0], tcg_env, 1089 offsetof(CPUAlphaState, sysval)); 1090 break; 1091 case 0x32: 1092 /* RDVAL */ 1093 tcg_gen_ld_i64(ctx->ir[IR_V0], tcg_env, 1094 offsetof(CPUAlphaState, sysval)); 1095 break; 1096 1097 case 0x35: 1098 /* SWPIPL */ 1099 /* Note that we already know we're in kernel mode, so we know 1100 that PS only contains the 3 IPL bits. */ 1101 ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT); 1102 1103 /* But make sure and store only the 3 IPL bits from the user. */ 1104 { 1105 TCGv tmp = tcg_temp_new(); 1106 tcg_gen_andi_i64(tmp, ctx->ir[IR_A0], PS_INT_MASK); 1107 st_flag_byte(tmp, ENV_FLAG_PS_SHIFT); 1108 } 1109 1110 /* Allow interrupts to be recognized right away. 
*/ 1111 gen_pc_disp(ctx, cpu_pc, 0); 1112 return DISAS_PC_UPDATED_NOCHAIN; 1113 1114 case 0x36: 1115 /* RDPS */ 1116 ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT); 1117 break; 1118 1119 case 0x38: 1120 /* WRUSP */ 1121 tcg_gen_st_i64(ctx->ir[IR_A0], tcg_env, 1122 offsetof(CPUAlphaState, usp)); 1123 break; 1124 case 0x3A: 1125 /* RDUSP */ 1126 tcg_gen_ld_i64(ctx->ir[IR_V0], tcg_env, 1127 offsetof(CPUAlphaState, usp)); 1128 break; 1129 case 0x3C: 1130 /* WHAMI */ 1131 tcg_gen_ld32s_i64(ctx->ir[IR_V0], tcg_env, 1132 -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index)); 1133 break; 1134 1135 case 0x3E: 1136 /* WTINT */ 1137 tcg_gen_st_i32(tcg_constant_i32(1), tcg_env, 1138 -offsetof(AlphaCPU, env) + 1139 offsetof(CPUState, halted)); 1140 tcg_gen_movi_i64(ctx->ir[IR_V0], 0); 1141 return gen_excp(ctx, EXCP_HALTED, 0); 1142 1143 default: 1144 palcode &= 0x3f; 1145 goto do_call_pal; 1146 } 1147 return DISAS_NEXT; 1148 } 1149 #endif 1150 return gen_invalid(ctx); 1151 1152 do_call_pal: 1153 #ifdef CONFIG_USER_ONLY 1154 return gen_excp(ctx, EXCP_CALL_PAL, palcode); 1155 #else 1156 { 1157 TCGv tmp = tcg_temp_new(); 1158 uint64_t entry; 1159 1160 gen_pc_disp(ctx, tmp, 0); 1161 if (ctx->tbflags & ENV_FLAG_PAL_MODE) { 1162 tcg_gen_ori_i64(tmp, tmp, 1); 1163 } else { 1164 st_flag_byte(tcg_constant_i64(1), ENV_FLAG_PAL_SHIFT); 1165 } 1166 tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUAlphaState, exc_addr)); 1167 1168 entry = ctx->palbr; 1169 entry += (palcode & 0x80 1170 ? 
0x2000 + (palcode - 0x80) * 64
                  : 0x1000 + palcode * 64);

        tcg_gen_movi_i64(cpu_pc, entry);
        return DISAS_PC_UPDATED;
    }
#endif
}

#ifndef CONFIG_USER_ONLY

/* Flag or'd into cpu_pr_data()'s return value to mark a 32-bit field;
   callers strip it with ~PR_LONG before using the offset.  */
#define PR_LONG         0x200000

/*
 * Map an internal processor-register number (as used by HW_MFPR/HW_MTPR)
 * to the offset of its backing field within CPUAlphaState.
 *
 * Returns 0 for registers with no storage here -- the callers treat
 * those as read-zero / write-ignore.  The PR_LONG bit is set for
 * fields that are 32-bit rather than 64-bit wide.
 */
static int cpu_pr_data(int pr)
{
    switch (pr) {
    case 2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
    case 3: return offsetof(CPUAlphaState, trap_arg0);
    case 4: return offsetof(CPUAlphaState, trap_arg1);
    case 5: return offsetof(CPUAlphaState, trap_arg2);
    case 6: return offsetof(CPUAlphaState, exc_addr);
    case 7: return offsetof(CPUAlphaState, palbr);
    case 8: return offsetof(CPUAlphaState, ptbr);
    case 9: return offsetof(CPUAlphaState, vptptr);
    case 10: return offsetof(CPUAlphaState, unique);
    case 11: return offsetof(CPUAlphaState, sysval);
    case 12: return offsetof(CPUAlphaState, usp);

    case 40 ... 63:
        /* PALtemp scratch registers.  */
        return offsetof(CPUAlphaState, scratch[pr - 40]);

    case 251:
        return offsetof(CPUAlphaState, alarm_expire);
    }
    return 0;
}

/*
 * Emit code for HW_MFPR: read internal processor register REGNO into VA.
 *
 * Returns the DisasJumpType for the translation loop; most registers
 * continue with DISAS_NEXT, but reading WALLTIME/VMTIME ends the TB
 * (DISAS_PC_STALE) when translator_io_start() requests it.
 */
static DisasJumpType gen_mfpr(DisasContext *ctx, TCGv va, int regno)
{
    void (*helper)(TCGv);
    int data;

    switch (regno) {
    case 32 ... 39:
        /* Accessing the "non-shadow" general registers.  */
        regno = regno == 39 ? 25 : regno - 32 + 8;
        tcg_gen_mov_i64(va, cpu_std_ir[regno]);
        break;

    case 250: /* WALLTIME */
        helper = gen_helper_get_walltime;
        goto do_helper;
    case 249: /* VMTIME */
        helper = gen_helper_get_vmtime;
    do_helper:
        /* The helper call is the same on both paths; the branch only
           decides whether the TB must end so the I/O is recognized.  */
        if (translator_io_start(&ctx->base)) {
            helper(va);
            return DISAS_PC_STALE;
        } else {
            helper(va);
        }
        break;

    case 0: /* PS */
        ld_flag_byte(va, ENV_FLAG_PS_SHIFT);
        break;
    case 1: /* FEN */
        ld_flag_byte(va, ENV_FLAG_FEN_SHIFT);
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data == 0) {
            tcg_gen_movi_i64(va, 0);
        } else if (data & PR_LONG) {
            tcg_gen_ld32s_i64(va, tcg_env, data & ~PR_LONG);
        } else {
            tcg_gen_ld_i64(va, tcg_env, data);
        }
        break;
    }

    return DISAS_NEXT;
}

/*
 * Emit code for HW_MTPR: write VB to internal processor register REGNO.
 *
 * Returns the DisasJumpType for the translation loop.  Several registers
 * force the TB to end: WAIT halts the CPU via an exception, HALT and
 * PALBR return DISAS_PC_STALE, and ALARM does so when
 * translator_io_start() requests it.
 */
static DisasJumpType gen_mtpr(DisasContext *ctx, TCGv vb, int regno)
{
    int data;
    DisasJumpType ret = DISAS_NEXT;

    switch (regno) {
    case 255:
        /* TBIA */
        gen_helper_tbia(tcg_env);
        break;

    case 254:
        /* TBIS */
        gen_helper_tbis(tcg_env, vb);
        break;

    case 253:
        /* WAIT */
        /* Set cpu->halted and raise EXCP_HALTED to exit the main loop.  */
        tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
                       -offsetof(AlphaCPU, env) + offsetof(CPUState, halted));
        return gen_excp(ctx, EXCP_HALTED, 0);

    case 252:
        /* HALT */
        gen_helper_halt(vb);
        return DISAS_PC_STALE;

    case 251:
        /* ALARM */
        if (translator_io_start(&ctx->base)) {
            ret = DISAS_PC_STALE;
        }
        gen_helper_set_alarm(tcg_env, vb);
        break;

    case 7:
        /* PALBR */
        tcg_gen_st_i64(vb, tcg_env, offsetof(CPUAlphaState, palbr));
        /* Changing the PAL base register implies un-chaining all of the TBs
           that ended with a CALL_PAL.  Since the base register usually only
           changes during boot, flushing everything works well.  */
        gen_helper_tb_flush(tcg_env);
        return DISAS_PC_STALE;

    case 32 ... 39:
        /* Accessing the "non-shadow" general registers.  */
        regno = regno == 39 ? 25 : regno - 32 + 8;
        tcg_gen_mov_i64(cpu_std_ir[regno], vb);
        break;

    case 0: /* PS */
        st_flag_byte(vb, ENV_FLAG_PS_SHIFT);
        break;
    case 1: /* FEN */
        st_flag_byte(vb, ENV_FLAG_FEN_SHIFT);
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data != 0) {
            if (data & PR_LONG) {
                tcg_gen_st32_i64(vb, tcg_env, data & ~PR_LONG);
            } else {
                tcg_gen_st_i64(vb, tcg_env, data);
            }
        }
        break;
    }

    return ret;
}
#endif /* !USER_ONLY*/

/*
 * Decode-time requirement checks used inside translate_one().  Each one
 * bails out via "goto invalid_opc" (or "goto raise_fen") in the enclosing
 * function when the condition is not met.
 */

/* Reject the literal-operand instruction format (bit 12 set).  */
#define REQUIRE_NO_LIT                          \
    do {                                        \
        if (real_islit) {                       \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

/* Require the CPU to implement the given AMASK feature.  */
#define REQUIRE_AMASK(FLAG)                     \
    do {                                        \
        if ((ctx->amask & AMASK_##FLAG) == 0) { \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

/* Require the given TB flag (e.g. ENV_FLAG_PAL_MODE) to be set.  */
#define REQUIRE_TB_FLAG(FLAG)                   \
    do {                                        \
        if ((ctx->tbflags & (FLAG)) == 0) {     \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

/* Require a register field to be $31/$f31 (i.e. unused).  */
#define REQUIRE_REG_31(WHICH)                   \
    do {                                        \
        if (WHICH != 31) {                      \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

/* Require the FP enable flag; otherwise raise the FEN fault.  */
#define REQUIRE_FEN                             \
    do {                                        \
        if (!(ctx->tbflags & ENV_FLAG_FEN)) {   \
            goto raise_fen;                     \
        }                                       \
    } while (0)

/*
 * Translate a single instruction INSN, emitting TCG ops into the current
 * TB.  Returns the DisasJumpType describing how the TB continues.
 */
static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
{
    int32_t disp21, disp16, disp12 __attribute__((unused));
    uint16_t fn11;
    uint8_t opc, ra, rb, rc, fpfn, fn7, lit;
    bool islit, real_islit;
    TCGv va, vb, vc, tmp, tmp2;
    TCGv_i32 t32;
    DisasJumpType ret;

    /* Decode all instruction fields */
    opc = extract32(insn, 26, 6);
1379 ra = extract32(insn, 21, 5); 1380 rb = extract32(insn, 16, 5); 1381 rc = extract32(insn, 0, 5); 1382 real_islit = islit = extract32(insn, 12, 1); 1383 lit = extract32(insn, 13, 8); 1384 1385 disp21 = sextract32(insn, 0, 21) * 4; 1386 disp16 = sextract32(insn, 0, 16); 1387 disp12 = sextract32(insn, 0, 12); 1388 1389 fn11 = extract32(insn, 5, 11); 1390 fpfn = extract32(insn, 5, 6); 1391 fn7 = extract32(insn, 5, 7); 1392 1393 if (rb == 31 && !islit) { 1394 islit = true; 1395 lit = 0; 1396 } 1397 1398 ret = DISAS_NEXT; 1399 switch (opc) { 1400 case 0x00: 1401 /* CALL_PAL */ 1402 ret = gen_call_pal(ctx, insn & 0x03ffffff); 1403 break; 1404 case 0x01: 1405 /* OPC01 */ 1406 goto invalid_opc; 1407 case 0x02: 1408 /* OPC02 */ 1409 goto invalid_opc; 1410 case 0x03: 1411 /* OPC03 */ 1412 goto invalid_opc; 1413 case 0x04: 1414 /* OPC04 */ 1415 goto invalid_opc; 1416 case 0x05: 1417 /* OPC05 */ 1418 goto invalid_opc; 1419 case 0x06: 1420 /* OPC06 */ 1421 goto invalid_opc; 1422 case 0x07: 1423 /* OPC07 */ 1424 goto invalid_opc; 1425 1426 case 0x09: 1427 /* LDAH */ 1428 disp16 = (uint32_t)disp16 << 16; 1429 /* fall through */ 1430 case 0x08: 1431 /* LDA */ 1432 va = dest_gpr(ctx, ra); 1433 /* It's worth special-casing immediate loads. 
*/ 1434 if (rb == 31) { 1435 tcg_gen_movi_i64(va, disp16); 1436 } else { 1437 tcg_gen_addi_i64(va, load_gpr(ctx, rb), disp16); 1438 } 1439 break; 1440 1441 case 0x0A: 1442 /* LDBU */ 1443 REQUIRE_AMASK(BWX); 1444 gen_load_int(ctx, ra, rb, disp16, MO_UB, 0, 0); 1445 break; 1446 case 0x0B: 1447 /* LDQ_U */ 1448 gen_load_int(ctx, ra, rb, disp16, MO_LEUQ, 1, 0); 1449 break; 1450 case 0x0C: 1451 /* LDWU */ 1452 REQUIRE_AMASK(BWX); 1453 gen_load_int(ctx, ra, rb, disp16, MO_LEUW, 0, 0); 1454 break; 1455 case 0x0D: 1456 /* STW */ 1457 REQUIRE_AMASK(BWX); 1458 gen_store_int(ctx, ra, rb, disp16, MO_LEUW, 0); 1459 break; 1460 case 0x0E: 1461 /* STB */ 1462 REQUIRE_AMASK(BWX); 1463 gen_store_int(ctx, ra, rb, disp16, MO_UB, 0); 1464 break; 1465 case 0x0F: 1466 /* STQ_U */ 1467 gen_store_int(ctx, ra, rb, disp16, MO_LEUQ, 1); 1468 break; 1469 1470 case 0x10: 1471 vc = dest_gpr(ctx, rc); 1472 vb = load_gpr_lit(ctx, rb, lit, islit); 1473 1474 if (ra == 31) { 1475 if (fn7 == 0x00) { 1476 /* Special case ADDL as SEXTL. */ 1477 tcg_gen_ext32s_i64(vc, vb); 1478 break; 1479 } 1480 if (fn7 == 0x29) { 1481 /* Special case SUBQ as NEGQ. */ 1482 tcg_gen_neg_i64(vc, vb); 1483 break; 1484 } 1485 } 1486 1487 va = load_gpr(ctx, ra); 1488 switch (fn7) { 1489 case 0x00: 1490 /* ADDL */ 1491 tcg_gen_add_i64(vc, va, vb); 1492 tcg_gen_ext32s_i64(vc, vc); 1493 break; 1494 case 0x02: 1495 /* S4ADDL */ 1496 tmp = tcg_temp_new(); 1497 tcg_gen_shli_i64(tmp, va, 2); 1498 tcg_gen_add_i64(tmp, tmp, vb); 1499 tcg_gen_ext32s_i64(vc, tmp); 1500 break; 1501 case 0x09: 1502 /* SUBL */ 1503 tcg_gen_sub_i64(vc, va, vb); 1504 tcg_gen_ext32s_i64(vc, vc); 1505 break; 1506 case 0x0B: 1507 /* S4SUBL */ 1508 tmp = tcg_temp_new(); 1509 tcg_gen_shli_i64(tmp, va, 2); 1510 tcg_gen_sub_i64(tmp, tmp, vb); 1511 tcg_gen_ext32s_i64(vc, tmp); 1512 break; 1513 case 0x0F: 1514 /* CMPBGE */ 1515 if (ra == 31) { 1516 /* Special case 0 >= X as X == 0. 
*/ 1517 gen_helper_cmpbe0(vc, vb); 1518 } else { 1519 gen_helper_cmpbge(vc, va, vb); 1520 } 1521 break; 1522 case 0x12: 1523 /* S8ADDL */ 1524 tmp = tcg_temp_new(); 1525 tcg_gen_shli_i64(tmp, va, 3); 1526 tcg_gen_add_i64(tmp, tmp, vb); 1527 tcg_gen_ext32s_i64(vc, tmp); 1528 break; 1529 case 0x1B: 1530 /* S8SUBL */ 1531 tmp = tcg_temp_new(); 1532 tcg_gen_shli_i64(tmp, va, 3); 1533 tcg_gen_sub_i64(tmp, tmp, vb); 1534 tcg_gen_ext32s_i64(vc, tmp); 1535 break; 1536 case 0x1D: 1537 /* CMPULT */ 1538 tcg_gen_setcond_i64(TCG_COND_LTU, vc, va, vb); 1539 break; 1540 case 0x20: 1541 /* ADDQ */ 1542 tcg_gen_add_i64(vc, va, vb); 1543 break; 1544 case 0x22: 1545 /* S4ADDQ */ 1546 tmp = tcg_temp_new(); 1547 tcg_gen_shli_i64(tmp, va, 2); 1548 tcg_gen_add_i64(vc, tmp, vb); 1549 break; 1550 case 0x29: 1551 /* SUBQ */ 1552 tcg_gen_sub_i64(vc, va, vb); 1553 break; 1554 case 0x2B: 1555 /* S4SUBQ */ 1556 tmp = tcg_temp_new(); 1557 tcg_gen_shli_i64(tmp, va, 2); 1558 tcg_gen_sub_i64(vc, tmp, vb); 1559 break; 1560 case 0x2D: 1561 /* CMPEQ */ 1562 tcg_gen_setcond_i64(TCG_COND_EQ, vc, va, vb); 1563 break; 1564 case 0x32: 1565 /* S8ADDQ */ 1566 tmp = tcg_temp_new(); 1567 tcg_gen_shli_i64(tmp, va, 3); 1568 tcg_gen_add_i64(vc, tmp, vb); 1569 break; 1570 case 0x3B: 1571 /* S8SUBQ */ 1572 tmp = tcg_temp_new(); 1573 tcg_gen_shli_i64(tmp, va, 3); 1574 tcg_gen_sub_i64(vc, tmp, vb); 1575 break; 1576 case 0x3D: 1577 /* CMPULE */ 1578 tcg_gen_setcond_i64(TCG_COND_LEU, vc, va, vb); 1579 break; 1580 case 0x40: 1581 /* ADDL/V */ 1582 tmp = tcg_temp_new(); 1583 tcg_gen_ext32s_i64(tmp, va); 1584 tcg_gen_ext32s_i64(vc, vb); 1585 tcg_gen_add_i64(tmp, tmp, vc); 1586 tcg_gen_ext32s_i64(vc, tmp); 1587 gen_helper_check_overflow(tcg_env, vc, tmp); 1588 break; 1589 case 0x49: 1590 /* SUBL/V */ 1591 tmp = tcg_temp_new(); 1592 tcg_gen_ext32s_i64(tmp, va); 1593 tcg_gen_ext32s_i64(vc, vb); 1594 tcg_gen_sub_i64(tmp, tmp, vc); 1595 tcg_gen_ext32s_i64(vc, tmp); 1596 gen_helper_check_overflow(tcg_env, vc, tmp); 1597 break; 
1598 case 0x4D: 1599 /* CMPLT */ 1600 tcg_gen_setcond_i64(TCG_COND_LT, vc, va, vb); 1601 break; 1602 case 0x60: 1603 /* ADDQ/V */ 1604 tmp = tcg_temp_new(); 1605 tmp2 = tcg_temp_new(); 1606 tcg_gen_eqv_i64(tmp, va, vb); 1607 tcg_gen_mov_i64(tmp2, va); 1608 tcg_gen_add_i64(vc, va, vb); 1609 tcg_gen_xor_i64(tmp2, tmp2, vc); 1610 tcg_gen_and_i64(tmp, tmp, tmp2); 1611 tcg_gen_shri_i64(tmp, tmp, 63); 1612 tcg_gen_movi_i64(tmp2, 0); 1613 gen_helper_check_overflow(tcg_env, tmp, tmp2); 1614 break; 1615 case 0x69: 1616 /* SUBQ/V */ 1617 tmp = tcg_temp_new(); 1618 tmp2 = tcg_temp_new(); 1619 tcg_gen_xor_i64(tmp, va, vb); 1620 tcg_gen_mov_i64(tmp2, va); 1621 tcg_gen_sub_i64(vc, va, vb); 1622 tcg_gen_xor_i64(tmp2, tmp2, vc); 1623 tcg_gen_and_i64(tmp, tmp, tmp2); 1624 tcg_gen_shri_i64(tmp, tmp, 63); 1625 tcg_gen_movi_i64(tmp2, 0); 1626 gen_helper_check_overflow(tcg_env, tmp, tmp2); 1627 break; 1628 case 0x6D: 1629 /* CMPLE */ 1630 tcg_gen_setcond_i64(TCG_COND_LE, vc, va, vb); 1631 break; 1632 default: 1633 goto invalid_opc; 1634 } 1635 break; 1636 1637 case 0x11: 1638 if (fn7 == 0x20) { 1639 if (rc == 31) { 1640 /* Special case BIS as NOP. */ 1641 break; 1642 } 1643 if (ra == 31) { 1644 /* Special case BIS as MOV. */ 1645 vc = dest_gpr(ctx, rc); 1646 if (islit) { 1647 tcg_gen_movi_i64(vc, lit); 1648 } else { 1649 tcg_gen_mov_i64(vc, load_gpr(ctx, rb)); 1650 } 1651 break; 1652 } 1653 } 1654 1655 vc = dest_gpr(ctx, rc); 1656 vb = load_gpr_lit(ctx, rb, lit, islit); 1657 1658 if (fn7 == 0x28 && ra == 31) { 1659 /* Special case ORNOT as NOT. 
*/ 1660 tcg_gen_not_i64(vc, vb); 1661 break; 1662 } 1663 1664 va = load_gpr(ctx, ra); 1665 switch (fn7) { 1666 case 0x00: 1667 /* AND */ 1668 tcg_gen_and_i64(vc, va, vb); 1669 break; 1670 case 0x08: 1671 /* BIC */ 1672 tcg_gen_andc_i64(vc, va, vb); 1673 break; 1674 case 0x14: 1675 /* CMOVLBS */ 1676 tcg_gen_movcond_i64(TCG_COND_TSTNE, vc, va, tcg_constant_i64(1), 1677 vb, load_gpr(ctx, rc)); 1678 break; 1679 case 0x16: 1680 /* CMOVLBC */ 1681 tcg_gen_movcond_i64(TCG_COND_TSTEQ, vc, va, tcg_constant_i64(1), 1682 vb, load_gpr(ctx, rc)); 1683 break; 1684 case 0x20: 1685 /* BIS */ 1686 tcg_gen_or_i64(vc, va, vb); 1687 break; 1688 case 0x24: 1689 /* CMOVEQ */ 1690 tcg_gen_movcond_i64(TCG_COND_EQ, vc, va, load_zero(ctx), 1691 vb, load_gpr(ctx, rc)); 1692 break; 1693 case 0x26: 1694 /* CMOVNE */ 1695 tcg_gen_movcond_i64(TCG_COND_NE, vc, va, load_zero(ctx), 1696 vb, load_gpr(ctx, rc)); 1697 break; 1698 case 0x28: 1699 /* ORNOT */ 1700 tcg_gen_orc_i64(vc, va, vb); 1701 break; 1702 case 0x40: 1703 /* XOR */ 1704 tcg_gen_xor_i64(vc, va, vb); 1705 break; 1706 case 0x44: 1707 /* CMOVLT */ 1708 tcg_gen_movcond_i64(TCG_COND_LT, vc, va, load_zero(ctx), 1709 vb, load_gpr(ctx, rc)); 1710 break; 1711 case 0x46: 1712 /* CMOVGE */ 1713 tcg_gen_movcond_i64(TCG_COND_GE, vc, va, load_zero(ctx), 1714 vb, load_gpr(ctx, rc)); 1715 break; 1716 case 0x48: 1717 /* EQV */ 1718 tcg_gen_eqv_i64(vc, va, vb); 1719 break; 1720 case 0x61: 1721 /* AMASK */ 1722 REQUIRE_REG_31(ra); 1723 tcg_gen_andi_i64(vc, vb, ~ctx->amask); 1724 break; 1725 case 0x64: 1726 /* CMOVLE */ 1727 tcg_gen_movcond_i64(TCG_COND_LE, vc, va, load_zero(ctx), 1728 vb, load_gpr(ctx, rc)); 1729 break; 1730 case 0x66: 1731 /* CMOVGT */ 1732 tcg_gen_movcond_i64(TCG_COND_GT, vc, va, load_zero(ctx), 1733 vb, load_gpr(ctx, rc)); 1734 break; 1735 case 0x6C: 1736 /* IMPLVER */ 1737 REQUIRE_REG_31(ra); 1738 tcg_gen_movi_i64(vc, ctx->implver); 1739 break; 1740 default: 1741 goto invalid_opc; 1742 } 1743 break; 1744 1745 case 0x12: 1746 vc = 
dest_gpr(ctx, rc); 1747 va = load_gpr(ctx, ra); 1748 switch (fn7) { 1749 case 0x02: 1750 /* MSKBL */ 1751 gen_msk_l(ctx, vc, va, rb, islit, lit, 0x01); 1752 break; 1753 case 0x06: 1754 /* EXTBL */ 1755 gen_ext_l(ctx, vc, va, rb, islit, lit, 0x01); 1756 break; 1757 case 0x0B: 1758 /* INSBL */ 1759 gen_ins_l(ctx, vc, va, rb, islit, lit, 0x01); 1760 break; 1761 case 0x12: 1762 /* MSKWL */ 1763 gen_msk_l(ctx, vc, va, rb, islit, lit, 0x03); 1764 break; 1765 case 0x16: 1766 /* EXTWL */ 1767 gen_ext_l(ctx, vc, va, rb, islit, lit, 0x03); 1768 break; 1769 case 0x1B: 1770 /* INSWL */ 1771 gen_ins_l(ctx, vc, va, rb, islit, lit, 0x03); 1772 break; 1773 case 0x22: 1774 /* MSKLL */ 1775 gen_msk_l(ctx, vc, va, rb, islit, lit, 0x0f); 1776 break; 1777 case 0x26: 1778 /* EXTLL */ 1779 gen_ext_l(ctx, vc, va, rb, islit, lit, 0x0f); 1780 break; 1781 case 0x2B: 1782 /* INSLL */ 1783 gen_ins_l(ctx, vc, va, rb, islit, lit, 0x0f); 1784 break; 1785 case 0x30: 1786 /* ZAP */ 1787 if (islit) { 1788 gen_zapnoti(vc, va, ~lit); 1789 } else { 1790 gen_helper_zap(vc, va, load_gpr(ctx, rb)); 1791 } 1792 break; 1793 case 0x31: 1794 /* ZAPNOT */ 1795 if (islit) { 1796 gen_zapnoti(vc, va, lit); 1797 } else { 1798 gen_helper_zapnot(vc, va, load_gpr(ctx, rb)); 1799 } 1800 break; 1801 case 0x32: 1802 /* MSKQL */ 1803 gen_msk_l(ctx, vc, va, rb, islit, lit, 0xff); 1804 break; 1805 case 0x34: 1806 /* SRL */ 1807 if (islit) { 1808 tcg_gen_shri_i64(vc, va, lit & 0x3f); 1809 } else { 1810 tmp = tcg_temp_new(); 1811 vb = load_gpr(ctx, rb); 1812 tcg_gen_andi_i64(tmp, vb, 0x3f); 1813 tcg_gen_shr_i64(vc, va, tmp); 1814 } 1815 break; 1816 case 0x36: 1817 /* EXTQL */ 1818 gen_ext_l(ctx, vc, va, rb, islit, lit, 0xff); 1819 break; 1820 case 0x39: 1821 /* SLL */ 1822 if (islit) { 1823 tcg_gen_shli_i64(vc, va, lit & 0x3f); 1824 } else { 1825 tmp = tcg_temp_new(); 1826 vb = load_gpr(ctx, rb); 1827 tcg_gen_andi_i64(tmp, vb, 0x3f); 1828 tcg_gen_shl_i64(vc, va, tmp); 1829 } 1830 break; 1831 case 0x3B: 1832 /* INSQL */ 1833 
gen_ins_l(ctx, vc, va, rb, islit, lit, 0xff); 1834 break; 1835 case 0x3C: 1836 /* SRA */ 1837 if (islit) { 1838 tcg_gen_sari_i64(vc, va, lit & 0x3f); 1839 } else { 1840 tmp = tcg_temp_new(); 1841 vb = load_gpr(ctx, rb); 1842 tcg_gen_andi_i64(tmp, vb, 0x3f); 1843 tcg_gen_sar_i64(vc, va, tmp); 1844 } 1845 break; 1846 case 0x52: 1847 /* MSKWH */ 1848 gen_msk_h(ctx, vc, va, rb, islit, lit, 0x03); 1849 break; 1850 case 0x57: 1851 /* INSWH */ 1852 gen_ins_h(ctx, vc, va, rb, islit, lit, 0x03); 1853 break; 1854 case 0x5A: 1855 /* EXTWH */ 1856 gen_ext_h(ctx, vc, va, rb, islit, lit, 0x03); 1857 break; 1858 case 0x62: 1859 /* MSKLH */ 1860 gen_msk_h(ctx, vc, va, rb, islit, lit, 0x0f); 1861 break; 1862 case 0x67: 1863 /* INSLH */ 1864 gen_ins_h(ctx, vc, va, rb, islit, lit, 0x0f); 1865 break; 1866 case 0x6A: 1867 /* EXTLH */ 1868 gen_ext_h(ctx, vc, va, rb, islit, lit, 0x0f); 1869 break; 1870 case 0x72: 1871 /* MSKQH */ 1872 gen_msk_h(ctx, vc, va, rb, islit, lit, 0xff); 1873 break; 1874 case 0x77: 1875 /* INSQH */ 1876 gen_ins_h(ctx, vc, va, rb, islit, lit, 0xff); 1877 break; 1878 case 0x7A: 1879 /* EXTQH */ 1880 gen_ext_h(ctx, vc, va, rb, islit, lit, 0xff); 1881 break; 1882 default: 1883 goto invalid_opc; 1884 } 1885 break; 1886 1887 case 0x13: 1888 vc = dest_gpr(ctx, rc); 1889 vb = load_gpr_lit(ctx, rb, lit, islit); 1890 va = load_gpr(ctx, ra); 1891 switch (fn7) { 1892 case 0x00: 1893 /* MULL */ 1894 tcg_gen_mul_i64(vc, va, vb); 1895 tcg_gen_ext32s_i64(vc, vc); 1896 break; 1897 case 0x20: 1898 /* MULQ */ 1899 tcg_gen_mul_i64(vc, va, vb); 1900 break; 1901 case 0x30: 1902 /* UMULH */ 1903 tmp = tcg_temp_new(); 1904 tcg_gen_mulu2_i64(tmp, vc, va, vb); 1905 break; 1906 case 0x40: 1907 /* MULL/V */ 1908 tmp = tcg_temp_new(); 1909 tcg_gen_ext32s_i64(tmp, va); 1910 tcg_gen_ext32s_i64(vc, vb); 1911 tcg_gen_mul_i64(tmp, tmp, vc); 1912 tcg_gen_ext32s_i64(vc, tmp); 1913 gen_helper_check_overflow(tcg_env, vc, tmp); 1914 break; 1915 case 0x60: 1916 /* MULQ/V */ 1917 tmp = tcg_temp_new(); 
1918 tmp2 = tcg_temp_new(); 1919 tcg_gen_muls2_i64(vc, tmp, va, vb); 1920 tcg_gen_sari_i64(tmp2, vc, 63); 1921 gen_helper_check_overflow(tcg_env, tmp, tmp2); 1922 break; 1923 default: 1924 goto invalid_opc; 1925 } 1926 break; 1927 1928 case 0x14: 1929 REQUIRE_AMASK(FIX); 1930 vc = dest_fpr(ctx, rc); 1931 switch (fpfn) { /* fn11 & 0x3F */ 1932 case 0x04: 1933 /* ITOFS */ 1934 REQUIRE_REG_31(rb); 1935 REQUIRE_FEN; 1936 t32 = tcg_temp_new_i32(); 1937 va = load_gpr(ctx, ra); 1938 tcg_gen_extrl_i64_i32(t32, va); 1939 gen_helper_memory_to_s(vc, t32); 1940 break; 1941 case 0x0A: 1942 /* SQRTF */ 1943 REQUIRE_REG_31(ra); 1944 REQUIRE_FEN; 1945 vb = load_fpr(ctx, rb); 1946 gen_helper_sqrtf(vc, tcg_env, vb); 1947 break; 1948 case 0x0B: 1949 /* SQRTS */ 1950 REQUIRE_REG_31(ra); 1951 REQUIRE_FEN; 1952 gen_sqrts(ctx, rb, rc, fn11); 1953 break; 1954 case 0x14: 1955 /* ITOFF */ 1956 REQUIRE_REG_31(rb); 1957 REQUIRE_FEN; 1958 t32 = tcg_temp_new_i32(); 1959 va = load_gpr(ctx, ra); 1960 tcg_gen_extrl_i64_i32(t32, va); 1961 gen_helper_memory_to_f(vc, t32); 1962 break; 1963 case 0x24: 1964 /* ITOFT */ 1965 REQUIRE_REG_31(rb); 1966 REQUIRE_FEN; 1967 va = load_gpr(ctx, ra); 1968 tcg_gen_mov_i64(vc, va); 1969 break; 1970 case 0x2A: 1971 /* SQRTG */ 1972 REQUIRE_REG_31(ra); 1973 REQUIRE_FEN; 1974 vb = load_fpr(ctx, rb); 1975 gen_helper_sqrtg(vc, tcg_env, vb); 1976 break; 1977 case 0x02B: 1978 /* SQRTT */ 1979 REQUIRE_REG_31(ra); 1980 REQUIRE_FEN; 1981 gen_sqrtt(ctx, rb, rc, fn11); 1982 break; 1983 default: 1984 goto invalid_opc; 1985 } 1986 break; 1987 1988 case 0x15: 1989 /* VAX floating point */ 1990 /* XXX: rounding mode and trap are ignored (!) 
*/ 1991 vc = dest_fpr(ctx, rc); 1992 vb = load_fpr(ctx, rb); 1993 va = load_fpr(ctx, ra); 1994 switch (fpfn) { /* fn11 & 0x3F */ 1995 case 0x00: 1996 /* ADDF */ 1997 REQUIRE_FEN; 1998 gen_helper_addf(vc, tcg_env, va, vb); 1999 break; 2000 case 0x01: 2001 /* SUBF */ 2002 REQUIRE_FEN; 2003 gen_helper_subf(vc, tcg_env, va, vb); 2004 break; 2005 case 0x02: 2006 /* MULF */ 2007 REQUIRE_FEN; 2008 gen_helper_mulf(vc, tcg_env, va, vb); 2009 break; 2010 case 0x03: 2011 /* DIVF */ 2012 REQUIRE_FEN; 2013 gen_helper_divf(vc, tcg_env, va, vb); 2014 break; 2015 case 0x1E: 2016 /* CVTDG -- TODO */ 2017 REQUIRE_REG_31(ra); 2018 goto invalid_opc; 2019 case 0x20: 2020 /* ADDG */ 2021 REQUIRE_FEN; 2022 gen_helper_addg(vc, tcg_env, va, vb); 2023 break; 2024 case 0x21: 2025 /* SUBG */ 2026 REQUIRE_FEN; 2027 gen_helper_subg(vc, tcg_env, va, vb); 2028 break; 2029 case 0x22: 2030 /* MULG */ 2031 REQUIRE_FEN; 2032 gen_helper_mulg(vc, tcg_env, va, vb); 2033 break; 2034 case 0x23: 2035 /* DIVG */ 2036 REQUIRE_FEN; 2037 gen_helper_divg(vc, tcg_env, va, vb); 2038 break; 2039 case 0x25: 2040 /* CMPGEQ */ 2041 REQUIRE_FEN; 2042 gen_helper_cmpgeq(vc, tcg_env, va, vb); 2043 break; 2044 case 0x26: 2045 /* CMPGLT */ 2046 REQUIRE_FEN; 2047 gen_helper_cmpglt(vc, tcg_env, va, vb); 2048 break; 2049 case 0x27: 2050 /* CMPGLE */ 2051 REQUIRE_FEN; 2052 gen_helper_cmpgle(vc, tcg_env, va, vb); 2053 break; 2054 case 0x2C: 2055 /* CVTGF */ 2056 REQUIRE_REG_31(ra); 2057 REQUIRE_FEN; 2058 gen_helper_cvtgf(vc, tcg_env, vb); 2059 break; 2060 case 0x2D: 2061 /* CVTGD -- TODO */ 2062 REQUIRE_REG_31(ra); 2063 goto invalid_opc; 2064 case 0x2F: 2065 /* CVTGQ */ 2066 REQUIRE_REG_31(ra); 2067 REQUIRE_FEN; 2068 gen_helper_cvtgq(vc, tcg_env, vb); 2069 break; 2070 case 0x3C: 2071 /* CVTQF */ 2072 REQUIRE_REG_31(ra); 2073 REQUIRE_FEN; 2074 gen_helper_cvtqf(vc, tcg_env, vb); 2075 break; 2076 case 0x3E: 2077 /* CVTQG */ 2078 REQUIRE_REG_31(ra); 2079 REQUIRE_FEN; 2080 gen_helper_cvtqg(vc, tcg_env, vb); 2081 break; 2082 default: 
2083 goto invalid_opc; 2084 } 2085 break; 2086 2087 case 0x16: 2088 /* IEEE floating-point */ 2089 switch (fpfn) { /* fn11 & 0x3F */ 2090 case 0x00: 2091 /* ADDS */ 2092 REQUIRE_FEN; 2093 gen_adds(ctx, ra, rb, rc, fn11); 2094 break; 2095 case 0x01: 2096 /* SUBS */ 2097 REQUIRE_FEN; 2098 gen_subs(ctx, ra, rb, rc, fn11); 2099 break; 2100 case 0x02: 2101 /* MULS */ 2102 REQUIRE_FEN; 2103 gen_muls(ctx, ra, rb, rc, fn11); 2104 break; 2105 case 0x03: 2106 /* DIVS */ 2107 REQUIRE_FEN; 2108 gen_divs(ctx, ra, rb, rc, fn11); 2109 break; 2110 case 0x20: 2111 /* ADDT */ 2112 REQUIRE_FEN; 2113 gen_addt(ctx, ra, rb, rc, fn11); 2114 break; 2115 case 0x21: 2116 /* SUBT */ 2117 REQUIRE_FEN; 2118 gen_subt(ctx, ra, rb, rc, fn11); 2119 break; 2120 case 0x22: 2121 /* MULT */ 2122 REQUIRE_FEN; 2123 gen_mult(ctx, ra, rb, rc, fn11); 2124 break; 2125 case 0x23: 2126 /* DIVT */ 2127 REQUIRE_FEN; 2128 gen_divt(ctx, ra, rb, rc, fn11); 2129 break; 2130 case 0x24: 2131 /* CMPTUN */ 2132 REQUIRE_FEN; 2133 gen_cmptun(ctx, ra, rb, rc, fn11); 2134 break; 2135 case 0x25: 2136 /* CMPTEQ */ 2137 REQUIRE_FEN; 2138 gen_cmpteq(ctx, ra, rb, rc, fn11); 2139 break; 2140 case 0x26: 2141 /* CMPTLT */ 2142 REQUIRE_FEN; 2143 gen_cmptlt(ctx, ra, rb, rc, fn11); 2144 break; 2145 case 0x27: 2146 /* CMPTLE */ 2147 REQUIRE_FEN; 2148 gen_cmptle(ctx, ra, rb, rc, fn11); 2149 break; 2150 case 0x2C: 2151 REQUIRE_REG_31(ra); 2152 REQUIRE_FEN; 2153 if (fn11 == 0x2AC || fn11 == 0x6AC) { 2154 /* CVTST */ 2155 gen_cvtst(ctx, rb, rc, fn11); 2156 } else { 2157 /* CVTTS */ 2158 gen_cvtts(ctx, rb, rc, fn11); 2159 } 2160 break; 2161 case 0x2F: 2162 /* CVTTQ */ 2163 REQUIRE_REG_31(ra); 2164 REQUIRE_FEN; 2165 gen_cvttq(ctx, rb, rc, fn11); 2166 break; 2167 case 0x3C: 2168 /* CVTQS */ 2169 REQUIRE_REG_31(ra); 2170 REQUIRE_FEN; 2171 gen_cvtqs(ctx, rb, rc, fn11); 2172 break; 2173 case 0x3E: 2174 /* CVTQT */ 2175 REQUIRE_REG_31(ra); 2176 REQUIRE_FEN; 2177 gen_cvtqt(ctx, rb, rc, fn11); 2178 break; 2179 default: 2180 goto invalid_opc; 2181 
} 2182 break; 2183 2184 case 0x17: 2185 switch (fn11) { 2186 case 0x010: 2187 /* CVTLQ */ 2188 REQUIRE_REG_31(ra); 2189 REQUIRE_FEN; 2190 vc = dest_fpr(ctx, rc); 2191 vb = load_fpr(ctx, rb); 2192 gen_cvtlq(vc, vb); 2193 break; 2194 case 0x020: 2195 /* CPYS */ 2196 REQUIRE_FEN; 2197 if (rc == 31) { 2198 /* Special case CPYS as FNOP. */ 2199 } else { 2200 vc = dest_fpr(ctx, rc); 2201 va = load_fpr(ctx, ra); 2202 if (ra == rb) { 2203 /* Special case CPYS as FMOV. */ 2204 tcg_gen_mov_i64(vc, va); 2205 } else { 2206 vb = load_fpr(ctx, rb); 2207 gen_cpy_mask(vc, va, vb, 0, 0x8000000000000000ULL); 2208 } 2209 } 2210 break; 2211 case 0x021: 2212 /* CPYSN */ 2213 REQUIRE_FEN; 2214 vc = dest_fpr(ctx, rc); 2215 vb = load_fpr(ctx, rb); 2216 va = load_fpr(ctx, ra); 2217 gen_cpy_mask(vc, va, vb, 1, 0x8000000000000000ULL); 2218 break; 2219 case 0x022: 2220 /* CPYSE */ 2221 REQUIRE_FEN; 2222 vc = dest_fpr(ctx, rc); 2223 vb = load_fpr(ctx, rb); 2224 va = load_fpr(ctx, ra); 2225 gen_cpy_mask(vc, va, vb, 0, 0xFFF0000000000000ULL); 2226 break; 2227 case 0x024: 2228 /* MT_FPCR */ 2229 REQUIRE_FEN; 2230 va = load_fpr(ctx, ra); 2231 gen_helper_store_fpcr(tcg_env, va); 2232 if (ctx->tb_rm == QUAL_RM_D) { 2233 /* Re-do the copy of the rounding mode to fp_status 2234 the next time we use dynamic rounding. 
*/ 2235 ctx->tb_rm = -1; 2236 } 2237 break; 2238 case 0x025: 2239 /* MF_FPCR */ 2240 REQUIRE_FEN; 2241 va = dest_fpr(ctx, ra); 2242 gen_helper_load_fpcr(va, tcg_env); 2243 break; 2244 case 0x02A: 2245 /* FCMOVEQ */ 2246 REQUIRE_FEN; 2247 gen_fcmov(ctx, TCG_COND_EQ, ra, rb, rc); 2248 break; 2249 case 0x02B: 2250 /* FCMOVNE */ 2251 REQUIRE_FEN; 2252 gen_fcmov(ctx, TCG_COND_NE, ra, rb, rc); 2253 break; 2254 case 0x02C: 2255 /* FCMOVLT */ 2256 REQUIRE_FEN; 2257 gen_fcmov(ctx, TCG_COND_LT, ra, rb, rc); 2258 break; 2259 case 0x02D: 2260 /* FCMOVGE */ 2261 REQUIRE_FEN; 2262 gen_fcmov(ctx, TCG_COND_GE, ra, rb, rc); 2263 break; 2264 case 0x02E: 2265 /* FCMOVLE */ 2266 REQUIRE_FEN; 2267 gen_fcmov(ctx, TCG_COND_LE, ra, rb, rc); 2268 break; 2269 case 0x02F: 2270 /* FCMOVGT */ 2271 REQUIRE_FEN; 2272 gen_fcmov(ctx, TCG_COND_GT, ra, rb, rc); 2273 break; 2274 case 0x030: /* CVTQL */ 2275 case 0x130: /* CVTQL/V */ 2276 case 0x530: /* CVTQL/SV */ 2277 REQUIRE_REG_31(ra); 2278 REQUIRE_FEN; 2279 vc = dest_fpr(ctx, rc); 2280 vb = load_fpr(ctx, rb); 2281 gen_helper_cvtql(vc, tcg_env, vb); 2282 gen_fp_exc_raise(rc, fn11); 2283 break; 2284 default: 2285 goto invalid_opc; 2286 } 2287 break; 2288 2289 case 0x18: 2290 switch ((uint16_t)disp16) { 2291 case 0x0000: 2292 /* TRAPB */ 2293 /* No-op. */ 2294 break; 2295 case 0x0400: 2296 /* EXCB */ 2297 /* No-op. 
*/ 2298 break; 2299 case 0x4000: 2300 /* MB */ 2301 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC); 2302 break; 2303 case 0x4400: 2304 /* WMB */ 2305 tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC); 2306 break; 2307 case 0x8000: 2308 /* FETCH */ 2309 /* No-op */ 2310 break; 2311 case 0xA000: 2312 /* FETCH_M */ 2313 /* No-op */ 2314 break; 2315 case 0xC000: 2316 /* RPCC */ 2317 va = dest_gpr(ctx, ra); 2318 if (translator_io_start(&ctx->base)) { 2319 ret = DISAS_PC_STALE; 2320 } 2321 gen_helper_load_pcc(va, tcg_env); 2322 break; 2323 case 0xE000: 2324 /* RC */ 2325 gen_rx(ctx, ra, 0); 2326 break; 2327 case 0xE800: 2328 /* ECB */ 2329 break; 2330 case 0xF000: 2331 /* RS */ 2332 gen_rx(ctx, ra, 1); 2333 break; 2334 case 0xF800: 2335 /* WH64 */ 2336 /* No-op */ 2337 break; 2338 case 0xFC00: 2339 /* WH64EN */ 2340 /* No-op */ 2341 break; 2342 default: 2343 goto invalid_opc; 2344 } 2345 break; 2346 2347 case 0x19: 2348 /* HW_MFPR (PALcode) */ 2349 #ifndef CONFIG_USER_ONLY 2350 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE); 2351 va = dest_gpr(ctx, ra); 2352 ret = gen_mfpr(ctx, va, insn & 0xffff); 2353 break; 2354 #else 2355 goto invalid_opc; 2356 #endif 2357 2358 case 0x1A: 2359 /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch 2360 prediction stack action, which of course we don't implement. 
*/ 2361 vb = load_gpr(ctx, rb); 2362 if (ra != 31) { 2363 tmp = tcg_temp_new(); 2364 tcg_gen_andi_i64(tmp, vb, ~3); 2365 gen_pc_disp(ctx, ctx->ir[ra], 0); 2366 tcg_gen_mov_i64(cpu_pc, tmp); 2367 } else { 2368 tcg_gen_andi_i64(cpu_pc, vb, ~3); 2369 } 2370 ret = DISAS_PC_UPDATED; 2371 break; 2372 2373 case 0x1B: 2374 /* HW_LD (PALcode) */ 2375 #ifndef CONFIG_USER_ONLY 2376 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE); 2377 { 2378 TCGv addr = tcg_temp_new(); 2379 vb = load_gpr(ctx, rb); 2380 va = dest_gpr(ctx, ra); 2381 2382 tcg_gen_addi_i64(addr, vb, disp12); 2383 switch ((insn >> 12) & 0xF) { 2384 case 0x0: 2385 /* Longword physical access (hw_ldl/p) */ 2386 tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL | MO_ALIGN); 2387 break; 2388 case 0x1: 2389 /* Quadword physical access (hw_ldq/p) */ 2390 tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN); 2391 break; 2392 case 0x2: 2393 /* Longword physical access with lock (hw_ldl_l/p) */ 2394 tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL | MO_ALIGN); 2395 tcg_gen_mov_i64(cpu_lock_addr, addr); 2396 tcg_gen_mov_i64(cpu_lock_value, va); 2397 break; 2398 case 0x3: 2399 /* Quadword physical access with lock (hw_ldq_l/p) */ 2400 tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN); 2401 tcg_gen_mov_i64(cpu_lock_addr, addr); 2402 tcg_gen_mov_i64(cpu_lock_value, va); 2403 break; 2404 case 0x4: 2405 /* Longword virtual PTE fetch (hw_ldl/v) */ 2406 goto invalid_opc; 2407 case 0x5: 2408 /* Quadword virtual PTE fetch (hw_ldq/v) */ 2409 goto invalid_opc; 2410 break; 2411 case 0x6: 2412 /* Invalid */ 2413 goto invalid_opc; 2414 case 0x7: 2415 /* Invaliid */ 2416 goto invalid_opc; 2417 case 0x8: 2418 /* Longword virtual access (hw_ldl) */ 2419 goto invalid_opc; 2420 case 0x9: 2421 /* Quadword virtual access (hw_ldq) */ 2422 goto invalid_opc; 2423 case 0xA: 2424 /* Longword virtual access with protection check (hw_ldl/w) */ 2425 tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, 2426 MO_LESL | MO_ALIGN); 2427 break; 
2428 case 0xB: 2429 /* Quadword virtual access with protection check (hw_ldq/w) */ 2430 tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, 2431 MO_LEUQ | MO_ALIGN); 2432 break; 2433 case 0xC: 2434 /* Longword virtual access with alt access mode (hw_ldl/a)*/ 2435 goto invalid_opc; 2436 case 0xD: 2437 /* Quadword virtual access with alt access mode (hw_ldq/a) */ 2438 goto invalid_opc; 2439 case 0xE: 2440 /* Longword virtual access with alternate access mode and 2441 protection checks (hw_ldl/wa) */ 2442 tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, 2443 MO_LESL | MO_ALIGN); 2444 break; 2445 case 0xF: 2446 /* Quadword virtual access with alternate access mode and 2447 protection checks (hw_ldq/wa) */ 2448 tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, 2449 MO_LEUQ | MO_ALIGN); 2450 break; 2451 } 2452 break; 2453 } 2454 #else 2455 goto invalid_opc; 2456 #endif 2457 2458 case 0x1C: 2459 vc = dest_gpr(ctx, rc); 2460 if (fn7 == 0x70) { 2461 /* FTOIT */ 2462 REQUIRE_AMASK(FIX); 2463 REQUIRE_REG_31(rb); 2464 va = load_fpr(ctx, ra); 2465 tcg_gen_mov_i64(vc, va); 2466 break; 2467 } else if (fn7 == 0x78) { 2468 /* FTOIS */ 2469 REQUIRE_AMASK(FIX); 2470 REQUIRE_REG_31(rb); 2471 t32 = tcg_temp_new_i32(); 2472 va = load_fpr(ctx, ra); 2473 gen_helper_s_to_memory(t32, va); 2474 tcg_gen_ext_i32_i64(vc, t32); 2475 break; 2476 } 2477 2478 vb = load_gpr_lit(ctx, rb, lit, islit); 2479 switch (fn7) { 2480 case 0x00: 2481 /* SEXTB */ 2482 REQUIRE_AMASK(BWX); 2483 REQUIRE_REG_31(ra); 2484 tcg_gen_ext8s_i64(vc, vb); 2485 break; 2486 case 0x01: 2487 /* SEXTW */ 2488 REQUIRE_AMASK(BWX); 2489 REQUIRE_REG_31(ra); 2490 tcg_gen_ext16s_i64(vc, vb); 2491 break; 2492 case 0x30: 2493 /* CTPOP */ 2494 REQUIRE_AMASK(CIX); 2495 REQUIRE_REG_31(ra); 2496 REQUIRE_NO_LIT; 2497 tcg_gen_ctpop_i64(vc, vb); 2498 break; 2499 case 0x31: 2500 /* PERR */ 2501 REQUIRE_AMASK(MVI); 2502 REQUIRE_NO_LIT; 2503 va = load_gpr(ctx, ra); 2504 gen_helper_perr(vc, va, vb); 2505 break; 2506 case 0x32: 2507 /* CTLZ */ 2508 
/* NOTE(review): chunk begins inside translate_one(), mid-way through the
   fn7 sub-switch of opcode 0x1C (count / multimedia group).  The arm being
   completed here emits tcg_gen_clzi_i64, i.e. a count-leading-zeros op
   (presumably CTLZ — the case label is above this chunk; confirm there). */
            REQUIRE_AMASK(CIX);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            /* Count leading zeros; a zero input yields 64. */
            tcg_gen_clzi_i64(vc, vb, 64);
            break;
        case 0x33:
            /* CTTZ */
            REQUIRE_AMASK(CIX);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            /* Count trailing zeros; a zero input yields 64. */
            tcg_gen_ctzi_i64(vc, vb, 64);
            break;
        case 0x34:
            /* UNPKBW */
            REQUIRE_AMASK(MVI);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            gen_helper_unpkbw(vc, vb);
            break;
        case 0x35:
            /* UNPKBL */
            REQUIRE_AMASK(MVI);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            gen_helper_unpkbl(vc, vb);
            break;
        case 0x36:
            /* PKWB */
            REQUIRE_AMASK(MVI);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            gen_helper_pkwb(vc, vb);
            break;
        case 0x37:
            /* PKLB */
            REQUIRE_AMASK(MVI);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            gen_helper_pklb(vc, vb);
            break;
        /* The byte/word min/max ops below take a real Ra operand,
           unlike the unary pack/unpack/count ops above. */
        case 0x38:
            /* MINSB8 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_minsb8(vc, va, vb);
            break;
        case 0x39:
            /* MINSW4 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_minsw4(vc, va, vb);
            break;
        case 0x3A:
            /* MINUB8 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_minub8(vc, va, vb);
            break;
        case 0x3B:
            /* MINUW4 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_minuw4(vc, va, vb);
            break;
        case 0x3C:
            /* MAXUB8 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_maxub8(vc, va, vb);
            break;
        case 0x3D:
            /* MAXUW4 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_maxuw4(vc, va, vb);
            break;
        case 0x3E:
            /* MAXSB8 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_maxsb8(vc, va, vb);
            break;
        case 0x3F:
            /* MAXSW4 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_maxsw4(vc, va, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x1D:
        /* HW_MTPR (PALcode) */
#ifndef CONFIG_USER_ONLY
        /* Privileged: only legal while executing in PALmode. */
        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
        vb = load_gpr(ctx, rb);
        /* Low 16 bits of the insn select the processor register. */
        ret = gen_mtpr(ctx, vb, insn & 0xffff);
        break;
#else
        goto invalid_opc;
#endif

    case 0x1E:
        /* HW_RET (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
        if (rb == 31) {
            /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
               address from EXC_ADDR.  This turns out to be useful for our
               emulation PALcode, so continue to accept it. */
            vb = dest_sink(ctx);
            tcg_gen_ld_i64(vb, tcg_env, offsetof(CPUAlphaState, exc_addr));
        } else {
            vb = load_gpr(ctx, rb);
        }
        /* Returning from PALmode invalidates any lock reservation
           and clears the RX (intr_flag) byte. */
        tcg_gen_movi_i64(cpu_lock_addr, -1);
        st_flag_byte(load_zero(ctx), ENV_FLAG_RX_SHIFT);
        /* Bit 0 of the return address selects the new PALmode state;
           the low two bits are stripped from the PC itself. */
        tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, vb, 1);
        st_flag_byte(tmp, ENV_FLAG_PAL_SHIFT);
        tcg_gen_andi_i64(cpu_pc, vb, ~3);
        /* Allow interrupts to be recognized right away.  */
        ret = DISAS_PC_UPDATED_NOCHAIN;
        break;
#else
        goto invalid_opc;
#endif

    case 0x1F:
        /* HW_ST (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
        {
            /* Bits <15:12> of the insn select the access type. */
            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access */
                va = load_gpr(ctx, ra);
                vb = load_gpr(ctx, rb);
                tmp = tcg_temp_new();
                tcg_gen_addi_i64(tmp, vb, disp12);
                tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
                break;
            case 0x1:
                /* Quadword physical access */
                va = load_gpr(ctx, ra);
                vb = load_gpr(ctx, rb);
                tmp = tcg_temp_new();
                tcg_gen_addi_i64(tmp, vb, disp12);
                tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
                break;
            case 0x2:
                /* Longword physical access with lock */
                ret = gen_store_conditional(ctx, ra, rb, disp12,
                                            MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
                break;
            case 0x3:
                /* Quadword physical access with lock */
                ret = gen_store_conditional(ctx, ra, rb, disp12,
                                            MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
                break;
            case 0x4:
                /* Longword virtual access */
                goto invalid_opc;
            case 0x5:
                /* Quadword virtual access */
                goto invalid_opc;
            case 0x6:
                /* Invalid */
                goto invalid_opc;
            case 0x7:
                /* Invalid */
                goto invalid_opc;
            case 0x8:
                /* Invalid */
                goto invalid_opc;
            case 0x9:
                /* Invalid */
                goto invalid_opc;
            case 0xA:
                /* Invalid */
                goto invalid_opc;
            case 0xB:
                /* Invalid */
                goto invalid_opc;
            case 0xC:
                /* Longword virtual access with alternate access mode */
                goto invalid_opc;
            case 0xD:
                /* Quadword virtual access with alternate access mode */
                goto invalid_opc;
            case 0xE:
                /* Invalid */
                goto invalid_opc;
            case 0xF:
                /* Invalid */
                goto invalid_opc;
            }
            break;
        }
#else
        goto invalid_opc;
#endif
    case 0x20:
        /* LDF */
        REQUIRE_FEN;
        gen_load_fp(ctx, ra, rb, disp16, gen_ldf);
        break;
    case 0x21:
        /* LDG */
        REQUIRE_FEN;
        gen_load_fp(ctx, ra, rb, disp16, gen_ldg);
        break;
    case 0x22:
        /* LDS */
        REQUIRE_FEN;
        gen_load_fp(ctx, ra, rb, disp16, gen_lds);
        break;
    case 0x23:
        /* LDT */
        REQUIRE_FEN;
        gen_load_fp(ctx, ra, rb, disp16, gen_ldt);
        break;
    case 0x24:
        /* STF */
        REQUIRE_FEN;
        gen_store_fp(ctx, ra, rb, disp16, gen_stf);
        break;
    case 0x25:
        /* STG */
        REQUIRE_FEN;
        gen_store_fp(ctx, ra, rb, disp16, gen_stg);
        break;
    case 0x26:
        /* STS */
        REQUIRE_FEN;
        gen_store_fp(ctx, ra, rb, disp16, gen_sts);
        break;
    case 0x27:
        /* STT */
        REQUIRE_FEN;
        gen_store_fp(ctx, ra, rb, disp16, gen_stt);
        break;
    case 0x28:
        /* LDL */
        gen_load_int(ctx, ra, rb, disp16, MO_LESL, 0, 0);
        break;
    case 0x29:
        /* LDQ */
        gen_load_int(ctx, ra, rb, disp16, MO_LEUQ, 0, 0);
        break;
    case 0x2A:
        /* LDL_L */
        /* Load-locked variants require natural alignment and record
           the reservation (final argument = 1). */
        gen_load_int(ctx, ra, rb, disp16, MO_LESL | MO_ALIGN, 0, 1);
        break;
    case 0x2B:
        /* LDQ_L */
        gen_load_int(ctx, ra, rb, disp16, MO_LEUQ | MO_ALIGN, 0, 1);
        break;
    case 0x2C:
        /* STL */
        gen_store_int(ctx, ra, rb, disp16, MO_LEUL, 0);
        break;
    case 0x2D:
        /* STQ */
        gen_store_int(ctx, ra, rb, disp16, MO_LEUQ, 0);
        break;
    case 0x2E:
        /* STL_C */
        ret = gen_store_conditional(ctx, ra, rb, disp16,
                                    ctx->mem_idx, MO_LESL | MO_ALIGN);
        break;
    case 0x2F:
        /* STQ_C */
        ret = gen_store_conditional(ctx, ra, rb, disp16,
                                    ctx->mem_idx, MO_LEUQ | MO_ALIGN);
        break;
    case 0x30:
        /* BR */
        ret = gen_bdirect(ctx, ra, disp21);
        break;
    case 0x31: /* FBEQ */
        REQUIRE_FEN;
        ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
        break;
    case 0x32: /* FBLT */
        REQUIRE_FEN;
        ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
        break;
    case 0x33: /* FBLE */
        REQUIRE_FEN;
        ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
        break;
    case 0x34:
        /* BSR */
        ret = gen_bdirect(ctx, ra, disp21);
        break;
    case 0x35: /* FBNE */
        REQUIRE_FEN;
        ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
        break;
    case 0x36: /* FBGE */
        REQUIRE_FEN;
        ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
        break;
    case 0x37: /* FBGT */
        REQUIRE_FEN;
        ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
        break;
    case 0x38:
        /* BLBC */
        /* Branch on low bit clear: TSTEQ tests (ra & mask) == 0. */
        ret = gen_bcond(ctx, TCG_COND_TSTEQ, ra, disp21);
        break;
    case 0x39:
        /* BEQ */
        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21);
        break;
    case 0x3A:
        /* BLT */
        ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21);
        break;
    case 0x3B:
        /* BLE */
        ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21);
        break;
    case 0x3C:
        /* BLBS */
        /* Branch on low bit set: TSTNE tests (ra & mask) != 0. */
        ret = gen_bcond(ctx, TCG_COND_TSTNE, ra, disp21);
        break;
    case 0x3D:
        /* BNE */
        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21);
        break;
    case 0x3E:
        /* BGE */
        ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21);
        break;
    case 0x3F:
        /* BGT */
        ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21);
        break;
    invalid_opc:
        /* Common exit for every undecodable or disallowed encoding. */
        ret = gen_invalid(ctx);
        break;
    raise_fen:
        /* FP insn with the FP unit disabled: raise the FEN exception. */
        ret = gen_excp(ctx, EXCP_FEN, 0);
        break;
    }

    return ret;
}

/*
 * Per-TB setup: snapshot the CPU configuration (tb flags, MMU index,
 * implver/amask, PALmode register file) into the DisasContext and bound
 * the TB to the current page.
 */
static void alpha_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUAlphaState *env = cpu_env(cpu);
    int64_t bound;

    ctx->tbflags = ctx->base.tb->flags;
    ctx->mem_idx = alpha_env_mmu_index(env);
    ctx->pcrel = ctx->base.tb->cflags & CF_PCREL;
    ctx->implver = env->implver;
    ctx->amask = env->amask;

#ifdef CONFIG_USER_ONLY
    ctx->ir = cpu_std_ir;
    ctx->unalign = (ctx->tbflags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
#else
    ctx->palbr = env->palbr;
    /* PALmode uses the shadow register file for a subset of the GPRs. */
    ctx->ir = (ctx->tbflags & ENV_FLAG_PAL_MODE ? cpu_pal_ir : cpu_std_ir);
#endif

    /* ??? Every TB begins with unset rounding mode, to be initialized on
       the first fp insn of the TB.  Alternately we could define a proper
       default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
       to reset the FP_STATUS to that default at the end of any TB that
       changes the default.  We could even (gasp) dynamically figure out
       what default would be most efficient given the running program.  */
    ctx->tb_rm = -1;
    /* Similarly for flush-to-zero.  */
    ctx->tb_ftz = -1;

    ctx->zero = NULL;
    ctx->sink = NULL;

    /* Bound the number of insns to execute to those left on the page.  */
    bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
    ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
}

/* No per-TB prologue code is needed for Alpha. */
static void alpha_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}

/*
 * Record the insn's PC in the TCG op stream.  With CF_PCREL only the
 * offset within the page is recorded, since the page base may differ
 * between executions of the same TB.
 */
static void alpha_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    if (ctx->pcrel) {
        tcg_gen_insn_start(dcbase->pc_next & ~TARGET_PAGE_MASK);
    } else {
        tcg_gen_insn_start(dcbase->pc_next);
    }
}

/* Fetch one 4-byte insn, translate it, and release per-insn temps. */
static void alpha_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    uint32_t insn = translator_ldl(cpu_env(cpu), &ctx->base,
                                   ctx->base.pc_next);

    ctx->base.pc_next += 4;
    ctx->base.is_jmp = translate_one(ctx, insn);

    free_context_temps(ctx);
}

/*
 * Emit the TB epilogue according to how translation ended:
 *  - NORETURN: an exception was raised; nothing to emit.
 *  - TOO_MANY: insn budget exhausted; chain directly to the next TB.
 *  - PC_STALE: cpu_pc must be written back first, then fall through.
 *  - PC_UPDATED: cpu_pc already holds the target; indirect goto.
 *  - PC_UPDATED_NOCHAIN: full exit so interrupts are recognized.
 */
static void alpha_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    switch (ctx->base.is_jmp) {
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
        gen_goto_tb(ctx, 0, 0);
        break;
    case DISAS_PC_STALE:
        gen_pc_disp(ctx, cpu_pc, 0);
        /* FALLTHRU */
    case DISAS_PC_UPDATED:
        tcg_gen_lookup_and_goto_ptr();
        break;
    case DISAS_PC_UPDATED_NOCHAIN:
        tcg_gen_exit_tb(NULL, 0);
        break;
    default:
        g_assert_not_reached();
    }
}

/* Log the guest disassembly of this TB for -d in_asm. */
static void alpha_tr_disas_log(const DisasContextBase *dcbase,
                               CPUState *cpu, FILE *logfile)
{
    fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
    target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size);
}

static const TranslatorOps alpha_tr_ops = {
    .init_disas_context = alpha_tr_init_disas_context,
    .tb_start           = alpha_tr_tb_start,
    .insn_start         = alpha_tr_insn_start,
    .translate_insn     = alpha_tr_translate_insn,
    .tb_stop            = alpha_tr_tb_stop,
    .disas_log          = alpha_tr_disas_log,
};

/* Entry point from the generic translator: translate one TB. */
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
                           vaddr pc, void *host_pc)
{
    DisasContext dc;
    translator_loop(cpu, tb, max_insns, pc, host_pc, &alpha_tr_ops, &dc.base);
}