1 /* 2 * Alpha emulation cpu translation for qemu. 3 * 4 * Copyright (c) 2007 Jocelyn Mayer 5 * 6 * This library is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU Lesser General Public 8 * License as published by the Free Software Foundation; either 9 * version 2.1 of the License, or (at your option) any later version. 10 * 11 * This library is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * Lesser General Public License for more details. 15 * 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #include "qemu/osdep.h" 21 #include "cpu.h" 22 #include "sysemu/cpus.h" 23 #include "sysemu/cpu-timers.h" 24 #include "disas/disas.h" 25 #include "qemu/host-utils.h" 26 #include "exec/exec-all.h" 27 #include "tcg/tcg-op.h" 28 #include "exec/cpu_ldst.h" 29 #include "exec/helper-proto.h" 30 #include "exec/helper-gen.h" 31 #include "exec/translator.h" 32 #include "exec/log.h" 33 34 35 #undef ALPHA_DEBUG_DISAS 36 #define CONFIG_SOFTFLOAT_INLINE 37 38 #ifdef ALPHA_DEBUG_DISAS 39 # define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__) 40 #else 41 # define LOG_DISAS(...) do { } while (0) 42 #endif 43 44 typedef struct DisasContext DisasContext; 45 struct DisasContext { 46 DisasContextBase base; 47 48 #ifdef CONFIG_USER_ONLY 49 MemOp unalign; 50 #else 51 uint64_t palbr; 52 #endif 53 uint32_t tbflags; 54 int mem_idx; 55 56 /* implver and amask values for this CPU. */ 57 int implver; 58 int amask; 59 60 /* Current rounding mode for this TB. */ 61 int tb_rm; 62 /* Current flush-to-zero setting for this TB. */ 63 int tb_ftz; 64 65 /* The set of registers active in the current context. */ 66 TCGv *ir; 67 68 /* Temporaries for $31 and $f31 as source and destination. */ 69 TCGv zero; 70 TCGv sink; 71 }; 72 73 #ifdef CONFIG_USER_ONLY 74 #define UNALIGN(C) (C)->unalign 75 #else 76 #define UNALIGN(C) 0 77 #endif 78 79 /* Target-specific return values from translate_one, indicating the 80 state of the TB. Note that DISAS_NEXT indicates that we are not 81 exiting the TB. */ 82 #define DISAS_PC_UPDATED_NOCHAIN DISAS_TARGET_0 83 #define DISAS_PC_UPDATED DISAS_TARGET_1 84 #define DISAS_PC_STALE DISAS_TARGET_2 85 86 /* global register indexes */ 87 static TCGv cpu_std_ir[31]; 88 static TCGv cpu_fir[31]; 89 static TCGv cpu_pc; 90 static TCGv cpu_lock_addr; 91 static TCGv cpu_lock_value; 92 93 #ifndef CONFIG_USER_ONLY 94 static TCGv cpu_pal_ir[31]; 95 #endif 96 97 #include "exec/gen-icount.h" 98 99 void alpha_translate_init(void) 100 { 101 #define DEF_VAR(V) { &cpu_##V, #V, offsetof(CPUAlphaState, V) } 102 103 typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar; 104 static const GlobalVar vars[] = { 105 DEF_VAR(pc), 106 DEF_VAR(lock_addr), 107 DEF_VAR(lock_value), 108 }; 109 110 #undef DEF_VAR 111 112 /* Use the symbolic register names that match the disassembler. */ 113 static const char greg_names[31][4] = { 114 "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6", 115 "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp", 116 "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9", 117 "t10", "t11", "ra", "t12", "at", "gp", "sp" 118 }; 119 static const char freg_names[31][4] = { 120 "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", 121 "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15", 122 "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23", 123 "f24", "f25", "f26", "f27", "f28", "f29", "f30" 124 }; 125 #ifndef CONFIG_USER_ONLY 126 static const char shadow_names[8][8] = { 127 "pal_t7", "pal_s0", "pal_s1", "pal_s2", 128 "pal_s3", "pal_s4", "pal_s5", "pal_t11" 129 }; 130 #endif 131 132 int i; 133 134 for (i = 0; i < 31; i++) { 135 cpu_std_ir[i] = tcg_global_mem_new_i64(cpu_env, 136 offsetof(CPUAlphaState, ir[i]), 137 greg_names[i]); 138 } 139 140 for (i = 0; i < 31; i++) { 141 cpu_fir[i] = tcg_global_mem_new_i64(cpu_env, 142 offsetof(CPUAlphaState, fir[i]), 143 freg_names[i]); 144 } 145 146 #ifndef CONFIG_USER_ONLY 147 memcpy(cpu_pal_ir, cpu_std_ir, sizeof(cpu_pal_ir)); 148 for (i = 0; i < 8; i++) { 149 int r = (i == 7 ? 25 : i + 8); 150 cpu_pal_ir[r] = tcg_global_mem_new_i64(cpu_env, 151 offsetof(CPUAlphaState, 152 shadow[i]), 153 shadow_names[i]); 154 } 155 #endif 156 157 for (i = 0; i < ARRAY_SIZE(vars); ++i) { 158 const GlobalVar *v = &vars[i]; 159 *v->var = tcg_global_mem_new_i64(cpu_env, v->ofs, v->name); 160 } 161 } 162 163 static TCGv load_zero(DisasContext *ctx) 164 { 165 if (!ctx->zero) { 166 ctx->zero = tcg_constant_i64(0); 167 } 168 return ctx->zero; 169 } 170 171 static TCGv dest_sink(DisasContext *ctx) 172 { 173 if (!ctx->sink) { 174 ctx->sink = tcg_temp_new(); 175 } 176 return ctx->sink; 177 } 178 179 static void free_context_temps(DisasContext *ctx) 180 { 181 if (ctx->sink) { 182 tcg_gen_discard_i64(ctx->sink); 183 tcg_temp_free(ctx->sink); 184 ctx->sink = NULL; 185 } 186 } 187 188 static TCGv load_gpr(DisasContext *ctx, unsigned reg) 189 { 190 if (likely(reg < 31)) { 191 return ctx->ir[reg]; 192 } else { 193 return load_zero(ctx); 194 } 195 } 196 197 static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg, 198 uint8_t lit, bool islit) 199 { 200 if (islit) { 201 return tcg_constant_i64(lit); 202 } else if (likely(reg < 31)) { 203 return ctx->ir[reg]; 204 } else { 205 return load_zero(ctx); 206 } 207 } 208 209 static TCGv dest_gpr(DisasContext *ctx, unsigned reg) 210 { 211 if (likely(reg < 31)) { 212 return ctx->ir[reg]; 213 } else { 214 return dest_sink(ctx); 215 } 216 } 217 218 static TCGv load_fpr(DisasContext *ctx, unsigned reg) 219 { 220 if (likely(reg < 31)) { 221 return cpu_fir[reg]; 222 } else { 223 return load_zero(ctx); 224 } 225 } 226 227 static TCGv dest_fpr(DisasContext *ctx, unsigned reg) 228 { 229 if (likely(reg < 31)) { 230 return cpu_fir[reg]; 231 } else { 232 return dest_sink(ctx); 233 } 234 } 235 236 static int get_flag_ofs(unsigned shift) 237 { 238 int ofs = offsetof(CPUAlphaState, flags); 239 #ifdef HOST_WORDS_BIGENDIAN 240 ofs += 3 - (shift / 8); 241 #else 242 ofs += shift / 8; 243 #endif 244 return ofs; 245 } 246 247 static void ld_flag_byte(TCGv val, unsigned shift) 248 { 249 tcg_gen_ld8u_i64(val, cpu_env, get_flag_ofs(shift)); 250 } 251 252 static void st_flag_byte(TCGv val, unsigned shift) 253 { 254 tcg_gen_st8_i64(val, cpu_env, get_flag_ofs(shift)); 255 } 256 257 static void gen_excp_1(int exception, int error_code) 258 { 259 TCGv_i32 tmp1, tmp2; 260 261 tmp1 = tcg_constant_i32(exception); 262 tmp2 = tcg_constant_i32(error_code); 263 gen_helper_excp(cpu_env, tmp1, tmp2); 264 } 265 266 static DisasJumpType gen_excp(DisasContext *ctx, int exception, int error_code) 267 { 268 tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next); 269 gen_excp_1(exception, error_code); 270 return DISAS_NORETURN; 271 } 272 273 static inline DisasJumpType gen_invalid(DisasContext *ctx) 274 { 275 return gen_excp(ctx, EXCP_OPCDEC, 0); 276 } 277 278 static void gen_ldf(DisasContext *ctx, TCGv dest, TCGv addr) 279 { 280 TCGv_i32 tmp32 = tcg_temp_new_i32(); 281 tcg_gen_qemu_ld_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx)); 282 gen_helper_memory_to_f(dest, tmp32); 283 tcg_temp_free_i32(tmp32); 284 } 285 286 static void gen_ldg(DisasContext *ctx, TCGv dest, TCGv addr) 287 { 288 TCGv tmp = tcg_temp_new(); 289 tcg_gen_qemu_ld_i64(tmp, addr, ctx->mem_idx, MO_LEQ | UNALIGN(ctx)); 290 gen_helper_memory_to_g(dest, tmp); 291 tcg_temp_free(tmp); 292 } 293 294 static void gen_lds(DisasContext *ctx, TCGv dest, TCGv addr) 295 { 296 TCGv_i32 tmp32 = tcg_temp_new_i32(); 297 tcg_gen_qemu_ld_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx)); 298 gen_helper_memory_to_s(dest, tmp32); 299 tcg_temp_free_i32(tmp32); 300 } 301 302 static void gen_ldt(DisasContext *ctx, TCGv dest, TCGv addr) 303 { 304 tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, MO_LEQ | UNALIGN(ctx)); 305 } 306 307 static void gen_load_fp(DisasContext *ctx, int ra, int rb, int32_t disp16, 308 void (*func)(DisasContext *, TCGv, TCGv)) 309 { 310 /* Loads to $f31 are prefetches, which we can treat as nops. */ 311 if (likely(ra != 31)) { 312 TCGv addr = tcg_temp_new(); 313 tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16); 314 func(ctx, cpu_fir[ra], addr); 315 tcg_temp_free(addr); 316 } 317 } 318 319 static void gen_load_int(DisasContext *ctx, int ra, int rb, int32_t disp16, 320 MemOp op, bool clear, bool locked) 321 { 322 TCGv addr, dest; 323 324 /* LDQ_U with ra $31 is UNOP. Other various loads are forms of 325 prefetches, which we can treat as nops. No worries about 326 missed exceptions here. */ 327 if (unlikely(ra == 31)) { 328 return; 329 } 330 331 addr = tcg_temp_new(); 332 tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16); 333 if (clear) { 334 tcg_gen_andi_i64(addr, addr, ~0x7); 335 } else if (!locked) { 336 op |= UNALIGN(ctx); 337 } 338 339 dest = ctx->ir[ra]; 340 tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, op); 341 342 if (locked) { 343 tcg_gen_mov_i64(cpu_lock_addr, addr); 344 tcg_gen_mov_i64(cpu_lock_value, dest); 345 } 346 tcg_temp_free(addr); 347 } 348 349 static void gen_stf(DisasContext *ctx, TCGv src, TCGv addr) 350 { 351 TCGv_i32 tmp32 = tcg_temp_new_i32(); 352 gen_helper_f_to_memory(tmp32, addr); 353 tcg_gen_qemu_st_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx)); 354 tcg_temp_free_i32(tmp32); 355 } 356 357 static void gen_stg(DisasContext *ctx, TCGv src, TCGv addr) 358 { 359 TCGv tmp = tcg_temp_new(); 360 gen_helper_g_to_memory(tmp, src); 361 tcg_gen_qemu_st_i64(tmp, addr, ctx->mem_idx, MO_LEQ | UNALIGN(ctx)); 362 tcg_temp_free(tmp); 363 } 364 365 static void gen_sts(DisasContext *ctx, TCGv src, TCGv addr) 366 { 367 TCGv_i32 tmp32 = tcg_temp_new_i32(); 368 gen_helper_s_to_memory(tmp32, src); 369 tcg_gen_qemu_st_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx)); 370 tcg_temp_free_i32(tmp32); 371 } 372 373 static void gen_stt(DisasContext *ctx, TCGv src, TCGv addr) 374 { 375 tcg_gen_qemu_st_i64(src, addr, ctx->mem_idx, MO_LEQ | UNALIGN(ctx)); 376 } 377 378 static void gen_store_fp(DisasContext *ctx, int ra, int rb, int32_t disp16, 379 void (*func)(DisasContext *, TCGv, TCGv)) 380 { 381 TCGv addr = tcg_temp_new(); 382 tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16); 383 func(ctx, load_fpr(ctx, ra), addr); 384 tcg_temp_free(addr); 385 } 386 387 static void gen_store_int(DisasContext *ctx, int ra, int rb, int32_t disp16, 388 MemOp op, bool clear) 389 { 390 TCGv addr, src; 391 392 addr = tcg_temp_new(); 393 tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16); 394 if (clear) { 395 tcg_gen_andi_i64(addr, addr, ~0x7); 396 } else { 397 op |= UNALIGN(ctx); 398 } 399 400 src = load_gpr(ctx, ra); 401 tcg_gen_qemu_st_i64(src, addr, ctx->mem_idx, op); 402 403 tcg_temp_free(addr); 404 } 405 406 static DisasJumpType gen_store_conditional(DisasContext *ctx, int ra, int rb, 407 int32_t disp16, int mem_idx, 408 MemOp op) 409 { 410 TCGLabel *lab_fail, *lab_done; 411 TCGv addr, val; 412 413 addr = tcg_temp_new_i64(); 414 tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16); 415 free_context_temps(ctx); 416 417 lab_fail = gen_new_label(); 418 lab_done = gen_new_label(); 419 tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail); 420 tcg_temp_free_i64(addr); 421 422 val = tcg_temp_new_i64(); 423 tcg_gen_atomic_cmpxchg_i64(val, cpu_lock_addr, cpu_lock_value, 424 load_gpr(ctx, ra), mem_idx, op); 425 free_context_temps(ctx); 426 427 if (ra != 31) { 428 tcg_gen_setcond_i64(TCG_COND_EQ, ctx->ir[ra], val, cpu_lock_value); 429 } 430 tcg_temp_free_i64(val); 431 tcg_gen_br(lab_done); 432 433 gen_set_label(lab_fail); 434 if (ra != 31) { 435 tcg_gen_movi_i64(ctx->ir[ra], 0); 436 } 437 438 gen_set_label(lab_done); 439 tcg_gen_movi_i64(cpu_lock_addr, -1); 440 return DISAS_NEXT; 441 } 442 443 static bool use_goto_tb(DisasContext *ctx, uint64_t dest) 444 { 445 return translator_use_goto_tb(&ctx->base, dest); 446 } 447 448 static DisasJumpType gen_bdirect(DisasContext *ctx, int ra, int32_t disp) 449 { 450 uint64_t dest = ctx->base.pc_next + (disp << 2); 451 452 if (ra != 31) { 453 tcg_gen_movi_i64(ctx->ir[ra], ctx->base.pc_next); 454 } 455 456 /* Notice branch-to-next; used to initialize RA with the PC. */ 457 if (disp == 0) { 458 return 0; 459 } else if (use_goto_tb(ctx, dest)) { 460 tcg_gen_goto_tb(0); 461 tcg_gen_movi_i64(cpu_pc, dest); 462 tcg_gen_exit_tb(ctx->base.tb, 0); 463 return DISAS_NORETURN; 464 } else { 465 tcg_gen_movi_i64(cpu_pc, dest); 466 return DISAS_PC_UPDATED; 467 } 468 } 469 470 static DisasJumpType gen_bcond_internal(DisasContext *ctx, TCGCond cond, 471 TCGv cmp, int32_t disp) 472 { 473 uint64_t dest = ctx->base.pc_next + (disp << 2); 474 TCGLabel *lab_true = gen_new_label(); 475 476 if (use_goto_tb(ctx, dest)) { 477 tcg_gen_brcondi_i64(cond, cmp, 0, lab_true); 478 479 tcg_gen_goto_tb(0); 480 tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next); 481 tcg_gen_exit_tb(ctx->base.tb, 0); 482 483 gen_set_label(lab_true); 484 tcg_gen_goto_tb(1); 485 tcg_gen_movi_i64(cpu_pc, dest); 486 tcg_gen_exit_tb(ctx->base.tb, 1); 487 488 return DISAS_NORETURN; 489 } else { 490 TCGv_i64 z = load_zero(ctx); 491 TCGv_i64 d = tcg_constant_i64(dest); 492 TCGv_i64 p = tcg_constant_i64(ctx->base.pc_next); 493 494 tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p); 495 return DISAS_PC_UPDATED; 496 } 497 } 498 499 static DisasJumpType gen_bcond(DisasContext *ctx, TCGCond cond, int ra, 500 int32_t disp, int mask) 501 { 502 if (mask) { 503 TCGv tmp = tcg_temp_new(); 504 DisasJumpType ret; 505 506 tcg_gen_andi_i64(tmp, load_gpr(ctx, ra), 1); 507 ret = gen_bcond_internal(ctx, cond, tmp, disp); 508 tcg_temp_free(tmp); 509 return ret; 510 } 511 return gen_bcond_internal(ctx, cond, load_gpr(ctx, ra), disp); 512 } 513 514 /* Fold -0.0 for comparison with COND. */ 515 516 static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src) 517 { 518 uint64_t mzero = 1ull << 63; 519 520 switch (cond) { 521 case TCG_COND_LE: 522 case TCG_COND_GT: 523 /* For <= or >, the -0.0 value directly compares the way we want. */ 524 tcg_gen_mov_i64(dest, src); 525 break; 526 527 case TCG_COND_EQ: 528 case TCG_COND_NE: 529 /* For == or !=, we can simply mask off the sign bit and compare. */ 530 tcg_gen_andi_i64(dest, src, mzero - 1); 531 break; 532 533 case TCG_COND_GE: 534 case TCG_COND_LT: 535 /* For >= or <, map -0.0 to +0.0 via comparison and mask. */ 536 tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero); 537 tcg_gen_neg_i64(dest, dest); 538 tcg_gen_and_i64(dest, dest, src); 539 break; 540 541 default: 542 abort(); 543 } 544 } 545 546 static DisasJumpType gen_fbcond(DisasContext *ctx, TCGCond cond, int ra, 547 int32_t disp) 548 { 549 TCGv cmp_tmp = tcg_temp_new(); 550 DisasJumpType ret; 551 552 gen_fold_mzero(cond, cmp_tmp, load_fpr(ctx, ra)); 553 ret = gen_bcond_internal(ctx, cond, cmp_tmp, disp); 554 tcg_temp_free(cmp_tmp); 555 return ret; 556 } 557 558 static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc) 559 { 560 TCGv_i64 va, vb, z; 561 562 z = load_zero(ctx); 563 vb = load_fpr(ctx, rb); 564 va = tcg_temp_new(); 565 gen_fold_mzero(cond, va, load_fpr(ctx, ra)); 566 567 tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), va, z, vb, load_fpr(ctx, rc)); 568 569 tcg_temp_free(va); 570 } 571 572 #define QUAL_RM_N 0x080 /* Round mode nearest even */ 573 #define QUAL_RM_C 0x000 /* Round mode chopped */ 574 #define QUAL_RM_M 0x040 /* Round mode minus infinity */ 575 #define QUAL_RM_D 0x0c0 /* Round mode dynamic */ 576 #define QUAL_RM_MASK 0x0c0 577 578 #define QUAL_U 0x100 /* Underflow enable (fp output) */ 579 #define QUAL_V 0x100 /* Overflow enable (int output) */ 580 #define QUAL_S 0x400 /* Software completion enable */ 581 #define QUAL_I 0x200 /* Inexact detection enable */ 582 583 static void gen_qual_roundmode(DisasContext *ctx, int fn11) 584 { 585 TCGv_i32 tmp; 586 587 fn11 &= QUAL_RM_MASK; 588 if (fn11 == ctx->tb_rm) { 589 return; 590 } 591 ctx->tb_rm = fn11; 592 593 tmp = tcg_temp_new_i32(); 594 switch (fn11) { 595 case QUAL_RM_N: 596 tcg_gen_movi_i32(tmp, float_round_nearest_even); 597 break; 598 case QUAL_RM_C: 599 tcg_gen_movi_i32(tmp, float_round_to_zero); 600 break; 601 case QUAL_RM_M: 602 tcg_gen_movi_i32(tmp, float_round_down); 603 break; 604 case QUAL_RM_D: 605 tcg_gen_ld8u_i32(tmp, cpu_env, 606 offsetof(CPUAlphaState, fpcr_dyn_round)); 607 break; 608 } 609 610 #if defined(CONFIG_SOFTFLOAT_INLINE) 611 /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode. 612 With CONFIG_SOFTFLOAT that expands to an out-of-line call that just 613 sets the one field. */ 614 tcg_gen_st8_i32(tmp, cpu_env, 615 offsetof(CPUAlphaState, fp_status.float_rounding_mode)); 616 #else 617 gen_helper_setroundmode(tmp); 618 #endif 619 620 tcg_temp_free_i32(tmp); 621 } 622 623 static void gen_qual_flushzero(DisasContext *ctx, int fn11) 624 { 625 TCGv_i32 tmp; 626 627 fn11 &= QUAL_U; 628 if (fn11 == ctx->tb_ftz) { 629 return; 630 } 631 ctx->tb_ftz = fn11; 632 633 tmp = tcg_temp_new_i32(); 634 if (fn11) { 635 /* Underflow is enabled, use the FPCR setting. */ 636 tcg_gen_ld8u_i32(tmp, cpu_env, 637 offsetof(CPUAlphaState, fpcr_flush_to_zero)); 638 } else { 639 /* Underflow is disabled, force flush-to-zero. */ 640 tcg_gen_movi_i32(tmp, 1); 641 } 642 643 #if defined(CONFIG_SOFTFLOAT_INLINE) 644 tcg_gen_st8_i32(tmp, cpu_env, 645 offsetof(CPUAlphaState, fp_status.flush_to_zero)); 646 #else 647 gen_helper_setflushzero(tmp); 648 #endif 649 650 tcg_temp_free_i32(tmp); 651 } 652 653 static TCGv gen_ieee_input(DisasContext *ctx, int reg, int fn11, int is_cmp) 654 { 655 TCGv val; 656 657 if (unlikely(reg == 31)) { 658 val = load_zero(ctx); 659 } else { 660 val = cpu_fir[reg]; 661 if ((fn11 & QUAL_S) == 0) { 662 if (is_cmp) { 663 gen_helper_ieee_input_cmp(cpu_env, val); 664 } else { 665 gen_helper_ieee_input(cpu_env, val); 666 } 667 } else { 668 #ifndef CONFIG_USER_ONLY 669 /* In system mode, raise exceptions for denormals like real 670 hardware. In user mode, proceed as if the OS completion 671 handler is handling the denormal as per spec. */ 672 gen_helper_ieee_input_s(cpu_env, val); 673 #endif 674 } 675 } 676 return val; 677 } 678 679 static void gen_fp_exc_raise(int rc, int fn11) 680 { 681 /* ??? We ought to be able to do something with imprecise exceptions. 682 E.g. notice we're still in the trap shadow of something within the 683 TB and do not generate the code to signal the exception; end the TB 684 when an exception is forced to arrive, either by consumption of a 685 register value or TRAPB or EXCB. */ 686 TCGv_i32 reg, ign; 687 uint32_t ignore = 0; 688 689 if (!(fn11 & QUAL_U)) { 690 /* Note that QUAL_U == QUAL_V, so ignore either. */ 691 ignore |= FPCR_UNF | FPCR_IOV; 692 } 693 if (!(fn11 & QUAL_I)) { 694 ignore |= FPCR_INE; 695 } 696 ign = tcg_constant_i32(ignore); 697 698 /* ??? Pass in the regno of the destination so that the helper can 699 set EXC_MASK, which contains a bitmask of destination registers 700 that have caused arithmetic traps. A simple userspace emulation 701 does not require this. We do need it for a guest kernel's entArith, 702 or if we were to do something clever with imprecise exceptions. */ 703 reg = tcg_constant_i32(rc + 32); 704 if (fn11 & QUAL_S) { 705 gen_helper_fp_exc_raise_s(cpu_env, ign, reg); 706 } else { 707 gen_helper_fp_exc_raise(cpu_env, ign, reg); 708 } 709 } 710 711 static void gen_cvtlq(TCGv vc, TCGv vb) 712 { 713 TCGv tmp = tcg_temp_new(); 714 715 /* The arithmetic right shift here, plus the sign-extended mask below 716 yields a sign-extended result without an explicit ext32s_i64. */ 717 tcg_gen_shri_i64(tmp, vb, 29); 718 tcg_gen_sari_i64(vc, vb, 32); 719 tcg_gen_deposit_i64(vc, vc, tmp, 0, 30); 720 721 tcg_temp_free(tmp); 722 } 723 724 static void gen_ieee_arith2(DisasContext *ctx, 725 void (*helper)(TCGv, TCGv_ptr, TCGv), 726 int rb, int rc, int fn11) 727 { 728 TCGv vb; 729 730 gen_qual_roundmode(ctx, fn11); 731 gen_qual_flushzero(ctx, fn11); 732 733 vb = gen_ieee_input(ctx, rb, fn11, 0); 734 helper(dest_fpr(ctx, rc), cpu_env, vb); 735 736 gen_fp_exc_raise(rc, fn11); 737 } 738 739 #define IEEE_ARITH2(name) \ 740 static inline void glue(gen_, name)(DisasContext *ctx, \ 741 int rb, int rc, int fn11) \ 742 { \ 743 gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11); \ 744 } 745 IEEE_ARITH2(sqrts) 746 IEEE_ARITH2(sqrtt) 747 IEEE_ARITH2(cvtst) 748 IEEE_ARITH2(cvtts) 749 750 static void gen_cvttq(DisasContext *ctx, int rb, int rc, int fn11) 751 { 752 TCGv vb, vc; 753 754 /* No need to set flushzero, since we have an integer output. */ 755 vb = gen_ieee_input(ctx, rb, fn11, 0); 756 vc = dest_fpr(ctx, rc); 757 758 /* Almost all integer conversions use cropped rounding; 759 special case that. */ 760 if ((fn11 & QUAL_RM_MASK) == QUAL_RM_C) { 761 gen_helper_cvttq_c(vc, cpu_env, vb); 762 } else { 763 gen_qual_roundmode(ctx, fn11); 764 gen_helper_cvttq(vc, cpu_env, vb); 765 } 766 gen_fp_exc_raise(rc, fn11); 767 } 768 769 static void gen_ieee_intcvt(DisasContext *ctx, 770 void (*helper)(TCGv, TCGv_ptr, TCGv), 771 int rb, int rc, int fn11) 772 { 773 TCGv vb, vc; 774 775 gen_qual_roundmode(ctx, fn11); 776 vb = load_fpr(ctx, rb); 777 vc = dest_fpr(ctx, rc); 778 779 /* The only exception that can be raised by integer conversion 780 is inexact. Thus we only need to worry about exceptions when 781 inexact handling is requested. */ 782 if (fn11 & QUAL_I) { 783 helper(vc, cpu_env, vb); 784 gen_fp_exc_raise(rc, fn11); 785 } else { 786 helper(vc, cpu_env, vb); 787 } 788 } 789 790 #define IEEE_INTCVT(name) \ 791 static inline void glue(gen_, name)(DisasContext *ctx, \ 792 int rb, int rc, int fn11) \ 793 { \ 794 gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11); \ 795 } 796 IEEE_INTCVT(cvtqs) 797 IEEE_INTCVT(cvtqt) 798 799 static void gen_cpy_mask(TCGv vc, TCGv va, TCGv vb, bool inv_a, uint64_t mask) 800 { 801 TCGv vmask = tcg_constant_i64(mask); 802 TCGv tmp = tcg_temp_new_i64(); 803 804 if (inv_a) { 805 tcg_gen_andc_i64(tmp, vmask, va); 806 } else { 807 tcg_gen_and_i64(tmp, va, vmask); 808 } 809 810 tcg_gen_andc_i64(vc, vb, vmask); 811 tcg_gen_or_i64(vc, vc, tmp); 812 813 tcg_temp_free(tmp); 814 } 815 816 static void gen_ieee_arith3(DisasContext *ctx, 817 void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv), 818 int ra, int rb, int rc, int fn11) 819 { 820 TCGv va, vb, vc; 821 822 gen_qual_roundmode(ctx, fn11); 823 gen_qual_flushzero(ctx, fn11); 824 825 va = gen_ieee_input(ctx, ra, fn11, 0); 826 vb = gen_ieee_input(ctx, rb, fn11, 0); 827 vc = dest_fpr(ctx, rc); 828 helper(vc, cpu_env, va, vb); 829 830 gen_fp_exc_raise(rc, fn11); 831 } 832 833 #define IEEE_ARITH3(name) \ 834 static inline void glue(gen_, name)(DisasContext *ctx, \ 835 int ra, int rb, int rc, int fn11) \ 836 { \ 837 gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11); \ 838 } 839 IEEE_ARITH3(adds) 840 IEEE_ARITH3(subs) 841 IEEE_ARITH3(muls) 842 IEEE_ARITH3(divs) 843 IEEE_ARITH3(addt) 844 IEEE_ARITH3(subt) 845 IEEE_ARITH3(mult) 846 IEEE_ARITH3(divt) 847 848 static void gen_ieee_compare(DisasContext *ctx, 849 void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv), 850 int ra, int rb, int rc, int fn11) 851 { 852 TCGv va, vb, vc; 853 854 va = gen_ieee_input(ctx, ra, fn11, 1); 855 vb = gen_ieee_input(ctx, rb, fn11, 1); 856 vc = dest_fpr(ctx, rc); 857 helper(vc, cpu_env, va, vb); 858 859 gen_fp_exc_raise(rc, fn11); 860 } 861 862 #define IEEE_CMP3(name) \ 863 static inline void glue(gen_, name)(DisasContext *ctx, \ 864 int ra, int rb, int rc, int fn11) \ 865 { \ 866 gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11); \ 867 } 868 IEEE_CMP3(cmptun) 869 IEEE_CMP3(cmpteq) 870 IEEE_CMP3(cmptlt) 871 IEEE_CMP3(cmptle) 872 873 static inline uint64_t zapnot_mask(uint8_t lit) 874 { 875 uint64_t mask = 0; 876 int i; 877 878 for (i = 0; i < 8; ++i) { 879 if ((lit >> i) & 1) { 880 mask |= 0xffull << (i * 8); 881 } 882 } 883 return mask; 884 } 885 886 /* Implement zapnot with an immediate operand, which expands to some 887 form of immediate AND. This is a basic building block in the 888 definition of many of the other byte manipulation instructions. */ 889 static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit) 890 { 891 switch (lit) { 892 case 0x00: 893 tcg_gen_movi_i64(dest, 0); 894 break; 895 case 0x01: 896 tcg_gen_ext8u_i64(dest, src); 897 break; 898 case 0x03: 899 tcg_gen_ext16u_i64(dest, src); 900 break; 901 case 0x0f: 902 tcg_gen_ext32u_i64(dest, src); 903 break; 904 case 0xff: 905 tcg_gen_mov_i64(dest, src); 906 break; 907 default: 908 tcg_gen_andi_i64(dest, src, zapnot_mask(lit)); 909 break; 910 } 911 } 912 913 /* EXTWH, EXTLH, EXTQH */ 914 static void gen_ext_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit, 915 uint8_t lit, uint8_t byte_mask) 916 { 917 if (islit) { 918 int pos = (64 - lit * 8) & 0x3f; 919 int len = cto32(byte_mask) * 8; 920 if (pos < len) { 921 tcg_gen_deposit_z_i64(vc, va, pos, len - pos); 922 } else { 923 tcg_gen_movi_i64(vc, 0); 924 } 925 } else { 926 TCGv tmp = tcg_temp_new(); 927 tcg_gen_shli_i64(tmp, load_gpr(ctx, rb), 3); 928 tcg_gen_neg_i64(tmp, tmp); 929 tcg_gen_andi_i64(tmp, tmp, 0x3f); 930 tcg_gen_shl_i64(vc, va, tmp); 931 tcg_temp_free(tmp); 932 } 933 gen_zapnoti(vc, vc, byte_mask); 934 } 935 936 /* EXTBL, EXTWL, EXTLL, EXTQL */ 937 static void gen_ext_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit, 938 uint8_t lit, uint8_t byte_mask) 939 { 940 if (islit) { 941 int pos = (lit & 7) * 8; 942 int len = cto32(byte_mask) * 8; 943 if (pos + len >= 64) { 944 len = 64 - pos; 945 } 946 tcg_gen_extract_i64(vc, va, pos, len); 947 } else { 948 TCGv tmp = tcg_temp_new(); 949 tcg_gen_andi_i64(tmp, load_gpr(ctx, rb), 7); 950 tcg_gen_shli_i64(tmp, tmp, 3); 951 tcg_gen_shr_i64(vc, va, tmp); 952 tcg_temp_free(tmp); 953 gen_zapnoti(vc, vc, byte_mask); 954 } 955 } 956 957 /* INSWH, INSLH, INSQH */ 958 static void gen_ins_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit, 959 uint8_t lit, uint8_t byte_mask) 960 { 961 if (islit) { 962 int pos = 64 - (lit & 7) * 8; 963 int len = cto32(byte_mask) * 8; 964 if (pos < len) { 965 tcg_gen_extract_i64(vc, va, pos, len - pos); 966 } else { 967 tcg_gen_movi_i64(vc, 0); 968 } 969 } else { 970 TCGv tmp = tcg_temp_new(); 971 TCGv shift = tcg_temp_new(); 972 973 /* The instruction description has us left-shift the byte mask 974 and extract bits <15:8> and apply that zap at the end. This 975 is equivalent to simply performing the zap first and shifting 976 afterward. */ 977 gen_zapnoti(tmp, va, byte_mask); 978 979 /* If (B & 7) == 0, we need to shift by 64 and leave a zero. Do this 980 portably by splitting the shift into two parts: shift_count-1 and 1. 981 Arrange for the -1 by using ones-complement instead of 982 twos-complement in the negation: ~(B * 8) & 63. */ 983 984 tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3); 985 tcg_gen_not_i64(shift, shift); 986 tcg_gen_andi_i64(shift, shift, 0x3f); 987 988 tcg_gen_shr_i64(vc, tmp, shift); 989 tcg_gen_shri_i64(vc, vc, 1); 990 tcg_temp_free(shift); 991 tcg_temp_free(tmp); 992 } 993 } 994 995 /* INSBL, INSWL, INSLL, INSQL */ 996 static void gen_ins_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit, 997 uint8_t lit, uint8_t byte_mask) 998 { 999 if (islit) { 1000 int pos = (lit & 7) * 8; 1001 int len = cto32(byte_mask) * 8; 1002 if (pos + len > 64) { 1003 len = 64 - pos; 1004 } 1005 tcg_gen_deposit_z_i64(vc, va, pos, len); 1006 } else { 1007 TCGv tmp = tcg_temp_new(); 1008 TCGv shift = tcg_temp_new(); 1009 1010 /* The instruction description has us left-shift the byte mask 1011 and extract bits <15:8> and apply that zap at the end. This 1012 is equivalent to simply performing the zap first and shifting 1013 afterward. */ 1014 gen_zapnoti(tmp, va, byte_mask); 1015 1016 tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7); 1017 tcg_gen_shli_i64(shift, shift, 3); 1018 tcg_gen_shl_i64(vc, tmp, shift); 1019 tcg_temp_free(shift); 1020 tcg_temp_free(tmp); 1021 } 1022 } 1023 1024 /* MSKWH, MSKLH, MSKQH */ 1025 static void gen_msk_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit, 1026 uint8_t lit, uint8_t byte_mask) 1027 { 1028 if (islit) { 1029 gen_zapnoti(vc, va, ~((byte_mask << (lit & 7)) >> 8)); 1030 } else { 1031 TCGv shift = tcg_temp_new(); 1032 TCGv mask = tcg_temp_new(); 1033 1034 /* The instruction description is as above, where the byte_mask 1035 is shifted left, and then we extract bits <15:8>. This can be 1036 emulated with a right-shift on the expanded byte mask. This 1037 requires extra care because for an input <2:0> == 0 we need a 1038 shift of 64 bits in order to generate a zero. This is done by 1039 splitting the shift into two parts, the variable shift - 1 1040 followed by a constant 1 shift. The code we expand below is 1041 equivalent to ~(B * 8) & 63. */ 1042 1043 tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3); 1044 tcg_gen_not_i64(shift, shift); 1045 tcg_gen_andi_i64(shift, shift, 0x3f); 1046 tcg_gen_movi_i64(mask, zapnot_mask (byte_mask)); 1047 tcg_gen_shr_i64(mask, mask, shift); 1048 tcg_gen_shri_i64(mask, mask, 1); 1049 1050 tcg_gen_andc_i64(vc, va, mask); 1051 1052 tcg_temp_free(mask); 1053 tcg_temp_free(shift); 1054 } 1055 } 1056 1057 /* MSKBL, MSKWL, MSKLL, MSKQL */ 1058 static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit, 1059 uint8_t lit, uint8_t byte_mask) 1060 { 1061 if (islit) { 1062 gen_zapnoti(vc, va, ~(byte_mask << (lit & 7))); 1063 } else { 1064 TCGv shift = tcg_temp_new(); 1065 TCGv mask = tcg_temp_new(); 1066 1067 tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7); 1068 tcg_gen_shli_i64(shift, shift, 3); 1069 tcg_gen_movi_i64(mask, zapnot_mask(byte_mask)); 1070 tcg_gen_shl_i64(mask, mask, shift); 1071 1072 tcg_gen_andc_i64(vc, va, mask); 1073 1074 tcg_temp_free(mask); 1075 tcg_temp_free(shift); 1076 } 1077 } 1078 1079 static void gen_rx(DisasContext *ctx, int ra, int set) 1080 { 1081 if (ra != 31) { 1082 ld_flag_byte(ctx->ir[ra], ENV_FLAG_RX_SHIFT); 1083 } 1084 1085 st_flag_byte(tcg_constant_i64(set), ENV_FLAG_RX_SHIFT); 1086 } 1087 1088 static DisasJumpType gen_call_pal(DisasContext *ctx, int palcode) 1089 { 1090 /* We're emulating OSF/1 PALcode. Many of these are trivial access 1091 to internal cpu registers. */ 1092 1093 /* Unprivileged PAL call */ 1094 if (palcode >= 0x80 && palcode < 0xC0) { 1095 switch (palcode) { 1096 case 0x86: 1097 /* IMB */ 1098 /* No-op inside QEMU. */ 1099 break; 1100 case 0x9E: 1101 /* RDUNIQUE */ 1102 tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env, 1103 offsetof(CPUAlphaState, unique)); 1104 break; 1105 case 0x9F: 1106 /* WRUNIQUE */ 1107 tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env, 1108 offsetof(CPUAlphaState, unique)); 1109 break; 1110 default: 1111 palcode &= 0xbf; 1112 goto do_call_pal; 1113 } 1114 return DISAS_NEXT; 1115 } 1116 1117 #ifndef CONFIG_USER_ONLY 1118 /* Privileged PAL code */ 1119 if (palcode < 0x40 && (ctx->tbflags & ENV_FLAG_PS_USER) == 0) { 1120 switch (palcode) { 1121 case 0x01: 1122 /* CFLUSH */ 1123 /* No-op inside QEMU. */ 1124 break; 1125 case 0x02: 1126 /* DRAINA */ 1127 /* No-op inside QEMU. */ 1128 break; 1129 case 0x2D: 1130 /* WRVPTPTR */ 1131 tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env, 1132 offsetof(CPUAlphaState, vptptr)); 1133 break; 1134 case 0x31: 1135 /* WRVAL */ 1136 tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env, 1137 offsetof(CPUAlphaState, sysval)); 1138 break; 1139 case 0x32: 1140 /* RDVAL */ 1141 tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env, 1142 offsetof(CPUAlphaState, sysval)); 1143 break; 1144 1145 case 0x35: 1146 /* SWPIPL */ 1147 /* Note that we already know we're in kernel mode, so we know 1148 that PS only contains the 3 IPL bits. */ 1149 ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT); 1150 1151 /* But make sure and store only the 3 IPL bits from the user. */ 1152 { 1153 TCGv tmp = tcg_temp_new(); 1154 tcg_gen_andi_i64(tmp, ctx->ir[IR_A0], PS_INT_MASK); 1155 st_flag_byte(tmp, ENV_FLAG_PS_SHIFT); 1156 tcg_temp_free(tmp); 1157 } 1158 1159 /* Allow interrupts to be recognized right away. */ 1160 tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next); 1161 return DISAS_PC_UPDATED_NOCHAIN; 1162 1163 case 0x36: 1164 /* RDPS */ 1165 ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT); 1166 break; 1167 1168 case 0x38: 1169 /* WRUSP */ 1170 tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env, 1171 offsetof(CPUAlphaState, usp)); 1172 break; 1173 case 0x3A: 1174 /* RDUSP */ 1175 tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env, 1176 offsetof(CPUAlphaState, usp)); 1177 break; 1178 case 0x3C: 1179 /* WHAMI */ 1180 tcg_gen_ld32s_i64(ctx->ir[IR_V0], cpu_env, 1181 -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index)); 1182 break; 1183 1184 case 0x3E: 1185 /* WTINT */ 1186 tcg_gen_st_i32(tcg_constant_i32(1), cpu_env, 1187 -offsetof(AlphaCPU, env) + 1188 offsetof(CPUState, halted)); 1189 tcg_gen_movi_i64(ctx->ir[IR_V0], 0); 1190 return gen_excp(ctx, EXCP_HALTED, 0); 1191 1192 default: 1193 palcode &= 0x3f; 1194 goto do_call_pal; 1195 } 1196 return DISAS_NEXT; 1197 } 1198 #endif 1199 return gen_invalid(ctx); 1200 1201 do_call_pal: 1202 #ifdef CONFIG_USER_ONLY 1203 return gen_excp(ctx, EXCP_CALL_PAL, palcode); 1204 #else 1205 { 1206 TCGv tmp = tcg_temp_new(); 1207 uint64_t exc_addr = ctx->base.pc_next; 1208 uint64_t entry = ctx->palbr; 1209 1210 if (ctx->tbflags & ENV_FLAG_PAL_MODE) { 1211 exc_addr |= 1; 1212 } else { 1213 tcg_gen_movi_i64(tmp, 1); 1214 st_flag_byte(tmp, ENV_FLAG_PAL_SHIFT); 1215 } 1216 1217 tcg_gen_movi_i64(tmp, exc_addr); 1218 tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr)); 1219 tcg_temp_free(tmp); 1220 1221 entry += (palcode & 0x80 1222 ? 0x2000 + (palcode - 0x80) * 64 1223 : 0x1000 + palcode * 64); 1224 1225 tcg_gen_movi_i64(cpu_pc, entry); 1226 return DISAS_PC_UPDATED; 1227 } 1228 #endif 1229 } 1230 1231 #ifndef CONFIG_USER_ONLY 1232 1233 #define PR_LONG 0x200000 1234 1235 static int cpu_pr_data(int pr) 1236 { 1237 switch (pr) { 1238 case 2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG; 1239 case 3: return offsetof(CPUAlphaState, trap_arg0); 1240 case 4: return offsetof(CPUAlphaState, trap_arg1); 1241 case 5: return offsetof(CPUAlphaState, trap_arg2); 1242 case 6: return offsetof(CPUAlphaState, exc_addr); 1243 case 7: return offsetof(CPUAlphaState, palbr); 1244 case 8: return offsetof(CPUAlphaState, ptbr); 1245 case 9: return offsetof(CPUAlphaState, vptptr); 1246 case 10: return offsetof(CPUAlphaState, unique); 1247 case 11: return offsetof(CPUAlphaState, sysval); 1248 case 12: return offsetof(CPUAlphaState, usp); 1249 1250 case 40 ... 63: 1251 return offsetof(CPUAlphaState, scratch[pr - 40]); 1252 1253 case 251: 1254 return offsetof(CPUAlphaState, alarm_expire); 1255 } 1256 return 0; 1257 } 1258 1259 static DisasJumpType gen_mfpr(DisasContext *ctx, TCGv va, int regno) 1260 { 1261 void (*helper)(TCGv); 1262 int data; 1263 1264 switch (regno) { 1265 case 32 ... 39: 1266 /* Accessing the "non-shadow" general registers. */ 1267 regno = regno == 39 ? 25 : regno - 32 + 8; 1268 tcg_gen_mov_i64(va, cpu_std_ir[regno]); 1269 break; 1270 1271 case 250: /* WALLTIME */ 1272 helper = gen_helper_get_walltime; 1273 goto do_helper; 1274 case 249: /* VMTIME */ 1275 helper = gen_helper_get_vmtime; 1276 do_helper: 1277 if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { 1278 gen_io_start(); 1279 helper(va); 1280 return DISAS_PC_STALE; 1281 } else { 1282 helper(va); 1283 } 1284 break; 1285 1286 case 0: /* PS */ 1287 ld_flag_byte(va, ENV_FLAG_PS_SHIFT); 1288 break; 1289 case 1: /* FEN */ 1290 ld_flag_byte(va, ENV_FLAG_FEN_SHIFT); 1291 break; 1292 1293 default: 1294 /* The basic registers are data only, and unknown registers 1295 are read-zero, write-ignore. */ 1296 data = cpu_pr_data(regno); 1297 if (data == 0) { 1298 tcg_gen_movi_i64(va, 0); 1299 } else if (data & PR_LONG) { 1300 tcg_gen_ld32s_i64(va, cpu_env, data & ~PR_LONG); 1301 } else { 1302 tcg_gen_ld_i64(va, cpu_env, data); 1303 } 1304 break; 1305 } 1306 1307 return DISAS_NEXT; 1308 } 1309 1310 static DisasJumpType gen_mtpr(DisasContext *ctx, TCGv vb, int regno) 1311 { 1312 int data; 1313 DisasJumpType ret = DISAS_NEXT; 1314 1315 switch (regno) { 1316 case 255: 1317 /* TBIA */ 1318 gen_helper_tbia(cpu_env); 1319 break; 1320 1321 case 254: 1322 /* TBIS */ 1323 gen_helper_tbis(cpu_env, vb); 1324 break; 1325 1326 case 253: 1327 /* WAIT */ 1328 tcg_gen_st_i32(tcg_constant_i32(1), cpu_env, 1329 -offsetof(AlphaCPU, env) + offsetof(CPUState, halted)); 1330 return gen_excp(ctx, EXCP_HALTED, 0); 1331 1332 case 252: 1333 /* HALT */ 1334 gen_helper_halt(vb); 1335 return DISAS_PC_STALE; 1336 1337 case 251: 1338 /* ALARM */ 1339 if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { 1340 gen_io_start(); 1341 ret = DISAS_PC_STALE; 1342 } 1343 gen_helper_set_alarm(cpu_env, vb); 1344 break; 1345 1346 case 7: 1347 /* PALBR */ 1348 tcg_gen_st_i64(vb, cpu_env, offsetof(CPUAlphaState, palbr)); 1349 /* Changing the PAL base register implies un-chaining all of the TBs 1350 that ended with a CALL_PAL. Since the base register usually only 1351 changes during boot, flushing everything works well. */ 1352 gen_helper_tb_flush(cpu_env); 1353 return DISAS_PC_STALE; 1354 1355 case 32 ... 39: 1356 /* Accessing the "non-shadow" general registers. */ 1357 regno = regno == 39 ? 25 : regno - 32 + 8; 1358 tcg_gen_mov_i64(cpu_std_ir[regno], vb); 1359 break; 1360 1361 case 0: /* PS */ 1362 st_flag_byte(vb, ENV_FLAG_PS_SHIFT); 1363 break; 1364 case 1: /* FEN */ 1365 st_flag_byte(vb, ENV_FLAG_FEN_SHIFT); 1366 break; 1367 1368 default: 1369 /* The basic registers are data only, and unknown registers 1370 are read-zero, write-ignore. */ 1371 data = cpu_pr_data(regno); 1372 if (data != 0) { 1373 if (data & PR_LONG) { 1374 tcg_gen_st32_i64(vb, cpu_env, data & ~PR_LONG); 1375 } else { 1376 tcg_gen_st_i64(vb, cpu_env, data); 1377 } 1378 } 1379 break; 1380 } 1381 1382 return ret; 1383 } 1384 #endif /* !USER_ONLY*/ 1385 1386 #define REQUIRE_NO_LIT \ 1387 do { \ 1388 if (real_islit) { \ 1389 goto invalid_opc; \ 1390 } \ 1391 } while (0) 1392 1393 #define REQUIRE_AMASK(FLAG) \ 1394 do { \ 1395 if ((ctx->amask & AMASK_##FLAG) == 0) { \ 1396 goto invalid_opc; \ 1397 } \ 1398 } while (0) 1399 1400 #define REQUIRE_TB_FLAG(FLAG) \ 1401 do { \ 1402 if ((ctx->tbflags & (FLAG)) == 0) { \ 1403 goto invalid_opc; \ 1404 } \ 1405 } while (0) 1406 1407 #define REQUIRE_REG_31(WHICH) \ 1408 do { \ 1409 if (WHICH != 31) { \ 1410 goto invalid_opc; \ 1411 } \ 1412 } while (0) 1413 1414 #define REQUIRE_FEN \ 1415 do { \ 1416 if (!(ctx->tbflags & ENV_FLAG_FEN)) { \ 1417 goto raise_fen; \ 1418 } \ 1419 } while (0) 1420 1421 static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) 1422 { 1423 int32_t disp21, disp16, disp12 __attribute__((unused)); 1424 uint16_t fn11; 1425 uint8_t opc, ra, rb, rc, fpfn, fn7, lit; 1426 bool islit, real_islit; 1427 TCGv va, vb, vc, tmp, tmp2; 1428 TCGv_i32 t32; 1429 DisasJumpType ret; 1430 1431 /* Decode all instruction fields */ 1432 opc = extract32(insn, 26, 6); 1433 ra = extract32(insn, 21, 5); 1434 rb = extract32(insn, 16, 5); 1435 rc = extract32(insn, 0, 5); 1436 real_islit = islit = extract32(insn, 12, 1); 1437 lit = extract32(insn, 13, 8); 1438 1439 disp21 = sextract32(insn, 0, 21); 1440 disp16 = sextract32(insn, 0, 16); 1441 disp12 = sextract32(insn, 0, 12); 1442 1443 fn11 = extract32(insn, 5, 11); 1444 fpfn = extract32(insn, 5, 6); 1445 fn7 = extract32(insn, 5, 7); 1446 1447 if (rb == 31 && !islit) { 1448 islit = true; 1449 lit = 0; 1450 } 1451 1452 ret = DISAS_NEXT; 1453 switch (opc) { 1454 case 0x00: 1455 /* CALL_PAL */ 1456 ret = gen_call_pal(ctx, insn & 0x03ffffff); 1457 break; 1458 case 0x01: 1459 /* OPC01 */ 1460 goto invalid_opc; 1461 case 0x02: 1462 /* OPC02 */ 1463 goto invalid_opc; 1464 case 0x03: 1465 /* OPC03 */ 1466 goto invalid_opc; 1467 case 0x04: 1468 /* OPC04 */ 1469 goto invalid_opc; 1470 case 0x05: 1471 /* OPC05 */ 1472 goto invalid_opc; 1473 case 0x06: 1474 /* OPC06 */ 1475 goto invalid_opc; 1476 case 0x07: 1477 /* OPC07 */ 1478 goto invalid_opc; 1479 1480 case 0x09: 1481 /* LDAH */ 1482 disp16 = (uint32_t)disp16 << 16; 1483 /* fall through */ 1484 case 0x08: 1485 /* LDA */ 1486 va = dest_gpr(ctx, ra); 1487 /* It's worth special-casing immediate loads. */ 1488 if (rb == 31) { 1489 tcg_gen_movi_i64(va, disp16); 1490 } else { 1491 tcg_gen_addi_i64(va, load_gpr(ctx, rb), disp16); 1492 } 1493 break; 1494 1495 case 0x0A: 1496 /* LDBU */ 1497 REQUIRE_AMASK(BWX); 1498 gen_load_int(ctx, ra, rb, disp16, MO_UB, 0, 0); 1499 break; 1500 case 0x0B: 1501 /* LDQ_U */ 1502 gen_load_int(ctx, ra, rb, disp16, MO_LEQ, 1, 0); 1503 break; 1504 case 0x0C: 1505 /* LDWU */ 1506 REQUIRE_AMASK(BWX); 1507 gen_load_int(ctx, ra, rb, disp16, MO_LEUW, 0, 0); 1508 break; 1509 case 0x0D: 1510 /* STW */ 1511 REQUIRE_AMASK(BWX); 1512 gen_store_int(ctx, ra, rb, disp16, MO_LEUW, 0); 1513 break; 1514 case 0x0E: 1515 /* STB */ 1516 REQUIRE_AMASK(BWX); 1517 gen_store_int(ctx, ra, rb, disp16, MO_UB, 0); 1518 break; 1519 case 0x0F: 1520 /* STQ_U */ 1521 gen_store_int(ctx, ra, rb, disp16, MO_LEQ, 1); 1522 break; 1523 1524 case 0x10: 1525 vc = dest_gpr(ctx, rc); 1526 vb = load_gpr_lit(ctx, rb, lit, islit); 1527 1528 if (ra == 31) { 1529 if (fn7 == 0x00) { 1530 /* Special case ADDL as SEXTL. */ 1531 tcg_gen_ext32s_i64(vc, vb); 1532 break; 1533 } 1534 if (fn7 == 0x29) { 1535 /* Special case SUBQ as NEGQ. */ 1536 tcg_gen_neg_i64(vc, vb); 1537 break; 1538 } 1539 } 1540 1541 va = load_gpr(ctx, ra); 1542 switch (fn7) { 1543 case 0x00: 1544 /* ADDL */ 1545 tcg_gen_add_i64(vc, va, vb); 1546 tcg_gen_ext32s_i64(vc, vc); 1547 break; 1548 case 0x02: 1549 /* S4ADDL */ 1550 tmp = tcg_temp_new(); 1551 tcg_gen_shli_i64(tmp, va, 2); 1552 tcg_gen_add_i64(tmp, tmp, vb); 1553 tcg_gen_ext32s_i64(vc, tmp); 1554 tcg_temp_free(tmp); 1555 break; 1556 case 0x09: 1557 /* SUBL */ 1558 tcg_gen_sub_i64(vc, va, vb); 1559 tcg_gen_ext32s_i64(vc, vc); 1560 break; 1561 case 0x0B: 1562 /* S4SUBL */ 1563 tmp = tcg_temp_new(); 1564 tcg_gen_shli_i64(tmp, va, 2); 1565 tcg_gen_sub_i64(tmp, tmp, vb); 1566 tcg_gen_ext32s_i64(vc, tmp); 1567 tcg_temp_free(tmp); 1568 break; 1569 case 0x0F: 1570 /* CMPBGE */ 1571 if (ra == 31) { 1572 /* Special case 0 >= X as X == 0. */ 1573 gen_helper_cmpbe0(vc, vb); 1574 } else { 1575 gen_helper_cmpbge(vc, va, vb); 1576 } 1577 break; 1578 case 0x12: 1579 /* S8ADDL */ 1580 tmp = tcg_temp_new(); 1581 tcg_gen_shli_i64(tmp, va, 3); 1582 tcg_gen_add_i64(tmp, tmp, vb); 1583 tcg_gen_ext32s_i64(vc, tmp); 1584 tcg_temp_free(tmp); 1585 break; 1586 case 0x1B: 1587 /* S8SUBL */ 1588 tmp = tcg_temp_new(); 1589 tcg_gen_shli_i64(tmp, va, 3); 1590 tcg_gen_sub_i64(tmp, tmp, vb); 1591 tcg_gen_ext32s_i64(vc, tmp); 1592 tcg_temp_free(tmp); 1593 break; 1594 case 0x1D: 1595 /* CMPULT */ 1596 tcg_gen_setcond_i64(TCG_COND_LTU, vc, va, vb); 1597 break; 1598 case 0x20: 1599 /* ADDQ */ 1600 tcg_gen_add_i64(vc, va, vb); 1601 break; 1602 case 0x22: 1603 /* S4ADDQ */ 1604 tmp = tcg_temp_new(); 1605 tcg_gen_shli_i64(tmp, va, 2); 1606 tcg_gen_add_i64(vc, tmp, vb); 1607 tcg_temp_free(tmp); 1608 break; 1609 case 0x29: 1610 /* SUBQ */ 1611 tcg_gen_sub_i64(vc, va, vb); 1612 break; 1613 case 0x2B: 1614 /* S4SUBQ */ 1615 tmp = tcg_temp_new(); 1616 tcg_gen_shli_i64(tmp, va, 2); 1617 tcg_gen_sub_i64(vc, tmp, vb); 1618 tcg_temp_free(tmp); 1619 break; 1620 case 0x2D: 1621 /* CMPEQ */ 1622 tcg_gen_setcond_i64(TCG_COND_EQ, vc, va, vb); 1623 break; 1624 case 0x32: 1625 /* S8ADDQ */ 1626 tmp = tcg_temp_new(); 1627 tcg_gen_shli_i64(tmp, va, 3); 1628 tcg_gen_add_i64(vc, tmp, vb); 1629 tcg_temp_free(tmp); 1630 break; 1631 case 0x3B: 1632 /* S8SUBQ */ 1633 tmp = tcg_temp_new(); 1634 tcg_gen_shli_i64(tmp, va, 3); 1635 tcg_gen_sub_i64(vc, tmp, vb); 1636 tcg_temp_free(tmp); 1637 break; 1638 case 0x3D: 1639 /* CMPULE */ 1640 tcg_gen_setcond_i64(TCG_COND_LEU, vc, va, vb); 1641 break; 1642 case 0x40: 1643 /* ADDL/V */ 1644 tmp = tcg_temp_new(); 1645 tcg_gen_ext32s_i64(tmp, va); 1646 tcg_gen_ext32s_i64(vc, vb); 1647 tcg_gen_add_i64(tmp, tmp, vc); 1648 tcg_gen_ext32s_i64(vc, tmp); 1649 gen_helper_check_overflow(cpu_env, vc, tmp); 1650 tcg_temp_free(tmp); 1651 break; 1652 case 0x49: 1653 /* SUBL/V */ 1654 tmp = tcg_temp_new(); 1655 tcg_gen_ext32s_i64(tmp, va); 1656 tcg_gen_ext32s_i64(vc, vb); 1657 tcg_gen_sub_i64(tmp, tmp, vc); 1658 tcg_gen_ext32s_i64(vc, tmp); 1659 gen_helper_check_overflow(cpu_env, vc, tmp); 1660 tcg_temp_free(tmp); 1661 break; 1662 case 0x4D: 1663 /* CMPLT */ 1664 tcg_gen_setcond_i64(TCG_COND_LT, vc, va, vb); 1665 break; 1666 case 0x60: 1667 /* ADDQ/V */ 1668 tmp = tcg_temp_new(); 1669 tmp2 = tcg_temp_new(); 1670 tcg_gen_eqv_i64(tmp, va, vb); 1671 tcg_gen_mov_i64(tmp2, va); 1672 tcg_gen_add_i64(vc, va, vb); 1673 tcg_gen_xor_i64(tmp2, tmp2, vc); 1674 tcg_gen_and_i64(tmp, tmp, tmp2); 1675 tcg_gen_shri_i64(tmp, tmp, 63); 1676 tcg_gen_movi_i64(tmp2, 0); 1677 gen_helper_check_overflow(cpu_env, tmp, tmp2); 1678 tcg_temp_free(tmp); 1679 tcg_temp_free(tmp2); 1680 break; 1681 case 0x69: 1682 /* SUBQ/V */ 1683 tmp = tcg_temp_new(); 1684 tmp2 = tcg_temp_new(); 1685 tcg_gen_xor_i64(tmp, va, vb); 1686 tcg_gen_mov_i64(tmp2, va); 1687 tcg_gen_sub_i64(vc, va, vb); 1688 tcg_gen_xor_i64(tmp2, tmp2, vc); 1689 tcg_gen_and_i64(tmp, tmp, tmp2); 1690 tcg_gen_shri_i64(tmp, tmp, 63); 1691 tcg_gen_movi_i64(tmp2, 0); 1692 gen_helper_check_overflow(cpu_env, tmp, tmp2); 1693 tcg_temp_free(tmp); 1694 tcg_temp_free(tmp2); 1695 break; 1696 case 0x6D: 1697 /* CMPLE */ 1698 tcg_gen_setcond_i64(TCG_COND_LE, vc, va, vb); 1699 break; 1700 default: 1701 goto invalid_opc; 1702 } 1703 break; 1704 1705 case 0x11: 1706 if (fn7 == 0x20) { 1707 if (rc == 31) { 1708 /* Special case BIS as NOP. */ 1709 break; 1710 } 1711 if (ra == 31) { 1712 /* Special case BIS as MOV. */ 1713 vc = dest_gpr(ctx, rc); 1714 if (islit) { 1715 tcg_gen_movi_i64(vc, lit); 1716 } else { 1717 tcg_gen_mov_i64(vc, load_gpr(ctx, rb)); 1718 } 1719 break; 1720 } 1721 } 1722 1723 vc = dest_gpr(ctx, rc); 1724 vb = load_gpr_lit(ctx, rb, lit, islit); 1725 1726 if (fn7 == 0x28 && ra == 31) { 1727 /* Special case ORNOT as NOT. */ 1728 tcg_gen_not_i64(vc, vb); 1729 break; 1730 } 1731 1732 va = load_gpr(ctx, ra); 1733 switch (fn7) { 1734 case 0x00: 1735 /* AND */ 1736 tcg_gen_and_i64(vc, va, vb); 1737 break; 1738 case 0x08: 1739 /* BIC */ 1740 tcg_gen_andc_i64(vc, va, vb); 1741 break; 1742 case 0x14: 1743 /* CMOVLBS */ 1744 tmp = tcg_temp_new(); 1745 tcg_gen_andi_i64(tmp, va, 1); 1746 tcg_gen_movcond_i64(TCG_COND_NE, vc, tmp, load_zero(ctx), 1747 vb, load_gpr(ctx, rc)); 1748 tcg_temp_free(tmp); 1749 break; 1750 case 0x16: 1751 /* CMOVLBC */ 1752 tmp = tcg_temp_new(); 1753 tcg_gen_andi_i64(tmp, va, 1); 1754 tcg_gen_movcond_i64(TCG_COND_EQ, vc, tmp, load_zero(ctx), 1755 vb, load_gpr(ctx, rc)); 1756 tcg_temp_free(tmp); 1757 break; 1758 case 0x20: 1759 /* BIS */ 1760 tcg_gen_or_i64(vc, va, vb); 1761 break; 1762 case 0x24: 1763 /* CMOVEQ */ 1764 tcg_gen_movcond_i64(TCG_COND_EQ, vc, va, load_zero(ctx), 1765 vb, load_gpr(ctx, rc)); 1766 break; 1767 case 0x26: 1768 /* CMOVNE */ 1769 tcg_gen_movcond_i64(TCG_COND_NE, vc, va, load_zero(ctx), 1770 vb, load_gpr(ctx, rc)); 1771 break; 1772 case 0x28: 1773 /* ORNOT */ 1774 tcg_gen_orc_i64(vc, va, vb); 1775 break; 1776 case 0x40: 1777 /* XOR */ 1778 tcg_gen_xor_i64(vc, va, vb); 1779 break; 1780 case 0x44: 1781 /* CMOVLT */ 1782 tcg_gen_movcond_i64(TCG_COND_LT, vc, va, load_zero(ctx), 1783 vb, load_gpr(ctx, rc)); 1784 break; 1785 case 0x46: 1786 /* CMOVGE */ 1787 tcg_gen_movcond_i64(TCG_COND_GE, vc, va, load_zero(ctx), 1788 vb, load_gpr(ctx, rc)); 1789 break; 1790 case 0x48: 1791 /* EQV */ 1792 tcg_gen_eqv_i64(vc, va, vb); 1793 break; 1794 case 0x61: 1795 /* AMASK */ 1796 REQUIRE_REG_31(ra); 1797 tcg_gen_andi_i64(vc, vb, ~ctx->amask); 1798 break; 1799 case 0x64: 1800 /* CMOVLE */ 1801 tcg_gen_movcond_i64(TCG_COND_LE, vc, va, load_zero(ctx), 1802 vb, load_gpr(ctx, rc)); 1803 break; 1804 case 0x66: 1805 /* CMOVGT */ 1806 tcg_gen_movcond_i64(TCG_COND_GT, vc, va, load_zero(ctx), 1807 vb, load_gpr(ctx, rc)); 1808 break; 1809 case 0x6C: 1810 /* IMPLVER */ 1811 REQUIRE_REG_31(ra); 1812 tcg_gen_movi_i64(vc, ctx->implver); 1813 break; 1814 default: 1815 goto invalid_opc; 1816 } 1817 break; 1818 1819 case 0x12: 1820 vc = dest_gpr(ctx, rc); 1821 va = load_gpr(ctx, ra); 1822 switch (fn7) { 1823 case 0x02: 1824 /* MSKBL */ 1825 gen_msk_l(ctx, vc, va, rb, islit, lit, 0x01); 1826 break; 1827 case 0x06: 1828 /* EXTBL */ 1829 gen_ext_l(ctx, vc, va, rb, islit, lit, 0x01); 1830 break; 1831 case 0x0B: 1832 /* INSBL */ 1833 gen_ins_l(ctx, vc, va, rb, islit, lit, 0x01); 1834 break; 1835 case 0x12: 1836 /* MSKWL */ 1837 gen_msk_l(ctx, vc, va, rb, islit, lit, 0x03); 1838 break; 1839 case 0x16: 1840 /* EXTWL */ 1841 gen_ext_l(ctx, vc, va, rb, islit, lit, 0x03); 1842 break; 1843 case 0x1B: 1844 /* INSWL */ 1845 gen_ins_l(ctx, vc, va, rb, islit, lit, 0x03); 1846 break; 1847 case 0x22: 1848 /* MSKLL */ 1849 gen_msk_l(ctx, vc, va, rb, islit, lit, 0x0f); 1850 break; 1851 case 0x26: 1852 /* EXTLL */ 1853 gen_ext_l(ctx, vc, va, rb, islit, lit, 0x0f); 1854 break; 1855 case 0x2B: 1856 /* INSLL */ 1857 gen_ins_l(ctx, vc, va, rb, islit, lit, 0x0f); 1858 break; 1859 case 0x30: 1860 /* ZAP */ 1861 if (islit) { 1862 gen_zapnoti(vc, va, ~lit); 1863 } else { 1864 gen_helper_zap(vc, va, load_gpr(ctx, rb)); 1865 } 1866 break; 1867 case 0x31: 1868 /* ZAPNOT */ 1869 if (islit) { 1870 gen_zapnoti(vc, va, lit); 1871 } else { 1872 gen_helper_zapnot(vc, va, load_gpr(ctx, rb)); 1873 } 1874 break; 1875 case 0x32: 1876 /* MSKQL */ 1877 gen_msk_l(ctx, vc, va, rb, islit, lit, 0xff); 1878 break; 1879 case 0x34: 1880 /* SRL */ 1881 if (islit) { 1882 tcg_gen_shri_i64(vc, va, lit & 0x3f); 1883 } else { 1884 tmp = tcg_temp_new(); 1885 vb = load_gpr(ctx, rb); 1886 tcg_gen_andi_i64(tmp, vb, 0x3f); 1887 tcg_gen_shr_i64(vc, va, tmp); 1888 tcg_temp_free(tmp); 1889 } 1890 break; 1891 case 0x36: 1892 /* EXTQL */ 1893 gen_ext_l(ctx, vc, va, rb, islit, lit, 0xff); 1894 break; 1895 case 0x39: 1896 /* SLL */ 1897 if (islit) { 1898 tcg_gen_shli_i64(vc, va, lit & 0x3f); 1899 } else { 1900 tmp = tcg_temp_new(); 1901 vb = load_gpr(ctx, rb); 1902 tcg_gen_andi_i64(tmp, vb, 0x3f); 1903 tcg_gen_shl_i64(vc, va, tmp); 1904 tcg_temp_free(tmp); 1905 } 1906 break; 1907 case 0x3B: 1908 /* INSQL */ 1909 gen_ins_l(ctx, vc, va, rb, islit, lit, 0xff); 1910 break; 1911 case 0x3C: 1912 /* SRA */ 1913 if (islit) { 1914 tcg_gen_sari_i64(vc, va, lit & 0x3f); 1915 } else { 1916 tmp = tcg_temp_new(); 1917 vb = load_gpr(ctx, rb); 1918 tcg_gen_andi_i64(tmp, vb, 0x3f); 1919 tcg_gen_sar_i64(vc, va, tmp); 1920 tcg_temp_free(tmp); 1921 } 1922 break; 1923 case 0x52: 1924 /* MSKWH */ 1925 gen_msk_h(ctx, vc, va, rb, islit, lit, 0x03); 1926 break; 1927 case 0x57: 1928 /* INSWH */ 1929 gen_ins_h(ctx, vc, va, rb, islit, lit, 0x03); 1930 break; 1931 case 0x5A: 1932 /* EXTWH */ 1933 gen_ext_h(ctx, vc, va, rb, islit, lit, 0x03); 1934 break; 1935 case 0x62: 1936 /* MSKLH */ 1937 gen_msk_h(ctx, vc, va, rb, islit, lit, 0x0f); 1938 break; 1939 case 0x67: 1940 /* INSLH */ 1941 gen_ins_h(ctx, vc, va, rb, islit, lit, 0x0f); 1942 break; 1943 case 0x6A: 1944 /* EXTLH */ 1945 gen_ext_h(ctx, vc, va, rb, islit, lit, 0x0f); 1946 break; 1947 case 0x72: 1948 /* MSKQH */ 1949 gen_msk_h(ctx, vc, va, rb, islit, lit, 0xff); 1950 break; 1951 case 0x77: 1952 /* INSQH */ 1953 gen_ins_h(ctx, vc, va, rb, islit, lit, 0xff); 1954 break; 1955 case 0x7A: 1956 /* EXTQH */ 1957 gen_ext_h(ctx, vc, va, rb, islit, lit, 0xff); 1958 break; 1959 default: 1960 goto invalid_opc; 1961 } 1962 break; 1963 1964 case 0x13: 1965 vc = dest_gpr(ctx, rc); 1966 vb = load_gpr_lit(ctx, rb, lit, islit); 1967 va = load_gpr(ctx, ra); 1968 switch (fn7) { 1969 case 0x00: 1970 /* MULL */ 1971 tcg_gen_mul_i64(vc, va, vb); 1972 tcg_gen_ext32s_i64(vc, vc); 1973 break; 1974 case 0x20: 1975 /* MULQ */ 1976 tcg_gen_mul_i64(vc, va, vb); 1977 break; 1978 case 0x30: 1979 /* UMULH */ 1980 tmp = tcg_temp_new(); 1981 tcg_gen_mulu2_i64(tmp, vc, va, vb); 1982 tcg_temp_free(tmp); 1983 break; 1984 case 0x40: 1985 /* MULL/V */ 1986 tmp = tcg_temp_new(); 1987 tcg_gen_ext32s_i64(tmp, va); 1988 tcg_gen_ext32s_i64(vc, vb); 1989 tcg_gen_mul_i64(tmp, tmp, vc); 1990 tcg_gen_ext32s_i64(vc, tmp); 1991 gen_helper_check_overflow(cpu_env, vc, tmp); 1992 tcg_temp_free(tmp); 1993 break; 1994 case 0x60: 1995 /* MULQ/V */ 1996 tmp = tcg_temp_new(); 1997 tmp2 = tcg_temp_new(); 1998 tcg_gen_muls2_i64(vc, tmp, va, vb); 1999 tcg_gen_sari_i64(tmp2, vc, 63); 2000 gen_helper_check_overflow(cpu_env, tmp, tmp2); 2001 tcg_temp_free(tmp); 2002 tcg_temp_free(tmp2); 2003 break; 2004 default: 2005 goto invalid_opc; 2006 } 2007 break; 2008 2009 case 0x14: 2010 REQUIRE_AMASK(FIX); 2011 vc = dest_fpr(ctx, rc); 2012 switch (fpfn) { /* fn11 & 0x3F */ 2013 case 0x04: 2014 /* ITOFS */ 2015 REQUIRE_REG_31(rb); 2016 REQUIRE_FEN; 2017 t32 = tcg_temp_new_i32(); 2018 va = load_gpr(ctx, ra); 2019 tcg_gen_extrl_i64_i32(t32, va); 2020 gen_helper_memory_to_s(vc, t32); 2021 tcg_temp_free_i32(t32); 2022 break; 2023 case 0x0A: 2024 /* SQRTF */ 2025 REQUIRE_REG_31(ra); 2026 REQUIRE_FEN; 2027 vb = load_fpr(ctx, rb); 2028 gen_helper_sqrtf(vc, cpu_env, vb); 2029 break; 2030 case 0x0B: 2031 /* SQRTS */ 2032 REQUIRE_REG_31(ra); 2033 REQUIRE_FEN; 2034 gen_sqrts(ctx, rb, rc, fn11); 2035 break; 2036 case 0x14: 2037 /* ITOFF */ 2038 REQUIRE_REG_31(rb); 2039 REQUIRE_FEN; 2040 t32 = tcg_temp_new_i32(); 2041 va = load_gpr(ctx, ra); 2042 tcg_gen_extrl_i64_i32(t32, va); 2043 gen_helper_memory_to_f(vc, t32); 2044 tcg_temp_free_i32(t32); 2045 break; 2046 case 0x24: 2047 /* ITOFT */ 2048 REQUIRE_REG_31(rb); 2049 REQUIRE_FEN; 2050 va = load_gpr(ctx, ra); 2051 tcg_gen_mov_i64(vc, va); 2052 break; 2053 case 0x2A: 2054 /* SQRTG */ 2055 REQUIRE_REG_31(ra); 2056 REQUIRE_FEN; 2057 vb = load_fpr(ctx, rb); 2058 gen_helper_sqrtg(vc, cpu_env, vb); 2059 break; 2060 case 0x02B: 2061 /* SQRTT */ 2062 REQUIRE_REG_31(ra); 2063 REQUIRE_FEN; 2064 gen_sqrtt(ctx, rb, rc, fn11); 2065 break; 2066 default: 2067 goto invalid_opc; 2068 } 2069 break; 2070 2071 case 0x15: 2072 /* VAX floating point */ 2073 /* XXX: rounding mode and trap are ignored (!) */ 2074 vc = dest_fpr(ctx, rc); 2075 vb = load_fpr(ctx, rb); 2076 va = load_fpr(ctx, ra); 2077 switch (fpfn) { /* fn11 & 0x3F */ 2078 case 0x00: 2079 /* ADDF */ 2080 REQUIRE_FEN; 2081 gen_helper_addf(vc, cpu_env, va, vb); 2082 break; 2083 case 0x01: 2084 /* SUBF */ 2085 REQUIRE_FEN; 2086 gen_helper_subf(vc, cpu_env, va, vb); 2087 break; 2088 case 0x02: 2089 /* MULF */ 2090 REQUIRE_FEN; 2091 gen_helper_mulf(vc, cpu_env, va, vb); 2092 break; 2093 case 0x03: 2094 /* DIVF */ 2095 REQUIRE_FEN; 2096 gen_helper_divf(vc, cpu_env, va, vb); 2097 break; 2098 case 0x1E: 2099 /* CVTDG -- TODO */ 2100 REQUIRE_REG_31(ra); 2101 goto invalid_opc; 2102 case 0x20: 2103 /* ADDG */ 2104 REQUIRE_FEN; 2105 gen_helper_addg(vc, cpu_env, va, vb); 2106 break; 2107 case 0x21: 2108 /* SUBG */ 2109 REQUIRE_FEN; 2110 gen_helper_subg(vc, cpu_env, va, vb); 2111 break; 2112 case 0x22: 2113 /* MULG */ 2114 REQUIRE_FEN; 2115 gen_helper_mulg(vc, cpu_env, va, vb); 2116 break; 2117 case 0x23: 2118 /* DIVG */ 2119 REQUIRE_FEN; 2120 gen_helper_divg(vc, cpu_env, va, vb); 2121 break; 2122 case 0x25: 2123 /* CMPGEQ */ 2124 REQUIRE_FEN; 2125 gen_helper_cmpgeq(vc, cpu_env, va, vb); 2126 break; 2127 case 0x26: 2128 /* CMPGLT */ 2129 REQUIRE_FEN; 2130 gen_helper_cmpglt(vc, cpu_env, va, vb); 2131 break; 2132 case 0x27: 2133 /* CMPGLE */ 2134 REQUIRE_FEN; 2135 gen_helper_cmpgle(vc, cpu_env, va, vb); 2136 break; 2137 case 0x2C: 2138 /* CVTGF */ 2139 REQUIRE_REG_31(ra); 2140 REQUIRE_FEN; 2141 gen_helper_cvtgf(vc, cpu_env, vb); 2142 break; 2143 case 0x2D: 2144 /* CVTGD -- TODO */ 2145 REQUIRE_REG_31(ra); 2146 goto invalid_opc; 2147 case 0x2F: 2148 /* CVTGQ */ 2149 REQUIRE_REG_31(ra); 2150 REQUIRE_FEN; 2151 gen_helper_cvtgq(vc, cpu_env, vb); 2152 break; 2153 case 0x3C: 2154 /* CVTQF */ 2155 REQUIRE_REG_31(ra); 2156 REQUIRE_FEN; 2157 gen_helper_cvtqf(vc, cpu_env, vb); 2158 break; 2159 case 0x3E: 2160 /* CVTQG */ 2161 REQUIRE_REG_31(ra); 2162 REQUIRE_FEN; 2163 gen_helper_cvtqg(vc, cpu_env, vb); 2164 break; 2165 default: 2166 goto invalid_opc; 2167 } 2168 break; 2169 2170 case 0x16: 2171 /* IEEE floating-point */ 2172 switch (fpfn) { /* fn11 & 0x3F */ 2173 case 0x00: 2174 /* ADDS */ 2175 REQUIRE_FEN; 2176 gen_adds(ctx, ra, rb, rc, fn11); 2177 break; 2178 case 0x01: 2179 /* SUBS */ 2180 REQUIRE_FEN; 2181 gen_subs(ctx, ra, rb, rc, fn11); 2182 break; 2183 case 0x02: 2184 /* MULS */ 2185 REQUIRE_FEN; 2186 gen_muls(ctx, ra, rb, rc, fn11); 2187 break; 2188 case 0x03: 2189 /* DIVS */ 2190 REQUIRE_FEN; 2191 gen_divs(ctx, ra, rb, rc, fn11); 2192 break; 2193 case 0x20: 2194 /* ADDT */ 2195 REQUIRE_FEN; 2196 gen_addt(ctx, ra, rb, rc, fn11); 2197 break; 2198 case 0x21: 2199 /* SUBT */ 2200 REQUIRE_FEN; 2201 gen_subt(ctx, ra, rb, rc, fn11); 2202 break; 2203 case 0x22: 2204 /* MULT */ 2205 REQUIRE_FEN; 2206 gen_mult(ctx, ra, rb, rc, fn11); 2207 break; 2208 case 0x23: 2209 /* DIVT */ 2210 REQUIRE_FEN; 2211 gen_divt(ctx, ra, rb, rc, fn11); 2212 break; 2213 case 0x24: 2214 /* CMPTUN */ 2215 REQUIRE_FEN; 2216 gen_cmptun(ctx, ra, rb, rc, fn11); 2217 break; 2218 case 0x25: 2219 /* CMPTEQ */ 2220 REQUIRE_FEN; 2221 gen_cmpteq(ctx, ra, rb, rc, fn11); 2222 break; 2223 case 0x26: 2224 /* CMPTLT */ 2225 REQUIRE_FEN; 2226 gen_cmptlt(ctx, ra, rb, rc, fn11); 2227 break; 2228 case 0x27: 2229 /* CMPTLE */ 2230 REQUIRE_FEN; 2231 gen_cmptle(ctx, ra, rb, rc, fn11); 2232 break; 2233 case 0x2C: 2234 REQUIRE_REG_31(ra); 2235 REQUIRE_FEN; 2236 if (fn11 == 0x2AC || fn11 == 0x6AC) { 2237 /* CVTST */ 2238 gen_cvtst(ctx, rb, rc, fn11); 2239 } else { 2240 /* CVTTS */ 2241 gen_cvtts(ctx, rb, rc, fn11); 2242 } 2243 break; 2244 case 0x2F: 2245 /* CVTTQ */ 2246 REQUIRE_REG_31(ra); 2247 REQUIRE_FEN; 2248 gen_cvttq(ctx, rb, rc, fn11); 2249 break; 2250 case 0x3C: 2251 /* CVTQS */ 2252 REQUIRE_REG_31(ra); 2253 REQUIRE_FEN; 2254 gen_cvtqs(ctx, rb, rc, fn11); 2255 break; 2256 case 0x3E: 2257 /* CVTQT */ 2258 REQUIRE_REG_31(ra); 2259 REQUIRE_FEN; 2260 gen_cvtqt(ctx, rb, rc, fn11); 2261 break; 2262 default: 2263 goto invalid_opc; 2264 } 2265 break; 2266 2267 case 0x17: 2268 switch (fn11) { 2269 case 0x010: 2270 /* CVTLQ */ 2271 REQUIRE_REG_31(ra); 2272 REQUIRE_FEN; 2273 vc = dest_fpr(ctx, rc); 2274 vb = load_fpr(ctx, rb); 2275 gen_cvtlq(vc, vb); 2276 break; 2277 case 0x020: 2278 /* CPYS */ 2279 REQUIRE_FEN; 2280 if (rc == 31) { 2281 /* Special case CPYS as FNOP. */ 2282 } else { 2283 vc = dest_fpr(ctx, rc); 2284 va = load_fpr(ctx, ra); 2285 if (ra == rb) { 2286 /* Special case CPYS as FMOV. */ 2287 tcg_gen_mov_i64(vc, va); 2288 } else { 2289 vb = load_fpr(ctx, rb); 2290 gen_cpy_mask(vc, va, vb, 0, 0x8000000000000000ULL); 2291 } 2292 } 2293 break; 2294 case 0x021: 2295 /* CPYSN */ 2296 REQUIRE_FEN; 2297 vc = dest_fpr(ctx, rc); 2298 vb = load_fpr(ctx, rb); 2299 va = load_fpr(ctx, ra); 2300 gen_cpy_mask(vc, va, vb, 1, 0x8000000000000000ULL); 2301 break; 2302 case 0x022: 2303 /* CPYSE */ 2304 REQUIRE_FEN; 2305 vc = dest_fpr(ctx, rc); 2306 vb = load_fpr(ctx, rb); 2307 va = load_fpr(ctx, ra); 2308 gen_cpy_mask(vc, va, vb, 0, 0xFFF0000000000000ULL); 2309 break; 2310 case 0x024: 2311 /* MT_FPCR */ 2312 REQUIRE_FEN; 2313 va = load_fpr(ctx, ra); 2314 gen_helper_store_fpcr(cpu_env, va); 2315 if (ctx->tb_rm == QUAL_RM_D) { 2316 /* Re-do the copy of the rounding mode to fp_status 2317 the next time we use dynamic rounding. */ 2318 ctx->tb_rm = -1; 2319 } 2320 break; 2321 case 0x025: 2322 /* MF_FPCR */ 2323 REQUIRE_FEN; 2324 va = dest_fpr(ctx, ra); 2325 gen_helper_load_fpcr(va, cpu_env); 2326 break; 2327 case 0x02A: 2328 /* FCMOVEQ */ 2329 REQUIRE_FEN; 2330 gen_fcmov(ctx, TCG_COND_EQ, ra, rb, rc); 2331 break; 2332 case 0x02B: 2333 /* FCMOVNE */ 2334 REQUIRE_FEN; 2335 gen_fcmov(ctx, TCG_COND_NE, ra, rb, rc); 2336 break; 2337 case 0x02C: 2338 /* FCMOVLT */ 2339 REQUIRE_FEN; 2340 gen_fcmov(ctx, TCG_COND_LT, ra, rb, rc); 2341 break; 2342 case 0x02D: 2343 /* FCMOVGE */ 2344 REQUIRE_FEN; 2345 gen_fcmov(ctx, TCG_COND_GE, ra, rb, rc); 2346 break; 2347 case 0x02E: 2348 /* FCMOVLE */ 2349 REQUIRE_FEN; 2350 gen_fcmov(ctx, TCG_COND_LE, ra, rb, rc); 2351 break; 2352 case 0x02F: 2353 /* FCMOVGT */ 2354 REQUIRE_FEN; 2355 gen_fcmov(ctx, TCG_COND_GT, ra, rb, rc); 2356 break; 2357 case 0x030: /* CVTQL */ 2358 case 0x130: /* CVTQL/V */ 2359 case 0x530: /* CVTQL/SV */ 2360 REQUIRE_REG_31(ra); 2361 REQUIRE_FEN; 2362 vc = dest_fpr(ctx, rc); 2363 vb = load_fpr(ctx, rb); 2364 gen_helper_cvtql(vc, cpu_env, vb); 2365 gen_fp_exc_raise(rc, fn11); 2366 break; 2367 default: 2368 goto invalid_opc; 2369 } 2370 break; 2371 2372 case 0x18: 2373 switch ((uint16_t)disp16) { 2374 case 0x0000: 2375 /* TRAPB */ 2376 /* No-op. */ 2377 break; 2378 case 0x0400: 2379 /* EXCB */ 2380 /* No-op. */ 2381 break; 2382 case 0x4000: 2383 /* MB */ 2384 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC); 2385 break; 2386 case 0x4400: 2387 /* WMB */ 2388 tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC); 2389 break; 2390 case 0x8000: 2391 /* FETCH */ 2392 /* No-op */ 2393 break; 2394 case 0xA000: 2395 /* FETCH_M */ 2396 /* No-op */ 2397 break; 2398 case 0xC000: 2399 /* RPCC */ 2400 va = dest_gpr(ctx, ra); 2401 if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { 2402 gen_io_start(); 2403 gen_helper_load_pcc(va, cpu_env); 2404 ret = DISAS_PC_STALE; 2405 } else { 2406 gen_helper_load_pcc(va, cpu_env); 2407 } 2408 break; 2409 case 0xE000: 2410 /* RC */ 2411 gen_rx(ctx, ra, 0); 2412 break; 2413 case 0xE800: 2414 /* ECB */ 2415 break; 2416 case 0xF000: 2417 /* RS */ 2418 gen_rx(ctx, ra, 1); 2419 break; 2420 case 0xF800: 2421 /* WH64 */ 2422 /* No-op */ 2423 break; 2424 case 0xFC00: 2425 /* WH64EN */ 2426 /* No-op */ 2427 break; 2428 default: 2429 goto invalid_opc; 2430 } 2431 break; 2432 2433 case 0x19: 2434 /* HW_MFPR (PALcode) */ 2435 #ifndef CONFIG_USER_ONLY 2436 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE); 2437 va = dest_gpr(ctx, ra); 2438 ret = gen_mfpr(ctx, va, insn & 0xffff); 2439 break; 2440 #else 2441 goto invalid_opc; 2442 #endif 2443 2444 case 0x1A: 2445 /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch 2446 prediction stack action, which of course we don't implement. */ 2447 vb = load_gpr(ctx, rb); 2448 tcg_gen_andi_i64(cpu_pc, vb, ~3); 2449 if (ra != 31) { 2450 tcg_gen_movi_i64(ctx->ir[ra], ctx->base.pc_next); 2451 } 2452 ret = DISAS_PC_UPDATED; 2453 break; 2454 2455 case 0x1B: 2456 /* HW_LD (PALcode) */ 2457 #ifndef CONFIG_USER_ONLY 2458 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE); 2459 { 2460 TCGv addr = tcg_temp_new(); 2461 vb = load_gpr(ctx, rb); 2462 va = dest_gpr(ctx, ra); 2463 2464 tcg_gen_addi_i64(addr, vb, disp12); 2465 switch ((insn >> 12) & 0xF) { 2466 case 0x0: 2467 /* Longword physical access (hw_ldl/p) */ 2468 tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL); 2469 break; 2470 case 0x1: 2471 /* Quadword physical access (hw_ldq/p) */ 2472 tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEQ); 2473 break; 2474 case 0x2: 2475 /* Longword physical access with lock (hw_ldl_l/p) */ 2476 tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL); 2477 tcg_gen_mov_i64(cpu_lock_addr, addr); 2478 tcg_gen_mov_i64(cpu_lock_value, va); 2479 break; 2480 case 0x3: 2481 /* Quadword physical access with lock (hw_ldq_l/p) */ 2482 tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEQ); 2483 tcg_gen_mov_i64(cpu_lock_addr, addr); 2484 tcg_gen_mov_i64(cpu_lock_value, va); 2485 break; 2486 case 0x4: 2487 /* Longword virtual PTE fetch (hw_ldl/v) */ 2488 goto invalid_opc; 2489 case 0x5: 2490 /* Quadword virtual PTE fetch (hw_ldq/v) */ 2491 goto invalid_opc; 2492 break; 2493 case 0x6: 2494 /* Invalid */ 2495 goto invalid_opc; 2496 case 0x7: 2497 /* Invaliid */ 2498 goto invalid_opc; 2499 case 0x8: 2500 /* Longword virtual access (hw_ldl) */ 2501 goto invalid_opc; 2502 case 0x9: 2503 /* Quadword virtual access (hw_ldq) */ 2504 goto invalid_opc; 2505 case 0xA: 2506 /* Longword virtual access with protection check (hw_ldl/w) */ 2507 tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LESL); 2508 break; 2509 case 0xB: 2510 /* Quadword virtual access with protection check (hw_ldq/w) */ 2511 tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LEQ); 2512 break; 2513 case 0xC: 2514 /* Longword virtual access with alt access mode (hw_ldl/a)*/ 2515 goto invalid_opc; 2516 case 0xD: 2517 /* Quadword virtual access with alt access mode (hw_ldq/a) */ 2518 goto invalid_opc; 2519 case 0xE: 2520 /* Longword virtual access with alternate access mode and 2521 protection checks (hw_ldl/wa) */ 2522 tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LESL); 2523 break; 2524 case 0xF: 2525 /* Quadword virtual access with alternate access mode and 2526 protection checks (hw_ldq/wa) */ 2527 tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LEQ); 2528 break; 2529 } 2530 tcg_temp_free(addr); 2531 break; 2532 } 2533 #else 2534 goto invalid_opc; 2535 #endif 2536 2537 case 0x1C: 2538 vc = dest_gpr(ctx, rc); 2539 if (fn7 == 0x70) { 2540 /* FTOIT */ 2541 REQUIRE_AMASK(FIX); 2542 REQUIRE_REG_31(rb); 2543 va = load_fpr(ctx, ra); 2544 tcg_gen_mov_i64(vc, va); 2545 break; 2546 } else if (fn7 == 0x78) { 2547 /* FTOIS */ 2548 REQUIRE_AMASK(FIX); 2549 REQUIRE_REG_31(rb); 2550 t32 = tcg_temp_new_i32(); 2551 va = load_fpr(ctx, ra); 2552 gen_helper_s_to_memory(t32, va); 2553 tcg_gen_ext_i32_i64(vc, t32); 2554 tcg_temp_free_i32(t32); 2555 break; 2556 } 2557 2558 vb = load_gpr_lit(ctx, rb, lit, islit); 2559 switch (fn7) { 2560 case 0x00: 2561 /* SEXTB */ 2562 REQUIRE_AMASK(BWX); 2563 REQUIRE_REG_31(ra); 2564 tcg_gen_ext8s_i64(vc, vb); 2565 break; 2566 case 0x01: 2567 /* SEXTW */ 2568 REQUIRE_AMASK(BWX); 2569 REQUIRE_REG_31(ra); 2570 tcg_gen_ext16s_i64(vc, vb); 2571 break; 2572 case 0x30: 2573 /* CTPOP */ 2574 REQUIRE_AMASK(CIX); 2575 REQUIRE_REG_31(ra); 2576 REQUIRE_NO_LIT; 2577 tcg_gen_ctpop_i64(vc, vb); 2578 break; 2579 case 0x31: 2580 /* PERR */ 2581 REQUIRE_AMASK(MVI); 2582 REQUIRE_NO_LIT; 2583 va = load_gpr(ctx, ra); 2584 gen_helper_perr(vc, va, vb); 2585 break; 2586 case 0x32: 2587 /* CTLZ */ 2588 REQUIRE_AMASK(CIX); 2589 REQUIRE_REG_31(ra); 2590 REQUIRE_NO_LIT; 2591 tcg_gen_clzi_i64(vc, vb, 64); 2592 break; 2593 case 0x33: 2594 /* CTTZ */ 2595 REQUIRE_AMASK(CIX); 2596 REQUIRE_REG_31(ra); 2597 REQUIRE_NO_LIT; 2598 tcg_gen_ctzi_i64(vc, vb, 64); 2599 break; 2600 case 0x34: 2601 /* UNPKBW */ 2602 REQUIRE_AMASK(MVI); 2603 REQUIRE_REG_31(ra); 2604 REQUIRE_NO_LIT; 2605 gen_helper_unpkbw(vc, vb); 2606 break; 2607 case 0x35: 2608 /* UNPKBL */ 2609 REQUIRE_AMASK(MVI); 2610 REQUIRE_REG_31(ra); 2611 REQUIRE_NO_LIT; 2612 gen_helper_unpkbl(vc, vb); 2613 break; 2614 case 0x36: 2615 /* PKWB */ 2616 REQUIRE_AMASK(MVI); 2617 REQUIRE_REG_31(ra); 2618 REQUIRE_NO_LIT; 2619 gen_helper_pkwb(vc, vb); 2620 break; 2621 case 0x37: 2622 /* PKLB */ 2623 REQUIRE_AMASK(MVI); 2624 REQUIRE_REG_31(ra); 2625 REQUIRE_NO_LIT; 2626 gen_helper_pklb(vc, vb); 2627 break; 2628 case 0x38: 2629 /* MINSB8 */ 2630 REQUIRE_AMASK(MVI); 2631 va = load_gpr(ctx, ra); 2632 gen_helper_minsb8(vc, va, vb); 2633 break; 2634 case 0x39: 2635 /* MINSW4 */ 2636 REQUIRE_AMASK(MVI); 2637 va = load_gpr(ctx, ra); 2638 gen_helper_minsw4(vc, va, vb); 2639 break; 2640 case 0x3A: 2641 /* MINUB8 */ 2642 REQUIRE_AMASK(MVI); 2643 va = load_gpr(ctx, ra); 2644 gen_helper_minub8(vc, va, vb); 2645 break; 2646 case 0x3B: 2647 /* MINUW4 */ 2648 REQUIRE_AMASK(MVI); 2649 va = load_gpr(ctx, ra); 2650 gen_helper_minuw4(vc, va, vb); 2651 break; 2652 case 0x3C: 2653 /* MAXUB8 */ 2654 REQUIRE_AMASK(MVI); 2655 va = load_gpr(ctx, ra); 2656 gen_helper_maxub8(vc, va, vb); 2657 break; 2658 case 0x3D: 2659 /* MAXUW4 */ 2660 REQUIRE_AMASK(MVI); 2661 va = load_gpr(ctx, ra); 2662 gen_helper_maxuw4(vc, va, vb); 2663 break; 2664 case 0x3E: 2665 /* MAXSB8 */ 2666 REQUIRE_AMASK(MVI); 2667 va = load_gpr(ctx, ra); 2668 gen_helper_maxsb8(vc, va, vb); 2669 break; 2670 case 0x3F: 2671 /* MAXSW4 */ 2672 REQUIRE_AMASK(MVI); 2673 va = load_gpr(ctx, ra); 2674 gen_helper_maxsw4(vc, va, vb); 2675 break; 2676 default: 2677 goto invalid_opc; 2678 } 2679 break; 2680 2681 case 0x1D: 2682 /* HW_MTPR (PALcode) */ 2683 #ifndef CONFIG_USER_ONLY 2684 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE); 2685 vb = load_gpr(ctx, rb); 2686 ret = gen_mtpr(ctx, vb, insn & 0xffff); 2687 break; 2688 #else 2689 goto invalid_opc; 2690 #endif 2691 2692 case 0x1E: 2693 /* HW_RET (PALcode) */ 2694 #ifndef CONFIG_USER_ONLY 2695 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE); 2696 if (rb == 31) { 2697 /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return 2698 address from EXC_ADDR. This turns out to be useful for our 2699 emulation PALcode, so continue to accept it. */ 2700 vb = dest_sink(ctx); 2701 tcg_gen_ld_i64(vb, cpu_env, offsetof(CPUAlphaState, exc_addr)); 2702 } else { 2703 vb = load_gpr(ctx, rb); 2704 } 2705 tcg_gen_movi_i64(cpu_lock_addr, -1); 2706 st_flag_byte(load_zero(ctx), ENV_FLAG_RX_SHIFT); 2707 tmp = tcg_temp_new(); 2708 tcg_gen_andi_i64(tmp, vb, 1); 2709 st_flag_byte(tmp, ENV_FLAG_PAL_SHIFT); 2710 tcg_temp_free(tmp); 2711 tcg_gen_andi_i64(cpu_pc, vb, ~3); 2712 /* Allow interrupts to be recognized right away. */ 2713 ret = DISAS_PC_UPDATED_NOCHAIN; 2714 break; 2715 #else 2716 goto invalid_opc; 2717 #endif 2718 2719 case 0x1F: 2720 /* HW_ST (PALcode) */ 2721 #ifndef CONFIG_USER_ONLY 2722 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE); 2723 { 2724 switch ((insn >> 12) & 0xF) { 2725 case 0x0: 2726 /* Longword physical access */ 2727 va = load_gpr(ctx, ra); 2728 vb = load_gpr(ctx, rb); 2729 tmp = tcg_temp_new(); 2730 tcg_gen_addi_i64(tmp, vb, disp12); 2731 tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LESL); 2732 tcg_temp_free(tmp); 2733 break; 2734 case 0x1: 2735 /* Quadword physical access */ 2736 va = load_gpr(ctx, ra); 2737 vb = load_gpr(ctx, rb); 2738 tmp = tcg_temp_new(); 2739 tcg_gen_addi_i64(tmp, vb, disp12); 2740 tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LEQ); 2741 tcg_temp_free(tmp); 2742 break; 2743 case 0x2: 2744 /* Longword physical access with lock */ 2745 ret = gen_store_conditional(ctx, ra, rb, disp12, 2746 MMU_PHYS_IDX, MO_LESL); 2747 break; 2748 case 0x3: 2749 /* Quadword physical access with lock */ 2750 ret = gen_store_conditional(ctx, ra, rb, disp12, 2751 MMU_PHYS_IDX, MO_LEQ); 2752 break; 2753 case 0x4: 2754 /* Longword virtual access */ 2755 goto invalid_opc; 2756 case 0x5: 2757 /* Quadword virtual access */ 2758 goto invalid_opc; 2759 case 0x6: 2760 /* Invalid */ 2761 goto invalid_opc; 2762 case 0x7: 2763 /* Invalid */ 2764 goto invalid_opc; 2765 case 0x8: 2766 /* Invalid */ 2767 goto invalid_opc; 2768 case 0x9: 2769 /* Invalid */ 2770 goto invalid_opc; 2771 case 0xA: 2772 /* Invalid */ 2773 goto invalid_opc; 2774 case 0xB: 2775 /* Invalid */ 2776 goto invalid_opc; 2777 case 0xC: 2778 /* Longword virtual access with alternate access mode */ 2779 goto invalid_opc; 2780 case 0xD: 2781 /* Quadword virtual access with alternate access mode */ 2782 goto invalid_opc; 2783 case 0xE: 2784 /* Invalid */ 2785 goto invalid_opc; 2786 case 0xF: 2787 /* Invalid */ 2788 goto invalid_opc; 2789 } 2790 break; 2791 } 2792 #else 2793 goto invalid_opc; 2794 #endif 2795 case 0x20: 2796 /* LDF */ 2797 REQUIRE_FEN; 2798 gen_load_fp(ctx, ra, rb, disp16, gen_ldf); 2799 break; 2800 case 0x21: 2801 /* LDG */ 2802 REQUIRE_FEN; 2803 gen_load_fp(ctx, ra, rb, disp16, gen_ldg); 2804 break; 2805 case 0x22: 2806 /* LDS */ 2807 REQUIRE_FEN; 2808 gen_load_fp(ctx, ra, rb, disp16, gen_lds); 2809 break; 2810 case 0x23: 2811 /* LDT */ 2812 REQUIRE_FEN; 2813 gen_load_fp(ctx, ra, rb, disp16, gen_ldt); 2814 break; 2815 case 0x24: 2816 /* STF */ 2817 REQUIRE_FEN; 2818 gen_store_fp(ctx, ra, rb, disp16, gen_stf); 2819 break; 2820 case 0x25: 2821 /* STG */ 2822 REQUIRE_FEN; 2823 gen_store_fp(ctx, ra, rb, disp16, gen_stg); 2824 break; 2825 case 0x26: 2826 /* STS */ 2827 REQUIRE_FEN; 2828 gen_store_fp(ctx, ra, rb, disp16, gen_sts); 2829 break; 2830 case 0x27: 2831 /* STT */ 2832 REQUIRE_FEN; 2833 gen_store_fp(ctx, ra, rb, disp16, gen_stt); 2834 break; 2835 case 0x28: 2836 /* LDL */ 2837 gen_load_int(ctx, ra, rb, disp16, MO_LESL, 0, 0); 2838 break; 2839 case 0x29: 2840 /* LDQ */ 2841 gen_load_int(ctx, ra, rb, disp16, MO_LEQ, 0, 0); 2842 break; 2843 case 0x2A: 2844 /* LDL_L */ 2845 gen_load_int(ctx, ra, rb, disp16, MO_LESL, 0, 1); 2846 break; 2847 case 0x2B: 2848 /* LDQ_L */ 2849 gen_load_int(ctx, ra, rb, disp16, MO_LEQ, 0, 1); 2850 break; 2851 case 0x2C: 2852 /* STL */ 2853 gen_store_int(ctx, ra, rb, disp16, MO_LEUL, 0); 2854 break; 2855 case 0x2D: 2856 /* STQ */ 2857 gen_store_int(ctx, ra, rb, disp16, MO_LEQ, 0); 2858 break; 2859 case 0x2E: 2860 /* STL_C */ 2861 ret = gen_store_conditional(ctx, ra, rb, disp16, 2862 ctx->mem_idx, MO_LESL); 2863 break; 2864 case 0x2F: 2865 /* STQ_C */ 2866 ret = gen_store_conditional(ctx, ra, rb, disp16, 2867 ctx->mem_idx, MO_LEQ); 2868 break; 2869 case 0x30: 2870 /* BR */ 2871 ret = gen_bdirect(ctx, ra, disp21); 2872 break; 2873 case 0x31: /* FBEQ */ 2874 REQUIRE_FEN; 2875 ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21); 2876 break; 2877 case 0x32: /* FBLT */ 2878 REQUIRE_FEN; 2879 ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21); 2880 break; 2881 case 0x33: /* FBLE */ 2882 REQUIRE_FEN; 2883 ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21); 2884 break; 2885 case 0x34: 2886 /* BSR */ 2887 ret = gen_bdirect(ctx, ra, disp21); 2888 break; 2889 case 0x35: /* FBNE */ 2890 REQUIRE_FEN; 2891 ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21); 2892 break; 2893 case 0x36: /* FBGE */ 2894 REQUIRE_FEN; 2895 ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21); 2896 break; 2897 case 0x37: /* FBGT */ 2898 REQUIRE_FEN; 2899 ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21); 2900 break; 2901 case 0x38: 2902 /* BLBC */ 2903 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1); 2904 break; 2905 case 0x39: 2906 /* BEQ */ 2907 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0); 2908 break; 2909 case 0x3A: 2910 /* BLT */ 2911 ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0); 2912 break; 2913 case 0x3B: 2914 /* BLE */ 2915 ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0); 2916 break; 2917 case 0x3C: 2918 /* BLBS */ 2919 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1); 2920 break; 2921 case 0x3D: 2922 /* BNE */ 2923 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0); 2924 break; 2925 case 0x3E: 2926 /* BGE */ 2927 ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0); 2928 break; 2929 case 0x3F: 2930 /* BGT */ 2931 ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0); 2932 break; 2933 invalid_opc: 2934 ret = gen_invalid(ctx); 2935 break; 2936 raise_fen: 2937 ret = gen_excp(ctx, EXCP_FEN, 0); 2938 break; 2939 } 2940 2941 return ret; 2942 } 2943 2944 static void alpha_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu) 2945 { 2946 DisasContext *ctx = container_of(dcbase, DisasContext, base); 2947 CPUAlphaState *env = cpu->env_ptr; 2948 int64_t bound; 2949 2950 ctx->tbflags = ctx->base.tb->flags; 2951 ctx->mem_idx = cpu_mmu_index(env, false); 2952 ctx->implver = env->implver; 2953 ctx->amask = env->amask; 2954 2955 #ifdef CONFIG_USER_ONLY 2956 ctx->ir = cpu_std_ir; 2957 ctx->unalign = (ctx->tbflags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN); 2958 #else 2959 ctx->palbr = env->palbr; 2960 ctx->ir = (ctx->tbflags & ENV_FLAG_PAL_MODE ? cpu_pal_ir : cpu_std_ir); 2961 #endif 2962 2963 /* ??? Every TB begins with unset rounding mode, to be initialized on 2964 the first fp insn of the TB. Alternately we could define a proper 2965 default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure 2966 to reset the FP_STATUS to that default at the end of any TB that 2967 changes the default. We could even (gasp) dynamiclly figure out 2968 what default would be most efficient given the running program. */ 2969 ctx->tb_rm = -1; 2970 /* Similarly for flush-to-zero. */ 2971 ctx->tb_ftz = -1; 2972 2973 ctx->zero = NULL; 2974 ctx->sink = NULL; 2975 2976 /* Bound the number of insns to execute to those left on the page. */ 2977 bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4; 2978 ctx->base.max_insns = MIN(ctx->base.max_insns, bound); 2979 } 2980 2981 static void alpha_tr_tb_start(DisasContextBase *db, CPUState *cpu) 2982 { 2983 } 2984 2985 static void alpha_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu) 2986 { 2987 tcg_gen_insn_start(dcbase->pc_next); 2988 } 2989 2990 static void alpha_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) 2991 { 2992 DisasContext *ctx = container_of(dcbase, DisasContext, base); 2993 CPUAlphaState *env = cpu->env_ptr; 2994 uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next); 2995 2996 ctx->base.pc_next += 4; 2997 ctx->base.is_jmp = translate_one(ctx, insn); 2998 2999 free_context_temps(ctx); 3000 translator_loop_temp_check(&ctx->base); 3001 } 3002 3003 static void alpha_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu) 3004 { 3005 DisasContext *ctx = container_of(dcbase, DisasContext, base); 3006 3007 switch (ctx->base.is_jmp) { 3008 case DISAS_NORETURN: 3009 break; 3010 case DISAS_TOO_MANY: 3011 if (use_goto_tb(ctx, ctx->base.pc_next)) { 3012 tcg_gen_goto_tb(0); 3013 tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next); 3014 tcg_gen_exit_tb(ctx->base.tb, 0); 3015 } 3016 /* FALLTHRU */ 3017 case DISAS_PC_STALE: 3018 tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next); 3019 /* FALLTHRU */ 3020 case DISAS_PC_UPDATED: 3021 tcg_gen_lookup_and_goto_ptr(); 3022 break; 3023 case DISAS_PC_UPDATED_NOCHAIN: 3024 tcg_gen_exit_tb(NULL, 0); 3025 break; 3026 default: 3027 g_assert_not_reached(); 3028 } 3029 } 3030 3031 static void alpha_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu) 3032 { 3033 qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first)); 3034 log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size); 3035 } 3036 3037 static const TranslatorOps alpha_tr_ops = { 3038 .init_disas_context = alpha_tr_init_disas_context, 3039 .tb_start = alpha_tr_tb_start, 3040 .insn_start = alpha_tr_insn_start, 3041 .translate_insn = alpha_tr_translate_insn, 3042 .tb_stop = alpha_tr_tb_stop, 3043 .disas_log = alpha_tr_disas_log, 3044 }; 3045 3046 void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns) 3047 { 3048 DisasContext dc; 3049 translator_loop(&alpha_tr_ops, &dc.base, cpu, tb, max_insns); 3050 } 3051 3052 void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb, 3053 target_ulong *data) 3054 { 3055 env->pc = data[0]; 3056 } 3057