/*
 * HPPA emulation cpu translation for qemu.
 *
 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"

typedef struct DisasCond {
    TCGCond c;
    TCGv a0, a1;
    bool a0_is_n;
    bool a1_is_0;
} DisasCond;

typedef struct DisasContext {
    struct TranslationBlock *tb;
    CPUState *cs;

    target_ulong iaoq_f;
    target_ulong iaoq_b;
    target_ulong iaoq_n;
    TCGv iaoq_n_var;

    int ntemps;
    TCGv temps[8];

    DisasCond null_cond;
    TCGLabel *null_lab;

    bool singlestep_enabled;
    bool psw_n_nonzero;
} DisasContext;

/* Return values from translate_one, indicating the state of the TB.
   Note that zero indicates that we are not exiting the TB.  */

typedef enum {
    NO_EXIT,

    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,

    /* We are not using a goto_tb (for whatever reason), but have updated
       the iaq (for whatever reason), so don't do it again on exit.  */
    EXIT_IAQ_N_UPDATED,

    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the iaq for the next instruction to be executed.  */
    EXIT_IAQ_N_STALE,

    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;

typedef struct DisasInsn {
    uint32_t insn, mask;
    ExitStatus (*trans)(DisasContext *ctx, uint32_t insn,
                        const struct DisasInsn *f);
    union {
        void (*f_ttt)(TCGv, TCGv, TCGv);
        void (*f_weww)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32);
        void (*f_dedd)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64);
        void (*f_wew)(TCGv_i32, TCGv_env, TCGv_i32);
        void (*f_ded)(TCGv_i64, TCGv_env, TCGv_i64);
        void (*f_wed)(TCGv_i32, TCGv_env, TCGv_i64);
        void (*f_dew)(TCGv_i64, TCGv_env, TCGv_i32);
    };
} DisasInsn;

/* global register indexes */
static TCGv_env cpu_env;
static TCGv cpu_gr[32];
static TCGv cpu_iaoq_f;
static TCGv cpu_iaoq_b;
static TCGv cpu_sar;
static TCGv cpu_psw_n;
static TCGv cpu_psw_v;
static TCGv cpu_psw_cb;
static TCGv cpu_psw_cb_msb;
static TCGv cpu_cr26;
static TCGv cpu_cr27;

#include "exec/gen-icount.h"

void hppa_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUHPPAState, V) }

    typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        DEF_VAR(sar),
        DEF_VAR(cr26),
        DEF_VAR(cr27),
        DEF_VAR(psw_n),
        DEF_VAR(psw_v),
        DEF_VAR(psw_cb),
        DEF_VAR(psw_cb_msb),
        DEF_VAR(iaoq_f),
        DEF_VAR(iaoq_b),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char gr_names[32][4] = {
        "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
        "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
    };

    static bool done_init = false;
    int i;

    if (done_init) {
        return;
    }
    done_init = true;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    tcg_ctx.tcg_env = cpu_env;

    TCGV_UNUSED(cpu_gr[0]);
    for (i = 1; i < 32; i++) {
        cpu_gr[i] = tcg_global_mem_new(cpu_env,
                                       offsetof(CPUHPPAState, gr[i]),
                                       gr_names[i]);
    }

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new(cpu_env, v->ofs, v->name);
    }
}

static DisasCond cond_make_f(void)
{
    DisasCond r = { .c = TCG_COND_NEVER };
    TCGV_UNUSED(r.a0);
    TCGV_UNUSED(r.a1);
    return r;
}

static DisasCond cond_make_n(void)
{
    DisasCond r = { .c = TCG_COND_NE, .a0_is_n = true, .a1_is_0 = true };
    r.a0 = cpu_psw_n;
    TCGV_UNUSED(r.a1);
    return r;
}

static DisasCond cond_make_0(TCGCond c, TCGv a0)
{
    DisasCond r = { .c = c, .a1_is_0 = true };

    assert(c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    r.a0 = tcg_temp_new();
    tcg_gen_mov_tl(r.a0, a0);
    TCGV_UNUSED(r.a1);

    return r;
}

static DisasCond cond_make(TCGCond c, TCGv a0, TCGv a1)
{
    DisasCond r = { .c = c };

    assert(c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    r.a0 = tcg_temp_new();
    tcg_gen_mov_tl(r.a0, a0);
    r.a1 = tcg_temp_new();
    tcg_gen_mov_tl(r.a1, a1);

    return r;
}

static void cond_prep(DisasCond *cond)
{
    if (cond->a1_is_0) {
        cond->a1_is_0 = false;
        cond->a1 = tcg_const_tl(0);
    }
}
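/* Free any temporaries owned by *COND and reset it to "never".  Operands
   that merely alias the PSW[N] global (a0_is_n) or stand for the constant
   zero (a1_is_0) are not freed.  */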
static void cond_free(DisasCond *cond)
{
    switch (cond->c) {
    default:
        if (!cond->a0_is_n) {
            tcg_temp_free(cond->a0);
        }
        if (!cond->a1_is_0) {
            tcg_temp_free(cond->a1);
        }
        cond->a0_is_n = false;
        cond->a1_is_0 = false;
        TCGV_UNUSED(cond->a0);
        TCGV_UNUSED(cond->a1);
        /* fallthru */
    case TCG_COND_ALWAYS:
        cond->c = TCG_COND_NEVER;
        break;
    case TCG_COND_NEVER:
        break;
    }
}

static TCGv get_temp(DisasContext *ctx)
{
    unsigned i = ctx->ntemps++;
    g_assert(i < ARRAY_SIZE(ctx->temps));
    return ctx->temps[i] = tcg_temp_new();
}

static TCGv load_const(DisasContext *ctx, target_long v)
{
    TCGv t = get_temp(ctx);
    tcg_gen_movi_tl(t, v);
    return t;
}

static TCGv load_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0) {
        TCGv t = get_temp(ctx);
        tcg_gen_movi_tl(t, 0);
        return t;
    } else {
        return cpu_gr[reg];
    }
}

static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
        return get_temp(ctx);
    } else {
        return cpu_gr[reg];
    }
}

static void save_or_nullify(DisasContext *ctx, TCGv dest, TCGv t)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        cond_prep(&ctx->null_cond);
        tcg_gen_movcond_tl(ctx->null_cond.c, dest, ctx->null_cond.a0,
                           ctx->null_cond.a1, dest, t);
    } else {
        tcg_gen_mov_tl(dest, t);
    }
}

static void save_gpr(DisasContext *ctx, unsigned reg, TCGv t)
{
    if (reg != 0) {
        save_or_nullify(ctx, cpu_gr[reg], t);
    }
}

#ifdef HOST_WORDS_BIGENDIAN
# define HI_OFS  0
# define LO_OFS  4
#else
# define HI_OFS  4
# define LO_OFS  0
#endif

static TCGv_i32 load_frw_i32(unsigned rt)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, cpu_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
    return ret;
}

static TCGv_i32 load_frw0_i32(unsigned rt)
{
    if (rt == 0) {
        return tcg_const_i32(0);
    } else {
        return load_frw_i32(rt);
    }
}

static TCGv_i64 load_frw0_i64(unsigned rt)
{
    if (rt == 0) {
        return tcg_const_i64(0);
    } else {
        TCGv_i64 ret = tcg_temp_new_i64();
        tcg_gen_ld32u_i64(ret, cpu_env,
                          offsetof(CPUHPPAState, fr[rt & 31])
                          + (rt & 32 ? LO_OFS : HI_OFS));
        return ret;
    }
}

static void save_frw_i32(unsigned rt, TCGv_i32 val)
{
    tcg_gen_st_i32(val, cpu_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
}

#undef HI_OFS
#undef LO_OFS

static TCGv_i64 load_frd(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    tcg_gen_ld_i64(ret, cpu_env, offsetof(CPUHPPAState, fr[rt]));
    return ret;
}

static TCGv_i64 load_frd0(unsigned rt)
{
    if (rt == 0) {
        return tcg_const_i64(0);
    } else {
        return load_frd(rt);
    }
}

static void save_frd(unsigned rt, TCGv_i64 val)
{
    tcg_gen_st_i64(val, cpu_env, offsetof(CPUHPPAState, fr[rt]));
}

/* Skip over the implementation of an insn that has been nullified.
   Use this when the insn is too complex for a conditional move.  */
static void nullify_over(DisasContext *ctx)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        /* The always condition should have been handled in the main loop.  */
        assert(ctx->null_cond.c != TCG_COND_ALWAYS);

        ctx->null_lab = gen_new_label();
        cond_prep(&ctx->null_cond);

        /* If we're using PSW[N], copy it to a temp because...  */
        if (ctx->null_cond.a0_is_n) {
            ctx->null_cond.a0_is_n = false;
            ctx->null_cond.a0 = tcg_temp_new();
            tcg_gen_mov_tl(ctx->null_cond.a0, cpu_psw_n);
        }
        /* ... we clear it before branching over the implementation,
           so that (1) it's clear after nullifying this insn and
           (2) if this insn nullifies the next, PSW[N] is valid.  */
        if (ctx->psw_n_nonzero) {
            ctx->psw_n_nonzero = false;
            tcg_gen_movi_tl(cpu_psw_n, 0);
        }

        tcg_gen_brcond_tl(ctx->null_cond.c, ctx->null_cond.a0,
                          ctx->null_cond.a1, ctx->null_lab);
        cond_free(&ctx->null_cond);
    }
}

/* Save the current nullification state to PSW[N].  */
static void nullify_save(DisasContext *ctx)
{
    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (ctx->psw_n_nonzero) {
            tcg_gen_movi_tl(cpu_psw_n, 0);
        }
        return;
    }
    if (!ctx->null_cond.a0_is_n) {
        cond_prep(&ctx->null_cond);
        tcg_gen_setcond_tl(ctx->null_cond.c, cpu_psw_n,
                           ctx->null_cond.a0, ctx->null_cond.a1);
        ctx->psw_n_nonzero = true;
    }
    cond_free(&ctx->null_cond);
}

/* Set PSW[N] to X.  The intention is that this is used immediately
   before a goto_tb/exit_tb, so that there is no fallthru path to other
   code within the TB.  Therefore we do not update psw_n_nonzero.  */
static void nullify_set(DisasContext *ctx, bool x)
{
    if (ctx->psw_n_nonzero || x) {
        tcg_gen_movi_tl(cpu_psw_n, x);
    }
}

/* Mark the end of an instruction that may have been nullified.
   This is the pair to nullify_over.  */
static ExitStatus nullify_end(DisasContext *ctx, ExitStatus status)
{
    TCGLabel *null_lab = ctx->null_lab;

    if (likely(null_lab == NULL)) {
        /* The current insn wasn't conditional or handled the condition
           applied to it without a branch, so the (new) setting of
           NULL_COND can be applied directly to the next insn.  */
        return status;
    }
    ctx->null_lab = NULL;

    if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
        /* The next instruction will be unconditional,
           and NULL_COND already reflects that.  */
        gen_set_label(null_lab);
    } else {
        /* The insn that we just executed is itself nullifying the next
           instruction.  Store the condition in the PSW[N] global.
           We asserted PSW[N] = 0 in nullify_over, so that after the
           label we have the proper value in place.  */
        nullify_save(ctx);
        gen_set_label(null_lab);
        ctx->null_cond = cond_make_n();
    }

    assert(status != EXIT_GOTO_TB && status != EXIT_IAQ_N_UPDATED);
    if (status == EXIT_NORETURN) {
        status = NO_EXIT;
    }
    return status;
}

static void copy_iaoq_entry(TCGv dest, target_ulong ival, TCGv vval)
{
    if (unlikely(ival == -1)) {
        tcg_gen_mov_tl(dest, vval);
    } else {
        tcg_gen_movi_tl(dest, ival);
    }
}

static inline target_ulong iaoq_dest(DisasContext *ctx, target_long disp)
{
    return ctx->iaoq_f + disp + 8;
}

static void gen_excp_1(int exception)
{
    TCGv_i32 t = tcg_const_i32(exception);
    gen_helper_excp(cpu_env, t);
    tcg_temp_free_i32(t);
}

static ExitStatus gen_excp(DisasContext *ctx, int exception)
{
    copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
    copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
    nullify_save(ctx);
    gen_excp_1(exception);
    return EXIT_NORETURN;
}

static ExitStatus gen_illegal(DisasContext *ctx)
{
    nullify_over(ctx);
    return nullify_end(ctx, gen_excp(ctx, EXCP_SIGILL));
}

static bool use_goto_tb(DisasContext *ctx, target_ulong dest)
{
    /* Suppress goto_tb in the case of single-stepping and IO.  */
    if ((ctx->tb->cflags & CF_LAST_IO) || ctx->singlestep_enabled) {
        return false;
    }
    return true;
}

/* If the next insn is to be nullified, and it's on the same page,
   and we're not attempting to set a breakpoint on it, then we can
   totally skip the nullified insn.  This avoids creating and
   executing a TB that merely branches to the next TB.  */
static bool use_nullify_skip(DisasContext *ctx)
{
    return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
            && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
}

static void gen_goto_tb(DisasContext *ctx, int which,
                        target_ulong f, target_ulong b)
{
    if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
        tcg_gen_goto_tb(which);
        tcg_gen_movi_tl(cpu_iaoq_f, f);
        tcg_gen_movi_tl(cpu_iaoq_b, b);
        tcg_gen_exit_tb((uintptr_t)ctx->tb + which);
    } else {
        copy_iaoq_entry(cpu_iaoq_f, f, cpu_iaoq_b);
        copy_iaoq_entry(cpu_iaoq_b, b, ctx->iaoq_n_var);
        if (ctx->singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG);
        } else {
            tcg_gen_exit_tb(0);
        }
    }
}

/* PA has a habit of taking the LSB of a field and using that as the sign,
   with the rest of the field becoming the least significant bits.  */
static target_long low_sextract(uint32_t val, int pos, int len)
{
    target_ulong x = -(target_ulong)extract32(val, pos, 1);
    x = (x << (len - 1)) | extract32(val, pos + 1, len - 1);
    return x;
}
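/* For illustration: with pos = 0 and len = 11, as used for the ADDI/SUBI
   immediates below, bit 0 supplies the sign and bits 1..10 the low-order
   value bits.  A raw field of 0x002 decodes to +1, while 0x003 decodes
   to -1024 + 1 = -1023.  */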
static unsigned assemble_rt64(uint32_t insn)
{
    unsigned r1 = extract32(insn, 6, 1);
    unsigned r0 = extract32(insn, 0, 5);
    return r1 * 32 + r0;
}

static unsigned assemble_ra64(uint32_t insn)
{
    unsigned r1 = extract32(insn, 7, 1);
    unsigned r0 = extract32(insn, 21, 5);
    return r1 * 32 + r0;
}

static unsigned assemble_rb64(uint32_t insn)
{
    unsigned r1 = extract32(insn, 12, 1);
    unsigned r0 = extract32(insn, 16, 5);
    return r1 * 32 + r0;
}

static unsigned assemble_rc64(uint32_t insn)
{
    unsigned r2 = extract32(insn, 8, 1);
    unsigned r1 = extract32(insn, 13, 3);
    unsigned r0 = extract32(insn, 9, 2);
    return r2 * 32 + r1 * 4 + r0;
}

static target_long assemble_12(uint32_t insn)
{
    target_ulong x = -(target_ulong)(insn & 1);
    x = (x << 1) | extract32(insn, 2, 1);
    x = (x << 10) | extract32(insn, 3, 10);
    return x;
}

static target_long assemble_16(uint32_t insn)
{
    /* Take the name from PA2.0, which produces a 16-bit number
       only with wide mode; otherwise a 14-bit number.  Since we don't
       implement wide mode, this is always the 14-bit number.  */
    return low_sextract(insn, 0, 14);
}

static target_long assemble_16a(uint32_t insn)
{
    /* Take the name from PA2.0, which produces a 14-bit shifted number
       only with wide mode; otherwise a 12-bit shifted number.  Since we
       don't implement wide mode, this is always the 12-bit number.  */
    target_ulong x = -(target_ulong)(insn & 1);
    x = (x << 11) | extract32(insn, 2, 11);
    return x << 2;
}

static target_long assemble_17(uint32_t insn)
{
    target_ulong x = -(target_ulong)(insn & 1);
    x = (x << 5) | extract32(insn, 16, 5);
    x = (x << 1) | extract32(insn, 2, 1);
    x = (x << 10) | extract32(insn, 3, 10);
    return x << 2;
}

static target_long assemble_21(uint32_t insn)
{
    target_ulong x = -(target_ulong)(insn & 1);
    x = (x << 11) | extract32(insn, 1, 11);
    x = (x << 2) | extract32(insn, 14, 2);
    x = (x << 5) | extract32(insn, 16, 5);
    x = (x << 2) | extract32(insn, 12, 2);
    return x << 11;
}

static target_long assemble_22(uint32_t insn)
{
    target_ulong x = -(target_ulong)(insn & 1);
    x = (x << 10) | extract32(insn, 16, 10);
    x = (x << 1) | extract32(insn, 2, 1);
    x = (x << 10) | extract32(insn, 3, 10);
    return x << 2;
}

/* The parisc documentation describes only the general interpretation of
   the conditions, without describing their exact implementation.  The
   interpretations do not stand up well when considering ADD,C and SUB,B.
   However, considering the Addition, Subtraction and Logical conditions
   as a whole, it would appear that these relations are similar to what
   a traditional NZCV set of flags would produce.  */
static DisasCond do_cond(unsigned cf, TCGv res, TCGv cb_msb, TCGv sv)
{
    DisasCond cond;
    TCGv tmp;

    switch (cf >> 1) {
    case 0: /* Never / TR */
        cond = cond_make_f();
        break;
    case 1: /* = / <>  (Z / !Z) */
        cond = cond_make_0(TCG_COND_EQ, res);
        break;
    case 2: /* < / >=  (N / !N) */
        cond = cond_make_0(TCG_COND_LT, res);
        break;
    case 3: /* <= / >  (N | Z / !N & !Z) */
        cond = cond_make_0(TCG_COND_LE, res);
        break;
    case 4: /* NUV / UV  (!C / C) */
        cond = cond_make_0(TCG_COND_EQ, cb_msb);
        break;
    case 5: /* ZNV / VNZ  (!C | Z / C & !Z) */
        tmp = tcg_temp_new();
        tcg_gen_neg_tl(tmp, cb_msb);
        tcg_gen_and_tl(tmp, tmp, res);
        cond = cond_make_0(TCG_COND_EQ, tmp);
        tcg_temp_free(tmp);
        break;
    case 6: /* SV / NSV  (V / !V) */
        cond = cond_make_0(TCG_COND_LT, sv);
        break;
    case 7: /* OD / EV */
        tmp = tcg_temp_new();
        tcg_gen_andi_tl(tmp, res, 1);
        cond = cond_make_0(TCG_COND_NE, tmp);
        tcg_temp_free(tmp);
        break;
    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/* Similar, but for the special case of subtraction without borrow, we
   can use the inputs directly.  This can allow other computation to be
   deleted as unused.  */

static DisasCond do_sub_cond(unsigned cf, TCGv res, TCGv in1, TCGv in2, TCGv sv)
{
    DisasCond cond;

    switch (cf >> 1) {
    case 1: /* = / <> */
        cond = cond_make(TCG_COND_EQ, in1, in2);
        break;
    case 2: /* < / >= */
        cond = cond_make(TCG_COND_LT, in1, in2);
        break;
    case 3: /* <= / > */
        cond = cond_make(TCG_COND_LE, in1, in2);
        break;
    case 4: /* << / >>= */
        cond = cond_make(TCG_COND_LTU, in1, in2);
        break;
    case 5: /* <<= / >> */
        cond = cond_make(TCG_COND_LEU, in1, in2);
        break;
    default:
        return do_cond(cf, res, sv, sv);
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/* Similar, but for logicals, where the carry and overflow bits are not
   computed, and use of them is undefined.  */

static DisasCond do_log_cond(unsigned cf, TCGv res)
{
    switch (cf >> 1) {
    case 4: case 5: case 6:
        cf &= 1;
        break;
    }
    return do_cond(cf, res, res, res);
}

/* Similar, but for shift/extract/deposit conditions.  */

static DisasCond do_sed_cond(unsigned orig, TCGv res)
{
    unsigned c, f;

    /* Convert the compressed condition codes to standard.
       0-2 are the same as logicals (nv,<,<=), while 3 is OD.
       4-7 are the reverse of 0-3.  */
    c = orig & 3;
    if (c == 3) {
        c = 7;
    }
    f = (orig & 4) / 4;

    return do_log_cond(c * 2 + f, res);
}

/* Similar, but for unit conditions.  */

static DisasCond do_unit_cond(unsigned cf, TCGv res, TCGv in1, TCGv in2)
{
    DisasCond cond;
    TCGv tmp, cb;

    TCGV_UNUSED(cb);
    if (cf & 8) {
        /* Since we want to test lots of carry-out bits all at once, do not
         * do our normal thing and compute carry-in of bit B+1 since that
         * leaves us with carry bits spread across two words.
         */
        cb = tcg_temp_new();
        tmp = tcg_temp_new();
        tcg_gen_or_tl(cb, in1, in2);
        tcg_gen_and_tl(tmp, in1, in2);
        tcg_gen_andc_tl(cb, cb, res);
        tcg_gen_or_tl(cb, cb, tmp);
        tcg_temp_free(tmp);
    }

    switch (cf >> 1) {
    case 0: /* never / TR */
    case 1: /* undefined */
    case 5: /* undefined */
        cond = cond_make_f();
        break;

    case 2: /* SBZ / NBZ */
        /* See hasless(v,1) from
         * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
         */
        tmp = tcg_temp_new();
        tcg_gen_subi_tl(tmp, res, 0x01010101u);
        tcg_gen_andc_tl(tmp, tmp, res);
        tcg_gen_andi_tl(tmp, tmp, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        tcg_temp_free(tmp);
        break;

    case 3: /* SHZ / NHZ */
        tmp = tcg_temp_new();
        tcg_gen_subi_tl(tmp, res, 0x00010001u);
        tcg_gen_andc_tl(tmp, tmp, res);
        tcg_gen_andi_tl(tmp, tmp, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        tcg_temp_free(tmp);
        break;

    case 4: /* SDC / NDC */
        tcg_gen_andi_tl(cb, cb, 0x88888888u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 6: /* SBC / NBC */
        tcg_gen_andi_tl(cb, cb, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 7: /* SHC / NHC */
        tcg_gen_andi_tl(cb, cb, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    default:
        g_assert_not_reached();
    }
    if (cf & 8) {
        tcg_temp_free(cb);
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/* Compute signed overflow for addition.  */
static TCGv do_add_sv(DisasContext *ctx, TCGv res, TCGv in1, TCGv in2)
{
    TCGv sv = get_temp(ctx);
    TCGv tmp = tcg_temp_new();

    tcg_gen_xor_tl(sv, res, in1);
    tcg_gen_xor_tl(tmp, in1, in2);
    tcg_gen_andc_tl(sv, sv, tmp);
    tcg_temp_free(tmp);

    return sv;
}

/* Compute signed overflow for subtraction.  */
static TCGv do_sub_sv(DisasContext *ctx, TCGv res, TCGv in1, TCGv in2)
{
    TCGv sv = get_temp(ctx);
    TCGv tmp = tcg_temp_new();

    tcg_gen_xor_tl(sv, res, in1);
    tcg_gen_xor_tl(tmp, in1, in2);
    tcg_gen_and_tl(sv, sv, tmp);
    tcg_temp_free(tmp);

    return sv;
}
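/* In both helpers the overflow indication ends up in the sign bit of SV,
   which is what do_cond tests via TCG_COND_LT.  For addition, overflow
   occurs when IN1 and IN2 share a sign that RES does not, hence
   (RES ^ IN1) & ~(IN1 ^ IN2); for subtraction RES = IN1 - IN2, overflow
   occurs when IN1 and IN2 differ in sign and RES differs from IN1,
   hence (RES ^ IN1) & (IN1 ^ IN2).  */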
static ExitStatus do_add(DisasContext *ctx, unsigned rt, TCGv in1, TCGv in2,
                         unsigned shift, bool is_l, bool is_tsv, bool is_tc,
                         bool is_c, unsigned cf)
{
    TCGv dest, cb, cb_msb, sv, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new();
    TCGV_UNUSED(cb);
    TCGV_UNUSED(cb_msb);

    if (shift) {
        tmp = get_temp(ctx);
        tcg_gen_shli_tl(tmp, in1, shift);
        in1 = tmp;
    }

    if (!is_l || c == 4 || c == 5) {
        TCGv zero = tcg_const_tl(0);
        cb_msb = get_temp(ctx);
        tcg_gen_add2_tl(dest, cb_msb, in1, zero, in2, zero);
        if (is_c) {
            tcg_gen_add2_tl(dest, cb_msb, dest, cb_msb, cpu_psw_cb_msb, zero);
        }
        tcg_temp_free(zero);
        if (!is_l) {
            cb = get_temp(ctx);
            tcg_gen_xor_tl(cb, in1, in2);
            tcg_gen_xor_tl(cb, cb, dest);
        }
    } else {
        tcg_gen_add_tl(dest, in1, in2);
        if (is_c) {
            tcg_gen_add_tl(dest, dest, cpu_psw_cb_msb);
        }
    }

    /* Compute signed overflow if required.  */
    TCGV_UNUSED(sv);
    if (is_tsv || c == 6) {
        sv = do_add_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            /* ??? Need to include overflow from shift.  */
            gen_helper_tsv(cpu_env, sv);
        }
    }

    /* Emit any conditional trap before any writeback.  */
    cond = do_cond(cf, dest, cb_msb, sv);
    if (is_tc) {
        cond_prep(&cond);
        tmp = tcg_temp_new();
        tcg_gen_setcond_tl(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(cpu_env, tmp);
        tcg_temp_free(tmp);
    }

    /* Write back the result.  */
    if (!is_l) {
        save_or_nullify(ctx, cpu_psw_cb, cb);
        save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    }
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
    return NO_EXIT;
}

static ExitStatus do_sub(DisasContext *ctx, unsigned rt, TCGv in1, TCGv in2,
                         bool is_tsv, bool is_b, bool is_tc, unsigned cf)
{
    TCGv dest, sv, cb, cb_msb, zero, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new();
    cb = tcg_temp_new();
    cb_msb = tcg_temp_new();

    zero = tcg_const_tl(0);
    if (is_b) {
        /* DEST,C = IN1 + ~IN2 + C.  */
        tcg_gen_not_tl(cb, in2);
        tcg_gen_add2_tl(dest, cb_msb, in1, zero, cpu_psw_cb_msb, zero);
        tcg_gen_add2_tl(dest, cb_msb, dest, cb_msb, cb, zero);
        tcg_gen_xor_tl(cb, cb, in1);
        tcg_gen_xor_tl(cb, cb, dest);
    } else {
        /* DEST,C = IN1 + ~IN2 + 1.  We can produce the same result in fewer
           operations by seeding the high word with 1 and subtracting.  */
        tcg_gen_movi_tl(cb_msb, 1);
        tcg_gen_sub2_tl(dest, cb_msb, in1, cb_msb, in2, zero);
        tcg_gen_eqv_tl(cb, in1, in2);
        tcg_gen_xor_tl(cb, cb, dest);
    }
    tcg_temp_free(zero);

    /* Compute signed overflow if required.  */
    TCGV_UNUSED(sv);
    if (is_tsv || c == 6) {
        sv = do_sub_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            gen_helper_tsv(cpu_env, sv);
        }
    }

    /* Compute the condition.  We cannot use the special case for borrow.  */
    if (!is_b) {
        cond = do_sub_cond(cf, dest, in1, in2, sv);
    } else {
        cond = do_cond(cf, dest, cb_msb, sv);
    }

    /* Emit any conditional trap before any writeback.  */
    if (is_tc) {
        cond_prep(&cond);
        tmp = tcg_temp_new();
        tcg_gen_setcond_tl(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(cpu_env, tmp);
        tcg_temp_free(tmp);
    }

    /* Write back the result.  */
    save_or_nullify(ctx, cpu_psw_cb, cb);
    save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
    return NO_EXIT;
}

static ExitStatus do_cmpclr(DisasContext *ctx, unsigned rt, TCGv in1,
                            TCGv in2, unsigned cf)
{
    TCGv dest, sv;
    DisasCond cond;

    dest = tcg_temp_new();
    tcg_gen_sub_tl(dest, in1, in2);

    /* Compute signed overflow if required.  */
    TCGV_UNUSED(sv);
    if ((cf >> 1) == 6) {
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    /* Form the condition for the compare.  */
    cond = do_sub_cond(cf, dest, in1, in2, sv);

    /* Clear.  */
    tcg_gen_movi_tl(dest, 0);
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
    return NO_EXIT;
}

static ExitStatus do_log(DisasContext *ctx, unsigned rt, TCGv in1, TCGv in2,
                         unsigned cf, void (*fn)(TCGv, TCGv, TCGv))
{
    TCGv dest = dest_gpr(ctx, rt);

    /* Perform the operation, and writeback.  */
    fn(dest, in1, in2);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (cf) {
        ctx->null_cond = do_log_cond(cf, dest);
    }
    return NO_EXIT;
}

static ExitStatus do_unit(DisasContext *ctx, unsigned rt, TCGv in1,
                          TCGv in2, unsigned cf, bool is_tc,
                          void (*fn)(TCGv, TCGv, TCGv))
{
    TCGv dest;
    DisasCond cond;

    if (cf == 0) {
        dest = dest_gpr(ctx, rt);
        fn(dest, in1, in2);
        save_gpr(ctx, rt, dest);
        cond_free(&ctx->null_cond);
    } else {
        dest = tcg_temp_new();
        fn(dest, in1, in2);

        cond = do_unit_cond(cf, dest, in1, in2);

        if (is_tc) {
            TCGv tmp = tcg_temp_new();
            cond_prep(&cond);
            tcg_gen_setcond_tl(cond.c, tmp, cond.a0, cond.a1);
            gen_helper_tcond(cpu_env, tmp);
            tcg_temp_free(tmp);
        }
        save_gpr(ctx, rt, dest);

        cond_free(&ctx->null_cond);
        ctx->null_cond = cond;
    }
    return NO_EXIT;
}

/* Emit a memory load.  The modify parameter should be
 * < 0 for pre-modify,
 * > 0 for post-modify,
 * = 0 for no base register update.
 */
static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
                       unsigned rx, int scale, target_long disp,
                       int modify, TCGMemOp mop)
{
    TCGv addr, base;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    addr = tcg_temp_new();
    base = load_gpr(ctx, rb);

    /* Note that RX is mutually exclusive with DISP.  */
    if (rx) {
        tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
        tcg_gen_add_tl(addr, addr, base);
    } else {
        tcg_gen_addi_tl(addr, base, disp);
    }

    if (modify == 0) {
        tcg_gen_qemu_ld_i32(dest, addr, MMU_USER_IDX, mop);
    } else {
        tcg_gen_qemu_ld_i32(dest, (modify < 0 ? addr : base),
                            MMU_USER_IDX, mop);
        save_gpr(ctx, rb, addr);
    }
    tcg_temp_free(addr);
}

static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
                       unsigned rx, int scale, target_long disp,
                       int modify, TCGMemOp mop)
{
    TCGv addr, base;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    addr = tcg_temp_new();
    base = load_gpr(ctx, rb);

    /* Note that RX is mutually exclusive with DISP.  */
    if (rx) {
        tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
        tcg_gen_add_tl(addr, addr, base);
    } else {
        tcg_gen_addi_tl(addr, base, disp);
    }

    if (modify == 0) {
        tcg_gen_qemu_ld_i64(dest, addr, MMU_USER_IDX, mop);
    } else {
        tcg_gen_qemu_ld_i64(dest, (modify < 0 ? addr : base),
                            MMU_USER_IDX, mop);
        save_gpr(ctx, rb, addr);
    }
    tcg_temp_free(addr);
}
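/* As used here, pre-modify (< 0) performs the access at the updated
   address, while post-modify (> 0) accesses the old base before updating
   it, which appears to match the PA ,MB and ,MA completers respectively.
   The stores below follow the same convention.  */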
static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
                        unsigned rx, int scale, target_long disp,
                        int modify, TCGMemOp mop)
{
    TCGv addr, base;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    addr = tcg_temp_new();
    base = load_gpr(ctx, rb);

    /* Note that RX is mutually exclusive with DISP.  */
    if (rx) {
        tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
        tcg_gen_add_tl(addr, addr, base);
    } else {
        tcg_gen_addi_tl(addr, base, disp);
    }

    tcg_gen_qemu_st_i32(src, (modify <= 0 ? addr : base), MMU_USER_IDX, mop);

    if (modify != 0) {
        save_gpr(ctx, rb, addr);
    }
    tcg_temp_free(addr);
}

static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
                        unsigned rx, int scale, target_long disp,
                        int modify, TCGMemOp mop)
{
    TCGv addr, base;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    addr = tcg_temp_new();
    base = load_gpr(ctx, rb);

    /* Note that RX is mutually exclusive with DISP.  */
    if (rx) {
        tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
        tcg_gen_add_tl(addr, addr, base);
    } else {
        tcg_gen_addi_tl(addr, base, disp);
    }

    tcg_gen_qemu_st_i64(src, (modify <= 0 ? addr : base), MMU_USER_IDX, mop);

    if (modify != 0) {
        save_gpr(ctx, rb, addr);
    }
    tcg_temp_free(addr);
}

#if TARGET_LONG_BITS == 64
#define do_load_tl  do_load_64
#define do_store_tl do_store_64
#else
#define do_load_tl  do_load_32
#define do_store_tl do_store_32
#endif

static ExitStatus do_load(DisasContext *ctx, unsigned rt, unsigned rb,
                          unsigned rx, int scale, target_long disp,
                          int modify, TCGMemOp mop)
{
    TCGv dest;

    nullify_over(ctx);

    if (modify == 0) {
        /* No base register update.  */
        dest = dest_gpr(ctx, rt);
    } else {
        /* Make sure if RT == RB, we see the result of the load.  */
        dest = get_temp(ctx);
    }
    do_load_tl(ctx, dest, rb, rx, scale, disp, modify, mop);
    save_gpr(ctx, rt, dest);

    return nullify_end(ctx, NO_EXIT);
}

static ExitStatus do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
                            unsigned rx, int scale, target_long disp,
                            int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i32();
    do_load_32(ctx, tmp, rb, rx, scale, disp, modify, MO_TEUL);
    save_frw_i32(rt, tmp);
    tcg_temp_free_i32(tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(cpu_env);
    }

    return nullify_end(ctx, NO_EXIT);
}

static ExitStatus do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
                            unsigned rx, int scale, target_long disp,
                            int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i64();
    do_load_64(ctx, tmp, rb, rx, scale, disp, modify, MO_TEQ);
    save_frd(rt, tmp);
    tcg_temp_free_i64(tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(cpu_env);
    }

    return nullify_end(ctx, NO_EXIT);
}

static ExitStatus do_store(DisasContext *ctx, unsigned rt, unsigned rb,
                           target_long disp, int modify, TCGMemOp mop)
{
    nullify_over(ctx);
    do_store_tl(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, modify, mop);
    return nullify_end(ctx, NO_EXIT);
}

static ExitStatus do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
                             unsigned rx, int scale, target_long disp,
                             int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = load_frw_i32(rt);
    do_store_32(ctx, tmp, rb, rx, scale, disp, modify, MO_TEUL);
    tcg_temp_free_i32(tmp);

    return nullify_end(ctx, NO_EXIT);
}

static ExitStatus do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
                             unsigned rx, int scale, target_long disp,
                             int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = load_frd(rt);
    do_store_64(ctx, tmp, rb, rx, scale, disp, modify, MO_TEQ);
    tcg_temp_free_i64(tmp);

    return nullify_end(ctx, NO_EXIT);
}

static ExitStatus do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
                             void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
{
    TCGv_i32 tmp;

    nullify_over(ctx);
    tmp = load_frw0_i32(ra);

    func(tmp, cpu_env, tmp);

    save_frw_i32(rt, tmp);
    tcg_temp_free_i32(tmp);
    return nullify_end(ctx, NO_EXIT);
}

static ExitStatus do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
                             void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    nullify_over(ctx);
    src = load_frd(ra);
    dst = tcg_temp_new_i32();

    func(dst, cpu_env, src);

    tcg_temp_free_i64(src);
    save_frw_i32(rt, dst);
    tcg_temp_free_i32(dst);
    return nullify_end(ctx, NO_EXIT);
}

static ExitStatus do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
                             void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
{
    TCGv_i64 tmp;

    nullify_over(ctx);
    tmp = load_frd0(ra);

    func(tmp, cpu_env, tmp);

    save_frd(rt, tmp);
    tcg_temp_free_i64(tmp);
    return nullify_end(ctx, NO_EXIT);
}

static ExitStatus do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
                             void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
{
    TCGv_i32 src;
    TCGv_i64 dst;

    nullify_over(ctx);
    src = load_frw0_i32(ra);
    dst = tcg_temp_new_i64();

    func(dst, cpu_env, src);

    tcg_temp_free_i32(src);
    save_frd(rt, dst);
    tcg_temp_free_i64(dst);
    return nullify_end(ctx, NO_EXIT);
}

static ExitStatus do_fop_weww(DisasContext *ctx, unsigned rt,
                              unsigned ra, unsigned rb,
                              void (*func)(TCGv_i32, TCGv_env,
                                           TCGv_i32, TCGv_i32))
{
    TCGv_i32 a, b;

    nullify_over(ctx);
    a = load_frw0_i32(ra);
    b = load_frw0_i32(rb);

    func(a, cpu_env, a, b);

    tcg_temp_free_i32(b);
    save_frw_i32(rt, a);
    tcg_temp_free_i32(a);
    return nullify_end(ctx, NO_EXIT);
}

static ExitStatus do_fop_dedd(DisasContext *ctx, unsigned rt,
                              unsigned ra, unsigned rb,
                              void (*func)(TCGv_i64, TCGv_env,
                                           TCGv_i64, TCGv_i64))
{
    TCGv_i64 a, b;

    nullify_over(ctx);
    a = load_frd0(ra);
    b = load_frd0(rb);

    func(a, cpu_env, a, b);

    tcg_temp_free_i64(b);
    save_frd(rt, a);
    tcg_temp_free_i64(a);
    return nullify_end(ctx, NO_EXIT);
}
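/* A note on the branch helpers below: the PA instruction address queue is
   modelled as a front element (IAOQ_F, the insn currently executing) and a
   back element (IAOQ_B, the following insn, i.e. the delay slot).  A taken
   branch therefore only redirects the insn after the delay slot;
   iaoq_n/iaoq_n_var in DisasContext track that third address during
   translation.  */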
/* Emit an unconditional branch to a direct target, which may or may not
   have already had nullification handled.  */
static ExitStatus do_dbranch(DisasContext *ctx, target_ulong dest,
                             unsigned link, bool is_n)
{
    if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
        if (link != 0) {
            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        ctx->iaoq_n = dest;
        if (is_n) {
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
        return NO_EXIT;
    } else {
        nullify_over(ctx);

        if (link != 0) {
            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }

        if (is_n && use_nullify_skip(ctx)) {
            nullify_set(ctx, 0);
            gen_goto_tb(ctx, 0, dest, dest + 4);
        } else {
            nullify_set(ctx, is_n);
            gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
        }

        nullify_end(ctx, NO_EXIT);

        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
        return EXIT_GOTO_TB;
    }
}

/* Emit a conditional branch to a direct target.  If the branch itself
   is nullified, we should have already used nullify_over.  */
static ExitStatus do_cbranch(DisasContext *ctx, target_long disp, bool is_n,
                             DisasCond *cond)
{
    target_ulong dest = iaoq_dest(ctx, disp);
    TCGLabel *taken = NULL;
    TCGCond c = cond->c;
    bool n;

    assert(ctx->null_cond.c == TCG_COND_NEVER);

    /* Handle TRUE and NEVER as direct branches.  */
    if (c == TCG_COND_ALWAYS) {
        return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
    }
    if (c == TCG_COND_NEVER) {
        return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
    }

    taken = gen_new_label();
    cond_prep(cond);
    tcg_gen_brcond_tl(c, cond->a0, cond->a1, taken);
    cond_free(cond);

    /* Not taken: Condition not satisfied; nullify on backward branches.  */
    n = is_n && disp < 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
    } else {
        if (!n && ctx->null_lab) {
            gen_set_label(ctx->null_lab);
            ctx->null_lab = NULL;
        }
        nullify_set(ctx, n);
        gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
    }

    gen_set_label(taken);

    /* Taken: Condition satisfied; nullify on forward branches.  */
    n = is_n && disp >= 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, dest, dest + 4);
    } else {
        nullify_set(ctx, n);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
    }

    /* Not taken: the branch itself was nullified.  */
    if (ctx->null_lab) {
        gen_set_label(ctx->null_lab);
        ctx->null_lab = NULL;
        return EXIT_IAQ_N_STALE;
    } else {
        return EXIT_GOTO_TB;
    }
}

/* Emit an unconditional branch to an indirect target.  This handles
   nullification of the branch itself.  */
static ExitStatus do_ibranch(DisasContext *ctx, TCGv dest,
                             unsigned link, bool is_n)
{
    TCGv a0, a1, next, tmp;
    TCGCond c;

    assert(ctx->null_lab == NULL);

    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (link != 0) {
            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        next = get_temp(ctx);
        tcg_gen_mov_tl(next, dest);
        ctx->iaoq_n = -1;
        ctx->iaoq_n_var = next;
        if (is_n) {
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
    } else if (is_n && use_nullify_skip(ctx)) {
        /* The (conditional) branch, B, nullifies the next insn, N,
           and we're allowed to skip execution of N (no single-step or
           tracepoint in effect).  Since the exit_tb that we must use
           for the indirect branch consumes no special resources, we
           can (conditionally) skip B and continue execution.  */
        /* The use_nullify_skip test implies we have a known control path.  */
        tcg_debug_assert(ctx->iaoq_b != -1);
        tcg_debug_assert(ctx->iaoq_n != -1);

        /* We do have to handle the non-local temporary, DEST, before
           branching.  Since IAOQ_F is not really live at this point, we
           can simply store DEST optimistically.  Similarly with IAOQ_B.  */
        tcg_gen_mov_tl(cpu_iaoq_f, dest);
        tcg_gen_addi_tl(cpu_iaoq_b, dest, 4);

        nullify_over(ctx);
        if (link != 0) {
            tcg_gen_movi_tl(cpu_gr[link], ctx->iaoq_n);
        }
        tcg_gen_exit_tb(0);
        return nullify_end(ctx, NO_EXIT);
    } else {
        cond_prep(&ctx->null_cond);
        c = ctx->null_cond.c;
        a0 = ctx->null_cond.a0;
        a1 = ctx->null_cond.a1;

        tmp = tcg_temp_new();
        next = get_temp(ctx);

        copy_iaoq_entry(tmp, ctx->iaoq_n, ctx->iaoq_n_var);
        tcg_gen_movcond_tl(c, next, a0, a1, tmp, dest);
        ctx->iaoq_n = -1;
        ctx->iaoq_n_var = next;

        if (link != 0) {
            tcg_gen_movcond_tl(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
        }

        if (is_n) {
            /* The branch nullifies the next insn, which means the state of N
               after the branch is the inverse of the state of N that applied
               to the branch.  */
            tcg_gen_setcond_tl(tcg_invert_cond(c), cpu_psw_n, a0, a1);
            cond_free(&ctx->null_cond);
            ctx->null_cond = cond_make_n();
            ctx->psw_n_nonzero = true;
        } else {
            cond_free(&ctx->null_cond);
        }
    }

    return NO_EXIT;
}

/* On Linux, page zero is normally marked execute only + gateway.
   Therefore normal read or write is supposed to fail, but specific
   offsets have kernel code mapped to raise permissions to implement
   system calls.  Handling this via an explicit check here, rather
   than in the "be disp(sr2,r0)" instruction that probably sent us
   here, is the easiest way to handle the branch delay slot on the
   aforementioned BE.  */
static ExitStatus do_page_zero(DisasContext *ctx)
{
    /* If by some means we get here with PSW[N]=1, that implies that
       the B,GATE instruction would be skipped, and we'd fault on the
       next insn within the privileged page.  */
    switch (ctx->null_cond.c) {
    case TCG_COND_NEVER:
        break;
    case TCG_COND_ALWAYS:
        tcg_gen_movi_tl(cpu_psw_n, 0);
        goto do_sigill;
    default:
        /* Since this is always the first (and only) insn within the
           TB, we should know the state of PSW[N] from TB->FLAGS.  */
        g_assert_not_reached();
    }

    /* Check that we didn't arrive here via some means that allowed
       non-sequential instruction execution.  Normally the PSW[B] bit
       detects this by disallowing the B,GATE instruction to execute
       under such conditions.  */
    if (ctx->iaoq_b != ctx->iaoq_f + 4) {
        goto do_sigill;
    }

    switch (ctx->iaoq_f) {
    case 0x00: /* Null pointer call */
        gen_excp_1(EXCP_SIGSEGV);
        return EXIT_NORETURN;

    case 0xb0: /* LWS */
        gen_excp_1(EXCP_SYSCALL_LWS);
        return EXIT_NORETURN;

    case 0xe0: /* SET_THREAD_POINTER */
        tcg_gen_mov_tl(cpu_cr27, cpu_gr[26]);
        tcg_gen_mov_tl(cpu_iaoq_f, cpu_gr[31]);
        tcg_gen_addi_tl(cpu_iaoq_b, cpu_iaoq_f, 4);
        return EXIT_IAQ_N_UPDATED;

    case 0x100: /* SYSCALL */
        gen_excp_1(EXCP_SYSCALL);
        return EXIT_NORETURN;

    default:
    do_sigill:
        gen_excp_1(EXCP_SIGILL);
        return EXIT_NORETURN;
    }
}

static ExitStatus trans_nop(DisasContext *ctx, uint32_t insn,
                            const DisasInsn *di)
{
    cond_free(&ctx->null_cond);
    return NO_EXIT;
}

static ExitStatus trans_break(DisasContext *ctx, uint32_t insn,
                              const DisasInsn *di)
{
    nullify_over(ctx);
    return nullify_end(ctx, gen_excp(ctx, EXCP_DEBUG));
}

static ExitStatus trans_sync(DisasContext *ctx, uint32_t insn,
                             const DisasInsn *di)
{
    /* No point in nullifying the memory barrier.  */
    tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);

    cond_free(&ctx->null_cond);
    return NO_EXIT;
}

static ExitStatus trans_mfia(DisasContext *ctx, uint32_t insn,
                             const DisasInsn *di)
{
    unsigned rt = extract32(insn, 0, 5);
    TCGv tmp = dest_gpr(ctx, rt);
    tcg_gen_movi_tl(tmp, ctx->iaoq_f);
    save_gpr(ctx, rt, tmp);

    cond_free(&ctx->null_cond);
    return NO_EXIT;
}

static ExitStatus trans_mfsp(DisasContext *ctx, uint32_t insn,
                             const DisasInsn *di)
{
    unsigned rt = extract32(insn, 0, 5);
    TCGv tmp = dest_gpr(ctx, rt);

    /* ??? We don't implement space registers.  */
    tcg_gen_movi_tl(tmp, 0);
    save_gpr(ctx, rt, tmp);

    cond_free(&ctx->null_cond);
    return NO_EXIT;
}

static ExitStatus trans_mfctl(DisasContext *ctx, uint32_t insn,
                              const DisasInsn *di)
{
    unsigned rt = extract32(insn, 0, 5);
    unsigned ctl = extract32(insn, 21, 5);
    TCGv tmp;

    switch (ctl) {
    case 11: /* SAR */
#ifdef TARGET_HPPA64
        if (extract32(insn, 14, 1) == 0) {
            /* MFSAR without ,W masks low 5 bits.  */
            tmp = dest_gpr(ctx, rt);
            tcg_gen_andi_tl(tmp, cpu_sar, 31);
            save_gpr(ctx, rt, tmp);
            break;
        }
#endif
        save_gpr(ctx, rt, cpu_sar);
        break;
    case 16: /* Interval Timer */
        tmp = dest_gpr(ctx, rt);
        tcg_gen_movi_tl(tmp, 0); /* FIXME */
        save_gpr(ctx, rt, tmp);
        break;
    case 26:
        save_gpr(ctx, rt, cpu_cr26);
        break;
    case 27:
        save_gpr(ctx, rt, cpu_cr27);
        break;
    default:
        /* All other control registers are privileged.  */
        return gen_illegal(ctx);
    }

    cond_free(&ctx->null_cond);
    return NO_EXIT;
}

static ExitStatus trans_mtctl(DisasContext *ctx, uint32_t insn,
                              const DisasInsn *di)
{
    unsigned rin = extract32(insn, 16, 5);
    unsigned ctl = extract32(insn, 21, 5);
    TCGv tmp;

    if (ctl == 11) { /* SAR */
        tmp = tcg_temp_new();
        tcg_gen_andi_tl(tmp, load_gpr(ctx, rin), TARGET_LONG_BITS - 1);
        save_or_nullify(ctx, cpu_sar, tmp);
        tcg_temp_free(tmp);
    } else {
        /* All other control registers are privileged or read-only.  */
        return gen_illegal(ctx);
    }

    cond_free(&ctx->null_cond);
    return NO_EXIT;
}

static ExitStatus trans_mtsarcm(DisasContext *ctx, uint32_t insn,
                                const DisasInsn *di)
{
    unsigned rin = extract32(insn, 16, 5);
    TCGv tmp = tcg_temp_new();

    tcg_gen_not_tl(tmp, load_gpr(ctx, rin));
    tcg_gen_andi_tl(tmp, tmp, TARGET_LONG_BITS - 1);
    save_or_nullify(ctx, cpu_sar, tmp);
    tcg_temp_free(tmp);

    cond_free(&ctx->null_cond);
    return NO_EXIT;
}

static ExitStatus trans_ldsid(DisasContext *ctx, uint32_t insn,
                              const DisasInsn *di)
{
    unsigned rt = extract32(insn, 0, 5);
    TCGv dest = dest_gpr(ctx, rt);

    /* Since we don't implement space registers, this returns zero.  */
    tcg_gen_movi_tl(dest, 0);
    save_gpr(ctx, rt, dest);

    cond_free(&ctx->null_cond);
    return NO_EXIT;
}
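/* The decode tables that follow pair an opcode pattern with a mask; an
   entry is presumably matched by the main decode loop (outside this
   excerpt) when (insn & mask) == pattern, searched in order so that more
   specific entries are listed before more general ones.  */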
static const DisasInsn table_system[] = {
    { 0x00000000u, 0xfc001fe0u, trans_break },
    /* We don't implement space registers, so MTSP is a nop.  */
    { 0x00001820u, 0xffe01fffu, trans_nop },
    { 0x00001840u, 0xfc00ffffu, trans_mtctl },
    { 0x016018c0u, 0xffe0ffffu, trans_mtsarcm },
    { 0x000014a0u, 0xffffffe0u, trans_mfia },
    { 0x000004a0u, 0xffff1fe0u, trans_mfsp },
    { 0x000008a0u, 0xfc1fffe0u, trans_mfctl },
    { 0x00000400u, 0xffffffffu, trans_sync },
    { 0x000010a0u, 0xfc1f3fe0u, trans_ldsid },
};

static ExitStatus trans_base_idx_mod(DisasContext *ctx, uint32_t insn,
                                     const DisasInsn *di)
{
    unsigned rb = extract32(insn, 21, 5);
    unsigned rx = extract32(insn, 16, 5);
    TCGv dest = dest_gpr(ctx, rb);
    TCGv src1 = load_gpr(ctx, rb);
    TCGv src2 = load_gpr(ctx, rx);

    /* The only thing we need to do is the base register modification.  */
    tcg_gen_add_tl(dest, src1, src2);
    save_gpr(ctx, rb, dest);

    cond_free(&ctx->null_cond);
    return NO_EXIT;
}

static ExitStatus trans_probe(DisasContext *ctx, uint32_t insn,
                              const DisasInsn *di)
{
    unsigned rt = extract32(insn, 0, 5);
    unsigned rb = extract32(insn, 21, 5);
    unsigned is_write = extract32(insn, 6, 1);
    TCGv dest;

    nullify_over(ctx);

    /* ??? Do something with the priv level operand.  */
    dest = dest_gpr(ctx, rt);
    if (is_write) {
        gen_helper_probe_w(dest, load_gpr(ctx, rb));
    } else {
        gen_helper_probe_r(dest, load_gpr(ctx, rb));
    }
    save_gpr(ctx, rt, dest);
    return nullify_end(ctx, NO_EXIT);
}

static const DisasInsn table_mem_mgmt[] = {
    { 0x04003280u, 0xfc003fffu, trans_nop },          /* fdc, disp */
    { 0x04001280u, 0xfc003fffu, trans_nop },          /* fdc, index */
    { 0x040012a0u, 0xfc003fffu, trans_base_idx_mod }, /* fdc, index, base mod */
    { 0x040012c0u, 0xfc003fffu, trans_nop },          /* fdce */
    { 0x040012e0u, 0xfc003fffu, trans_base_idx_mod }, /* fdce, base mod */
    { 0x04000280u, 0xfc001fffu, trans_nop },          /* fic 0a */
    { 0x040002a0u, 0xfc001fffu, trans_base_idx_mod }, /* fic 0a, base mod */
    { 0x040013c0u, 0xfc003fffu, trans_nop },          /* fic 4f */
    { 0x040013e0u, 0xfc003fffu, trans_base_idx_mod }, /* fic 4f, base mod */
    { 0x040002c0u, 0xfc001fffu, trans_nop },          /* fice */
    { 0x040002e0u, 0xfc001fffu, trans_base_idx_mod }, /* fice, base mod */
    { 0x04002700u, 0xfc003fffu, trans_nop },          /* pdc */
    { 0x04002720u, 0xfc003fffu, trans_base_idx_mod }, /* pdc, base mod */
    { 0x04001180u, 0xfc003fa0u, trans_probe },        /* probe */
    { 0x04003180u, 0xfc003fa0u, trans_probe },        /* probei */
};

static ExitStatus trans_add(DisasContext *ctx, uint32_t insn,
                            const DisasInsn *di)
{
    unsigned r2 = extract32(insn, 21, 5);
    unsigned r1 = extract32(insn, 16, 5);
    unsigned cf = extract32(insn, 12, 4);
    unsigned ext = extract32(insn, 8, 4);
    unsigned shift = extract32(insn, 6, 2);
    unsigned rt = extract32(insn, 0, 5);
    TCGv tcg_r1, tcg_r2;
    bool is_c = false;
    bool is_l = false;
    bool is_tc = false;
    bool is_tsv = false;
    ExitStatus ret;

    switch (ext) {
    case 0x6: /* ADD, SHLADD */
        break;
    case 0xa: /* ADD,L, SHLADD,L */
        is_l = true;
        break;
    case 0xe: /* ADD,TSV, SHLADD,TSV (1) */
        is_tsv = true;
        break;
    case 0x7: /* ADD,C */
        is_c = true;
        break;
    case 0xf: /* ADD,C,TSV */
        is_c = is_tsv = true;
        break;
    default:
        return gen_illegal(ctx);
    }

    if (cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, r1);
    tcg_r2 = load_gpr(ctx, r2);
    ret = do_add(ctx, rt, tcg_r1, tcg_r2, shift, is_l, is_tsv, is_tc, is_c, cf);
    return nullify_end(ctx, ret);
}

static ExitStatus trans_sub(DisasContext *ctx, uint32_t insn,
                            const DisasInsn *di)
{
    unsigned r2 = extract32(insn, 21, 5);
    unsigned r1 = extract32(insn, 16, 5);
    unsigned cf = extract32(insn, 12, 4);
    unsigned ext = extract32(insn, 6, 6);
    unsigned rt = extract32(insn, 0, 5);
    TCGv tcg_r1, tcg_r2;
    bool is_b = false;
    bool is_tc = false;
    bool is_tsv = false;
    ExitStatus ret;

    switch (ext) {
    case 0x10: /* SUB */
        break;
    case 0x30: /* SUB,TSV */
        is_tsv = true;
        break;
    case 0x14: /* SUB,B */
        is_b = true;
        break;
    case 0x34: /* SUB,B,TSV */
        is_b = is_tsv = true;
        break;
    case 0x13: /* SUB,TC */
        is_tc = true;
        break;
    case 0x33: /* SUB,TSV,TC */
        is_tc = is_tsv = true;
        break;
    default:
        return gen_illegal(ctx);
    }

    if (cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, r1);
    tcg_r2 = load_gpr(ctx, r2);
    ret = do_sub(ctx, rt, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, cf);
    return nullify_end(ctx, ret);
}
static ExitStatus trans_log(DisasContext *ctx, uint32_t insn,
                            const DisasInsn *di)
{
    unsigned r2 = extract32(insn, 21, 5);
    unsigned r1 = extract32(insn, 16, 5);
    unsigned cf = extract32(insn, 12, 4);
    unsigned rt = extract32(insn, 0, 5);
    TCGv tcg_r1, tcg_r2;
    ExitStatus ret;

    if (cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, r1);
    tcg_r2 = load_gpr(ctx, r2);
    ret = do_log(ctx, rt, tcg_r1, tcg_r2, cf, di->f_ttt);
    return nullify_end(ctx, ret);
}

/* OR r,0,t -> COPY (according to gas) */
static ExitStatus trans_copy(DisasContext *ctx, uint32_t insn,
                             const DisasInsn *di)
{
    unsigned r1 = extract32(insn, 16, 5);
    unsigned rt = extract32(insn, 0, 5);

    if (r1 == 0) {
        TCGv dest = dest_gpr(ctx, rt);
        tcg_gen_movi_tl(dest, 0);
        save_gpr(ctx, rt, dest);
    } else {
        save_gpr(ctx, rt, cpu_gr[r1]);
    }
    cond_free(&ctx->null_cond);
    return NO_EXIT;
}

static ExitStatus trans_cmpclr(DisasContext *ctx, uint32_t insn,
                               const DisasInsn *di)
{
    unsigned r2 = extract32(insn, 21, 5);
    unsigned r1 = extract32(insn, 16, 5);
    unsigned cf = extract32(insn, 12, 4);
    unsigned rt = extract32(insn, 0, 5);
    TCGv tcg_r1, tcg_r2;
    ExitStatus ret;

    if (cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, r1);
    tcg_r2 = load_gpr(ctx, r2);
    ret = do_cmpclr(ctx, rt, tcg_r1, tcg_r2, cf);
    return nullify_end(ctx, ret);
}

static ExitStatus trans_uxor(DisasContext *ctx, uint32_t insn,
                             const DisasInsn *di)
{
    unsigned r2 = extract32(insn, 21, 5);
    unsigned r1 = extract32(insn, 16, 5);
    unsigned cf = extract32(insn, 12, 4);
    unsigned rt = extract32(insn, 0, 5);
    TCGv tcg_r1, tcg_r2;
    ExitStatus ret;

    if (cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, r1);
    tcg_r2 = load_gpr(ctx, r2);
    ret = do_unit(ctx, rt, tcg_r1, tcg_r2, cf, false, tcg_gen_xor_tl);
    return nullify_end(ctx, ret);
}

static ExitStatus trans_uaddcm(DisasContext *ctx, uint32_t insn,
                               const DisasInsn *di)
{
    unsigned r2 = extract32(insn, 21, 5);
    unsigned r1 = extract32(insn, 16, 5);
    unsigned cf = extract32(insn, 12, 4);
    unsigned is_tc = extract32(insn, 6, 1);
    unsigned rt = extract32(insn, 0, 5);
    TCGv tcg_r1, tcg_r2, tmp;
    ExitStatus ret;

    if (cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, r1);
    tcg_r2 = load_gpr(ctx, r2);
    tmp = get_temp(ctx);
    tcg_gen_not_tl(tmp, tcg_r2);
    ret = do_unit(ctx, rt, tcg_r1, tmp, cf, is_tc, tcg_gen_add_tl);
    return nullify_end(ctx, ret);
}

static ExitStatus trans_dcor(DisasContext *ctx, uint32_t insn,
                             const DisasInsn *di)
{
    unsigned r2 = extract32(insn, 21, 5);
    unsigned cf = extract32(insn, 12, 4);
    unsigned is_i = extract32(insn, 6, 1);
    unsigned rt = extract32(insn, 0, 5);
    TCGv tmp;
    ExitStatus ret;

    nullify_over(ctx);

    tmp = get_temp(ctx);
    tcg_gen_shri_tl(tmp, cpu_psw_cb, 3);
    if (!is_i) {
        tcg_gen_not_tl(tmp, tmp);
    }
    tcg_gen_andi_tl(tmp, tmp, 0x11111111);
    tcg_gen_muli_tl(tmp, tmp, 6);
    ret = do_unit(ctx, rt, tmp, load_gpr(ctx, r2), cf, false,
                  is_i ? tcg_gen_add_tl : tcg_gen_sub_tl);
    return nullify_end(ctx, ret);
}

static ExitStatus trans_ds(DisasContext *ctx, uint32_t insn,
                           const DisasInsn *di)
{
    unsigned r2 = extract32(insn, 21, 5);
    unsigned r1 = extract32(insn, 16, 5);
    unsigned cf = extract32(insn, 12, 4);
    unsigned rt = extract32(insn, 0, 5);
    TCGv dest, add1, add2, addc, zero, in1, in2;

    nullify_over(ctx);

    in1 = load_gpr(ctx, r1);
    in2 = load_gpr(ctx, r2);

    add1 = tcg_temp_new();
    add2 = tcg_temp_new();
    addc = tcg_temp_new();
    dest = tcg_temp_new();
    zero = tcg_const_tl(0);

    /* Form R1 << 1 | PSW[CB]{8}.  */
    tcg_gen_add_tl(add1, in1, in1);
    tcg_gen_add_tl(add1, add1, cpu_psw_cb_msb);

    /* Add or subtract R2, depending on PSW[V].  Proper computation of
       carry{8} requires that we subtract via + ~R2 + 1, as described in
       the manual.  By extracting and masking V, we can produce the
       proper inputs to the addition without movcond.  */
    tcg_gen_sari_tl(addc, cpu_psw_v, TARGET_LONG_BITS - 1);
    tcg_gen_xor_tl(add2, in2, addc);
    tcg_gen_andi_tl(addc, addc, 1);
    /* ??? This is only correct for 32-bit.  */
    tcg_gen_add2_i32(dest, cpu_psw_cb_msb, add1, zero, add2, zero);
    tcg_gen_add2_i32(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero);

    tcg_temp_free(addc);
    tcg_temp_free(zero);

    /* Write back the result register.  */
    save_gpr(ctx, rt, dest);

    /* Write back PSW[CB].  */
    tcg_gen_xor_tl(cpu_psw_cb, add1, add2);
    tcg_gen_xor_tl(cpu_psw_cb, cpu_psw_cb, dest);

    /* Write back PSW[V] for the division step.  */
    tcg_gen_neg_tl(cpu_psw_v, cpu_psw_cb_msb);
    tcg_gen_xor_tl(cpu_psw_v, cpu_psw_v, in2);

    /* Install the new nullification.  */
    if (cf) {
        TCGv sv;
        TCGV_UNUSED(sv);
        if (cf >> 1 == 6) {
            /* ??? The lshift is supposed to contribute to overflow.  */
*/ 2099 sv = do_add_sv(ctx, dest, add1, add2); 2100 } 2101 ctx->null_cond = do_cond(cf, dest, cpu_psw_cb_msb, sv); 2102 } 2103 2104 tcg_temp_free(add1); 2105 tcg_temp_free(add2); 2106 tcg_temp_free(dest); 2107 2108 return nullify_end(ctx, NO_EXIT); 2109 } 2110 2111 static const DisasInsn table_arith_log[] = { 2112 { 0x08000240u, 0xfc00ffffu, trans_nop }, /* or x,y,0 */ 2113 { 0x08000240u, 0xffe0ffe0u, trans_copy }, /* or x,0,t */ 2114 { 0x08000000u, 0xfc000fe0u, trans_log, .f_ttt = tcg_gen_andc_tl }, 2115 { 0x08000200u, 0xfc000fe0u, trans_log, .f_ttt = tcg_gen_and_tl }, 2116 { 0x08000240u, 0xfc000fe0u, trans_log, .f_ttt = tcg_gen_or_tl }, 2117 { 0x08000280u, 0xfc000fe0u, trans_log, .f_ttt = tcg_gen_xor_tl }, 2118 { 0x08000880u, 0xfc000fe0u, trans_cmpclr }, 2119 { 0x08000380u, 0xfc000fe0u, trans_uxor }, 2120 { 0x08000980u, 0xfc000fa0u, trans_uaddcm }, 2121 { 0x08000b80u, 0xfc1f0fa0u, trans_dcor }, 2122 { 0x08000440u, 0xfc000fe0u, trans_ds }, 2123 { 0x08000700u, 0xfc0007e0u, trans_add }, /* add */ 2124 { 0x08000400u, 0xfc0006e0u, trans_sub }, /* sub; sub,b; sub,tsv */ 2125 { 0x080004c0u, 0xfc0007e0u, trans_sub }, /* sub,tc; sub,tsv,tc */ 2126 { 0x08000200u, 0xfc000320u, trans_add }, /* shladd */ 2127 }; 2128 2129 static ExitStatus trans_addi(DisasContext *ctx, uint32_t insn) 2130 { 2131 target_long im = low_sextract(insn, 0, 11); 2132 unsigned e1 = extract32(insn, 11, 1); 2133 unsigned cf = extract32(insn, 12, 4); 2134 unsigned rt = extract32(insn, 16, 5); 2135 unsigned r2 = extract32(insn, 21, 5); 2136 unsigned o1 = extract32(insn, 26, 1); 2137 TCGv tcg_im, tcg_r2; 2138 ExitStatus ret; 2139 2140 if (cf) { 2141 nullify_over(ctx); 2142 } 2143 2144 tcg_im = load_const(ctx, im); 2145 tcg_r2 = load_gpr(ctx, r2); 2146 ret = do_add(ctx, rt, tcg_im, tcg_r2, 0, false, e1, !o1, false, cf); 2147 2148 return nullify_end(ctx, ret); 2149 } 2150 2151 static ExitStatus trans_subi(DisasContext *ctx, uint32_t insn) 2152 { 2153 target_long im = low_sextract(insn, 0, 11); 2154 unsigned e1 = extract32(insn, 11, 1); 2155 unsigned cf = extract32(insn, 12, 4); 2156 unsigned rt = extract32(insn, 16, 5); 2157 unsigned r2 = extract32(insn, 21, 5); 2158 TCGv tcg_im, tcg_r2; 2159 ExitStatus ret; 2160 2161 if (cf) { 2162 nullify_over(ctx); 2163 } 2164 2165 tcg_im = load_const(ctx, im); 2166 tcg_r2 = load_gpr(ctx, r2); 2167 ret = do_sub(ctx, rt, tcg_im, tcg_r2, e1, false, false, cf); 2168 2169 return nullify_end(ctx, ret); 2170 } 2171 2172 static ExitStatus trans_cmpiclr(DisasContext *ctx, uint32_t insn) 2173 { 2174 target_long im = low_sextract(insn, 0, 11); 2175 unsigned cf = extract32(insn, 12, 4); 2176 unsigned rt = extract32(insn, 16, 5); 2177 unsigned r2 = extract32(insn, 21, 5); 2178 TCGv tcg_im, tcg_r2; 2179 ExitStatus ret; 2180 2181 if (cf) { 2182 nullify_over(ctx); 2183 } 2184 2185 tcg_im = load_const(ctx, im); 2186 tcg_r2 = load_gpr(ctx, r2); 2187 ret = do_cmpclr(ctx, rt, tcg_im, tcg_r2, cf); 2188 2189 return nullify_end(ctx, ret); 2190 } 2191 2192 static ExitStatus trans_ld_idx_i(DisasContext *ctx, uint32_t insn, 2193 const DisasInsn *di) 2194 { 2195 unsigned rt = extract32(insn, 0, 5); 2196 unsigned m = extract32(insn, 5, 1); 2197 unsigned sz = extract32(insn, 6, 2); 2198 unsigned a = extract32(insn, 13, 1); 2199 int disp = low_sextract(insn, 16, 5); 2200 unsigned rb = extract32(insn, 21, 5); 2201 int modify = (m ? (a ? 
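/* -1: modify-before (a=1), +1: modify-after, 0: no base update; the sign convention matches the modify <= 0 test in trans_ldcw below. */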
-1 : 1) : 0); 2202 TCGMemOp mop = MO_TE | sz; 2203 2204 return do_load(ctx, rt, rb, 0, 0, disp, modify, mop); 2205 } 2206 2207 static ExitStatus trans_ld_idx_x(DisasContext *ctx, uint32_t insn, 2208 const DisasInsn *di) 2209 { 2210 unsigned rt = extract32(insn, 0, 5); 2211 unsigned m = extract32(insn, 5, 1); 2212 unsigned sz = extract32(insn, 6, 2); 2213 unsigned u = extract32(insn, 13, 1); 2214 unsigned rx = extract32(insn, 16, 5); 2215 unsigned rb = extract32(insn, 21, 5); 2216 TCGMemOp mop = MO_TE | sz; 2217 2218 return do_load(ctx, rt, rb, rx, u ? sz : 0, 0, m, mop); 2219 } 2220 2221 static ExitStatus trans_st_idx_i(DisasContext *ctx, uint32_t insn, 2222 const DisasInsn *di) 2223 { 2224 int disp = low_sextract(insn, 0, 5); 2225 unsigned m = extract32(insn, 5, 1); 2226 unsigned sz = extract32(insn, 6, 2); 2227 unsigned a = extract32(insn, 13, 1); 2228 unsigned rr = extract32(insn, 16, 5); 2229 unsigned rb = extract32(insn, 21, 5); 2230 int modify = (m ? (a ? -1 : 1) : 0); 2231 TCGMemOp mop = MO_TE | sz; 2232 2233 return do_store(ctx, rr, rb, disp, modify, mop); 2234 } 2235 2236 static ExitStatus trans_ldcw(DisasContext *ctx, uint32_t insn, 2237 const DisasInsn *di) 2238 { 2239 unsigned rt = extract32(insn, 0, 5); 2240 unsigned m = extract32(insn, 5, 1); 2241 unsigned i = extract32(insn, 12, 1); 2242 unsigned au = extract32(insn, 13, 1); 2243 unsigned rx = extract32(insn, 16, 5); 2244 unsigned rb = extract32(insn, 21, 5); 2245 TCGMemOp mop = MO_TEUL | MO_ALIGN_16; 2246 TCGv zero, addr, base, dest; 2247 int modify, disp = 0, scale = 0; 2248 2249 nullify_over(ctx); 2250 2251 /* ??? Share more code with do_load and do_load_{32,64}. */ 2252 2253 if (i) { 2254 modify = (m ? (au ? -1 : 1) : 0); 2255 disp = low_sextract(rx, 0, 5); 2256 rx = 0; 2257 } else { 2258 modify = m; 2259 if (au) { 2260 scale = mop & MO_SIZE; 2261 } 2262 } 2263 if (modify) { 2264 /* Base register modification. Make sure if RT == RB, we see 2265 the result of the load. */ 2266 dest = get_temp(ctx); 2267 } else { 2268 dest = dest_gpr(ctx, rt); 2269 } 2270 2271 addr = tcg_temp_new(); 2272 base = load_gpr(ctx, rb); 2273 if (rx) { 2274 tcg_gen_shli_tl(addr, cpu_gr[rx], scale); 2275 tcg_gen_add_tl(addr, addr, base); 2276 } else { 2277 tcg_gen_addi_tl(addr, base, disp); 2278 } 2279 2280 zero = tcg_const_tl(0); 2281 tcg_gen_atomic_xchg_tl(dest, (modify <= 0 ? 
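/* Modify-before or no modification: access the displaced address; modify-after: access the unmodified base. */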
addr : base), 2282 zero, MMU_USER_IDX, mop); 2283 if (modify) { 2284 save_gpr(ctx, rb, addr); 2285 } 2286 save_gpr(ctx, rt, dest); 2287 2288 return nullify_end(ctx, NO_EXIT); 2289 } 2290 2291 static ExitStatus trans_stby(DisasContext *ctx, uint32_t insn, 2292 const DisasInsn *di) 2293 { 2294 target_long disp = low_sextract(insn, 0, 5); 2295 unsigned m = extract32(insn, 5, 1); 2296 unsigned a = extract32(insn, 13, 1); 2297 unsigned rt = extract32(insn, 16, 5); 2298 unsigned rb = extract32(insn, 21, 5); 2299 TCGv addr, val; 2300 2301 nullify_over(ctx); 2302 2303 addr = tcg_temp_new(); 2304 if (m || disp == 0) { 2305 tcg_gen_mov_tl(addr, load_gpr(ctx, rb)); 2306 } else { 2307 tcg_gen_addi_tl(addr, load_gpr(ctx, rb), disp); 2308 } 2309 val = load_gpr(ctx, rt); 2310 2311 if (a) { 2312 gen_helper_stby_e(cpu_env, addr, val); 2313 } else { 2314 gen_helper_stby_b(cpu_env, addr, val); 2315 } 2316 2317 if (m) { 2318 tcg_gen_addi_tl(addr, addr, disp); 2319 tcg_gen_andi_tl(addr, addr, ~3); 2320 save_gpr(ctx, rb, addr); 2321 } 2322 tcg_temp_free(addr); 2323 2324 return nullify_end(ctx, NO_EXIT); 2325 } 2326 2327 static const DisasInsn table_index_mem[] = { 2328 { 0x0c001000u, 0xfc001300, trans_ld_idx_i }, /* LD[BHWD], im */ 2329 { 0x0c000000u, 0xfc001300, trans_ld_idx_x }, /* LD[BHWD], rx */ 2330 { 0x0c001200u, 0xfc001300, trans_st_idx_i }, /* ST[BHWD] */ 2331 { 0x0c0001c0u, 0xfc0003c0, trans_ldcw }, 2332 { 0x0c001300u, 0xfc0013c0, trans_stby }, 2333 }; 2334 2335 static ExitStatus trans_ldil(DisasContext *ctx, uint32_t insn) 2336 { 2337 unsigned rt = extract32(insn, 21, 5); 2338 target_long i = assemble_21(insn); 2339 TCGv tcg_rt = dest_gpr(ctx, rt); 2340 2341 tcg_gen_movi_tl(tcg_rt, i); 2342 save_gpr(ctx, rt, tcg_rt); 2343 cond_free(&ctx->null_cond); 2344 2345 return NO_EXIT; 2346 } 2347 2348 static ExitStatus trans_addil(DisasContext *ctx, uint32_t insn) 2349 { 2350 unsigned rt = extract32(insn, 21, 5); 2351 target_long i = assemble_21(insn); 2352 TCGv tcg_rt = load_gpr(ctx, rt); 2353 TCGv tcg_r1 = dest_gpr(ctx, 1); 2354 2355 tcg_gen_addi_tl(tcg_r1, tcg_rt, i); 2356 save_gpr(ctx, 1, tcg_r1); 2357 cond_free(&ctx->null_cond); 2358 2359 return NO_EXIT; 2360 } 2361 2362 static ExitStatus trans_ldo(DisasContext *ctx, uint32_t insn) 2363 { 2364 unsigned rb = extract32(insn, 21, 5); 2365 unsigned rt = extract32(insn, 16, 5); 2366 target_long i = assemble_16(insn); 2367 TCGv tcg_rt = dest_gpr(ctx, rt); 2368 2369 /* Special case rb == 0, for the LDI pseudo-op. 2370 The COPY pseudo-op is handled for free within tcg_gen_addi_tl. */ 2371 if (rb == 0) { 2372 tcg_gen_movi_tl(tcg_rt, i); 2373 } else { 2374 tcg_gen_addi_tl(tcg_rt, cpu_gr[rb], i); 2375 } 2376 save_gpr(ctx, rt, tcg_rt); 2377 cond_free(&ctx->null_cond); 2378 2379 return NO_EXIT; 2380 } 2381 2382 static ExitStatus trans_load(DisasContext *ctx, uint32_t insn, 2383 bool is_mod, TCGMemOp mop) 2384 { 2385 unsigned rb = extract32(insn, 21, 5); 2386 unsigned rt = extract32(insn, 16, 5); 2387 target_long i = assemble_16(insn); 2388 2389 return do_load(ctx, rt, rb, 0, 0, i, is_mod ? (i < 0 ? -1 : 1) : 0, mop); 2390 } 2391 2392 static ExitStatus trans_load_w(DisasContext *ctx, uint32_t insn) 2393 { 2394 unsigned rb = extract32(insn, 21, 5); 2395 unsigned rt = extract32(insn, 16, 5); 2396 target_long i = assemble_16a(insn); 2397 unsigned ext2 = extract32(insn, 1, 2); 2398 2399 switch (ext2) { 2400 case 0: 2401 case 1: 2402 /* FLDW without modification. */ 2403 return do_floadw(ctx, ext2 * 32 + rt, rb, 0, 0, i, 0); 2404 case 2: 2405 /* LDW with modification. 
Note that the sign of I selects 2406 post-dec vs pre-inc. */ 2407 return do_load(ctx, rt, rb, 0, 0, i, (i < 0 ? 1 : -1), MO_TEUL); 2408 default: 2409 return gen_illegal(ctx); 2410 } 2411 } 2412 2413 static ExitStatus trans_fload_mod(DisasContext *ctx, uint32_t insn) 2414 { 2415 target_long i = assemble_16a(insn); 2416 unsigned t1 = extract32(insn, 1, 1); 2417 unsigned a = extract32(insn, 2, 1); 2418 unsigned t0 = extract32(insn, 16, 5); 2419 unsigned rb = extract32(insn, 21, 5); 2420 2421 /* FLDW with modification. */ 2422 return do_floadw(ctx, t1 * 32 + t0, rb, 0, 0, i, (a ? -1 : 1)); 2423 } 2424 2425 static ExitStatus trans_store(DisasContext *ctx, uint32_t insn, 2426 bool is_mod, TCGMemOp mop) 2427 { 2428 unsigned rb = extract32(insn, 21, 5); 2429 unsigned rt = extract32(insn, 16, 5); 2430 target_long i = assemble_16(insn); 2431 2432 return do_store(ctx, rt, rb, i, is_mod ? (i < 0 ? -1 : 1) : 0, mop); 2433 } 2434 2435 static ExitStatus trans_store_w(DisasContext *ctx, uint32_t insn) 2436 { 2437 unsigned rb = extract32(insn, 21, 5); 2438 unsigned rt = extract32(insn, 16, 5); 2439 target_long i = assemble_16a(insn); 2440 unsigned ext2 = extract32(insn, 1, 2); 2441 2442 switch (ext2) { 2443 case 0: 2444 case 1: 2445 /* FSTW without modification. */ 2446 return do_fstorew(ctx, ext2 * 32 + rt, rb, 0, 0, i, 0); 2447 case 2: 2448 /* STW with modification. */ 2449 return do_store(ctx, rt, rb, i, (i < 0 ? 1 : -1), MO_TEUL); 2450 default: 2451 return gen_illegal(ctx); 2452 } 2453 } 2454 2455 static ExitStatus trans_fstore_mod(DisasContext *ctx, uint32_t insn) 2456 { 2457 target_long i = assemble_16a(insn); 2458 unsigned t1 = extract32(insn, 1, 1); 2459 unsigned a = extract32(insn, 2, 1); 2460 unsigned t0 = extract32(insn, 16, 5); 2461 unsigned rb = extract32(insn, 21, 5); 2462 2463 /* FSTW with modification. */ 2464 return do_fstorew(ctx, t1 * 32 + t0, rb, 0, 0, i, (a ? -1 : 1)); 2465 } 2466 2467 static ExitStatus trans_copr_w(DisasContext *ctx, uint32_t insn) 2468 { 2469 unsigned t0 = extract32(insn, 0, 5); 2470 unsigned m = extract32(insn, 5, 1); 2471 unsigned t1 = extract32(insn, 6, 1); 2472 unsigned ext3 = extract32(insn, 7, 3); 2473 /* unsigned cc = extract32(insn, 10, 2); */ 2474 unsigned i = extract32(insn, 12, 1); 2475 unsigned ua = extract32(insn, 13, 1); 2476 unsigned rx = extract32(insn, 16, 5); 2477 unsigned rb = extract32(insn, 21, 5); 2478 unsigned rt = t1 * 32 + t0; 2479 int modify = (m ? (ua ? -1 : 1) : 0); 2480 int disp, scale; 2481 2482 if (i == 0) { 2483 scale = (ua ? 2 : 0); 2484 disp = 0; 2485 modify = m; 2486 } else { 2487 disp = low_sextract(rx, 0, 5); 2488 scale = 0; 2489 rx = 0; 2490 modify = (m ? (ua ? -1 : 1) : 0); 2491 } 2492 2493 switch (ext3) { 2494 case 0: /* FLDW */ 2495 return do_floadw(ctx, rt, rb, rx, scale, disp, modify); 2496 case 4: /* FSTW */ 2497 return do_fstorew(ctx, rt, rb, rx, scale, disp, modify); 2498 } 2499 return gen_illegal(ctx); 2500 } 2501 2502 static ExitStatus trans_copr_dw(DisasContext *ctx, uint32_t insn) 2503 { 2504 unsigned rt = extract32(insn, 0, 5); 2505 unsigned m = extract32(insn, 5, 1); 2506 unsigned ext4 = extract32(insn, 6, 4); 2507 /* unsigned cc = extract32(insn, 10, 2); */ 2508 unsigned i = extract32(insn, 12, 1); 2509 unsigned ua = extract32(insn, 13, 1); 2510 unsigned rx = extract32(insn, 16, 5); 2511 unsigned rb = extract32(insn, 21, 5); 2512 int modify = (m ? (ua ? -1 : 1) : 0); 2513 int disp, scale; 2514 2515 if (i == 0) { 2516 scale = (ua ?
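/* Scaled index for doublewords: shift the index register by 3 (8-byte elements). */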
3 : 0); 2517 disp = 0; 2518 modify = m; 2519 } else { 2520 disp = low_sextract(rx, 0, 5); 2521 scale = 0; 2522 rx = 0; 2523 modify = (m ? (ua ? -1 : 1) : 0); 2524 } 2525 2526 switch (ext4) { 2527 case 0: /* FLDD */ 2528 return do_floadd(ctx, rt, rb, rx, scale, disp, modify); 2529 case 8: /* FSTD */ 2530 return do_fstored(ctx, rt, rb, rx, scale, disp, modify); 2531 default: 2532 return gen_illegal(ctx); 2533 } 2534 } 2535 2536 static ExitStatus trans_cmpb(DisasContext *ctx, uint32_t insn, 2537 bool is_true, bool is_imm, bool is_dw) 2538 { 2539 target_long disp = assemble_12(insn) * 4; 2540 unsigned n = extract32(insn, 1, 1); 2541 unsigned c = extract32(insn, 13, 3); 2542 unsigned r = extract32(insn, 21, 5); 2543 unsigned cf = c * 2 + !is_true; 2544 TCGv dest, in1, in2, sv; 2545 DisasCond cond; 2546 2547 nullify_over(ctx); 2548 2549 if (is_imm) { 2550 in1 = load_const(ctx, low_sextract(insn, 16, 5)); 2551 } else { 2552 in1 = load_gpr(ctx, extract32(insn, 16, 5)); 2553 } 2554 in2 = load_gpr(ctx, r); 2555 dest = get_temp(ctx); 2556 2557 tcg_gen_sub_tl(dest, in1, in2); 2558 2559 TCGV_UNUSED(sv); 2560 if (c == 6) { 2561 sv = do_sub_sv(ctx, dest, in1, in2); 2562 } 2563 2564 cond = do_sub_cond(cf, dest, in1, in2, sv); 2565 return do_cbranch(ctx, disp, n, &cond); 2566 } 2567 2568 static ExitStatus trans_addb(DisasContext *ctx, uint32_t insn, 2569 bool is_true, bool is_imm) 2570 { 2571 target_long disp = assemble_12(insn) * 4; 2572 unsigned n = extract32(insn, 1, 1); 2573 unsigned c = extract32(insn, 13, 3); 2574 unsigned r = extract32(insn, 21, 5); 2575 unsigned cf = c * 2 + !is_true; 2576 TCGv dest, in1, in2, sv, cb_msb; 2577 DisasCond cond; 2578 2579 nullify_over(ctx); 2580 2581 if (is_imm) { 2582 in1 = load_const(ctx, low_sextract(insn, 16, 5)); 2583 } else { 2584 in1 = load_gpr(ctx, extract32(insn, 16, 5)); 2585 } 2586 in2 = load_gpr(ctx, r); 2587 dest = dest_gpr(ctx, r); 2588 TCGV_UNUSED(sv); 2589 TCGV_UNUSED(cb_msb); 2590 2591 switch (c) { 2592 default: 2593 tcg_gen_add_tl(dest, in1, in2); 2594 break; 2595 case 4: case 5: 2596 cb_msb = get_temp(ctx); 2597 tcg_gen_movi_tl(cb_msb, 0); 2598 tcg_gen_add2_tl(dest, cb_msb, in1, cb_msb, in2, cb_msb); 2599 break; 2600 case 6: 2601 tcg_gen_add_tl(dest, in1, in2); 2602 sv = do_add_sv(ctx, dest, in1, in2); 2603 break; 2604 } 2605 2606 cond = do_cond(cf, dest, cb_msb, sv); 2607 return do_cbranch(ctx, disp, n, &cond); 2608 } 2609 2610 static ExitStatus trans_bb(DisasContext *ctx, uint32_t insn) 2611 { 2612 target_long disp = assemble_12(insn) * 4; 2613 unsigned n = extract32(insn, 1, 1); 2614 unsigned c = extract32(insn, 15, 1); 2615 unsigned r = extract32(insn, 16, 5); 2616 unsigned p = extract32(insn, 21, 5); 2617 unsigned i = extract32(insn, 26, 1); 2618 TCGv tmp, tcg_r; 2619 DisasCond cond; 2620 2621 nullify_over(ctx); 2622 2623 tmp = tcg_temp_new(); 2624 tcg_r = load_gpr(ctx, r); 2625 if (i) { 2626 tcg_gen_shli_tl(tmp, tcg_r, p); 2627 } else { 2628 tcg_gen_shl_tl(tmp, tcg_r, cpu_sar); 2629 } 2630 2631 cond = cond_make_0(c ? 
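/* The shift has moved the selected bit into the sign position, so GE tests for bit == 0 and LT tests for bit == 1. */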
TCG_COND_GE : TCG_COND_LT, tmp); 2632 tcg_temp_free(tmp); 2633 return do_cbranch(ctx, disp, n, &cond); 2634 } 2635 2636 static ExitStatus trans_movb(DisasContext *ctx, uint32_t insn, bool is_imm) 2637 { 2638 target_long disp = assemble_12(insn) * 4; 2639 unsigned n = extract32(insn, 1, 1); 2640 unsigned c = extract32(insn, 13, 3); 2641 unsigned t = extract32(insn, 16, 5); 2642 unsigned r = extract32(insn, 21, 5); 2643 TCGv dest; 2644 DisasCond cond; 2645 2646 nullify_over(ctx); 2647 2648 dest = dest_gpr(ctx, r); 2649 if (is_imm) { 2650 tcg_gen_movi_tl(dest, low_sextract(t, 0, 5)); 2651 } else if (t == 0) { 2652 tcg_gen_movi_tl(dest, 0); 2653 } else { 2654 tcg_gen_mov_tl(dest, cpu_gr[t]); 2655 } 2656 2657 cond = do_sed_cond(c, dest); 2658 return do_cbranch(ctx, disp, n, &cond); 2659 } 2660 2661 static ExitStatus trans_shrpw_sar(DisasContext *ctx, uint32_t insn, 2662 const DisasInsn *di) 2663 { 2664 unsigned rt = extract32(insn, 0, 5); 2665 unsigned c = extract32(insn, 13, 3); 2666 unsigned r1 = extract32(insn, 16, 5); 2667 unsigned r2 = extract32(insn, 21, 5); 2668 TCGv dest; 2669 2670 if (c) { 2671 nullify_over(ctx); 2672 } 2673 2674 dest = dest_gpr(ctx, rt); 2675 if (r1 == 0) { 2676 tcg_gen_ext32u_tl(dest, load_gpr(ctx, r2)); 2677 tcg_gen_shr_tl(dest, dest, cpu_sar); 2678 } else if (r1 == r2) { 2679 TCGv_i32 t32 = tcg_temp_new_i32(); 2680 tcg_gen_trunc_tl_i32(t32, load_gpr(ctx, r2)); 2681 tcg_gen_rotr_i32(t32, t32, cpu_sar); 2682 tcg_gen_extu_i32_tl(dest, t32); 2683 tcg_temp_free_i32(t32); 2684 } else { 2685 TCGv_i64 t = tcg_temp_new_i64(); 2686 TCGv_i64 s = tcg_temp_new_i64(); 2687 2688 tcg_gen_concat_tl_i64(t, load_gpr(ctx, r2), load_gpr(ctx, r1)); 2689 tcg_gen_extu_tl_i64(s, cpu_sar); 2690 tcg_gen_shr_i64(t, t, s); 2691 tcg_gen_trunc_i64_tl(dest, t); 2692 2693 tcg_temp_free_i64(t); 2694 tcg_temp_free_i64(s); 2695 } 2696 save_gpr(ctx, rt, dest); 2697 2698 /* Install the new nullification. */ 2699 cond_free(&ctx->null_cond); 2700 if (c) { 2701 ctx->null_cond = do_sed_cond(c, dest); 2702 } 2703 return nullify_end(ctx, NO_EXIT); 2704 } 2705 2706 static ExitStatus trans_shrpw_imm(DisasContext *ctx, uint32_t insn, 2707 const DisasInsn *di) 2708 { 2709 unsigned rt = extract32(insn, 0, 5); 2710 unsigned cpos = extract32(insn, 5, 5); 2711 unsigned c = extract32(insn, 13, 3); 2712 unsigned r1 = extract32(insn, 16, 5); 2713 unsigned r2 = extract32(insn, 21, 5); 2714 unsigned sa = 31 - cpos; 2715 TCGv dest, t2; 2716 2717 if (c) { 2718 nullify_over(ctx); 2719 } 2720 2721 dest = dest_gpr(ctx, rt); 2722 t2 = load_gpr(ctx, r2); 2723 if (r1 == r2) { 2724 TCGv_i32 t32 = tcg_temp_new_i32(); 2725 tcg_gen_trunc_tl_i32(t32, t2); 2726 tcg_gen_rotri_i32(t32, t32, sa); 2727 tcg_gen_extu_i32_tl(dest, t32); 2728 tcg_temp_free_i32(t32); 2729 } else if (r1 == 0) { 2730 tcg_gen_extract_tl(dest, t2, sa, 32 - sa); 2731 } else { 2732 TCGv t0 = tcg_temp_new(); 2733 tcg_gen_extract_tl(t0, t2, sa, 32 - sa); 2734 tcg_gen_deposit_tl(dest, t0, cpu_gr[r1], 32 - sa, sa); 2735 tcg_temp_free(t0); 2736 } 2737 save_gpr(ctx, rt, dest); 2738 2739 /* Install the new nullification. 
*/ 2740 cond_free(&ctx->null_cond); 2741 if (c) { 2742 ctx->null_cond = do_sed_cond(c, dest); 2743 } 2744 return nullify_end(ctx, NO_EXIT); 2745 } 2746 2747 static ExitStatus trans_extrw_sar(DisasContext *ctx, uint32_t insn, 2748 const DisasInsn *di) 2749 { 2750 unsigned clen = extract32(insn, 0, 5); 2751 unsigned is_se = extract32(insn, 10, 1); 2752 unsigned c = extract32(insn, 13, 3); 2753 unsigned rt = extract32(insn, 16, 5); 2754 unsigned rr = extract32(insn, 21, 5); 2755 unsigned len = 32 - clen; 2756 TCGv dest, src, tmp; 2757 2758 if (c) { 2759 nullify_over(ctx); 2760 } 2761 2762 dest = dest_gpr(ctx, rt); 2763 src = load_gpr(ctx, rr); 2764 tmp = tcg_temp_new(); 2765 2766 /* Recall that SAR is using big-endian bit numbering. */ 2767 tcg_gen_xori_tl(tmp, cpu_sar, TARGET_LONG_BITS - 1); 2768 if (is_se) { 2769 tcg_gen_sar_tl(dest, src, tmp); 2770 tcg_gen_sextract_tl(dest, dest, 0, len); 2771 } else { 2772 tcg_gen_shr_tl(dest, src, tmp); 2773 tcg_gen_extract_tl(dest, dest, 0, len); 2774 } 2775 tcg_temp_free(tmp); 2776 save_gpr(ctx, rt, dest); 2777 2778 /* Install the new nullification. */ 2779 cond_free(&ctx->null_cond); 2780 if (c) { 2781 ctx->null_cond = do_sed_cond(c, dest); 2782 } 2783 return nullify_end(ctx, NO_EXIT); 2784 } 2785 2786 static ExitStatus trans_extrw_imm(DisasContext *ctx, uint32_t insn, 2787 const DisasInsn *di) 2788 { 2789 unsigned clen = extract32(insn, 0, 5); 2790 unsigned pos = extract32(insn, 5, 5); 2791 unsigned is_se = extract32(insn, 10, 1); 2792 unsigned c = extract32(insn, 13, 3); 2793 unsigned rt = extract32(insn, 16, 5); 2794 unsigned rr = extract32(insn, 21, 5); 2795 unsigned len = 32 - clen; 2796 unsigned cpos = 31 - pos; 2797 TCGv dest, src; 2798 2799 if (c) { 2800 nullify_over(ctx); 2801 } 2802 2803 dest = dest_gpr(ctx, rt); 2804 src = load_gpr(ctx, rr); 2805 if (is_se) { 2806 tcg_gen_sextract_tl(dest, src, cpos, len); 2807 } else { 2808 tcg_gen_extract_tl(dest, src, cpos, len); 2809 } 2810 save_gpr(ctx, rt, dest); 2811 2812 /* Install the new nullification. */ 2813 cond_free(&ctx->null_cond); 2814 if (c) { 2815 ctx->null_cond = do_sed_cond(c, dest); 2816 } 2817 return nullify_end(ctx, NO_EXIT); 2818 } 2819 2820 static const DisasInsn table_sh_ex[] = { 2821 { 0xd0000000u, 0xfc001fe0u, trans_shrpw_sar }, 2822 { 0xd0000800u, 0xfc001c00u, trans_shrpw_imm }, 2823 { 0xd0001000u, 0xfc001be0u, trans_extrw_sar }, 2824 { 0xd0001800u, 0xfc001800u, trans_extrw_imm }, 2825 }; 2826 2827 static ExitStatus trans_depw_imm_c(DisasContext *ctx, uint32_t insn, 2828 const DisasInsn *di) 2829 { 2830 unsigned clen = extract32(insn, 0, 5); 2831 unsigned cpos = extract32(insn, 5, 5); 2832 unsigned nz = extract32(insn, 10, 1); 2833 unsigned c = extract32(insn, 13, 3); 2834 target_long val = low_sextract(insn, 16, 5); 2835 unsigned rt = extract32(insn, 21, 5); 2836 unsigned len = 32 - clen; 2837 target_long mask0, mask1; 2838 TCGv dest; 2839 2840 if (c) { 2841 nullify_over(ctx); 2842 } 2843 if (cpos + len > 32) { 2844 len = 32 - cpos; 2845 } 2846 2847 dest = dest_gpr(ctx, rt); 2848 mask0 = deposit64(0, cpos, len, val); 2849 mask1 = deposit64(-1, cpos, len, val); 2850 2851 if (nz) { 2852 TCGv src = load_gpr(ctx, rt); 2853 if (mask1 != -1) { 2854 tcg_gen_andi_tl(dest, src, mask1); 2855 src = dest; 2856 } 2857 tcg_gen_ori_tl(dest, src, mask0); 2858 } else { 2859 tcg_gen_movi_tl(dest, mask0); 2860 } 2861 save_gpr(ctx, rt, dest); 2862 2863 /* Install the new nullification. 
*/ 2864 cond_free(&ctx->null_cond); 2865 if (c) { 2866 ctx->null_cond = do_sed_cond(c, dest); 2867 } 2868 return nullify_end(ctx, NO_EXIT); 2869 } 2870 2871 static ExitStatus trans_depw_imm(DisasContext *ctx, uint32_t insn, 2872 const DisasInsn *di) 2873 { 2874 unsigned clen = extract32(insn, 0, 5); 2875 unsigned cpos = extract32(insn, 5, 5); 2876 unsigned nz = extract32(insn, 10, 1); 2877 unsigned c = extract32(insn, 13, 3); 2878 unsigned rr = extract32(insn, 16, 5); 2879 unsigned rt = extract32(insn, 21, 5); 2880 unsigned rs = nz ? rt : 0; 2881 unsigned len = 32 - clen; 2882 TCGv dest, val; 2883 2884 if (c) { 2885 nullify_over(ctx); 2886 } 2887 if (cpos + len > 32) { 2888 len = 32 - cpos; 2889 } 2890 2891 dest = dest_gpr(ctx, rt); 2892 val = load_gpr(ctx, rr); 2893 if (rs == 0) { 2894 tcg_gen_deposit_z_tl(dest, val, cpos, len); 2895 } else { 2896 tcg_gen_deposit_tl(dest, cpu_gr[rs], val, cpos, len); 2897 } 2898 save_gpr(ctx, rt, dest); 2899 2900 /* Install the new nullification. */ 2901 cond_free(&ctx->null_cond); 2902 if (c) { 2903 ctx->null_cond = do_sed_cond(c, dest); 2904 } 2905 return nullify_end(ctx, NO_EXIT); 2906 } 2907 2908 static ExitStatus trans_depw_sar(DisasContext *ctx, uint32_t insn, 2909 const DisasInsn *di) 2910 { 2911 unsigned clen = extract32(insn, 0, 5); 2912 unsigned nz = extract32(insn, 10, 1); 2913 unsigned i = extract32(insn, 12, 1); 2914 unsigned c = extract32(insn, 13, 3); 2915 unsigned rt = extract32(insn, 21, 5); 2916 unsigned rs = nz ? rt : 0; 2917 unsigned len = 32 - clen; 2918 TCGv val, mask, tmp, shift, dest; 2919 unsigned msb = 1U << (len - 1); 2920 2921 if (c) { 2922 nullify_over(ctx); 2923 } 2924 2925 if (i) { 2926 val = load_const(ctx, low_sextract(insn, 16, 5)); 2927 } else { 2928 val = load_gpr(ctx, extract32(insn, 16, 5)); 2929 } 2930 dest = dest_gpr(ctx, rt); 2931 shift = tcg_temp_new(); 2932 tmp = tcg_temp_new(); 2933 2934 /* Convert big-endian bit numbering in SAR to left-shift. */ 2935 tcg_gen_xori_tl(shift, cpu_sar, TARGET_LONG_BITS - 1); 2936 2937 mask = tcg_const_tl(msb + (msb - 1)); 2938 tcg_gen_and_tl(tmp, val, mask); 2939 if (rs) { 2940 tcg_gen_shl_tl(mask, mask, shift); 2941 tcg_gen_shl_tl(tmp, tmp, shift); 2942 tcg_gen_andc_tl(dest, cpu_gr[rs], mask); 2943 tcg_gen_or_tl(dest, dest, tmp); 2944 } else { 2945 tcg_gen_shl_tl(dest, tmp, shift); 2946 } 2947 tcg_temp_free(shift); 2948 tcg_temp_free(mask); 2949 tcg_temp_free(tmp); 2950 save_gpr(ctx, rt, dest); 2951 2952 /* Install the new nullification. */ 2953 cond_free(&ctx->null_cond); 2954 if (c) { 2955 ctx->null_cond = do_sed_cond(c, dest); 2956 } 2957 return nullify_end(ctx, NO_EXIT); 2958 } 2959 2960 static const DisasInsn table_depw[] = { 2961 { 0xd4000000u, 0xfc000be0u, trans_depw_sar }, 2962 { 0xd4000800u, 0xfc001800u, trans_depw_imm }, 2963 { 0xd4001800u, 0xfc001800u, trans_depw_imm_c }, 2964 }; 2965 2966 static ExitStatus trans_be(DisasContext *ctx, uint32_t insn, bool is_l) 2967 { 2968 unsigned n = extract32(insn, 1, 1); 2969 unsigned b = extract32(insn, 21, 5); 2970 target_long disp = assemble_17(insn); 2971 2972 /* unsigned s = low_uextract(insn, 13, 3); */ 2973 /* ??? It seems like there should be a good way of using 2974 "be disp(sr2, r0)", the canonical gateway entry mechanism 2975 to our advantage. But that appears to be inconvenient to 2976 manage along side branch delay slots. Therefore we handle 2977 entry into the gateway page via absolute address. */ 2978 2979 /* Since we don't implement spaces, just branch. 
Do notice the special 2980 case of "be disp(*,r0)" using a direct branch to disp, so that we can 2981 goto_tb to the TB containing the syscall. */ 2982 if (b == 0) { 2983 return do_dbranch(ctx, disp, is_l ? 31 : 0, n); 2984 } else { 2985 TCGv tmp = get_temp(ctx); 2986 tcg_gen_addi_tl(tmp, load_gpr(ctx, b), disp); 2987 return do_ibranch(ctx, tmp, is_l ? 31 : 0, n); 2988 } 2989 } 2990 2991 static ExitStatus trans_bl(DisasContext *ctx, uint32_t insn, 2992 const DisasInsn *di) 2993 { 2994 unsigned n = extract32(insn, 1, 1); 2995 unsigned link = extract32(insn, 21, 5); 2996 target_long disp = assemble_17(insn); 2997 2998 return do_dbranch(ctx, iaoq_dest(ctx, disp), link, n); 2999 } 3000 3001 static ExitStatus trans_bl_long(DisasContext *ctx, uint32_t insn, 3002 const DisasInsn *di) 3003 { 3004 unsigned n = extract32(insn, 1, 1); 3005 target_long disp = assemble_22(insn); 3006 3007 return do_dbranch(ctx, iaoq_dest(ctx, disp), 2, n); 3008 } 3009 3010 static ExitStatus trans_blr(DisasContext *ctx, uint32_t insn, 3011 const DisasInsn *di) 3012 { 3013 unsigned n = extract32(insn, 1, 1); 3014 unsigned rx = extract32(insn, 16, 5); 3015 unsigned link = extract32(insn, 21, 5); 3016 TCGv tmp = get_temp(ctx); 3017 3018 tcg_gen_shli_tl(tmp, load_gpr(ctx, rx), 3); 3019 tcg_gen_addi_tl(tmp, tmp, ctx->iaoq_f + 8); 3020 return do_ibranch(ctx, tmp, link, n); 3021 } 3022 3023 static ExitStatus trans_bv(DisasContext *ctx, uint32_t insn, 3024 const DisasInsn *di) 3025 { 3026 unsigned n = extract32(insn, 1, 1); 3027 unsigned rx = extract32(insn, 16, 5); 3028 unsigned rb = extract32(insn, 21, 5); 3029 TCGv dest; 3030 3031 if (rx == 0) { 3032 dest = load_gpr(ctx, rb); 3033 } else { 3034 dest = get_temp(ctx); 3035 tcg_gen_shli_tl(dest, load_gpr(ctx, rx), 3); 3036 tcg_gen_add_tl(dest, dest, load_gpr(ctx, rb)); 3037 } 3038 return do_ibranch(ctx, dest, 0, n); 3039 } 3040 3041 static ExitStatus trans_bve(DisasContext *ctx, uint32_t insn, 3042 const DisasInsn *di) 3043 { 3044 unsigned n = extract32(insn, 1, 1); 3045 unsigned rb = extract32(insn, 21, 5); 3046 unsigned link = extract32(insn, 13, 1) ? 
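/* With the L bit set, link through r2; otherwise pass 0 for no link. */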
2 : 0; 3047 3048 return do_ibranch(ctx, load_gpr(ctx, rb), link, n); 3049 } 3050 3051 static const DisasInsn table_branch[] = { 3052 { 0xe8000000u, 0xfc006000u, trans_bl }, /* B,L and B,L,PUSH */ 3053 { 0xe800a000u, 0xfc00e000u, trans_bl_long }, 3054 { 0xe8004000u, 0xfc00fffdu, trans_blr }, 3055 { 0xe800c000u, 0xfc00fffdu, trans_bv }, 3056 { 0xe800d000u, 0xfc00dffcu, trans_bve }, 3057 }; 3058 3059 static ExitStatus trans_fop_wew_0c(DisasContext *ctx, uint32_t insn, 3060 const DisasInsn *di) 3061 { 3062 unsigned rt = extract32(insn, 0, 5); 3063 unsigned ra = extract32(insn, 21, 5); 3064 return do_fop_wew(ctx, rt, ra, di->f_wew); 3065 } 3066 3067 static ExitStatus trans_fop_wew_0e(DisasContext *ctx, uint32_t insn, 3068 const DisasInsn *di) 3069 { 3070 unsigned rt = assemble_rt64(insn); 3071 unsigned ra = assemble_ra64(insn); 3072 return do_fop_wew(ctx, rt, ra, di->f_wew); 3073 } 3074 3075 static ExitStatus trans_fop_ded(DisasContext *ctx, uint32_t insn, 3076 const DisasInsn *di) 3077 { 3078 unsigned rt = extract32(insn, 0, 5); 3079 unsigned ra = extract32(insn, 21, 5); 3080 return do_fop_ded(ctx, rt, ra, di->f_ded); 3081 } 3082 3083 static ExitStatus trans_fop_wed_0c(DisasContext *ctx, uint32_t insn, 3084 const DisasInsn *di) 3085 { 3086 unsigned rt = extract32(insn, 0, 5); 3087 unsigned ra = extract32(insn, 21, 5); 3088 return do_fop_wed(ctx, rt, ra, di->f_wed); 3089 } 3090 3091 static ExitStatus trans_fop_wed_0e(DisasContext *ctx, uint32_t insn, 3092 const DisasInsn *di) 3093 { 3094 unsigned rt = assemble_rt64(insn); 3095 unsigned ra = extract32(insn, 21, 5); 3096 return do_fop_wed(ctx, rt, ra, di->f_wed); 3097 } 3098 3099 static ExitStatus trans_fop_dew_0c(DisasContext *ctx, uint32_t insn, 3100 const DisasInsn *di) 3101 { 3102 unsigned rt = extract32(insn, 0, 5); 3103 unsigned ra = extract32(insn, 21, 5); 3104 return do_fop_dew(ctx, rt, ra, di->f_dew); 3105 } 3106 3107 static ExitStatus trans_fop_dew_0e(DisasContext *ctx, uint32_t insn, 3108 const DisasInsn *di) 3109 { 3110 unsigned rt = extract32(insn, 0, 5); 3111 unsigned ra = assemble_ra64(insn); 3112 return do_fop_dew(ctx, rt, ra, di->f_dew); 3113 } 3114 3115 static ExitStatus trans_fop_weww_0c(DisasContext *ctx, uint32_t insn, 3116 const DisasInsn *di) 3117 { 3118 unsigned rt = extract32(insn, 0, 5); 3119 unsigned rb = extract32(insn, 16, 5); 3120 unsigned ra = extract32(insn, 21, 5); 3121 return do_fop_weww(ctx, rt, ra, rb, di->f_weww); 3122 } 3123 3124 static ExitStatus trans_fop_weww_0e(DisasContext *ctx, uint32_t insn, 3125 const DisasInsn *di) 3126 { 3127 unsigned rt = assemble_rt64(insn); 3128 unsigned rb = assemble_rb64(insn); 3129 unsigned ra = assemble_ra64(insn); 3130 return do_fop_weww(ctx, rt, ra, rb, di->f_weww); 3131 } 3132 3133 static ExitStatus trans_fop_dedd(DisasContext *ctx, uint32_t insn, 3134 const DisasInsn *di) 3135 { 3136 unsigned rt = extract32(insn, 0, 5); 3137 unsigned rb = extract32(insn, 16, 5); 3138 unsigned ra = extract32(insn, 21, 5); 3139 return do_fop_dedd(ctx, rt, ra, rb, di->f_dedd); 3140 } 3141 3142 static void gen_fcpy_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src) 3143 { 3144 tcg_gen_mov_i32(dst, src); 3145 } 3146 3147 static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src) 3148 { 3149 tcg_gen_mov_i64(dst, src); 3150 } 3151 3152 static void gen_fabs_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src) 3153 { 3154 tcg_gen_andi_i32(dst, src, INT32_MAX); 3155 } 3156 3157 static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src) 3158 { 3159 tcg_gen_andi_i64(dst, src, INT64_MAX); 3160 
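/* That is, clear the sign bit; the fneg/fnegabs helpers below flip or set it, respectively. */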
} 3161 3162 static void gen_fneg_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src) 3163 { 3164 tcg_gen_xori_i32(dst, src, INT32_MIN); 3165 } 3166 3167 static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src) 3168 { 3169 tcg_gen_xori_i64(dst, src, INT64_MIN); 3170 } 3171 3172 static void gen_fnegabs_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src) 3173 { 3174 tcg_gen_ori_i32(dst, src, INT32_MIN); 3175 } 3176 3177 static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src) 3178 { 3179 tcg_gen_ori_i64(dst, src, INT64_MIN); 3180 } 3181 3182 static ExitStatus do_fcmp_s(DisasContext *ctx, unsigned ra, unsigned rb, 3183 unsigned y, unsigned c) 3184 { 3185 TCGv_i32 ta, tb, tc, ty; 3186 3187 nullify_over(ctx); 3188 3189 ta = load_frw0_i32(ra); 3190 tb = load_frw0_i32(rb); 3191 ty = tcg_const_i32(y); 3192 tc = tcg_const_i32(c); 3193 3194 gen_helper_fcmp_s(cpu_env, ta, tb, ty, tc); 3195 3196 tcg_temp_free_i32(ta); 3197 tcg_temp_free_i32(tb); 3198 tcg_temp_free_i32(ty); 3199 tcg_temp_free_i32(tc); 3200 3201 return nullify_end(ctx, NO_EXIT); 3202 } 3203 3204 static ExitStatus trans_fcmp_s_0c(DisasContext *ctx, uint32_t insn, 3205 const DisasInsn *di) 3206 { 3207 unsigned c = extract32(insn, 0, 5); 3208 unsigned y = extract32(insn, 13, 3); 3209 unsigned rb = extract32(insn, 16, 5); 3210 unsigned ra = extract32(insn, 21, 5); 3211 return do_fcmp_s(ctx, ra, rb, y, c); 3212 } 3213 3214 static ExitStatus trans_fcmp_s_0e(DisasContext *ctx, uint32_t insn, 3215 const DisasInsn *di) 3216 { 3217 unsigned c = extract32(insn, 0, 5); 3218 unsigned y = extract32(insn, 13, 3); 3219 unsigned rb = assemble_rb64(insn); 3220 unsigned ra = assemble_ra64(insn); 3221 return do_fcmp_s(ctx, ra, rb, y, c); 3222 } 3223 3224 static ExitStatus trans_fcmp_d(DisasContext *ctx, uint32_t insn, 3225 const DisasInsn *di) 3226 { 3227 unsigned c = extract32(insn, 0, 5); 3228 unsigned y = extract32(insn, 13, 3); 3229 unsigned rb = extract32(insn, 16, 5); 3230 unsigned ra = extract32(insn, 21, 5); 3231 TCGv_i64 ta, tb; 3232 TCGv_i32 tc, ty; 3233 3234 nullify_over(ctx); 3235 3236 ta = load_frd0(ra); 3237 tb = load_frd0(rb); 3238 ty = tcg_const_i32(y); 3239 tc = tcg_const_i32(c); 3240 3241 gen_helper_fcmp_d(cpu_env, ta, tb, ty, tc); 3242 3243 tcg_temp_free_i64(ta); 3244 tcg_temp_free_i64(tb); 3245 tcg_temp_free_i32(ty); 3246 tcg_temp_free_i32(tc); 3247 3248 return nullify_end(ctx, NO_EXIT); 3249 } 3250 3251 static ExitStatus trans_ftest_t(DisasContext *ctx, uint32_t insn, 3252 const DisasInsn *di) 3253 { 3254 unsigned y = extract32(insn, 13, 3); 3255 unsigned cbit = (y ^ 1) - 1; 3256 TCGv t; 3257 3258 nullify_over(ctx); 3259 3260 t = tcg_temp_new(); 3261 tcg_gen_ld32u_tl(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow)); 3262 tcg_gen_extract_tl(t, t, 21 - cbit, 1); 3263 ctx->null_cond = cond_make_0(TCG_COND_NE, t); 3264 tcg_temp_free(t); 3265 3266 return nullify_end(ctx, NO_EXIT); 3267 } 3268 3269 static ExitStatus trans_ftest_q(DisasContext *ctx, uint32_t insn, 3270 const DisasInsn *di) 3271 { 3272 unsigned c = extract32(insn, 0, 5); 3273 int mask; 3274 bool inv = false; 3275 TCGv t; 3276 3277 nullify_over(ctx); 3278 3279 t = tcg_temp_new(); 3280 tcg_gen_ld32u_tl(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow)); 3281 3282 switch (c) { 3283 case 0: /* simple */ 3284 tcg_gen_andi_tl(t, t, 0x4000000); 3285 ctx->null_cond = cond_make_0(TCG_COND_NE, t); 3286 goto done; 3287 case 2: /* rej */ 3288 inv = true; 3289 /* fallthru */ 3290 case 1: /* acc */ 3291 mask = 0x43ff800; 3292 break; 3293 case 6: /* rej8 */ 3294 inv = true; 3295 /* 
fallthru */ 3296 case 5: /* acc8 */ 3297 mask = 0x43f8000; 3298 break; 3299 case 9: /* acc6 */ 3300 mask = 0x43e0000; 3301 break; 3302 case 13: /* acc4 */ 3303 mask = 0x4380000; 3304 break; 3305 case 17: /* acc2 */ 3306 mask = 0x4200000; 3307 break; 3308 default: 3309 return gen_illegal(ctx); 3310 } 3311 if (inv) { 3312 TCGv c = load_const(ctx, mask); 3313 tcg_gen_or_tl(t, t, c); 3314 ctx->null_cond = cond_make(TCG_COND_EQ, t, c); 3315 } else { 3316 tcg_gen_andi_tl(t, t, mask); 3317 ctx->null_cond = cond_make_0(TCG_COND_EQ, t); 3318 } 3319 done: 3320 return nullify_end(ctx, NO_EXIT); 3321 } 3322 3323 static ExitStatus trans_xmpyu(DisasContext *ctx, uint32_t insn, 3324 const DisasInsn *di) 3325 { 3326 unsigned rt = extract32(insn, 0, 5); 3327 unsigned rb = assemble_rb64(insn); 3328 unsigned ra = assemble_ra64(insn); 3329 TCGv_i64 a, b; 3330 3331 nullify_over(ctx); 3332 3333 a = load_frw0_i64(ra); 3334 b = load_frw0_i64(rb); 3335 tcg_gen_mul_i64(a, a, b); 3336 save_frd(rt, a); 3337 tcg_temp_free_i64(a); 3338 tcg_temp_free_i64(b); 3339 3340 return nullify_end(ctx, NO_EXIT); 3341 } 3342 3343 #define FOP_DED trans_fop_ded, .f_ded 3344 #define FOP_DEDD trans_fop_dedd, .f_dedd 3345 3346 #define FOP_WEW trans_fop_wew_0c, .f_wew 3347 #define FOP_DEW trans_fop_dew_0c, .f_dew 3348 #define FOP_WED trans_fop_wed_0c, .f_wed 3349 #define FOP_WEWW trans_fop_weww_0c, .f_weww 3350 3351 static const DisasInsn table_float_0c[] = { 3352 /* floating point class zero */ 3353 { 0x30004000, 0xfc1fffe0, FOP_WEW = gen_fcpy_s }, 3354 { 0x30006000, 0xfc1fffe0, FOP_WEW = gen_fabs_s }, 3355 { 0x30008000, 0xfc1fffe0, FOP_WEW = gen_helper_fsqrt_s }, 3356 { 0x3000a000, 0xfc1fffe0, FOP_WEW = gen_helper_frnd_s }, 3357 { 0x3000c000, 0xfc1fffe0, FOP_WEW = gen_fneg_s }, 3358 { 0x3000e000, 0xfc1fffe0, FOP_WEW = gen_fnegabs_s }, 3359 3360 { 0x30004800, 0xfc1fffe0, FOP_DED = gen_fcpy_d }, 3361 { 0x30006800, 0xfc1fffe0, FOP_DED = gen_fabs_d }, 3362 { 0x30008800, 0xfc1fffe0, FOP_DED = gen_helper_fsqrt_d }, 3363 { 0x3000a800, 0xfc1fffe0, FOP_DED = gen_helper_frnd_d }, 3364 { 0x3000c800, 0xfc1fffe0, FOP_DED = gen_fneg_d }, 3365 { 0x3000e800, 0xfc1fffe0, FOP_DED = gen_fnegabs_d }, 3366 3367 /* floating point class three */ 3368 { 0x30000600, 0xfc00ffe0, FOP_WEWW = gen_helper_fadd_s }, 3369 { 0x30002600, 0xfc00ffe0, FOP_WEWW = gen_helper_fsub_s }, 3370 { 0x30004600, 0xfc00ffe0, FOP_WEWW = gen_helper_fmpy_s }, 3371 { 0x30006600, 0xfc00ffe0, FOP_WEWW = gen_helper_fdiv_s }, 3372 3373 { 0x30000e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fadd_d }, 3374 { 0x30002e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fsub_d }, 3375 { 0x30004e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fmpy_d }, 3376 { 0x30006e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fdiv_d }, 3377 3378 /* floating point class one */ 3379 /* float/float */ 3380 { 0x30000a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_d_s }, 3381 { 0x30002200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_s_d }, 3382 /* int/float */ 3383 { 0x30008200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_w_s }, 3384 { 0x30008a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_dw_s }, 3385 { 0x3000a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_w_d }, 3386 { 0x3000aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_dw_d }, 3387 /* float/int */ 3388 { 0x30010200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_s_w }, 3389 { 0x30010a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_d_w }, 3390 { 0x30012200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_s_dw }, 3391 { 0x30012a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_dw }, 3392 /* float/int truncate */ 3393 { 0x30018200, 0xfc1fffe0, FOP_WEW = 
gen_helper_fcnv_t_s_w }, 3394 { 0x30018a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_t_d_w }, 3395 { 0x3001a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_t_s_dw }, 3396 { 0x3001aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_dw }, 3397 /* uint/float */ 3398 { 0x30028200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_uw_s }, 3399 { 0x30028a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_udw_s }, 3400 { 0x3002a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_uw_d }, 3401 { 0x3002aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_udw_d }, 3402 /* float/uint */ 3403 { 0x30030200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_s_uw }, 3404 { 0x30030a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_d_uw }, 3405 { 0x30032200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_s_udw }, 3406 { 0x30032a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_udw }, 3407 /* float/uint truncate */ 3408 { 0x30038200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_t_s_uw }, 3409 { 0x30038a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_t_d_uw }, 3410 { 0x3003a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_t_s_udw }, 3411 { 0x3003aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_udw }, 3412 3413 /* floating point class two */ 3414 { 0x30000400, 0xfc001fe0, trans_fcmp_s_0c }, 3415 { 0x30000c00, 0xfc001fe0, trans_fcmp_d }, 3416 { 0x30002420, 0xffffffe0, trans_ftest_q }, 3417 { 0x30000420, 0xffff1fff, trans_ftest_t }, 3418 3419 /* FID. Note that ra == rt == 0, which via fcpy puts 0 into fr0. 3420 This is machine/revision == 0, which is reserved for simulator. */ 3421 { 0x30000000, 0xffffffff, FOP_WEW = gen_fcpy_s }, 3422 }; 3423 3424 #undef FOP_WEW 3425 #undef FOP_DEW 3426 #undef FOP_WED 3427 #undef FOP_WEWW 3428 #define FOP_WEW trans_fop_wew_0e, .f_wew 3429 #define FOP_DEW trans_fop_dew_0e, .f_dew 3430 #define FOP_WED trans_fop_wed_0e, .f_wed 3431 #define FOP_WEWW trans_fop_weww_0e, .f_weww 3432 3433 static const DisasInsn table_float_0e[] = { 3434 /* floating point class zero */ 3435 { 0x38004000, 0xfc1fff20, FOP_WEW = gen_fcpy_s }, 3436 { 0x38006000, 0xfc1fff20, FOP_WEW = gen_fabs_s }, 3437 { 0x38008000, 0xfc1fff20, FOP_WEW = gen_helper_fsqrt_s }, 3438 { 0x3800a000, 0xfc1fff20, FOP_WEW = gen_helper_frnd_s }, 3439 { 0x3800c000, 0xfc1fff20, FOP_WEW = gen_fneg_s }, 3440 { 0x3800e000, 0xfc1fff20, FOP_WEW = gen_fnegabs_s }, 3441 3442 { 0x38004800, 0xfc1fffe0, FOP_DED = gen_fcpy_d }, 3443 { 0x38006800, 0xfc1fffe0, FOP_DED = gen_fabs_d }, 3444 { 0x38008800, 0xfc1fffe0, FOP_DED = gen_helper_fsqrt_d }, 3445 { 0x3800a800, 0xfc1fffe0, FOP_DED = gen_helper_frnd_d }, 3446 { 0x3800c800, 0xfc1fffe0, FOP_DED = gen_fneg_d }, 3447 { 0x3800e800, 0xfc1fffe0, FOP_DED = gen_fnegabs_d }, 3448 3449 /* floating point class three */ 3450 { 0x38000600, 0xfc00ef20, FOP_WEWW = gen_helper_fadd_s }, 3451 { 0x38002600, 0xfc00ef20, FOP_WEWW = gen_helper_fsub_s }, 3452 { 0x38004600, 0xfc00ef20, FOP_WEWW = gen_helper_fmpy_s }, 3453 { 0x38006600, 0xfc00ef20, FOP_WEWW = gen_helper_fdiv_s }, 3454 3455 { 0x38000e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fadd_d }, 3456 { 0x38002e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fsub_d }, 3457 { 0x38004e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fmpy_d }, 3458 { 0x38006e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fdiv_d }, 3459 3460 { 0x38004700, 0xfc00ef60, trans_xmpyu }, 3461 3462 /* floating point class one */ 3463 /* float/float */ 3464 { 0x38000a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_d_s }, 3465 { 0x38002200, 0xfc1fffc0, FOP_DEW = gen_helper_fcnv_s_d }, 3466 /* int/float */ 3467 { 0x38008200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_w_s }, 3468 { 0x38008a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_dw_s }, 3469 { 
0x3800a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_w_d }, 3470 { 0x3800aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_dw_d }, 3471 /* float/int */ 3472 { 0x38010200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_s_w }, 3473 { 0x38010a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_d_w }, 3474 { 0x38012200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_s_dw }, 3475 { 0x38012a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_dw }, 3476 /* float/int truncate */ 3477 { 0x38018200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_t_s_w }, 3478 { 0x38018a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_t_d_w }, 3479 { 0x3801a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_t_s_dw }, 3480 { 0x3801aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_dw }, 3481 /* uint/float */ 3482 { 0x38028200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_uw_s }, 3483 { 0x38028a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_udw_s }, 3484 { 0x3802a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_uw_d }, 3485 { 0x3802aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_udw_d }, 3486 /* float/uint */ 3487 { 0x38030200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_s_uw }, 3488 { 0x38030a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_d_uw }, 3489 { 0x38032200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_s_udw }, 3490 { 0x38032a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_udw }, 3491 /* float/uint truncate */ 3492 { 0x38038200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_t_s_uw }, 3493 { 0x38038a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_t_d_uw }, 3494 { 0x3803a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_t_s_udw }, 3495 { 0x3803aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_udw }, 3496 3497 /* floating point class two */ 3498 { 0x38000400, 0xfc000f60, trans_fcmp_s_0e }, 3499 { 0x38000c00, 0xfc001fe0, trans_fcmp_d }, 3500 }; 3501 3502 #undef FOP_WEW 3503 #undef FOP_DEW 3504 #undef FOP_WED 3505 #undef FOP_WEWW 3506 #undef FOP_DED 3507 #undef FOP_DEDD 3508 3509 /* Convert the fmpyadd single-precision register encodings to standard. */ 3510 static inline int fmpyadd_s_reg(unsigned r) 3511 { 3512 return (r & 16) * 2 + 16 + (r & 15); 3513 } 3514 3515 static ExitStatus trans_fmpyadd(DisasContext *ctx, uint32_t insn, bool is_sub) 3516 { 3517 unsigned tm = extract32(insn, 0, 5); 3518 unsigned f = extract32(insn, 5, 1); 3519 unsigned ra = extract32(insn, 6, 5); 3520 unsigned ta = extract32(insn, 11, 5); 3521 unsigned rm2 = extract32(insn, 16, 5); 3522 unsigned rm1 = extract32(insn, 21, 5); 3523 3524 nullify_over(ctx); 3525 3526 /* Independent multiply & add/sub, with undefined behaviour 3527 if outputs overlap inputs. */ 3528 if (f == 0) { 3529 tm = fmpyadd_s_reg(tm); 3530 ra = fmpyadd_s_reg(ra); 3531 ta = fmpyadd_s_reg(ta); 3532 rm2 = fmpyadd_s_reg(rm2); 3533 rm1 = fmpyadd_s_reg(rm1); 3534 do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s); 3535 do_fop_weww(ctx, ta, ta, ra, 3536 is_sub ? gen_helper_fsub_s : gen_helper_fadd_s); 3537 } else { 3538 do_fop_dedd(ctx, tm, rm1, rm2, gen_helper_fmpy_d); 3539 do_fop_dedd(ctx, ta, ta, ra, 3540 is_sub ? 
gen_helper_fsub_d : gen_helper_fadd_d); 3541 } 3542 3543 return nullify_end(ctx, NO_EXIT); 3544 } 3545 3546 static ExitStatus trans_fmpyfadd_s(DisasContext *ctx, uint32_t insn, 3547 const DisasInsn *di) 3548 { 3549 unsigned rt = assemble_rt64(insn); 3550 unsigned neg = extract32(insn, 5, 1); 3551 unsigned rm1 = assemble_ra64(insn); 3552 unsigned rm2 = assemble_rb64(insn); 3553 unsigned ra3 = assemble_rc64(insn); 3554 TCGv_i32 a, b, c; 3555 3556 nullify_over(ctx); 3557 a = load_frw0_i32(rm1); 3558 b = load_frw0_i32(rm2); 3559 c = load_frw0_i32(ra3); 3560 3561 if (neg) { 3562 gen_helper_fmpynfadd_s(a, cpu_env, a, b, c); 3563 } else { 3564 gen_helper_fmpyfadd_s(a, cpu_env, a, b, c); 3565 } 3566 3567 tcg_temp_free_i32(b); 3568 tcg_temp_free_i32(c); 3569 save_frw_i32(rt, a); 3570 tcg_temp_free_i32(a); 3571 return nullify_end(ctx, NO_EXIT); 3572 } 3573 3574 static ExitStatus trans_fmpyfadd_d(DisasContext *ctx, uint32_t insn, 3575 const DisasInsn *di) 3576 { 3577 unsigned rt = extract32(insn, 0, 5); 3578 unsigned neg = extract32(insn, 5, 1); 3579 unsigned rm1 = extract32(insn, 21, 5); 3580 unsigned rm2 = extract32(insn, 16, 5); 3581 unsigned ra3 = assemble_rc64(insn); 3582 TCGv_i64 a, b, c; 3583 3584 nullify_over(ctx); 3585 a = load_frd0(rm1); 3586 b = load_frd0(rm2); 3587 c = load_frd0(ra3); 3588 3589 if (neg) { 3590 gen_helper_fmpynfadd_d(a, cpu_env, a, b, c); 3591 } else { 3592 gen_helper_fmpyfadd_d(a, cpu_env, a, b, c); 3593 } 3594 3595 tcg_temp_free_i64(b); 3596 tcg_temp_free_i64(c); 3597 save_frd(rt, a); 3598 tcg_temp_free_i64(a); 3599 return nullify_end(ctx, NO_EXIT); 3600 } 3601 3602 static const DisasInsn table_fp_fused[] = { 3603 { 0xb8000000u, 0xfc000800u, trans_fmpyfadd_s }, 3604 { 0xb8000800u, 0xfc0019c0u, trans_fmpyfadd_d } 3605 }; 3606 3607 static ExitStatus translate_table_int(DisasContext *ctx, uint32_t insn, 3608 const DisasInsn table[], size_t n) 3609 { 3610 size_t i; 3611 for (i = 0; i < n; ++i) { 3612 if ((insn & table[i].mask) == table[i].insn) { 3613 return table[i].trans(ctx, insn, &table[i]); 3614 } 3615 } 3616 return gen_illegal(ctx); 3617 } 3618 3619 #define translate_table(ctx, insn, table) \ 3620 translate_table_int(ctx, insn, table, ARRAY_SIZE(table)) 3621 3622 static ExitStatus translate_one(DisasContext *ctx, uint32_t insn) 3623 { 3624 uint32_t opc = extract32(insn, 26, 6); 3625 3626 switch (opc) { 3627 case 0x00: /* system op */ 3628 return translate_table(ctx, insn, table_system); 3629 case 0x01: 3630 return translate_table(ctx, insn, table_mem_mgmt); 3631 case 0x02: 3632 return translate_table(ctx, insn, table_arith_log); 3633 case 0x03: 3634 return translate_table(ctx, insn, table_index_mem); 3635 case 0x06: 3636 return trans_fmpyadd(ctx, insn, false); 3637 case 0x08: 3638 return trans_ldil(ctx, insn); 3639 case 0x09: 3640 return trans_copr_w(ctx, insn); 3641 case 0x0A: 3642 return trans_addil(ctx, insn); 3643 case 0x0B: 3644 return trans_copr_dw(ctx, insn); 3645 case 0x0C: 3646 return translate_table(ctx, insn, table_float_0c); 3647 case 0x0D: 3648 return trans_ldo(ctx, insn); 3649 case 0x0E: 3650 return translate_table(ctx, insn, table_float_0e); 3651 3652 case 0x10: 3653 return trans_load(ctx, insn, false, MO_UB); 3654 case 0x11: 3655 return trans_load(ctx, insn, false, MO_TEUW); 3656 case 0x12: 3657 return trans_load(ctx, insn, false, MO_TEUL); 3658 case 0x13: 3659 return trans_load(ctx, insn, true, MO_TEUL); 3660 case 0x16: 3661 return trans_fload_mod(ctx, insn); 3662 case 0x17: 3663 return trans_load_w(ctx, insn); 3664 case 0x18: 3665 return 
trans_store(ctx, insn, false, MO_UB); 3666 case 0x19: 3667 return trans_store(ctx, insn, false, MO_TEUW); 3668 case 0x1A: 3669 return trans_store(ctx, insn, false, MO_TEUL); 3670 case 0x1B: 3671 return trans_store(ctx, insn, true, MO_TEUL); 3672 case 0x1E: 3673 return trans_fstore_mod(ctx, insn); 3674 case 0x1F: 3675 return trans_store_w(ctx, insn); 3676 3677 case 0x20: 3678 return trans_cmpb(ctx, insn, true, false, false); 3679 case 0x21: 3680 return trans_cmpb(ctx, insn, true, true, false); 3681 case 0x22: 3682 return trans_cmpb(ctx, insn, false, false, false); 3683 case 0x23: 3684 return trans_cmpb(ctx, insn, false, true, false); 3685 case 0x24: 3686 return trans_cmpiclr(ctx, insn); 3687 case 0x25: 3688 return trans_subi(ctx, insn); 3689 case 0x26: 3690 return trans_fmpyadd(ctx, insn, true); 3691 case 0x27: 3692 return trans_cmpb(ctx, insn, true, false, true); 3693 case 0x28: 3694 return trans_addb(ctx, insn, true, false); 3695 case 0x29: 3696 return trans_addb(ctx, insn, true, true); 3697 case 0x2A: 3698 return trans_addb(ctx, insn, false, false); 3699 case 0x2B: 3700 return trans_addb(ctx, insn, false, true); 3701 case 0x2C: 3702 case 0x2D: 3703 return trans_addi(ctx, insn); 3704 case 0x2E: 3705 return translate_table(ctx, insn, table_fp_fused); 3706 case 0x2F: 3707 return trans_cmpb(ctx, insn, false, false, true); 3708 3709 case 0x30: 3710 case 0x31: 3711 return trans_bb(ctx, insn); 3712 case 0x32: 3713 return trans_movb(ctx, insn, false); 3714 case 0x33: 3715 return trans_movb(ctx, insn, true); 3716 case 0x34: 3717 return translate_table(ctx, insn, table_sh_ex); 3718 case 0x35: 3719 return translate_table(ctx, insn, table_depw); 3720 case 0x38: 3721 return trans_be(ctx, insn, false); 3722 case 0x39: 3723 return trans_be(ctx, insn, true); 3724 case 0x3A: 3725 return translate_table(ctx, insn, table_branch); 3726 3727 case 0x04: /* spopn */ 3728 case 0x05: /* diag */ 3729 case 0x0F: /* product specific */ 3730 break; 3731 3732 case 0x07: /* unassigned */ 3733 case 0x15: /* unassigned */ 3734 case 0x1D: /* unassigned */ 3735 case 0x37: /* unassigned */ 3736 case 0x3F: /* unassigned */ 3737 default: 3738 break; 3739 } 3740 return gen_illegal(ctx); 3741 } 3742 3743 void gen_intermediate_code(CPUHPPAState *env, struct TranslationBlock *tb) 3744 { 3745 HPPACPU *cpu = hppa_env_get_cpu(env); 3746 CPUState *cs = CPU(cpu); 3747 DisasContext ctx; 3748 ExitStatus ret; 3749 int num_insns, max_insns, i; 3750 3751 ctx.tb = tb; 3752 ctx.cs = cs; 3753 ctx.iaoq_f = tb->pc; 3754 ctx.iaoq_b = tb->cs_base; 3755 ctx.singlestep_enabled = cs->singlestep_enabled; 3756 3757 ctx.ntemps = 0; 3758 for (i = 0; i < ARRAY_SIZE(ctx.temps); ++i) { 3759 TCGV_UNUSED(ctx.temps[i]); 3760 } 3761 3762 /* Compute the maximum number of insns to execute, as bounded by 3763 (1) icount, (2) single-stepping, (3) branch delay slots, or 3764 (4) the number of insns remaining on the current page. */ 3765 max_insns = tb->cflags & CF_COUNT_MASK; 3766 if (max_insns == 0) { 3767 max_insns = CF_COUNT_MASK; 3768 } 3769 if (ctx.singlestep_enabled || singlestep) { 3770 max_insns = 1; 3771 } else if (max_insns > TCG_MAX_INSNS) { 3772 max_insns = TCG_MAX_INSNS; 3773 } 3774 3775 num_insns = 0; 3776 gen_tb_start(tb); 3777 3778 /* Seed the nullification status from PSW[N], as shown in TB->FLAGS. 
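Bit 0 of tb->flags carries PSW[N].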
*/ 3779 ctx.null_cond = cond_make_f(); 3780 ctx.psw_n_nonzero = false; 3781 if (tb->flags & 1) { 3782 ctx.null_cond.c = TCG_COND_ALWAYS; 3783 ctx.psw_n_nonzero = true; 3784 } 3785 ctx.null_lab = NULL; 3786 3787 do { 3788 tcg_gen_insn_start(ctx.iaoq_f, ctx.iaoq_b); 3789 num_insns++; 3790 3791 if (unlikely(cpu_breakpoint_test(cs, ctx.iaoq_f, BP_ANY))) { 3792 ret = gen_excp(&ctx, EXCP_DEBUG); 3793 break; 3794 } 3795 if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) { 3796 gen_io_start(); 3797 } 3798 3799 if (ctx.iaoq_f < TARGET_PAGE_SIZE) { 3800 ret = do_page_zero(&ctx); 3801 assert(ret != NO_EXIT); 3802 } else { 3803 /* Always fetch the insn, even if nullified, so that we check 3804 the page permissions for execute. */ 3805 uint32_t insn = cpu_ldl_code(env, ctx.iaoq_f); 3806 3807 /* Set up the IA queue for the next insn. 3808 This will be overwritten by a branch. */ 3809 if (ctx.iaoq_b == -1) { 3810 ctx.iaoq_n = -1; 3811 ctx.iaoq_n_var = get_temp(&ctx); 3812 tcg_gen_addi_tl(ctx.iaoq_n_var, cpu_iaoq_b, 4); 3813 } else { 3814 ctx.iaoq_n = ctx.iaoq_b + 4; 3815 TCGV_UNUSED(ctx.iaoq_n_var); 3816 } 3817 3818 if (unlikely(ctx.null_cond.c == TCG_COND_ALWAYS)) { 3819 ctx.null_cond.c = TCG_COND_NEVER; 3820 ret = NO_EXIT; 3821 } else { 3822 ret = translate_one(&ctx, insn); 3823 assert(ctx.null_lab == NULL); 3824 } 3825 } 3826 3827 for (i = 0; i < ctx.ntemps; ++i) { 3828 tcg_temp_free(ctx.temps[i]); 3829 TCGV_UNUSED(ctx.temps[i]); 3830 } 3831 ctx.ntemps = 0; 3832 3833 /* If we see non-linear instructions, exhaust instruction count, 3834 or run out of buffer space, stop generation. */ 3835 /* ??? The non-linear instruction restriction is purely due to 3836 the debugging dump. Otherwise we *could* follow unconditional 3837 branches within the same page. */ 3838 if (ret == NO_EXIT 3839 && (ctx.iaoq_b != ctx.iaoq_f + 4 3840 || num_insns >= max_insns 3841 || tcg_op_buf_full())) { 3842 if (ctx.null_cond.c == TCG_COND_NEVER 3843 || ctx.null_cond.c == TCG_COND_ALWAYS) { 3844 nullify_set(&ctx, ctx.null_cond.c == TCG_COND_ALWAYS); 3845 gen_goto_tb(&ctx, 0, ctx.iaoq_b, ctx.iaoq_n); 3846 ret = EXIT_GOTO_TB; 3847 } else { 3848 ret = EXIT_IAQ_N_STALE; 3849 } 3850 } 3851 3852 ctx.iaoq_f = ctx.iaoq_b; 3853 ctx.iaoq_b = ctx.iaoq_n; 3854 if (ret == EXIT_NORETURN 3855 || ret == EXIT_GOTO_TB 3856 || ret == EXIT_IAQ_N_UPDATED) { 3857 break; 3858 } 3859 if (ctx.iaoq_f == -1) { 3860 tcg_gen_mov_tl(cpu_iaoq_f, cpu_iaoq_b); 3861 copy_iaoq_entry(cpu_iaoq_b, ctx.iaoq_n, ctx.iaoq_n_var); 3862 nullify_save(&ctx); 3863 ret = EXIT_IAQ_N_UPDATED; 3864 break; 3865 } 3866 if (ctx.iaoq_b == -1) { 3867 tcg_gen_mov_tl(cpu_iaoq_b, ctx.iaoq_n_var); 3868 } 3869 } while (ret == NO_EXIT); 3870 3871 if (tb->cflags & CF_LAST_IO) { 3872 gen_io_end(); 3873 } 3874 3875 switch (ret) { 3876 case EXIT_GOTO_TB: 3877 case EXIT_NORETURN: 3878 break; 3879 case EXIT_IAQ_N_STALE: 3880 copy_iaoq_entry(cpu_iaoq_f, ctx.iaoq_f, cpu_iaoq_f); 3881 copy_iaoq_entry(cpu_iaoq_b, ctx.iaoq_b, cpu_iaoq_b); 3882 nullify_save(&ctx); 3883 /* FALLTHRU */ 3884 case EXIT_IAQ_N_UPDATED: 3885 if (ctx.singlestep_enabled) { 3886 gen_excp_1(EXCP_DEBUG); 3887 } else { 3888 tcg_gen_exit_tb(0); 3889 } 3890 break; 3891 default: 3892 abort(); 3893 } 3894 3895 gen_tb_end(tb, num_insns); 3896 3897 tb->size = num_insns * 4; 3898 tb->icount = num_insns; 3899 3900 #ifdef DEBUG_DISAS 3901 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM) 3902 && qemu_log_in_addr_range(tb->pc)) { 3903 qemu_log_lock(); 3904 switch (tb->pc) { 3905 case 0x00: 3906 qemu_log("IN:\n0x00000000: (null)\n\n"); 3907 
break; 3908 case 0xb0: 3909 qemu_log("IN:\n0x000000b0: light-weight-syscall\n\n"); 3910 break; 3911 case 0xe0: 3912 qemu_log("IN:\n0x000000e0: set-thread-pointer-syscall\n\n"); 3913 break; 3914 case 0x100: 3915 qemu_log("IN:\n0x00000100: syscall\n\n"); 3916 break; 3917 default: 3918 qemu_log("IN: %s\n", lookup_symbol(tb->pc)); 3919 log_target_disas(cs, tb->pc, tb->size, 1); 3920 qemu_log("\n"); 3921 break; 3922 } 3923 qemu_log_unlock(); 3924 } 3925 #endif 3926 } 3927 3928 void restore_state_to_opc(CPUHPPAState *env, TranslationBlock *tb, 3929 target_ulong *data) 3930 { 3931 env->iaoq_f = data[0]; 3932 if (data[1] != -1) { 3933 env->iaoq_b = data[1]; 3934 } 3935 /* Since we were executing the instruction at IAOQ_F, and took some 3936 sort of action that provoked the cpu_restore_state, we can infer 3937 that the instruction was not nullified. */ 3938 env->psw_n = 0; 3939 } 3940