/*
 * HPPA emulation cpu translation for qemu.
 *
 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"

/* A comparison under which an insn's effect is applied (or nullified).
   C is the TCG comparison; A0 and A1 its operands.  The flag bits avoid
   allocating temporaries for common special cases: A0_IS_N means a0
   aliases the PSW[N] global, A1_IS_0 means a1 is the constant zero and
   has not been materialized yet (see cond_prep).  */
typedef struct DisasCond {
    TCGCond c;
    TCGv a0, a1;
    bool a0_is_n;
    bool a1_is_0;
} DisasCond;

/* Per-translation-block disassembly state.  */
typedef struct DisasContext {
    struct TranslationBlock *tb;
    CPUState *cs;

    target_ulong iaoq_f;    /* instruction address queue, front element */
    target_ulong iaoq_b;    /* instruction address queue, back element */
    target_ulong iaoq_n;    /* address after iaoq_b, or -1 if not constant */
    TCGv iaoq_n_var;        /* holds iaoq_n when it is not a constant */

    /* Temporaries handed out by get_temp for the current insn.  */
    int ntemps;
    TCGv temps[8];

    /* Condition under which the next insn is nullified.  */
    DisasCond null_cond;
    TCGLabel *null_lab;

    bool singlestep_enabled;
    bool psw_n_nonzero;     /* true if PSW[N] may currently be non-zero */
} DisasContext;

/* Return values from translate_one, indicating the state of the TB.
   Note that zero indicates that we are not exiting the TB.  */

typedef enum {
    NO_EXIT,

    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,

    /* We are not using a goto_tb (for whatever reason), but have updated
       the iaq (for whatever reason), so don't do it again on exit.  */
    EXIT_IAQ_N_UPDATED,

    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the iaq for the next instruction to be executed.  */
    EXIT_IAQ_N_STALE,

    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;

/* Decode-table entry: an insn matches when (opcode & MASK) == INSN;
   TRANS emits the code.  The union carries per-insn helper callbacks.  */
typedef struct DisasInsn {
    uint32_t insn, mask;
    ExitStatus (*trans)(DisasContext *ctx, uint32_t insn,
                        const struct DisasInsn *f);
    union {
        void (*f_ttt)(TCGv, TCGv, TCGv);
    };
} DisasInsn;

/* global register indexes */
static TCGv_env cpu_env;
static TCGv cpu_gr[32];
static TCGv cpu_iaoq_f;
static TCGv cpu_iaoq_b;
static TCGv cpu_sar;
static TCGv cpu_psw_n;
static TCGv cpu_psw_v;
static TCGv cpu_psw_cb;
static TCGv cpu_psw_cb_msb;
static TCGv cpu_cr26;
static TCGv cpu_cr27;

#include "exec/gen-icount.h"

/* Allocate the TCG globals backing the CPU state.  Idempotent.  */
void hppa_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUHPPAState, V) }

    typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        DEF_VAR(sar),
        DEF_VAR(cr26),
        DEF_VAR(cr27),
        DEF_VAR(psw_n),
        DEF_VAR(psw_v),
        DEF_VAR(psw_cb),
        DEF_VAR(psw_cb_msb),
        DEF_VAR(iaoq_f),
        DEF_VAR(iaoq_b),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.
       */
    static const char gr_names[32][4] = {
        "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
        "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
    };

    static bool done_init = 0;
    int i;

    /* Initialize only once, however many CPUs are created.  */
    if (done_init) {
        return;
    }
    done_init = 1;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    tcg_ctx.tcg_env = cpu_env;

    /* gr0 always reads as zero, so it gets no backing global.  */
    TCGV_UNUSED(cpu_gr[0]);
    for (i = 1; i < 32; i++) {
        cpu_gr[i] = tcg_global_mem_new(cpu_env,
                                       offsetof(CPUHPPAState, gr[i]),
                                       gr_names[i]);
    }

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new(cpu_env, v->ofs, v->name);
    }
}

/* Build the "never" condition: the insn executes unconditionally.  */
static DisasCond cond_make_f(void)
{
    DisasCond r = { .c = TCG_COND_NEVER };
    TCGV_UNUSED(r.a0);
    TCGV_UNUSED(r.a1);
    return r;
}

/* Build the condition "PSW[N] != 0", aliasing the PSW[N] global.  */
static DisasCond cond_make_n(void)
{
    DisasCond r = { .c = TCG_COND_NE, .a0_is_n = true, .a1_is_0 = true };
    r.a0 = cpu_psw_n;
    TCGV_UNUSED(r.a1);
    return r;
}

/* Build the condition "A0 <c> 0", copying A0 into a fresh temp.  */
static DisasCond cond_make_0(TCGCond c, TCGv a0)
{
    DisasCond r = { .c = c, .a1_is_0 = true };

    assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    r.a0 = tcg_temp_new();
    tcg_gen_mov_tl(r.a0, a0);
    TCGV_UNUSED(r.a1);

    return r;
}

/* Build the condition "A0 <c> A1", copying both operands into temps.  */
static DisasCond cond_make(TCGCond c, TCGv a0, TCGv a1)
{
    DisasCond r = { .c = c };

    assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    r.a0 = tcg_temp_new();
    tcg_gen_mov_tl(r.a0, a0);
    r.a1 = tcg_temp_new();
    tcg_gen_mov_tl(r.a1, a1);

    return r;
}

/* Materialize the implied zero operand, if any, so that COND can be
   fed to a TCG op that needs both operands.  */
static void cond_prep(DisasCond *cond)
{
    if (cond->a1_is_0) {
        cond->a1_is_0 = false;
        cond->a1 = tcg_const_tl(0);
    }
}

/* Release any temps held by COND and reset it to "never".  */
static void cond_free(DisasCond *cond)
{
    switch (cond->c) {
    default:
        /* Do not free operands that alias globals or were never made.  */
        if (!cond->a0_is_n) {
            tcg_temp_free(cond->a0);
        }
        if (!cond->a1_is_0) {
            tcg_temp_free(cond->a1);
        }
        cond->a0_is_n = false;
        cond->a1_is_0 = false;
        TCGV_UNUSED(cond->a0);
        TCGV_UNUSED(cond->a1);
        /* fallthru */
    case TCG_COND_ALWAYS:
        cond->c = TCG_COND_NEVER;
        break;
    case TCG_COND_NEVER:
        break;
    }
}

/* Allocate a temp that lives for the remainder of the current insn;
   it is recorded in ctx->temps for bulk release.  */
static TCGv get_temp(DisasContext *ctx)
{
    unsigned i = ctx->ntemps++;
    g_assert(i < ARRAY_SIZE(ctx->temps));
    return ctx->temps[i] = tcg_temp_new();
}

/* Allocate an insn-lifetime temp holding the constant V.  */
static TCGv load_const(DisasContext *ctx, target_long v)
{
    TCGv t = get_temp(ctx);
    tcg_gen_movi_tl(t, v);
    return t;
}

/* Return a value for GR[reg]; gr0 reads as a fresh zero temp.  */
static TCGv load_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0) {
        TCGv t = get_temp(ctx);
        tcg_gen_movi_tl(t, 0);
        return t;
    } else {
        return cpu_gr[reg];
    }
}

/* Return a destination for GR[reg].  Use a temp for gr0, or when the
   insn may be nullified, so the global is not clobbered early.  */
static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
        return get_temp(ctx);
    } else {
        return cpu_gr[reg];
    }
}

/* Copy T into DEST unless the current insn is nullified, in which case
   DEST keeps its old value (implemented with movcond).  */
static void save_or_nullify(DisasContext *ctx, TCGv dest, TCGv t)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        cond_prep(&ctx->null_cond);
        tcg_gen_movcond_tl(ctx->null_cond.c, dest, ctx->null_cond.a0,
                           ctx->null_cond.a1, dest, t);
    } else {
        tcg_gen_mov_tl(dest, t);
    }
}

/* Write T to GR[reg], honoring nullification; writes to gr0 vanish.  */
static void save_gpr(DisasContext *ctx, unsigned reg, TCGv t)
{
    if (reg != 0) {
        save_or_nullify(ctx, cpu_gr[reg], t);
    }
}

/* Byte offsets of the 32-bit halves within a 64-bit fr[] element.  */
#ifdef HOST_WORDS_BIGENDIAN
# define HI_OFS  0
# define LO_OFS  4
#else
# define HI_OFS  4
# define LO_OFS  0
#endif

/* Load 32-bit float register RT (0-63) into a new i32 temp.  Registers
   0-31 live in the high half of fr[], 32-63 in the low half.  */
static TCGv_i32 load_frw_i32(unsigned rt)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, cpu_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
    return ret;
}

/* Store VAL into 32-bit float register RT; addressing as above.  */
static void save_frw_i32(unsigned rt, TCGv_i32 val)
{
    tcg_gen_st_i32(val, cpu_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
}

#undef HI_OFS
#undef LO_OFS

/* Load 64-bit float register RT into a new i64 temp.  */
static TCGv_i64 load_frd(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    tcg_gen_ld_i64(ret, cpu_env, offsetof(CPUHPPAState, fr[rt]));
    return ret;
}

/* Store VAL into 64-bit float register RT.  */
static void save_frd(unsigned rt, TCGv_i64 val)
{
    tcg_gen_st_i64(val, cpu_env, offsetof(CPUHPPAState, fr[rt]));
}

/* Skip over the implementation of an insn that has been nullified.
   Use this when the insn is too complex for a conditional move.  */
static void nullify_over(DisasContext *ctx)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        /* The always condition should have been handled in the main loop.  */
        assert(ctx->null_cond.c != TCG_COND_ALWAYS);

        ctx->null_lab = gen_new_label();
        cond_prep(&ctx->null_cond);

        /* If we're using PSW[N], copy it to a temp because...  */
        if (ctx->null_cond.a0_is_n) {
            ctx->null_cond.a0_is_n = false;
            ctx->null_cond.a0 = tcg_temp_new();
            tcg_gen_mov_tl(ctx->null_cond.a0, cpu_psw_n);
        }
        /* ... we clear it before branching over the implementation,
           so that (1) it's clear after nullifying this insn and
           (2) if this insn nullifies the next, PSW[N] is valid.  */
        if (ctx->psw_n_nonzero) {
            ctx->psw_n_nonzero = false;
            tcg_gen_movi_tl(cpu_psw_n, 0);
        }

        tcg_gen_brcond_tl(ctx->null_cond.c, ctx->null_cond.a0,
                          ctx->null_cond.a1, ctx->null_lab);
        cond_free(&ctx->null_cond);
    }
}

/* Save the current nullification state to PSW[N].
 */
static void nullify_save(DisasContext *ctx)
{
    if (ctx->null_cond.c == TCG_COND_NEVER) {
        /* Unconditional: just make sure PSW[N] is clear.  */
        if (ctx->psw_n_nonzero) {
            tcg_gen_movi_tl(cpu_psw_n, 0);
        }
        return;
    }
    /* If a0 already aliases PSW[N], the value is there already.  */
    if (!ctx->null_cond.a0_is_n) {
        cond_prep(&ctx->null_cond);
        tcg_gen_setcond_tl(ctx->null_cond.c, cpu_psw_n,
                           ctx->null_cond.a0, ctx->null_cond.a1);
        ctx->psw_n_nonzero = true;
    }
    cond_free(&ctx->null_cond);
}

/* Set a PSW[N] to X.  The intention is that this is used immediately
   before a goto_tb/exit_tb, so that there is no fallthru path to other
   code within the TB.  Therefore we do not update psw_n_nonzero.  */
static void nullify_set(DisasContext *ctx, bool x)
{
    if (ctx->psw_n_nonzero || x) {
        tcg_gen_movi_tl(cpu_psw_n, x);
    }
}

/* Mark the end of an instruction that may have been nullified.
   This is the pair to nullify_over.  */
static ExitStatus nullify_end(DisasContext *ctx, ExitStatus status)
{
    TCGLabel *null_lab = ctx->null_lab;

    if (likely(null_lab == NULL)) {
        /* The current insn wasn't conditional or handled the condition
           applied to it without a branch, so the (new) setting of
           NULL_COND can be applied directly to the next insn.  */
        return status;
    }
    ctx->null_lab = NULL;

    if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
        /* The next instruction will be unconditional,
           and NULL_COND already reflects that.  */
        gen_set_label(null_lab);
    } else {
        /* The insn that we just executed is itself nullifying the next
           instruction.  Store the condition in the PSW[N] global.
           We asserted PSW[N] = 0 in nullify_over, so that after the
           label we have the proper value in place.  */
        nullify_save(ctx);
        gen_set_label(null_lab);
        ctx->null_cond = cond_make_n();
    }

    assert(status != EXIT_GOTO_TB && status != EXIT_IAQ_N_UPDATED);
    if (status == EXIT_NORETURN) {
        status = NO_EXIT;
    }
    return status;
}

/* Store an IAQ entry: the immediate IVAL if known, else the variable
   VVAL (IVAL == -1 marks "not a compile-time constant").  */
static void copy_iaoq_entry(TCGv dest, target_ulong ival, TCGv vval)
{
    if (unlikely(ival == -1)) {
        tcg_gen_mov_tl(dest, vval);
    } else {
        tcg_gen_movi_tl(dest, ival);
    }
}

/* Return the target of a branch with displacement DISP: current insn
   address plus DISP plus 8.  */
static inline target_ulong iaoq_dest(DisasContext *ctx, target_long disp)
{
    return ctx->iaoq_f + disp + 8;
}

/* Raise EXCEPTION via the helper; does not return to generated code.  */
static void gen_excp_1(int exception)
{
    TCGv_i32 t = tcg_const_i32(exception);
    gen_helper_excp(cpu_env, t);
    tcg_temp_free_i32(t);
}

/* Flush the IAQ and nullification state to globals, then raise
   EXCEPTION.  */
static ExitStatus gen_excp(DisasContext *ctx, int exception)
{
    copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
    copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
    nullify_save(ctx);
    gen_excp_1(exception);
    return EXIT_NORETURN;
}

/* Raise SIGILL for an illegal or unimplemented opcode, honoring any
   pending nullification of the faulting insn.  */
static ExitStatus gen_illegal(DisasContext *ctx)
{
    nullify_over(ctx);
    return nullify_end(ctx, gen_excp(ctx, EXCP_SIGILL));
}

static bool use_goto_tb(DisasContext *ctx, target_ulong dest)
{
    /* Suppress goto_tb in the case of single-steping and IO.  */
    if ((ctx->tb->cflags & CF_LAST_IO) || ctx->singlestep_enabled) {
        return false;
    }
    return true;
}

/* If the next insn is to be nullified, and it's on the same page,
   and we're not attempting to set a breakpoint on it, then we can
   totally skip the nullified insn.  This avoids creating and
   executing a TB that merely branches to the next TB.
 */
static bool use_nullify_skip(DisasContext *ctx)
{
    return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
            && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
}

/* Emit exit number WHICH of the TB, continuing at queue addresses F
   (front) and B (back).  A direct goto_tb is used when both addresses
   are compile-time constants and chaining is permitted; otherwise the
   (possibly variable) queue values are stored and we exit indirectly.  */
static void gen_goto_tb(DisasContext *ctx, int which,
                        target_ulong f, target_ulong b)
{
    if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
        tcg_gen_goto_tb(which);
        tcg_gen_movi_tl(cpu_iaoq_f, f);
        tcg_gen_movi_tl(cpu_iaoq_b, b);
        tcg_gen_exit_tb((uintptr_t)ctx->tb + which);
    } else {
        copy_iaoq_entry(cpu_iaoq_f, f, cpu_iaoq_b);
        copy_iaoq_entry(cpu_iaoq_b, b, ctx->iaoq_n_var);
        if (ctx->singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG);
        } else {
            tcg_gen_exit_tb(0);
        }
    }
}

/* PA has a habit of taking the LSB of a field and using that as the sign,
   with the rest of the field becoming the least significant bits.  */
static target_long low_sextract(uint32_t val, int pos, int len)
{
    target_ulong x = -(target_ulong)extract32(val, pos, 1);
    x = (x << (len - 1)) | extract32(val, pos + 1, len - 1);
    return x;
}

/* Assemble the scattered signed 12-bit displacement field.  */
static target_long assemble_12(uint32_t insn)
{
    target_ulong x = -(target_ulong)(insn & 1);
    x = (x << 1) | extract32(insn, 2, 1);
    x = (x << 10) | extract32(insn, 3, 10);
    return x;
}

static target_long assemble_16(uint32_t insn)
{
    /* Take the name from PA2.0, which produces a 16-bit number
       only with wide mode; otherwise a 14-bit number.  Since we don't
       implement wide mode, this is always the 14-bit number.  */
    return low_sextract(insn, 0, 14);
}

static target_long assemble_16a(uint32_t insn)
{
    /* Take the name from PA2.0, which produces a 14-bit shifted number
       only with wide mode; otherwise a 12-bit shifted number.  Since we
       don't implement wide mode, this is always the 12-bit number.  */
    target_ulong x = -(target_ulong)(insn & 1);
    x = (x << 11) | extract32(insn, 2, 11);
    return x << 2;
}

/* Assemble the scattered signed 17-bit displacement, word-scaled.  */
static target_long assemble_17(uint32_t insn)
{
    target_ulong x = -(target_ulong)(insn & 1);
    x = (x << 5) | extract32(insn, 16, 5);
    x = (x << 1) | extract32(insn, 2, 1);
    x = (x << 10) | extract32(insn, 3, 10);
    return x << 2;
}

/* Assemble the scattered 21-bit immediate, shifted to the high bits.  */
static target_long assemble_21(uint32_t insn)
{
    target_ulong x = -(target_ulong)(insn & 1);
    x = (x << 11) | extract32(insn, 1, 11);
    x = (x << 2) | extract32(insn, 14, 2);
    x = (x << 5) | extract32(insn, 16, 5);
    x = (x << 2) | extract32(insn, 12, 2);
    return x << 11;
}

/* Assemble the scattered signed 22-bit displacement, word-scaled.  */
static target_long assemble_22(uint32_t insn)
{
    target_ulong x = -(target_ulong)(insn & 1);
    x = (x << 10) | extract32(insn, 16, 10);
    x = (x << 1) | extract32(insn, 2, 1);
    x = (x << 10) | extract32(insn, 3, 10);
    return x << 2;
}

/* The parisc documentation describes only the general interpretation of
   the conditions, without describing their exact implementation.  The
   interpretations do not stand up well when considering ADD,C and SUB,B.
   However, considering the Addition, Subtraction and Logical conditions
   as a whole it would appear that these relations are similar to what
   a traditional NZCV set of flags would produce.
 */

/* Build the DisasCond for condition field CF, given the result RES,
   the carry-out msb CB_MSB, and the signed-overflow value SV of the
   operation just emitted.  The low bit of CF selects the sense.  */
static DisasCond do_cond(unsigned cf, TCGv res, TCGv cb_msb, TCGv sv)
{
    DisasCond cond;
    TCGv tmp;

    switch (cf >> 1) {
    case 0: /* Never / TR */
        cond = cond_make_f();
        break;
    case 1: /* = / <>          (Z / !Z) */
        cond = cond_make_0(TCG_COND_EQ, res);
        break;
    case 2: /* < / >=          (N / !N) */
        cond = cond_make_0(TCG_COND_LT, res);
        break;
    case 3: /* <= / >          (N | Z / !N & !Z) */
        cond = cond_make_0(TCG_COND_LE, res);
        break;
    case 4: /* NUV / UV        (!C / C) */
        cond = cond_make_0(TCG_COND_EQ, cb_msb);
        break;
    case 5: /* ZNV / VNZ       (!C | Z / C & !Z) */
        tmp = tcg_temp_new();
        tcg_gen_neg_tl(tmp, cb_msb);
        tcg_gen_and_tl(tmp, tmp, res);
        cond = cond_make_0(TCG_COND_EQ, tmp);
        tcg_temp_free(tmp);
        break;
    case 6: /* SV / NSV        (V / !V) */
        cond = cond_make_0(TCG_COND_LT, sv);
        break;
    case 7: /* OD / EV */
        tmp = tcg_temp_new();
        tcg_gen_andi_tl(tmp, res, 1);
        cond = cond_make_0(TCG_COND_NE, tmp);
        tcg_temp_free(tmp);
        break;
    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/* Similar, but for the special case of subtraction without borrow, we
   can use the inputs directly.  This can allow other computation to be
   deleted as unused.  */

static DisasCond do_sub_cond(unsigned cf, TCGv res, TCGv in1, TCGv in2, TCGv sv)
{
    DisasCond cond;

    switch (cf >> 1) {
    case 1: /* = / <> */
        cond = cond_make(TCG_COND_EQ, in1, in2);
        break;
    case 2: /* < / >= */
        cond = cond_make(TCG_COND_LT, in1, in2);
        break;
    case 3: /* <= / > */
        cond = cond_make(TCG_COND_LE, in1, in2);
        break;
    case 4: /* << / >>= */
        cond = cond_make(TCG_COND_LTU, in1, in2);
        break;
    case 5: /* <<= / >> */
        cond = cond_make(TCG_COND_LEU, in1, in2);
        break;
    default:
        return do_cond(cf, res, sv, sv);
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/* Similar, but for logicals, where the carry and overflow bits are not
   computed, and use of them is undefined.  */

static DisasCond do_log_cond(unsigned cf, TCGv res)
{
    switch (cf >> 1) {
    case 4: case 5: case 6:
        /* Conditions that would consult C or V degrade to never/TR.  */
        cf &= 1;
        break;
    }
    return do_cond(cf, res, res, res);
}

/* Similar, but for shift/extract/deposit conditions.  */

static DisasCond do_sed_cond(unsigned orig, TCGv res)
{
    unsigned c, f;

    /* Convert the compressed condition codes to standard.
       0-2 are the same as logicals (nv,<,<=), while 3 is OD.
       4-7 are the reverse of 0-3.  */
    c = orig & 3;
    if (c == 3) {
        c = 7;
    }
    f = (orig & 4) / 4;

    return do_log_cond(c * 2 + f, res);
}

/* Similar, but for unit conditions.  */

static DisasCond do_unit_cond(unsigned cf, TCGv res, TCGv in1, TCGv in2)
{
    DisasCond cond;
    TCGv tmp, cb;

    TCGV_UNUSED(cb);
    if (cf & 8) {
        /* Since we want to test lots of carry-out bits all at once, do not
         * do our normal thing and compute carry-in of bit B+1 since that
         * leaves us with carry bits spread across two words.
         */
        cb = tcg_temp_new();
        tmp = tcg_temp_new();
        /* Per-bit carry-out: (in1 | in2) & ~res | (in1 & in2).  */
        tcg_gen_or_tl(cb, in1, in2);
        tcg_gen_and_tl(tmp, in1, in2);
        tcg_gen_andc_tl(cb, cb, res);
        tcg_gen_or_tl(cb, cb, tmp);
        tcg_temp_free(tmp);
    }

    switch (cf >> 1) {
    case 0: /* never / TR */
    case 1: /* undefined */
    case 5: /* undefined */
        cond = cond_make_f();
        break;

    case 2: /* SBZ / NBZ */
        /* See hasless(v,1) from
         * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
         */
        tmp = tcg_temp_new();
        tcg_gen_subi_tl(tmp, res, 0x01010101u);
        tcg_gen_andc_tl(tmp, tmp, res);
        tcg_gen_andi_tl(tmp, tmp, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        tcg_temp_free(tmp);
        break;

    case 3: /* SHZ / NHZ */
        tmp = tcg_temp_new();
        tcg_gen_subi_tl(tmp, res, 0x00010001u);
        tcg_gen_andc_tl(tmp, tmp, res);
        tcg_gen_andi_tl(tmp, tmp, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        tcg_temp_free(tmp);
        break;

    case 4: /* SDC / NDC */
        tcg_gen_andi_tl(cb, cb, 0x88888888u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 6: /* SBC / NBC */
        tcg_gen_andi_tl(cb, cb, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 7: /* SHC / NHC */
        tcg_gen_andi_tl(cb, cb, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    default:
        g_assert_not_reached();
    }
    if (cf & 8) {
        tcg_temp_free(cb);
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/* Compute signed overflow for addition.  */
static TCGv do_add_sv(DisasContext *ctx, TCGv res, TCGv in1, TCGv in2)
{
    TCGv sv = get_temp(ctx);
    TCGv tmp = tcg_temp_new();

    /* Sign bit of (res ^ in1) & ~(in1 ^ in2): overflow iff the inputs
       agree in sign but the result does not.  */
    tcg_gen_xor_tl(sv, res, in1);
    tcg_gen_xor_tl(tmp, in1, in2);
    tcg_gen_andc_tl(sv, sv, tmp);
    tcg_temp_free(tmp);

    return sv;
}

/* Compute signed overflow for subtraction.  */
static TCGv do_sub_sv(DisasContext *ctx, TCGv res, TCGv in1, TCGv in2)
{
    TCGv sv = get_temp(ctx);
    TCGv tmp = tcg_temp_new();

    /* Sign bit of (res ^ in1) & (in1 ^ in2): overflow iff the inputs
       differ in sign and the result's sign differs from in1.  */
    tcg_gen_xor_tl(sv, res, in1);
    tcg_gen_xor_tl(tmp, in1, in2);
    tcg_gen_and_tl(sv, sv, tmp);
    tcg_temp_free(tmp);

    return sv;
}

/* Emit the body of an ADD-family insn:
       GR[rt] = (IN1 << SHIFT) + IN2 (+ PSW carry, if IS_C).
   IS_L suppresses the carry computation (ADD,L); IS_TSV traps on signed
   overflow; IS_TC traps when condition CF holds.  Installs the new
   nullification condition derived from CF.  */
static ExitStatus do_add(DisasContext *ctx, unsigned rt, TCGv in1, TCGv in2,
                         unsigned shift, bool is_l, bool is_tsv, bool is_tc,
                         bool is_c, unsigned cf)
{
    TCGv dest, cb, cb_msb, sv, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new();
    TCGV_UNUSED(cb);
    TCGV_UNUSED(cb_msb);

    if (shift) {
        tmp = get_temp(ctx);
        tcg_gen_shli_tl(tmp, in1, shift);
        in1 = tmp;
    }

    /* Compute the carry out via add2 when the carry bits are needed,
       either for writeback (!is_l) or for conditions 4/5.  */
    if (!is_l || c == 4 || c == 5) {
        TCGv zero = tcg_const_tl(0);
        cb_msb = get_temp(ctx);
        tcg_gen_add2_tl(dest, cb_msb, in1, zero, in2, zero);
        if (is_c) {
            tcg_gen_add2_tl(dest, cb_msb, dest, cb_msb, cpu_psw_cb_msb, zero);
        }
        tcg_temp_free(zero);
        if (!is_l) {
            cb = get_temp(ctx);
            tcg_gen_xor_tl(cb, in1, in2);
            tcg_gen_xor_tl(cb, cb, dest);
        }
    } else {
        tcg_gen_add_tl(dest, in1, in2);
        if (is_c) {
            tcg_gen_add_tl(dest, dest, cpu_psw_cb_msb);
        }
    }

    /* Compute signed overflow if required.  */
    TCGV_UNUSED(sv);
    if (is_tsv || c == 6) {
        sv = do_add_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            /* ??? Need to include overflow from shift.  */
            gen_helper_tsv(cpu_env, sv);
        }
    }

    /* Emit any conditional trap before any writeback.  */
    cond = do_cond(cf, dest, cb_msb, sv);
    if (is_tc) {
        cond_prep(&cond);
        tmp = tcg_temp_new();
        tcg_gen_setcond_tl(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(cpu_env, tmp);
        tcg_temp_free(tmp);
    }

    /* Write back the result.  */
    if (!is_l) {
        save_or_nullify(ctx, cpu_psw_cb, cb);
        save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    }
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
    return NO_EXIT;
}

/* Emit the body of a SUB-family insn: GR[rt] = IN1 - IN2, where IS_B
   folds in the PSW carry/borrow bit.  IS_TSV traps on signed overflow;
   IS_TC traps when condition CF holds.  */
static ExitStatus do_sub(DisasContext *ctx, unsigned rt, TCGv in1, TCGv in2,
                         bool is_tsv, bool is_b, bool is_tc, unsigned cf)
{
    TCGv dest, sv, cb, cb_msb, zero, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new();
    cb = tcg_temp_new();
    cb_msb = tcg_temp_new();

    zero = tcg_const_tl(0);
    if (is_b) {
        /* DEST,C = IN1 + ~IN2 + C.  */
        tcg_gen_not_tl(cb, in2);
        tcg_gen_add2_tl(dest, cb_msb, in1, zero, cpu_psw_cb_msb, zero);
        tcg_gen_add2_tl(dest, cb_msb, dest, cb_msb, cb, zero);
        tcg_gen_xor_tl(cb, cb, in1);
        tcg_gen_xor_tl(cb, cb, dest);
    } else {
        /* DEST,C = IN1 + ~IN2 + 1.  We can produce the same result in fewer
           operations by seeding the high word with 1 and subtracting.  */
        tcg_gen_movi_tl(cb_msb, 1);
        tcg_gen_sub2_tl(dest, cb_msb, in1, cb_msb, in2, zero);
        tcg_gen_eqv_tl(cb, in1, in2);
        tcg_gen_xor_tl(cb, cb, dest);
    }
    tcg_temp_free(zero);

    /* Compute signed overflow if required.  */
    TCGV_UNUSED(sv);
    if (is_tsv || c == 6) {
        sv = do_sub_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            gen_helper_tsv(cpu_env, sv);
        }
    }

    /* Compute the condition.  We cannot use the special case for borrow.  */
    if (!is_b) {
        cond = do_sub_cond(cf, dest, in1, in2, sv);
    } else {
        cond = do_cond(cf, dest, cb_msb, sv);
    }

    /* Emit any conditional trap before any writeback.  */
    if (is_tc) {
        cond_prep(&cond);
        tmp = tcg_temp_new();
        tcg_gen_setcond_tl(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(cpu_env, tmp);
        tcg_temp_free(tmp);
    }

    /* Write back the result.  */
    save_or_nullify(ctx, cpu_psw_cb, cb);
    save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
    return NO_EXIT;
}

/* Emit COMCLR: compare IN1 and IN2 per CF for nullification, and clear
   GR[rt].  */
static ExitStatus do_cmpclr(DisasContext *ctx, unsigned rt, TCGv in1,
                            TCGv in2, unsigned cf)
{
    TCGv dest, sv;
    DisasCond cond;

    dest = tcg_temp_new();
    tcg_gen_sub_tl(dest, in1, in2);

    /* Compute signed overflow if required.  */
    TCGV_UNUSED(sv);
    if ((cf >> 1) == 6) {
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    /* Form the condition for the compare.  */
    cond = do_sub_cond(cf, dest, in1, in2, sv);

    /* Clear.  */
    tcg_gen_movi_tl(dest, 0);
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
    return NO_EXIT;
}

/* Emit a logical insn: GR[rt] = FN(IN1, IN2), with nullification
   condition CF (0 means unconditional).  */
static ExitStatus do_log(DisasContext *ctx, unsigned rt, TCGv in1, TCGv in2,
                         unsigned cf, void (*fn)(TCGv, TCGv, TCGv))
{
    TCGv dest = dest_gpr(ctx, rt);

    /* Perform the operation, and writeback.  */
    fn(dest, in1, in2);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.
     */
    cond_free(&ctx->null_cond);
    if (cf) {
        ctx->null_cond = do_log_cond(cf, dest);
    }
    return NO_EXIT;
}

/* Emit a unit insn: GR[rt] = FN(IN1, IN2), with unit condition CF;
   IS_TC traps when the condition holds.  */
static ExitStatus do_unit(DisasContext *ctx, unsigned rt, TCGv in1,
                          TCGv in2, unsigned cf, bool is_tc,
                          void (*fn)(TCGv, TCGv, TCGv))
{
    TCGv dest;
    DisasCond cond;

    if (cf == 0) {
        dest = dest_gpr(ctx, rt);
        fn(dest, in1, in2);
        save_gpr(ctx, rt, dest);
        cond_free(&ctx->null_cond);
    } else {
        dest = tcg_temp_new();
        fn(dest, in1, in2);

        cond = do_unit_cond(cf, dest, in1, in2);

        if (is_tc) {
            TCGv tmp = tcg_temp_new();
            cond_prep(&cond);
            tcg_gen_setcond_tl(cond.c, tmp, cond.a0, cond.a1);
            gen_helper_tcond(cpu_env, tmp);
            tcg_temp_free(tmp);
        }
        save_gpr(ctx, rt, dest);

        cond_free(&ctx->null_cond);
        ctx->null_cond = cond;
    }
    return NO_EXIT;
}

/* Emit a memory load.  The modify parameter should be
 * < 0 for pre-modify,
 * > 0 for post-modify,
 * = 0 for no base register update.
 */
static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
                       unsigned rx, int scale, target_long disp,
                       int modify, TCGMemOp mop)
{
    TCGv addr, base;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    addr = tcg_temp_new();
    base = load_gpr(ctx, rb);

    /* Note that RX is mutually exclusive with DISP.  */
    if (rx) {
        tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
        tcg_gen_add_tl(addr, addr, base);
    } else {
        tcg_gen_addi_tl(addr, base, disp);
    }

    if (modify == 0) {
        tcg_gen_qemu_ld_i32(dest, addr, MMU_USER_IDX, mop);
    } else {
        /* Pre-modify loads use the updated address; post-modify uses
           the original base.  Either way, RB gets the new address.  */
        tcg_gen_qemu_ld_i32(dest, (modify < 0 ? addr : base),
                            MMU_USER_IDX, mop);
        save_gpr(ctx, rb, addr);
    }
    tcg_temp_free(addr);
}

/* As do_load_32, but loading a 64-bit value.  */
static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
                       unsigned rx, int scale, target_long disp,
                       int modify, TCGMemOp mop)
{
    TCGv addr, base;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    addr = tcg_temp_new();
    base = load_gpr(ctx, rb);

    /* Note that RX is mutually exclusive with DISP.  */
    if (rx) {
        tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
        tcg_gen_add_tl(addr, addr, base);
    } else {
        tcg_gen_addi_tl(addr, base, disp);
    }

    if (modify == 0) {
        tcg_gen_qemu_ld_i64(dest, addr, MMU_USER_IDX, mop);
    } else {
        tcg_gen_qemu_ld_i64(dest, (modify < 0 ? addr : base),
                            MMU_USER_IDX, mop);
        save_gpr(ctx, rb, addr);
    }
    tcg_temp_free(addr);
}

/* Emit a 32-bit memory store; MODIFY as for do_load_32.  */
static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
                        unsigned rx, int scale, target_long disp,
                        int modify, TCGMemOp mop)
{
    TCGv addr, base;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    addr = tcg_temp_new();
    base = load_gpr(ctx, rb);

    /* Note that RX is mutually exclusive with DISP.  */
    if (rx) {
        tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
        tcg_gen_add_tl(addr, addr, base);
    } else {
        tcg_gen_addi_tl(addr, base, disp);
    }

    tcg_gen_qemu_st_i32(src, (modify <= 0 ? addr : base), MMU_USER_IDX, mop);

    if (modify != 0) {
        save_gpr(ctx, rb, addr);
    }
    tcg_temp_free(addr);
}

/* As do_store_32, but storing a 64-bit value.  */
static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
                        unsigned rx, int scale, target_long disp,
                        int modify, TCGMemOp mop)
{
    TCGv addr, base;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    addr = tcg_temp_new();
    base = load_gpr(ctx, rb);

    /* Note that RX is mutually exclusive with DISP.  */
    if (rx) {
        tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
        tcg_gen_add_tl(addr, addr, base);
    } else {
        tcg_gen_addi_tl(addr, base, disp);
    }

    tcg_gen_qemu_st_i64(src, (modify <= 0 ? addr : base), MMU_USER_IDX, mop);

    if (modify != 0) {
        save_gpr(ctx, rb, addr);
    }
    tcg_temp_free(addr);
}

#if TARGET_LONG_BITS == 64
#define do_load_tl  do_load_64
#define do_store_tl do_store_64
#else
#define do_load_tl  do_load_32
#define do_store_tl do_store_32
#endif

/* Emit a general-register load, handling nullification.  */
static ExitStatus do_load(DisasContext *ctx, unsigned rt, unsigned rb,
                          unsigned rx, int scale, target_long disp,
                          int modify, TCGMemOp mop)
{
    TCGv dest;

    nullify_over(ctx);

    if (modify == 0) {
        /* No base register update.  */
        dest = dest_gpr(ctx, rt);
    } else {
        /* Make sure if RT == RB, we see the result of the load.  */
        dest = get_temp(ctx);
    }
    do_load_tl(ctx, dest, rb, rx, scale, disp, modify, mop);
    save_gpr(ctx, rt, dest);

    return nullify_end(ctx, NO_EXIT);
}

/* Emit a 32-bit float-register load, handling nullification.  */
static ExitStatus do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
                            unsigned rx, int scale, target_long disp,
                            int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i32();
    do_load_32(ctx, tmp, rb, rx, scale, disp, modify, MO_TEUL);
    save_frw_i32(rt, tmp);
    tcg_temp_free_i32(tmp);

    /* Writes to fr0 need post-processing in the helper.  */
    if (rt == 0) {
        gen_helper_loaded_fr0(cpu_env);
    }

    return nullify_end(ctx, NO_EXIT);
}

/* Emit a 64-bit float-register load, handling nullification.  */
static ExitStatus do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
                            unsigned rx, int scale, target_long disp,
                            int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i64();
    do_load_64(ctx, tmp, rb, rx, scale, disp, modify, MO_TEQ);
    save_frd(rt, tmp);
    tcg_temp_free_i64(tmp);

    /* Writes to fr0 need post-processing in the helper.  */
    if (rt == 0) {
        gen_helper_loaded_fr0(cpu_env);
    }

    return nullify_end(ctx, NO_EXIT);
}

/* Emit a general-register store, handling nullification.  */
static ExitStatus do_store(DisasContext *ctx, unsigned rt, unsigned rb,
                           target_long disp, int modify, TCGMemOp mop)
{
    nullify_over(ctx);
    do_store_tl(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, modify, mop);
    return nullify_end(ctx, NO_EXIT);
}

/* Emit a 32-bit float-register store, handling nullification.  */
static ExitStatus do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
                             unsigned rx, int scale, target_long disp,
                             int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = load_frw_i32(rt);
    do_store_32(ctx, tmp, rb, rx, scale, disp, modify, MO_TEUL);
    tcg_temp_free_i32(tmp);

    return nullify_end(ctx, NO_EXIT);
}

/* Emit a 64-bit float-register store, handling nullification.  */
static ExitStatus do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
                             unsigned rx, int scale, target_long disp,
                             int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);
1213 1214 tmp = load_frd(rt); 1215 do_store_64(ctx, tmp, rb, rx, scale, disp, modify, MO_TEQ); 1216 tcg_temp_free_i64(tmp); 1217 1218 return nullify_end(ctx, NO_EXIT); 1219 } 1220 1221 /* Emit an unconditional branch to a direct target, which may or may not 1222 have already had nullification handled. */ 1223 static ExitStatus do_dbranch(DisasContext *ctx, target_ulong dest, 1224 unsigned link, bool is_n) 1225 { 1226 if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) { 1227 if (link != 0) { 1228 copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var); 1229 } 1230 ctx->iaoq_n = dest; 1231 if (is_n) { 1232 ctx->null_cond.c = TCG_COND_ALWAYS; 1233 } 1234 return NO_EXIT; 1235 } else { 1236 nullify_over(ctx); 1237 1238 if (link != 0) { 1239 copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var); 1240 } 1241 1242 if (is_n && use_nullify_skip(ctx)) { 1243 nullify_set(ctx, 0); 1244 gen_goto_tb(ctx, 0, dest, dest + 4); 1245 } else { 1246 nullify_set(ctx, is_n); 1247 gen_goto_tb(ctx, 0, ctx->iaoq_b, dest); 1248 } 1249 1250 nullify_end(ctx, NO_EXIT); 1251 1252 nullify_set(ctx, 0); 1253 gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n); 1254 return EXIT_GOTO_TB; 1255 } 1256 } 1257 1258 /* Emit a conditional branch to a direct target. If the branch itself 1259 is nullified, we should have already used nullify_over. */ 1260 static ExitStatus do_cbranch(DisasContext *ctx, target_long disp, bool is_n, 1261 DisasCond *cond) 1262 { 1263 target_ulong dest = iaoq_dest(ctx, disp); 1264 TCGLabel *taken = NULL; 1265 TCGCond c = cond->c; 1266 int which = 0; 1267 bool n; 1268 1269 assert(ctx->null_cond.c == TCG_COND_NEVER); 1270 1271 /* Handle TRUE and NEVER as direct branches. 
 */
    if (c == TCG_COND_ALWAYS) {
        return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
    }
    if (c == TCG_COND_NEVER) {
        return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
    }

    taken = gen_new_label();
    cond_prep(cond);
    tcg_gen_brcond_tl(c, cond->a0, cond->a1, taken);
    cond_free(cond);

    /* Not taken: Condition not satisfied; nullify on backward branches. */
    n = is_n && disp < 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, which++, ctx->iaoq_n, ctx->iaoq_n + 4);
    } else {
        if (!n && ctx->null_lab) {
            gen_set_label(ctx->null_lab);
            ctx->null_lab = NULL;
        }
        nullify_set(ctx, n);
        gen_goto_tb(ctx, which++, ctx->iaoq_b, ctx->iaoq_n);
    }

    gen_set_label(taken);

    /* Taken: Condition satisfied; nullify on forward branches.  */
    n = is_n && disp >= 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, which++, dest, dest + 4);
    } else {
        nullify_set(ctx, n);
        gen_goto_tb(ctx, which++, ctx->iaoq_b, dest);
    }

    /* Not taken: the branch itself was nullified.  */
    if (ctx->null_lab) {
        gen_set_label(ctx->null_lab);
        ctx->null_lab = NULL;
        /* Only two goto_tb slots exist per TB; if both are consumed,
           exit with the next address left stale.  */
        if (which < 2) {
            nullify_set(ctx, 0);
            gen_goto_tb(ctx, which, ctx->iaoq_b, ctx->iaoq_n);
            return EXIT_GOTO_TB;
        } else {
            return EXIT_IAQ_N_STALE;
        }
    } else {
        return EXIT_GOTO_TB;
    }
}

/* Emit an unconditional branch to an indirect target.  This handles
   nullification of the branch itself.  */
static ExitStatus do_ibranch(DisasContext *ctx, TCGv dest,
                             unsigned link, bool is_n)
{
    TCGv a0, a1, next, tmp;
    TCGCond c;

    assert(ctx->null_lab == NULL);

    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (link != 0) {
            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        next = get_temp(ctx);
        tcg_gen_mov_tl(next, dest);
        ctx->iaoq_n = -1;
        ctx->iaoq_n_var = next;
        if (is_n) {
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
    } else if (is_n && use_nullify_skip(ctx)) {
        /* The (conditional) branch, B, nullifies the next insn, N,
           and we're allowed to skip execution N (no single-step or
           tracepoint in effect).  Since the exit_tb that we must use
           for the indirect branch consumes no special resources, we
           can (conditionally) skip B and continue execution.  */
        /* The use_nullify_skip test implies we have a known control path. */
        tcg_debug_assert(ctx->iaoq_b != -1);
        tcg_debug_assert(ctx->iaoq_n != -1);

        /* We do have to handle the non-local temporary, DEST, before
           branching.  Since IAOQ_F is not really live at this point, we
           can simply store DEST optimistically.  Similarly with IAOQ_B.  */
        tcg_gen_mov_tl(cpu_iaoq_f, dest);
        tcg_gen_addi_tl(cpu_iaoq_b, dest, 4);

        nullify_over(ctx);
        if (link != 0) {
            tcg_gen_movi_tl(cpu_gr[link], ctx->iaoq_n);
        }
        tcg_gen_exit_tb(0);
        return nullify_end(ctx, NO_EXIT);
    } else {
        cond_prep(&ctx->null_cond);
        c = ctx->null_cond.c;
        a0 = ctx->null_cond.a0;
        a1 = ctx->null_cond.a1;

        tmp = tcg_temp_new();
        next = get_temp(ctx);

        /* Select between the sequential next address and DEST, under
           the pending nullification condition, without a branch.  */
        copy_iaoq_entry(tmp, ctx->iaoq_n, ctx->iaoq_n_var);
        tcg_gen_movcond_tl(c, next, a0, a1, tmp, dest);
        ctx->iaoq_n = -1;
        ctx->iaoq_n_var = next;

        if (link != 0) {
            /* Only write the link register if the branch executes.  */
            tcg_gen_movcond_tl(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
        }

        if (is_n) {
            /* The branch nullifies the next insn, which means the state of N
               after the branch is the inverse of the state of N that applied
               to the branch.  */
            tcg_gen_setcond_tl(tcg_invert_cond(c), cpu_psw_n, a0, a1);
            cond_free(&ctx->null_cond);
            ctx->null_cond = cond_make_n();
            ctx->psw_n_nonzero = true;
        } else {
            cond_free(&ctx->null_cond);
        }
    }

    return NO_EXIT;
}

/* On Linux, page zero is normally marked execute only + gateway.
   Therefore normal read or write is supposed to fail, but specific
   offsets have kernel code mapped to raise permissions to implement
   system calls.  Handling this via an explicit check here, rather
   in than the "be disp(sr2,r0)" instruction that probably sent us
   here, is the easiest way to handle the branch delay slot on the
   aforementioned BE.  */
static ExitStatus do_page_zero(DisasContext *ctx)
{
    /* If by some means we get here with PSW[N]=1, that implies that
       the B,GATE instruction would be skipped, and we'd fault on the
       next insn within the privileged page.
 */
    switch (ctx->null_cond.c) {
    case TCG_COND_NEVER:
        break;
    case TCG_COND_ALWAYS:
        tcg_gen_movi_tl(cpu_psw_n, 0);
        goto do_sigill;
    default:
        /* Since this is always the first (and only) insn within the
           TB, we should know the state of PSW[N] from TB->FLAGS.  */
        g_assert_not_reached();
    }

    /* Check that we didn't arrive here via some means that allowed
       non-sequential instruction execution.  Normally the PSW[B] bit
       detects this by disallowing the B,GATE instruction to execute
       under such conditions.  */
    if (ctx->iaoq_b != ctx->iaoq_f + 4) {
        goto do_sigill;
    }

    switch (ctx->iaoq_f) {
    case 0x00: /* Null pointer call */
        gen_excp_1(EXCP_SIGSEGV);
        return EXIT_NORETURN;

    case 0xb0: /* LWS */
        gen_excp_1(EXCP_SYSCALL_LWS);
        return EXIT_NORETURN;

    case 0xe0: /* SET_THREAD_POINTER */
        tcg_gen_mov_tl(cpu_cr27, cpu_gr[26]);
        tcg_gen_mov_tl(cpu_iaoq_f, cpu_gr[31]);
        tcg_gen_addi_tl(cpu_iaoq_b, cpu_iaoq_f, 4);
        return EXIT_IAQ_N_UPDATED;

    case 0x100: /* SYSCALL */
        gen_excp_1(EXCP_SYSCALL);
        return EXIT_NORETURN;

    default:
    do_sigill:
        gen_excp_1(EXCP_SIGILL);
        return EXIT_NORETURN;
    }
}

/* Translate a no-op: just consume any pending nullification.  */
static ExitStatus trans_nop(DisasContext *ctx, uint32_t insn,
                            const DisasInsn *di)
{
    cond_free(&ctx->null_cond);
    return NO_EXIT;
}

/* BREAK: raise a debug exception.  */
static ExitStatus trans_break(DisasContext *ctx, uint32_t insn,
                              const DisasInsn *di)
{
    nullify_over(ctx);
    return nullify_end(ctx, gen_excp(ctx, EXCP_DEBUG));
}

/* SYNC: emit a full memory barrier.  */
static ExitStatus trans_sync(DisasContext *ctx, uint32_t insn,
                             const DisasInsn *di)
{
    /* No point in nullifying the memory barrier.  */
    tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);

    cond_free(&ctx->null_cond);
    return NO_EXIT;
}

/* MFIA: copy the current instruction address (IAOQ_F) into RT.  */
static ExitStatus trans_mfia(DisasContext *ctx, uint32_t insn,
                             const DisasInsn *di)
{
    unsigned rt = extract32(insn, 0, 5);
    TCGv tmp = dest_gpr(ctx, rt);
    tcg_gen_movi_tl(tmp, ctx->iaoq_f);
    save_gpr(ctx, rt, tmp);

    cond_free(&ctx->null_cond);
    return NO_EXIT;
}

/* MFSP: read a space register; always zero here.  */
static ExitStatus trans_mfsp(DisasContext *ctx, uint32_t insn,
                             const DisasInsn *di)
{
    unsigned rt = extract32(insn, 0, 5);
    TCGv tmp = dest_gpr(ctx, rt);

    /* ??? We don't implement space registers.  */
    tcg_gen_movi_tl(tmp, 0);
    save_gpr(ctx, rt, tmp);

    cond_free(&ctx->null_cond);
    return NO_EXIT;
}

/* MFCTL: read a control register.  Only SAR, the interval timer, and
   CR26/CR27 are handled; everything else raises an illegal insn.  */
static ExitStatus trans_mfctl(DisasContext *ctx, uint32_t insn,
                              const DisasInsn *di)
{
    unsigned rt = extract32(insn, 0, 5);
    unsigned ctl = extract32(insn, 21, 5);
    TCGv tmp;

    switch (ctl) {
    case 11: /* SAR */
#ifdef TARGET_HPPA64
        if (extract32(insn, 14, 1) == 0) {
            /* MFSAR without ,W masks low 5 bits.  */
            tmp = dest_gpr(ctx, rt);
            tcg_gen_andi_tl(tmp, cpu_sar, 31);
            save_gpr(ctx, rt, tmp);
            break;
        }
#endif
        save_gpr(ctx, rt, cpu_sar);
        break;
    case 16: /* Interval Timer */
        tmp = dest_gpr(ctx, rt);
        tcg_gen_movi_tl(tmp, 0); /* FIXME */
        save_gpr(ctx, rt, tmp);
        break;
    case 26:
        save_gpr(ctx, rt, cpu_cr26);
        break;
    case 27:
        save_gpr(ctx, rt, cpu_cr27);
        break;
    default:
        /* All other control registers are privileged.  */
        return gen_illegal(ctx);
    }

    cond_free(&ctx->null_cond);
    return NO_EXIT;
}

/* MTCTL: write a control register; only SAR is writable here.  */
static ExitStatus trans_mtctl(DisasContext *ctx, uint32_t insn,
                              const DisasInsn *di)
{
    unsigned rin = extract32(insn, 16, 5);
    unsigned ctl = extract32(insn, 21, 5);
    TCGv tmp;

    if (ctl == 11) { /* SAR */
        tmp = tcg_temp_new();
        /* SAR values are modulo the register width.  */
        tcg_gen_andi_tl(tmp, load_gpr(ctx, rin), TARGET_LONG_BITS - 1);
        save_or_nullify(ctx, cpu_sar, tmp);
        tcg_temp_free(tmp);
    } else {
        /* All other control registers are privileged or read-only.  */
        return gen_illegal(ctx);
    }

    cond_free(&ctx->null_cond);
    return NO_EXIT;
}

/* MTSARCM: write the one's complement of RIN (mod width) into SAR.  */
static ExitStatus trans_mtsarcm(DisasContext *ctx, uint32_t insn,
                                const DisasInsn *di)
{
    unsigned rin = extract32(insn, 16, 5);
    TCGv tmp = tcg_temp_new();

    tcg_gen_not_tl(tmp, load_gpr(ctx, rin));
    tcg_gen_andi_tl(tmp, tmp, TARGET_LONG_BITS - 1);
    save_or_nullify(ctx, cpu_sar, tmp);
    tcg_temp_free(tmp);

    cond_free(&ctx->null_cond);
    return NO_EXIT;
}

/* LDSID: load space identifier.  */
static ExitStatus trans_ldsid(DisasContext *ctx, uint32_t insn,
                              const DisasInsn *di)
{
    unsigned rt = extract32(insn, 0, 5);
    TCGv dest = dest_gpr(ctx, rt);

    /* Since we don't implement space registers, this returns zero.  */
    tcg_gen_movi_tl(dest, 0);
    save_gpr(ctx, rt, dest);

    cond_free(&ctx->null_cond);
    return NO_EXIT;
}

/* Decode table for the system-control opcode group.  */
static const DisasInsn table_system[] = {
    { 0x00000000u, 0xfc001fe0u, trans_break },
    /* We don't implement space register, so MTSP is a nop.
 */
    { 0x00001820u, 0xffe01fffu, trans_nop }, /* mtsp */
    { 0x00001840u, 0xfc00ffffu, trans_mtctl },
    { 0x016018c0u, 0xffe0ffffu, trans_mtsarcm },
    { 0x000014a0u, 0xffffffe0u, trans_mfia },
    { 0x000004a0u, 0xffff1fe0u, trans_mfsp },
    { 0x000008a0u, 0xfc1fffe0u, trans_mfctl },
    { 0x00000400u, 0xffffffffu, trans_sync },
    { 0x000010a0u, 0xfc1f3fe0u, trans_ldsid },
};

/* Perform only the base-register-modification side effect of a cache
   management instruction that is otherwise a nop here.  */
static ExitStatus trans_base_idx_mod(DisasContext *ctx, uint32_t insn,
                                     const DisasInsn *di)
{
    unsigned rb = extract32(insn, 21, 5);
    unsigned rx = extract32(insn, 16, 5);
    TCGv dest = dest_gpr(ctx, rb);
    TCGv src1 = load_gpr(ctx, rb);
    TCGv src2 = load_gpr(ctx, rx);

    /* The only thing we need to do is the base register modification.  */
    tcg_gen_add_tl(dest, src1, src2);
    save_gpr(ctx, rb, dest);

    cond_free(&ctx->null_cond);
    return NO_EXIT;
}

/* PROBE / PROBEI: test read or write access at the given address,
   delivering the result into RT via a helper.  */
static ExitStatus trans_probe(DisasContext *ctx, uint32_t insn,
                              const DisasInsn *di)
{
    unsigned rt = extract32(insn, 0, 5);
    unsigned rb = extract32(insn, 21, 5);
    unsigned is_write = extract32(insn, 6, 1);
    TCGv dest;

    nullify_over(ctx);

    /* ??? Do something with priv level operand.  */
    dest = dest_gpr(ctx, rt);
    if (is_write) {
        gen_helper_probe_w(dest, load_gpr(ctx, rb));
    } else {
        gen_helper_probe_r(dest, load_gpr(ctx, rb));
    }
    save_gpr(ctx, rt, dest);
    return nullify_end(ctx, NO_EXIT);
}

/* Decode table for memory management: cache/TLB flushes are nops here,
   except for their optional base-register modification.  */
static const DisasInsn table_mem_mgmt[] = {
    { 0x04003280u, 0xfc003fffu, trans_nop },          /* fdc, disp */
    { 0x04001280u, 0xfc003fffu, trans_nop },          /* fdc, index */
    { 0x040012a0u, 0xfc003fffu, trans_base_idx_mod }, /* fdc, index, base mod */
    { 0x040012c0u, 0xfc003fffu, trans_nop },          /* fdce */
    { 0x040012e0u, 0xfc003fffu, trans_base_idx_mod }, /* fdce, base mod */
    { 0x04000280u, 0xfc001fffu, trans_nop },          /* fic 0a */
    { 0x040002a0u, 0xfc001fffu, trans_base_idx_mod }, /* fic 0a, base mod */
    { 0x040013c0u, 0xfc003fffu, trans_nop },          /* fic 4f */
    { 0x040013e0u, 0xfc003fffu, trans_base_idx_mod }, /* fic 4f, base mod */
    { 0x040002c0u, 0xfc001fffu, trans_nop },          /* fice */
    { 0x040002e0u, 0xfc001fffu, trans_base_idx_mod }, /* fice, base mod */
    { 0x04002700u, 0xfc003fffu, trans_nop },          /* pdc */
    { 0x04002720u, 0xfc003fffu, trans_base_idx_mod }, /* pdc, base mod */
    { 0x04001180u, 0xfc003fa0u, trans_probe },        /* probe */
    { 0x04003180u, 0xfc003fa0u, trans_probe },        /* probei */
};

/* ADD and SHLADD family: decode the extension field into flag booleans
   and defer to do_add.  */
static ExitStatus trans_add(DisasContext *ctx, uint32_t insn,
                            const DisasInsn *di)
{
    unsigned r2 = extract32(insn, 21, 5);
    unsigned r1 = extract32(insn, 16, 5);
    unsigned cf = extract32(insn, 12, 4);
    unsigned ext = extract32(insn, 8, 4);
    unsigned shift = extract32(insn, 6, 2);
    unsigned rt = extract32(insn, 0, 5);
    TCGv tcg_r1, tcg_r2;
    bool is_c = false;
    bool is_l = false;
    bool is_tc = false;
    bool is_tsv = false;
    ExitStatus ret;

    switch (ext) {
    case 0x6: /* ADD, SHLADD */
        break;
    case 0xa: /* ADD,L, SHLADD,L */
        is_l = true;
        break;
    case 0xe: /* ADD,TSV, SHLADD,TSV
 */
        is_tsv = true;
        break;
    case 0x7: /* ADD,C */
        is_c = true;
        break;
    case 0xf: /* ADD,C,TSV */
        is_c = is_tsv = true;
        break;
    default:
        return gen_illegal(ctx);
    }

    if (cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, r1);
    tcg_r2 = load_gpr(ctx, r2);
    ret = do_add(ctx, rt, tcg_r1, tcg_r2, shift, is_l, is_tsv, is_tc, is_c, cf);
    return nullify_end(ctx, ret);
}

/* SUB family: decode the extension field into flag booleans and defer
   to do_sub.  */
static ExitStatus trans_sub(DisasContext *ctx, uint32_t insn,
                            const DisasInsn *di)
{
    unsigned r2 = extract32(insn, 21, 5);
    unsigned r1 = extract32(insn, 16, 5);
    unsigned cf = extract32(insn, 12, 4);
    unsigned ext = extract32(insn, 6, 6);
    unsigned rt = extract32(insn, 0, 5);
    TCGv tcg_r1, tcg_r2;
    bool is_b = false;
    bool is_tc = false;
    bool is_tsv = false;
    ExitStatus ret;

    switch (ext) {
    case 0x10: /* SUB */
        break;
    case 0x30: /* SUB,TSV */
        is_tsv = true;
        break;
    case 0x14: /* SUB,B */
        is_b = true;
        break;
    case 0x34: /* SUB,B,TSV */
        is_b = is_tsv = true;
        break;
    case 0x13: /* SUB,TC */
        is_tc = true;
        break;
    case 0x33: /* SUB,TSV,TC */
        is_tc = is_tsv = true;
        break;
    default:
        return gen_illegal(ctx);
    }

    if (cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, r1);
    tcg_r2 = load_gpr(ctx, r2);
    ret = do_sub(ctx, rt, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, cf);
    return nullify_end(ctx, ret);
}

/* Logical ops (AND/ANDCM/OR/XOR): the actual TCG generator is supplied
   by the decode table via di->f_ttt.  */
static ExitStatus trans_log(DisasContext *ctx, uint32_t insn,
                            const DisasInsn *di)
{
    unsigned r2 = extract32(insn, 21, 5);
    unsigned r1 = extract32(insn, 16, 5);
    unsigned cf = extract32(insn, 12, 4);
    unsigned rt = extract32(insn, 0, 5);
    TCGv tcg_r1, tcg_r2;
    ExitStatus ret;

    if (cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, r1);
    tcg_r2 = load_gpr(ctx, r2);
    ret = do_log(ctx, rt, tcg_r1, tcg_r2, cf, di->f_ttt);
    return nullify_end(ctx, ret);
}

/* OR r,0,t -> COPY (according to gas) */
static ExitStatus trans_copy(DisasContext *ctx, uint32_t insn,
                             const DisasInsn *di)
{
    unsigned r1 = extract32(insn, 16, 5);
    unsigned rt = extract32(insn, 0, 5);

    if (r1 == 0) {
        /* Copying r0 is loading the constant zero.  */
        TCGv dest = dest_gpr(ctx, rt);
        tcg_gen_movi_tl(dest, 0);
        save_gpr(ctx, rt, dest);
    } else {
        save_gpr(ctx, rt, cpu_gr[r1]);
    }
    cond_free(&ctx->null_cond);
    return NO_EXIT;
}

/* CMPCLR: compare and clear the target register.  */
static ExitStatus trans_cmpclr(DisasContext *ctx, uint32_t insn,
                               const DisasInsn *di)
{
    unsigned r2 = extract32(insn, 21, 5);
    unsigned r1 = extract32(insn, 16, 5);
    unsigned cf = extract32(insn, 12, 4);
    unsigned rt = extract32(insn, 0, 5);
    TCGv tcg_r1, tcg_r2;
    ExitStatus ret;

    if (cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, r1);
    tcg_r2 = load_gpr(ctx, r2);
    ret = do_cmpclr(ctx, rt, tcg_r1, tcg_r2, cf);
    return nullify_end(ctx, ret);
}

/* UXOR: unit XOR, using the unit-condition machinery of do_unit.  */
static ExitStatus trans_uxor(DisasContext *ctx, uint32_t insn,
                             const DisasInsn *di)
{
    unsigned r2 = extract32(insn, 21, 5);
    unsigned r1 = extract32(insn, 16, 5);
    unsigned cf = extract32(insn, 12, 4);
    unsigned rt = extract32(insn, 0, 5);
    TCGv tcg_r1, tcg_r2;
    ExitStatus ret;

    if (cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, r1);
    tcg_r2 = load_gpr(ctx, r2);
    ret = do_unit(ctx, rt, tcg_r1, tcg_r2, cf, false, tcg_gen_xor_tl);
    return nullify_end(ctx, ret);
}

/* UADDCM[,TC]: unit add with complement, i.e. r1 + ~r2.  */
static ExitStatus trans_uaddcm(DisasContext *ctx, uint32_t insn,
                               const DisasInsn *di)
{
    unsigned r2 = extract32(insn, 21, 5);
    unsigned r1 = extract32(insn, 16, 5);
    unsigned cf = extract32(insn, 12, 4);
    unsigned is_tc = extract32(insn, 6, 1);
    unsigned rt = extract32(insn, 0, 5);
    TCGv tcg_r1, tcg_r2, tmp;
    ExitStatus ret;

    if (cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, r1);
    tcg_r2 = load_gpr(ctx, r2);
    tmp = get_temp(ctx);
    tcg_gen_not_tl(tmp, tcg_r2);
    ret = do_unit(ctx, rt, tcg_r1, tmp, cf, is_tc, tcg_gen_add_tl);
    return nullify_end(ctx, ret);
}

/* DCOR / IDCOR: decimal correction, driven by the per-nibble carry
   bits saved in PSW[CB].  */
static ExitStatus trans_dcor(DisasContext *ctx, uint32_t insn,
                             const DisasInsn *di)
{
    unsigned r2 = extract32(insn, 21, 5);
    unsigned cf = extract32(insn, 12, 4);
    unsigned is_i = extract32(insn, 6, 1);
    unsigned rt = extract32(insn, 0, 5);
    TCGv tmp;
    ExitStatus ret;

    nullify_over(ctx);

    tmp = get_temp(ctx);
    tcg_gen_shri_tl(tmp, cpu_psw_cb, 3);
    if (!is_i) {
        tcg_gen_not_tl(tmp, tmp);
    }
    /* Produce 6 in each nibble whose carry bit is selected.  */
    tcg_gen_andi_tl(tmp, tmp, 0x11111111);
    tcg_gen_muli_tl(tmp, tmp, 6);
    ret = do_unit(ctx, rt, tmp, load_gpr(ctx, r2), cf, false,
                  is_i ? tcg_gen_add_tl : tcg_gen_sub_tl);

    return nullify_end(ctx, ret);
}

/* DS: one step of a non-restoring divide.  */
static ExitStatus trans_ds(DisasContext *ctx, uint32_t insn,
                           const DisasInsn *di)
{
    unsigned r2 = extract32(insn, 21, 5);
    unsigned r1 = extract32(insn, 16, 5);
    unsigned cf = extract32(insn, 12, 4);
    unsigned rt = extract32(insn, 0, 5);
    TCGv dest, add1, add2, addc, zero, in1, in2;

    nullify_over(ctx);

    in1 = load_gpr(ctx, r1);
    in2 = load_gpr(ctx, r2);

    add1 = tcg_temp_new();
    add2 = tcg_temp_new();
    addc = tcg_temp_new();
    dest = tcg_temp_new();
    zero = tcg_const_tl(0);

    /* Form R1 << 1 | PSW[CB]{8}.  */
    tcg_gen_add_tl(add1, in1, in1);
    tcg_gen_add_tl(add1, add1, cpu_psw_cb_msb);

    /* Add or subtract R2, depending on PSW[V].  Proper computation of
       carry{8} requires that we subtract via + ~R2 + 1, as described in
       the manual.
       By extracting and masking V, we can produce the
       proper inputs to the addition without movcond.  */
    tcg_gen_sari_tl(addc, cpu_psw_v, TARGET_LONG_BITS - 1);
    tcg_gen_xor_tl(add2, in2, addc);
    tcg_gen_andi_tl(addc, addc, 1);
    /* ??? This is only correct for 32-bit.  */
    tcg_gen_add2_i32(dest, cpu_psw_cb_msb, add1, zero, add2, zero);
    tcg_gen_add2_i32(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero);

    tcg_temp_free(addc);
    tcg_temp_free(zero);

    /* Write back the result register.  */
    save_gpr(ctx, rt, dest);

    /* Write back PSW[CB].  */
    tcg_gen_xor_tl(cpu_psw_cb, add1, add2);
    tcg_gen_xor_tl(cpu_psw_cb, cpu_psw_cb, dest);

    /* Write back PSW[V] for the division step.  */
    tcg_gen_neg_tl(cpu_psw_v, cpu_psw_cb_msb);
    tcg_gen_xor_tl(cpu_psw_v, cpu_psw_v, in2);

    /* Install the new nullification.  */
    if (cf) {
        TCGv sv;
        TCGV_UNUSED(sv);
        if (cf >> 1 == 6) {
            /* ??? The lshift is supposed to contribute to overflow.  */
            sv = do_add_sv(ctx, dest, add1, add2);
        }
        ctx->null_cond = do_cond(cf, dest, cpu_psw_cb_msb, sv);
    }

    tcg_temp_free(add1);
    tcg_temp_free(add2);
    tcg_temp_free(dest);

    return nullify_end(ctx, NO_EXIT);
}

/* Decode table for the arithmetic/logical opcode group.  */
static const DisasInsn table_arith_log[] = {
    { 0x08000240u, 0xfc00ffffu, trans_nop },  /* or x,y,0 */
    { 0x08000240u, 0xffe0ffe0u, trans_copy }, /* or x,0,t */
    { 0x08000000u, 0xfc000fe0u, trans_log, .f_ttt = tcg_gen_andc_tl },
    { 0x08000200u, 0xfc000fe0u, trans_log, .f_ttt = tcg_gen_and_tl },
    { 0x08000240u, 0xfc000fe0u, trans_log, .f_ttt = tcg_gen_or_tl },
    { 0x08000280u, 0xfc000fe0u, trans_log, .f_ttt = tcg_gen_xor_tl },
    { 0x08000880u, 0xfc000fe0u, trans_cmpclr },
    { 0x08000380u, 0xfc000fe0u, trans_uxor },
    { 0x08000980u, 0xfc000fa0u, trans_uaddcm },
    { 0x08000b80u, 0xfc1f0fa0u, trans_dcor },
    { 0x08000440u, 0xfc000fe0u, trans_ds },
    { 0x08000700u, 0xfc0007e0u, trans_add }, /* add */
    { 0x08000400u, 0xfc0006e0u, trans_sub }, /* sub; sub,b; sub,tsv */
    { 0x080004c0u, 0xfc0007e0u, trans_sub }, /* sub,tc; sub,tsv,tc */
    { 0x08000200u, 0xfc000320u, trans_add }, /* shladd */
};

/* ADDI family: add an 11-bit signed immediate; E1/O1 select the
   trap-on-overflow and trap-on-condition variants passed to do_add.  */
static ExitStatus trans_addi(DisasContext *ctx, uint32_t insn)
{
    target_long im = low_sextract(insn, 0, 11);
    unsigned e1 = extract32(insn, 11, 1);
    unsigned cf = extract32(insn, 12, 4);
    unsigned rt = extract32(insn, 16, 5);
    unsigned r2 = extract32(insn, 21, 5);
    unsigned o1 = extract32(insn, 26, 1);
    TCGv tcg_im, tcg_r2;
    ExitStatus ret;

    if (cf) {
        nullify_over(ctx);
    }

    tcg_im = load_const(ctx, im);
    tcg_r2 = load_gpr(ctx, r2);
    ret = do_add(ctx, rt, tcg_im, tcg_r2, 0, false, e1, !o1, false, cf);

    return nullify_end(ctx, ret);
}

/* SUBI family: subtract from an 11-bit signed immediate.  */
static ExitStatus trans_subi(DisasContext *ctx, uint32_t insn)
{
    target_long im = low_sextract(insn, 0, 11);
    unsigned
e1 = extract32(insn, 11, 1);
    unsigned cf = extract32(insn, 12, 4);
    unsigned rt = extract32(insn, 16, 5);
    unsigned r2 = extract32(insn, 21, 5);
    TCGv tcg_im, tcg_r2;
    ExitStatus ret;

    if (cf) {
        nullify_over(ctx);
    }

    tcg_im = load_const(ctx, im);
    tcg_r2 = load_gpr(ctx, r2);
    ret = do_sub(ctx, rt, tcg_im, tcg_r2, e1, false, false, cf);

    return nullify_end(ctx, ret);
}

/* CMPICLR: compare immediate and clear the target register.  */
static ExitStatus trans_cmpiclr(DisasContext *ctx, uint32_t insn)
{
    target_long im = low_sextract(insn, 0, 11);
    unsigned cf = extract32(insn, 12, 4);
    unsigned rt = extract32(insn, 16, 5);
    unsigned r2 = extract32(insn, 21, 5);
    TCGv tcg_im, tcg_r2;
    ExitStatus ret;

    if (cf) {
        nullify_over(ctx);
    }

    tcg_im = load_const(ctx, im);
    tcg_r2 = load_gpr(ctx, r2);
    ret = do_cmpclr(ctx, rt, tcg_im, tcg_r2, cf);

    return nullify_end(ctx, ret);
}

/* LD[BHWD] with short immediate displacement.  */
static ExitStatus trans_ld_idx_i(DisasContext *ctx, uint32_t insn,
                                 const DisasInsn *di)
{
    unsigned rt = extract32(insn, 0, 5);
    unsigned m = extract32(insn, 5, 1);
    unsigned sz = extract32(insn, 6, 2);
    unsigned a = extract32(insn, 13, 1);
    int disp = low_sextract(insn, 16, 5);
    unsigned rb = extract32(insn, 21, 5);
    /* M enables base modification; A's sign convention is decoded
       by do_load.  */
    int modify = (m ? (a ? -1 : 1) : 0);
    TCGMemOp mop = MO_TE | sz;

    return do_load(ctx, rt, rb, 0, 0, disp, modify, mop);
}

/* LD[BHWD] with index register, optionally scaled by the access size.  */
static ExitStatus trans_ld_idx_x(DisasContext *ctx, uint32_t insn,
                                 const DisasInsn *di)
{
    unsigned rt = extract32(insn, 0, 5);
    unsigned m = extract32(insn, 5, 1);
    unsigned sz = extract32(insn, 6, 2);
    unsigned u = extract32(insn, 13, 1);
    unsigned rx = extract32(insn, 16, 5);
    unsigned rb = extract32(insn, 21, 5);
    TCGMemOp mop = MO_TE | sz;

    return do_load(ctx, rt, rb, rx, u ? sz : 0, 0, m, mop);
}

/* ST[BHWD] with short immediate displacement.  */
static ExitStatus trans_st_idx_i(DisasContext *ctx, uint32_t insn,
                                 const DisasInsn *di)
{
    int disp = low_sextract(insn, 0, 5);
    unsigned m = extract32(insn, 5, 1);
    unsigned sz = extract32(insn, 6, 2);
    unsigned a = extract32(insn, 13, 1);
    unsigned rr = extract32(insn, 16, 5);
    unsigned rb = extract32(insn, 21, 5);
    int modify = (m ? (a ? -1 : 1) : 0);
    TCGMemOp mop = MO_TE | sz;

    return do_store(ctx, rr, rb, disp, modify, mop);
}

/* LDCW: load and clear word, the PA-RISC atomic primitive, implemented
   as an atomic exchange with zero.  */
static ExitStatus trans_ldcw(DisasContext *ctx, uint32_t insn,
                             const DisasInsn *di)
{
    unsigned rt = extract32(insn, 0, 5);
    unsigned m = extract32(insn, 5, 1);
    unsigned i = extract32(insn, 12, 1);
    unsigned au = extract32(insn, 13, 1);
    unsigned rx = extract32(insn, 16, 5);
    unsigned rb = extract32(insn, 21, 5);
    TCGMemOp mop = MO_TEUL | MO_ALIGN_16;
    TCGv zero, addr, base, dest;
    int modify, disp = 0, scale = 0;

    nullify_over(ctx);

    /* ??? Share more code with do_load and do_load_{32,64}.  */

    if (i) {
        /* Immediate form: the "index" field holds the displacement.  */
        modify = (m ? (au ? -1 : 1) : 0);
        disp = low_sextract(rx, 0, 5);
        rx = 0;
    } else {
        modify = m;
        if (au) {
            scale = mop & MO_SIZE;
        }
    }
    if (modify) {
        /* Base register modification.  Make sure if RT == RB, we see
           the result of the load.  */
        dest = get_temp(ctx);
    } else {
        dest = dest_gpr(ctx, rt);
    }

    addr = tcg_temp_new();
    base = load_gpr(ctx, rb);
    if (rx) {
        tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
        tcg_gen_add_tl(addr, addr, base);
    } else {
        tcg_gen_addi_tl(addr, base, disp);
    }

    zero = tcg_const_tl(0);
    /* Pre-modify (modify <= 0) uses the updated address; post-modify
       uses the unmodified base.  */
    tcg_gen_atomic_xchg_tl(dest, (modify <= 0 ? addr : base),
                           zero, MMU_USER_IDX, mop);
    if (modify) {
        save_gpr(ctx, rb, addr);
    }
    save_gpr(ctx, rt, dest);

    return nullify_end(ctx, NO_EXIT);
}

/* STBY: store bytes; the begin/end variants are implemented by helpers.  */
static ExitStatus trans_stby(DisasContext *ctx, uint32_t insn,
                             const DisasInsn *di)
{
    target_long disp = low_sextract(insn, 0, 5);
    unsigned m = extract32(insn, 5, 1);
    unsigned a = extract32(insn, 13, 1);
    unsigned rt = extract32(insn, 16, 5);
    unsigned rb = extract32(insn, 21, 5);
    TCGv addr, val;

    nullify_over(ctx);

    addr = tcg_temp_new();
    if (m || disp == 0) {
        tcg_gen_mov_tl(addr, load_gpr(ctx, rb));
    } else {
        tcg_gen_addi_tl(addr, load_gpr(ctx, rb), disp);
    }
    val = load_gpr(ctx, rt);

    if (a) {
        gen_helper_stby_e(cpu_env, addr, val);
    } else {
        gen_helper_stby_b(cpu_env, addr, val);
    }

    if (m) {
        /* Update the base: add the displacement and align down.  */
        tcg_gen_addi_tl(addr, addr, disp);
        tcg_gen_andi_tl(addr, addr, ~3);
        save_gpr(ctx, rb, addr);
    }
    tcg_temp_free(addr);

    return nullify_end(ctx, NO_EXIT);
}

/* Decode table for the indexed/short-displacement memory group.  */
static const DisasInsn table_index_mem[] = {
    { 0x0c001000u, 0xfc001300, trans_ld_idx_i }, /* LD[BHWD], im */
    { 0x0c000000u, 0xfc001300, trans_ld_idx_x }, /* LD[BHWD], rx */
    { 0x0c001200u, 0xfc001300, trans_st_idx_i }, /* ST[BHWD] */
    { 0x0c0001c0u, 0xfc0003c0, trans_ldcw },
    { 0x0c001300u, 0xfc0013c0, trans_stby },
};

/* LDIL: load a left-justified 21-bit immediate into RT.  */
static ExitStatus trans_ldil(DisasContext *ctx, uint32_t insn)
{
    unsigned rt = extract32(insn, 21, 5);
    target_long i = assemble_21(insn);
    TCGv tcg_rt = dest_gpr(ctx, rt);

    tcg_gen_movi_tl(tcg_rt, i);
    save_gpr(ctx, rt, tcg_rt);
    cond_free(&ctx->null_cond);

    return NO_EXIT;
}

/* ADDIL: add a left-justified 21-bit immediate to RT, result in r1.  */
static ExitStatus trans_addil(DisasContext *ctx, uint32_t insn)
{
    unsigned rt = extract32(insn, 21, 5);
    target_long i = assemble_21(insn);
    TCGv tcg_rt = load_gpr(ctx,
rt);
    TCGv tcg_r1 = dest_gpr(ctx, 1);

    tcg_gen_addi_tl(tcg_r1, tcg_rt, i);
    save_gpr(ctx, 1, tcg_r1);
    cond_free(&ctx->null_cond);

    return NO_EXIT;
}

/* LDO: load offset, i.e. RT = RB + imm16 (no memory access).  */
static ExitStatus trans_ldo(DisasContext *ctx, uint32_t insn)
{
    unsigned rb = extract32(insn, 21, 5);
    unsigned rt = extract32(insn, 16, 5);
    target_long i = assemble_16(insn);
    TCGv tcg_rt = dest_gpr(ctx, rt);

    /* Special case rb == 0, for the LDI pseudo-op.
       The COPY pseudo-op is handled for free within tcg_gen_addi_tl.  */
    if (rb == 0) {
        tcg_gen_movi_tl(tcg_rt, i);
    } else {
        tcg_gen_addi_tl(tcg_rt, cpu_gr[rb], i);
    }
    save_gpr(ctx, rt, tcg_rt);
    cond_free(&ctx->null_cond);

    return NO_EXIT;
}

/* Common helper for the 16-bit-displacement load opcodes.  */
static ExitStatus trans_load(DisasContext *ctx, uint32_t insn,
                             bool is_mod, TCGMemOp mop)
{
    unsigned rb = extract32(insn, 21, 5);
    unsigned rt = extract32(insn, 16, 5);
    target_long i = assemble_16(insn);

    return do_load(ctx, rt, rb, 0, 0, i, is_mod ? (i < 0 ? -1 : 1) : 0, mop);
}

/* Opcode shared by FLDW (no modify) and LDW with modification.  */
static ExitStatus trans_load_w(DisasContext *ctx, uint32_t insn)
{
    unsigned rb = extract32(insn, 21, 5);
    unsigned rt = extract32(insn, 16, 5);
    target_long i = assemble_16a(insn);
    unsigned ext2 = extract32(insn, 1, 2);

    switch (ext2) {
    case 0:
    case 1:
        /* FLDW without modification.  */
        return do_floadw(ctx, ext2 * 32 + rt, rb, 0, 0, i, 0);
    case 2:
        /* LDW with modification.  Note that the sign of I selects
           post-dec vs pre-inc.  */
        return do_load(ctx, rt, rb, 0, 0, i, (i < 0 ? 1 : -1), MO_TEUL);
    default:
        return gen_illegal(ctx);
    }
}

/* FLDW with base modification.  */
static ExitStatus trans_fload_mod(DisasContext *ctx, uint32_t insn)
{
    target_long i = assemble_16a(insn);
    unsigned t1 = extract32(insn, 1, 1);
    unsigned a = extract32(insn, 2, 1);
    unsigned t0 = extract32(insn, 16, 5);
    unsigned rb = extract32(insn, 21, 5);

    /* FLDW with modification.  */
    return do_floadw(ctx, t1 * 32 + t0, rb, 0, 0, i, (a ? -1 : 1));
}

/* Common helper for the 16-bit-displacement store opcodes.  */
static ExitStatus trans_store(DisasContext *ctx, uint32_t insn,
                              bool is_mod, TCGMemOp mop)
{
    unsigned rb = extract32(insn, 21, 5);
    unsigned rt = extract32(insn, 16, 5);
    target_long i = assemble_16(insn);

    return do_store(ctx, rt, rb, i, is_mod ? (i < 0 ? -1 : 1) : 0, mop);
}

/* Opcode shared by FSTW (no modify) and STW with modification.  */
static ExitStatus trans_store_w(DisasContext *ctx, uint32_t insn)
{
    unsigned rb = extract32(insn, 21, 5);
    unsigned rt = extract32(insn, 16, 5);
    target_long i = assemble_16a(insn);
    unsigned ext2 = extract32(insn, 1, 2);

    switch (ext2) {
    case 0:
    case 1:
        /* FSTW without modification.  */
        return do_fstorew(ctx, ext2 * 32 + rt, rb, 0, 0, i, 0);
    case 2:
        /* STW with modification.  */
        return do_store(ctx, rt, rb, i, (i < 0 ? 1 : -1), MO_TEUL);
    default:
        return gen_illegal(ctx);
    }
}

/* FSTW with base modification.  */
static ExitStatus trans_fstore_mod(DisasContext *ctx, uint32_t insn)
{
    target_long i = assemble_16a(insn);
    unsigned t1 = extract32(insn, 1, 1);
    unsigned a = extract32(insn, 2, 1);
    unsigned t0 = extract32(insn, 16, 5);
    unsigned rb = extract32(insn, 21, 5);

    /* FSTW with modification.  */
    return do_fstorew(ctx, t1 * 32 + t0, rb, 0, 0, i, (a ?
-1 : 1)); 2302 } 2303 2304 static ExitStatus trans_copr_w(DisasContext *ctx, uint32_t insn) 2305 { 2306 unsigned t0 = extract32(insn, 0, 5); 2307 unsigned m = extract32(insn, 5, 1); 2308 unsigned t1 = extract32(insn, 6, 1); 2309 unsigned ext3 = extract32(insn, 7, 3); 2310 /* unsigned cc = extract32(insn, 10, 2); */ 2311 unsigned i = extract32(insn, 12, 1); 2312 unsigned ua = extract32(insn, 13, 1); 2313 unsigned rx = extract32(insn, 16, 5); 2314 unsigned rb = extract32(insn, 21, 5); 2315 unsigned rt = t1 * 32 + t0; 2316 int modify = (m ? (ua ? -1 : 1) : 0); 2317 int disp, scale; 2318 2319 if (i == 0) { 2320 scale = (ua ? 2 : 0); 2321 disp = 0; 2322 modify = m; 2323 } else { 2324 disp = low_sextract(rx, 0, 5); 2325 scale = 0; 2326 rx = 0; 2327 modify = (m ? (ua ? -1 : 1) : 0); 2328 } 2329 2330 switch (ext3) { 2331 case 0: /* FLDW */ 2332 return do_floadw(ctx, rt, rb, rx, scale, disp, modify); 2333 case 4: /* FSTW */ 2334 return do_fstorew(ctx, rt, rb, rx, scale, disp, modify); 2335 } 2336 return gen_illegal(ctx); 2337 } 2338 2339 static ExitStatus trans_copr_dw(DisasContext *ctx, uint32_t insn) 2340 { 2341 unsigned rt = extract32(insn, 0, 5); 2342 unsigned m = extract32(insn, 5, 1); 2343 unsigned ext4 = extract32(insn, 6, 4); 2344 /* unsigned cc = extract32(insn, 10, 2); */ 2345 unsigned i = extract32(insn, 12, 1); 2346 unsigned ua = extract32(insn, 13, 1); 2347 unsigned rx = extract32(insn, 16, 5); 2348 unsigned rb = extract32(insn, 21, 5); 2349 int modify = (m ? (ua ? -1 : 1) : 0); 2350 int disp, scale; 2351 2352 if (i == 0) { 2353 scale = (ua ? 3 : 0); 2354 disp = 0; 2355 modify = m; 2356 } else { 2357 disp = low_sextract(rx, 0, 5); 2358 scale = 0; 2359 rx = 0; 2360 modify = (m ? (ua ? 
-1 : 1) : 0); 2361 } 2362 2363 switch (ext4) { 2364 case 0: /* FLDD */ 2365 return do_floadd(ctx, rt, rb, rx, scale, disp, modify); 2366 case 8: /* FSTD */ 2367 return do_fstored(ctx, rt, rb, rx, scale, disp, modify); 2368 default: 2369 return gen_illegal(ctx); 2370 } 2371 } 2372 2373 static ExitStatus trans_cmpb(DisasContext *ctx, uint32_t insn, 2374 bool is_true, bool is_imm, bool is_dw) 2375 { 2376 target_long disp = assemble_12(insn) * 4; 2377 unsigned n = extract32(insn, 1, 1); 2378 unsigned c = extract32(insn, 13, 3); 2379 unsigned r = extract32(insn, 21, 5); 2380 unsigned cf = c * 2 + !is_true; 2381 TCGv dest, in1, in2, sv; 2382 DisasCond cond; 2383 2384 nullify_over(ctx); 2385 2386 if (is_imm) { 2387 in1 = load_const(ctx, low_sextract(insn, 16, 5)); 2388 } else { 2389 in1 = load_gpr(ctx, extract32(insn, 16, 5)); 2390 } 2391 in2 = load_gpr(ctx, r); 2392 dest = get_temp(ctx); 2393 2394 tcg_gen_sub_tl(dest, in1, in2); 2395 2396 TCGV_UNUSED(sv); 2397 if (c == 6) { 2398 sv = do_sub_sv(ctx, dest, in1, in2); 2399 } 2400 2401 cond = do_sub_cond(cf, dest, in1, in2, sv); 2402 return do_cbranch(ctx, disp, n, &cond); 2403 } 2404 2405 static ExitStatus trans_addb(DisasContext *ctx, uint32_t insn, 2406 bool is_true, bool is_imm) 2407 { 2408 target_long disp = assemble_12(insn) * 4; 2409 unsigned n = extract32(insn, 1, 1); 2410 unsigned c = extract32(insn, 13, 3); 2411 unsigned r = extract32(insn, 21, 5); 2412 unsigned cf = c * 2 + !is_true; 2413 TCGv dest, in1, in2, sv, cb_msb; 2414 DisasCond cond; 2415 2416 nullify_over(ctx); 2417 2418 if (is_imm) { 2419 in1 = load_const(ctx, low_sextract(insn, 16, 5)); 2420 } else { 2421 in1 = load_gpr(ctx, extract32(insn, 16, 5)); 2422 } 2423 in2 = load_gpr(ctx, r); 2424 dest = dest_gpr(ctx, r); 2425 TCGV_UNUSED(sv); 2426 TCGV_UNUSED(cb_msb); 2427 2428 switch (c) { 2429 default: 2430 tcg_gen_add_tl(dest, in1, in2); 2431 break; 2432 case 4: case 5: 2433 cb_msb = get_temp(ctx); 2434 tcg_gen_movi_tl(cb_msb, 0); 2435 tcg_gen_add2_tl(dest, 
cb_msb, in1, cb_msb, in2, cb_msb); 2436 break; 2437 case 6: 2438 tcg_gen_add_tl(dest, in1, in2); 2439 sv = do_add_sv(ctx, dest, in1, in2); 2440 break; 2441 } 2442 2443 cond = do_cond(cf, dest, cb_msb, sv); 2444 return do_cbranch(ctx, disp, n, &cond); 2445 } 2446 2447 static ExitStatus trans_bb(DisasContext *ctx, uint32_t insn) 2448 { 2449 target_long disp = assemble_12(insn) * 4; 2450 unsigned n = extract32(insn, 1, 1); 2451 unsigned c = extract32(insn, 15, 1); 2452 unsigned r = extract32(insn, 16, 5); 2453 unsigned p = extract32(insn, 21, 5); 2454 unsigned i = extract32(insn, 26, 1); 2455 TCGv tmp, tcg_r; 2456 DisasCond cond; 2457 2458 nullify_over(ctx); 2459 2460 tmp = tcg_temp_new(); 2461 tcg_r = load_gpr(ctx, r); 2462 if (i) { 2463 tcg_gen_shli_tl(tmp, tcg_r, p); 2464 } else { 2465 tcg_gen_shl_tl(tmp, tcg_r, cpu_sar); 2466 } 2467 2468 cond = cond_make_0(c ? TCG_COND_GE : TCG_COND_LT, tmp); 2469 tcg_temp_free(tmp); 2470 return do_cbranch(ctx, disp, n, &cond); 2471 } 2472 2473 static ExitStatus trans_movb(DisasContext *ctx, uint32_t insn, bool is_imm) 2474 { 2475 target_long disp = assemble_12(insn) * 4; 2476 unsigned n = extract32(insn, 1, 1); 2477 unsigned c = extract32(insn, 13, 3); 2478 unsigned t = extract32(insn, 16, 5); 2479 unsigned r = extract32(insn, 21, 5); 2480 TCGv dest; 2481 DisasCond cond; 2482 2483 nullify_over(ctx); 2484 2485 dest = dest_gpr(ctx, r); 2486 if (is_imm) { 2487 tcg_gen_movi_tl(dest, low_sextract(t, 0, 5)); 2488 } else if (t == 0) { 2489 tcg_gen_movi_tl(dest, 0); 2490 } else { 2491 tcg_gen_mov_tl(dest, cpu_gr[t]); 2492 } 2493 2494 cond = do_sed_cond(c, dest); 2495 return do_cbranch(ctx, disp, n, &cond); 2496 } 2497 2498 static ExitStatus trans_shrpw_sar(DisasContext *ctx, uint32_t insn, 2499 const DisasInsn *di) 2500 { 2501 unsigned rt = extract32(insn, 0, 5); 2502 unsigned c = extract32(insn, 13, 3); 2503 unsigned r1 = extract32(insn, 16, 5); 2504 unsigned r2 = extract32(insn, 21, 5); 2505 TCGv dest; 2506 2507 if (c) { 2508 
nullify_over(ctx); 2509 } 2510 2511 dest = dest_gpr(ctx, rt); 2512 if (r1 == 0) { 2513 tcg_gen_ext32u_tl(dest, load_gpr(ctx, r2)); 2514 tcg_gen_shr_tl(dest, dest, cpu_sar); 2515 } else if (r1 == r2) { 2516 TCGv_i32 t32 = tcg_temp_new_i32(); 2517 tcg_gen_trunc_tl_i32(t32, load_gpr(ctx, r2)); 2518 tcg_gen_rotr_i32(t32, t32, cpu_sar); 2519 tcg_gen_extu_i32_tl(dest, t32); 2520 tcg_temp_free_i32(t32); 2521 } else { 2522 TCGv_i64 t = tcg_temp_new_i64(); 2523 TCGv_i64 s = tcg_temp_new_i64(); 2524 2525 tcg_gen_concat_tl_i64(t, load_gpr(ctx, r2), load_gpr(ctx, r1)); 2526 tcg_gen_extu_tl_i64(s, cpu_sar); 2527 tcg_gen_shr_i64(t, t, s); 2528 tcg_gen_trunc_i64_tl(dest, t); 2529 2530 tcg_temp_free_i64(t); 2531 tcg_temp_free_i64(s); 2532 } 2533 save_gpr(ctx, rt, dest); 2534 2535 /* Install the new nullification. */ 2536 cond_free(&ctx->null_cond); 2537 if (c) { 2538 ctx->null_cond = do_sed_cond(c, dest); 2539 } 2540 return nullify_end(ctx, NO_EXIT); 2541 } 2542 2543 static ExitStatus trans_shrpw_imm(DisasContext *ctx, uint32_t insn, 2544 const DisasInsn *di) 2545 { 2546 unsigned rt = extract32(insn, 0, 5); 2547 unsigned cpos = extract32(insn, 5, 5); 2548 unsigned c = extract32(insn, 13, 3); 2549 unsigned r1 = extract32(insn, 16, 5); 2550 unsigned r2 = extract32(insn, 21, 5); 2551 unsigned sa = 31 - cpos; 2552 TCGv dest, t2; 2553 2554 if (c) { 2555 nullify_over(ctx); 2556 } 2557 2558 dest = dest_gpr(ctx, rt); 2559 t2 = load_gpr(ctx, r2); 2560 if (r1 == r2) { 2561 TCGv_i32 t32 = tcg_temp_new_i32(); 2562 tcg_gen_trunc_tl_i32(t32, t2); 2563 tcg_gen_rotri_i32(t32, t32, sa); 2564 tcg_gen_extu_i32_tl(dest, t32); 2565 tcg_temp_free_i32(t32); 2566 } else if (r1 == 0) { 2567 tcg_gen_extract_tl(dest, t2, sa, 32 - sa); 2568 } else { 2569 TCGv t0 = tcg_temp_new(); 2570 tcg_gen_extract_tl(t0, t2, sa, 32 - sa); 2571 tcg_gen_deposit_tl(dest, t0, cpu_gr[r1], 32 - sa, sa); 2572 tcg_temp_free(t0); 2573 } 2574 save_gpr(ctx, rt, dest); 2575 2576 /* Install the new nullification. 
*/ 2577 cond_free(&ctx->null_cond); 2578 if (c) { 2579 ctx->null_cond = do_sed_cond(c, dest); 2580 } 2581 return nullify_end(ctx, NO_EXIT); 2582 } 2583 2584 static ExitStatus trans_extrw_sar(DisasContext *ctx, uint32_t insn, 2585 const DisasInsn *di) 2586 { 2587 unsigned clen = extract32(insn, 0, 5); 2588 unsigned is_se = extract32(insn, 10, 1); 2589 unsigned c = extract32(insn, 13, 3); 2590 unsigned rt = extract32(insn, 16, 5); 2591 unsigned rr = extract32(insn, 21, 5); 2592 unsigned len = 32 - clen; 2593 TCGv dest, src, tmp; 2594 2595 if (c) { 2596 nullify_over(ctx); 2597 } 2598 2599 dest = dest_gpr(ctx, rt); 2600 src = load_gpr(ctx, rr); 2601 tmp = tcg_temp_new(); 2602 2603 /* Recall that SAR is using big-endian bit numbering. */ 2604 tcg_gen_xori_tl(tmp, cpu_sar, TARGET_LONG_BITS - 1); 2605 if (is_se) { 2606 tcg_gen_sar_tl(dest, src, tmp); 2607 tcg_gen_sextract_tl(dest, dest, 0, len); 2608 } else { 2609 tcg_gen_shr_tl(dest, src, tmp); 2610 tcg_gen_extract_tl(dest, dest, 0, len); 2611 } 2612 tcg_temp_free(tmp); 2613 save_gpr(ctx, rt, dest); 2614 2615 /* Install the new nullification. 
*/ 2616 cond_free(&ctx->null_cond); 2617 if (c) { 2618 ctx->null_cond = do_sed_cond(c, dest); 2619 } 2620 return nullify_end(ctx, NO_EXIT); 2621 } 2622 2623 static ExitStatus trans_extrw_imm(DisasContext *ctx, uint32_t insn, 2624 const DisasInsn *di) 2625 { 2626 unsigned clen = extract32(insn, 0, 5); 2627 unsigned pos = extract32(insn, 5, 5); 2628 unsigned is_se = extract32(insn, 10, 1); 2629 unsigned c = extract32(insn, 13, 3); 2630 unsigned rt = extract32(insn, 16, 5); 2631 unsigned rr = extract32(insn, 21, 5); 2632 unsigned len = 32 - clen; 2633 unsigned cpos = 31 - pos; 2634 TCGv dest, src; 2635 2636 if (c) { 2637 nullify_over(ctx); 2638 } 2639 2640 dest = dest_gpr(ctx, rt); 2641 src = load_gpr(ctx, rr); 2642 if (is_se) { 2643 tcg_gen_sextract_tl(dest, src, cpos, len); 2644 } else { 2645 tcg_gen_extract_tl(dest, src, cpos, len); 2646 } 2647 save_gpr(ctx, rt, dest); 2648 2649 /* Install the new nullification. */ 2650 cond_free(&ctx->null_cond); 2651 if (c) { 2652 ctx->null_cond = do_sed_cond(c, dest); 2653 } 2654 return nullify_end(ctx, NO_EXIT); 2655 } 2656 2657 static const DisasInsn table_sh_ex[] = { 2658 { 0xd0000000u, 0xfc001fe0u, trans_shrpw_sar }, 2659 { 0xd0000800u, 0xfc001c00u, trans_shrpw_imm }, 2660 { 0xd0001000u, 0xfc001be0u, trans_extrw_sar }, 2661 { 0xd0001800u, 0xfc001800u, trans_extrw_imm }, 2662 }; 2663 2664 static ExitStatus trans_depw_imm_c(DisasContext *ctx, uint32_t insn, 2665 const DisasInsn *di) 2666 { 2667 unsigned clen = extract32(insn, 0, 5); 2668 unsigned cpos = extract32(insn, 5, 5); 2669 unsigned nz = extract32(insn, 10, 1); 2670 unsigned c = extract32(insn, 13, 3); 2671 target_long val = low_sextract(insn, 16, 5); 2672 unsigned rt = extract32(insn, 21, 5); 2673 unsigned len = 32 - clen; 2674 target_long mask0, mask1; 2675 TCGv dest; 2676 2677 if (c) { 2678 nullify_over(ctx); 2679 } 2680 if (cpos + len > 32) { 2681 len = 32 - cpos; 2682 } 2683 2684 dest = dest_gpr(ctx, rt); 2685 mask0 = deposit64(0, cpos, len, val); 2686 mask1 = 
deposit64(-1, cpos, len, val); 2687 2688 if (nz) { 2689 TCGv src = load_gpr(ctx, rt); 2690 if (mask1 != -1) { 2691 tcg_gen_andi_tl(dest, src, mask1); 2692 src = dest; 2693 } 2694 tcg_gen_ori_tl(dest, src, mask0); 2695 } else { 2696 tcg_gen_movi_tl(dest, mask0); 2697 } 2698 save_gpr(ctx, rt, dest); 2699 2700 /* Install the new nullification. */ 2701 cond_free(&ctx->null_cond); 2702 if (c) { 2703 ctx->null_cond = do_sed_cond(c, dest); 2704 } 2705 return nullify_end(ctx, NO_EXIT); 2706 } 2707 2708 static ExitStatus trans_depw_imm(DisasContext *ctx, uint32_t insn, 2709 const DisasInsn *di) 2710 { 2711 unsigned clen = extract32(insn, 0, 5); 2712 unsigned cpos = extract32(insn, 5, 5); 2713 unsigned nz = extract32(insn, 10, 1); 2714 unsigned c = extract32(insn, 13, 3); 2715 unsigned rr = extract32(insn, 16, 5); 2716 unsigned rt = extract32(insn, 21, 5); 2717 unsigned rs = nz ? rt : 0; 2718 unsigned len = 32 - clen; 2719 TCGv dest, val; 2720 2721 if (c) { 2722 nullify_over(ctx); 2723 } 2724 if (cpos + len > 32) { 2725 len = 32 - cpos; 2726 } 2727 2728 dest = dest_gpr(ctx, rt); 2729 val = load_gpr(ctx, rr); 2730 if (rs == 0) { 2731 tcg_gen_deposit_z_tl(dest, val, cpos, len); 2732 } else { 2733 tcg_gen_deposit_tl(dest, cpu_gr[rs], val, cpos, len); 2734 } 2735 save_gpr(ctx, rt, dest); 2736 2737 /* Install the new nullification. */ 2738 cond_free(&ctx->null_cond); 2739 if (c) { 2740 ctx->null_cond = do_sed_cond(c, dest); 2741 } 2742 return nullify_end(ctx, NO_EXIT); 2743 } 2744 2745 static ExitStatus trans_depw_sar(DisasContext *ctx, uint32_t insn, 2746 const DisasInsn *di) 2747 { 2748 unsigned clen = extract32(insn, 0, 5); 2749 unsigned nz = extract32(insn, 10, 1); 2750 unsigned i = extract32(insn, 12, 1); 2751 unsigned c = extract32(insn, 13, 3); 2752 unsigned rt = extract32(insn, 21, 5); 2753 unsigned rs = nz ? 
rt : 0; 2754 unsigned len = 32 - clen; 2755 TCGv val, mask, tmp, shift, dest; 2756 unsigned msb = 1U << (len - 1); 2757 2758 if (c) { 2759 nullify_over(ctx); 2760 } 2761 2762 if (i) { 2763 val = load_const(ctx, low_sextract(insn, 16, 5)); 2764 } else { 2765 val = load_gpr(ctx, extract32(insn, 16, 5)); 2766 } 2767 dest = dest_gpr(ctx, rt); 2768 shift = tcg_temp_new(); 2769 tmp = tcg_temp_new(); 2770 2771 /* Convert big-endian bit numbering in SAR to left-shift. */ 2772 tcg_gen_xori_tl(shift, cpu_sar, TARGET_LONG_BITS - 1); 2773 2774 mask = tcg_const_tl(msb + (msb - 1)); 2775 tcg_gen_and_tl(tmp, val, mask); 2776 if (rs) { 2777 tcg_gen_shl_tl(mask, mask, shift); 2778 tcg_gen_shl_tl(tmp, tmp, shift); 2779 tcg_gen_andc_tl(dest, cpu_gr[rs], mask); 2780 tcg_gen_or_tl(dest, dest, tmp); 2781 } else { 2782 tcg_gen_shl_tl(dest, tmp, shift); 2783 } 2784 tcg_temp_free(shift); 2785 tcg_temp_free(mask); 2786 tcg_temp_free(tmp); 2787 save_gpr(ctx, rt, dest); 2788 2789 /* Install the new nullification. */ 2790 cond_free(&ctx->null_cond); 2791 if (c) { 2792 ctx->null_cond = do_sed_cond(c, dest); 2793 } 2794 return nullify_end(ctx, NO_EXIT); 2795 } 2796 2797 static const DisasInsn table_depw[] = { 2798 { 0xd4000000u, 0xfc000be0u, trans_depw_sar }, 2799 { 0xd4000800u, 0xfc001800u, trans_depw_imm }, 2800 { 0xd4001800u, 0xfc001800u, trans_depw_imm_c }, 2801 }; 2802 2803 static ExitStatus trans_be(DisasContext *ctx, uint32_t insn, bool is_l) 2804 { 2805 unsigned n = extract32(insn, 1, 1); 2806 unsigned b = extract32(insn, 21, 5); 2807 target_long disp = assemble_17(insn); 2808 2809 /* unsigned s = low_uextract(insn, 13, 3); */ 2810 /* ??? It seems like there should be a good way of using 2811 "be disp(sr2, r0)", the canonical gateway entry mechanism 2812 to our advantage. But that appears to be inconvenient to 2813 manage along side branch delay slots. Therefore we handle 2814 entry into the gateway page via absolute address. */ 2815 2816 /* Since we don't implement spaces, just branch. 
Do notice the special 2817 case of "be disp(*,r0)" using a direct branch to disp, so that we can 2818 goto_tb to the TB containing the syscall. */ 2819 if (b == 0) { 2820 return do_dbranch(ctx, disp, is_l ? 31 : 0, n); 2821 } else { 2822 TCGv tmp = get_temp(ctx); 2823 tcg_gen_addi_tl(tmp, load_gpr(ctx, b), disp); 2824 return do_ibranch(ctx, tmp, is_l ? 31 : 0, n); 2825 } 2826 } 2827 2828 static ExitStatus trans_bl(DisasContext *ctx, uint32_t insn, 2829 const DisasInsn *di) 2830 { 2831 unsigned n = extract32(insn, 1, 1); 2832 unsigned link = extract32(insn, 21, 5); 2833 target_long disp = assemble_17(insn); 2834 2835 return do_dbranch(ctx, iaoq_dest(ctx, disp), link, n); 2836 } 2837 2838 static ExitStatus trans_bl_long(DisasContext *ctx, uint32_t insn, 2839 const DisasInsn *di) 2840 { 2841 unsigned n = extract32(insn, 1, 1); 2842 target_long disp = assemble_22(insn); 2843 2844 return do_dbranch(ctx, iaoq_dest(ctx, disp), 2, n); 2845 } 2846 2847 static ExitStatus trans_blr(DisasContext *ctx, uint32_t insn, 2848 const DisasInsn *di) 2849 { 2850 unsigned n = extract32(insn, 1, 1); 2851 unsigned rx = extract32(insn, 16, 5); 2852 unsigned link = extract32(insn, 21, 5); 2853 TCGv tmp = get_temp(ctx); 2854 2855 tcg_gen_shli_tl(tmp, load_gpr(ctx, rx), 3); 2856 tcg_gen_addi_tl(tmp, tmp, ctx->iaoq_f + 8); 2857 return do_ibranch(ctx, tmp, link, n); 2858 } 2859 2860 static ExitStatus trans_bv(DisasContext *ctx, uint32_t insn, 2861 const DisasInsn *di) 2862 { 2863 unsigned n = extract32(insn, 1, 1); 2864 unsigned rx = extract32(insn, 16, 5); 2865 unsigned rb = extract32(insn, 21, 5); 2866 TCGv dest; 2867 2868 if (rx == 0) { 2869 dest = load_gpr(ctx, rb); 2870 } else { 2871 dest = get_temp(ctx); 2872 tcg_gen_shli_tl(dest, load_gpr(ctx, rx), 3); 2873 tcg_gen_add_tl(dest, dest, load_gpr(ctx, rb)); 2874 } 2875 return do_ibranch(ctx, dest, 0, n); 2876 } 2877 2878 static ExitStatus trans_bve(DisasContext *ctx, uint32_t insn, 2879 const DisasInsn *di) 2880 { 2881 unsigned n = 
extract32(insn, 1, 1); 2882 unsigned rb = extract32(insn, 21, 5); 2883 unsigned link = extract32(insn, 13, 1) ? 2 : 0; 2884 2885 return do_ibranch(ctx, load_gpr(ctx, rb), link, n); 2886 } 2887 2888 static const DisasInsn table_branch[] = { 2889 { 0xe8000000u, 0xfc006000u, trans_bl }, /* B,L and B,L,PUSH */ 2890 { 0xe800a000u, 0xfc00e000u, trans_bl_long }, 2891 { 0xe8004000u, 0xfc00fffdu, trans_blr }, 2892 { 0xe800c000u, 0xfc00fffdu, trans_bv }, 2893 { 0xe800d000u, 0xfc00dffcu, trans_bve }, 2894 }; 2895 2896 static ExitStatus translate_table_int(DisasContext *ctx, uint32_t insn, 2897 const DisasInsn table[], size_t n) 2898 { 2899 size_t i; 2900 for (i = 0; i < n; ++i) { 2901 if ((insn & table[i].mask) == table[i].insn) { 2902 return table[i].trans(ctx, insn, &table[i]); 2903 } 2904 } 2905 return gen_illegal(ctx); 2906 } 2907 2908 #define translate_table(ctx, insn, table) \ 2909 translate_table_int(ctx, insn, table, ARRAY_SIZE(table)) 2910 2911 static ExitStatus translate_one(DisasContext *ctx, uint32_t insn) 2912 { 2913 uint32_t opc = extract32(insn, 26, 6); 2914 2915 switch (opc) { 2916 case 0x00: /* system op */ 2917 return translate_table(ctx, insn, table_system); 2918 case 0x01: 2919 return translate_table(ctx, insn, table_mem_mgmt); 2920 case 0x02: 2921 return translate_table(ctx, insn, table_arith_log); 2922 case 0x03: 2923 return translate_table(ctx, insn, table_index_mem); 2924 case 0x08: 2925 return trans_ldil(ctx, insn); 2926 case 0x09: 2927 return trans_copr_w(ctx, insn); 2928 case 0x0A: 2929 return trans_addil(ctx, insn); 2930 case 0x0B: 2931 return trans_copr_dw(ctx, insn); 2932 case 0x0D: 2933 return trans_ldo(ctx, insn); 2934 2935 case 0x10: 2936 return trans_load(ctx, insn, false, MO_UB); 2937 case 0x11: 2938 return trans_load(ctx, insn, false, MO_TEUW); 2939 case 0x12: 2940 return trans_load(ctx, insn, false, MO_TEUL); 2941 case 0x13: 2942 return trans_load(ctx, insn, true, MO_TEUL); 2943 case 0x16: 2944 return trans_fload_mod(ctx, insn); 2945 case 
0x17: 2946 return trans_load_w(ctx, insn); 2947 case 0x18: 2948 return trans_store(ctx, insn, false, MO_UB); 2949 case 0x19: 2950 return trans_store(ctx, insn, false, MO_TEUW); 2951 case 0x1A: 2952 return trans_store(ctx, insn, false, MO_TEUL); 2953 case 0x1B: 2954 return trans_store(ctx, insn, true, MO_TEUL); 2955 case 0x1E: 2956 return trans_fstore_mod(ctx, insn); 2957 case 0x1F: 2958 return trans_store_w(ctx, insn); 2959 2960 case 0x20: 2961 return trans_cmpb(ctx, insn, true, false, false); 2962 case 0x21: 2963 return trans_cmpb(ctx, insn, true, true, false); 2964 case 0x22: 2965 return trans_cmpb(ctx, insn, false, false, false); 2966 case 0x23: 2967 return trans_cmpb(ctx, insn, false, true, false); 2968 case 0x24: 2969 return trans_cmpiclr(ctx, insn); 2970 case 0x25: 2971 return trans_subi(ctx, insn); 2972 case 0x27: 2973 return trans_cmpb(ctx, insn, true, false, true); 2974 case 0x28: 2975 return trans_addb(ctx, insn, true, false); 2976 case 0x29: 2977 return trans_addb(ctx, insn, true, true); 2978 case 0x2A: 2979 return trans_addb(ctx, insn, false, false); 2980 case 0x2B: 2981 return trans_addb(ctx, insn, false, true); 2982 case 0x2C: 2983 case 0x2D: 2984 return trans_addi(ctx, insn); 2985 case 0x2F: 2986 return trans_cmpb(ctx, insn, false, false, true); 2987 2988 case 0x30: 2989 case 0x31: 2990 return trans_bb(ctx, insn); 2991 case 0x32: 2992 return trans_movb(ctx, insn, false); 2993 case 0x33: 2994 return trans_movb(ctx, insn, true); 2995 case 0x34: 2996 return translate_table(ctx, insn, table_sh_ex); 2997 case 0x35: 2998 return translate_table(ctx, insn, table_depw); 2999 case 0x38: 3000 return trans_be(ctx, insn, false); 3001 case 0x39: 3002 return trans_be(ctx, insn, true); 3003 case 0x3A: 3004 return translate_table(ctx, insn, table_branch); 3005 3006 case 0x04: /* spopn */ 3007 case 0x05: /* diag */ 3008 case 0x0F: /* product specific */ 3009 break; 3010 3011 case 0x07: /* unassigned */ 3012 case 0x15: /* unassigned */ 3013 case 0x1D: /* unassigned */ 
3014 case 0x37: /* unassigned */ 3015 case 0x3F: /* unassigned */ 3016 default: 3017 break; 3018 } 3019 return gen_illegal(ctx); 3020 } 3021 3022 void gen_intermediate_code(CPUHPPAState *env, struct TranslationBlock *tb) 3023 { 3024 HPPACPU *cpu = hppa_env_get_cpu(env); 3025 CPUState *cs = CPU(cpu); 3026 DisasContext ctx; 3027 ExitStatus ret; 3028 int num_insns, max_insns, i; 3029 3030 ctx.tb = tb; 3031 ctx.cs = cs; 3032 ctx.iaoq_f = tb->pc; 3033 ctx.iaoq_b = tb->cs_base; 3034 ctx.singlestep_enabled = cs->singlestep_enabled; 3035 3036 ctx.ntemps = 0; 3037 for (i = 0; i < ARRAY_SIZE(ctx.temps); ++i) { 3038 TCGV_UNUSED(ctx.temps[i]); 3039 } 3040 3041 /* Compute the maximum number of insns to execute, as bounded by 3042 (1) icount, (2) single-stepping, (3) branch delay slots, or 3043 (4) the number of insns remaining on the current page. */ 3044 max_insns = tb->cflags & CF_COUNT_MASK; 3045 if (max_insns == 0) { 3046 max_insns = CF_COUNT_MASK; 3047 } 3048 if (ctx.singlestep_enabled || singlestep) { 3049 max_insns = 1; 3050 } else if (max_insns > TCG_MAX_INSNS) { 3051 max_insns = TCG_MAX_INSNS; 3052 } 3053 3054 num_insns = 0; 3055 gen_tb_start(tb); 3056 3057 /* Seed the nullification status from PSW[N], as shown in TB->FLAGS. */ 3058 ctx.null_cond = cond_make_f(); 3059 ctx.psw_n_nonzero = false; 3060 if (tb->flags & 1) { 3061 ctx.null_cond.c = TCG_COND_ALWAYS; 3062 ctx.psw_n_nonzero = true; 3063 } 3064 ctx.null_lab = NULL; 3065 3066 do { 3067 tcg_gen_insn_start(ctx.iaoq_f, ctx.iaoq_b); 3068 num_insns++; 3069 3070 if (unlikely(cpu_breakpoint_test(cs, ctx.iaoq_f, BP_ANY))) { 3071 ret = gen_excp(&ctx, EXCP_DEBUG); 3072 break; 3073 } 3074 if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) { 3075 gen_io_start(); 3076 } 3077 3078 if (ctx.iaoq_f < TARGET_PAGE_SIZE) { 3079 ret = do_page_zero(&ctx); 3080 assert(ret != NO_EXIT); 3081 } else { 3082 /* Always fetch the insn, even if nullified, so that we check 3083 the page permissions for execute. 
*/ 3084 uint32_t insn = cpu_ldl_code(env, ctx.iaoq_f); 3085 3086 /* Set up the IA queue for the next insn. 3087 This will be overwritten by a branch. */ 3088 if (ctx.iaoq_b == -1) { 3089 ctx.iaoq_n = -1; 3090 ctx.iaoq_n_var = get_temp(&ctx); 3091 tcg_gen_addi_tl(ctx.iaoq_n_var, cpu_iaoq_b, 4); 3092 } else { 3093 ctx.iaoq_n = ctx.iaoq_b + 4; 3094 TCGV_UNUSED(ctx.iaoq_n_var); 3095 } 3096 3097 if (unlikely(ctx.null_cond.c == TCG_COND_ALWAYS)) { 3098 ctx.null_cond.c = TCG_COND_NEVER; 3099 ret = NO_EXIT; 3100 } else { 3101 ret = translate_one(&ctx, insn); 3102 assert(ctx.null_lab == NULL); 3103 } 3104 } 3105 3106 for (i = 0; i < ctx.ntemps; ++i) { 3107 tcg_temp_free(ctx.temps[i]); 3108 TCGV_UNUSED(ctx.temps[i]); 3109 } 3110 ctx.ntemps = 0; 3111 3112 /* If we see non-linear instructions, exhaust instruction count, 3113 or run out of buffer space, stop generation. */ 3114 /* ??? The non-linear instruction restriction is purely due to 3115 the debugging dump. Otherwise we *could* follow unconditional 3116 branches within the same page. 
*/ 3117 if (ret == NO_EXIT 3118 && (ctx.iaoq_b != ctx.iaoq_f + 4 3119 || num_insns >= max_insns 3120 || tcg_op_buf_full())) { 3121 if (ctx.null_cond.c == TCG_COND_NEVER 3122 || ctx.null_cond.c == TCG_COND_ALWAYS) { 3123 nullify_set(&ctx, ctx.null_cond.c == TCG_COND_ALWAYS); 3124 gen_goto_tb(&ctx, 0, ctx.iaoq_b, ctx.iaoq_n); 3125 ret = EXIT_GOTO_TB; 3126 } else { 3127 ret = EXIT_IAQ_N_STALE; 3128 } 3129 } 3130 3131 ctx.iaoq_f = ctx.iaoq_b; 3132 ctx.iaoq_b = ctx.iaoq_n; 3133 if (ret == EXIT_NORETURN 3134 || ret == EXIT_GOTO_TB 3135 || ret == EXIT_IAQ_N_UPDATED) { 3136 break; 3137 } 3138 if (ctx.iaoq_f == -1) { 3139 tcg_gen_mov_tl(cpu_iaoq_f, cpu_iaoq_b); 3140 copy_iaoq_entry(cpu_iaoq_b, ctx.iaoq_n, ctx.iaoq_n_var); 3141 nullify_save(&ctx); 3142 ret = EXIT_IAQ_N_UPDATED; 3143 break; 3144 } 3145 if (ctx.iaoq_b == -1) { 3146 tcg_gen_mov_tl(cpu_iaoq_b, ctx.iaoq_n_var); 3147 } 3148 } while (ret == NO_EXIT); 3149 3150 if (tb->cflags & CF_LAST_IO) { 3151 gen_io_end(); 3152 } 3153 3154 switch (ret) { 3155 case EXIT_GOTO_TB: 3156 case EXIT_NORETURN: 3157 break; 3158 case EXIT_IAQ_N_STALE: 3159 copy_iaoq_entry(cpu_iaoq_f, ctx.iaoq_f, cpu_iaoq_f); 3160 copy_iaoq_entry(cpu_iaoq_b, ctx.iaoq_b, cpu_iaoq_b); 3161 nullify_save(&ctx); 3162 /* FALLTHRU */ 3163 case EXIT_IAQ_N_UPDATED: 3164 if (ctx.singlestep_enabled) { 3165 gen_excp_1(EXCP_DEBUG); 3166 } else { 3167 tcg_gen_exit_tb(0); 3168 } 3169 break; 3170 default: 3171 abort(); 3172 } 3173 3174 gen_tb_end(tb, num_insns); 3175 3176 tb->size = num_insns * 4; 3177 tb->icount = num_insns; 3178 3179 #ifdef DEBUG_DISAS 3180 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM) 3181 && qemu_log_in_addr_range(tb->pc)) { 3182 qemu_log_lock(); 3183 switch (tb->pc) { 3184 case 0x00: 3185 qemu_log("IN:\n0x00000000: (null)\n\n"); 3186 break; 3187 case 0xb0: 3188 qemu_log("IN:\n0x000000b0: light-weight-syscall\n\n"); 3189 break; 3190 case 0xe0: 3191 qemu_log("IN:\n0x000000e0: set-thread-pointer-syscall\n\n"); 3192 break; 3193 case 0x100: 3194 
qemu_log("IN:\n0x00000100: syscall\n\n"); 3195 break; 3196 default: 3197 qemu_log("IN: %s\n", lookup_symbol(tb->pc)); 3198 log_target_disas(cs, tb->pc, tb->size, 1); 3199 qemu_log("\n"); 3200 break; 3201 } 3202 qemu_log_unlock(); 3203 } 3204 #endif 3205 } 3206 3207 void restore_state_to_opc(CPUHPPAState *env, TranslationBlock *tb, 3208 target_ulong *data) 3209 { 3210 env->iaoq_f = data[0]; 3211 if (data[1] != -1) { 3212 env->iaoq_b = data[1]; 3213 } 3214 /* Since we were executing the instruction at IAOQ_F, and took some 3215 sort of action that provoked the cpu_restore_state, we can infer 3216 that the instruction was not nullified. */ 3217 env->psw_n = 0; 3218 } 3219