1 /* 2 * HPPA emulation cpu translation for qemu. 3 * 4 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net> 5 * 6 * This library is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU Lesser General Public 8 * License as published by the Free Software Foundation; either 9 * version 2 of the License, or (at your option) any later version. 10 * 11 * This library is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * Lesser General Public License for more details. 15 * 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #include "qemu/osdep.h" 21 #include "cpu.h" 22 #include "disas/disas.h" 23 #include "qemu/host-utils.h" 24 #include "exec/exec-all.h" 25 #include "tcg-op.h" 26 #include "exec/cpu_ldst.h" 27 28 #include "exec/helper-proto.h" 29 #include "exec/helper-gen.h" 30 31 #include "trace-tcg.h" 32 #include "exec/log.h" 33 34 typedef struct DisasCond { 35 TCGCond c; 36 TCGv a0, a1; 37 bool a0_is_n; 38 bool a1_is_0; 39 } DisasCond; 40 41 typedef struct DisasContext { 42 struct TranslationBlock *tb; 43 CPUState *cs; 44 45 target_ulong iaoq_f; 46 target_ulong iaoq_b; 47 target_ulong iaoq_n; 48 TCGv iaoq_n_var; 49 50 int ntemps; 51 TCGv temps[8]; 52 53 DisasCond null_cond; 54 TCGLabel *null_lab; 55 56 bool singlestep_enabled; 57 bool psw_n_nonzero; 58 } DisasContext; 59 60 /* Return values from translate_one, indicating the state of the TB. 61 Note that zero indicates that we are not exiting the TB. */ 62 63 typedef enum { 64 NO_EXIT, 65 66 /* We have emitted one or more goto_tb. No fixup required. */ 67 EXIT_GOTO_TB, 68 69 /* We are not using a goto_tb (for whatever reason), but have updated 70 the iaq (for whatever reason), so don't do it again on exit. 
*/ 71 EXIT_IAQ_N_UPDATED, 72 73 /* We are exiting the TB, but have neither emitted a goto_tb, nor 74 updated the iaq for the next instruction to be executed. */ 75 EXIT_IAQ_N_STALE, 76 77 /* We are ending the TB with a noreturn function call, e.g. longjmp. 78 No following code will be executed. */ 79 EXIT_NORETURN, 80 } ExitStatus; 81 82 typedef struct DisasInsn { 83 uint32_t insn, mask; 84 ExitStatus (*trans)(DisasContext *ctx, uint32_t insn, 85 const struct DisasInsn *f); 86 union { 87 void (*f_ttt)(TCGv, TCGv, TCGv); 88 }; 89 } DisasInsn; 90 91 /* global register indexes */ 92 static TCGv_env cpu_env; 93 static TCGv cpu_gr[32]; 94 static TCGv cpu_iaoq_f; 95 static TCGv cpu_iaoq_b; 96 static TCGv cpu_sar; 97 static TCGv cpu_psw_n; 98 static TCGv cpu_psw_v; 99 static TCGv cpu_psw_cb; 100 static TCGv cpu_psw_cb_msb; 101 static TCGv cpu_cr26; 102 static TCGv cpu_cr27; 103 104 #include "exec/gen-icount.h" 105 106 void hppa_translate_init(void) 107 { 108 #define DEF_VAR(V) { &cpu_##V, #V, offsetof(CPUHPPAState, V) } 109 110 typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar; 111 static const GlobalVar vars[] = { 112 DEF_VAR(sar), 113 DEF_VAR(cr26), 114 DEF_VAR(cr27), 115 DEF_VAR(psw_n), 116 DEF_VAR(psw_v), 117 DEF_VAR(psw_cb), 118 DEF_VAR(psw_cb_msb), 119 DEF_VAR(iaoq_f), 120 DEF_VAR(iaoq_b), 121 }; 122 123 #undef DEF_VAR 124 125 /* Use the symbolic register names that match the disassembler. 
*/ 126 static const char gr_names[32][4] = { 127 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", 128 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", 129 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23", 130 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31" 131 }; 132 133 static bool done_init = 0; 134 int i; 135 136 if (done_init) { 137 return; 138 } 139 done_init = 1; 140 141 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env"); 142 tcg_ctx.tcg_env = cpu_env; 143 144 TCGV_UNUSED(cpu_gr[0]); 145 for (i = 1; i < 32; i++) { 146 cpu_gr[i] = tcg_global_mem_new(cpu_env, 147 offsetof(CPUHPPAState, gr[i]), 148 gr_names[i]); 149 } 150 151 for (i = 0; i < ARRAY_SIZE(vars); ++i) { 152 const GlobalVar *v = &vars[i]; 153 *v->var = tcg_global_mem_new(cpu_env, v->ofs, v->name); 154 } 155 } 156 157 static DisasCond cond_make_f(void) 158 { 159 DisasCond r = { .c = TCG_COND_NEVER }; 160 TCGV_UNUSED(r.a0); 161 TCGV_UNUSED(r.a1); 162 return r; 163 } 164 165 static DisasCond cond_make_n(void) 166 { 167 DisasCond r = { .c = TCG_COND_NE, .a0_is_n = true, .a1_is_0 = true }; 168 r.a0 = cpu_psw_n; 169 TCGV_UNUSED(r.a1); 170 return r; 171 } 172 173 static DisasCond cond_make_0(TCGCond c, TCGv a0) 174 { 175 DisasCond r = { .c = c, .a1_is_0 = true }; 176 177 assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS); 178 r.a0 = tcg_temp_new(); 179 tcg_gen_mov_tl(r.a0, a0); 180 TCGV_UNUSED(r.a1); 181 182 return r; 183 } 184 185 static DisasCond cond_make(TCGCond c, TCGv a0, TCGv a1) 186 { 187 DisasCond r = { .c = c }; 188 189 assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS); 190 r.a0 = tcg_temp_new(); 191 tcg_gen_mov_tl(r.a0, a0); 192 r.a1 = tcg_temp_new(); 193 tcg_gen_mov_tl(r.a1, a1); 194 195 return r; 196 } 197 198 static void cond_prep(DisasCond *cond) 199 { 200 if (cond->a1_is_0) { 201 cond->a1_is_0 = false; 202 cond->a1 = tcg_const_tl(0); 203 } 204 } 205 206 static void cond_free(DisasCond *cond) 207 { 208 switch (cond->c) { 209 default: 210 if (!cond->a0_is_n) { 211 
tcg_temp_free(cond->a0); 212 } 213 if (!cond->a1_is_0) { 214 tcg_temp_free(cond->a1); 215 } 216 cond->a0_is_n = false; 217 cond->a1_is_0 = false; 218 TCGV_UNUSED(cond->a0); 219 TCGV_UNUSED(cond->a1); 220 /* fallthru */ 221 case TCG_COND_ALWAYS: 222 cond->c = TCG_COND_NEVER; 223 break; 224 case TCG_COND_NEVER: 225 break; 226 } 227 } 228 229 static TCGv get_temp(DisasContext *ctx) 230 { 231 unsigned i = ctx->ntemps++; 232 g_assert(i < ARRAY_SIZE(ctx->temps)); 233 return ctx->temps[i] = tcg_temp_new(); 234 } 235 236 static TCGv load_const(DisasContext *ctx, target_long v) 237 { 238 TCGv t = get_temp(ctx); 239 tcg_gen_movi_tl(t, v); 240 return t; 241 } 242 243 static TCGv load_gpr(DisasContext *ctx, unsigned reg) 244 { 245 if (reg == 0) { 246 TCGv t = get_temp(ctx); 247 tcg_gen_movi_tl(t, 0); 248 return t; 249 } else { 250 return cpu_gr[reg]; 251 } 252 } 253 254 static TCGv dest_gpr(DisasContext *ctx, unsigned reg) 255 { 256 if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) { 257 return get_temp(ctx); 258 } else { 259 return cpu_gr[reg]; 260 } 261 } 262 263 static void save_or_nullify(DisasContext *ctx, TCGv dest, TCGv t) 264 { 265 if (ctx->null_cond.c != TCG_COND_NEVER) { 266 cond_prep(&ctx->null_cond); 267 tcg_gen_movcond_tl(ctx->null_cond.c, dest, ctx->null_cond.a0, 268 ctx->null_cond.a1, dest, t); 269 } else { 270 tcg_gen_mov_tl(dest, t); 271 } 272 } 273 274 static void save_gpr(DisasContext *ctx, unsigned reg, TCGv t) 275 { 276 if (reg != 0) { 277 save_or_nullify(ctx, cpu_gr[reg], t); 278 } 279 } 280 281 /* Skip over the implementation of an insn that has been nullified. 282 Use this when the insn is too complex for a conditional move. */ 283 static void nullify_over(DisasContext *ctx) 284 { 285 if (ctx->null_cond.c != TCG_COND_NEVER) { 286 /* The always condition should have been handled in the main loop. 
*/ 287 assert(ctx->null_cond.c != TCG_COND_ALWAYS); 288 289 ctx->null_lab = gen_new_label(); 290 cond_prep(&ctx->null_cond); 291 292 /* If we're using PSW[N], copy it to a temp because... */ 293 if (ctx->null_cond.a0_is_n) { 294 ctx->null_cond.a0_is_n = false; 295 ctx->null_cond.a0 = tcg_temp_new(); 296 tcg_gen_mov_tl(ctx->null_cond.a0, cpu_psw_n); 297 } 298 /* ... we clear it before branching over the implementation, 299 so that (1) it's clear after nullifying this insn and 300 (2) if this insn nullifies the next, PSW[N] is valid. */ 301 if (ctx->psw_n_nonzero) { 302 ctx->psw_n_nonzero = false; 303 tcg_gen_movi_tl(cpu_psw_n, 0); 304 } 305 306 tcg_gen_brcond_tl(ctx->null_cond.c, ctx->null_cond.a0, 307 ctx->null_cond.a1, ctx->null_lab); 308 cond_free(&ctx->null_cond); 309 } 310 } 311 312 /* Save the current nullification state to PSW[N]. */ 313 static void nullify_save(DisasContext *ctx) 314 { 315 if (ctx->null_cond.c == TCG_COND_NEVER) { 316 if (ctx->psw_n_nonzero) { 317 tcg_gen_movi_tl(cpu_psw_n, 0); 318 } 319 return; 320 } 321 if (!ctx->null_cond.a0_is_n) { 322 cond_prep(&ctx->null_cond); 323 tcg_gen_setcond_tl(ctx->null_cond.c, cpu_psw_n, 324 ctx->null_cond.a0, ctx->null_cond.a1); 325 ctx->psw_n_nonzero = true; 326 } 327 cond_free(&ctx->null_cond); 328 } 329 330 /* Set a PSW[N] to X. The intention is that this is used immediately 331 before a goto_tb/exit_tb, so that there is no fallthru path to other 332 code within the TB. Therefore we do not update psw_n_nonzero. */ 333 static void nullify_set(DisasContext *ctx, bool x) 334 { 335 if (ctx->psw_n_nonzero || x) { 336 tcg_gen_movi_tl(cpu_psw_n, x); 337 } 338 } 339 340 /* Mark the end of an instruction that may have been nullified. 341 This is the pair to nullify_over. 
*/ 342 static ExitStatus nullify_end(DisasContext *ctx, ExitStatus status) 343 { 344 TCGLabel *null_lab = ctx->null_lab; 345 346 if (likely(null_lab == NULL)) { 347 /* The current insn wasn't conditional or handled the condition 348 applied to it without a branch, so the (new) setting of 349 NULL_COND can be applied directly to the next insn. */ 350 return status; 351 } 352 ctx->null_lab = NULL; 353 354 if (likely(ctx->null_cond.c == TCG_COND_NEVER)) { 355 /* The next instruction will be unconditional, 356 and NULL_COND already reflects that. */ 357 gen_set_label(null_lab); 358 } else { 359 /* The insn that we just executed is itself nullifying the next 360 instruction. Store the condition in the PSW[N] global. 361 We asserted PSW[N] = 0 in nullify_over, so that after the 362 label we have the proper value in place. */ 363 nullify_save(ctx); 364 gen_set_label(null_lab); 365 ctx->null_cond = cond_make_n(); 366 } 367 368 assert(status != EXIT_GOTO_TB && status != EXIT_IAQ_N_UPDATED); 369 if (status == EXIT_NORETURN) { 370 status = NO_EXIT; 371 } 372 return status; 373 } 374 375 static void copy_iaoq_entry(TCGv dest, target_ulong ival, TCGv vval) 376 { 377 if (unlikely(ival == -1)) { 378 tcg_gen_mov_tl(dest, vval); 379 } else { 380 tcg_gen_movi_tl(dest, ival); 381 } 382 } 383 384 static inline target_ulong iaoq_dest(DisasContext *ctx, target_long disp) 385 { 386 return ctx->iaoq_f + disp + 8; 387 } 388 389 static void gen_excp_1(int exception) 390 { 391 TCGv_i32 t = tcg_const_i32(exception); 392 gen_helper_excp(cpu_env, t); 393 tcg_temp_free_i32(t); 394 } 395 396 static ExitStatus gen_excp(DisasContext *ctx, int exception) 397 { 398 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f); 399 copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b); 400 nullify_save(ctx); 401 gen_excp_1(exception); 402 return EXIT_NORETURN; 403 } 404 405 static ExitStatus gen_illegal(DisasContext *ctx) 406 { 407 nullify_over(ctx); 408 return nullify_end(ctx, gen_excp(ctx, EXCP_SIGILL)); 409 
} 410 411 static bool use_goto_tb(DisasContext *ctx, target_ulong dest) 412 { 413 /* Suppress goto_tb in the case of single-steping and IO. */ 414 if ((ctx->tb->cflags & CF_LAST_IO) || ctx->singlestep_enabled) { 415 return false; 416 } 417 return true; 418 } 419 420 /* If the next insn is to be nullified, and it's on the same page, 421 and we're not attempting to set a breakpoint on it, then we can 422 totally skip the nullified insn. This avoids creating and 423 executing a TB that merely branches to the next TB. */ 424 static bool use_nullify_skip(DisasContext *ctx) 425 { 426 return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0 427 && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY)); 428 } 429 430 static void gen_goto_tb(DisasContext *ctx, int which, 431 target_ulong f, target_ulong b) 432 { 433 if (f != -1 && b != -1 && use_goto_tb(ctx, f)) { 434 tcg_gen_goto_tb(which); 435 tcg_gen_movi_tl(cpu_iaoq_f, f); 436 tcg_gen_movi_tl(cpu_iaoq_b, b); 437 tcg_gen_exit_tb((uintptr_t)ctx->tb + which); 438 } else { 439 copy_iaoq_entry(cpu_iaoq_f, f, cpu_iaoq_b); 440 copy_iaoq_entry(cpu_iaoq_b, b, ctx->iaoq_n_var); 441 if (ctx->singlestep_enabled) { 442 gen_excp_1(EXCP_DEBUG); 443 } else { 444 tcg_gen_exit_tb(0); 445 } 446 } 447 } 448 449 /* PA has a habit of taking the LSB of a field and using that as the sign, 450 with the rest of the field becoming the least significant bits. 
*/ 451 static target_long low_sextract(uint32_t val, int pos, int len) 452 { 453 target_ulong x = -(target_ulong)extract32(val, pos, 1); 454 x = (x << (len - 1)) | extract32(val, pos + 1, len - 1); 455 return x; 456 } 457 458 static target_long assemble_12(uint32_t insn) 459 { 460 target_ulong x = -(target_ulong)(insn & 1); 461 x = (x << 1) | extract32(insn, 2, 1); 462 x = (x << 10) | extract32(insn, 3, 10); 463 return x; 464 } 465 466 static target_long assemble_16(uint32_t insn) 467 { 468 /* Take the name from PA2.0, which produces a 16-bit number 469 only with wide mode; otherwise a 14-bit number. Since we don't 470 implement wide mode, this is always the 14-bit number. */ 471 return low_sextract(insn, 0, 14); 472 } 473 474 static target_long assemble_17(uint32_t insn) 475 { 476 target_ulong x = -(target_ulong)(insn & 1); 477 x = (x << 5) | extract32(insn, 16, 5); 478 x = (x << 1) | extract32(insn, 2, 1); 479 x = (x << 10) | extract32(insn, 3, 10); 480 return x << 2; 481 } 482 483 static target_long assemble_21(uint32_t insn) 484 { 485 target_ulong x = -(target_ulong)(insn & 1); 486 x = (x << 11) | extract32(insn, 1, 11); 487 x = (x << 2) | extract32(insn, 14, 2); 488 x = (x << 5) | extract32(insn, 16, 5); 489 x = (x << 2) | extract32(insn, 12, 2); 490 return x << 11; 491 } 492 493 static target_long assemble_22(uint32_t insn) 494 { 495 target_ulong x = -(target_ulong)(insn & 1); 496 x = (x << 10) | extract32(insn, 16, 10); 497 x = (x << 1) | extract32(insn, 2, 1); 498 x = (x << 10) | extract32(insn, 3, 10); 499 return x << 2; 500 } 501 502 /* The parisc documentation describes only the general interpretation of 503 the conditions, without describing their exact implementation. The 504 interpretations do not stand up well when considering ADD,C and SUB,B. 505 However, considering the Addition, Subtraction and Logical conditions 506 as a whole it would appear that these relations are similar to what 507 a traditional NZCV set of flags would produce. 
*/ 508 509 static DisasCond do_cond(unsigned cf, TCGv res, TCGv cb_msb, TCGv sv) 510 { 511 DisasCond cond; 512 TCGv tmp; 513 514 switch (cf >> 1) { 515 case 0: /* Never / TR */ 516 cond = cond_make_f(); 517 break; 518 case 1: /* = / <> (Z / !Z) */ 519 cond = cond_make_0(TCG_COND_EQ, res); 520 break; 521 case 2: /* < / >= (N / !N) */ 522 cond = cond_make_0(TCG_COND_LT, res); 523 break; 524 case 3: /* <= / > (N | Z / !N & !Z) */ 525 cond = cond_make_0(TCG_COND_LE, res); 526 break; 527 case 4: /* NUV / UV (!C / C) */ 528 cond = cond_make_0(TCG_COND_EQ, cb_msb); 529 break; 530 case 5: /* ZNV / VNZ (!C | Z / C & !Z) */ 531 tmp = tcg_temp_new(); 532 tcg_gen_neg_tl(tmp, cb_msb); 533 tcg_gen_and_tl(tmp, tmp, res); 534 cond = cond_make_0(TCG_COND_EQ, tmp); 535 tcg_temp_free(tmp); 536 break; 537 case 6: /* SV / NSV (V / !V) */ 538 cond = cond_make_0(TCG_COND_LT, sv); 539 break; 540 case 7: /* OD / EV */ 541 tmp = tcg_temp_new(); 542 tcg_gen_andi_tl(tmp, res, 1); 543 cond = cond_make_0(TCG_COND_NE, tmp); 544 tcg_temp_free(tmp); 545 break; 546 default: 547 g_assert_not_reached(); 548 } 549 if (cf & 1) { 550 cond.c = tcg_invert_cond(cond.c); 551 } 552 553 return cond; 554 } 555 556 /* Similar, but for the special case of subtraction without borrow, we 557 can use the inputs directly. This can allow other computation to be 558 deleted as unused. 
*/ 559 560 static DisasCond do_sub_cond(unsigned cf, TCGv res, TCGv in1, TCGv in2, TCGv sv) 561 { 562 DisasCond cond; 563 564 switch (cf >> 1) { 565 case 1: /* = / <> */ 566 cond = cond_make(TCG_COND_EQ, in1, in2); 567 break; 568 case 2: /* < / >= */ 569 cond = cond_make(TCG_COND_LT, in1, in2); 570 break; 571 case 3: /* <= / > */ 572 cond = cond_make(TCG_COND_LE, in1, in2); 573 break; 574 case 4: /* << / >>= */ 575 cond = cond_make(TCG_COND_LTU, in1, in2); 576 break; 577 case 5: /* <<= / >> */ 578 cond = cond_make(TCG_COND_LEU, in1, in2); 579 break; 580 default: 581 return do_cond(cf, res, sv, sv); 582 } 583 if (cf & 1) { 584 cond.c = tcg_invert_cond(cond.c); 585 } 586 587 return cond; 588 } 589 590 /* Similar, but for logicals, where the carry and overflow bits are not 591 computed, and use of them is undefined. */ 592 593 static DisasCond do_log_cond(unsigned cf, TCGv res) 594 { 595 switch (cf >> 1) { 596 case 4: case 5: case 6: 597 cf &= 1; 598 break; 599 } 600 return do_cond(cf, res, res, res); 601 } 602 603 /* Similar, but for shift/extract/deposit conditions. */ 604 605 static DisasCond do_sed_cond(unsigned orig, TCGv res) 606 { 607 unsigned c, f; 608 609 /* Convert the compressed condition codes to standard. 610 0-2 are the same as logicals (nv,<,<=), while 3 is OD. 611 4-7 are the reverse of 0-3. */ 612 c = orig & 3; 613 if (c == 3) { 614 c = 7; 615 } 616 f = (orig & 4) / 4; 617 618 return do_log_cond(c * 2 + f, res); 619 } 620 621 /* Similar, but for unit conditions. */ 622 623 static DisasCond do_unit_cond(unsigned cf, TCGv res, TCGv in1, TCGv in2) 624 { 625 DisasCond cond; 626 TCGv tmp, cb; 627 628 TCGV_UNUSED(cb); 629 if (cf & 8) { 630 /* Since we want to test lots of carry-out bits all at once, do not 631 * do our normal thing and compute carry-in of bit B+1 since that 632 * leaves us with carry bits spread across two words. 
633 */ 634 cb = tcg_temp_new(); 635 tmp = tcg_temp_new(); 636 tcg_gen_or_tl(cb, in1, in2); 637 tcg_gen_and_tl(tmp, in1, in2); 638 tcg_gen_andc_tl(cb, cb, res); 639 tcg_gen_or_tl(cb, cb, tmp); 640 tcg_temp_free(tmp); 641 } 642 643 switch (cf >> 1) { 644 case 0: /* never / TR */ 645 case 1: /* undefined */ 646 case 5: /* undefined */ 647 cond = cond_make_f(); 648 break; 649 650 case 2: /* SBZ / NBZ */ 651 /* See hasless(v,1) from 652 * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord 653 */ 654 tmp = tcg_temp_new(); 655 tcg_gen_subi_tl(tmp, res, 0x01010101u); 656 tcg_gen_andc_tl(tmp, tmp, res); 657 tcg_gen_andi_tl(tmp, tmp, 0x80808080u); 658 cond = cond_make_0(TCG_COND_NE, tmp); 659 tcg_temp_free(tmp); 660 break; 661 662 case 3: /* SHZ / NHZ */ 663 tmp = tcg_temp_new(); 664 tcg_gen_subi_tl(tmp, res, 0x00010001u); 665 tcg_gen_andc_tl(tmp, tmp, res); 666 tcg_gen_andi_tl(tmp, tmp, 0x80008000u); 667 cond = cond_make_0(TCG_COND_NE, tmp); 668 tcg_temp_free(tmp); 669 break; 670 671 case 4: /* SDC / NDC */ 672 tcg_gen_andi_tl(cb, cb, 0x88888888u); 673 cond = cond_make_0(TCG_COND_NE, cb); 674 break; 675 676 case 6: /* SBC / NBC */ 677 tcg_gen_andi_tl(cb, cb, 0x80808080u); 678 cond = cond_make_0(TCG_COND_NE, cb); 679 break; 680 681 case 7: /* SHC / NHC */ 682 tcg_gen_andi_tl(cb, cb, 0x80008000u); 683 cond = cond_make_0(TCG_COND_NE, cb); 684 break; 685 686 default: 687 g_assert_not_reached(); 688 } 689 if (cf & 8) { 690 tcg_temp_free(cb); 691 } 692 if (cf & 1) { 693 cond.c = tcg_invert_cond(cond.c); 694 } 695 696 return cond; 697 } 698 699 /* Compute signed overflow for addition. */ 700 static TCGv do_add_sv(DisasContext *ctx, TCGv res, TCGv in1, TCGv in2) 701 { 702 TCGv sv = get_temp(ctx); 703 TCGv tmp = tcg_temp_new(); 704 705 tcg_gen_xor_tl(sv, res, in1); 706 tcg_gen_xor_tl(tmp, in1, in2); 707 tcg_gen_andc_tl(sv, sv, tmp); 708 tcg_temp_free(tmp); 709 710 return sv; 711 } 712 713 /* Compute signed overflow for subtraction. 
*/ 714 static TCGv do_sub_sv(DisasContext *ctx, TCGv res, TCGv in1, TCGv in2) 715 { 716 TCGv sv = get_temp(ctx); 717 TCGv tmp = tcg_temp_new(); 718 719 tcg_gen_xor_tl(sv, res, in1); 720 tcg_gen_xor_tl(tmp, in1, in2); 721 tcg_gen_and_tl(sv, sv, tmp); 722 tcg_temp_free(tmp); 723 724 return sv; 725 } 726 727 static ExitStatus do_add(DisasContext *ctx, unsigned rt, TCGv in1, TCGv in2, 728 unsigned shift, bool is_l, bool is_tsv, bool is_tc, 729 bool is_c, unsigned cf) 730 { 731 TCGv dest, cb, cb_msb, sv, tmp; 732 unsigned c = cf >> 1; 733 DisasCond cond; 734 735 dest = tcg_temp_new(); 736 TCGV_UNUSED(cb); 737 TCGV_UNUSED(cb_msb); 738 739 if (shift) { 740 tmp = get_temp(ctx); 741 tcg_gen_shli_tl(tmp, in1, shift); 742 in1 = tmp; 743 } 744 745 if (!is_l || c == 4 || c == 5) { 746 TCGv zero = tcg_const_tl(0); 747 cb_msb = get_temp(ctx); 748 tcg_gen_add2_tl(dest, cb_msb, in1, zero, in2, zero); 749 if (is_c) { 750 tcg_gen_add2_tl(dest, cb_msb, dest, cb_msb, cpu_psw_cb_msb, zero); 751 } 752 tcg_temp_free(zero); 753 if (!is_l) { 754 cb = get_temp(ctx); 755 tcg_gen_xor_tl(cb, in1, in2); 756 tcg_gen_xor_tl(cb, cb, dest); 757 } 758 } else { 759 tcg_gen_add_tl(dest, in1, in2); 760 if (is_c) { 761 tcg_gen_add_tl(dest, dest, cpu_psw_cb_msb); 762 } 763 } 764 765 /* Compute signed overflow if required. */ 766 TCGV_UNUSED(sv); 767 if (is_tsv || c == 6) { 768 sv = do_add_sv(ctx, dest, in1, in2); 769 if (is_tsv) { 770 /* ??? Need to include overflow from shift. */ 771 gen_helper_tsv(cpu_env, sv); 772 } 773 } 774 775 /* Emit any conditional trap before any writeback. */ 776 cond = do_cond(cf, dest, cb_msb, sv); 777 if (is_tc) { 778 cond_prep(&cond); 779 tmp = tcg_temp_new(); 780 tcg_gen_setcond_tl(cond.c, tmp, cond.a0, cond.a1); 781 gen_helper_tcond(cpu_env, tmp); 782 tcg_temp_free(tmp); 783 } 784 785 /* Write back the result. 
*/ 786 if (!is_l) { 787 save_or_nullify(ctx, cpu_psw_cb, cb); 788 save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb); 789 } 790 save_gpr(ctx, rt, dest); 791 tcg_temp_free(dest); 792 793 /* Install the new nullification. */ 794 cond_free(&ctx->null_cond); 795 ctx->null_cond = cond; 796 return NO_EXIT; 797 } 798 799 static ExitStatus do_sub(DisasContext *ctx, unsigned rt, TCGv in1, TCGv in2, 800 bool is_tsv, bool is_b, bool is_tc, unsigned cf) 801 { 802 TCGv dest, sv, cb, cb_msb, zero, tmp; 803 unsigned c = cf >> 1; 804 DisasCond cond; 805 806 dest = tcg_temp_new(); 807 cb = tcg_temp_new(); 808 cb_msb = tcg_temp_new(); 809 810 zero = tcg_const_tl(0); 811 if (is_b) { 812 /* DEST,C = IN1 + ~IN2 + C. */ 813 tcg_gen_not_tl(cb, in2); 814 tcg_gen_add2_tl(dest, cb_msb, in1, zero, cpu_psw_cb_msb, zero); 815 tcg_gen_add2_tl(dest, cb_msb, dest, cb_msb, cb, zero); 816 tcg_gen_xor_tl(cb, cb, in1); 817 tcg_gen_xor_tl(cb, cb, dest); 818 } else { 819 /* DEST,C = IN1 + ~IN2 + 1. We can produce the same result in fewer 820 operations by seeding the high word with 1 and subtracting. */ 821 tcg_gen_movi_tl(cb_msb, 1); 822 tcg_gen_sub2_tl(dest, cb_msb, in1, cb_msb, in2, zero); 823 tcg_gen_eqv_tl(cb, in1, in2); 824 tcg_gen_xor_tl(cb, cb, dest); 825 } 826 tcg_temp_free(zero); 827 828 /* Compute signed overflow if required. */ 829 TCGV_UNUSED(sv); 830 if (is_tsv || c == 6) { 831 sv = do_sub_sv(ctx, dest, in1, in2); 832 if (is_tsv) { 833 gen_helper_tsv(cpu_env, sv); 834 } 835 } 836 837 /* Compute the condition. We cannot use the special case for borrow. */ 838 if (!is_b) { 839 cond = do_sub_cond(cf, dest, in1, in2, sv); 840 } else { 841 cond = do_cond(cf, dest, cb_msb, sv); 842 } 843 844 /* Emit any conditional trap before any writeback. */ 845 if (is_tc) { 846 cond_prep(&cond); 847 tmp = tcg_temp_new(); 848 tcg_gen_setcond_tl(cond.c, tmp, cond.a0, cond.a1); 849 gen_helper_tcond(cpu_env, tmp); 850 tcg_temp_free(tmp); 851 } 852 853 /* Write back the result. 
*/ 854 save_or_nullify(ctx, cpu_psw_cb, cb); 855 save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb); 856 save_gpr(ctx, rt, dest); 857 tcg_temp_free(dest); 858 859 /* Install the new nullification. */ 860 cond_free(&ctx->null_cond); 861 ctx->null_cond = cond; 862 return NO_EXIT; 863 } 864 865 static ExitStatus do_cmpclr(DisasContext *ctx, unsigned rt, TCGv in1, 866 TCGv in2, unsigned cf) 867 { 868 TCGv dest, sv; 869 DisasCond cond; 870 871 dest = tcg_temp_new(); 872 tcg_gen_sub_tl(dest, in1, in2); 873 874 /* Compute signed overflow if required. */ 875 TCGV_UNUSED(sv); 876 if ((cf >> 1) == 6) { 877 sv = do_sub_sv(ctx, dest, in1, in2); 878 } 879 880 /* Form the condition for the compare. */ 881 cond = do_sub_cond(cf, dest, in1, in2, sv); 882 883 /* Clear. */ 884 tcg_gen_movi_tl(dest, 0); 885 save_gpr(ctx, rt, dest); 886 tcg_temp_free(dest); 887 888 /* Install the new nullification. */ 889 cond_free(&ctx->null_cond); 890 ctx->null_cond = cond; 891 return NO_EXIT; 892 } 893 894 static ExitStatus do_log(DisasContext *ctx, unsigned rt, TCGv in1, TCGv in2, 895 unsigned cf, void (*fn)(TCGv, TCGv, TCGv)) 896 { 897 TCGv dest = dest_gpr(ctx, rt); 898 899 /* Perform the operation, and writeback. */ 900 fn(dest, in1, in2); 901 save_gpr(ctx, rt, dest); 902 903 /* Install the new nullification. 
*/ 904 cond_free(&ctx->null_cond); 905 if (cf) { 906 ctx->null_cond = do_log_cond(cf, dest); 907 } 908 return NO_EXIT; 909 } 910 911 static ExitStatus do_unit(DisasContext *ctx, unsigned rt, TCGv in1, 912 TCGv in2, unsigned cf, bool is_tc, 913 void (*fn)(TCGv, TCGv, TCGv)) 914 { 915 TCGv dest; 916 DisasCond cond; 917 918 if (cf == 0) { 919 dest = dest_gpr(ctx, rt); 920 fn(dest, in1, in2); 921 save_gpr(ctx, rt, dest); 922 cond_free(&ctx->null_cond); 923 } else { 924 dest = tcg_temp_new(); 925 fn(dest, in1, in2); 926 927 cond = do_unit_cond(cf, dest, in1, in2); 928 929 if (is_tc) { 930 TCGv tmp = tcg_temp_new(); 931 cond_prep(&cond); 932 tcg_gen_setcond_tl(cond.c, tmp, cond.a0, cond.a1); 933 gen_helper_tcond(cpu_env, tmp); 934 tcg_temp_free(tmp); 935 } 936 save_gpr(ctx, rt, dest); 937 938 cond_free(&ctx->null_cond); 939 ctx->null_cond = cond; 940 } 941 return NO_EXIT; 942 } 943 944 /* Emit an unconditional branch to a direct target, which may or may not 945 have already had nullification handled. */ 946 static ExitStatus do_dbranch(DisasContext *ctx, target_ulong dest, 947 unsigned link, bool is_n) 948 { 949 if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) { 950 if (link != 0) { 951 copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var); 952 } 953 ctx->iaoq_n = dest; 954 if (is_n) { 955 ctx->null_cond.c = TCG_COND_ALWAYS; 956 } 957 return NO_EXIT; 958 } else { 959 nullify_over(ctx); 960 961 if (link != 0) { 962 copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var); 963 } 964 965 if (is_n && use_nullify_skip(ctx)) { 966 nullify_set(ctx, 0); 967 gen_goto_tb(ctx, 0, dest, dest + 4); 968 } else { 969 nullify_set(ctx, is_n); 970 gen_goto_tb(ctx, 0, ctx->iaoq_b, dest); 971 } 972 973 nullify_end(ctx, NO_EXIT); 974 975 nullify_set(ctx, 0); 976 gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n); 977 return EXIT_GOTO_TB; 978 } 979 } 980 981 /* Emit a conditional branch to a direct target. 
If the branch itself 982 is nullified, we should have already used nullify_over. */ 983 static ExitStatus do_cbranch(DisasContext *ctx, target_long disp, bool is_n, 984 DisasCond *cond) 985 { 986 target_ulong dest = iaoq_dest(ctx, disp); 987 TCGLabel *taken = NULL; 988 TCGCond c = cond->c; 989 int which = 0; 990 bool n; 991 992 assert(ctx->null_cond.c == TCG_COND_NEVER); 993 994 /* Handle TRUE and NEVER as direct branches. */ 995 if (c == TCG_COND_ALWAYS) { 996 return do_dbranch(ctx, dest, 0, is_n && disp >= 0); 997 } 998 if (c == TCG_COND_NEVER) { 999 return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0); 1000 } 1001 1002 taken = gen_new_label(); 1003 cond_prep(cond); 1004 tcg_gen_brcond_tl(c, cond->a0, cond->a1, taken); 1005 cond_free(cond); 1006 1007 /* Not taken: Condition not satisfied; nullify on backward branches. */ 1008 n = is_n && disp < 0; 1009 if (n && use_nullify_skip(ctx)) { 1010 nullify_set(ctx, 0); 1011 gen_goto_tb(ctx, which++, ctx->iaoq_n, ctx->iaoq_n + 4); 1012 } else { 1013 if (!n && ctx->null_lab) { 1014 gen_set_label(ctx->null_lab); 1015 ctx->null_lab = NULL; 1016 } 1017 nullify_set(ctx, n); 1018 gen_goto_tb(ctx, which++, ctx->iaoq_b, ctx->iaoq_n); 1019 } 1020 1021 gen_set_label(taken); 1022 1023 /* Taken: Condition satisfied; nullify on forward branches. */ 1024 n = is_n && disp >= 0; 1025 if (n && use_nullify_skip(ctx)) { 1026 nullify_set(ctx, 0); 1027 gen_goto_tb(ctx, which++, dest, dest + 4); 1028 } else { 1029 nullify_set(ctx, n); 1030 gen_goto_tb(ctx, which++, ctx->iaoq_b, dest); 1031 } 1032 1033 /* Not taken: the branch itself was nullified. */ 1034 if (ctx->null_lab) { 1035 gen_set_label(ctx->null_lab); 1036 ctx->null_lab = NULL; 1037 if (which < 2) { 1038 nullify_set(ctx, 0); 1039 gen_goto_tb(ctx, which, ctx->iaoq_b, ctx->iaoq_n); 1040 return EXIT_GOTO_TB; 1041 } else { 1042 return EXIT_IAQ_N_STALE; 1043 } 1044 } else { 1045 return EXIT_GOTO_TB; 1046 } 1047 } 1048 1049 /* Emit an unconditional branch to an indirect target. 
This handles 1050 nullification of the branch itself. */ 1051 static ExitStatus do_ibranch(DisasContext *ctx, TCGv dest, 1052 unsigned link, bool is_n) 1053 { 1054 TCGv a0, a1, next, tmp; 1055 TCGCond c; 1056 1057 assert(ctx->null_lab == NULL); 1058 1059 if (ctx->null_cond.c == TCG_COND_NEVER) { 1060 if (link != 0) { 1061 copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var); 1062 } 1063 next = get_temp(ctx); 1064 tcg_gen_mov_tl(next, dest); 1065 ctx->iaoq_n = -1; 1066 ctx->iaoq_n_var = next; 1067 if (is_n) { 1068 ctx->null_cond.c = TCG_COND_ALWAYS; 1069 } 1070 } else if (is_n && use_nullify_skip(ctx)) { 1071 /* The (conditional) branch, B, nullifies the next insn, N, 1072 and we're allowed to skip execution N (no single-step or 1073 tracepoint in effect). Since the exit_tb that we must use 1074 for the indirect branch consumes no special resources, we 1075 can (conditionally) skip B and continue execution. */ 1076 /* The use_nullify_skip test implies we have a known control path. */ 1077 tcg_debug_assert(ctx->iaoq_b != -1); 1078 tcg_debug_assert(ctx->iaoq_n != -1); 1079 1080 /* We do have to handle the non-local temporary, DEST, before 1081 branching. Since IOAQ_F is not really live at this point, we 1082 can simply store DEST optimistically. Similarly with IAOQ_B. 
 */
        tcg_gen_mov_tl(cpu_iaoq_f, dest);
        tcg_gen_addi_tl(cpu_iaoq_b, dest, 4);

        nullify_over(ctx);
        if (link != 0) {
            tcg_gen_movi_tl(cpu_gr[link], ctx->iaoq_n);
        }
        tcg_gen_exit_tb(0);
        return nullify_end(ctx, NO_EXIT);
    } else {
        cond_prep(&ctx->null_cond);
        c = ctx->null_cond.c;
        a0 = ctx->null_cond.a0;
        a1 = ctx->null_cond.a1;

        tmp = tcg_temp_new();
        next = get_temp(ctx);

        /* Select the next front of the IA queue at runtime: the fall-through
           address if this branch is nullified, else the branch target.  */
        copy_iaoq_entry(tmp, ctx->iaoq_n, ctx->iaoq_n_var);
        tcg_gen_movcond_tl(c, next, a0, a1, tmp, dest);
        ctx->iaoq_n = -1;
        ctx->iaoq_n_var = next;

        if (link != 0) {
            /* Write the return address into the link register only when the
               branch actually executes; when nullified, preserve it.  */
            tcg_gen_movcond_tl(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
        }

        if (is_n) {
            /* The branch nullifies the next insn, which means the state of N
               after the branch is the inverse of the state of N that applied
               to the branch.  */
            tcg_gen_setcond_tl(tcg_invert_cond(c), cpu_psw_n, a0, a1);
            cond_free(&ctx->null_cond);
            ctx->null_cond = cond_make_n();
            ctx->psw_n_nonzero = true;
        } else {
            cond_free(&ctx->null_cond);
        }
    }

    return NO_EXIT;
}

/* On Linux, page zero is normally marked execute only + gateway.
   Therefore normal read or write is supposed to fail, but specific
   offsets have kernel code mapped to raise permissions to implement
   system calls.  Handling this via an explicit check here, rather
   than in the "be disp(sr2,r0)" instruction that probably sent us
   here, is the easiest way to handle the branch delay slot on the
   aforementioned BE.  */
static ExitStatus do_page_zero(DisasContext *ctx)
{
    /* If by some means we get here with PSW[N]=1, that implies that
       the B,GATE instruction would be skipped, and we'd fault on the
       next insn within the privileged page.  */
    switch (ctx->null_cond.c) {
    case TCG_COND_NEVER:
        break;
    case TCG_COND_ALWAYS:
        tcg_gen_movi_tl(cpu_psw_n, 0);
        goto do_sigill;
    default:
        /* Since this is always the first (and only) insn within the
           TB, we should know the state of PSW[N] from TB->FLAGS.  */
        g_assert_not_reached();
    }

    /* Check that we didn't arrive here via some means that allowed
       non-sequential instruction execution.  Normally the PSW[B] bit
       detects this by disallowing the B,GATE instruction to execute
       under such conditions.  */
    if (ctx->iaoq_b != ctx->iaoq_f + 4) {
        goto do_sigill;
    }

    /* Dispatch on the offset within page zero: each recognized offset
       implements one of the Linux kernel-provided entry points.  */
    switch (ctx->iaoq_f) {
    case 0x00: /* Null pointer call */
        gen_excp_1(EXCP_SIGSEGV);
        return EXIT_NORETURN;

    case 0xb0: /* LWS */
        gen_excp_1(EXCP_SYSCALL_LWS);
        return EXIT_NORETURN;

    case 0xe0: /* SET_THREAD_POINTER */
        tcg_gen_mov_tl(cpu_cr27, cpu_gr[26]);
        tcg_gen_mov_tl(cpu_iaoq_f, cpu_gr[31]);
        tcg_gen_addi_tl(cpu_iaoq_b, cpu_iaoq_f, 4);
        return EXIT_IAQ_N_UPDATED;

    case 0x100: /* SYSCALL */
        gen_excp_1(EXCP_SYSCALL);
        return EXIT_NORETURN;

    default:
    do_sigill:
        gen_excp_1(EXCP_SIGILL);
        return EXIT_NORETURN;
    }
}

/* Decode slot for encodings that are architectural no-ops (or x,y,0);
   only discards any pending nullification bookkeeping.  */
static ExitStatus trans_nop(DisasContext *ctx, uint32_t insn,
                            const DisasInsn *di)
{
    cond_free(&ctx->null_cond);
    return NO_EXIT;
}

/* Decode ADD and SHLADD and their completers (logical, carry-in,
   trap-on-overflow) and hand off to do_add.  */
static ExitStatus trans_add(DisasContext *ctx, uint32_t insn,
                            const DisasInsn *di)
{
    unsigned r2 = extract32(insn, 21, 5);
    unsigned r1 = extract32(insn, 16, 5);
    unsigned cf = extract32(insn, 12, 4);
    unsigned ext = extract32(insn, 8, 4);
    unsigned shift = extract32(insn, 6, 2);
    unsigned rt = extract32(insn, 0, 5);
    TCGv tcg_r1, tcg_r2;
    bool is_c = false;
    bool is_l = false;
    bool is_tc = false;
    bool is_tsv = false;
    ExitStatus ret;

    switch (ext) {
    case 0x6: /* ADD, SHLADD */
        break;
    case 0xa: /* ADD,L, SHLADD,L */
        is_l = true;
        break;
    case 0xe: /* ADD,TSV, SHLADD,TSV (1) */
        is_tsv = true;
        break;
    case 0x7: /* ADD,C */
        is_c = true;
        break;
    case 0xf: /* ADD,C,TSV */
        is_c = is_tsv = true;
        break;
    default:
        return gen_illegal(ctx);
    }

    if (cf) {
        /* A condition completer is present: the result may nullify the
           following insn, so bracket with nullify_over/nullify_end.  */
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, r1);
    tcg_r2 = load_gpr(ctx, r2);
    ret = do_add(ctx, rt, tcg_r1, tcg_r2, shift, is_l, is_tsv, is_tc, is_c, cf);
    return nullify_end(ctx, ret);
}

/* Decode SUB and its completers (borrow, trap-on-overflow,
   trap-on-condition) and hand off to do_sub.  */
static ExitStatus trans_sub(DisasContext *ctx, uint32_t insn,
                            const DisasInsn *di)
{
    unsigned r2 = extract32(insn, 21, 5);
    unsigned r1 = extract32(insn, 16, 5);
    unsigned cf = extract32(insn, 12, 4);
    unsigned ext = extract32(insn, 6, 6);
    unsigned rt = extract32(insn, 0, 5);
    TCGv tcg_r1, tcg_r2;
    bool is_b = false;
    bool is_tc = false;
    bool is_tsv = false;
    ExitStatus ret;

    switch (ext) {
    case 0x10: /* SUB */
        break;
    case 0x30: /* SUB,TSV */
        is_tsv = true;
        break;
    case 0x14: /* SUB,B */
        is_b = true;
        break;
    case 0x34: /* SUB,B,TSV */
        is_b = is_tsv = true;
        break;
    case 0x13: /* SUB,TC */
        is_tc = true;
        break;
    case 0x33: /* SUB,TSV,TC */
        is_tc = is_tsv = true;
        break;
    default:
        return gen_illegal(ctx);
    }

    if (cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, r1);
    tcg_r2 = load_gpr(ctx, r2);
    ret = do_sub(ctx, rt, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, cf);
    return nullify_end(ctx, ret);
}

/* Decode the two-register logical operations; the specific TCG op
   (and/andc/or/xor) is supplied by the insn table entry.  */
static ExitStatus trans_log(DisasContext *ctx, uint32_t insn,
                            const DisasInsn *di)
{
    unsigned r2 = extract32(insn, 21, 5);
    unsigned r1 = extract32(insn, 16, 5);
    unsigned cf = extract32(insn, 12, 4);
    unsigned rt = extract32(insn, 0, 5);
    TCGv tcg_r1,
    tcg_r2;
    ExitStatus ret;

    if (cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, r1);
    tcg_r2 = load_gpr(ctx, r2);
    /* di->f_ttt selects the logical operation for this table entry.  */
    ret = do_log(ctx, rt, tcg_r1, tcg_r2, cf, di->f_ttt);
    return nullify_end(ctx, ret);
}

/* OR r,0,t -> COPY (according to gas) */
static ExitStatus trans_copy(DisasContext *ctx, uint32_t insn,
                             const DisasInsn *di)
{
    unsigned r1 = extract32(insn, 16, 5);
    unsigned rt = extract32(insn, 0, 5);

    if (r1 == 0) {
        /* Copying from r0 yields zero.  */
        TCGv dest = dest_gpr(ctx, rt);
        tcg_gen_movi_tl(dest, 0);
        save_gpr(ctx, rt, dest);
    } else {
        save_gpr(ctx, rt, cpu_gr[r1]);
    }
    cond_free(&ctx->null_cond);
    return NO_EXIT;
}

/* Decode CMPCLR: compare, clear the target, and set nullification
   from the comparison.  */
static ExitStatus trans_cmpclr(DisasContext *ctx, uint32_t insn,
                               const DisasInsn *di)
{
    unsigned r2 = extract32(insn, 21, 5);
    unsigned r1 = extract32(insn, 16, 5);
    unsigned cf = extract32(insn, 12, 4);
    unsigned rt = extract32(insn, 0, 5);
    TCGv tcg_r1, tcg_r2;
    ExitStatus ret;

    if (cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, r1);
    tcg_r2 = load_gpr(ctx, r2);
    ret = do_cmpclr(ctx, rt, tcg_r1, tcg_r2, cf);
    return nullify_end(ctx, ret);
}

/* Decode UXOR: unit xor, handled by do_unit with tcg_gen_xor_tl.  */
static ExitStatus trans_uxor(DisasContext *ctx, uint32_t insn,
                             const DisasInsn *di)
{
    unsigned r2 = extract32(insn, 21, 5);
    unsigned r1 = extract32(insn, 16, 5);
    unsigned cf = extract32(insn, 12, 4);
    unsigned rt = extract32(insn, 0, 5);
    TCGv tcg_r1, tcg_r2;
    ExitStatus ret;

    if (cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, r1);
    tcg_r2 = load_gpr(ctx, r2);
    ret = do_unit(ctx, rt, tcg_r1, tcg_r2, cf, false, tcg_gen_xor_tl);
    return nullify_end(ctx, ret);
}

/* Decode UADDCM[,TC]: unit add with complement, i.e. r1 + ~r2.  */
static ExitStatus trans_uaddcm(DisasContext *ctx, uint32_t insn,
                               const DisasInsn *di)
{
    unsigned r2 = extract32(insn, 21, 5);
    unsigned r1 = extract32(insn, 16, 5);
    unsigned cf = extract32(insn, 12, 4);
    unsigned is_tc = extract32(insn, 6, 1);
    unsigned rt = extract32(insn, 0, 5);
    TCGv tcg_r1, tcg_r2, tmp;
    ExitStatus ret;

    if (cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, r1);
    tcg_r2 = load_gpr(ctx, r2);
    tmp = get_temp(ctx);
    /* Complement the second operand, then treat as a unit add.  */
    tcg_gen_not_tl(tmp, tcg_r2);
    ret = do_unit(ctx, rt, tcg_r1, tmp, cf, is_tc, tcg_gen_add_tl);
    return nullify_end(ctx, ret);
}

/* Decode DCOR/IDCOR (decimal correct): build a per-nibble correction
   value from the saved carry bits and add/subtract it.  */
static ExitStatus trans_dcor(DisasContext *ctx, uint32_t insn,
                             const DisasInsn *di)
{
    unsigned r2 = extract32(insn, 21, 5);
    unsigned cf = extract32(insn, 12, 4);
    unsigned is_i = extract32(insn, 6, 1);
    unsigned rt = extract32(insn, 0, 5);
    TCGv tmp;
    ExitStatus ret;

    nullify_over(ctx);

    tmp = get_temp(ctx);
    /* Move each nibble's carry into bit 0 of that nibble, invert for
       the non-I form, then scale: 6 per carry-less nibble corrects a
       BCD digit.  */
    tcg_gen_shri_tl(tmp, cpu_psw_cb, 3);
    if (!is_i) {
        tcg_gen_not_tl(tmp, tmp);
    }
    tcg_gen_andi_tl(tmp, tmp, 0x11111111);
    tcg_gen_muli_tl(tmp, tmp, 6);
    ret = do_unit(ctx, rt, tmp, load_gpr(ctx, r2), cf, false,
                  is_i ? tcg_gen_add_tl : tcg_gen_sub_tl);

    return nullify_end(ctx, ret);
}

/* Decode DS (divide step): one step of the shift-and-add/subtract
   division primitive, updating PSW[CB] and PSW[V].  */
static ExitStatus trans_ds(DisasContext *ctx, uint32_t insn,
                           const DisasInsn *di)
{
    unsigned r2 = extract32(insn, 21, 5);
    unsigned r1 = extract32(insn, 16, 5);
    unsigned cf = extract32(insn, 12, 4);
    unsigned rt = extract32(insn, 0, 5);
    TCGv dest, add1, add2, addc, zero, in1, in2;

    nullify_over(ctx);

    in1 = load_gpr(ctx, r1);
    in2 = load_gpr(ctx, r2);

    add1 = tcg_temp_new();
    add2 = tcg_temp_new();
    addc = tcg_temp_new();
    dest = tcg_temp_new();
    zero = tcg_const_tl(0);

    /* Form R1 << 1 | PSW[CB]{8}.  */
    tcg_gen_add_tl(add1, in1, in1);
    tcg_gen_add_tl(add1, add1, cpu_psw_cb_msb);

    /* Add or subtract R2, depending on PSW[V].
       Proper computation of
       carry{8} requires that we subtract via + ~R2 + 1, as described in
       the manual.  By extracting and masking V, we can produce the
       proper inputs to the addition without movcond.  */
    tcg_gen_sari_tl(addc, cpu_psw_v, TARGET_LONG_BITS - 1);
    tcg_gen_xor_tl(add2, in2, addc);
    tcg_gen_andi_tl(addc, addc, 1);
    /* ??? This is only correct for 32-bit.  */
    tcg_gen_add2_i32(dest, cpu_psw_cb_msb, add1, zero, add2, zero);
    tcg_gen_add2_i32(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero);

    tcg_temp_free(addc);
    tcg_temp_free(zero);

    /* Write back the result register.  */
    save_gpr(ctx, rt, dest);

    /* Write back PSW[CB].  */
    tcg_gen_xor_tl(cpu_psw_cb, add1, add2);
    tcg_gen_xor_tl(cpu_psw_cb, cpu_psw_cb, dest);

    /* Write back PSW[V] for the division step.  */
    tcg_gen_neg_tl(cpu_psw_v, cpu_psw_cb_msb);
    tcg_gen_xor_tl(cpu_psw_v, cpu_psw_v, in2);

    /* Install the new nullification.  */
    if (cf) {
        TCGv sv;
        TCGV_UNUSED(sv);
        if (cf >> 1 == 6) {
            /* ??? The lshift is supposed to contribute to overflow.  */
            sv = do_add_sv(ctx, dest, add1, add2);
        }
        ctx->null_cond = do_cond(cf, dest, cpu_psw_cb_msb, sv);
    }

    tcg_temp_free(add1);
    tcg_temp_free(add2);
    tcg_temp_free(dest);

    return nullify_end(ctx, NO_EXIT);
}

/* Decode table for major opcode 0x02 (register arithmetic/logical).
   Entries are matched in order: insn value against mask.  */
static const DisasInsn table_arith_log[] = {
    { 0x08000240u, 0xfc00ffffu, trans_nop },  /* or x,y,0 */
    { 0x08000240u, 0xffe0ffe0u, trans_copy }, /* or x,0,t */
    { 0x08000000u, 0xfc000fe0u, trans_log, .f_ttt = tcg_gen_andc_tl },
    { 0x08000200u, 0xfc000fe0u, trans_log, .f_ttt = tcg_gen_and_tl },
    { 0x08000240u, 0xfc000fe0u, trans_log, .f_ttt = tcg_gen_or_tl },
    { 0x08000280u, 0xfc000fe0u, trans_log, .f_ttt = tcg_gen_xor_tl },
    { 0x08000880u, 0xfc000fe0u, trans_cmpclr },
    { 0x08000380u, 0xfc000fe0u, trans_uxor },
    { 0x08000980u, 0xfc000fa0u, trans_uaddcm },
    { 0x08000b80u, 0xfc1f0fa0u, trans_dcor },
    { 0x08000440u, 0xfc000fe0u, trans_ds },
    { 0x08000700u, 0xfc0007e0u, trans_add }, /* add */
    { 0x08000400u, 0xfc0006e0u, trans_sub }, /* sub; sub,b; sub,tsv */
    { 0x080004c0u, 0xfc0007e0u, trans_sub }, /* sub,tc; sub,tsv,tc */
    { 0x08000200u, 0xfc000320u, trans_add }, /* shladd */
};

/* Decode ADDI / ADDI,TSV / ADDI,TC (immediate add).  */
static ExitStatus trans_addi(DisasContext *ctx, uint32_t insn)
{
    target_long im = low_sextract(insn, 0, 11);
    unsigned e1 = extract32(insn, 11, 1);
    unsigned cf = extract32(insn, 12, 4);
    unsigned rt = extract32(insn, 16, 5);
    unsigned r2 = extract32(insn, 21, 5);
    unsigned o1 = extract32(insn, 26, 1);
    TCGv tcg_im, tcg_r2;
    ExitStatus ret;

    if (cf) {
        nullify_over(ctx);
    }

    tcg_im = load_const(ctx, im);
    tcg_r2 = load_gpr(ctx, r2);
    /* e1 selects TSV; the opcode bit o1 distinguishes the TC form.  */
    ret = do_add(ctx, rt, tcg_im, tcg_r2, 0, false, e1, !o1, false, cf);

    return nullify_end(ctx, ret);
}

/* Decode SUBI / SUBI,TSV (immediate subtract).  */
static ExitStatus trans_subi(DisasContext *ctx, uint32_t insn)
{
    target_long im = low_sextract(insn, 0, 11);
    unsigned
             e1 = extract32(insn, 11, 1);
    unsigned cf = extract32(insn, 12, 4);
    unsigned rt = extract32(insn, 16, 5);
    unsigned r2 = extract32(insn, 21, 5);
    TCGv tcg_im, tcg_r2;
    ExitStatus ret;

    if (cf) {
        nullify_over(ctx);
    }

    tcg_im = load_const(ctx, im);
    tcg_r2 = load_gpr(ctx, r2);
    ret = do_sub(ctx, rt, tcg_im, tcg_r2, e1, false, false, cf);

    return nullify_end(ctx, ret);
}

/* Decode CMPICLR (immediate compare and clear).  */
static ExitStatus trans_cmpiclr(DisasContext *ctx, uint32_t insn)
{
    target_long im = low_sextract(insn, 0, 11);
    unsigned cf = extract32(insn, 12, 4);
    unsigned rt = extract32(insn, 16, 5);
    unsigned r2 = extract32(insn, 21, 5);
    TCGv tcg_im, tcg_r2;
    ExitStatus ret;

    if (cf) {
        nullify_over(ctx);
    }

    tcg_im = load_const(ctx, im);
    tcg_r2 = load_gpr(ctx, r2);
    ret = do_cmpclr(ctx, rt, tcg_im, tcg_r2, cf);

    return nullify_end(ctx, ret);
}

/* Decode LDIL: load the assembled 21-bit immediate into rt.  */
static ExitStatus trans_ldil(DisasContext *ctx, uint32_t insn)
{
    unsigned rt = extract32(insn, 21, 5);
    target_long i = assemble_21(insn);
    TCGv tcg_rt = dest_gpr(ctx, rt);

    tcg_gen_movi_tl(tcg_rt, i);
    save_gpr(ctx, rt, tcg_rt);
    cond_free(&ctx->null_cond);

    return NO_EXIT;
}

/* Decode ADDIL: add the 21-bit immediate to rt, result always in r1.  */
static ExitStatus trans_addil(DisasContext *ctx, uint32_t insn)
{
    unsigned rt = extract32(insn, 21, 5);
    target_long i = assemble_21(insn);
    TCGv tcg_rt = load_gpr(ctx, rt);
    TCGv tcg_r1 = dest_gpr(ctx, 1);

    tcg_gen_addi_tl(tcg_r1, tcg_rt, i);
    save_gpr(ctx, 1, tcg_r1);
    cond_free(&ctx->null_cond);

    return NO_EXIT;
}

/* Decode LDO: load offset, rt = rb + imm16 (no memory access).  */
static ExitStatus trans_ldo(DisasContext *ctx, uint32_t insn)
{
    unsigned rb = extract32(insn, 21, 5);
    unsigned rt = extract32(insn, 16, 5);
    target_long i = assemble_16(insn);
    TCGv tcg_rt = dest_gpr(ctx, rt);

    /* Special case rb == 0, for the LDI pseudo-op.
       The COPY pseudo-op is handled for free within tcg_gen_addi_tl.  */
    if (rb == 0) {
        tcg_gen_movi_tl(tcg_rt, i);
    } else {
        tcg_gen_addi_tl(tcg_rt, cpu_gr[rb], i);
    }
    save_gpr(ctx, rt, tcg_rt);
    cond_free(&ctx->null_cond);

    return NO_EXIT;
}

/* Decode COMPB/COMPIB: compare and branch.  is_imm selects the
   5-bit immediate form; is_dw is accepted for the doubleword forms
   but is not otherwise used in this body.  */
static ExitStatus trans_cmpb(DisasContext *ctx, uint32_t insn,
                             bool is_true, bool is_imm, bool is_dw)
{
    target_long disp = assemble_12(insn) * 4;
    unsigned n = extract32(insn, 1, 1);
    unsigned c = extract32(insn, 13, 3);
    unsigned r = extract32(insn, 21, 5);
    unsigned cf = c * 2 + !is_true;
    TCGv dest, in1, in2, sv;
    DisasCond cond;

    nullify_over(ctx);

    if (is_imm) {
        in1 = load_const(ctx, low_sextract(insn, 16, 5));
    } else {
        in1 = load_gpr(ctx, extract32(insn, 16, 5));
    }
    in2 = load_gpr(ctx, r);
    dest = get_temp(ctx);

    tcg_gen_sub_tl(dest, in1, in2);

    TCGV_UNUSED(sv);
    if (c == 6) {
        /* Condition 6 needs the signed-overflow computation.  */
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    cond = do_sub_cond(cf, dest, in1, in2, sv);
    return do_cbranch(ctx, disp, n, &cond);
}

/* Decode ADDB/ADDIB: add and branch on the result condition.  */
static ExitStatus trans_addb(DisasContext *ctx, uint32_t insn,
                             bool is_true, bool is_imm)
{
    target_long disp = assemble_12(insn) * 4;
    unsigned n = extract32(insn, 1, 1);
    unsigned c = extract32(insn, 13, 3);
    unsigned r = extract32(insn, 21, 5);
    unsigned cf = c * 2 + !is_true;
    TCGv dest, in1, in2, sv, cb_msb;
    DisasCond cond;

    nullify_over(ctx);

    if (is_imm) {
        in1 = load_const(ctx, low_sextract(insn, 16, 5));
    } else {
        in1 = load_gpr(ctx, extract32(insn, 16, 5));
    }
    in2 = load_gpr(ctx, r);
    dest = dest_gpr(ctx, r);
    TCGV_UNUSED(sv);
    TCGV_UNUSED(cb_msb);

    /* Only compute the carry or overflow when the condition needs it.  */
    switch (c) {
    default:
        tcg_gen_add_tl(dest, in1, in2);
        break;
    case 4: case 5:
        cb_msb = get_temp(ctx);
        tcg_gen_movi_tl(cb_msb, 0);
        tcg_gen_add2_tl(dest, cb_msb, in1, cb_msb, in2, cb_msb);
        break;
    case 6:
        tcg_gen_add_tl(dest, in1, in2);
        sv = do_add_sv(ctx, dest, in1, in2);
        break;
    }

    cond = do_cond(cf, dest, cb_msb, sv);
    return do_cbranch(ctx, disp, n, &cond);
}

/* Decode BB: branch on a selected bit of a register, either a fixed
   bit position (i set) or the position held in SAR.  */
static ExitStatus trans_bb(DisasContext *ctx, uint32_t insn)
{
    target_long disp = assemble_12(insn) * 4;
    unsigned n = extract32(insn, 1, 1);
    unsigned c = extract32(insn, 15, 1);
    unsigned r = extract32(insn, 16, 5);
    unsigned p = extract32(insn, 21, 5);
    unsigned i = extract32(insn, 26, 1);
    TCGv tmp, tcg_r;
    DisasCond cond;

    nullify_over(ctx);

    /* Shift the selected bit into the sign position, then test the
       sign with GE/LT below.  */
    tmp = tcg_temp_new();
    tcg_r = load_gpr(ctx, r);
    if (i) {
        tcg_gen_shli_tl(tmp, tcg_r, p);
    } else {
        tcg_gen_shl_tl(tmp, tcg_r, cpu_sar);
    }

    cond = cond_make_0(c ? TCG_COND_GE : TCG_COND_LT, tmp);
    tcg_temp_free(tmp);
    return do_cbranch(ctx, disp, n, &cond);
}

/* Decode MOVB/MOVIB: copy (or load immediate) into r, then branch on
   the shift/extract/deposit condition of the moved value.  */
static ExitStatus trans_movb(DisasContext *ctx, uint32_t insn, bool is_imm)
{
    target_long disp = assemble_12(insn) * 4;
    unsigned n = extract32(insn, 1, 1);
    unsigned c = extract32(insn, 13, 3);
    unsigned t = extract32(insn, 16, 5);
    unsigned r = extract32(insn, 21, 5);
    TCGv dest;
    DisasCond cond;

    nullify_over(ctx);

    dest = dest_gpr(ctx, r);
    if (is_imm) {
        tcg_gen_movi_tl(dest, low_sextract(t, 0, 5));
    } else if (t == 0) {
        tcg_gen_movi_tl(dest, 0);
    } else {
        tcg_gen_mov_tl(dest, cpu_gr[t]);
    }

    cond = do_sed_cond(c, dest);
    return do_cbranch(ctx, disp, n, &cond);
}

/* Decode BE/BE,L: branch external.  Spaces are not implemented, so
   this reduces to a (possibly linking) branch to rb + disp.  */
static ExitStatus trans_be(DisasContext *ctx, uint32_t insn, bool is_l)
{
    unsigned n = extract32(insn, 1, 1);
    unsigned b = extract32(insn, 21, 5);
    target_long disp = assemble_17(insn);

    /* unsigned s = low_uextract(insn, 13, 3); */
    /* ??? It seems like there should be a good way of using
       "be disp(sr2, r0)", the canonical gateway entry mechanism
       to our advantage.  But that appears to be inconvenient to
       manage along side branch delay slots.  Therefore we handle
       entry into the gateway page via absolute address.  */

    /* Since we don't implement spaces, just branch.  Do notice the special
       case of "be disp(*,r0)" using a direct branch to disp, so that we can
       goto_tb to the TB containing the syscall.  */
    if (b == 0) {
        return do_dbranch(ctx, disp, is_l ? 31 : 0, n);
    } else {
        TCGv tmp = get_temp(ctx);
        tcg_gen_addi_tl(tmp, load_gpr(ctx, b), disp);
        return do_ibranch(ctx, tmp, is_l ? 31 : 0, n);
    }
}

/* Decode B,L (and B,L,PUSH): pc-relative branch with 17-bit disp.  */
static ExitStatus trans_bl(DisasContext *ctx, uint32_t insn,
                           const DisasInsn *di)
{
    unsigned n = extract32(insn, 1, 1);
    unsigned link = extract32(insn, 21, 5);
    target_long disp = assemble_17(insn);

    return do_dbranch(ctx, iaoq_dest(ctx, disp), link, n);
}

/* Decode B,L with the 22-bit displacement form; always links to r2.  */
static ExitStatus trans_bl_long(DisasContext *ctx, uint32_t insn,
                                const DisasInsn *di)
{
    unsigned n = extract32(insn, 1, 1);
    target_long disp = assemble_22(insn);

    return do_dbranch(ctx, iaoq_dest(ctx, disp), 2, n);
}

/* Decode BLR: branch to iaoq_f + 8 + rx * 8, with optional link.  */
static ExitStatus trans_blr(DisasContext *ctx, uint32_t insn,
                            const DisasInsn *di)
{
    unsigned n = extract32(insn, 1, 1);
    unsigned rx = extract32(insn, 16, 5);
    unsigned link = extract32(insn, 21, 5);
    TCGv tmp = get_temp(ctx);

    tcg_gen_shli_tl(tmp, load_gpr(ctx, rx), 3);
    tcg_gen_addi_tl(tmp, tmp, ctx->iaoq_f + 8);
    return do_ibranch(ctx, tmp, link, n);
}

/* Decode BV: branch vectored to rb (+ rx * 8 when rx != 0).  */
static ExitStatus trans_bv(DisasContext *ctx, uint32_t insn,
                           const DisasInsn *di)
{
    unsigned n = extract32(insn, 1, 1);
    unsigned rx = extract32(insn, 16, 5);
    unsigned rb = extract32(insn, 21, 5);
    TCGv dest;

    if (rx == 0) {
        dest = load_gpr(ctx, rb);
    } else {
        dest = get_temp(ctx);
        tcg_gen_shli_tl(dest, load_gpr(ctx, rx), 3);
        tcg_gen_add_tl(dest, dest, load_gpr(ctx, rb));
    }
    return do_ibranch(ctx, dest, 0, n);
}

/* Decode BVE: branch vectored external to rb, optionally linking r2.  */
static ExitStatus trans_bve(DisasContext *ctx, uint32_t insn,
                            const DisasInsn *di)
{
    unsigned n = extract32(insn, 1, 1);
    unsigned rb = extract32(insn, 21, 5);
    unsigned link = extract32(insn, 13, 1) ? 2 : 0;

    return do_ibranch(ctx, load_gpr(ctx, rb), link, n);
}

/* Decode table for major opcode 0x3A (unconditional branches).  */
static const DisasInsn table_branch[] = {
    { 0xe8000000u, 0xfc006000u, trans_bl }, /* B,L and B,L,PUSH */
    { 0xe800a000u, 0xfc00e000u, trans_bl_long },
    { 0xe8004000u, 0xfc00fffdu, trans_blr },
    { 0xe800c000u, 0xfc00fffdu, trans_bv },
    { 0xe800d000u, 0xfc00dffcu, trans_bve },
};

/* Scan a decode table in order; dispatch to the first entry whose
   masked pattern matches, else raise an illegal instruction.  */
static ExitStatus translate_table_int(DisasContext *ctx, uint32_t insn,
                                      const DisasInsn table[], size_t n)
{
    size_t i;
    for (i = 0; i < n; ++i) {
        if ((insn & table[i].mask) == table[i].insn) {
            return table[i].trans(ctx, insn, &table[i]);
        }
    }
    return gen_illegal(ctx);
}

#define translate_table(ctx, insn, table) \
    translate_table_int(ctx, insn, table, ARRAY_SIZE(table))

/* Top-level decode: dispatch one insn by its 6-bit major opcode.  */
static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
{
    uint32_t opc = extract32(insn, 26, 6);

    switch (opc) {
    case 0x02:
        return translate_table(ctx, insn, table_arith_log);
    case 0x08:
        return trans_ldil(ctx, insn);
    case 0x0A:
        return trans_addil(ctx, insn);
    case 0x0D:
        return trans_ldo(ctx, insn);
    case 0x20:
        return trans_cmpb(ctx, insn, true, false, false);
    case 0x21:
        return trans_cmpb(ctx, insn, true, true, false);
    case 0x22:
        return trans_cmpb(ctx, insn, false, false, false);
    case 0x23:
        return trans_cmpb(ctx, insn, false, true, false);
    case 0x24:
        return trans_cmpiclr(ctx, insn);
    case 0x25:
        return trans_subi(ctx, insn);
    case 0x27:
        return trans_cmpb(ctx, insn, true, false, true);
    case 0x28:
        return trans_addb(ctx, insn, true, false);
    case 0x29:
        return trans_addb(ctx, insn, true, true);
    case 0x2A:
        return trans_addb(ctx, insn, false, false);
    case 0x2B:
        return trans_addb(ctx, insn, false, true);
    case 0x2C:
    case 0x2D:
        return trans_addi(ctx, insn);
    case 0x2F:
        return trans_cmpb(ctx, insn, false, false, true);
    case 0x30:
    case 0x31:
        return trans_bb(ctx, insn);
    case 0x32:
        return trans_movb(ctx, insn, false);
    case 0x33:
        return trans_movb(ctx, insn, true);
    case 0x38:
        return trans_be(ctx, insn, false);
    case 0x39:
        return trans_be(ctx, insn, true);
    case 0x3A:
        return translate_table(ctx, insn, table_branch);
    default:
        break;
    }
    return gen_illegal(ctx);
}

/* Translate one TB: iterate translate_one over the insns of the TB,
   maintaining the two-entry IA queue and nullification state.  */
void gen_intermediate_code(CPUHPPAState *env, struct TranslationBlock *tb)
{
    HPPACPU *cpu = hppa_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    DisasContext ctx;
    ExitStatus ret;
    int num_insns, max_insns, i;

    ctx.tb = tb;
    ctx.cs = cs;
    ctx.iaoq_f = tb->pc;
    ctx.iaoq_b = tb->cs_base;
    ctx.singlestep_enabled = cs->singlestep_enabled;

    ctx.ntemps = 0;
    for (i = 0; i < ARRAY_SIZE(ctx.temps); ++i) {
        TCGV_UNUSED(ctx.temps[i]);
    }

    /* Compute the maximum number of insns to execute, as bounded by
       (1) icount, (2) single-stepping, (3) branch delay slots, or
       (4) the number of insns remaining on the current page.
 */
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (ctx.singlestep_enabled || singlestep) {
        max_insns = 1;
    } else if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    num_insns = 0;
    gen_tb_start(tb);

    /* Seed the nullification status from PSW[N], as shown in TB->FLAGS.  */
    ctx.null_cond = cond_make_f();
    ctx.psw_n_nonzero = false;
    if (tb->flags & 1) {
        /* Bit 0 of tb->flags carries PSW[N]: the first insn is nullified.  */
        ctx.null_cond.c = TCG_COND_ALWAYS;
        ctx.psw_n_nonzero = true;
    }
    ctx.null_lab = NULL;

    do {
        tcg_gen_insn_start(ctx.iaoq_f, ctx.iaoq_b);
        num_insns++;

        if (unlikely(cpu_breakpoint_test(cs, ctx.iaoq_f, BP_ANY))) {
            ret = gen_excp(&ctx, EXCP_DEBUG);
            break;
        }
        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        if (ctx.iaoq_f < TARGET_PAGE_SIZE) {
            /* Page zero holds the Linux syscall gateway entries.  */
            ret = do_page_zero(&ctx);
            assert(ret != NO_EXIT);
        } else {
            /* Always fetch the insn, even if nullified, so that we check
               the page permissions for execute.  */
            uint32_t insn = cpu_ldl_code(env, ctx.iaoq_f);

            /* Set up the IA queue for the next insn.
               This will be overwritten by a branch.
               An iaoq of -1 means the address is only known at runtime,
               held in iaoq_n_var.  */
            if (ctx.iaoq_b == -1) {
                ctx.iaoq_n = -1;
                ctx.iaoq_n_var = get_temp(&ctx);
                tcg_gen_addi_tl(ctx.iaoq_n_var, cpu_iaoq_b, 4);
            } else {
                ctx.iaoq_n = ctx.iaoq_b + 4;
                TCGV_UNUSED(ctx.iaoq_n_var);
            }

            if (unlikely(ctx.null_cond.c == TCG_COND_ALWAYS)) {
                /* The insn is statically known to be nullified: emit
                   nothing and clear the nullification for the next one.  */
                ctx.null_cond.c = TCG_COND_NEVER;
                ret = NO_EXIT;
            } else {
                ret = translate_one(&ctx, insn);
                assert(ctx.null_lab == NULL);
            }
        }

        /* Release the per-insn temporaries allocated via get_temp.  */
        for (i = 0; i < ctx.ntemps; ++i) {
            tcg_temp_free(ctx.temps[i]);
            TCGV_UNUSED(ctx.temps[i]);
        }
        ctx.ntemps = 0;

        /* If we see non-linear instructions, exhaust instruction count,
           or run out of buffer space, stop generation.  */
        /* ??? The non-linear instruction restriction is purely due to
           the debugging dump.  Otherwise we *could* follow unconditional
           branches within the same page.  */
        if (ret == NO_EXIT
            && (ctx.iaoq_b != ctx.iaoq_f + 4
                || num_insns >= max_insns
                || tcg_op_buf_full())) {
            if (ctx.null_cond.c == TCG_COND_NEVER
                || ctx.null_cond.c == TCG_COND_ALWAYS) {
                nullify_set(&ctx, ctx.null_cond.c == TCG_COND_ALWAYS);
                gen_goto_tb(&ctx, 0, ctx.iaoq_b, ctx.iaoq_n);
                ret = EXIT_GOTO_TB;
            } else {
                ret = EXIT_IAQ_N_STALE;
            }
        }

        /* Advance the IA queue by one entry.  */
        ctx.iaoq_f = ctx.iaoq_b;
        ctx.iaoq_b = ctx.iaoq_n;
        if (ret == EXIT_NORETURN
            || ret == EXIT_GOTO_TB
            || ret == EXIT_IAQ_N_UPDATED) {
            break;
        }
        if (ctx.iaoq_f == -1) {
            tcg_gen_mov_tl(cpu_iaoq_f, cpu_iaoq_b);
            copy_iaoq_entry(cpu_iaoq_b, ctx.iaoq_n, ctx.iaoq_n_var);
            nullify_save(&ctx);
            ret = EXIT_IAQ_N_UPDATED;
            break;
        }
        if (ctx.iaoq_b == -1) {
            tcg_gen_mov_tl(cpu_iaoq_b, ctx.iaoq_n_var);
        }
    } while (ret == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    /* Emit the TB epilogue appropriate to how translation stopped.  */
    switch (ret) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        break;
    case EXIT_IAQ_N_STALE:
        copy_iaoq_entry(cpu_iaoq_f, ctx.iaoq_f, cpu_iaoq_f);
        copy_iaoq_entry(cpu_iaoq_b, ctx.iaoq_b, cpu_iaoq_b);
        nullify_save(&ctx);
        /* FALLTHRU */
    case EXIT_IAQ_N_UPDATED:
        if (ctx.singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG);
        } else {
            tcg_gen_exit_tb(0);
        }
        break;
    default:
        abort();
    }

    gen_tb_end(tb, num_insns);

    tb->size = num_insns * 4;
    tb->icount = num_insns;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(tb->pc)) {
        qemu_log_lock();
        /* The page-zero gateway entries have no code to disassemble.  */
        switch (tb->pc) {
        case 0x00:
            qemu_log("IN:\n0x00000000: (null)\n\n");
            break;
        case 0xb0:
            qemu_log("IN:\n0x000000b0: light-weight-syscall\n\n");
            break;
        case 0xe0:
            qemu_log("IN:\n0x000000e0: set-thread-pointer-syscall\n\n");
            break;
        case 0x100:
            qemu_log("IN:\n0x00000100: syscall\n\n");
            break;
        default:
            qemu_log("IN: %s\n", lookup_symbol(tb->pc));
            log_target_disas(cs, tb->pc, tb->size, 1);
            qemu_log("\n");
            break;
        }
        qemu_log_unlock();
    }
#endif
}

/* Restore the IA queue from the data recorded at tcg_gen_insn_start.
   data[1] == -1 means the back of the queue was only known at runtime
   and env->iaoq_b already holds the correct value.  */
void restore_state_to_opc(CPUHPPAState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->iaoq_f = data[0];
    if (data[1] != -1) {
        env->iaoq_b = data[1];
    }
    /* Since we were executing the instruction at IAOQ_F, and took some
       sort of action that provoked the cpu_restore_state, we can infer
       that the instruction was not nullified.  */
    env->psw_n = 0;
}