/*
 * HPPA emulation cpu translation for qemu.
 *
 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/log.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H

/* Choose to use explicit sizes within this file. */
#undef tcg_temp_new

/*
 * A comparison to be evaluated at runtime: condition code C applied
 * to operands A0 and A1.  Used both for instruction conditions and
 * for the pending nullification state (see null_cond below).
 */
typedef struct DisasCond {
    TCGCond c;
    TCGv_i64 a0, a1;
} DisasCond;

/* Per-translation-block disassembly state. */
typedef struct DisasContext {
    DisasContextBase base;
    CPUState *cs;

    /* Front and back of the instruction address offset queue,
       plus the offset of the insn after that.  */
    uint64_t iaoq_f;
    uint64_t iaoq_b;
    uint64_t iaoq_n;
    TCGv_i64 iaoq_n_var;

    /* Pending nullification condition; NULL_LAB is the branch-over
       label used by nullify_over/nullify_end.  */
    DisasCond null_cond;
    TCGLabel *null_lab;

    uint32_t insn;       /* raw bits of the current insn */
    uint32_t tb_flags;
    int mmu_idx;
    int privilege;
    /* True if PSW[N] may currently hold a nonzero value. */
    bool psw_n_nonzero;
    bool is_pa20;

#ifdef CONFIG_USER_ONLY
    MemOp unalign;
#endif
} DisasContext;

/* User-only emulation honours the per-context alignment setting;
   system emulation always requires alignment.  */
#ifdef CONFIG_USER_ONLY
#define UNALIGN(C)  (C)->unalign
#else
#define UNALIGN(C)  MO_ALIGN
#endif

/* Note that ssm/rsm instructions number PSW_W and PSW_E differently.
 */
static int expand_sm_imm(DisasContext *ctx, int val)
{
    /* Remap the insn's PSW_SM_E/PSW_SM_W bit positions to PSW_E/PSW_W. */
    if (val & PSW_SM_E) {
        val = (val & ~PSW_SM_E) | PSW_E;
    }
    if (val & PSW_SM_W) {
        val = (val & ~PSW_SM_W) | PSW_W;
    }
    return val;
}

/* Inverted space register indicates 0 means sr0 not inferred from base. */
static int expand_sr3x(DisasContext *ctx, int val)
{
    return ~val;
}

/* Convert the M:A bits within a memory insn to the tri-state value
   we use for the final M.  */
static int ma_to_m(DisasContext *ctx, int val)
{
    return val & 2 ? (val & 1 ? -1 : 1) : 0;
}

/* Convert the sign of the displacement to a pre or post-modify. */
static int pos_to_m(DisasContext *ctx, int val)
{
    return val ? 1 : -1;
}

static int neg_to_m(DisasContext *ctx, int val)
{
    return val ? -1 : 1;
}

/* Used for branch targets and fp memory ops. */
static int expand_shl2(DisasContext *ctx, int val)
{
    return val << 2;
}

/* Used for fp memory ops. */
static int expand_shl3(DisasContext *ctx, int val)
{
    return val << 3;
}

/* Used for assemble_21. */
static int expand_shl11(DisasContext *ctx, int val)
{
    return val << 11;
}

static int assemble_6(DisasContext *ctx, int val)
{
    /*
     * Officially, 32 * x + 32 - y.
     * Here, x is already in bit 5, and y is [4:0].
     * Since -y = ~y + 1, in 5 bits 32 - y => y ^ 31 + 1,
     * with the overflow from bit 4 summing with x.
     */
    return (val ^ 31) + 1;
}

/* Translate CMPI doubleword conditions to standard. */
static int cmpbid_c(DisasContext *ctx, int val)
{
    return val ? val : 4; /* 0 == "*<<" */
}


/* Include the auto-generated decoder. */
#include "decode-insns.c.inc"

/* We are not using a goto_tb (for whatever reason), but have updated
   the iaq (for whatever reason), so don't do it again on exit.
 */
#define DISAS_IAQ_N_UPDATED  DISAS_TARGET_0

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the iaq for the next instruction to be executed. */
#define DISAS_IAQ_N_STALE    DISAS_TARGET_1

/* Similarly, but we want to return to the main loop immediately
   to recognize unmasked interrupts.  */
#define DISAS_IAQ_N_STALE_EXIT   DISAS_TARGET_2
#define DISAS_EXIT               DISAS_TARGET_3

/* global register indexes */
static TCGv_i64 cpu_gr[32];
static TCGv_i64 cpu_sr[4];
static TCGv_i64 cpu_srH;
static TCGv_i64 cpu_iaoq_f;
static TCGv_i64 cpu_iaoq_b;
static TCGv_i64 cpu_iasq_f;
static TCGv_i64 cpu_iasq_b;
static TCGv_i64 cpu_sar;
static TCGv_i64 cpu_psw_n;
static TCGv_i64 cpu_psw_v;
static TCGv_i64 cpu_psw_cb;
static TCGv_i64 cpu_psw_cb_msb;

/* Create the TCG globals that mirror fields of CPUHPPAState. */
void hppa_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUHPPAState, V) }

    typedef struct { TCGv_i64 *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        /* SAR lives in the control register array, not a direct field. */
        { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
        DEF_VAR(psw_n),
        DEF_VAR(psw_v),
        DEF_VAR(psw_cb),
        DEF_VAR(psw_cb_msb),
        DEF_VAR(iaoq_f),
        DEF_VAR(iaoq_b),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler. */
    static const char gr_names[32][4] = {
        "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
        "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
    };
    /* SR[4-7] are not global registers so that we can index them.
 */
    static const char sr_names[5][4] = {
        "sr0", "sr1", "sr2", "sr3", "srH"
    };

    int i;

    /* %r0 is hard-wired to zero; no global backs it. */
    cpu_gr[0] = NULL;
    for (i = 1; i < 32; i++) {
        cpu_gr[i] = tcg_global_mem_new(tcg_env,
                                       offsetof(CPUHPPAState, gr[i]),
                                       gr_names[i]);
    }
    for (i = 0; i < 4; i++) {
        cpu_sr[i] = tcg_global_mem_new_i64(tcg_env,
                                           offsetof(CPUHPPAState, sr[i]),
                                           sr_names[i]);
    }
    cpu_srH = tcg_global_mem_new_i64(tcg_env,
                                     offsetof(CPUHPPAState, sr[4]),
                                     sr_names[4]);

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new(tcg_env, v->ofs, v->name);
    }

    cpu_iasq_f = tcg_global_mem_new_i64(tcg_env,
                                        offsetof(CPUHPPAState, iasq_f),
                                        "iasq_f");
    cpu_iasq_b = tcg_global_mem_new_i64(tcg_env,
                                        offsetof(CPUHPPAState, iasq_b),
                                        "iasq_b");
}

/* Constant "never" condition. */
static DisasCond cond_make_f(void)
{
    return (DisasCond){
        .c = TCG_COND_NEVER,
        .a0 = NULL,
        .a1 = NULL,
    };
}

/* Constant "always" condition. */
static DisasCond cond_make_t(void)
{
    return (DisasCond){
        .c = TCG_COND_ALWAYS,
        .a0 = NULL,
        .a1 = NULL,
    };
}

/* Condition "PSW[N] != 0". */
static DisasCond cond_make_n(void)
{
    return (DisasCond){
        .c = TCG_COND_NE,
        .a0 = cpu_psw_n,
        .a1 = tcg_constant_i64(0)
    };
}

/* Build a condition directly from the given operand temporaries;
   the caller must not reuse them afterward.  */
static DisasCond cond_make_tmp(TCGCond c, TCGv_i64 a0, TCGv_i64 a1)
{
    assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    return (DisasCond){ .c = c, .a0 = a0, .a1 = a1 };
}

/* As cond_make_tmp, comparing A0 against zero. */
static DisasCond cond_make_0_tmp(TCGCond c, TCGv_i64 a0)
{
    return cond_make_tmp(c, a0, tcg_constant_i64(0));
}

/* Compare A0 against zero, copying A0 so that later writes to the
   original do not affect the stored condition.  */
static DisasCond cond_make_0(TCGCond c, TCGv_i64 a0)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    tcg_gen_mov_i64(tmp, a0);
    return cond_make_0_tmp(c, tmp);
}

/* Compare A0 and A1, copying both so that later writes to the
   originals do not affect the stored condition.  */
static DisasCond cond_make(TCGCond c, TCGv_i64 a0, TCGv_i64 a1)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    tcg_gen_mov_i64(t0,
                    a0);
    tcg_gen_mov_i64(t1, a1);
    return cond_make_tmp(c, t0, t1);
}

/* Drop the operands of a condition and reset it to "never". */
static void cond_free(DisasCond *cond)
{
    switch (cond->c) {
    default:
        cond->a0 = NULL;
        cond->a1 = NULL;
        /* fallthru */
    case TCG_COND_ALWAYS:
        cond->c = TCG_COND_NEVER;
        break;
    case TCG_COND_NEVER:
        break;
    }
}

/* Return a TCG value for general register REG; %r0 reads as zero. */
static TCGv_i64 load_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0) {
        TCGv_i64 t = tcg_temp_new_i64();
        tcg_gen_movi_i64(t, 0);
        return t;
    } else {
        return cpu_gr[reg];
    }
}

/* Return a destination for register REG: a scratch temp when the
   write might be discarded (%r0, or a pending nullification that
   save_gpr will have to resolve), else the global itself.  */
static TCGv_i64 dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
        return tcg_temp_new_i64();
    } else {
        return cpu_gr[reg];
    }
}

/* Copy T into DEST, unless the pending nullification condition
   holds, in which case DEST is left unchanged.  */
static void save_or_nullify(DisasContext *ctx, TCGv_i64 dest, TCGv_i64 t)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        tcg_gen_movcond_i64(ctx->null_cond.c, dest, ctx->null_cond.a0,
                            ctx->null_cond.a1, dest, t);
    } else {
        tcg_gen_mov_i64(dest, t);
    }
}

/* Write T back to general register REG; writes to %r0 are dropped. */
static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_i64 t)
{
    if (reg != 0) {
        save_or_nullify(ctx, cpu_gr[reg], t);
    }
}

/* Byte offsets of the two 32-bit halves within a 64-bit fp register. */
#if HOST_BIG_ENDIAN
# define HI_OFS  0
# define LO_OFS  4
#else
# define HI_OFS  4
# define LO_OFS  0
#endif

/* Load single-precision fp register RT into a new i32 temp.  RT bit 5
   selects the low/high half of the underlying 64-bit register.  */
static TCGv_i32 load_frw_i32(unsigned rt)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, tcg_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ?
                              LO_OFS : HI_OFS));
    return ret;
}

/* As load_frw_i32, but register 0 reads as zero. */
static TCGv_i32 load_frw0_i32(unsigned rt)
{
    if (rt == 0) {
        TCGv_i32 ret = tcg_temp_new_i32();
        tcg_gen_movi_i32(ret, 0);
        return ret;
    } else {
        return load_frw_i32(rt);
    }
}

/* Load single-precision fp register RT zero-extended into an i64
   temp; register 0 reads as zero.  */
static TCGv_i64 load_frw0_i64(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    if (rt == 0) {
        tcg_gen_movi_i64(ret, 0);
    } else {
        tcg_gen_ld32u_i64(ret, tcg_env,
                          offsetof(CPUHPPAState, fr[rt & 31])
                          + (rt & 32 ? LO_OFS : HI_OFS));
    }
    return ret;
}

/* Store VAL to single-precision fp register RT. */
static void save_frw_i32(unsigned rt, TCGv_i32 val)
{
    tcg_gen_st_i32(val, tcg_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
}

#undef HI_OFS
#undef LO_OFS

/* Load double-precision fp register RT into a new temp. */
static TCGv_i64 load_frd(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    tcg_gen_ld_i64(ret, tcg_env, offsetof(CPUHPPAState, fr[rt]));
    return ret;
}

/* As load_frd, but register 0 reads as zero. */
static TCGv_i64 load_frd0(unsigned rt)
{
    if (rt == 0) {
        TCGv_i64 ret = tcg_temp_new_i64();
        tcg_gen_movi_i64(ret, 0);
        return ret;
    } else {
        return load_frd(rt);
    }
}

/* Store VAL to double-precision fp register RT. */
static void save_frd(unsigned rt, TCGv_i64 val)
{
    tcg_gen_st_i64(val, tcg_env, offsetof(CPUHPPAState, fr[rt]));
}

/* Copy space register REG into DEST.  User-only emulation has no
   spaces, so the result is always zero there.  */
static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
{
#ifdef CONFIG_USER_ONLY
    tcg_gen_movi_i64(dest, 0);
#else
    if (reg < 4) {
        tcg_gen_mov_i64(dest, cpu_sr[reg]);
    } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        /* srH caches the value shared by the upper space registers. */
        tcg_gen_mov_i64(dest, cpu_srH);
    } else {
        tcg_gen_ld_i64(dest, tcg_env, offsetof(CPUHPPAState, sr[reg]));
    }
#endif
}

/* Skip over the implementation of an insn that has been nullified.
   Use this when the insn is too complex for a conditional move.
 */
static void nullify_over(DisasContext *ctx)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        /* The always condition should have been handled in the main loop. */
        assert(ctx->null_cond.c != TCG_COND_ALWAYS);

        ctx->null_lab = gen_new_label();

        /* If we're using PSW[N], copy it to a temp because... */
        if (ctx->null_cond.a0 == cpu_psw_n) {
            ctx->null_cond.a0 = tcg_temp_new_i64();
            tcg_gen_mov_i64(ctx->null_cond.a0, cpu_psw_n);
        }
        /* ... we clear it before branching over the implementation,
           so that (1) it's clear after nullifying this insn and
           (2) if this insn nullifies the next, PSW[N] is valid.  */
        if (ctx->psw_n_nonzero) {
            ctx->psw_n_nonzero = false;
            tcg_gen_movi_i64(cpu_psw_n, 0);
        }

        tcg_gen_brcond_i64(ctx->null_cond.c, ctx->null_cond.a0,
                           ctx->null_cond.a1, ctx->null_lab);
        cond_free(&ctx->null_cond);
    }
}

/* Save the current nullification state to PSW[N]. */
static void nullify_save(DisasContext *ctx)
{
    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (ctx->psw_n_nonzero) {
            tcg_gen_movi_i64(cpu_psw_n, 0);
        }
        return;
    }
    if (ctx->null_cond.a0 != cpu_psw_n) {
        tcg_gen_setcond_i64(ctx->null_cond.c, cpu_psw_n,
                            ctx->null_cond.a0, ctx->null_cond.a1);
        ctx->psw_n_nonzero = true;
    }
    cond_free(&ctx->null_cond);
}

/* Set a PSW[N] to X.  The intention is that this is used immediately
   before a goto_tb/exit_tb, so that there is no fallthru path to other
   code within the TB.  Therefore we do not update psw_n_nonzero.  */
static void nullify_set(DisasContext *ctx, bool x)
{
    if (ctx->psw_n_nonzero || x) {
        tcg_gen_movi_i64(cpu_psw_n, x);
    }
}

/* Mark the end of an instruction that may have been nullified.
   This is the pair to nullify_over.  Always returns true so that
   it may be tail-called from a translate function.
 */
static bool nullify_end(DisasContext *ctx)
{
    TCGLabel *null_lab = ctx->null_lab;
    DisasJumpType status = ctx->base.is_jmp;

    /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
       For UPDATED, we cannot update on the nullified path.  */
    assert(status != DISAS_IAQ_N_UPDATED);

    if (likely(null_lab == NULL)) {
        /* The current insn wasn't conditional or handled the condition
           applied to it without a branch, so the (new) setting of
           NULL_COND can be applied directly to the next insn.  */
        return true;
    }
    ctx->null_lab = NULL;

    if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
        /* The next instruction will be unconditional,
           and NULL_COND already reflects that.  */
        gen_set_label(null_lab);
    } else {
        /* The insn that we just executed is itself nullifying the next
           instruction.  Store the condition in the PSW[N] global.
           We asserted PSW[N] = 0 in nullify_over, so that after the
           label we have the proper value in place.  */
        nullify_save(ctx);
        gen_set_label(null_lab);
        ctx->null_cond = cond_make_n();
    }
    if (status == DISAS_NORETURN) {
        ctx->base.is_jmp = DISAS_NEXT;
    }
    return true;
}

/* Mask of valid offset bits within a global virtual address:
   62 bits with PSW[W] set, else 32 bits.  */
static uint64_t gva_offset_mask(DisasContext *ctx)
{
    return (ctx->tb_flags & PSW_W
            ? MAKE_64BIT_MASK(0, 62)
            : MAKE_64BIT_MASK(0, 32));
}

/* Copy an IAOQ value into DEST, masked to the current offset width.
   IVAL == -1 means the value is not a compile-time constant; use the
   TCG value VVAL instead.  */
static void copy_iaoq_entry(DisasContext *ctx, TCGv_i64 dest,
                            uint64_t ival, TCGv_i64 vval)
{
    uint64_t mask = gva_offset_mask(ctx);

    if (ival != -1) {
        tcg_gen_movi_i64(dest, ival & mask);
        return;
    }
    tcg_debug_assert(vval != NULL);

    /*
     * We know that the IAOQ is already properly masked.
     * This optimization is primarily for "iaoq_f = iaoq_b".
     */
    if (vval == cpu_iaoq_f || vval == cpu_iaoq_b) {
        tcg_gen_mov_i64(dest, vval);
    } else {
        tcg_gen_andi_i64(dest, vval, mask);
    }
}

/* Branch target for displacement DISP relative to the current insn. */
static inline uint64_t iaoq_dest(DisasContext *ctx, int64_t disp)
{
    return ctx->iaoq_f + disp + 8;
}

/* Raise EXCEPTION via the helper. */
static void gen_excp_1(int exception)
{
    gen_helper_excp(tcg_env, tcg_constant_i32(exception));
}

/* Flush the IAOQ and nullification state to the CPU state, then
   raise EXCEPTION; ends the TB.  */
static void gen_excp(DisasContext *ctx, int exception)
{
    copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
    copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
    nullify_save(ctx);
    gen_excp_1(exception);
    ctx->base.is_jmp = DISAS_NORETURN;
}

/* As gen_excp, but honour nullification and record the raw insn
   bits in CR[IIR] first.  */
static bool gen_excp_iir(DisasContext *ctx, int exc)
{
    nullify_over(ctx);
    tcg_gen_st_i64(tcg_constant_i64(ctx->insn),
                   tcg_env, offsetof(CPUHPPAState, cr[CR_IIR]));
    gen_excp(ctx, exc);
    return nullify_end(ctx);
}

/* Raise an illegal-instruction exception. */
static bool gen_illegal(DisasContext *ctx)
{
    return gen_excp_iir(ctx, EXCP_ILL);
}

/* Trap unless executing at the most privileged level (0).  For
   user-only emulation such instructions always trap.  */
#ifdef CONFIG_USER_ONLY
#define CHECK_MOST_PRIVILEGED(EXCP) \
    return gen_excp_iir(ctx, EXCP)
#else
#define CHECK_MOST_PRIVILEGED(EXCP) \
    do {                                     \
        if (ctx->privilege != 0) {           \
            return gen_excp_iir(ctx, EXCP);  \
        }                                    \
    } while (0)
#endif

/* True if a direct branch to DEST may be emitted as a goto_tb. */
static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    return translator_use_goto_tb(&ctx->base, dest);
}

/* If the next insn is to be nullified, and it's on the same page,
   and we're not attempting to set a breakpoint on it, then we can
   totally skip the nullified insn.  This avoids creating and
   executing a TB that merely branches to the next TB.
 */
static bool use_nullify_skip(DisasContext *ctx)
{
    return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
            && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
}

/* Transfer control with the IAOQ set to F/B, via goto_tb when both
   offsets are compile-time constants and goto_tb is allowed, else
   via an indirect TB lookup.  -1 denotes a non-constant offset.  */
static void gen_goto_tb(DisasContext *ctx, int which,
                        uint64_t f, uint64_t b)
{
    if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
        tcg_gen_goto_tb(which);
        copy_iaoq_entry(ctx, cpu_iaoq_f, f, NULL);
        copy_iaoq_entry(ctx, cpu_iaoq_b, b, NULL);
        tcg_gen_exit_tb(ctx->base.tb, which);
    } else {
        copy_iaoq_entry(ctx, cpu_iaoq_f, f, cpu_iaoq_b);
        copy_iaoq_entry(ctx, cpu_iaoq_b, b, ctx->iaoq_n_var);
        tcg_gen_lookup_and_goto_ptr();
    }
}

/* True if arithmetic condition C requires the signed-overflow bit. */
static bool cond_need_sv(int c)
{
    return c == 2 || c == 3 || c == 6;
}

/* True if arithmetic condition C requires the carry/borrow bit. */
static bool cond_need_cb(int c)
{
    return c == 4 || c == 5;
}

/* Need extensions from TCGv_i32 to TCGv_i64. */
static bool cond_need_ext(DisasContext *ctx, bool d)
{
    return !(ctx->is_pa20 && d);
}

/*
 * Compute conditional for arithmetic.  See Page 5-3, Table 5-1, of
 * the Parisc 1.1 Architecture Reference Manual for details.
 */

static DisasCond do_cond(DisasContext *ctx, unsigned cf, bool d,
                         TCGv_i64 res, TCGv_i64 cb_msb, TCGv_i64 sv)
{
    DisasCond cond;
    TCGv_i64 tmp;

    switch (cf >> 1) {
    case 0: /* Never / TR    (0 / 1) */
        cond = cond_make_f();
        break;
    case 1: /* = / <>        (Z / !Z) */
        if (cond_need_ext(ctx, d)) {
            tmp = tcg_temp_new_i64();
            tcg_gen_ext32u_i64(tmp, res);
            res = tmp;
        }
        cond = cond_make_0(TCG_COND_EQ, res);
        break;
    case 2: /* < / >=        (N ^ V / !(N ^ V) */
        tmp = tcg_temp_new_i64();
        tcg_gen_xor_i64(tmp, res, sv);
        if (cond_need_ext(ctx, d)) {
            tcg_gen_ext32s_i64(tmp, tmp);
        }
        cond = cond_make_0_tmp(TCG_COND_LT, tmp);
        break;
    case 3: /* <= / >        (N ^ V) | Z / !((N ^ V) | Z) */
        /*
         * Simplify:
         *   (N ^ V) | Z
         *   ((res < 0) ^ (sv < 0)) | !res
         *   ((res ^ sv) < 0) | !res
         *   (~(res ^ sv) >= 0) | !res
         *   !(~(res ^ sv) >> 31) | !res
         *   !(~(res ^ sv) >> 31 & res)
         */
        tmp = tcg_temp_new_i64();
        tcg_gen_eqv_i64(tmp, res, sv);
        if (cond_need_ext(ctx, d)) {
            tcg_gen_sextract_i64(tmp, tmp, 31, 1);
            tcg_gen_and_i64(tmp, tmp, res);
            tcg_gen_ext32u_i64(tmp, tmp);
        } else {
            tcg_gen_sari_i64(tmp, tmp, 63);
            tcg_gen_and_i64(tmp, tmp, res);
        }
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 4: /* NUV / UV      (!C / C) */
        /* Only bit 0 of cb_msb is ever set.
         */
        cond = cond_make_0(TCG_COND_EQ, cb_msb);
        break;
    case 5: /* ZNV / VNZ     (!C | Z / C & !Z) */
        tmp = tcg_temp_new_i64();
        tcg_gen_neg_i64(tmp, cb_msb);
        tcg_gen_and_i64(tmp, tmp, res);
        if (cond_need_ext(ctx, d)) {
            tcg_gen_ext32u_i64(tmp, tmp);
        }
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 6: /* SV / NSV      (V / !V) */
        if (cond_need_ext(ctx, d)) {
            tmp = tcg_temp_new_i64();
            tcg_gen_ext32s_i64(tmp, sv);
            sv = tmp;
        }
        cond = cond_make_0(TCG_COND_LT, sv);
        break;
    case 7: /* OD / EV */
        tmp = tcg_temp_new_i64();
        tcg_gen_andi_i64(tmp, res, 1);
        cond = cond_make_0_tmp(TCG_COND_NE, tmp);
        break;
    default:
        g_assert_not_reached();
    }
    /* The low bit of CF inverts the sense of the condition. */
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/* Similar, but for the special case of subtraction without borrow, we
   can use the inputs directly.  This can allow other computation to be
   deleted as unused.  */

static DisasCond do_sub_cond(DisasContext *ctx, unsigned cf, bool d,
                             TCGv_i64 res, TCGv_i64 in1,
                             TCGv_i64 in2, TCGv_i64 sv)
{
    TCGCond tc;
    bool ext_uns;

    switch (cf >> 1) {
    case 1: /* = / <> */
        tc = TCG_COND_EQ;
        ext_uns = true;
        break;
    case 2: /* < / >= */
        tc = TCG_COND_LT;
        ext_uns = false;
        break;
    case 3: /* <= / > */
        tc = TCG_COND_LE;
        ext_uns = false;
        break;
    case 4: /* << / >>= */
        tc = TCG_COND_LTU;
        ext_uns = true;
        break;
    case 5: /* <<= / >> */
        tc = TCG_COND_LEU;
        ext_uns = true;
        break;
    default:
        /* Conditions not expressible as an input comparison fall
           back to the generic result-based computation.  */
        return do_cond(ctx, cf, d, res, NULL, sv);
    }

    if (cf & 1) {
        tc = tcg_invert_cond(tc);
    }
    if (cond_need_ext(ctx, d)) {
        TCGv_i64 t1 = tcg_temp_new_i64();
        TCGv_i64 t2 = tcg_temp_new_i64();

        if (ext_uns) {
            tcg_gen_ext32u_i64(t1, in1);
            tcg_gen_ext32u_i64(t2, in2);
        } else {
            tcg_gen_ext32s_i64(t1, in1);
            tcg_gen_ext32s_i64(t2,
                               in2);
        }
        return cond_make_tmp(tc, t1, t2);
    }
    return cond_make(tc, in1, in2);
}

/*
 * Similar, but for logicals, where the carry and overflow bits are not
 * computed, and use of them is undefined.
 *
 * Undefined or not, hardware does not trap.  It seems reasonable to
 * assume hardware treats cases c={4,5,6} as if C=0 & V=0, since that's
 * how cases c={2,3} are treated.
 */

static DisasCond do_log_cond(DisasContext *ctx, unsigned cf, bool d,
                             TCGv_i64 res)
{
    TCGCond tc;
    bool ext_uns;

    switch (cf) {
    case 0:  /* never */
    case 9:  /* undef, C */
    case 11: /* undef, C & !Z */
    case 12: /* undef, V */
        return cond_make_f();

    case 1:  /* true */
    case 8:  /* undef, !C */
    case 10: /* undef, !C | Z */
    case 13: /* undef, !V */
        return cond_make_t();

    case 2:  /* == */
        tc = TCG_COND_EQ;
        ext_uns = true;
        break;
    case 3:  /* <> */
        tc = TCG_COND_NE;
        ext_uns = true;
        break;
    case 4:  /* < */
        tc = TCG_COND_LT;
        ext_uns = false;
        break;
    case 5:  /* >= */
        tc = TCG_COND_GE;
        ext_uns = false;
        break;
    case 6:  /* <= */
        tc = TCG_COND_LE;
        ext_uns = false;
        break;
    case 7:  /* > */
        tc = TCG_COND_GT;
        ext_uns = false;
        break;

    case 14: /* OD */
    case 15: /* EV */
        return do_cond(ctx, cf, d, res, NULL, NULL);

    default:
        g_assert_not_reached();
    }

    if (cond_need_ext(ctx, d)) {
        TCGv_i64 tmp = tcg_temp_new_i64();

        if (ext_uns) {
            tcg_gen_ext32u_i64(tmp, res);
        } else {
            tcg_gen_ext32s_i64(tmp, res);
        }
        return cond_make_0_tmp(tc, tmp);
    }
    return cond_make_0(tc, res);
}

/* Similar, but for shift/extract/deposit conditions. */

static DisasCond do_sed_cond(DisasContext *ctx, unsigned orig, bool d,
                             TCGv_i64 res)
{
    unsigned c, f;

    /* Convert the compressed condition codes to standard.
       0-2 are the same as logicals (nv,<,<=), while 3 is OD.
       4-7 are the reverse of 0-3.  */
    c = orig & 3;
    if (c == 3) {
        c = 7;
    }
    f = (orig & 4) / 4;

    return do_log_cond(ctx, c * 2 + f, d, res);
}

/* Similar, but for unit conditions. */

static DisasCond do_unit_cond(unsigned cf, bool d, TCGv_i64 res,
                              TCGv_i64 in1, TCGv_i64 in2)
{
    DisasCond cond;
    TCGv_i64 tmp, cb = NULL;
    /* With D set, replicate the 32-bit constants into both words. */
    uint64_t d_repl = d ? 0x0000000100000001ull : 1;

    if (cf & 8) {
        /* Since we want to test lots of carry-out bits all at once, do not
         * do our normal thing and compute carry-in of bit B+1 since that
         * leaves us with carry bits spread across two words.
         */
        cb = tcg_temp_new_i64();
        tmp = tcg_temp_new_i64();
        tcg_gen_or_i64(cb, in1, in2);
        tcg_gen_and_i64(tmp, in1, in2);
        tcg_gen_andc_i64(cb, cb, res);
        tcg_gen_or_i64(cb, cb, tmp);
    }

    switch (cf >> 1) {
    case 0: /* never / TR */
    case 1: /* undefined */
    case 5: /* undefined */
        cond = cond_make_f();
        break;

    case 2: /* SBZ / NBZ */
        /* See hasless(v,1) from
         * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
         */
        tmp = tcg_temp_new_i64();
        tcg_gen_subi_i64(tmp, res, d_repl * 0x01010101u);
        tcg_gen_andc_i64(tmp, tmp, res);
        tcg_gen_andi_i64(tmp, tmp, d_repl * 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        break;

    case 3: /* SHZ / NHZ */
        tmp = tcg_temp_new_i64();
        tcg_gen_subi_i64(tmp, res, d_repl * 0x00010001u);
        tcg_gen_andc_i64(tmp, tmp, res);
        tcg_gen_andi_i64(tmp, tmp, d_repl * 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        break;

    case 4: /* SDC / NDC */
        tcg_gen_andi_i64(cb, cb, d_repl * 0x88888888u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 6: /* SBC / NBC */
        tcg_gen_andi_i64(cb, cb, d_repl * 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 7: /* SHC / NHC */
        tcg_gen_andi_i64(cb, cb, d_repl * 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    default:
        g_assert_not_reached();
    }
    /* The low bit of CF inverts the sense of the condition. */
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/* Extract the carry from CB/CB_MSB: bit 32 of the carry vector for
   32-bit operations, else the msb word itself.  */
static TCGv_i64 get_carry(DisasContext *ctx, bool d,
                          TCGv_i64 cb, TCGv_i64 cb_msb)
{
    if (cond_need_ext(ctx, d)) {
        TCGv_i64 t = tcg_temp_new_i64();
        tcg_gen_extract_i64(t, cb, 32, 1);
        return t;
    }
    return cb_msb;
}

/* As get_carry, applied to the live PSW carry/borrow globals. */
static TCGv_i64 get_psw_carry(DisasContext *ctx, bool d)
{
    return get_carry(ctx, d, cpu_psw_cb, cpu_psw_cb_msb);
}

/* Compute signed overflow for addition. */
static TCGv_i64 do_add_sv(DisasContext *ctx, TCGv_i64 res,
                          TCGv_i64 in1, TCGv_i64 in2)
{
    TCGv_i64 sv = tcg_temp_new_i64();
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* sv = (res ^ in1) & ~(in1 ^ in2): sign bit set iff the inputs
       agree in sign and the result's sign differs.  */
    tcg_gen_xor_i64(sv, res, in1);
    tcg_gen_xor_i64(tmp, in1, in2);
    tcg_gen_andc_i64(sv, sv, tmp);

    return sv;
}

/* Compute signed overflow for subtraction.
 */
static TCGv_i64 do_sub_sv(DisasContext *ctx, TCGv_i64 res,
                          TCGv_i64 in1, TCGv_i64 in2)
{
    TCGv_i64 sv = tcg_temp_new_i64();
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* sv = (res ^ in1) & (in1 ^ in2): sign bit set iff the inputs
       differ in sign and the result's sign differs from IN1.  */
    tcg_gen_xor_i64(sv, res, in1);
    tcg_gen_xor_i64(tmp, in1, in2);
    tcg_gen_and_i64(sv, sv, tmp);

    return sv;
}

/* Common expansion for add-type insns: RT = (IN1 << SHIFT) + IN2,
   with optional suppression of carry writeback (IS_L), trap on signed
   overflow (IS_TSV), trap on condition (IS_TC), and carry-in from the
   PSW (IS_C).  CF/D select the arithmetic condition.  */
static void do_add(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                   TCGv_i64 in2, unsigned shift, bool is_l,
                   bool is_tsv, bool is_tc, bool is_c, unsigned cf, bool d)
{
    TCGv_i64 dest, cb, cb_msb, cb_cond, sv, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new_i64();
    cb = NULL;
    cb_msb = NULL;
    cb_cond = NULL;

    if (shift) {
        tmp = tcg_temp_new_i64();
        tcg_gen_shli_i64(tmp, in1, shift);
        in1 = tmp;
    }

    if (!is_l || cond_need_cb(c)) {
        TCGv_i64 zero = tcg_constant_i64(0);
        cb_msb = tcg_temp_new_i64();
        cb = tcg_temp_new_i64();

        tcg_gen_add2_i64(dest, cb_msb, in1, zero, in2, zero);
        if (is_c) {
            tcg_gen_add2_i64(dest, cb_msb, dest, cb_msb,
                             get_psw_carry(ctx, d), zero);
        }
        /* in1 ^ in2 ^ dest recovers the per-bit carries. */
        tcg_gen_xor_i64(cb, in1, in2);
        tcg_gen_xor_i64(cb, cb, dest);
        if (cond_need_cb(c)) {
            cb_cond = get_carry(ctx, d, cb, cb_msb);
        }
    } else {
        tcg_gen_add_i64(dest, in1, in2);
        if (is_c) {
            tcg_gen_add_i64(dest, dest, get_psw_carry(ctx, d));
        }
    }

    /* Compute signed overflow if required. */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_add_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            /* ??? Need to include overflow from shift. */
            gen_helper_tsv(tcg_env, sv);
        }
    }

    /* Emit any conditional trap before any writeback. */
    cond = do_cond(ctx, cf, d, dest, cb_cond, sv);
    if (is_tc) {
        tmp = tcg_temp_new_i64();
        tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(tcg_env, tmp);
    }

    /* Write back the result.
 */
    if (!is_l) {
        save_or_nullify(ctx, cpu_psw_cb, cb);
        save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    }
    save_gpr(ctx, rt, dest);

    /* Install the new nullification. */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

/* Decode wrapper for register-form add instructions. */
static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_d_sh *a,
                       bool is_l, bool is_tsv, bool is_tc, bool is_c)
{
    TCGv_i64 tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_add(ctx, a->t, tcg_r1, tcg_r2, a->sh, is_l,
           is_tsv, is_tc, is_c, a->cf, a->d);
    return nullify_end(ctx);
}

/* Decode wrapper for immediate-form add instructions. */
static bool do_add_imm(DisasContext *ctx, arg_rri_cf *a,
                       bool is_tsv, bool is_tc)
{
    TCGv_i64 tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = tcg_constant_i64(a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    /* All ADDI conditions are 32-bit. */
    do_add(ctx, a->t, tcg_im, tcg_r2, 0, 0, is_tsv, is_tc, 0, a->cf, false);
    return nullify_end(ctx);
}

/* Common expansion for subtract-type insns: RT = IN1 - IN2, with
   optional trap on signed overflow (IS_TSV), borrow-in from the PSW
   (IS_B), and trap on condition (IS_TC).  CF/D select the condition. */
static void do_sub(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                   TCGv_i64 in2, bool is_tsv, bool is_b,
                   bool is_tc, unsigned cf, bool d)
{
    TCGv_i64 dest, sv, cb, cb_msb, zero, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new_i64();
    cb = tcg_temp_new_i64();
    cb_msb = tcg_temp_new_i64();

    zero = tcg_constant_i64(0);
    if (is_b) {
        /* DEST,C = IN1 + ~IN2 + C. */
        tcg_gen_not_i64(cb, in2);
        tcg_gen_add2_i64(dest, cb_msb, in1, zero, get_psw_carry(ctx, d), zero);
        tcg_gen_add2_i64(dest, cb_msb, dest, cb_msb, cb, zero);
        tcg_gen_xor_i64(cb, cb, in1);
        tcg_gen_xor_i64(cb, cb, dest);
    } else {
        /*
         * DEST,C = IN1 + ~IN2 + 1.  We can produce the same result in fewer
         * operations by seeding the high word with 1 and subtracting.
         */
        TCGv_i64 one = tcg_constant_i64(1);
        tcg_gen_sub2_i64(dest, cb_msb, in1, one, in2, zero);
        tcg_gen_eqv_i64(cb, in1, in2);
        tcg_gen_xor_i64(cb, cb, dest);
    }

    /* Compute signed overflow if required. */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            gen_helper_tsv(tcg_env, sv);
        }
    }

    /* Compute the condition.  We cannot use the special case for borrow.  */
    if (!is_b) {
        cond = do_sub_cond(ctx, cf, d, dest, in1, in2, sv);
    } else {
        cond = do_cond(ctx, cf, d, dest, get_carry(ctx, d, cb, cb_msb), sv);
    }

    /* Emit any conditional trap before any writeback. */
    if (is_tc) {
        tmp = tcg_temp_new_i64();
        tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(tcg_env, tmp);
    }

    /* Write back the result. */
    save_or_nullify(ctx, cpu_psw_cb, cb);
    save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification. */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

/* Decode wrapper for register-form subtract instructions. */
static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf_d *a,
                       bool is_tsv, bool is_b, bool is_tc)
{
    TCGv_i64 tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_sub(ctx, a->t, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, a->cf, a->d);
    return nullify_end(ctx);
}

/* Decode wrapper for immediate-form subtract instructions. */
static bool do_sub_imm(DisasContext *ctx, arg_rri_cf *a, bool is_tsv)
{
    TCGv_i64 tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = tcg_constant_i64(a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    /* All SUBI conditions are 32-bit.
 */
    do_sub(ctx, a->t, tcg_im, tcg_r2, is_tsv, 0, 0, a->cf, false);
    return nullify_end(ctx);
}

/* Compare-and-clear: form the condition from IN1 - IN2, then write
   zero to RT.  */
static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                      TCGv_i64 in2, unsigned cf, bool d)
{
    TCGv_i64 dest, sv;
    DisasCond cond;

    dest = tcg_temp_new_i64();
    tcg_gen_sub_i64(dest, in1, in2);

    /* Compute signed overflow if required. */
    sv = NULL;
    if (cond_need_sv(cf >> 1)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    /* Form the condition for the compare. */
    cond = do_sub_cond(ctx, cf, d, dest, in1, in2, sv);

    /* Clear. */
    tcg_gen_movi_i64(dest, 0);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification. */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

/* Common expansion for logical insns: RT = FN(IN1, IN2), with the
   nullification condition (if any) computed from the result.  */
static void do_log(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                   TCGv_i64 in2, unsigned cf, bool d,
                   void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dest = dest_gpr(ctx, rt);

    /* Perform the operation, and writeback. */
    fn(dest, in1, in2);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.
*/ 1231 cond_free(&ctx->null_cond); 1232 if (cf) { 1233 ctx->null_cond = do_log_cond(ctx, cf, d, dest); 1234 } 1235 } 1236 1237 static bool do_log_reg(DisasContext *ctx, arg_rrr_cf_d *a, 1238 void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64)) 1239 { 1240 TCGv_i64 tcg_r1, tcg_r2; 1241 1242 if (a->cf) { 1243 nullify_over(ctx); 1244 } 1245 tcg_r1 = load_gpr(ctx, a->r1); 1246 tcg_r2 = load_gpr(ctx, a->r2); 1247 do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d, fn); 1248 return nullify_end(ctx); 1249 } 1250 1251 static void do_unit(DisasContext *ctx, unsigned rt, TCGv_i64 in1, 1252 TCGv_i64 in2, unsigned cf, bool d, bool is_tc, 1253 void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64)) 1254 { 1255 TCGv_i64 dest; 1256 DisasCond cond; 1257 1258 if (cf == 0) { 1259 dest = dest_gpr(ctx, rt); 1260 fn(dest, in1, in2); 1261 save_gpr(ctx, rt, dest); 1262 cond_free(&ctx->null_cond); 1263 } else { 1264 dest = tcg_temp_new_i64(); 1265 fn(dest, in1, in2); 1266 1267 cond = do_unit_cond(cf, d, dest, in1, in2); 1268 1269 if (is_tc) { 1270 TCGv_i64 tmp = tcg_temp_new_i64(); 1271 tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1); 1272 gen_helper_tcond(tcg_env, tmp); 1273 } 1274 save_gpr(ctx, rt, dest); 1275 1276 cond_free(&ctx->null_cond); 1277 ctx->null_cond = cond; 1278 } 1279 } 1280 1281 #ifndef CONFIG_USER_ONLY 1282 /* The "normal" usage is SP >= 0, wherein SP == 0 selects the space 1283 from the top 2 bits of the base register. There are a few system 1284 instructions that have a 3-bit space specifier, for which SR0 is 1285 not special. To handle this, pass ~SP. 
*/ 1286 static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_i64 base) 1287 { 1288 TCGv_ptr ptr; 1289 TCGv_i64 tmp; 1290 TCGv_i64 spc; 1291 1292 if (sp != 0) { 1293 if (sp < 0) { 1294 sp = ~sp; 1295 } 1296 spc = tcg_temp_new_i64(); 1297 load_spr(ctx, spc, sp); 1298 return spc; 1299 } 1300 if (ctx->tb_flags & TB_FLAG_SR_SAME) { 1301 return cpu_srH; 1302 } 1303 1304 ptr = tcg_temp_new_ptr(); 1305 tmp = tcg_temp_new_i64(); 1306 spc = tcg_temp_new_i64(); 1307 1308 /* Extract top 2 bits of the address, shift left 3 for uint64_t index. */ 1309 tcg_gen_shri_i64(tmp, base, (ctx->tb_flags & PSW_W ? 64 : 32) - 5); 1310 tcg_gen_andi_i64(tmp, tmp, 030); 1311 tcg_gen_trunc_i64_ptr(ptr, tmp); 1312 1313 tcg_gen_add_ptr(ptr, ptr, tcg_env); 1314 tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4])); 1315 1316 return spc; 1317 } 1318 #endif 1319 1320 static void form_gva(DisasContext *ctx, TCGv_i64 *pgva, TCGv_i64 *pofs, 1321 unsigned rb, unsigned rx, int scale, int64_t disp, 1322 unsigned sp, int modify, bool is_phys) 1323 { 1324 TCGv_i64 base = load_gpr(ctx, rb); 1325 TCGv_i64 ofs; 1326 TCGv_i64 addr; 1327 1328 /* Note that RX is mutually exclusive with DISP. */ 1329 if (rx) { 1330 ofs = tcg_temp_new_i64(); 1331 tcg_gen_shli_i64(ofs, cpu_gr[rx], scale); 1332 tcg_gen_add_i64(ofs, ofs, base); 1333 } else if (disp || modify) { 1334 ofs = tcg_temp_new_i64(); 1335 tcg_gen_addi_i64(ofs, base, disp); 1336 } else { 1337 ofs = base; 1338 } 1339 1340 *pofs = ofs; 1341 *pgva = addr = tcg_temp_new_i64(); 1342 tcg_gen_andi_i64(addr, modify <= 0 ? ofs : base, gva_offset_mask(ctx)); 1343 #ifndef CONFIG_USER_ONLY 1344 if (!is_phys) { 1345 tcg_gen_or_i64(addr, addr, space_select(ctx, sp, base)); 1346 } 1347 #endif 1348 } 1349 1350 /* Emit a memory load. The modify parameter should be 1351 * < 0 for pre-modify, 1352 * > 0 for post-modify, 1353 * = 0 for no base register update. 
1354 */ 1355 static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb, 1356 unsigned rx, int scale, int64_t disp, 1357 unsigned sp, int modify, MemOp mop) 1358 { 1359 TCGv_i64 ofs; 1360 TCGv_i64 addr; 1361 1362 /* Caller uses nullify_over/nullify_end. */ 1363 assert(ctx->null_cond.c == TCG_COND_NEVER); 1364 1365 form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify, 1366 ctx->mmu_idx == MMU_PHYS_IDX); 1367 tcg_gen_qemu_ld_i32(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx)); 1368 if (modify) { 1369 save_gpr(ctx, rb, ofs); 1370 } 1371 } 1372 1373 static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb, 1374 unsigned rx, int scale, int64_t disp, 1375 unsigned sp, int modify, MemOp mop) 1376 { 1377 TCGv_i64 ofs; 1378 TCGv_i64 addr; 1379 1380 /* Caller uses nullify_over/nullify_end. */ 1381 assert(ctx->null_cond.c == TCG_COND_NEVER); 1382 1383 form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify, 1384 ctx->mmu_idx == MMU_PHYS_IDX); 1385 tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx)); 1386 if (modify) { 1387 save_gpr(ctx, rb, ofs); 1388 } 1389 } 1390 1391 static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb, 1392 unsigned rx, int scale, int64_t disp, 1393 unsigned sp, int modify, MemOp mop) 1394 { 1395 TCGv_i64 ofs; 1396 TCGv_i64 addr; 1397 1398 /* Caller uses nullify_over/nullify_end. */ 1399 assert(ctx->null_cond.c == TCG_COND_NEVER); 1400 1401 form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify, 1402 ctx->mmu_idx == MMU_PHYS_IDX); 1403 tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx)); 1404 if (modify) { 1405 save_gpr(ctx, rb, ofs); 1406 } 1407 } 1408 1409 static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb, 1410 unsigned rx, int scale, int64_t disp, 1411 unsigned sp, int modify, MemOp mop) 1412 { 1413 TCGv_i64 ofs; 1414 TCGv_i64 addr; 1415 1416 /* Caller uses nullify_over/nullify_end. 
 */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        /* Write back the modified base. */
        save_gpr(ctx, rb, ofs);
    }
}

/* Integer load into general register RT, handling nullification. */
static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
                    unsigned rx, int scale, int64_t disp,
                    unsigned sp, int modify, MemOp mop)
{
    TCGv_i64 dest;

    nullify_over(ctx);

    if (modify == 0) {
        /* No base register update. */
        dest = dest_gpr(ctx, rt);
    } else {
        /* Make sure if RT == RB, we see the result of the load. */
        dest = tcg_temp_new_i64();
    }
    do_load_64(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
    save_gpr(ctx, rt, dest);

    return nullify_end(ctx);
}

/* 32-bit floating-point load into FP register RT. */
static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, int64_t disp,
                      unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i32();
    do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
    save_frw_i32(rt, tmp);

    if (rt == 0) {
        /* Loading FR0 updates the FP status register. */
        gen_helper_loaded_fr0(tcg_env);
    }

    return nullify_end(ctx);
}

static bool trans_fldw(DisasContext *ctx, arg_ldst *a)
{
    /* Word load: the optional index scale is 2 (x4). */
    return do_floadw(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
                     a->disp, a->sp, a->m);
}

/* 64-bit floating-point load into FP register RT. */
static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, int64_t disp,
                      unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i64();
    do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
    save_frd(rt, tmp);

    if (rt == 0) {
        /* Loading FR0 updates the FP status register. */
        gen_helper_loaded_fr0(tcg_env);
    }

    return nullify_end(ctx);
}

static bool trans_fldd(DisasContext *ctx, arg_ldst *a)
{
    /* Doubleword load: the optional index scale is 3 (x8). */
    return do_floadd(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
                     a->disp, a->sp, a->m);
}

/* Integer store of general register RT, handling nullification. */
static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb,
                     int64_t disp, unsigned sp,
                     int modify, MemOp mop)
{
    nullify_over(ctx);
    do_store_64(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
    return nullify_end(ctx);
}

/* 32-bit floating-point store from FP register RT. */
static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, int64_t disp,
                       unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = load_frw_i32(rt);
    do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);

    return nullify_end(ctx);
}

static bool trans_fstw(DisasContext *ctx, arg_ldst *a)
{
    /* Word store: the optional index scale is 2 (x4). */
    return do_fstorew(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
                      a->disp, a->sp, a->m);
}

/* 64-bit floating-point store from FP register RT. */
static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, int64_t disp,
                       unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = load_frd(rt);
    do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);

    return nullify_end(ctx);
}

static bool trans_fstd(DisasContext *ctx, arg_ldst *a)
{
    /* Doubleword store: the optional index scale is 3 (x8). */
    return do_fstored(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
                      a->disp, a->sp, a->m);
}

/* Unary FP op: 32-bit result from 32-bit operand. */
static bool do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
{
    TCGv_i32 tmp;

    nullify_over(ctx);
    tmp = load_frw0_i32(ra);

    func(tmp, tcg_env, tmp);

    save_frw_i32(rt, tmp);
    return nullify_end(ctx);
}

/* Unary FP op: 32-bit result from 64-bit operand. */
static bool do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    nullify_over(ctx);
    src = load_frd(ra);
    dst = tcg_temp_new_i32();

    func(dst, tcg_env, src);

    save_frw_i32(rt, dst);
    return nullify_end(ctx);
}

/* Unary FP op: 64-bit result from 64-bit operand. */
static bool do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
{
    TCGv_i64 tmp;

    nullify_over(ctx);
    tmp = load_frd0(ra);

    func(tmp, tcg_env, tmp);

    save_frd(rt, tmp);
    return nullify_end(ctx);
}

/* Unary FP op: 64-bit result from 32-bit operand. */
static bool do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
{
    TCGv_i32 src;
    TCGv_i64 dst;

    nullify_over(ctx);
    src = load_frw0_i32(ra);
    dst = tcg_temp_new_i64();

    func(dst, tcg_env, src);

    save_frd(rt, dst);
    return nullify_end(ctx);
}

/* Binary FP op: 32-bit result from two 32-bit operands. */
static bool do_fop_weww(DisasContext *ctx, unsigned rt,
                        unsigned ra, unsigned rb,
                        void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
{
    TCGv_i32 a, b;

    nullify_over(ctx);
    a = load_frw0_i32(ra);
    b = load_frw0_i32(rb);

    func(a, tcg_env, a, b);

    save_frw_i32(rt, a);
    return nullify_end(ctx);
}

/* Binary FP op: 64-bit result from two 64-bit operands. */
static bool do_fop_dedd(DisasContext *ctx, unsigned rt,
                        unsigned ra, unsigned rb,
                        void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
{
    TCGv_i64 a, b;

    nullify_over(ctx);
    a = load_frd0(ra);
    b = load_frd0(rb);

    func(a, tcg_env, a, b);

    save_frd(rt, a);
    return nullify_end(ctx);
}

/* Emit an unconditional branch to a direct target, which may or may not
   have already had nullification handled.  */
static bool do_dbranch(DisasContext *ctx, uint64_t dest,
                       unsigned link, bool is_n)
{
    if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
        /* No nullification pending: simply retarget the next iaoq. */
        if (link != 0) {
            copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        ctx->iaoq_n = dest;
        if (is_n) {
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
    } else {
        nullify_over(ctx);

        if (link != 0) {
            copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }

        if (is_n && use_nullify_skip(ctx)) {
            /* Skip the nullified delay-slot insn entirely. */
            nullify_set(ctx, 0);
            gen_goto_tb(ctx, 0, dest, dest + 4);
        } else {
            nullify_set(ctx, is_n);
            gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
        }

        nullify_end(ctx);

        /* The branch itself was nullified: fall through sequentially. */
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
        ctx->base.is_jmp = DISAS_NORETURN;
    }
    return true;
}

/* Emit a conditional branch to a direct target.  If the branch itself
   is nullified, we should have already used nullify_over.  */
static bool do_cbranch(DisasContext *ctx, int64_t disp, bool is_n,
                       DisasCond *cond)
{
    uint64_t dest = iaoq_dest(ctx, disp);
    TCGLabel *taken = NULL;
    TCGCond c = cond->c;
    bool n;

    assert(ctx->null_cond.c == TCG_COND_NEVER);

    /* Handle TRUE and NEVER as direct branches. */
    if (c == TCG_COND_ALWAYS) {
        return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
    }
    if (c == TCG_COND_NEVER) {
        return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
    }

    taken = gen_new_label();
    tcg_gen_brcond_i64(c, cond->a0, cond->a1, taken);
    cond_free(cond);

    /* Not taken: Condition not satisfied; nullify on backward branches. */
    n = is_n && disp < 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
    } else {
        if (!n && ctx->null_lab) {
            gen_set_label(ctx->null_lab);
            ctx->null_lab = NULL;
        }
        nullify_set(ctx, n);
        if (ctx->iaoq_n == -1) {
            /* The temporary iaoq_n_var died at the branch above.
               Regenerate it here instead of saving it.  */
            tcg_gen_addi_i64(ctx->iaoq_n_var, cpu_iaoq_b, 4);
        }
        gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
    }

    gen_set_label(taken);

    /* Taken: Condition satisfied; nullify on forward branches. */
    n = is_n && disp >= 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, dest, dest + 4);
    } else {
        nullify_set(ctx, n);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
    }

    /* Not taken: the branch itself was nullified. */
    if (ctx->null_lab) {
        gen_set_label(ctx->null_lab);
        ctx->null_lab = NULL;
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    } else {
        ctx->base.is_jmp = DISAS_NORETURN;
    }
    return true;
}

/* Emit an unconditional branch to an indirect target.  This handles
   nullification of the branch itself.
 */
static bool do_ibranch(DisasContext *ctx, TCGv_i64 dest,
                       unsigned link, bool is_n)
{
    TCGv_i64 a0, a1, next, tmp;
    TCGCond c;

    assert(ctx->null_lab == NULL);

    if (ctx->null_cond.c == TCG_COND_NEVER) {
        /* Unconditional branch. */
        if (link != 0) {
            copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        next = tcg_temp_new_i64();
        tcg_gen_mov_i64(next, dest);
        if (is_n) {
            if (use_nullify_skip(ctx)) {
                /* Skip the nullified insn: queue starts at dest + 4. */
                copy_iaoq_entry(ctx, cpu_iaoq_f, -1, next);
                tcg_gen_addi_i64(next, next, 4);
                copy_iaoq_entry(ctx, cpu_iaoq_b, -1, next);
                nullify_set(ctx, 0);
                ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
                return true;
            }
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
        ctx->iaoq_n = -1;
        ctx->iaoq_n_var = next;
    } else if (is_n && use_nullify_skip(ctx)) {
        /* The (conditional) branch, B, nullifies the next insn, N,
           and we're allowed to skip execution N (no single-step or
           tracepoint in effect).  Since the goto_ptr that we must use
           for the indirect branch consumes no special resources, we
           can (conditionally) skip B and continue execution.  */
        /* The use_nullify_skip test implies we have a known control path. */
        tcg_debug_assert(ctx->iaoq_b != -1);
        tcg_debug_assert(ctx->iaoq_n != -1);

        /* We do have to handle the non-local temporary, DEST, before
           branching.  Since IOAQ_F is not really live at this point, we
           can simply store DEST optimistically.  Similarly with IAOQ_B.  */
        copy_iaoq_entry(ctx, cpu_iaoq_f, -1, dest);
        next = tcg_temp_new_i64();
        tcg_gen_addi_i64(next, dest, 4);
        copy_iaoq_entry(ctx, cpu_iaoq_b, -1, next);

        nullify_over(ctx);
        if (link != 0) {
            copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        tcg_gen_lookup_and_goto_ptr();
        return nullify_end(ctx);
    } else {
        c = ctx->null_cond.c;
        a0 = ctx->null_cond.a0;
        a1 = ctx->null_cond.a1;

        tmp = tcg_temp_new_i64();
        next = tcg_temp_new_i64();

        /* Select between the sequential next address and DEST. */
        copy_iaoq_entry(ctx, tmp, ctx->iaoq_n, ctx->iaoq_n_var);
        tcg_gen_movcond_i64(c, next, a0, a1, tmp, dest);
        ctx->iaoq_n = -1;
        ctx->iaoq_n_var = next;

        if (link != 0) {
            /* Only update the link register when the branch is taken. */
            tcg_gen_movcond_i64(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
        }

        if (is_n) {
            /* The branch nullifies the next insn, which means the state of N
               after the branch is the inverse of the state of N that applied
               to the branch.  */
            tcg_gen_setcond_i64(tcg_invert_cond(c), cpu_psw_n, a0, a1);
            cond_free(&ctx->null_cond);
            ctx->null_cond = cond_make_n();
            ctx->psw_n_nonzero = true;
        } else {
            cond_free(&ctx->null_cond);
        }
    }
    return true;
}

/* Implement
 *    if (IAOQ_Front{30..31} < GR[b]{30..31})
 *      IAOQ_Next{30..31} ← GR[b]{30..31};
 *    else
 *      IAOQ_Next{30..31} ← IAOQ_Front{30..31};
 * which keeps the privilege level from being increased.
 */
static TCGv_i64 do_ibranch_priv(DisasContext *ctx, TCGv_i64 offset)
{
    TCGv_i64 dest;
    switch (ctx->privilege) {
    case 0:
        /* Privilege 0 is maximum and is allowed to decrease. */
        return offset;
    case 3:
        /* Privilege 3 is minimum and is never allowed to increase.
 */
        dest = tcg_temp_new_i64();
        tcg_gen_ori_i64(dest, offset, 3);
        break;
    default:
        /* Clamp the privilege field to no higher than the current level. */
        dest = tcg_temp_new_i64();
        tcg_gen_andi_i64(dest, offset, -4);
        tcg_gen_ori_i64(dest, dest, ctx->privilege);
        tcg_gen_movcond_i64(TCG_COND_GTU, dest, dest, offset, dest, offset);
        break;
    }
    return dest;
}

#ifdef CONFIG_USER_ONLY
/* On Linux, page zero is normally marked execute only + gateway.
   Therefore normal read or write is supposed to fail, but specific
   offsets have kernel code mapped to raise permissions to implement
   system calls.  Handling this via an explicit check here, rather
   in than the "be disp(sr2,r0)" instruction that probably sent us
   here, is the easiest way to handle the branch delay slot on the
   aforementioned BE.  */
static void do_page_zero(DisasContext *ctx)
{
    TCGv_i64 tmp;

    /* If by some means we get here with PSW[N]=1, that implies that
       the B,GATE instruction would be skipped, and we'd fault on the
       next insn within the privileged page.  */
    switch (ctx->null_cond.c) {
    case TCG_COND_NEVER:
        break;
    case TCG_COND_ALWAYS:
        tcg_gen_movi_i64(cpu_psw_n, 0);
        goto do_sigill;
    default:
        /* Since this is always the first (and only) insn within the
           TB, we should know the state of PSW[N] from TB->FLAGS.  */
        g_assert_not_reached();
    }

    /* Check that we didn't arrive here via some means that allowed
       non-sequential instruction execution.  Normally the PSW[B] bit
       detects this by disallowing the B,GATE instruction to execute
       under such conditions.  */
    if (ctx->iaoq_b != ctx->iaoq_f + 4) {
        goto do_sigill;
    }

    /* Dispatch on the gateway-page offset (ignoring privilege bits). */
    switch (ctx->iaoq_f & -4) {
    case 0x00: /* Null pointer call */
        gen_excp_1(EXCP_IMP);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    case 0xb0: /* LWS */
        gen_excp_1(EXCP_SYSCALL_LWS);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    case 0xe0: /* SET_THREAD_POINTER */
        tcg_gen_st_i64(cpu_gr[26], tcg_env, offsetof(CPUHPPAState, cr[27]));
        tmp = tcg_temp_new_i64();
        /* Return to gr31, forcing lowest privilege. */
        tcg_gen_ori_i64(tmp, cpu_gr[31], 3);
        copy_iaoq_entry(ctx, cpu_iaoq_f, -1, tmp);
        tcg_gen_addi_i64(tmp, tmp, 4);
        copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
        ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
        break;

    case 0x100: /* SYSCALL */
        gen_excp_1(EXCP_SYSCALL);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    default:
    do_sigill:
        gen_excp_1(EXCP_ILL);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;
    }
}
#endif

static bool trans_nop(DisasContext *ctx, arg_nop *a)
{
    cond_free(&ctx->null_cond);
    return true;
}

static bool trans_break(DisasContext *ctx, arg_break *a)
{
    return gen_excp_iir(ctx, EXCP_BREAK);
}

static bool trans_sync(DisasContext *ctx, arg_sync *a)
{
    /* No point in nullifying the memory barrier.
*/ 1940 tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL); 1941 1942 cond_free(&ctx->null_cond); 1943 return true; 1944 } 1945 1946 static bool trans_mfia(DisasContext *ctx, arg_mfia *a) 1947 { 1948 unsigned rt = a->t; 1949 TCGv_i64 tmp = dest_gpr(ctx, rt); 1950 tcg_gen_movi_i64(tmp, ctx->iaoq_f); 1951 save_gpr(ctx, rt, tmp); 1952 1953 cond_free(&ctx->null_cond); 1954 return true; 1955 } 1956 1957 static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a) 1958 { 1959 unsigned rt = a->t; 1960 unsigned rs = a->sp; 1961 TCGv_i64 t0 = tcg_temp_new_i64(); 1962 1963 load_spr(ctx, t0, rs); 1964 tcg_gen_shri_i64(t0, t0, 32); 1965 1966 save_gpr(ctx, rt, t0); 1967 1968 cond_free(&ctx->null_cond); 1969 return true; 1970 } 1971 1972 static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a) 1973 { 1974 unsigned rt = a->t; 1975 unsigned ctl = a->r; 1976 TCGv_i64 tmp; 1977 1978 switch (ctl) { 1979 case CR_SAR: 1980 if (a->e == 0) { 1981 /* MFSAR without ,W masks low 5 bits. */ 1982 tmp = dest_gpr(ctx, rt); 1983 tcg_gen_andi_i64(tmp, cpu_sar, 31); 1984 save_gpr(ctx, rt, tmp); 1985 goto done; 1986 } 1987 save_gpr(ctx, rt, cpu_sar); 1988 goto done; 1989 case CR_IT: /* Interval Timer */ 1990 /* FIXME: Respect PSW_S bit. */ 1991 nullify_over(ctx); 1992 tmp = dest_gpr(ctx, rt); 1993 if (translator_io_start(&ctx->base)) { 1994 gen_helper_read_interval_timer(tmp); 1995 ctx->base.is_jmp = DISAS_IAQ_N_STALE; 1996 } else { 1997 gen_helper_read_interval_timer(tmp); 1998 } 1999 save_gpr(ctx, rt, tmp); 2000 return nullify_end(ctx); 2001 case 26: 2002 case 27: 2003 break; 2004 default: 2005 /* All other control registers are privileged. 
*/ 2006 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG); 2007 break; 2008 } 2009 2010 tmp = tcg_temp_new_i64(); 2011 tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl])); 2012 save_gpr(ctx, rt, tmp); 2013 2014 done: 2015 cond_free(&ctx->null_cond); 2016 return true; 2017 } 2018 2019 static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a) 2020 { 2021 unsigned rr = a->r; 2022 unsigned rs = a->sp; 2023 TCGv_i64 tmp; 2024 2025 if (rs >= 5) { 2026 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG); 2027 } 2028 nullify_over(ctx); 2029 2030 tmp = tcg_temp_new_i64(); 2031 tcg_gen_shli_i64(tmp, load_gpr(ctx, rr), 32); 2032 2033 if (rs >= 4) { 2034 tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUHPPAState, sr[rs])); 2035 ctx->tb_flags &= ~TB_FLAG_SR_SAME; 2036 } else { 2037 tcg_gen_mov_i64(cpu_sr[rs], tmp); 2038 } 2039 2040 return nullify_end(ctx); 2041 } 2042 2043 static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a) 2044 { 2045 unsigned ctl = a->t; 2046 TCGv_i64 reg; 2047 TCGv_i64 tmp; 2048 2049 if (ctl == CR_SAR) { 2050 reg = load_gpr(ctx, a->r); 2051 tmp = tcg_temp_new_i64(); 2052 tcg_gen_andi_i64(tmp, reg, ctx->is_pa20 ? 63 : 31); 2053 save_or_nullify(ctx, cpu_sar, tmp); 2054 2055 cond_free(&ctx->null_cond); 2056 return true; 2057 } 2058 2059 /* All other control registers are privileged or read-only. */ 2060 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG); 2061 2062 #ifndef CONFIG_USER_ONLY 2063 nullify_over(ctx); 2064 reg = load_gpr(ctx, a->r); 2065 2066 switch (ctl) { 2067 case CR_IT: 2068 gen_helper_write_interval_timer(tcg_env, reg); 2069 break; 2070 case CR_EIRR: 2071 gen_helper_write_eirr(tcg_env, reg); 2072 break; 2073 case CR_EIEM: 2074 gen_helper_write_eiem(tcg_env, reg); 2075 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT; 2076 break; 2077 2078 case CR_IIASQ: 2079 case CR_IIAOQ: 2080 /* FIXME: Respect PSW_Q bit */ 2081 /* The write advances the queue and stores to the back element. 
*/ 2082 tmp = tcg_temp_new_i64(); 2083 tcg_gen_ld_i64(tmp, tcg_env, 2084 offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ])); 2085 tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl])); 2086 tcg_gen_st_i64(reg, tcg_env, 2087 offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ])); 2088 break; 2089 2090 case CR_PID1: 2091 case CR_PID2: 2092 case CR_PID3: 2093 case CR_PID4: 2094 tcg_gen_st_i64(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl])); 2095 #ifndef CONFIG_USER_ONLY 2096 gen_helper_change_prot_id(tcg_env); 2097 #endif 2098 break; 2099 2100 default: 2101 tcg_gen_st_i64(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl])); 2102 break; 2103 } 2104 return nullify_end(ctx); 2105 #endif 2106 } 2107 2108 static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a) 2109 { 2110 TCGv_i64 tmp = tcg_temp_new_i64(); 2111 2112 tcg_gen_not_i64(tmp, load_gpr(ctx, a->r)); 2113 tcg_gen_andi_i64(tmp, tmp, ctx->is_pa20 ? 63 : 31); 2114 save_or_nullify(ctx, cpu_sar, tmp); 2115 2116 cond_free(&ctx->null_cond); 2117 return true; 2118 } 2119 2120 static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a) 2121 { 2122 TCGv_i64 dest = dest_gpr(ctx, a->t); 2123 2124 #ifdef CONFIG_USER_ONLY 2125 /* We don't implement space registers in user mode. */ 2126 tcg_gen_movi_i64(dest, 0); 2127 #else 2128 tcg_gen_mov_i64(dest, space_select(ctx, a->sp, load_gpr(ctx, a->b))); 2129 tcg_gen_shri_i64(dest, dest, 32); 2130 #endif 2131 save_gpr(ctx, a->t, dest); 2132 2133 cond_free(&ctx->null_cond); 2134 return true; 2135 } 2136 2137 static bool trans_rsm(DisasContext *ctx, arg_rsm *a) 2138 { 2139 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); 2140 #ifndef CONFIG_USER_ONLY 2141 TCGv_i64 tmp; 2142 2143 nullify_over(ctx); 2144 2145 tmp = tcg_temp_new_i64(); 2146 tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, psw)); 2147 tcg_gen_andi_i64(tmp, tmp, ~a->i); 2148 gen_helper_swap_system_mask(tmp, tcg_env, tmp); 2149 save_gpr(ctx, a->t, tmp); 2150 2151 /* Exit the TB to recognize new interrupts, e.g. PSW_M. 
 */
    ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
    return nullify_end(ctx);
#endif
}

/* SSM: set system mask bits; old PSW returned in RT. */
static bool trans_ssm(DisasContext *ctx, arg_ssm *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i64();
    tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, psw));
    tcg_gen_ori_i64(tmp, tmp, a->i);
    gen_helper_swap_system_mask(tmp, tcg_env, tmp);
    save_gpr(ctx, a->t, tmp);

    /* Exit the TB to recognize new interrupts, e.g. PSW_I. */
    ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
    return nullify_end(ctx);
#endif
}

/* MTSM: replace the system mask with the contents of register R. */
static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_i64 tmp, reg;
    nullify_over(ctx);

    reg = load_gpr(ctx, a->r);
    tmp = tcg_temp_new_i64();
    gen_helper_swap_system_mask(tmp, tcg_env, reg);

    /* Exit the TB to recognize new interrupts. */
    ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
    return nullify_end(ctx);
#endif
}

/* Return from interruption; rfi_r selects the RFI,R form. */
static bool do_rfi(DisasContext *ctx, bool rfi_r)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);

    if (rfi_r) {
        gen_helper_rfi_r(tcg_env);
    } else {
        gen_helper_rfi(tcg_env);
    }
    /* Exit the TB to recognize new interrupts. */
    tcg_gen_exit_tb(NULL, 0);
    ctx->base.is_jmp = DISAS_NORETURN;

    return nullify_end(ctx);
#endif
}

static bool trans_rfi(DisasContext *ctx, arg_rfi *a)
{
    return do_rfi(ctx, false);
}

static bool trans_rfi_r(DisasContext *ctx, arg_rfi_r *a)
{
    return do_rfi(ctx, true);
}

static bool trans_halt(DisasContext *ctx, arg_halt *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);
    gen_helper_halt(tcg_env);
    ctx->base.is_jmp = DISAS_NORETURN;
    return nullify_end(ctx);
#endif
}

static bool trans_reset(DisasContext *ctx, arg_reset *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);
    gen_helper_reset(tcg_env);
    ctx->base.is_jmp = DISAS_NORETURN;
    return nullify_end(ctx);
#endif
}

static bool trans_getshadowregs(DisasContext *ctx, arg_getshadowregs *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);
    gen_helper_getshadowregs(tcg_env);
    return nullify_end(ctx);
#endif
}

/* Memory-op encodings that only perform the base register update. */
static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a)
{
    if (a->m) {
        TCGv_i64 dest = dest_gpr(ctx, a->b);
        TCGv_i64 src1 = load_gpr(ctx, a->b);
        TCGv_i64 src2 = load_gpr(ctx, a->x);

        /* The only thing we need to do is the base register modification.
*/ 2263 tcg_gen_add_i64(dest, src1, src2); 2264 save_gpr(ctx, a->b, dest); 2265 } 2266 cond_free(&ctx->null_cond); 2267 return true; 2268 } 2269 2270 static bool trans_probe(DisasContext *ctx, arg_probe *a) 2271 { 2272 TCGv_i64 dest, ofs; 2273 TCGv_i32 level, want; 2274 TCGv_i64 addr; 2275 2276 nullify_over(ctx); 2277 2278 dest = dest_gpr(ctx, a->t); 2279 form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false); 2280 2281 if (a->imm) { 2282 level = tcg_constant_i32(a->ri); 2283 } else { 2284 level = tcg_temp_new_i32(); 2285 tcg_gen_extrl_i64_i32(level, load_gpr(ctx, a->ri)); 2286 tcg_gen_andi_i32(level, level, 3); 2287 } 2288 want = tcg_constant_i32(a->write ? PAGE_WRITE : PAGE_READ); 2289 2290 gen_helper_probe(dest, tcg_env, addr, level, want); 2291 2292 save_gpr(ctx, a->t, dest); 2293 return nullify_end(ctx); 2294 } 2295 2296 static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a) 2297 { 2298 if (ctx->is_pa20) { 2299 return false; 2300 } 2301 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); 2302 #ifndef CONFIG_USER_ONLY 2303 TCGv_i64 addr; 2304 TCGv_i64 ofs, reg; 2305 2306 nullify_over(ctx); 2307 2308 form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false); 2309 reg = load_gpr(ctx, a->r); 2310 if (a->addr) { 2311 gen_helper_itlba_pa11(tcg_env, addr, reg); 2312 } else { 2313 gen_helper_itlbp_pa11(tcg_env, addr, reg); 2314 } 2315 2316 /* Exit TB for TLB change if mmu is enabled. 
 */
    if (ctx->tb_flags & PSW_C) {
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    }
    return nullify_end(ctx);
#endif
}

/* Purge TLB entry (PDTLB/PITLB), or the whole local TLB (,L forms). */
static bool trans_pxtlbx(DisasContext *ctx, arg_pxtlbx *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_i64 addr;
    TCGv_i64 ofs;

    nullify_over(ctx);

    form_gva(ctx, &addr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
    if (a->m) {
        save_gpr(ctx, a->b, ofs);
    }
    if (a->local) {
        gen_helper_ptlbe(tcg_env);
    } else {
        gen_helper_ptlb(tcg_env, addr);
    }

    /* Exit TB for TLB change if mmu is enabled. */
    if (ctx->tb_flags & PSW_C) {
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    }
    return nullify_end(ctx);
#endif
}

/*
 * Implement the pcxl and pcxl2 Fast TLB Insert instructions.
 * See
 *     https://parisc.wiki.kernel.org/images-parisc/a/a9/Pcxl2_ers.pdf
 *     page 13-9 (195/206)
 */
static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a)
{
    if (ctx->is_pa20) {
        return false;
    }
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_i64 addr, atl, stl;
    TCGv_i64 reg;

    nullify_over(ctx);

    /*
     * FIXME:
     *  if (not (pcxl or pcxl2))
     *      return gen_illegal(ctx);
     */

    atl = tcg_temp_new_i64();
    stl = tcg_temp_new_i64();
    addr = tcg_temp_new_i64();

    /* Reconstruct the address from the saved interruption registers. */
    tcg_gen_ld32u_i64(stl, tcg_env,
                      a->data ? offsetof(CPUHPPAState, cr[CR_ISR])
                      : offsetof(CPUHPPAState, cr[CR_IIASQ]));
    tcg_gen_ld32u_i64(atl, tcg_env,
                      a->data ? offsetof(CPUHPPAState, cr[CR_IOR])
                      : offsetof(CPUHPPAState, cr[CR_IIAOQ]));
    tcg_gen_shli_i64(stl, stl, 32);
    tcg_gen_or_i64(addr, atl, stl);

    reg = load_gpr(ctx, a->r);
    if (a->addr) {
        gen_helper_itlba_pa11(tcg_env, addr, reg);
    } else {
        gen_helper_itlbp_pa11(tcg_env, addr, reg);
    }

    /* Exit TB for TLB change if mmu is enabled. */
    if (ctx->tb_flags & PSW_C) {
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    }
    return nullify_end(ctx);
#endif
}

/* PA2.0 TLB insert (IDTLBT/IITLBT); not valid on PA1.x. */
static bool trans_ixtlbt(DisasContext *ctx, arg_ixtlbt *a)
{
    if (!ctx->is_pa20) {
        return false;
    }
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);
    {
        TCGv_i64 src1 = load_gpr(ctx, a->r1);
        TCGv_i64 src2 = load_gpr(ctx, a->r2);

        if (a->data) {
            gen_helper_idtlbt_pa20(tcg_env, src1, src2);
        } else {
            gen_helper_iitlbt_pa20(tcg_env, src1, src2);
        }
    }
    /* Exit TB for TLB change if mmu is enabled. */
    if (ctx->tb_flags & PSW_C) {
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    }
    return nullify_end(ctx);
#endif
}

/* LPA: translate a virtual address to physical, result in RT. */
static bool trans_lpa(DisasContext *ctx, arg_ldst *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_i64 vaddr;
    TCGv_i64 ofs, paddr;

    nullify_over(ctx);

    form_gva(ctx, &vaddr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);

    paddr = tcg_temp_new_i64();
    gen_helper_lpa(paddr, tcg_env, vaddr);

    /* Note that physical address result overrides base modification. */
    if (a->m) {
        save_gpr(ctx, a->b, ofs);
    }
    save_gpr(ctx, a->t, paddr);

    return nullify_end(ctx);
#endif
}

/* LCI: load coherence index for an address into RT. */
static bool trans_lci(DisasContext *ctx, arg_lci *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);

    /* The Coherence Index is an implementation-defined function of the
       physical address.
Two addresses with the same CI have a coherent
       view of the cache.  Our implementation is to return 0 for all,
       since the entire address space is coherent.  */
    save_gpr(ctx, a->t, tcg_constant_i64(0));

    cond_free(&ctx->null_cond);
    return true;
}

/*
 * Wrappers routing the three-register ADD/SUB/logical forms to the
 * shared helpers.  The boolean arguments select instruction variants;
 * from the mnemonics these appear to be, in order, the L (logical,
 * no carry/borrow), TSV (trap on signed overflow), TC (trap on
 * condition) and C (carry-in) completers for do_add_reg, and
 * TSV, B (borrow-in), TC for do_sub_reg -- confirm against the
 * helper definitions earlier in this file.
 */

static bool trans_add(DisasContext *ctx, arg_rrr_cf_d_sh *a)
{
    return do_add_reg(ctx, a, false, false, false, false);
}

static bool trans_add_l(DisasContext *ctx, arg_rrr_cf_d_sh *a)
{
    return do_add_reg(ctx, a, true, false, false, false);
}

static bool trans_add_tsv(DisasContext *ctx, arg_rrr_cf_d_sh *a)
{
    return do_add_reg(ctx, a, false, true, false, false);
}

static bool trans_add_c(DisasContext *ctx, arg_rrr_cf_d_sh *a)
{
    return do_add_reg(ctx, a, false, false, false, true);
}

static bool trans_add_c_tsv(DisasContext *ctx, arg_rrr_cf_d_sh *a)
{
    return do_add_reg(ctx, a, false, true, false, true);
}

static bool trans_sub(DisasContext *ctx, arg_rrr_cf_d *a)
{
    return do_sub_reg(ctx, a, false, false, false);
}

static bool trans_sub_tsv(DisasContext *ctx, arg_rrr_cf_d *a)
{
    return do_sub_reg(ctx, a, true, false, false);
}

static bool trans_sub_tc(DisasContext *ctx, arg_rrr_cf_d *a)
{
    return do_sub_reg(ctx, a, false, false, true);
}

static bool trans_sub_tsv_tc(DisasContext *ctx, arg_rrr_cf_d *a)
{
    return do_sub_reg(ctx, a, true, false, true);
}

static bool trans_sub_b(DisasContext *ctx, arg_rrr_cf_d *a)
{
    return do_sub_reg(ctx, a, false, true, false);
}

static bool trans_sub_b_tsv(DisasContext *ctx, arg_rrr_cf_d *a)
{
    return do_sub_reg(ctx, a, true, true, false);
}

static bool trans_andcm(DisasContext *ctx, arg_rrr_cf_d *a)
{
    /* ANDCM: rt = r1 & ~r2.  */
    return do_log_reg(ctx, a, tcg_gen_andc_i64);
}

static bool
trans_and(DisasContext *ctx, arg_rrr_cf_d *a)
{
    return do_log_reg(ctx, a, tcg_gen_and_i64);
}

static bool trans_or(DisasContext *ctx, arg_rrr_cf_d *a)
{
    /*
     * OR is overloaded: with cf == 0 several encodings are really
     * pseudo-ops (NOP, COPY) or QEMU extensions (PAUSE), handled
     * specially before falling back to the generic logical op.
     */
    if (a->cf == 0) {
        unsigned r2 = a->r2;
        unsigned r1 = a->r1;
        unsigned rt = a->t;

        if (rt == 0) {        /* NOP */
            cond_free(&ctx->null_cond);
            return true;
        }
        if (r2 == 0) {        /* COPY */
            if (r1 == 0) {
                TCGv_i64 dest = dest_gpr(ctx, rt);
                tcg_gen_movi_i64(dest, 0);
                save_gpr(ctx, rt, dest);
            } else {
                save_gpr(ctx, rt, cpu_gr[r1]);
            }
            cond_free(&ctx->null_cond);
            return true;
        }
#ifndef CONFIG_USER_ONLY
        /* These are QEMU extensions and are nops in the real architecture:
         *
         * or %r10,%r10,%r10 -- idle loop; wait for interrupt
         * or %r31,%r31,%r31 -- death loop; offline cpu
         *                      currently implemented as idle.
         */
        if ((rt == 10 || rt == 31) && r1 == rt && r2 == rt) { /* PAUSE */
            /* No need to check for supervisor, as userland can only pause
               until the next timer interrupt.  */
            nullify_over(ctx);

            /* Advance the instruction queue.  */
            copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
            copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
            nullify_set(ctx, 0);

            /* Tell the qemu main loop to halt until this cpu has work.
             */
            tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
                           offsetof(CPUState, halted) - offsetof(HPPACPU, env));
            gen_excp_1(EXCP_HALTED);
            ctx->base.is_jmp = DISAS_NORETURN;

            return nullify_end(ctx);
        }
#endif
    }
    return do_log_reg(ctx, a, tcg_gen_or_i64);
}

static bool trans_xor(DisasContext *ctx, arg_rrr_cf_d *a)
{
    return do_log_reg(ctx, a, tcg_gen_xor_i64);
}

static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf_d *a)
{
    TCGv_i64 tcg_r1, tcg_r2;

    /* A non-zero condition may nullify the following insn.  */
    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d);
    return nullify_end(ctx);
}

static bool trans_uxor(DisasContext *ctx, arg_rrr_cf_d *a)
{
    TCGv_i64 tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    /* UXOR: xor routed through the unit-condition helper.  */
    do_unit(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d, false, tcg_gen_xor_i64);
    return nullify_end(ctx);
}

static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf_d *a, bool is_tc)
{
    TCGv_i64 tcg_r1, tcg_r2, tmp;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    tmp = tcg_temp_new_i64();
    /* UADDCM computes r1 + ~r2 via the unit-op helper.  */
    tcg_gen_not_i64(tmp, tcg_r2);
    do_unit(ctx, a->t, tcg_r1, tmp, a->cf, a->d, is_tc, tcg_gen_add_i64);
    return nullify_end(ctx);
}

static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf_d *a)
{
    return do_uaddcm(ctx, a, false);
}

static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf_d *a)
{
    /* ,TC variant: trap on condition -- see do_unit.  */
    return do_uaddcm(ctx, a, true);
}

/* Decimal correct (DCOR / IDCOR): derive a per-digit correction from
   the PSW carry bits and add or subtract 6 from each BCD digit.  */
static bool do_dcor(DisasContext *ctx, arg_rr_cf_d *a, bool is_i)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i64();
    /* Move each nibble's carry-out bit down to the digit's low bit.  */
    tcg_gen_shri_i64(tmp, cpu_psw_cb, 3);
    if
(!is_i) {
        /* DCOR (not the I form): correct the digits that did NOT carry.  */
        tcg_gen_not_i64(tmp, tmp);
    }
    /* Keep one correction bit per 4-bit digit, scale to +/-6.  */
    tcg_gen_andi_i64(tmp, tmp, (uint64_t)0x1111111111111111ull);
    tcg_gen_muli_i64(tmp, tmp, 6);
    do_unit(ctx, a->t, load_gpr(ctx, a->r), tmp, a->cf, a->d, false,
            is_i ? tcg_gen_add_i64 : tcg_gen_sub_i64);
    return nullify_end(ctx);
}

static bool trans_dcor(DisasContext *ctx, arg_rr_cf_d *a)
{
    return do_dcor(ctx, a, false);
}

static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf_d *a)
{
    return do_dcor(ctx, a, true);
}

/* DS: divide step.  */
static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a)
{
    TCGv_i64 dest, add1, add2, addc, zero, in1, in2;
    TCGv_i64 cout;

    nullify_over(ctx);

    in1 = load_gpr(ctx, a->r1);
    in2 = load_gpr(ctx, a->r2);

    add1 = tcg_temp_new_i64();
    add2 = tcg_temp_new_i64();
    addc = tcg_temp_new_i64();
    dest = tcg_temp_new_i64();
    zero = tcg_constant_i64(0);

    /* Form R1 << 1 | PSW[CB]{8}.  */
    tcg_gen_add_i64(add1, in1, in1);
    tcg_gen_add_i64(add1, add1, get_psw_carry(ctx, false));

    /*
     * Add or subtract R2, depending on PSW[V].  Proper computation of
     * carry requires that we subtract via + ~R2 + 1, as described in
     * the manual.  By extracting and masking V, we can produce the
     * proper inputs to the addition without movcond.
     */
    tcg_gen_sextract_i64(addc, cpu_psw_v, 31, 1);
    tcg_gen_xor_i64(add2, in2, addc);
    tcg_gen_andi_i64(addc, addc, 1);

    /* Two-stage add so the +1 of the subtraction also feeds the carry.  */
    tcg_gen_add2_i64(dest, cpu_psw_cb_msb, add1, zero, add2, zero);
    tcg_gen_add2_i64(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero);

    /* Write back the result register.  */
    save_gpr(ctx, a->t, dest);

    /* Write back PSW[CB].  */
    tcg_gen_xor_i64(cpu_psw_cb, add1, add2);
    tcg_gen_xor_i64(cpu_psw_cb, cpu_psw_cb, dest);

    /* Write back PSW[V] for the division step.
     */
    cout = get_psw_carry(ctx, false);
    tcg_gen_neg_i64(cpu_psw_v, cout);
    tcg_gen_xor_i64(cpu_psw_v, cpu_psw_v, in2);

    /* Install the new nullification.  */
    if (a->cf) {
        TCGv_i64 sv = NULL;
        if (cond_need_sv(a->cf >> 1)) {
            /* ??? The lshift is supposed to contribute to overflow.  */
            sv = do_add_sv(ctx, dest, add1, add2);
        }
        ctx->null_cond = do_cond(ctx, a->cf, false, dest, cout, sv);
    }

    return nullify_end(ctx);
}

/*
 * ADDI/SUBI immediate wrappers.  The boolean arguments appear to
 * select the TSV (trap on signed overflow) and TC (trap on condition)
 * completers -- confirm against do_add_imm/do_sub_imm.
 */

static bool trans_addi(DisasContext *ctx, arg_rri_cf *a)
{
    return do_add_imm(ctx, a, false, false);
}

static bool trans_addi_tsv(DisasContext *ctx, arg_rri_cf *a)
{
    return do_add_imm(ctx, a, true, false);
}

static bool trans_addi_tc(DisasContext *ctx, arg_rri_cf *a)
{
    return do_add_imm(ctx, a, false, true);
}

static bool trans_addi_tc_tsv(DisasContext *ctx, arg_rri_cf *a)
{
    return do_add_imm(ctx, a, true, true);
}

static bool trans_subi(DisasContext *ctx, arg_rri_cf *a)
{
    return do_sub_imm(ctx, a, false);
}

static bool trans_subi_tsv(DisasContext *ctx, arg_rri_cf *a)
{
    return do_sub_imm(ctx, a, true);
}

static bool trans_cmpiclr(DisasContext *ctx, arg_rri_cf_d *a)
{
    TCGv_i64 tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }

    tcg_im = tcg_constant_i64(a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    do_cmpclr(ctx, a->t, tcg_im, tcg_r2, a->cf, a->d);

    return nullify_end(ctx);
}

/* Common driver for the pa2.0 halfword "multimedia" ops: rt = fn(r1, r2).
   These instructions do not exist before pa2.0.  */
static bool do_multimedia(DisasContext *ctx, arg_rrr *a,
                          void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 r1, r2, dest;

    if (!ctx->is_pa20) {
        return false;
    }

    nullify_over(ctx);

    r1 = load_gpr(ctx, a->r1);
    r2 = load_gpr(ctx, a->r2);
    dest = dest_gpr(ctx, a->t);

    fn(dest, r1, r2);
    save_gpr(ctx, a->t, dest);

    return nullify_end(ctx);
}

/* As do_multimedia, but fn takes an immediate shift count.  */
static bool do_multimedia_sh(DisasContext *ctx, arg_rri *a,
                             void (*fn)(TCGv_i64, TCGv_i64, int64_t))
{
    TCGv_i64 r, dest;

    if (!ctx->is_pa20) {
        return false;
    }

    nullify_over(ctx);

    r = load_gpr(ctx, a->r);
    dest = dest_gpr(ctx, a->t);

    fn(dest, r, a->i);
    save_gpr(ctx, a->t, dest);

    return nullify_end(ctx);
}

/* As do_multimedia, but fn takes an extra shift-amount operand.  */
static bool do_multimedia_shadd(DisasContext *ctx, arg_rrr_sh *a,
                                void (*fn)(TCGv_i64, TCGv_i64,
                                           TCGv_i64, TCGv_i32))
{
    TCGv_i64 r1, r2, dest;

    if (!ctx->is_pa20) {
        return false;
    }

    nullify_over(ctx);

    r1 = load_gpr(ctx, a->r1);
    r2 = load_gpr(ctx, a->r2);
    dest = dest_gpr(ctx, a->t);

    fn(dest, r1, r2, tcg_constant_i32(a->sh));
    save_gpr(ctx, a->t, dest);

    return nullify_end(ctx);
}

static bool trans_hadd(DisasContext *ctx, arg_rrr *a)
{
    /* HADD: parallel add of the four 16-bit fields, modulo arithmetic.  */
    return do_multimedia(ctx, a, tcg_gen_vec_add16_i64);
}

static bool trans_hadd_ss(DisasContext *ctx, arg_rrr *a)
{
    /* HADD,ss: signed saturating variant (helper).  */
    return do_multimedia(ctx, a, gen_helper_hadd_ss);
}

static bool trans_hadd_us(DisasContext *ctx, arg_rrr *a)
{
    /* HADD,us: unsigned saturating variant (helper).  */
    return do_multimedia(ctx, a, gen_helper_hadd_us);
}

static bool trans_havg(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, gen_helper_havg);
}

static bool trans_hshl(DisasContext *ctx, arg_rri *a)
{
    return do_multimedia_sh(ctx, a, tcg_gen_vec_shl16i_i64);
}

static bool trans_hshr_s(DisasContext *ctx, arg_rri *a)
{
    /* Arithmetic (sign-propagating) right shift per halfword.  */
    return do_multimedia_sh(ctx, a, tcg_gen_vec_sar16i_i64);
}

static bool trans_hshr_u(DisasContext *ctx, arg_rri *a)
{
    /* Logical right shift per halfword.  */
    return do_multimedia_sh(ctx, a, tcg_gen_vec_shr16i_i64);
}

static bool trans_hshladd(DisasContext *ctx, arg_rrr_sh *a)
{
    return do_multimedia_shadd(ctx,
                               a, gen_helper_hshladd);
}

static bool trans_hshradd(DisasContext *ctx, arg_rrr_sh *a)
{
    return do_multimedia_shadd(ctx, a, gen_helper_hshradd);
}

static bool trans_hsub(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, tcg_gen_vec_sub16_i64);
}

static bool trans_hsub_ss(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, gen_helper_hsub_ss);
}

static bool trans_hsub_us(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, gen_helper_hsub_us);
}

/* MIXH,l: keep the upper halfword of each 32-bit half of r1 in place,
   and drop the corresponding halfword of r2 in just below it.  */
static void gen_mixh_l(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
{
    uint64_t mask = 0xffff0000ffff0000ull;
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_andi_i64(tmp, r2, mask);
    tcg_gen_andi_i64(dst, r1, mask);
    tcg_gen_shri_i64(tmp, tmp, 16);
    tcg_gen_or_i64(dst, dst, tmp);
}

static bool trans_mixh_l(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, gen_mixh_l);
}

/* MIXH,r: the mirror image -- lower halfwords of r2 stay in place,
   lower halfwords of r1 are shifted up next to them.  */
static void gen_mixh_r(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
{
    uint64_t mask = 0x0000ffff0000ffffull;
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_andi_i64(tmp, r1, mask);
    tcg_gen_andi_i64(dst, r2, mask);
    tcg_gen_shli_i64(tmp, tmp, 16);
    tcg_gen_or_i64(dst, dst, tmp);
}

static bool trans_mixh_r(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, gen_mixh_r);
}

/* MIXW,l: high word of r1 stays high, high word of r2 becomes low.  */
static void gen_mixw_l(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_shri_i64(tmp, r2, 32);
    tcg_gen_deposit_i64(dst, r1, tmp, 0, 32);
}

static bool trans_mixw_l(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, gen_mixw_l);
}

/* MIXW,r: low word of r2 stays low, low word of r1 becomes high.  */
static void gen_mixw_r(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
{
    tcg_gen_deposit_i64(dst, r2, r1, 32, 32);
}

static bool
trans_mixw_r(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, gen_mixw_r);
}

/* PERMH: permute the four halfwords of r1 per selectors c0..c3.  */
static bool trans_permh(DisasContext *ctx, arg_permh *a)
{
    TCGv_i64 r, t0, t1, t2, t3;

    if (!ctx->is_pa20) {
        return false;
    }

    nullify_over(ctx);

    r = load_gpr(ctx, a->r1);
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    t3 = tcg_temp_new_i64();

    /* Selectors use big-endian numbering (0 = most significant
       halfword), hence the (3 - cN) bit offsets.  */
    tcg_gen_extract_i64(t0, r, (3 - a->c0) * 16, 16);
    tcg_gen_extract_i64(t1, r, (3 - a->c1) * 16, 16);
    tcg_gen_extract_i64(t2, r, (3 - a->c2) * 16, 16);
    tcg_gen_extract_i64(t3, r, (3 - a->c3) * 16, 16);

    /* Merge the four 16-bit fields back into one 64-bit result.  */
    tcg_gen_deposit_i64(t0, t1, t0, 16, 48);
    tcg_gen_deposit_i64(t2, t3, t2, 16, 48);
    tcg_gen_deposit_i64(t0, t2, t0, 32, 32);

    save_gpr(ctx, a->t, t0);
    return nullify_end(ctx);
}

static bool trans_ld(DisasContext *ctx, arg_ldst *a)
{
    /* Doubleword loads require pa2.0.  */
    if (!ctx->is_pa20 && a->size > MO_32) {
        return gen_illegal(ctx);
    }
    return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0,
                   a->disp, a->sp, a->m, a->size | MO_TE);
}

static bool trans_st(DisasContext *ctx, arg_ldst *a)
{
    assert(a->x == 0 && a->scale == 0);
    /* Doubleword stores require pa2.0.  */
    if (!ctx->is_pa20 && a->size > MO_32) {
        return gen_illegal(ctx);
    }
    return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE);
}

/* LDCW/LDCD: load and clear word, i.e. atomic exchange with zero.  */
static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
{
    MemOp mop = MO_TE | MO_ALIGN | a->size;
    TCGv_i64 zero, dest, ofs;
    TCGv_i64 addr;

    if (!ctx->is_pa20 && a->size > MO_32) {
        return gen_illegal(ctx);
    }

    nullify_over(ctx);

    if (a->m) {
        /* Base register modification.  Make sure if RT == RB,
           we see the result of the load.  */
        dest = tcg_temp_new_i64();
    } else {
        dest = dest_gpr(ctx, a->t);
    }

    form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ?
a->size : 0, 3017 a->disp, a->sp, a->m, ctx->mmu_idx == MMU_PHYS_IDX); 3018 3019 /* 3020 * For hppa1.1, LDCW is undefined unless aligned mod 16. 3021 * However actual hardware succeeds with aligned mod 4. 3022 * Detect this case and log a GUEST_ERROR. 3023 * 3024 * TODO: HPPA64 relaxes the over-alignment requirement 3025 * with the ,co completer. 3026 */ 3027 gen_helper_ldc_check(addr); 3028 3029 zero = tcg_constant_i64(0); 3030 tcg_gen_atomic_xchg_i64(dest, addr, zero, ctx->mmu_idx, mop); 3031 3032 if (a->m) { 3033 save_gpr(ctx, a->b, ofs); 3034 } 3035 save_gpr(ctx, a->t, dest); 3036 3037 return nullify_end(ctx); 3038 } 3039 3040 static bool trans_stby(DisasContext *ctx, arg_stby *a) 3041 { 3042 TCGv_i64 ofs, val; 3043 TCGv_i64 addr; 3044 3045 nullify_over(ctx); 3046 3047 form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m, 3048 ctx->mmu_idx == MMU_PHYS_IDX); 3049 val = load_gpr(ctx, a->r); 3050 if (a->a) { 3051 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) { 3052 gen_helper_stby_e_parallel(tcg_env, addr, val); 3053 } else { 3054 gen_helper_stby_e(tcg_env, addr, val); 3055 } 3056 } else { 3057 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) { 3058 gen_helper_stby_b_parallel(tcg_env, addr, val); 3059 } else { 3060 gen_helper_stby_b(tcg_env, addr, val); 3061 } 3062 } 3063 if (a->m) { 3064 tcg_gen_andi_i64(ofs, ofs, ~3); 3065 save_gpr(ctx, a->b, ofs); 3066 } 3067 3068 return nullify_end(ctx); 3069 } 3070 3071 static bool trans_stdby(DisasContext *ctx, arg_stby *a) 3072 { 3073 TCGv_i64 ofs, val; 3074 TCGv_i64 addr; 3075 3076 if (!ctx->is_pa20) { 3077 return false; 3078 } 3079 nullify_over(ctx); 3080 3081 form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m, 3082 ctx->mmu_idx == MMU_PHYS_IDX); 3083 val = load_gpr(ctx, a->r); 3084 if (a->a) { 3085 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) { 3086 gen_helper_stdby_e_parallel(tcg_env, addr, val); 3087 } else { 3088 gen_helper_stdby_e(tcg_env, addr, val); 3089 } 3090 } else { 3091 if (tb_cflags(ctx->base.tb) & 
CF_PARALLEL) { 3092 gen_helper_stdby_b_parallel(tcg_env, addr, val); 3093 } else { 3094 gen_helper_stdby_b(tcg_env, addr, val); 3095 } 3096 } 3097 if (a->m) { 3098 tcg_gen_andi_i64(ofs, ofs, ~7); 3099 save_gpr(ctx, a->b, ofs); 3100 } 3101 3102 return nullify_end(ctx); 3103 } 3104 3105 static bool trans_lda(DisasContext *ctx, arg_ldst *a) 3106 { 3107 int hold_mmu_idx = ctx->mmu_idx; 3108 3109 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); 3110 ctx->mmu_idx = MMU_PHYS_IDX; 3111 trans_ld(ctx, a); 3112 ctx->mmu_idx = hold_mmu_idx; 3113 return true; 3114 } 3115 3116 static bool trans_sta(DisasContext *ctx, arg_ldst *a) 3117 { 3118 int hold_mmu_idx = ctx->mmu_idx; 3119 3120 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); 3121 ctx->mmu_idx = MMU_PHYS_IDX; 3122 trans_st(ctx, a); 3123 ctx->mmu_idx = hold_mmu_idx; 3124 return true; 3125 } 3126 3127 static bool trans_ldil(DisasContext *ctx, arg_ldil *a) 3128 { 3129 TCGv_i64 tcg_rt = dest_gpr(ctx, a->t); 3130 3131 tcg_gen_movi_i64(tcg_rt, a->i); 3132 save_gpr(ctx, a->t, tcg_rt); 3133 cond_free(&ctx->null_cond); 3134 return true; 3135 } 3136 3137 static bool trans_addil(DisasContext *ctx, arg_addil *a) 3138 { 3139 TCGv_i64 tcg_rt = load_gpr(ctx, a->r); 3140 TCGv_i64 tcg_r1 = dest_gpr(ctx, 1); 3141 3142 tcg_gen_addi_i64(tcg_r1, tcg_rt, a->i); 3143 save_gpr(ctx, 1, tcg_r1); 3144 cond_free(&ctx->null_cond); 3145 return true; 3146 } 3147 3148 static bool trans_ldo(DisasContext *ctx, arg_ldo *a) 3149 { 3150 TCGv_i64 tcg_rt = dest_gpr(ctx, a->t); 3151 3152 /* Special case rb == 0, for the LDI pseudo-op. 3153 The COPY pseudo-op is handled for free within tcg_gen_addi_i64. 
*/ 3154 if (a->b == 0) { 3155 tcg_gen_movi_i64(tcg_rt, a->i); 3156 } else { 3157 tcg_gen_addi_i64(tcg_rt, cpu_gr[a->b], a->i); 3158 } 3159 save_gpr(ctx, a->t, tcg_rt); 3160 cond_free(&ctx->null_cond); 3161 return true; 3162 } 3163 3164 static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_i64 in1, 3165 unsigned c, unsigned f, bool d, unsigned n, int disp) 3166 { 3167 TCGv_i64 dest, in2, sv; 3168 DisasCond cond; 3169 3170 in2 = load_gpr(ctx, r); 3171 dest = tcg_temp_new_i64(); 3172 3173 tcg_gen_sub_i64(dest, in1, in2); 3174 3175 sv = NULL; 3176 if (cond_need_sv(c)) { 3177 sv = do_sub_sv(ctx, dest, in1, in2); 3178 } 3179 3180 cond = do_sub_cond(ctx, c * 2 + f, d, dest, in1, in2, sv); 3181 return do_cbranch(ctx, disp, n, &cond); 3182 } 3183 3184 static bool trans_cmpb(DisasContext *ctx, arg_cmpb *a) 3185 { 3186 if (!ctx->is_pa20 && a->d) { 3187 return false; 3188 } 3189 nullify_over(ctx); 3190 return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1), 3191 a->c, a->f, a->d, a->n, a->disp); 3192 } 3193 3194 static bool trans_cmpbi(DisasContext *ctx, arg_cmpbi *a) 3195 { 3196 if (!ctx->is_pa20 && a->d) { 3197 return false; 3198 } 3199 nullify_over(ctx); 3200 return do_cmpb(ctx, a->r, tcg_constant_i64(a->i), 3201 a->c, a->f, a->d, a->n, a->disp); 3202 } 3203 3204 static bool do_addb(DisasContext *ctx, unsigned r, TCGv_i64 in1, 3205 unsigned c, unsigned f, unsigned n, int disp) 3206 { 3207 TCGv_i64 dest, in2, sv, cb_cond; 3208 DisasCond cond; 3209 bool d = false; 3210 3211 /* 3212 * For hppa64, the ADDB conditions change with PSW.W, 3213 * dropping ZNV, SV, OD in favor of double-word EQ, LT, LE. 
3214 */ 3215 if (ctx->tb_flags & PSW_W) { 3216 d = c >= 5; 3217 if (d) { 3218 c &= 3; 3219 } 3220 } 3221 3222 in2 = load_gpr(ctx, r); 3223 dest = tcg_temp_new_i64(); 3224 sv = NULL; 3225 cb_cond = NULL; 3226 3227 if (cond_need_cb(c)) { 3228 TCGv_i64 cb = tcg_temp_new_i64(); 3229 TCGv_i64 cb_msb = tcg_temp_new_i64(); 3230 3231 tcg_gen_movi_i64(cb_msb, 0); 3232 tcg_gen_add2_i64(dest, cb_msb, in1, cb_msb, in2, cb_msb); 3233 tcg_gen_xor_i64(cb, in1, in2); 3234 tcg_gen_xor_i64(cb, cb, dest); 3235 cb_cond = get_carry(ctx, d, cb, cb_msb); 3236 } else { 3237 tcg_gen_add_i64(dest, in1, in2); 3238 } 3239 if (cond_need_sv(c)) { 3240 sv = do_add_sv(ctx, dest, in1, in2); 3241 } 3242 3243 cond = do_cond(ctx, c * 2 + f, d, dest, cb_cond, sv); 3244 save_gpr(ctx, r, dest); 3245 return do_cbranch(ctx, disp, n, &cond); 3246 } 3247 3248 static bool trans_addb(DisasContext *ctx, arg_addb *a) 3249 { 3250 nullify_over(ctx); 3251 return do_addb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp); 3252 } 3253 3254 static bool trans_addbi(DisasContext *ctx, arg_addbi *a) 3255 { 3256 nullify_over(ctx); 3257 return do_addb(ctx, a->r, tcg_constant_i64(a->i), a->c, a->f, a->n, a->disp); 3258 } 3259 3260 static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a) 3261 { 3262 TCGv_i64 tmp, tcg_r; 3263 DisasCond cond; 3264 3265 nullify_over(ctx); 3266 3267 tmp = tcg_temp_new_i64(); 3268 tcg_r = load_gpr(ctx, a->r); 3269 if (cond_need_ext(ctx, a->d)) { 3270 /* Force shift into [32,63] */ 3271 tcg_gen_ori_i64(tmp, cpu_sar, 32); 3272 tcg_gen_shl_i64(tmp, tcg_r, tmp); 3273 } else { 3274 tcg_gen_shl_i64(tmp, tcg_r, cpu_sar); 3275 } 3276 3277 cond = cond_make_0_tmp(a->c ? 
                            TCG_COND_GE : TCG_COND_LT, tmp);
    return do_cbranch(ctx, a->disp, a->n, &cond);
}

/* BB: branch on bit, bit position given by the immediate.  */
static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a)
{
    TCGv_i64 tmp, tcg_r;
    DisasCond cond;
    int p;

    nullify_over(ctx);

    tmp = tcg_temp_new_i64();
    tcg_r = load_gpr(ctx, a->r);
    /* Shift the (big-endian numbered) selected bit up into the sign
       bit; in 32-bit mode the position is biased by 32.  */
    p = a->p | (cond_need_ext(ctx, a->d) ? 32 : 0);
    tcg_gen_shli_i64(tmp, tcg_r, p);

    /* a->c chooses the sense: GE tests the bit clear, LT tests it set.  */
    cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
    return do_cbranch(ctx, a->disp, a->n, &cond);
}

/* MOVB: copy r1 into r2 and branch on a condition of the moved value.  */
static bool trans_movb(DisasContext *ctx, arg_movb *a)
{
    TCGv_i64 dest;
    DisasCond cond;

    nullify_over(ctx);

    dest = dest_gpr(ctx, a->r2);
    if (a->r1 == 0) {
        /* %r0 always reads as zero.  */
        tcg_gen_movi_i64(dest, 0);
    } else {
        tcg_gen_mov_i64(dest, cpu_gr[a->r1]);
    }

    /* All MOVB conditions are 32-bit.  */
    cond = do_sed_cond(ctx, a->c, false, dest);
    return do_cbranch(ctx, a->disp, a->n, &cond);
}

/* MOVBI: as MOVB, but with an immediate source.  */
static bool trans_movbi(DisasContext *ctx, arg_movbi *a)
{
    TCGv_i64 dest;
    DisasCond cond;

    nullify_over(ctx);

    dest = dest_gpr(ctx, a->r);
    tcg_gen_movi_i64(dest, a->i);

    /* All MOVBI conditions are 32-bit.
*/ 3328 cond = do_sed_cond(ctx, a->c, false, dest); 3329 return do_cbranch(ctx, a->disp, a->n, &cond); 3330 } 3331 3332 static bool trans_shrp_sar(DisasContext *ctx, arg_shrp_sar *a) 3333 { 3334 TCGv_i64 dest, src2; 3335 3336 if (!ctx->is_pa20 && a->d) { 3337 return false; 3338 } 3339 if (a->c) { 3340 nullify_over(ctx); 3341 } 3342 3343 dest = dest_gpr(ctx, a->t); 3344 src2 = load_gpr(ctx, a->r2); 3345 if (a->r1 == 0) { 3346 if (a->d) { 3347 tcg_gen_shr_i64(dest, src2, cpu_sar); 3348 } else { 3349 TCGv_i64 tmp = tcg_temp_new_i64(); 3350 3351 tcg_gen_ext32u_i64(dest, src2); 3352 tcg_gen_andi_i64(tmp, cpu_sar, 31); 3353 tcg_gen_shr_i64(dest, dest, tmp); 3354 } 3355 } else if (a->r1 == a->r2) { 3356 if (a->d) { 3357 tcg_gen_rotr_i64(dest, src2, cpu_sar); 3358 } else { 3359 TCGv_i32 t32 = tcg_temp_new_i32(); 3360 TCGv_i32 s32 = tcg_temp_new_i32(); 3361 3362 tcg_gen_extrl_i64_i32(t32, src2); 3363 tcg_gen_extrl_i64_i32(s32, cpu_sar); 3364 tcg_gen_andi_i32(s32, s32, 31); 3365 tcg_gen_rotr_i32(t32, t32, s32); 3366 tcg_gen_extu_i32_i64(dest, t32); 3367 } 3368 } else { 3369 TCGv_i64 src1 = load_gpr(ctx, a->r1); 3370 3371 if (a->d) { 3372 TCGv_i64 t = tcg_temp_new_i64(); 3373 TCGv_i64 n = tcg_temp_new_i64(); 3374 3375 tcg_gen_xori_i64(n, cpu_sar, 63); 3376 tcg_gen_shl_i64(t, src2, n); 3377 tcg_gen_shli_i64(t, t, 1); 3378 tcg_gen_shr_i64(dest, src1, cpu_sar); 3379 tcg_gen_or_i64(dest, dest, t); 3380 } else { 3381 TCGv_i64 t = tcg_temp_new_i64(); 3382 TCGv_i64 s = tcg_temp_new_i64(); 3383 3384 tcg_gen_concat32_i64(t, src2, src1); 3385 tcg_gen_andi_i64(s, cpu_sar, 31); 3386 tcg_gen_shr_i64(dest, t, s); 3387 } 3388 } 3389 save_gpr(ctx, a->t, dest); 3390 3391 /* Install the new nullification. 
*/ 3392 cond_free(&ctx->null_cond); 3393 if (a->c) { 3394 ctx->null_cond = do_sed_cond(ctx, a->c, false, dest); 3395 } 3396 return nullify_end(ctx); 3397 } 3398 3399 static bool trans_shrp_imm(DisasContext *ctx, arg_shrp_imm *a) 3400 { 3401 unsigned width, sa; 3402 TCGv_i64 dest, t2; 3403 3404 if (!ctx->is_pa20 && a->d) { 3405 return false; 3406 } 3407 if (a->c) { 3408 nullify_over(ctx); 3409 } 3410 3411 width = a->d ? 64 : 32; 3412 sa = width - 1 - a->cpos; 3413 3414 dest = dest_gpr(ctx, a->t); 3415 t2 = load_gpr(ctx, a->r2); 3416 if (a->r1 == 0) { 3417 tcg_gen_extract_i64(dest, t2, sa, width - sa); 3418 } else if (width == TARGET_LONG_BITS) { 3419 tcg_gen_extract2_i64(dest, t2, cpu_gr[a->r1], sa); 3420 } else { 3421 assert(!a->d); 3422 if (a->r1 == a->r2) { 3423 TCGv_i32 t32 = tcg_temp_new_i32(); 3424 tcg_gen_extrl_i64_i32(t32, t2); 3425 tcg_gen_rotri_i32(t32, t32, sa); 3426 tcg_gen_extu_i32_i64(dest, t32); 3427 } else { 3428 tcg_gen_concat32_i64(dest, t2, cpu_gr[a->r1]); 3429 tcg_gen_extract_i64(dest, dest, sa, 32); 3430 } 3431 } 3432 save_gpr(ctx, a->t, dest); 3433 3434 /* Install the new nullification. */ 3435 cond_free(&ctx->null_cond); 3436 if (a->c) { 3437 ctx->null_cond = do_sed_cond(ctx, a->c, false, dest); 3438 } 3439 return nullify_end(ctx); 3440 } 3441 3442 static bool trans_extr_sar(DisasContext *ctx, arg_extr_sar *a) 3443 { 3444 unsigned widthm1 = a->d ? 63 : 31; 3445 TCGv_i64 dest, src, tmp; 3446 3447 if (!ctx->is_pa20 && a->d) { 3448 return false; 3449 } 3450 if (a->c) { 3451 nullify_over(ctx); 3452 } 3453 3454 dest = dest_gpr(ctx, a->t); 3455 src = load_gpr(ctx, a->r); 3456 tmp = tcg_temp_new_i64(); 3457 3458 /* Recall that SAR is using big-endian bit numbering. 
*/ 3459 tcg_gen_andi_i64(tmp, cpu_sar, widthm1); 3460 tcg_gen_xori_i64(tmp, tmp, widthm1); 3461 3462 if (a->se) { 3463 if (!a->d) { 3464 tcg_gen_ext32s_i64(dest, src); 3465 src = dest; 3466 } 3467 tcg_gen_sar_i64(dest, src, tmp); 3468 tcg_gen_sextract_i64(dest, dest, 0, a->len); 3469 } else { 3470 if (!a->d) { 3471 tcg_gen_ext32u_i64(dest, src); 3472 src = dest; 3473 } 3474 tcg_gen_shr_i64(dest, src, tmp); 3475 tcg_gen_extract_i64(dest, dest, 0, a->len); 3476 } 3477 save_gpr(ctx, a->t, dest); 3478 3479 /* Install the new nullification. */ 3480 cond_free(&ctx->null_cond); 3481 if (a->c) { 3482 ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest); 3483 } 3484 return nullify_end(ctx); 3485 } 3486 3487 static bool trans_extr_imm(DisasContext *ctx, arg_extr_imm *a) 3488 { 3489 unsigned len, cpos, width; 3490 TCGv_i64 dest, src; 3491 3492 if (!ctx->is_pa20 && a->d) { 3493 return false; 3494 } 3495 if (a->c) { 3496 nullify_over(ctx); 3497 } 3498 3499 len = a->len; 3500 width = a->d ? 64 : 32; 3501 cpos = width - 1 - a->pos; 3502 if (cpos + len > width) { 3503 len = width - cpos; 3504 } 3505 3506 dest = dest_gpr(ctx, a->t); 3507 src = load_gpr(ctx, a->r); 3508 if (a->se) { 3509 tcg_gen_sextract_i64(dest, src, cpos, len); 3510 } else { 3511 tcg_gen_extract_i64(dest, src, cpos, len); 3512 } 3513 save_gpr(ctx, a->t, dest); 3514 3515 /* Install the new nullification. */ 3516 cond_free(&ctx->null_cond); 3517 if (a->c) { 3518 ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest); 3519 } 3520 return nullify_end(ctx); 3521 } 3522 3523 static bool trans_depi_imm(DisasContext *ctx, arg_depi_imm *a) 3524 { 3525 unsigned len, width; 3526 uint64_t mask0, mask1; 3527 TCGv_i64 dest; 3528 3529 if (!ctx->is_pa20 && a->d) { 3530 return false; 3531 } 3532 if (a->c) { 3533 nullify_over(ctx); 3534 } 3535 3536 len = a->len; 3537 width = a->d ? 
64 : 32; 3538 if (a->cpos + len > width) { 3539 len = width - a->cpos; 3540 } 3541 3542 dest = dest_gpr(ctx, a->t); 3543 mask0 = deposit64(0, a->cpos, len, a->i); 3544 mask1 = deposit64(-1, a->cpos, len, a->i); 3545 3546 if (a->nz) { 3547 TCGv_i64 src = load_gpr(ctx, a->t); 3548 tcg_gen_andi_i64(dest, src, mask1); 3549 tcg_gen_ori_i64(dest, dest, mask0); 3550 } else { 3551 tcg_gen_movi_i64(dest, mask0); 3552 } 3553 save_gpr(ctx, a->t, dest); 3554 3555 /* Install the new nullification. */ 3556 cond_free(&ctx->null_cond); 3557 if (a->c) { 3558 ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest); 3559 } 3560 return nullify_end(ctx); 3561 } 3562 3563 static bool trans_dep_imm(DisasContext *ctx, arg_dep_imm *a) 3564 { 3565 unsigned rs = a->nz ? a->t : 0; 3566 unsigned len, width; 3567 TCGv_i64 dest, val; 3568 3569 if (!ctx->is_pa20 && a->d) { 3570 return false; 3571 } 3572 if (a->c) { 3573 nullify_over(ctx); 3574 } 3575 3576 len = a->len; 3577 width = a->d ? 64 : 32; 3578 if (a->cpos + len > width) { 3579 len = width - a->cpos; 3580 } 3581 3582 dest = dest_gpr(ctx, a->t); 3583 val = load_gpr(ctx, a->r); 3584 if (rs == 0) { 3585 tcg_gen_deposit_z_i64(dest, val, a->cpos, len); 3586 } else { 3587 tcg_gen_deposit_i64(dest, cpu_gr[rs], val, a->cpos, len); 3588 } 3589 save_gpr(ctx, a->t, dest); 3590 3591 /* Install the new nullification. */ 3592 cond_free(&ctx->null_cond); 3593 if (a->c) { 3594 ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest); 3595 } 3596 return nullify_end(ctx); 3597 } 3598 3599 static bool do_dep_sar(DisasContext *ctx, unsigned rt, unsigned c, 3600 bool d, bool nz, unsigned len, TCGv_i64 val) 3601 { 3602 unsigned rs = nz ? rt : 0; 3603 unsigned widthm1 = d ? 63 : 31; 3604 TCGv_i64 mask, tmp, shift, dest; 3605 uint64_t msb = 1ULL << (len - 1); 3606 3607 dest = dest_gpr(ctx, rt); 3608 shift = tcg_temp_new_i64(); 3609 tmp = tcg_temp_new_i64(); 3610 3611 /* Convert big-endian bit numbering in SAR to left-shift. 
*/ 3612 tcg_gen_andi_i64(shift, cpu_sar, widthm1); 3613 tcg_gen_xori_i64(shift, shift, widthm1); 3614 3615 mask = tcg_temp_new_i64(); 3616 tcg_gen_movi_i64(mask, msb + (msb - 1)); 3617 tcg_gen_and_i64(tmp, val, mask); 3618 if (rs) { 3619 tcg_gen_shl_i64(mask, mask, shift); 3620 tcg_gen_shl_i64(tmp, tmp, shift); 3621 tcg_gen_andc_i64(dest, cpu_gr[rs], mask); 3622 tcg_gen_or_i64(dest, dest, tmp); 3623 } else { 3624 tcg_gen_shl_i64(dest, tmp, shift); 3625 } 3626 save_gpr(ctx, rt, dest); 3627 3628 /* Install the new nullification. */ 3629 cond_free(&ctx->null_cond); 3630 if (c) { 3631 ctx->null_cond = do_sed_cond(ctx, c, d, dest); 3632 } 3633 return nullify_end(ctx); 3634 } 3635 3636 static bool trans_dep_sar(DisasContext *ctx, arg_dep_sar *a) 3637 { 3638 if (!ctx->is_pa20 && a->d) { 3639 return false; 3640 } 3641 if (a->c) { 3642 nullify_over(ctx); 3643 } 3644 return do_dep_sar(ctx, a->t, a->c, a->d, a->nz, a->len, 3645 load_gpr(ctx, a->r)); 3646 } 3647 3648 static bool trans_depi_sar(DisasContext *ctx, arg_depi_sar *a) 3649 { 3650 if (!ctx->is_pa20 && a->d) { 3651 return false; 3652 } 3653 if (a->c) { 3654 nullify_over(ctx); 3655 } 3656 return do_dep_sar(ctx, a->t, a->c, a->d, a->nz, a->len, 3657 tcg_constant_i64(a->i)); 3658 } 3659 3660 static bool trans_be(DisasContext *ctx, arg_be *a) 3661 { 3662 TCGv_i64 tmp; 3663 3664 #ifdef CONFIG_USER_ONLY 3665 /* ??? It seems like there should be a good way of using 3666 "be disp(sr2, r0)", the canonical gateway entry mechanism 3667 to our advantage. But that appears to be inconvenient to 3668 manage along side branch delay slots. Therefore we handle 3669 entry into the gateway page via absolute address. */ 3670 /* Since we don't implement spaces, just branch. Do notice the special 3671 case of "be disp(*,r0)" using a direct branch to disp, so that we can 3672 goto_tb to the TB containing the syscall. 
*/ 3673 if (a->b == 0) { 3674 return do_dbranch(ctx, a->disp, a->l, a->n); 3675 } 3676 #else 3677 nullify_over(ctx); 3678 #endif 3679 3680 tmp = tcg_temp_new_i64(); 3681 tcg_gen_addi_i64(tmp, load_gpr(ctx, a->b), a->disp); 3682 tmp = do_ibranch_priv(ctx, tmp); 3683 3684 #ifdef CONFIG_USER_ONLY 3685 return do_ibranch(ctx, tmp, a->l, a->n); 3686 #else 3687 TCGv_i64 new_spc = tcg_temp_new_i64(); 3688 3689 load_spr(ctx, new_spc, a->sp); 3690 if (a->l) { 3691 copy_iaoq_entry(ctx, cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var); 3692 tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_f); 3693 } 3694 if (a->n && use_nullify_skip(ctx)) { 3695 copy_iaoq_entry(ctx, cpu_iaoq_f, -1, tmp); 3696 tcg_gen_addi_i64(tmp, tmp, 4); 3697 copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp); 3698 tcg_gen_mov_i64(cpu_iasq_f, new_spc); 3699 tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f); 3700 } else { 3701 copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b); 3702 if (ctx->iaoq_b == -1) { 3703 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b); 3704 } 3705 copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp); 3706 tcg_gen_mov_i64(cpu_iasq_b, new_spc); 3707 nullify_set(ctx, a->n); 3708 } 3709 tcg_gen_lookup_and_goto_ptr(); 3710 ctx->base.is_jmp = DISAS_NORETURN; 3711 return nullify_end(ctx); 3712 #endif 3713 } 3714 3715 static bool trans_bl(DisasContext *ctx, arg_bl *a) 3716 { 3717 return do_dbranch(ctx, iaoq_dest(ctx, a->disp), a->l, a->n); 3718 } 3719 3720 static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a) 3721 { 3722 uint64_t dest = iaoq_dest(ctx, a->disp); 3723 3724 nullify_over(ctx); 3725 3726 /* Make sure the caller hasn't done something weird with the queue. 3727 * ??? This is not quite the same as the PSW[B] bit, which would be 3728 * expensive to track. 
Real hardware will trap for 3729 * b gateway 3730 * b gateway+4 (in delay slot of first branch) 3731 * However, checking for a non-sequential instruction queue *will* 3732 * diagnose the security hole 3733 * b gateway 3734 * b evil 3735 * in which instructions at evil would run with increased privs. 3736 */ 3737 if (ctx->iaoq_b == -1 || ctx->iaoq_b != ctx->iaoq_f + 4) { 3738 return gen_illegal(ctx); 3739 } 3740 3741 #ifndef CONFIG_USER_ONLY 3742 if (ctx->tb_flags & PSW_C) { 3743 CPUHPPAState *env = cpu_env(ctx->cs); 3744 int type = hppa_artype_for_page(env, ctx->base.pc_next); 3745 /* If we could not find a TLB entry, then we need to generate an 3746 ITLB miss exception so the kernel will provide it. 3747 The resulting TLB fill operation will invalidate this TB and 3748 we will re-translate, at which point we *will* be able to find 3749 the TLB entry and determine if this is in fact a gateway page. */ 3750 if (type < 0) { 3751 gen_excp(ctx, EXCP_ITLB_MISS); 3752 return true; 3753 } 3754 /* No change for non-gateway pages or for priv decrease. */ 3755 if (type >= 4 && type - 4 < ctx->privilege) { 3756 dest = deposit32(dest, 0, 2, type - 4); 3757 } 3758 } else { 3759 dest &= -4; /* priv = 0 */ 3760 } 3761 #endif 3762 3763 if (a->l) { 3764 TCGv_i64 tmp = dest_gpr(ctx, a->l); 3765 if (ctx->privilege < 3) { 3766 tcg_gen_andi_i64(tmp, tmp, -4); 3767 } 3768 tcg_gen_ori_i64(tmp, tmp, ctx->privilege); 3769 save_gpr(ctx, a->l, tmp); 3770 } 3771 3772 return do_dbranch(ctx, dest, 0, a->n); 3773 } 3774 3775 static bool trans_blr(DisasContext *ctx, arg_blr *a) 3776 { 3777 if (a->x) { 3778 TCGv_i64 tmp = tcg_temp_new_i64(); 3779 tcg_gen_shli_i64(tmp, load_gpr(ctx, a->x), 3); 3780 tcg_gen_addi_i64(tmp, tmp, ctx->iaoq_f + 8); 3781 /* The computation here never changes privilege level. */ 3782 return do_ibranch(ctx, tmp, a->l, a->n); 3783 } else { 3784 /* BLR R0,RX is a good way to load PC+8 into RX. 
*/
        return do_dbranch(ctx, ctx->iaoq_f + 8, a->l, a->n);
    }
}

/* Branch vectored: indirect branch to GR[b] + GR[x] * 8.  */
static bool trans_bv(DisasContext *ctx, arg_bv *a)
{
    TCGv_i64 dest;

    if (a->x == 0) {
        dest = load_gpr(ctx, a->b);
    } else {
        dest = tcg_temp_new_i64();
        tcg_gen_shli_i64(dest, load_gpr(ctx, a->x), 3);
        tcg_gen_add_i64(dest, dest, load_gpr(ctx, a->b));
    }
    dest = do_ibranch_priv(ctx, dest);
    return do_ibranch(ctx, dest, 0, a->n);
}

/* Branch vectored external: indirect branch selecting a new space.  */
static bool trans_bve(DisasContext *ctx, arg_bve *a)
{
    TCGv_i64 dest;

#ifdef CONFIG_USER_ONLY
    /* Without space registers this degenerates to an indirect branch.  */
    dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
    return do_ibranch(ctx, dest, a->l, a->n);
#else
    nullify_over(ctx);
    dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));

    /* Advance the queue: old back entry becomes the front, the branch
       target (with its space) becomes the back.  */
    copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
    if (ctx->iaoq_b == -1) {
        tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
    }
    copy_iaoq_entry(ctx, cpu_iaoq_b, -1, dest);
    tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest));
    if (a->l) {
        copy_iaoq_entry(ctx, cpu_gr[a->l], ctx->iaoq_n, ctx->iaoq_n_var);
    }
    nullify_set(ctx, a->n);
    tcg_gen_lookup_and_goto_ptr();
    ctx->base.is_jmp = DISAS_NORETURN;
    return nullify_end(ctx);
#endif
}

static bool trans_nopbts(DisasContext *ctx, arg_nopbts *a)
{
    /* All branch target stack instructions implement as nop.
       These encodings only decode successfully on PA2.0.  */
    return ctx->is_pa20;
}

/*
 * Float class 0
 */

/* Raw 32-bit register move, wrapped for use with do_fop_wew.  */
static void gen_fcpy_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_mov_i32(dst, src);
}

/* FID: store the FPU identification value into fr0.  */
static bool trans_fid_f(DisasContext *ctx, arg_fid_f *a)
{
    uint64_t ret;

    if (ctx->is_pa20) {
        ret = 0x13080000000000ULL; /* PA8700 (PCX-W2) */
    } else {
        ret = 0x0f080000000000ULL; /* PA7300LC (PCX-L2) */
    }

    nullify_over(ctx);
    save_frd(0, tcg_constant_i64(ret));
    return nullify_end(ctx);
}

static bool trans_fcpy_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fcpy_f);
}

/* Raw 64-bit register move, wrapped for use with do_fop_ded.  */
static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_mov_i64(dst, src);
}

static bool trans_fcpy_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fcpy_d);
}

/* fabs: clear the sign bit.  */
static void gen_fabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_andi_i32(dst, src, INT32_MAX);
}

static bool trans_fabs_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fabs_f);
}

static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_andi_i64(dst, src, INT64_MAX);
}

static bool trans_fabs_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fabs_d);
}

static bool trans_fsqrt_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fsqrt_s);
}

static bool trans_fsqrt_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fsqrt_d);
}

static bool trans_frnd_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_frnd_s);
}

static bool trans_frnd_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_frnd_d);
}

/* fneg: flip the sign bit.  */
static void gen_fneg_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_xori_i32(dst, src, INT32_MIN);
}

static bool trans_fneg_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fneg_f);
}

static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_xori_i64(dst, src, INT64_MIN);
}

static bool trans_fneg_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fneg_d);
}

/* fnegabs: set the sign bit.  */
static void gen_fnegabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_ori_i32(dst, src, INT32_MIN);
}

static bool trans_fnegabs_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fnegabs_f);
}

static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_ori_i64(dst, src, INT64_MIN);
}

static bool trans_fnegabs_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fnegabs_d);
}

/*
 * Float class 1: conversions.
 * Helper naming: s/d = single/double float, w/dw = signed 32/64-bit
 * integer (insn suffixes f/d and w/q respectively); a "u" prefix on
 * the integer marks it unsigned; "t_" marks the truncating variants.
 */

static bool trans_fcnv_d_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_s);
}

static bool trans_fcnv_f_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_d);
}

static bool trans_fcnv_w_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_w_s);
}

static bool trans_fcnv_q_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_dw_s);
}

static bool trans_fcnv_w_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_w_d);
}

static bool trans_fcnv_q_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_dw_d);
}

/* Float to signed integer, using the dynamic rounding mode.  */
static bool trans_fcnv_f_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_w);
}

static bool trans_fcnv_d_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_w);
}

static bool trans_fcnv_f_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_dw);
}

static bool trans_fcnv_d_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_dw);
}

/* fcnv,t: float to signed integer with truncation (round to zero).  */
static bool trans_fcnv_t_f_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_w);
}

static bool trans_fcnv_t_d_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_w);
}

static bool trans_fcnv_t_f_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_dw);
}

static bool trans_fcnv_t_d_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_dw);
}

/* Unsigned integer to float.  */
static bool trans_fcnv_uw_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_uw_s);
}

static bool trans_fcnv_uq_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_udw_s);
}

static bool trans_fcnv_uw_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_uw_d);
}

static bool trans_fcnv_uq_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_udw_d);
}

/* Float to unsigned integer, using the dynamic rounding mode.  */
static bool trans_fcnv_f_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_uw);
}

static bool trans_fcnv_d_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_uw);
}

static bool trans_fcnv_f_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_udw);
}

static bool trans_fcnv_d_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_udw);
}

/* fcnv,t: float to unsigned integer with truncation (round to zero).  */
static bool trans_fcnv_t_f_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_uw);
}

static bool trans_fcnv_t_d_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_uw);
}

static bool trans_fcnv_t_f_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_udw);
}

static bool trans_fcnv_t_d_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_udw);
}

/*
 * Float class 2
 */

/* FCMP single: the helper updates the FPSR status shadow in env.  */
static bool trans_fcmp_f(DisasContext *ctx, arg_fclass2 *a)
{
    TCGv_i32 ta, tb, tc, ty;

    nullify_over(ctx);

    ta = load_frw0_i32(a->r1);
    tb = load_frw0_i32(a->r2);
    ty = tcg_constant_i32(a->y);
    tc = tcg_constant_i32(a->c);

    gen_helper_fcmp_s(tcg_env, ta, tb, ty, tc);

    return nullify_end(ctx);
}

/* FCMP double.  */
static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a)
{
    TCGv_i64 ta, tb;
    TCGv_i32 tc, ty;

    nullify_over(ctx);

    ta = load_frd0(a->r1);
    tb = load_frd0(a->r2);
    ty = tcg_constant_i32(a->y);
    tc = tcg_constant_i32(a->c);

    gen_helper_fcmp_d(tcg_env, ta, tb, ty, tc);

    return nullify_end(ctx);
}

/*
 * FTEST: nullify the next insn based on the FP status saved in the
 * fr0 shadow by a previous FCMP, without touching the FPU proper.
 */
static bool trans_ftest(DisasContext *ctx, arg_ftest *a)
{
    TCGv_i64 t;

    nullify_over(ctx);

    t = tcg_temp_new_i64();
    tcg_gen_ld32u_i64(t, tcg_env, offsetof(CPUHPPAState, fr0_shadow));

    if (a->y == 1) {
        int mask;
        bool inv = false;

        switch (a->c) {
        case 0: /* simple */
            /* Test only the C bit of the shadowed status.  */
            tcg_gen_andi_i64(t, t, 0x4000000);
            ctx->null_cond = cond_make_0(TCG_COND_NE, t);
            goto done;
        case 2: /* rej */
            inv = true;
            /* fallthru */
        case 1: /* acc */
            mask = 0x43ff800;
            break;
        case 6: /* rej8 */
            inv = true;
            /* fallthru */
        case 5: /* acc8 */
            mask = 0x43f8000;
            break;
        case 9: /* acc6 */
            mask = 0x43e0000;
            break;
        case 13: /* acc4 */
            mask = 0x4380000;
            break;
        case 17: /* acc2 */
            mask = 0x4200000;
            break;
        default:
            gen_illegal(ctx);
            return true;
        }
        if (inv) {
            /* Reject: nullify iff *all* masked bits are set.  */
            TCGv_i64 c = tcg_constant_i64(mask);
            tcg_gen_or_i64(t, t, c);
            ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
        } else {
            /* Accept: nullify iff *no* masked bit is set.  */
            tcg_gen_andi_i64(t, t, mask);
            ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
        }
    } else {
        /* Test a single queued compare bit, selected by Y.  */
        unsigned cbit = (a->y ^ 1) - 1;

        tcg_gen_extract_i64(t, t, 21 - cbit, 1);
        ctx->null_cond = cond_make_0(TCG_COND_NE, t);
    }

 done:
    return nullify_end(ctx);
}

/*
 * Float class 3: arithmetic.
 */

static bool trans_fadd_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fadd_s);
}

static bool trans_fadd_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fadd_d);
}

static bool trans_fsub_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fsub_s);
}

static bool trans_fsub_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fsub_d);
}

static bool trans_fmpy_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_s);
}

static bool trans_fmpy_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_d);
}

static bool trans_fdiv_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_s);
}

static bool trans_fdiv_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_d);
}

/* XMPYU: 32x32 -> 64 unsigned multiply in the FP registers.  */
static bool trans_xmpyu(DisasContext *ctx, arg_xmpyu *a)
{
    TCGv_i64 x, y;

    nullify_over(ctx);

    x = load_frw0_i64(a->r1);
    y = load_frw0_i64(a->r2);
    tcg_gen_mul_i64(x, x, y);
    save_frd(a->t, x);

    return nullify_end(ctx);
}

/* Convert the fmpyadd single-precision register encodings to standard.  */
static inline int fmpyadd_s_reg(unsigned r)
{
    return (r & 16) * 2 + 16 + (r & 15);
}

/* FMPYADD/FMPYSUB single: an independent multiply and add(/sub).  */
static bool do_fmpyadd_s(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
{
    int tm = fmpyadd_s_reg(a->tm);
    int ra = fmpyadd_s_reg(a->ra);
    int ta = fmpyadd_s_reg(a->ta);
    int rm2 = fmpyadd_s_reg(a->rm2);
    int rm1 = fmpyadd_s_reg(a->rm1);

    nullify_over(ctx);

    do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
    do_fop_weww(ctx, ta, ta, ra,
                is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);

    return nullify_end(ctx);
}

static bool trans_fmpyadd_f(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_s(ctx, a, false);
}

static bool trans_fmpysub_f(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_s(ctx, a, true);
}

/* FMPYADD/FMPYSUB double.  */
static bool do_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
{
    nullify_over(ctx);

    do_fop_dedd(ctx, a->tm, a->rm1, a->rm2, gen_helper_fmpy_d);
    do_fop_dedd(ctx, a->ta, a->ta, a->ra,
                is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);

    return nullify_end(ctx);
}

static bool trans_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_d(ctx, a, false);
}

static bool trans_fmpysub_d(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_d(ctx, a, true);
}

/* FMPYFADD single: fused multiply-add (NEG selects multiply-negate).  */
static bool trans_fmpyfadd_f(DisasContext *ctx, arg_fmpyfadd_f *a)
{
    TCGv_i32 x, y, z;

    nullify_over(ctx);
    x = load_frw0_i32(a->rm1);
    y = load_frw0_i32(a->rm2);
    z = load_frw0_i32(a->ra3);

    if (a->neg) {
        gen_helper_fmpynfadd_s(x, tcg_env, x, y, z);
    } else {
        gen_helper_fmpyfadd_s(x, tcg_env, x, y, z);
    }

    save_frw_i32(a->t, x);
    return nullify_end(ctx);
}

/* FMPYFADD double.  */
static bool trans_fmpyfadd_d(DisasContext *ctx, arg_fmpyfadd_d *a)
{
    TCGv_i64 x, y, z;

    nullify_over(ctx);
    x = load_frd0(a->rm1);
    y = load_frd0(a->rm2);
    z = load_frd0(a->ra3);

    if (a->neg) {
        gen_helper_fmpynfadd_d(x, tcg_env, x, y, z);
    } else {
        gen_helper_fmpyfadd_d(x, tcg_env, x, y, z);
    }

    save_frd(a->t, x);
    return nullify_end(ctx);
}

/* DIAG: implementation-defined diagnose; most opcodes are ignored.  */
static bool trans_diag(DisasContext *ctx, arg_diag *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    if (a->i == 0x100) {
        /* emulate PDC BTLB, called by SeaBIOS-hppa */
        nullify_over(ctx);
        gen_helper_diag_btlb(tcg_env);
        return nullify_end(ctx);
    }
#endif
    qemu_log_mask(LOG_UNIMP, "DIAG opcode 0x%04x ignored\n", a->i);
    return true;
}

/* Set up the DisasContext for a new TB.  */
static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    int bound;

    ctx->cs = cs;
    ctx->tb_flags = ctx->base.tb->flags;
    ctx->is_pa20 = hppa_is_pa20(cpu_env(cs));

#ifdef CONFIG_USER_ONLY
    ctx->privilege = MMU_IDX_TO_PRIV(MMU_USER_IDX);
    ctx->mmu_idx = MMU_USER_IDX;
    /* The low bits of the IAOQ carry the privilege level.  */
    ctx->iaoq_f = ctx->base.pc_first | ctx->privilege;
    ctx->iaoq_b = ctx->base.tb->cs_base | ctx->privilege;
    ctx->unalign = (ctx->tb_flags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
#else
    ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
    ctx->mmu_idx = (ctx->tb_flags & PSW_D
                    ? PRIV_P_TO_MMU_IDX(ctx->privilege, ctx->tb_flags & PSW_P)
                    : MMU_PHYS_IDX);

    /* Recover the IAOQ values from the GVA + PRIV.  */
    uint64_t cs_base = ctx->base.tb->cs_base;
    uint64_t iasq_f = cs_base & ~0xffffffffull;
    int32_t diff = cs_base;

    ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
    /* -1 marks an IAOQ back value that is not known at translate time.  */
    ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1);
#endif
    ctx->iaoq_n = -1;
    ctx->iaoq_n_var = NULL;

    /* Bound the number of instructions by those left on the page.  */
    bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
    ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
}

static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    /* Seed the nullification status from PSW[N], as saved in TB->FLAGS.  */
    ctx->null_cond = cond_make_f();
    ctx->psw_n_nonzero = false;
    if (ctx->tb_flags & PSW_N) {
        ctx->null_cond.c = TCG_COND_ALWAYS;
        ctx->psw_n_nonzero = true;
    }
    ctx->null_lab = NULL;
}

static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b);
}

/* Translate one instruction and advance the IA queue.  */
static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUHPPAState *env = cpu_env(cs);
    DisasJumpType ret;

    /* Execute one insn.  */
#ifdef CONFIG_USER_ONLY
    if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
        /* Page zero holds the emulated syscall/gateway entry points.  */
        do_page_zero(ctx);
        ret = ctx->base.is_jmp;
        assert(ret != DISAS_NEXT);
    } else
#endif
    {
        /* Always fetch the insn, even if nullified, so that we check
           the page permissions for execute.  */
        uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);

        /* Set up the IA queue for the next insn.
           This will be overwritten by a branch.  */
        if (ctx->iaoq_b == -1) {
            ctx->iaoq_n = -1;
            ctx->iaoq_n_var = tcg_temp_new_i64();
            tcg_gen_addi_i64(ctx->iaoq_n_var, cpu_iaoq_b, 4);
        } else {
            ctx->iaoq_n = ctx->iaoq_b + 4;
            ctx->iaoq_n_var = NULL;
        }

        if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
            /* This insn is statically nullified: skip codegen entirely.  */
            ctx->null_cond.c = TCG_COND_NEVER;
            ret = DISAS_NEXT;
        } else {
            ctx->insn = insn;
            if (!decode(ctx, insn)) {
                gen_illegal(ctx);
            }
            ret = ctx->base.is_jmp;
            assert(ctx->null_lab == NULL);
        }
    }

    /* Advance the insn queue.  Note that this check also detects
       a priority change within the instruction queue.  */
    if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
        if (ctx->iaoq_b != -1 && ctx->iaoq_n != -1
            && use_goto_tb(ctx, ctx->iaoq_b)
            && (ctx->null_cond.c == TCG_COND_NEVER
                || ctx->null_cond.c == TCG_COND_ALWAYS)) {
            nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
            gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
            ctx->base.is_jmp = ret = DISAS_NORETURN;
        } else {
            ctx->base.is_jmp = ret = DISAS_IAQ_N_STALE;
        }
    }
    ctx->iaoq_f = ctx->iaoq_b;
    ctx->iaoq_b = ctx->iaoq_n;
    ctx->base.pc_next += 4;

    switch (ret) {
    case DISAS_NORETURN:
    case DISAS_IAQ_N_UPDATED:
        break;

    case DISAS_NEXT:
    case DISAS_IAQ_N_STALE:
    case DISAS_IAQ_N_STALE_EXIT:
        if (ctx->iaoq_f == -1) {
            /* Front of the queue is no longer known at translate time:
               flush the computed values back to the cpu state.  */
            copy_iaoq_entry(ctx, cpu_iaoq_f, -1, cpu_iaoq_b);
            copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
#ifndef CONFIG_USER_ONLY
            tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
#endif
            nullify_save(ctx);
            ctx->base.is_jmp = (ret == DISAS_IAQ_N_STALE_EXIT
                                ? DISAS_EXIT
                                : DISAS_IAQ_N_UPDATED);
        } else if (ctx->iaoq_b == -1) {
            copy_iaoq_entry(ctx, cpu_iaoq_b, -1, ctx->iaoq_n_var);
        }
        break;

    default:
        g_assert_not_reached();
    }
}

/* Emit the TB epilogue appropriate for how translation ended.  */
static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    DisasJumpType is_jmp = ctx->base.is_jmp;

    switch (is_jmp) {
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
    case DISAS_IAQ_N_STALE:
    case DISAS_IAQ_N_STALE_EXIT:
        /* The cpu state IAOQ is stale: bring it up to date.  */
        copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
        copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
        nullify_save(ctx);
        /* FALLTHRU */
    case DISAS_IAQ_N_UPDATED:
        if (is_jmp != DISAS_IAQ_N_STALE_EXIT) {
            tcg_gen_lookup_and_goto_ptr();
            break;
        }
        /* FALLTHRU */
    case DISAS_EXIT:
        tcg_gen_exit_tb(NULL, 0);
        break;
    default:
        g_assert_not_reached();
    }
}

static void hppa_tr_disas_log(const DisasContextBase *dcbase,
                              CPUState *cs, FILE *logfile)
{
    target_ulong pc = dcbase->pc_first;

#ifdef CONFIG_USER_ONLY
    /* Name the magic page-zero entry points instead of disassembling.  */
    switch (pc) {
    case 0x00:
        fprintf(logfile, "IN:\n0x00000000:  (null)\n");
        return;
    case 0xb0:
        fprintf(logfile, "IN:\n0x000000b0:  light-weight-syscall\n");
        return;
    case 0xe0:
        fprintf(logfile, "IN:\n0x000000e0:  set-thread-pointer-syscall\n");
        return;
    case 0x100:
        fprintf(logfile, "IN:\n0x00000100:  syscall\n");
        return;
    }
#endif

    fprintf(logfile, "IN: %s\n", lookup_symbol(pc));
    target_disas(logfile, cs, pc, dcbase->tb->size);
}

static const TranslatorOps hppa_tr_ops = {
    .init_disas_context = hppa_tr_init_disas_context,
    .tb_start           = hppa_tr_tb_start,
    .insn_start         = hppa_tr_insn_start,
    .translate_insn     = hppa_tr_translate_insn,
    .tb_stop            = hppa_tr_tb_stop,
    .disas_log          = hppa_tr_disas_log,
};

void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           target_ulong pc, void *host_pc)
{
    DisasContext ctx;
    translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);
}