/*
 * HPPA emulation cpu translation for qemu.
 *
 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/host-utils.h"
#include "exec/page-protection.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/translation-block.h"
#include "exec/target_page.h"
#include "exec/log.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H

/* Choose to use explicit sizes within this file. */
#undef tcg_temp_new

typedef struct DisasCond {
    TCGCond c;
    TCGv_i64 a0, a1;
} DisasCond;

typedef struct DisasIAQE {
    /* IASQ; may be null for no change from TB. */
    TCGv_i64 space;
    /* IAOQ base; may be null for relative address. */
    TCGv_i64 base;
    /* IAOQ addend; if base is null, relative to cpu_iaoq_f. */
    int64_t disp;
} DisasIAQE;

typedef struct DisasDelayException {
    struct DisasDelayException *next;
    TCGLabel *lab;
    uint32_t insn;
    bool set_iir;
    int8_t set_n;
    uint8_t excp;
    /* Saved state at parent insn. */
    DisasIAQE iaq_f, iaq_b;
} DisasDelayException;

typedef struct DisasContext {
    DisasContextBase base;
    CPUState *cs;

    /* IAQ_Front, IAQ_Back. */
    DisasIAQE iaq_f, iaq_b;
    /* IAQ_Next, for jumps, otherwise null for simple advance. */
    DisasIAQE iaq_j, *iaq_n;

    /* IAOQ_Front at entry to TB. */
    uint64_t iaoq_first;
    uint64_t gva_offset_mask;

    DisasCond null_cond;
    TCGLabel *null_lab;

    DisasDelayException *delay_excp_list;
    TCGv_i64 zero;

    uint32_t insn;
    uint32_t tb_flags;
    int mmu_idx;
    int privilege;
    uint32_t psw_xb;
    bool psw_n_nonzero;
    bool psw_b_next;
    bool is_pa20;
    bool insn_start_updated;

#ifdef CONFIG_USER_ONLY
    MemOp unalign;
#endif
} DisasContext;

#ifdef CONFIG_USER_ONLY
#define UNALIGN(C)       (C)->unalign
#define MMU_DISABLED(C)  false
#else
#define UNALIGN(C)       MO_ALIGN
#define MMU_DISABLED(C)  MMU_IDX_MMU_DISABLED((C)->mmu_idx)
#endif

static inline MemOp mo_endian(DisasContext *ctx)
{
    /* The PSW_E bit sets the (little) endianness, but we don't implement it. */
    return MO_BE;
}

/* Note that ssm/rsm instructions number PSW_W and PSW_E differently. */
static int expand_sm_imm(DisasContext *ctx, int val)
{
    /* Keep unimplemented bits disabled -- see cpu_hppa_put_psw. */
    if (ctx->is_pa20) {
        if (val & PSW_SM_W) {
            val |= PSW_W;
        }
        val &= ~(PSW_SM_W | PSW_SM_E | PSW_G);
    } else {
        val &= ~(PSW_SM_W | PSW_SM_E | PSW_O);
    }
    return val;
}
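
/*
 * Note: expand_sm_imm above, and the small helpers that follow with the
 * (DisasContext *, int) signature, are field-expansion functions: they
 * are referenced from the decodetree description (insns.decode) and are
 * called by the generated decoder that is included below as
 * "decode-insns.c.inc".
 */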
/*
 * The 3-bit space register field is stored inverted: a negative value
 * tells space_select that 0 really means sr0, rather than a space
 * inferred from the base register.
 */
static int expand_sr3x(DisasContext *ctx, int val)
{
    return ~val;
}

/* Convert the M:A bits within a memory insn to the tri-state value
   we use for the final M. */
static int ma_to_m(DisasContext *ctx, int val)
{
    return val & 2 ? (val & 1 ? -1 : 1) : 0;
}

/* Convert the sign of the displacement to a pre- or post-modify. */
static int pos_to_m(DisasContext *ctx, int val)
{
    return val ? 1 : -1;
}

static int neg_to_m(DisasContext *ctx, int val)
{
    return val ? -1 : 1;
}

/* Used for branch targets and fp memory ops. */
static int expand_shl2(DisasContext *ctx, int val)
{
    return val << 2;
}

/* Used for assemble_21. */
static int expand_shl11(DisasContext *ctx, int val)
{
    return val << 11;
}

static int assemble_6(DisasContext *ctx, int val)
{
    /*
     * Officially, 32 * x + 32 - y.
     * Here, x is already in bit 5, and y is [4:0].
     * Since -y = ~y + 1, in 5 bits 32 - y => (y ^ 31) + 1,
     * with the overflow from bit 4 summing with x.
     */
    return (val ^ 31) + 1;
}

/* Expander for assemble_16a(s,cat(im10a,0),i). */
static int expand_11a(DisasContext *ctx, int val)
{
    /*
     * @val is bit 0 and bits [4:15].
     * Swizzle things around depending on PSW.W.
     */
    int im10a = extract32(val, 1, 10);
    int s = extract32(val, 11, 2);
    int i = (-(val & 1) << 13) | (im10a << 3);

    if (ctx->tb_flags & PSW_W) {
        i ^= s << 13;
    }
    return i;
}

/* Expander for assemble_16a(s,im11a,i). */
static int expand_12a(DisasContext *ctx, int val)
{
    /*
     * @val is bit 0 and bits [3:15].
     * Swizzle things around depending on PSW.W.
     */
    int im11a = extract32(val, 1, 11);
    int s = extract32(val, 12, 2);
    int i = (-(val & 1) << 13) | (im11a << 2);

    if (ctx->tb_flags & PSW_W) {
        i ^= s << 13;
    }
    return i;
}

/* Expander for assemble_16(s,im14). */
static int expand_16(DisasContext *ctx, int val)
{
    /*
     * @val is bits [0:15], containing both im14 and s.
     * Swizzle things around depending on PSW.W.
     */
    int s = extract32(val, 14, 2);
    int i = (-(val & 1) << 13) | extract32(val, 1, 13);

    if (ctx->tb_flags & PSW_W) {
        i ^= s << 13;
    }
    return i;
}

/* The sp field is only present with !PSW_W. */
static int sp0_if_wide(DisasContext *ctx, int sp)
{
    return ctx->tb_flags & PSW_W ? 0 : sp;
}

/* Translate CMPI doubleword conditions to standard. */
static int cmpbid_c(DisasContext *ctx, int val)
{
    return val ? val : 4; /* 0 == "*<<" */
}

/*
 * In many places pa1.x did not decode the bit that later became
 * the pa2.0 D bit.  Suppress D unless the cpu is pa2.0.
 */
static int pa20_d(DisasContext *ctx, int val)
{
    return ctx->is_pa20 & val;
}
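
/*
 * Worked example for the low-sign-extended immediates above (illustrative
 * only): in expand_16, bit 0 of @val is the sign of im14, so a value with
 * bit 0 set and all thirteen magnitude bits set decodes to
 * (-(1) << 13) | 0x1fff == -1.  With PSW.W set, the two space-select bits
 * are additionally xor-folded into the immediate at bit 13, which is how
 * assemble_16 widens the displacement for pa2.0 wide mode.
 */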
/* Include the auto-generated decoder. */
#include "decode-insns.c.inc"

/* We are not using a goto_tb (for whatever reason), but have updated
   the iaq (for whatever reason), so don't do it again on exit. */
#define DISAS_IAQ_N_UPDATED     DISAS_TARGET_0

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the iaq for the next instruction to be executed. */
#define DISAS_IAQ_N_STALE       DISAS_TARGET_1

/* Similarly, but we want to return to the main loop immediately
   to recognize unmasked interrupts. */
#define DISAS_IAQ_N_STALE_EXIT  DISAS_TARGET_2
#define DISAS_EXIT              DISAS_TARGET_3

/* global register indexes */
static TCGv_i64 cpu_gr[32];
static TCGv_i64 cpu_sr[4];
static TCGv_i64 cpu_srH;
static TCGv_i64 cpu_iaoq_f;
static TCGv_i64 cpu_iaoq_b;
static TCGv_i64 cpu_iasq_f;
static TCGv_i64 cpu_iasq_b;
static TCGv_i64 cpu_sar;
static TCGv_i64 cpu_psw_n;
static TCGv_i64 cpu_psw_v;
static TCGv_i64 cpu_psw_cb;
static TCGv_i64 cpu_psw_cb_msb;
static TCGv_i32 cpu_psw_xb;

void hppa_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUHPPAState, V) }

    typedef struct { TCGv_i64 *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
        DEF_VAR(psw_n),
        DEF_VAR(psw_v),
        DEF_VAR(psw_cb),
        DEF_VAR(psw_cb_msb),
        DEF_VAR(iaoq_f),
        DEF_VAR(iaoq_b),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler. */
    static const char gr_names[32][4] = {
        "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
        "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
    };
    /* SR[4-7] are not global registers so that we can index them. */
    static const char sr_names[5][4] = {
        "sr0", "sr1", "sr2", "sr3", "srH"
    };

    int i;

    cpu_gr[0] = NULL;
    for (i = 1; i < 32; i++) {
        cpu_gr[i] = tcg_global_mem_new(tcg_env,
                                       offsetof(CPUHPPAState, gr[i]),
                                       gr_names[i]);
    }
    for (i = 0; i < 4; i++) {
        cpu_sr[i] = tcg_global_mem_new_i64(tcg_env,
                                           offsetof(CPUHPPAState, sr[i]),
                                           sr_names[i]);
    }
    cpu_srH = tcg_global_mem_new_i64(tcg_env,
                                     offsetof(CPUHPPAState, sr[4]),
                                     sr_names[4]);

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new(tcg_env, v->ofs, v->name);
    }

    cpu_psw_xb = tcg_global_mem_new_i32(tcg_env,
                                        offsetof(CPUHPPAState, psw_xb),
                                        "psw_xb");
    cpu_iasq_f = tcg_global_mem_new_i64(tcg_env,
                                        offsetof(CPUHPPAState, iasq_f),
                                        "iasq_f");
    cpu_iasq_b = tcg_global_mem_new_i64(tcg_env,
                                        offsetof(CPUHPPAState, iasq_b),
                                        "iasq_b");
}

static void set_insn_breg(DisasContext *ctx, int breg)
{
    assert(!ctx->insn_start_updated);
    ctx->insn_start_updated = true;
    tcg_set_insn_start_param(ctx->base.insn_start, 2, breg);
}

static DisasCond cond_make_f(void)
{
    return (DisasCond){
        .c = TCG_COND_NEVER,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_t(void)
{
    return (DisasCond){
        .c = TCG_COND_ALWAYS,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_n(void)
{
    return (DisasCond){
        .c = TCG_COND_NE,
        .a0 = cpu_psw_n,
        .a1 = tcg_constant_i64(0)
    };
}

static DisasCond cond_make_tt(TCGCond c, TCGv_i64 a0, TCGv_i64 a1)
{
    assert(c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    return (DisasCond){ .c = c, .a0 = a0, .a1 = a1 };
}

static DisasCond cond_make_ti(TCGCond c, TCGv_i64 a0, uint64_t imm)
{
    return cond_make_tt(c, a0, tcg_constant_i64(imm));
}
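
/*
 * Note on the cond_make_* suffixes (descriptive only): f/t/n build the
 * constant-false, constant-true and PSW[N]-test conditions; tt/ti take
 * operands used as-is, while the vi/vv variants below copy their
 * operands into fresh temps first, so that the condition survives later
 * writes to the source registers (e.g. when the result register is also
 * an input).
 */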
static DisasCond cond_make_vi(TCGCond c, TCGv_i64 a0, uint64_t imm)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    tcg_gen_mov_i64(tmp, a0);
    return cond_make_ti(c, tmp, imm);
}

static DisasCond cond_make_vv(TCGCond c, TCGv_i64 a0, TCGv_i64 a1)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    tcg_gen_mov_i64(t0, a0);
    tcg_gen_mov_i64(t1, a1);
    return cond_make_tt(c, t0, t1);
}

static TCGv_i64 load_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0) {
        return ctx->zero;
    } else {
        return cpu_gr[reg];
    }
}

static TCGv_i64 dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
        return tcg_temp_new_i64();
    } else {
        return cpu_gr[reg];
    }
}

static void save_or_nullify(DisasContext *ctx, TCGv_i64 dest, TCGv_i64 t)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        tcg_gen_movcond_i64(ctx->null_cond.c, dest, ctx->null_cond.a0,
                            ctx->null_cond.a1, dest, t);
    } else {
        tcg_gen_mov_i64(dest, t);
    }
}

static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_i64 t)
{
    if (reg != 0) {
        save_or_nullify(ctx, cpu_gr[reg], t);
    }
}

#if HOST_BIG_ENDIAN
# define HI_OFS  0
# define LO_OFS  4
#else
# define HI_OFS  4
# define LO_OFS  0
#endif

static TCGv_i32 load_frw_i32(unsigned rt)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, tcg_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
    return ret;
}

static TCGv_i32 load_frw0_i32(unsigned rt)
{
    if (rt == 0) {
        TCGv_i32 ret = tcg_temp_new_i32();
        tcg_gen_movi_i32(ret, 0);
        return ret;
    } else {
        return load_frw_i32(rt);
    }
}

static TCGv_i64 load_frw0_i64(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    if (rt == 0) {
        tcg_gen_movi_i64(ret, 0);
    } else {
        tcg_gen_ld32u_i64(ret, tcg_env,
                          offsetof(CPUHPPAState, fr[rt & 31])
                          + (rt & 32 ? LO_OFS : HI_OFS));
    }
    return ret;
}

static void save_frw_i32(unsigned rt, TCGv_i32 val)
{
    tcg_gen_st_i32(val, tcg_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
}

#undef HI_OFS
#undef LO_OFS

static TCGv_i64 load_frd(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    tcg_gen_ld_i64(ret, tcg_env, offsetof(CPUHPPAState, fr[rt]));
    return ret;
}

static TCGv_i64 load_frd0(unsigned rt)
{
    if (rt == 0) {
        TCGv_i64 ret = tcg_temp_new_i64();
        tcg_gen_movi_i64(ret, 0);
        return ret;
    } else {
        return load_frd(rt);
    }
}

static void save_frd(unsigned rt, TCGv_i64 val)
{
    tcg_gen_st_i64(val, tcg_env, offsetof(CPUHPPAState, fr[rt]));
}
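
/*
 * Note on the floating-point accessors above: the 64-bit fr[] array
 * backs both register widths.  A 32-bit name selects one half of
 * fr[rt & 31], with bit 5 of the register number choosing the half;
 * HI_OFS/LO_OFS account for the host endianness of that in-memory
 * split.
 */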
static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
{
#ifdef CONFIG_USER_ONLY
    tcg_gen_movi_i64(dest, 0);
#else
    if (reg < 4) {
        tcg_gen_mov_i64(dest, cpu_sr[reg]);
    } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        tcg_gen_mov_i64(dest, cpu_srH);
    } else {
        tcg_gen_ld_i64(dest, tcg_env, offsetof(CPUHPPAState, sr[reg]));
    }
#endif
}

/*
 * Write a value to psw_xb, bearing in mind the known value.
 * To be used just before exiting the TB, so do not update the known value.
 */
static void store_psw_xb(DisasContext *ctx, uint32_t xb)
{
    tcg_debug_assert(xb == 0 || xb == PSW_B);
    if (ctx->psw_xb != xb) {
        tcg_gen_movi_i32(cpu_psw_xb, xb);
    }
}

/* Write a value to psw_xb, and update the known value. */
static void set_psw_xb(DisasContext *ctx, uint32_t xb)
{
    store_psw_xb(ctx, xb);
    ctx->psw_xb = xb;
}

/* Skip over the implementation of an insn that has been nullified.
   Use this when the insn is too complex for a conditional move. */
static void nullify_over(DisasContext *ctx)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        /* The always condition should have been handled in the main loop. */
        assert(ctx->null_cond.c != TCG_COND_ALWAYS);

        ctx->null_lab = gen_new_label();

        /* If we're using PSW[N], copy it to a temp because... */
        if (ctx->null_cond.a0 == cpu_psw_n) {
            ctx->null_cond.a0 = tcg_temp_new_i64();
            tcg_gen_mov_i64(ctx->null_cond.a0, cpu_psw_n);
        }
        /* ... we clear it before branching over the implementation,
           so that (1) it's clear after nullifying this insn and
           (2) if this insn nullifies the next, PSW[N] is valid. */
        if (ctx->psw_n_nonzero) {
            ctx->psw_n_nonzero = false;
            tcg_gen_movi_i64(cpu_psw_n, 0);
        }

        tcg_gen_brcond_i64(ctx->null_cond.c, ctx->null_cond.a0,
                           ctx->null_cond.a1, ctx->null_lab);
        ctx->null_cond = cond_make_f();
    }
}

/* Save the current nullification state to PSW[N]. */
static void nullify_save(DisasContext *ctx)
{
    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (ctx->psw_n_nonzero) {
            tcg_gen_movi_i64(cpu_psw_n, 0);
        }
        return;
    }
    if (ctx->null_cond.a0 != cpu_psw_n) {
        tcg_gen_setcond_i64(ctx->null_cond.c, cpu_psw_n,
                            ctx->null_cond.a0, ctx->null_cond.a1);
        ctx->psw_n_nonzero = true;
    }
    ctx->null_cond = cond_make_f();
}

/* Set PSW[N] to X.  The intention is that this is used immediately
   before a goto_tb/exit_tb, so that there is no fallthru path to other
   code within the TB.  Therefore we do not update psw_n_nonzero. */
static void nullify_set(DisasContext *ctx, bool x)
{
    if (ctx->psw_n_nonzero || x) {
        tcg_gen_movi_i64(cpu_psw_n, x);
    }
}
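
/*
 * Typical use of the machinery above, as seen throughout the translate
 * functions below (sketch):
 *
 *     nullify_over(ctx);          // branch over the body when nullified
 *     ...emit the operation...
 *     return nullify_end(ctx);    // close the skip label; returns true
 */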
/* Mark the end of an instruction that may have been nullified.
   This is the pair to nullify_over.  Always returns true so that
   it may be tail-called from a translate function. */
static bool nullify_end(DisasContext *ctx)
{
    TCGLabel *null_lab = ctx->null_lab;
    DisasJumpType status = ctx->base.is_jmp;

    /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
       For UPDATED, we cannot update on the nullified path. */
    assert(status != DISAS_IAQ_N_UPDATED);
    /* Taken branches are handled manually. */
    assert(!ctx->psw_b_next);

    if (likely(null_lab == NULL)) {
        /* The current insn wasn't conditional or handled the condition
           applied to it without a branch, so the (new) setting of
           NULL_COND can be applied directly to the next insn. */
        return true;
    }
    ctx->null_lab = NULL;

    if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
        /* The next instruction will be unconditional,
           and NULL_COND already reflects that. */
        gen_set_label(null_lab);
    } else {
        /* The insn that we just executed is itself nullifying the next
           instruction.  Store the condition in the PSW[N] global.
           We asserted PSW[N] = 0 in nullify_over, so that after the
           label we have the proper value in place. */
        nullify_save(ctx);
        gen_set_label(null_lab);
        ctx->null_cond = cond_make_n();
    }
    if (status == DISAS_NORETURN) {
        ctx->base.is_jmp = DISAS_NEXT;
    }
    return true;
}

static bool iaqe_variable(const DisasIAQE *e)
{
    return e->base || e->space;
}

static DisasIAQE iaqe_incr(const DisasIAQE *e, int64_t disp)
{
    return (DisasIAQE){
        .space = e->space,
        .base = e->base,
        .disp = e->disp + disp,
    };
}

static DisasIAQE iaqe_branchi(DisasContext *ctx, int64_t disp)
{
    return (DisasIAQE){
        .space = ctx->iaq_b.space,
        .disp = ctx->iaq_f.disp + 8 + disp,
    };
}

static DisasIAQE iaqe_next_absv(DisasContext *ctx, TCGv_i64 var)
{
    return (DisasIAQE){
        .space = ctx->iaq_b.space,
        .base = var,
    };
}

static void copy_iaoq_entry(DisasContext *ctx, TCGv_i64 dest,
                            const DisasIAQE *src)
{
    tcg_gen_addi_i64(dest, src->base ? : cpu_iaoq_f, src->disp);
}

static void install_iaq_entries(DisasContext *ctx, const DisasIAQE *f,
                                const DisasIAQE *b)
{
    DisasIAQE b_next;

    if (b == NULL) {
        b_next = iaqe_incr(f, 4);
        b = &b_next;
    }

    /*
     * There is an edge case
     *    bv   r0(rN)
     *    b,l  disp,r0
     * for which F will use cpu_iaoq_b (from the indirect branch),
     * and B will use cpu_iaoq_f (from the direct branch).
     * In this case we need an extra temporary.
     */
    if (f->base != cpu_iaoq_b) {
        copy_iaoq_entry(ctx, cpu_iaoq_b, b);
        copy_iaoq_entry(ctx, cpu_iaoq_f, f);
    } else if (f->base == b->base) {
        copy_iaoq_entry(ctx, cpu_iaoq_f, f);
        tcg_gen_addi_i64(cpu_iaoq_b, cpu_iaoq_f, b->disp - f->disp);
    } else {
        TCGv_i64 tmp = tcg_temp_new_i64();
        copy_iaoq_entry(ctx, tmp, b);
        copy_iaoq_entry(ctx, cpu_iaoq_f, f);
        tcg_gen_mov_i64(cpu_iaoq_b, tmp);
    }

    if (f->space) {
        tcg_gen_mov_i64(cpu_iasq_f, f->space);
    }
    if (b->space || f->space) {
        tcg_gen_mov_i64(cpu_iasq_b, b->space ? : f->space);
    }
}

static void install_link(DisasContext *ctx, unsigned link, bool with_sr0)
{
    tcg_debug_assert(ctx->null_cond.c == TCG_COND_NEVER);
    if (!link) {
        return;
    }
    DisasIAQE next = iaqe_incr(&ctx->iaq_b, 4);
    copy_iaoq_entry(ctx, cpu_gr[link], &next);
#ifndef CONFIG_USER_ONLY
    if (with_sr0) {
        tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_b);
    }
#endif
}

static void gen_excp_1(int exception)
{
    gen_helper_excp(tcg_env, tcg_constant_i32(exception));
}

static void gen_excp(DisasContext *ctx, int exception)
{
    install_iaq_entries(ctx, &ctx->iaq_f, &ctx->iaq_b);
    nullify_save(ctx);
    gen_excp_1(exception);
    ctx->base.is_jmp = DISAS_NORETURN;
}
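
/*
 * Allocate a delayed-exception record, accumulated on
 * ctx->delay_excp_list for out-of-line emission at the end of the TB.
 * A caller branches to e->lab on the trapping path; the saved fields
 * describe how to present the exception there: store the insn to IIR
 * when set_iir, adjust PSW[N] per set_n, restore the saved IAQ state,
 * then raise excp.
 */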
static DisasDelayException *delay_excp(DisasContext *ctx, uint8_t excp)
{
    DisasDelayException *e = tcg_malloc(sizeof(DisasDelayException));

    memset(e, 0, sizeof(*e));
    e->next = ctx->delay_excp_list;
    ctx->delay_excp_list = e;

    e->lab = gen_new_label();
    e->insn = ctx->insn;
    e->set_iir = true;
    e->set_n = ctx->psw_n_nonzero ? 0 : -1;
    e->excp = excp;
    e->iaq_f = ctx->iaq_f;
    e->iaq_b = ctx->iaq_b;

    return e;
}

static bool gen_excp_iir(DisasContext *ctx, int exc)
{
    if (ctx->null_cond.c == TCG_COND_NEVER) {
        tcg_gen_st_i64(tcg_constant_i64(ctx->insn),
                       tcg_env, offsetof(CPUHPPAState, cr[CR_IIR]));
        gen_excp(ctx, exc);
    } else {
        DisasDelayException *e = delay_excp(ctx, exc);
        tcg_gen_brcond_i64(tcg_invert_cond(ctx->null_cond.c),
                           ctx->null_cond.a0, ctx->null_cond.a1, e->lab);
        ctx->null_cond = cond_make_f();
    }
    return true;
}

static bool gen_illegal(DisasContext *ctx)
{
    return gen_excp_iir(ctx, EXCP_ILL);
}

#ifdef CONFIG_USER_ONLY
#define CHECK_MOST_PRIVILEGED(EXCP) \
    return gen_excp_iir(ctx, EXCP)
#else
#define CHECK_MOST_PRIVILEGED(EXCP)             \
    do {                                        \
        if (ctx->privilege != 0) {              \
            return gen_excp_iir(ctx, EXCP);     \
        }                                       \
    } while (0)
#endif

static bool use_goto_tb(DisasContext *ctx, const DisasIAQE *f,
                        const DisasIAQE *b)
{
    return (!iaqe_variable(f) &&
            (b == NULL || !iaqe_variable(b)) &&
            translator_use_goto_tb(&ctx->base, ctx->iaoq_first + f->disp));
}

/* If the next insn is to be nullified, and it's on the same page,
   and we're not attempting to set a breakpoint on it, then we can
   totally skip the nullified insn.  This avoids creating and
   executing a TB that merely branches to the next TB. */
static bool use_nullify_skip(DisasContext *ctx)
{
    return (!(tb_cflags(ctx->base.tb) & CF_BP_PAGE)
            && !iaqe_variable(&ctx->iaq_b)
            && (((ctx->iaoq_first + ctx->iaq_b.disp) ^ ctx->iaoq_first)
                & TARGET_PAGE_MASK) == 0);
}

static void gen_goto_tb(DisasContext *ctx, int which,
                        const DisasIAQE *f, const DisasIAQE *b)
{
    install_iaq_entries(ctx, f, b);
    if (use_goto_tb(ctx, f, b)) {
        tcg_gen_goto_tb(which);
        tcg_gen_exit_tb(ctx->base.tb, which);
    } else {
        tcg_gen_lookup_and_goto_ptr();
    }
}

static bool cond_need_sv(int c)
{
    return c == 2 || c == 3 || c == 6;
}

static bool cond_need_cb(int c)
{
    return c == 4 || c == 5;
}
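
/*
 * In the condition builders below, cond_need_sv covers the signed
 * comparisons (c = 2 "<", c = 3 "<=") and the explicit overflow test
 * (c = 6 SV), all of which read the signed-overflow value, while
 * cond_need_cb covers the carry-based conditions (c = 4 NUV, c = 5 ZNV).
 */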
/*
 * Compute conditional for arithmetic.  See Page 5-3, Table 5-1, of
 * the Parisc 1.1 Architecture Reference Manual for details.
 */

static DisasCond do_cond(DisasContext *ctx, unsigned cf, bool d,
                         TCGv_i64 res, TCGv_i64 uv, TCGv_i64 sv)
{
    TCGCond sign_cond, zero_cond;
    uint64_t sign_imm, zero_imm;
    DisasCond cond;
    TCGv_i64 tmp;

    if (d) {
        /* 64-bit condition. */
        sign_imm = 0;
        sign_cond = TCG_COND_LT;
        zero_imm = 0;
        zero_cond = TCG_COND_EQ;
    } else {
        /* 32-bit condition. */
        sign_imm = 1ull << 31;
        sign_cond = TCG_COND_TSTNE;
        zero_imm = UINT32_MAX;
        zero_cond = TCG_COND_TSTEQ;
    }

    switch (cf >> 1) {
    case 0: /* Never / TR    (0 / 1) */
        cond = cond_make_f();
        break;
    case 1: /* = / <>        (Z / !Z) */
        cond = cond_make_vi(zero_cond, res, zero_imm);
        break;
    case 2: /* < / >=        (N ^ V / !(N ^ V)) */
        tmp = tcg_temp_new_i64();
        tcg_gen_xor_i64(tmp, res, sv);
        cond = cond_make_ti(sign_cond, tmp, sign_imm);
        break;
    case 3: /* <= / >        ((N ^ V) | Z / !((N ^ V) | Z)) */
        /*
         * Simplify:
         *   (N ^ V) | Z
         *   ((res < 0) ^ (sv < 0)) | !res
         *   ((res ^ sv) < 0) | !res
         *   ((res ^ sv) < 0 ? 1 : !res)
         *   !((res ^ sv) < 0 ? 0 : res)
         */
        tmp = tcg_temp_new_i64();
        tcg_gen_xor_i64(tmp, res, sv);
        tcg_gen_movcond_i64(sign_cond, tmp,
                            tmp, tcg_constant_i64(sign_imm),
                            ctx->zero, res);
        cond = cond_make_ti(zero_cond, tmp, zero_imm);
        break;
    case 4: /* NUV / UV      (!UV / UV) */
        cond = cond_make_vi(TCG_COND_EQ, uv, 0);
        break;
    case 5: /* ZNV / VNZ     (!UV | Z / UV & !Z) */
        tmp = tcg_temp_new_i64();
        tcg_gen_movcond_i64(TCG_COND_EQ, tmp, uv, ctx->zero, ctx->zero, res);
        cond = cond_make_ti(zero_cond, tmp, zero_imm);
        break;
    case 6: /* SV / NSV      (V / !V) */
        cond = cond_make_vi(sign_cond, sv, sign_imm);
        break;
    case 7: /* OD / EV */
        cond = cond_make_vi(TCG_COND_TSTNE, res, 1);
        break;
    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/* Similar, but for the special case of subtraction without borrow, we
   can use the inputs directly.  This can allow other computation to be
   deleted as unused. */

static DisasCond do_sub_cond(DisasContext *ctx, unsigned cf, bool d,
                             TCGv_i64 res, TCGv_i64 in1,
                             TCGv_i64 in2, TCGv_i64 sv)
{
    TCGCond tc;
    bool ext_uns;

    switch (cf >> 1) {
    case 1: /* = / <> */
        tc = TCG_COND_EQ;
        ext_uns = true;
        break;
    case 2: /* < / >= */
        tc = TCG_COND_LT;
        ext_uns = false;
        break;
    case 3: /* <= / > */
        tc = TCG_COND_LE;
        ext_uns = false;
        break;
    case 4: /* << / >>= */
        tc = TCG_COND_LTU;
        ext_uns = true;
        break;
    case 5: /* <<= / >> */
        tc = TCG_COND_LEU;
        ext_uns = true;
        break;
    default:
        return do_cond(ctx, cf, d, res, NULL, sv);
    }

    if (cf & 1) {
        tc = tcg_invert_cond(tc);
    }
    if (!d) {
        TCGv_i64 t1 = tcg_temp_new_i64();
        TCGv_i64 t2 = tcg_temp_new_i64();

        if (ext_uns) {
            tcg_gen_ext32u_i64(t1, in1);
            tcg_gen_ext32u_i64(t2, in2);
        } else {
            tcg_gen_ext32s_i64(t1, in1);
            tcg_gen_ext32s_i64(t2, in2);
        }
        return cond_make_tt(tc, t1, t2);
    }
    return cond_make_vv(tc, in1, in2);
}

/*
 * Similar, but for logicals, where the carry and overflow bits are not
 * computed, and use of them is undefined.
 *
 * Undefined or not, hardware does not trap.  It seems reasonable to
 * assume hardware treats cases c={4,5,6} as if C=0 & V=0, since that's
 * how cases c={2,3} are treated.
 */

static DisasCond do_log_cond(DisasContext *ctx, unsigned cf, bool d,
                             TCGv_i64 res)
{
    TCGCond tc;
    uint64_t imm;

    switch (cf >> 1) {
    case 0: /* never / always */
    case 4: /* undef, C */
    case 5: /* undef, C & !Z */
    case 6: /* undef, V */
        return cf & 1 ? cond_make_t() : cond_make_f();
    case 1: /* == / <> */
        tc = d ? TCG_COND_EQ : TCG_COND_TSTEQ;
        imm = d ? 0 : UINT32_MAX;
        break;
    case 2: /* < / >= */
        tc = d ? TCG_COND_LT : TCG_COND_TSTNE;
        imm = d ? 0 : 1ull << 31;
        break;
    case 3: /* <= / > */
        tc = cf & 1 ? TCG_COND_GT : TCG_COND_LE;
        if (!d) {
            TCGv_i64 tmp = tcg_temp_new_i64();
            tcg_gen_ext32s_i64(tmp, res);
            return cond_make_ti(tc, tmp, 0);
        }
        return cond_make_vi(tc, res, 0);
    case 7: /* OD / EV */
        tc = TCG_COND_TSTNE;
        imm = 1;
        break;
    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        tc = tcg_invert_cond(tc);
    }
    return cond_make_vi(tc, res, imm);
}
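
/*
 * Throughout these condition builders, cf is the 4-bit condition field:
 * bit 0 selects the negated sense (applied by tcg_invert_cond at the
 * end) and cf >> 1 selects the base condition, while d distinguishes
 * the 64-bit variants from the 32-bit ones, which test against 32-bit
 * masks via TSTEQ/TSTNE rather than the sign or zero of the full
 * register.
 */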
/* Similar, but for shift/extract/deposit conditions. */

static DisasCond do_sed_cond(DisasContext *ctx, unsigned orig, bool d,
                             TCGv_i64 res)
{
    unsigned c, f;

    /* Convert the compressed condition codes to standard.
       0-2 are the same as logicals (nv,<,<=), while 3 is OD.
       4-7 are the reverse of 0-3. */
    c = orig & 3;
    if (c == 3) {
        c = 7;
    }
    f = (orig & 4) / 4;

    return do_log_cond(ctx, c * 2 + f, d, res);
}

/* Similar, but for unit zero conditions. */
static DisasCond do_unit_zero_cond(unsigned cf, bool d, TCGv_i64 res)
{
    TCGv_i64 tmp;
    uint64_t d_repl = d ? 0x0000000100000001ull : 1;
    uint64_t ones = 0, sgns = 0;

    switch (cf >> 1) {
    case 1: /* SBW / NBW */
        if (d) {
            ones = d_repl;
            sgns = d_repl << 31;
        }
        break;
    case 2: /* SBZ / NBZ */
        ones = d_repl * 0x01010101u;
        sgns = ones << 7;
        break;
    case 3: /* SHZ / NHZ */
        ones = d_repl * 0x00010001u;
        sgns = ones << 15;
        break;
    }
    if (ones == 0) {
        /* Undefined, or 0/1 (never/always). */
        return cf & 1 ? cond_make_t() : cond_make_f();
    }

    /*
     * See hasless(v,1) from
     * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
     */
    tmp = tcg_temp_new_i64();
    tcg_gen_subi_i64(tmp, res, ones);
    tcg_gen_andc_i64(tmp, tmp, res);

    return cond_make_ti(cf & 1 ? TCG_COND_TSTEQ : TCG_COND_TSTNE, tmp, sgns);
}

static TCGv_i64 get_carry(DisasContext *ctx, bool d,
                          TCGv_i64 cb, TCGv_i64 cb_msb)
{
    if (!d) {
        TCGv_i64 t = tcg_temp_new_i64();
        tcg_gen_extract_i64(t, cb, 32, 1);
        return t;
    }
    return cb_msb;
}

static TCGv_i64 get_psw_carry(DisasContext *ctx, bool d)
{
    return get_carry(ctx, d, cpu_psw_cb, cpu_psw_cb_msb);
}

/* Compute signed overflow for addition. */
static TCGv_i64 do_add_sv(DisasContext *ctx, TCGv_i64 res,
                          TCGv_i64 in1, TCGv_i64 in2,
                          TCGv_i64 orig_in1, int shift, bool d)
{
    TCGv_i64 sv = tcg_temp_new_i64();
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_xor_i64(sv, res, in1);
    tcg_gen_xor_i64(tmp, in1, in2);
    tcg_gen_andc_i64(sv, sv, tmp);

    switch (shift) {
    case 0:
        break;
    case 1:
        /* Shift left by one and compare the sign. */
        tcg_gen_add_i64(tmp, orig_in1, orig_in1);
        tcg_gen_xor_i64(tmp, tmp, orig_in1);
        /* Incorporate into the overflow. */
        tcg_gen_or_i64(sv, sv, tmp);
        break;
    default:
        {
            int sign_bit = d ? 63 : 31;

            /* Compare the sign against all lower bits. */
            tcg_gen_sextract_i64(tmp, orig_in1, sign_bit, 1);
            tcg_gen_xor_i64(tmp, tmp, orig_in1);
            /*
             * If one of the bits shifting into or through the sign
             * differs, then we have overflow.
             */
            tcg_gen_extract_i64(tmp, tmp, sign_bit - shift, shift);
            tcg_gen_movcond_i64(TCG_COND_NE, sv, tmp, ctx->zero,
                                tcg_constant_i64(-1), sv);
        }
    }
    return sv;
}
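
/*
 * For the carry computations below and in do_add: cb = in1 ^ in2 ^ dest
 * is the per-bit carry-in vector of the addition (dest = in1 ^ in2 ^
 * carry-in at every bit position), and cb_msb is the carry out of bit
 * 63; get_carry picks bit 32 of cb as the carry for 32-bit operations.
 */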
/* Compute unsigned overflow for addition. */
static TCGv_i64 do_add_uv(DisasContext *ctx, TCGv_i64 cb, TCGv_i64 cb_msb,
                          TCGv_i64 in1, int shift, bool d)
{
    if (shift == 0) {
        return get_carry(ctx, d, cb, cb_msb);
    } else {
        TCGv_i64 tmp = tcg_temp_new_i64();
        tcg_gen_extract_i64(tmp, in1, (d ? 63 : 31) - shift, shift);
        tcg_gen_or_i64(tmp, tmp, get_carry(ctx, d, cb, cb_msb));
        return tmp;
    }
}

/* Compute signed overflow for subtraction. */
static TCGv_i64 do_sub_sv(DisasContext *ctx, TCGv_i64 res,
                          TCGv_i64 in1, TCGv_i64 in2)
{
    TCGv_i64 sv = tcg_temp_new_i64();
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_xor_i64(sv, res, in1);
    tcg_gen_xor_i64(tmp, in1, in2);
    tcg_gen_and_i64(sv, sv, tmp);

    return sv;
}

static void gen_tc(DisasContext *ctx, DisasCond *cond)
{
    DisasDelayException *e;

    switch (cond->c) {
    case TCG_COND_NEVER:
        break;
    case TCG_COND_ALWAYS:
        gen_excp_iir(ctx, EXCP_COND);
        break;
    default:
        e = delay_excp(ctx, EXCP_COND);
        tcg_gen_brcond_i64(cond->c, cond->a0, cond->a1, e->lab);
        /* In the non-trap path, the condition is known false. */
        *cond = cond_make_f();
        break;
    }
}

static void gen_tsv(DisasContext *ctx, TCGv_i64 *sv, bool d)
{
    DisasCond cond = do_cond(ctx, /* SV */ 12, d, NULL, NULL, *sv);
    DisasDelayException *e = delay_excp(ctx, EXCP_OVERFLOW);

    tcg_gen_brcond_i64(cond.c, cond.a0, cond.a1, e->lab);

    /* In the non-trap path, V is known zero. */
    *sv = tcg_constant_i64(0);
}

static void do_add(DisasContext *ctx, unsigned rt, TCGv_i64 orig_in1,
                   TCGv_i64 in2, unsigned shift, bool is_l,
                   bool is_tsv, bool is_tc, bool is_c, unsigned cf, bool d)
{
    TCGv_i64 dest, cb, cb_msb, in1, uv, sv, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new_i64();
    cb = NULL;
    cb_msb = NULL;

    in1 = orig_in1;
    if (shift) {
        tmp = tcg_temp_new_i64();
        tcg_gen_shli_i64(tmp, in1, shift);
        in1 = tmp;
    }

    if (!is_l || cond_need_cb(c)) {
        cb_msb = tcg_temp_new_i64();
        cb = tcg_temp_new_i64();

        if (is_c) {
            tcg_gen_addcio_i64(dest, cb_msb, in1, in2, get_psw_carry(ctx, d));
        } else {
            tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero, in2, ctx->zero);
        }
        tcg_gen_xor_i64(cb, in1, in2);
        tcg_gen_xor_i64(cb, cb, dest);
    } else {
        tcg_gen_add_i64(dest, in1, in2);
        if (is_c) {
            tcg_gen_add_i64(dest, dest, get_psw_carry(ctx, d));
        }
    }

    /* Compute signed overflow if required. */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_add_sv(ctx, dest, in1, in2, orig_in1, shift, d);
        if (is_tsv) {
            gen_tsv(ctx, &sv, d);
        }
    }

    /* Compute unsigned overflow if required. */
    uv = NULL;
    if (cond_need_cb(c)) {
        uv = do_add_uv(ctx, cb, cb_msb, orig_in1, shift, d);
    }

    /* Emit any conditional trap before any writeback. */
    cond = do_cond(ctx, cf, d, dest, uv, sv);
    if (is_tc) {
        gen_tc(ctx, &cond);
    }

    /* Write back the result. */
    if (!is_l) {
        save_or_nullify(ctx, cpu_psw_cb, cb);
        save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    }
    save_gpr(ctx, rt, dest);

    /* Install the new nullification. */
    ctx->null_cond = cond;
}
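
/*
 * The flag parameters of do_add map onto the instruction variants:
 * is_l for ADD,L (logical: no carry writeback), is_tsv for trap on
 * signed overflow, is_tc for trap on condition, is_c for carry-in
 * (ADD,C), and shift for the SHxADD forms.
 */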
static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_d_sh *a,
                       bool is_l, bool is_tsv, bool is_tc, bool is_c)
{
    TCGv_i64 tcg_r1, tcg_r2;

    if (unlikely(is_tc && a->cf == 1)) {
        /* Unconditional trap on condition. */
        return gen_excp_iir(ctx, EXCP_COND);
    }
    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_add(ctx, a->t, tcg_r1, tcg_r2, a->sh, is_l,
           is_tsv, is_tc, is_c, a->cf, a->d);
    return nullify_end(ctx);
}

static bool do_add_imm(DisasContext *ctx, arg_rri_cf *a,
                       bool is_tsv, bool is_tc)
{
    TCGv_i64 tcg_im, tcg_r2;

    if (unlikely(is_tc && a->cf == 1)) {
        /* Unconditional trap on condition. */
        return gen_excp_iir(ctx, EXCP_COND);
    }
    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = tcg_constant_i64(a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    /* All ADDI conditions are 32-bit. */
    do_add(ctx, a->t, tcg_im, tcg_r2, 0, 0, is_tsv, is_tc, 0, a->cf, false);
    return nullify_end(ctx);
}

static void do_sub(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                   TCGv_i64 in2, bool is_tsv, bool is_b,
                   bool is_tc, unsigned cf, bool d)
{
    TCGv_i64 dest, sv, cb, cb_msb;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new_i64();
    cb = tcg_temp_new_i64();
    cb_msb = tcg_temp_new_i64();

    if (is_b) {
        /* DEST,C = IN1 + ~IN2 + C. */
        tcg_gen_not_i64(cb, in2);
        tcg_gen_addcio_i64(dest, cb_msb, in1, cb, get_psw_carry(ctx, d));
        tcg_gen_xor_i64(cb, cb, in1);
        tcg_gen_xor_i64(cb, cb, dest);
    } else {
        /*
         * DEST,C = IN1 + ~IN2 + 1.  We can produce the same result in fewer
         * operations by seeding the high word with 1 and subtracting.
         */
        TCGv_i64 one = tcg_constant_i64(1);
        tcg_gen_sub2_i64(dest, cb_msb, in1, one, in2, ctx->zero);
        tcg_gen_eqv_i64(cb, in1, in2);
        tcg_gen_xor_i64(cb, cb, dest);
    }

    /* Compute signed overflow if required. */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            gen_tsv(ctx, &sv, d);
        }
    }

    /* Compute the condition.  We cannot use the special case for borrow. */
    if (!is_b) {
        cond = do_sub_cond(ctx, cf, d, dest, in1, in2, sv);
    } else {
        cond = do_cond(ctx, cf, d, dest, get_carry(ctx, d, cb, cb_msb), sv);
    }

    /* Emit any conditional trap before any writeback. */
    if (is_tc) {
        gen_tc(ctx, &cond);
    }

    /* Write back the result. */
    save_or_nullify(ctx, cpu_psw_cb, cb);
    save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification. */
    ctx->null_cond = cond;
}
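
/*
 * A note on the non-borrow path of do_sub above: with sub2 the high
 * word is seeded with 1, so cb_msb ends up as 1 - borrow, which is
 * exactly the PA carry/borrow convention (carry set means no borrow)
 * and matches IN1 + ~IN2 + 1 without the extra not/add operations.
 */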
static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf_d *a,
                       bool is_tsv, bool is_b, bool is_tc)
{
    TCGv_i64 tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_sub(ctx, a->t, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, a->cf, a->d);
    return nullify_end(ctx);
}

static bool do_sub_imm(DisasContext *ctx, arg_rri_cf *a, bool is_tsv)
{
    TCGv_i64 tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = tcg_constant_i64(a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    /* All SUBI conditions are 32-bit. */
    do_sub(ctx, a->t, tcg_im, tcg_r2, is_tsv, 0, 0, a->cf, false);
    return nullify_end(ctx);
}

static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                      TCGv_i64 in2, unsigned cf, bool d)
{
    TCGv_i64 dest, sv;
    DisasCond cond;

    dest = tcg_temp_new_i64();
    tcg_gen_sub_i64(dest, in1, in2);

    /* Compute signed overflow if required. */
    sv = NULL;
    if (cond_need_sv(cf >> 1)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    /* Form the condition for the compare. */
    cond = do_sub_cond(ctx, cf, d, dest, in1, in2, sv);

    /* Clear. */
    tcg_gen_movi_i64(dest, 0);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification. */
    ctx->null_cond = cond;
}

static void do_log(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                   TCGv_i64 in2, unsigned cf, bool d,
                   void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dest = dest_gpr(ctx, rt);

    /* Perform the operation, and writeback. */
    fn(dest, in1, in2);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification. */
    ctx->null_cond = do_log_cond(ctx, cf, d, dest);
}

static bool do_log_reg(DisasContext *ctx, arg_rrr_cf_d *a,
                       void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d, fn);
    return nullify_end(ctx);
}
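
/*
 * Unit conditions, handled below, test carries out of independent
 * sub-units of the word (nibbles, bytes or halfwords) for the UNIT
 * add/subtract instructions; test_cb holds a mask with one bit per
 * sub-unit carry position, derived from the same in1 ^ in2 ^ dest
 * carry vector described earlier.
 */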
static void do_unit_addsub(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                           TCGv_i64 in2, unsigned cf, bool d,
                           bool is_tc, bool is_add)
{
    TCGv_i64 dest = tcg_temp_new_i64();
    uint64_t test_cb = 0;
    DisasCond cond;

    /* Select which carry-out bits to test. */
    switch (cf >> 1) {
    case 4: /* NDC / SDC -- 4-bit carries */
        test_cb = dup_const(MO_8, 0x88);
        break;
    case 5: /* NWC / SWC -- 32-bit carries */
        if (d) {
            test_cb = dup_const(MO_32, INT32_MIN);
        } else {
            cf &= 1; /* undefined -- map to never/always */
        }
        break;
    case 6: /* NBC / SBC -- 8-bit carries */
        test_cb = dup_const(MO_8, INT8_MIN);
        break;
    case 7: /* NHC / SHC -- 16-bit carries */
        test_cb = dup_const(MO_16, INT16_MIN);
        break;
    }
    if (!d) {
        test_cb = (uint32_t)test_cb;
    }

    if (!test_cb) {
        /* No need to compute carries if we don't need to test them. */
        if (is_add) {
            tcg_gen_add_i64(dest, in1, in2);
        } else {
            tcg_gen_sub_i64(dest, in1, in2);
        }
        cond = do_unit_zero_cond(cf, d, dest);
    } else {
        TCGv_i64 cb = tcg_temp_new_i64();

        if (d) {
            TCGv_i64 cb_msb = tcg_temp_new_i64();
            if (is_add) {
                tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero, in2, ctx->zero);
                tcg_gen_xor_i64(cb, in1, in2);
            } else {
                /* See do_sub, !is_b. */
                TCGv_i64 one = tcg_constant_i64(1);
                tcg_gen_sub2_i64(dest, cb_msb, in1, one, in2, ctx->zero);
                tcg_gen_eqv_i64(cb, in1, in2);
            }
            tcg_gen_xor_i64(cb, cb, dest);
            tcg_gen_extract2_i64(cb, cb, cb_msb, 1);
        } else {
            if (is_add) {
                tcg_gen_add_i64(dest, in1, in2);
                tcg_gen_xor_i64(cb, in1, in2);
            } else {
                tcg_gen_sub_i64(dest, in1, in2);
                tcg_gen_eqv_i64(cb, in1, in2);
            }
            tcg_gen_xor_i64(cb, cb, dest);
            tcg_gen_shri_i64(cb, cb, 1);
        }

        cond = cond_make_ti(cf & 1 ? TCG_COND_TSTEQ : TCG_COND_TSTNE,
                            cb, test_cb);
    }

    if (is_tc) {
        gen_tc(ctx, &cond);
    }
    save_gpr(ctx, rt, dest);

    ctx->null_cond = cond;
}

#ifndef CONFIG_USER_ONLY
/* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
   from the top 2 bits of the base register.  There are a few system
   instructions that have a 3-bit space specifier, for which SR0 is
   not special.  To handle this, pass ~SP. */
static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_i64 base)
{
    TCGv_ptr ptr;
    TCGv_i64 tmp;
    TCGv_i64 spc;

    if (sp != 0) {
        if (sp < 0) {
            sp = ~sp;
        }
        spc = tcg_temp_new_i64();
        load_spr(ctx, spc, sp);
        return spc;
    }
    if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        return cpu_srH;
    }

    ptr = tcg_temp_new_ptr();
    tmp = tcg_temp_new_i64();
    spc = tcg_temp_new_i64();

    /* Extract top 2 bits of the address, shift left 3 for uint64_t index. */
    tcg_gen_shri_i64(tmp, base, (ctx->tb_flags & PSW_W ? 64 : 32) - 5);
    tcg_gen_andi_i64(tmp, tmp, 030);
    tcg_gen_trunc_i64_ptr(ptr, tmp);

    tcg_gen_add_ptr(ptr, ptr, tcg_env);
    tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));

    return spc;
}
#endif

static void form_gva(DisasContext *ctx, TCGv_i64 *pgva, TCGv_i64 *pofs,
                     unsigned rb, unsigned rx, int scale, int64_t disp,
                     unsigned sp, int modify, bool is_phys)
{
    TCGv_i64 base = load_gpr(ctx, rb);
    TCGv_i64 ofs;
    TCGv_i64 addr;

    set_insn_breg(ctx, rb);

    /* Note that RX is mutually exclusive with DISP. */
    if (rx) {
        ofs = tcg_temp_new_i64();
        tcg_gen_shli_i64(ofs, cpu_gr[rx], scale);
        tcg_gen_add_i64(ofs, ofs, base);
    } else if (disp || modify) {
        ofs = tcg_temp_new_i64();
        tcg_gen_addi_i64(ofs, base, disp);
    } else {
        ofs = base;
    }

    *pofs = ofs;
    *pgva = addr = tcg_temp_new_i64();
    tcg_gen_andi_i64(addr, modify <= 0 ? ofs : base,
                     ctx->gva_offset_mask);
#ifndef CONFIG_USER_ONLY
    if (!is_phys) {
        tcg_gen_or_i64(addr, addr, space_select(ctx, sp, base));
    }
#endif
}
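
/*
 * Tying this back to ma_to_m above: a memory insn with the ,MB
 * (modify-before) completer reaches the helpers below with modify == -1
 * and uses the updated base for the access, while ,MA (modify-after)
 * arrives as modify == +1 and uses the unmodified base, as selected by
 * the "modify <= 0" test in form_gva.
 */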
/* Emit a memory load.  The modify parameter should be
 * < 0 for pre-modify,
 * > 0 for post-modify,
 * = 0 for no base register update.
 */
static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
                       unsigned rx, int scale, int64_t disp,
                       unsigned sp, int modify, MemOp mop)
{
    TCGv_i64 ofs;
    TCGv_i64 addr;

    /* Caller uses nullify_over/nullify_end. */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    mop |= mo_endian(ctx);
    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             MMU_DISABLED(ctx));
    tcg_gen_qemu_ld_i32(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
                       unsigned rx, int scale, int64_t disp,
                       unsigned sp, int modify, MemOp mop)
{
    TCGv_i64 ofs;
    TCGv_i64 addr;

    /* Caller uses nullify_over/nullify_end. */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    mop |= mo_endian(ctx);
    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             MMU_DISABLED(ctx));
    tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
                        unsigned rx, int scale, int64_t disp,
                        unsigned sp, int modify, MemOp mop)
{
    TCGv_i64 ofs;
    TCGv_i64 addr;

    /* Caller uses nullify_over/nullify_end. */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    mop |= mo_endian(ctx);
    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             MMU_DISABLED(ctx));
    tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
                        unsigned rx, int scale, int64_t disp,
                        unsigned sp, int modify, MemOp mop)
{
    TCGv_i64 ofs;
    TCGv_i64 addr;

    /* Caller uses nullify_over/nullify_end. */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    mop |= mo_endian(ctx);
    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             MMU_DISABLED(ctx));
    tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
                    unsigned rx, int scale, int64_t disp,
                    unsigned sp, int modify, MemOp mop)
{
    TCGv_i64 dest;

    nullify_over(ctx);

    if (modify == 0) {
        /* No base register update. */
        dest = dest_gpr(ctx, rt);
    } else {
        /* Make sure if RT == RB, we see the result of the load. */
        dest = tcg_temp_new_i64();
    }
    do_load_64(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
    save_gpr(ctx, rt, dest);

    return nullify_end(ctx);
}

static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, int64_t disp,
                      unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i32();
    do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_UL);
    save_frw_i32(rt, tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(tcg_env);
    }

    return nullify_end(ctx);
}

static bool trans_fldw(DisasContext *ctx, arg_ldst *a)
{
    return do_floadw(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
                     a->disp, a->sp, a->m);
}

static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, int64_t disp,
                      unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i64();
    do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_UQ);
    save_frd(rt, tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(tcg_env);
    }

    return nullify_end(ctx);
}
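
/*
 * The rt == 0 special case above reflects that fr0 is the floating-point
 * status register on PA-RISC: a load into it is how FPSR updates arrive,
 * so the loaded_fr0 helper lets the cpu re-derive its softfloat state
 * from the newly written value.
 */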
static bool trans_fldd(DisasContext *ctx, arg_ldst *a)
{
    return do_floadd(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
                     a->disp, a->sp, a->m);
}

static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb,
                     int64_t disp, unsigned sp,
                     int modify, MemOp mop)
{
    nullify_over(ctx);
    do_store_64(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
    return nullify_end(ctx);
}

static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, int64_t disp,
                       unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = load_frw_i32(rt);
    do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_UL);

    return nullify_end(ctx);
}

static bool trans_fstw(DisasContext *ctx, arg_ldst *a)
{
    return do_fstorew(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
                      a->disp, a->sp, a->m);
}

static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, int64_t disp,
                       unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = load_frd(rt);
    do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_UQ);

    return nullify_end(ctx);
}

static bool trans_fstd(DisasContext *ctx, arg_ldst *a)
{
    return do_fstored(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
                      a->disp, a->sp, a->m);
}

static bool do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
{
    TCGv_i32 tmp;

    nullify_over(ctx);
    tmp = load_frw0_i32(ra);

    func(tmp, tcg_env, tmp);

    save_frw_i32(rt, tmp);
    return nullify_end(ctx);
}

static bool do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    nullify_over(ctx);
    src = load_frd(ra);
    dst = tcg_temp_new_i32();

    func(dst, tcg_env, src);

    save_frw_i32(rt, dst);
    return nullify_end(ctx);
}

static bool do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
{
    TCGv_i64 tmp;

    nullify_over(ctx);
    tmp = load_frd0(ra);

    func(tmp, tcg_env, tmp);

    save_frd(rt, tmp);
    return nullify_end(ctx);
}

static bool do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
{
    TCGv_i32 src;
    TCGv_i64 dst;

    nullify_over(ctx);
    src = load_frw0_i32(ra);
    dst = tcg_temp_new_i64();

    func(dst, tcg_env, src);

    save_frd(rt, dst);
    return nullify_end(ctx);
}

static bool do_fop_weww(DisasContext *ctx, unsigned rt,
                        unsigned ra, unsigned rb,
                        void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
{
    TCGv_i32 a, b;

    nullify_over(ctx);
    a = load_frw0_i32(ra);
    b = load_frw0_i32(rb);

    func(a, tcg_env, a, b);

    save_frw_i32(rt, a);
    return nullify_end(ctx);
}

static bool do_fop_dedd(DisasContext *ctx, unsigned rt,
                        unsigned ra, unsigned rb,
                        void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
{
    TCGv_i64 a, b;

    nullify_over(ctx);
    a = load_frd0(ra);
    b = load_frd0(rb);

    func(a, tcg_env, a, b);

    save_frd(rt, a);
    return nullify_end(ctx);
}
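
/*
 * The do_fop_* suffixes encode the operand widths of the helper being
 * wrapped: w for a 32-bit word half-register, d for a 64-bit double
 * register, with e marking the tcg_env argument position.  So _wed is a
 * word result computed by func(env, double), and _dedd a double result
 * from func(env, double, double).
 */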
/* Emit an unconditional branch to a direct target, which may or may not
   have already had nullification handled. */
static bool do_dbranch(DisasContext *ctx, int64_t disp,
                       unsigned link, bool is_n)
{
    ctx->iaq_j = iaqe_branchi(ctx, disp);

    if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
        install_link(ctx, link, false);
        if (is_n) {
            if (use_nullify_skip(ctx)) {
                nullify_set(ctx, 0);
                store_psw_xb(ctx, 0);
                gen_goto_tb(ctx, 0, &ctx->iaq_j, NULL);
                ctx->base.is_jmp = DISAS_NORETURN;
                return true;
            }
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
        ctx->iaq_n = &ctx->iaq_j;
        ctx->psw_b_next = true;
    } else {
        nullify_over(ctx);

        install_link(ctx, link, false);
        if (is_n && use_nullify_skip(ctx)) {
            nullify_set(ctx, 0);
            store_psw_xb(ctx, 0);
            gen_goto_tb(ctx, 0, &ctx->iaq_j, NULL);
        } else {
            nullify_set(ctx, is_n);
            store_psw_xb(ctx, PSW_B);
            gen_goto_tb(ctx, 0, &ctx->iaq_b, &ctx->iaq_j);
        }
        nullify_end(ctx);

        nullify_set(ctx, 0);
        store_psw_xb(ctx, 0);
        gen_goto_tb(ctx, 1, &ctx->iaq_b, NULL);
        ctx->base.is_jmp = DISAS_NORETURN;
    }
    return true;
}

/* Emit a conditional branch to a direct target.  If the branch itself
   is nullified, we should have already used nullify_over. */
static bool do_cbranch(DisasContext *ctx, int64_t disp, bool is_n,
                       DisasCond *cond)
{
    DisasIAQE next;
    TCGLabel *taken = NULL;
    TCGCond c = cond->c;
    bool n;

    assert(ctx->null_cond.c == TCG_COND_NEVER);

    /* Handle TRUE and NEVER as direct branches. */
    if (c == TCG_COND_ALWAYS) {
        return do_dbranch(ctx, disp, 0, is_n && disp >= 0);
    }

    taken = gen_new_label();
    tcg_gen_brcond_i64(c, cond->a0, cond->a1, taken);

    /* Not taken: Condition not satisfied; nullify on backward branches. */
    n = is_n && disp < 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        store_psw_xb(ctx, 0);
        next = iaqe_incr(&ctx->iaq_b, 4);
        gen_goto_tb(ctx, 0, &next, NULL);
    } else {
        if (!n && ctx->null_lab) {
            gen_set_label(ctx->null_lab);
            ctx->null_lab = NULL;
        }
        nullify_set(ctx, n);
        store_psw_xb(ctx, 0);
        gen_goto_tb(ctx, 0, &ctx->iaq_b, NULL);
    }

    gen_set_label(taken);

    /* Taken: Condition satisfied; nullify on forward branches. */
    n = is_n && disp >= 0;

    next = iaqe_branchi(ctx, disp);
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        store_psw_xb(ctx, 0);
        gen_goto_tb(ctx, 1, &next, NULL);
    } else {
        nullify_set(ctx, n);
        store_psw_xb(ctx, PSW_B);
        gen_goto_tb(ctx, 1, &ctx->iaq_b, &next);
    }

    /* Not taken: the branch itself was nullified. */
    if (ctx->null_lab) {
        gen_set_label(ctx->null_lab);
        ctx->null_lab = NULL;
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    } else {
        ctx->base.is_jmp = DISAS_NORETURN;
    }
    return true;
}
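
/*
 * Shape of the code emitted by do_cbranch above (sketch):
 *
 *         brcond c, a0, a1 -> taken
 *         // not taken: set PSW[N] for backward ,N; goto_tb 0 (fallthru)
 *     taken:
 *         // taken: set PSW[N] for forward ,N; goto_tb 1 (branch target)
 *     null_lab (if the branch insn itself was nullified):
 *         // fall out to the main loop with the IAQ stale
 */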
/*
 * Emit an unconditional branch to an indirect target, in ctx->iaq_j.
 * This handles nullification of the branch itself.
 */
static bool do_ibranch(DisasContext *ctx, unsigned link,
                       bool with_sr0, bool is_n)
{
    if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
        install_link(ctx, link, with_sr0);
        if (is_n) {
            if (use_nullify_skip(ctx)) {
                install_iaq_entries(ctx, &ctx->iaq_j, NULL);
                nullify_set(ctx, 0);
                ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
                return true;
            }
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
        ctx->iaq_n = &ctx->iaq_j;
        ctx->psw_b_next = true;
        return true;
    }

    nullify_over(ctx);

    install_link(ctx, link, with_sr0);
    if (is_n && use_nullify_skip(ctx)) {
        install_iaq_entries(ctx, &ctx->iaq_j, NULL);
        nullify_set(ctx, 0);
        store_psw_xb(ctx, 0);
    } else {
        install_iaq_entries(ctx, &ctx->iaq_b, &ctx->iaq_j);
        nullify_set(ctx, is_n);
        store_psw_xb(ctx, PSW_B);
    }

    tcg_gen_lookup_and_goto_ptr();
    ctx->base.is_jmp = DISAS_NORETURN;
    return nullify_end(ctx);
}

/* Implement
 *    if (IAOQ_Front{30..31} < GR[b]{30..31})
 *      IAOQ_Next{30..31} ← GR[b]{30..31};
 *    else
 *      IAOQ_Next{30..31} ← IAOQ_Front{30..31};
 * which keeps the privilege level from being increased.
 */
static TCGv_i64 do_ibranch_priv(DisasContext *ctx, TCGv_i64 offset)
{
    TCGv_i64 dest = tcg_temp_new_i64();
    switch (ctx->privilege) {
    case 0:
        /* Privilege 0 is maximum and is allowed to decrease. */
        tcg_gen_mov_i64(dest, offset);
        break;
    case 3:
        /* Privilege 3 is minimum and is never allowed to increase. */
        tcg_gen_ori_i64(dest, offset, 3);
        break;
    default:
        tcg_gen_andi_i64(dest, offset, -4);
        tcg_gen_ori_i64(dest, dest, ctx->privilege);
        tcg_gen_umax_i64(dest, dest, offset);
        break;
    }
    return dest;
}
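
/*
 * Worked example for do_ibranch_priv (illustrative): the privilege level
 * lives in the low 2 bits of the offset, with 0 most privileged.  At
 * ctx->privilege == 1, a target with low bits 3 is kept as 3 by the umax
 * (dropping to privilege 3 is allowed), while a target with low bits 0
 * becomes 1: the branch cannot raise privilege above the current level,
 * only a gateway (B,GATE) can do that.
 */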

#ifdef CONFIG_USER_ONLY
/* On Linux, page zero is normally marked execute only + gateway.
   Therefore normal read or write is supposed to fail, but specific
   offsets have kernel code mapped to raise permissions to implement
   system calls.  Handling this via an explicit check here, rather
   than in the "be disp(sr2,r0)" instruction that probably sent us
   here, is the easiest way to handle the branch delay slot on the
   aforementioned BE.  */
static void do_page_zero(DisasContext *ctx)
{
    assert(ctx->iaq_f.disp == 0);

    /* If by some means we get here with PSW[N]=1, that implies that
       the B,GATE instruction would be skipped, and we'd fault on the
       next insn within the privileged page.  */
    switch (ctx->null_cond.c) {
    case TCG_COND_NEVER:
        break;
    case TCG_COND_ALWAYS:
        tcg_gen_movi_i64(cpu_psw_n, 0);
        goto do_sigill;
    default:
        /* Since this is always the first (and only) insn within the
           TB, we should know the state of PSW[N] from TB->FLAGS.  */
        g_assert_not_reached();
    }

    /* If PSW[B] is set, the B,GATE insn would trap. */
    if (ctx->psw_xb & PSW_B) {
        goto do_sigill;
    }

    switch (ctx->base.pc_first) {
    case 0x00: /* Null pointer call */
        gen_excp_1(EXCP_IMP);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    case 0xb0: /* LWS */
        gen_excp_1(EXCP_SYSCALL_LWS);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    case 0xe0: /* SET_THREAD_POINTER */
        {
            DisasIAQE next = { .base = tcg_temp_new_i64() };

            tcg_gen_st_i64(cpu_gr[26], tcg_env,
                           offsetof(CPUHPPAState, cr[27]));
            tcg_gen_ori_i64(next.base, cpu_gr[31], PRIV_USER);
            install_iaq_entries(ctx, &next, NULL);
            ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
        }
        break;

    case 0x100: /* SYSCALL */
        gen_excp_1(EXCP_SYSCALL);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    default:
    do_sigill:
        gen_excp_1(EXCP_ILL);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;
    }
}
#endif
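
/*
 * For reference (a sketch of the guest ABI as we understand it, not
 * something this file defines): userland enters the gateway page with
 * e.g. "ble 0x100(%sr2, %r0)" for a syscall, which leaves the return
 * address in %r31 -- hence SET_THREAD_POINTER above resumes at %r31
 * with PRIV_USER or'ed in.
 */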

static bool trans_nop(DisasContext *ctx, arg_nop *a)
{
    ctx->null_cond = cond_make_f();
    return true;
}

static bool trans_break(DisasContext *ctx, arg_break *a)
{
    return gen_excp_iir(ctx, EXCP_BREAK);
}

static bool trans_sync(DisasContext *ctx, arg_sync *a)
{
    /* No point in nullifying the memory barrier.  */
    tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);

    ctx->null_cond = cond_make_f();
    return true;
}

static bool trans_mfia(DisasContext *ctx, arg_mfia *a)
{
    TCGv_i64 dest = dest_gpr(ctx, a->t);

    copy_iaoq_entry(ctx, dest, &ctx->iaq_f);
    tcg_gen_andi_i64(dest, dest, -4);

    save_gpr(ctx, a->t, dest);
    ctx->null_cond = cond_make_f();
    return true;
}

static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a)
{
    unsigned rt = a->t;
    unsigned rs = a->sp;
    TCGv_i64 t0 = tcg_temp_new_i64();

    load_spr(ctx, t0, rs);
    tcg_gen_shri_i64(t0, t0, 32);

    save_gpr(ctx, rt, t0);

    ctx->null_cond = cond_make_f();
    return true;
}

static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a)
{
    unsigned rt = a->t;
    unsigned ctl = a->r;
    TCGv_i64 tmp;

    switch (ctl) {
    case CR_SAR:
        if (a->e == 0) {
            /* MFSAR without ,W masks low 5 bits.  */
            tmp = dest_gpr(ctx, rt);
            tcg_gen_andi_i64(tmp, cpu_sar, 31);
            save_gpr(ctx, rt, tmp);
            goto done;
        }
        save_gpr(ctx, rt, cpu_sar);
        goto done;
    case CR_IT: /* Interval Timer */
        /* FIXME: Respect PSW_S bit.  */
        nullify_over(ctx);
        tmp = dest_gpr(ctx, rt);
        if (translator_io_start(&ctx->base)) {
            ctx->base.is_jmp = DISAS_IAQ_N_STALE;
        }
        gen_helper_read_interval_timer(tmp);
        save_gpr(ctx, rt, tmp);
        return nullify_end(ctx);
    case 26:
    case 27:
        break;
    default:
        /* All other control registers are privileged.  */
        CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
        break;
    }

    tmp = tcg_temp_new_i64();
    tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
    save_gpr(ctx, rt, tmp);

 done:
    ctx->null_cond = cond_make_f();
    return true;
}

static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a)
{
    unsigned rr = a->r;
    unsigned rs = a->sp;
    TCGv_i64 tmp;

    if (rs >= 5) {
        CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
    }
    nullify_over(ctx);

    tmp = tcg_temp_new_i64();
    tcg_gen_shli_i64(tmp, load_gpr(ctx, rr), 32);

    if (rs >= 4) {
        tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUHPPAState, sr[rs]));
        ctx->tb_flags &= ~TB_FLAG_SR_SAME;
    } else {
        tcg_gen_mov_i64(cpu_sr[rs], tmp);
    }

    return nullify_end(ctx);
}

static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
{
    unsigned ctl = a->t;
    TCGv_i64 reg;
    TCGv_i64 tmp;

    if (ctl == CR_SAR) {
        reg = load_gpr(ctx, a->r);
        tmp = tcg_temp_new_i64();
        tcg_gen_andi_i64(tmp, reg, ctx->is_pa20 ? 63 : 31);
        save_or_nullify(ctx, cpu_sar, tmp);

        ctx->null_cond = cond_make_f();
        return true;
    }

    /* All other control registers are privileged or read-only.  */
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);

#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);

    if (ctx->is_pa20) {
        reg = load_gpr(ctx, a->r);
    } else {
        reg = tcg_temp_new_i64();
        tcg_gen_ext32u_i64(reg, load_gpr(ctx, a->r));
    }

    switch (ctl) {
    case CR_IT:
        if (translator_io_start(&ctx->base)) {
            ctx->base.is_jmp = DISAS_IAQ_N_STALE;
        }
        gen_helper_write_interval_timer(tcg_env, reg);
        break;
    case CR_EIRR:
        /* Helper modifies interrupt lines and is therefore IO.  */
        translator_io_start(&ctx->base);
        gen_helper_write_eirr(tcg_env, reg);
        /* Exit to re-evaluate interrupts in the main loop. */
        ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
        break;

    case CR_IIASQ:
    case CR_IIAOQ:
        /* FIXME: Respect PSW_Q bit */
        /* The write advances the queue and stores to the back element.  */
        tmp = tcg_temp_new_i64();
        tcg_gen_ld_i64(tmp, tcg_env,
                       offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
        tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
        tcg_gen_st_i64(reg, tcg_env,
                       offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
        break;

    case CR_PID1:
    case CR_PID2:
    case CR_PID3:
    case CR_PID4:
        tcg_gen_st_i64(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
#ifndef CONFIG_USER_ONLY
        gen_helper_change_prot_id(tcg_env);
#endif
        break;

    case CR_EIEM:
        /* Exit to re-evaluate interrupts in the main loop. */
        ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
        /* FALLTHRU */
    default:
        tcg_gen_st_i64(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
        break;
    }
    return nullify_end(ctx);
#endif
}
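
/*
 * Illustrative consequence of the IIASQ/IIAOQ handling above (our own
 * example): an OS restoring the interruption instruction address queue
 * writes CR_IIAOQ twice -- after the first write the new value sits in
 * the back element, and the second write pushes it into the front
 * element while installing the second value behind it.
 */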

static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_not_i64(tmp, load_gpr(ctx, a->r));
    tcg_gen_andi_i64(tmp, tmp, ctx->is_pa20 ? 63 : 31);
    save_or_nullify(ctx, cpu_sar, tmp);

    ctx->null_cond = cond_make_f();
    return true;
}

static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a)
{
    TCGv_i64 dest = dest_gpr(ctx, a->t);

#ifdef CONFIG_USER_ONLY
    /* We don't implement space registers in user mode. */
    tcg_gen_movi_i64(dest, 0);
#else
    tcg_gen_mov_i64(dest, space_select(ctx, a->sp, load_gpr(ctx, a->b)));
    tcg_gen_shri_i64(dest, dest, 32);
#endif
    save_gpr(ctx, a->t, dest);

    ctx->null_cond = cond_make_f();
    return true;
}

static bool trans_rsm(DisasContext *ctx, arg_rsm *a)
{
#ifdef CONFIG_USER_ONLY
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#else
    TCGv_i64 tmp;

    /* HP-UX 11i and HP ODE use rsm for read-access to PSW */
    if (a->i) {
        CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    }

    nullify_over(ctx);

    tmp = tcg_temp_new_i64();
    tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, psw));
    tcg_gen_andi_i64(tmp, tmp, ~a->i);
    gen_helper_swap_system_mask(tmp, tcg_env, tmp);
    save_gpr(ctx, a->t, tmp);

    /* Exit the TB to recognize new interrupts, e.g. PSW_M.  */
    ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
    return nullify_end(ctx);
#endif
}
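
/*
 * Note on the privilege check above (our own gloss): with i == 0 the
 * "rsm 0,rt" form clears no bits, so it degenerates into a plain read
 * of the PSW system mask into rt; that is the HP-UX read-access idiom
 * mentioned above, and the reason the check is skipped for i == 0.
 */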

static bool trans_ssm(DisasContext *ctx, arg_ssm *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i64();
    tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, psw));
    tcg_gen_ori_i64(tmp, tmp, a->i);
    gen_helper_swap_system_mask(tmp, tcg_env, tmp);
    save_gpr(ctx, a->t, tmp);

    /* Exit the TB to recognize new interrupts, e.g. PSW_I.  */
    ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
    return nullify_end(ctx);
#endif
}

static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_i64 tmp, reg;
    nullify_over(ctx);

    reg = load_gpr(ctx, a->r);
    tmp = tcg_temp_new_i64();
    gen_helper_swap_system_mask(tmp, tcg_env, reg);

    /* Exit the TB to recognize new interrupts.  */
    ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
    return nullify_end(ctx);
#endif
}

static bool do_rfi(DisasContext *ctx, bool rfi_r)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);

    if (rfi_r) {
        gen_helper_rfi_r(tcg_env);
    } else {
        gen_helper_rfi(tcg_env);
    }
    /* Exit the TB to recognize new interrupts.  */
    tcg_gen_exit_tb(NULL, 0);
    ctx->base.is_jmp = DISAS_NORETURN;

    return nullify_end(ctx);
#endif
}

static bool trans_rfi(DisasContext *ctx, arg_rfi *a)
{
    return do_rfi(ctx, false);
}

static bool trans_rfi_r(DisasContext *ctx, arg_rfi_r *a)
{
    return do_rfi(ctx, true);
}

static bool trans_halt(DisasContext *ctx, arg_halt *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    set_psw_xb(ctx, 0);
    nullify_over(ctx);
    gen_helper_halt(tcg_env);
    ctx->base.is_jmp = DISAS_NORETURN;
    return nullify_end(ctx);
#endif
}

static bool trans_reset(DisasContext *ctx, arg_reset *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    set_psw_xb(ctx, 0);
    nullify_over(ctx);
    gen_helper_reset(tcg_env);
    ctx->base.is_jmp = DISAS_NORETURN;
    return nullify_end(ctx);
#endif
}

static bool do_getshadowregs(DisasContext *ctx)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    nullify_over(ctx);
    tcg_gen_ld_i64(cpu_gr[1], tcg_env, offsetof(CPUHPPAState, shadow[0]));
    tcg_gen_ld_i64(cpu_gr[8], tcg_env, offsetof(CPUHPPAState, shadow[1]));
    tcg_gen_ld_i64(cpu_gr[9], tcg_env, offsetof(CPUHPPAState, shadow[2]));
    tcg_gen_ld_i64(cpu_gr[16], tcg_env, offsetof(CPUHPPAState, shadow[3]));
    tcg_gen_ld_i64(cpu_gr[17], tcg_env, offsetof(CPUHPPAState, shadow[4]));
    tcg_gen_ld_i64(cpu_gr[24], tcg_env, offsetof(CPUHPPAState, shadow[5]));
    tcg_gen_ld_i64(cpu_gr[25], tcg_env, offsetof(CPUHPPAState, shadow[6]));
    return nullify_end(ctx);
}

static bool do_putshadowregs(DisasContext *ctx)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    nullify_over(ctx);
    tcg_gen_st_i64(cpu_gr[1], tcg_env, offsetof(CPUHPPAState, shadow[0]));
    tcg_gen_st_i64(cpu_gr[8], tcg_env, offsetof(CPUHPPAState, shadow[1]));
    tcg_gen_st_i64(cpu_gr[9], tcg_env, offsetof(CPUHPPAState, shadow[2]));
    tcg_gen_st_i64(cpu_gr[16], tcg_env, offsetof(CPUHPPAState, shadow[3]));
    tcg_gen_st_i64(cpu_gr[17], tcg_env, offsetof(CPUHPPAState, shadow[4]));
    tcg_gen_st_i64(cpu_gr[24], tcg_env, offsetof(CPUHPPAState, shadow[5]));
    tcg_gen_st_i64(cpu_gr[25], tcg_env, offsetof(CPUHPPAState, shadow[6]));
    return nullify_end(ctx);
}

static bool trans_getshadowregs(DisasContext *ctx, arg_getshadowregs *a)
{
    return do_getshadowregs(ctx);
}

static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a)
{
    if (a->m) {
        TCGv_i64 dest = dest_gpr(ctx, a->b);
        TCGv_i64 src1 = load_gpr(ctx, a->b);
        TCGv_i64 src2 = load_gpr(ctx, a->x);

        /* The only thing we need to do is the base register modification.  */
        tcg_gen_add_i64(dest, src1, src2);
        save_gpr(ctx, a->b, dest);
    }
    ctx->null_cond = cond_make_f();
    return true;
}

static bool trans_fic(DisasContext *ctx, arg_ldst *a)
{
    /* End TB for flush instruction cache, so we pick up new insns.  */
    ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    return trans_nop_addrx(ctx, a);
}
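
/*
 * PROBE, below, queries access rights without faulting: for example
 * (our own sketch of the semantics) "probe,r addr(sp,b),ri,t" sets t
 * nonzero when a read at the privilege level given in ri would
 * succeed, else zero; the helper performs the actual access check.
 */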

static bool trans_probe(DisasContext *ctx, arg_probe *a)
{
    TCGv_i64 dest, ofs;
    TCGv_i32 level, want;
    TCGv_i64 addr;

    nullify_over(ctx);

    dest = dest_gpr(ctx, a->t);
    form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);

    if (a->imm) {
        level = tcg_constant_i32(a->ri & 3);
    } else {
        level = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(level, load_gpr(ctx, a->ri));
        tcg_gen_andi_i32(level, level, 3);
    }
    want = tcg_constant_i32(a->write ? PAGE_WRITE : PAGE_READ);

    gen_helper_probe(dest, tcg_env, addr, level, want);

    save_gpr(ctx, a->t, dest);
    return nullify_end(ctx);
}

static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a)
{
    if (ctx->is_pa20) {
        return false;
    }
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_i64 addr;
    TCGv_i64 ofs, reg;

    nullify_over(ctx);

    form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
    reg = load_gpr(ctx, a->r);
    if (a->addr) {
        gen_helper_itlba_pa11(tcg_env, addr, reg);
    } else {
        gen_helper_itlbp_pa11(tcg_env, addr, reg);
    }

    /* Exit TB for TLB change if mmu is enabled.  */
    if (ctx->tb_flags & PSW_C) {
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    }
    return nullify_end(ctx);
#endif
}

static bool do_pxtlb(DisasContext *ctx, arg_ldst *a, bool local)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_i64 addr;
    TCGv_i64 ofs;

    nullify_over(ctx);

    form_gva(ctx, &addr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);

    /*
     * Page align now, rather than later, so that we can add in the
     * page_size field from pa2.0 from the low 4 bits of GR[b].
     */
    tcg_gen_andi_i64(addr, addr, TARGET_PAGE_MASK);
    if (ctx->is_pa20) {
        tcg_gen_deposit_i64(addr, addr, load_gpr(ctx, a->b), 0, 4);
    }

    if (local) {
        gen_helper_ptlb_l(tcg_env, addr);
    } else {
        gen_helper_ptlb(tcg_env, addr);
    }

    if (a->m) {
        save_gpr(ctx, a->b, ofs);
    }

    /* Exit TB for TLB change if mmu is enabled.  */
    if (ctx->tb_flags & PSW_C) {
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    }
    return nullify_end(ctx);
#endif
}

static bool trans_pxtlb(DisasContext *ctx, arg_ldst *a)
{
    return do_pxtlb(ctx, a, false);
}

static bool trans_pxtlb_l(DisasContext *ctx, arg_ldst *a)
{
    return ctx->is_pa20 && do_pxtlb(ctx, a, true);
}

static bool trans_pxtlbe(DisasContext *ctx, arg_ldst *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);

    trans_nop_addrx(ctx, a);
    gen_helper_ptlbe(tcg_env);

    /* Exit TB for TLB change if mmu is enabled.  */
    if (ctx->tb_flags & PSW_C) {
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    }
    return nullify_end(ctx);
#endif
}
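
/*
 * The deposit in do_pxtlb above means that on pa2.0 a purge request
 * carries a page-size code in GR[b]{60..63}; presumably (our own
 * reading of the code, not a statement from the manual) a value of 0
 * there names the base 4 KiB page and larger encodings ask the helper
 * to purge a correspondingly larger range.
 */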

/*
 * Implement the pcxl and pcxl2 Fast TLB Insert instructions.
 * See
 *     https://parisc.wiki.kernel.org/images-parisc/a/a9/Pcxl2_ers.pdf
 *     page 13-9 (195/206)
 */
static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a)
{
    if (ctx->is_pa20) {
        return false;
    }
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_i64 addr, atl, stl;
    TCGv_i64 reg;

    nullify_over(ctx);

    /*
     * FIXME:
     *  if (not (pcxl or pcxl2))
     *      return gen_illegal(ctx);
     */

    atl = tcg_temp_new_i64();
    stl = tcg_temp_new_i64();
    addr = tcg_temp_new_i64();

    tcg_gen_ld32u_i64(stl, tcg_env,
                      a->data ? offsetof(CPUHPPAState, cr[CR_ISR])
                      : offsetof(CPUHPPAState, cr[CR_IIASQ]));
    tcg_gen_ld32u_i64(atl, tcg_env,
                      a->data ? offsetof(CPUHPPAState, cr[CR_IOR])
                      : offsetof(CPUHPPAState, cr[CR_IIAOQ]));
    tcg_gen_shli_i64(stl, stl, 32);
    tcg_gen_or_i64(addr, atl, stl);

    reg = load_gpr(ctx, a->r);
    if (a->addr) {
        gen_helper_itlba_pa11(tcg_env, addr, reg);
    } else {
        gen_helper_itlbp_pa11(tcg_env, addr, reg);
    }

    /* Exit TB for TLB change if mmu is enabled.  */
    if (ctx->tb_flags & PSW_C) {
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    }
    return nullify_end(ctx);
#endif
}

static bool trans_ixtlbt(DisasContext *ctx, arg_ixtlbt *a)
{
    if (!ctx->is_pa20) {
        return false;
    }
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);
    {
        TCGv_i64 src1 = load_gpr(ctx, a->r1);
        TCGv_i64 src2 = load_gpr(ctx, a->r2);

        if (a->data) {
            gen_helper_idtlbt_pa20(tcg_env, src1, src2);
        } else {
            gen_helper_iitlbt_pa20(tcg_env, src1, src2);
        }
    }
    /* Exit TB for TLB change if mmu is enabled.  */
    if (ctx->tb_flags & PSW_C) {
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    }
    return nullify_end(ctx);
#endif
}

static bool trans_lpa(DisasContext *ctx, arg_ldst *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_i64 vaddr;
    TCGv_i64 ofs, paddr;

    nullify_over(ctx);

    form_gva(ctx, &vaddr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);

    paddr = tcg_temp_new_i64();
    gen_helper_lpa(paddr, tcg_env, vaddr);

    /* Note that physical address result overrides base modification.  */
    if (a->m) {
        save_gpr(ctx, a->b, ofs);
    }
    save_gpr(ctx, a->t, paddr);

    return nullify_end(ctx);
#endif
}

static bool trans_lci(DisasContext *ctx, arg_lci *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);

    /* The Coherence Index is an implementation-defined function of the
       physical address.  Two addresses with the same CI have a coherent
       view of the cache.  Our implementation is to return 0 for all,
       since the entire address space is coherent.  */
    save_gpr(ctx, a->t, ctx->zero);

    ctx->null_cond = cond_make_f();
    return true;
}

static bool trans_add(DisasContext *ctx, arg_rrr_cf_d_sh *a)
{
    return do_add_reg(ctx, a, false, false, false, false);
}

static bool trans_add_l(DisasContext *ctx, arg_rrr_cf_d_sh *a)
{
    return do_add_reg(ctx, a, true, false, false, false);
}

static bool trans_add_tsv(DisasContext *ctx, arg_rrr_cf_d_sh *a)
{
    return do_add_reg(ctx, a, false, true, false, false);
}

static bool trans_add_c(DisasContext *ctx, arg_rrr_cf_d_sh *a)
{
    return do_add_reg(ctx, a, false, false, false, true);
}

static bool trans_add_c_tsv(DisasContext *ctx, arg_rrr_cf_d_sh *a)
{
    return do_add_reg(ctx, a, false, true, false, true);
}

static bool trans_sub(DisasContext *ctx, arg_rrr_cf_d *a)
{
    return do_sub_reg(ctx, a, false, false, false);
}

static bool trans_sub_tsv(DisasContext *ctx, arg_rrr_cf_d *a)
{
    return do_sub_reg(ctx, a, true, false, false);
}

static bool trans_sub_tc(DisasContext *ctx, arg_rrr_cf_d *a)
{
    return do_sub_reg(ctx, a, false, false, true);
}

static bool trans_sub_tsv_tc(DisasContext *ctx, arg_rrr_cf_d *a)
{
    return do_sub_reg(ctx, a, true, false, true);
}

static bool trans_sub_b(DisasContext *ctx, arg_rrr_cf_d *a)
{
    return do_sub_reg(ctx, a, false, true, false);
}

static bool trans_sub_b_tsv(DisasContext *ctx, arg_rrr_cf_d *a)
{
    return do_sub_reg(ctx, a, true, true, false);
}

static bool trans_andcm(DisasContext *ctx, arg_rrr_cf_d *a)
{
    return do_log_reg(ctx, a, tcg_gen_andc_i64);
}

static bool trans_and(DisasContext *ctx, arg_rrr_cf_d *a)
{
    return do_log_reg(ctx, a, tcg_gen_and_i64);
}

static bool trans_or(DisasContext *ctx, arg_rrr_cf_d *a)
{
    if (a->cf == 0) {
        unsigned r2 = a->r2;
        unsigned r1 = a->r1;
        unsigned rt = a->t;

        if (rt == 0) { /* NOP */
            ctx->null_cond = cond_make_f();
            return true;
        }
        if (r2 == 0) { /* COPY */
            if (r1 == 0) {
                TCGv_i64 dest = dest_gpr(ctx, rt);
                tcg_gen_movi_i64(dest, 0);
                save_gpr(ctx, rt, dest);
            } else {
                save_gpr(ctx, rt, cpu_gr[r1]);
            }
            ctx->null_cond = cond_make_f();
            return true;
        }
#ifndef CONFIG_USER_ONLY
        /* These are QEMU extensions and are nops in the real architecture:
         *
         * or %r10,%r10,%r10 -- idle loop; wait for interrupt
         * or %r31,%r31,%r31 -- death loop; offline cpu
         *                      currently implemented as idle.
         */
        if ((rt == 10 || rt == 31) && r1 == rt && r2 == rt) { /* PAUSE */
            /* No need to check for supervisor, as userland can only pause
               until the next timer interrupt.  */

            set_psw_xb(ctx, 0);

            nullify_over(ctx);

            /* Advance the instruction queue.  */
            install_iaq_entries(ctx, &ctx->iaq_b, NULL);
            nullify_set(ctx, 0);

            /* Tell the qemu main loop to halt until this cpu has work.  */
            tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
                           offsetof(CPUState, halted) - offsetof(HPPACPU, env));
            gen_excp_1(EXCP_HALTED);
            ctx->base.is_jmp = DISAS_NORETURN;

            return nullify_end(ctx);
        }
#endif
    }
    return do_log_reg(ctx, a, tcg_gen_or_i64);
}

static bool trans_xor(DisasContext *ctx, arg_rrr_cf_d *a)
{
    return do_log_reg(ctx, a, tcg_gen_xor_i64);
}

static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf_d *a)
{
    TCGv_i64 tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d);
    return nullify_end(ctx);
}

static bool trans_uxor(DisasContext *ctx, arg_rrr_cf_d *a)
{
    TCGv_i64 tcg_r1, tcg_r2, dest;

    if (a->cf) {
        nullify_over(ctx);
    }

    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    dest = dest_gpr(ctx, a->t);

    tcg_gen_xor_i64(dest, tcg_r1, tcg_r2);
    save_gpr(ctx, a->t, dest);

    ctx->null_cond = do_unit_zero_cond(a->cf, a->d, dest);
    return nullify_end(ctx);
}

static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf_d *a, bool is_tc)
{
    TCGv_i64 tcg_r1, tcg_r2, tmp;

    if (a->cf == 0) {
        tcg_r2 = load_gpr(ctx, a->r2);
        tmp = dest_gpr(ctx, a->t);

        if (a->r1 == 0) {
            /* UADDCM r0,src,dst is the common idiom for dst = ~src. */
            tcg_gen_not_i64(tmp, tcg_r2);
        } else {
            /*
             * Recall that r1 - r2 == r1 + ~r2 + 1.
             * Thus r1 + ~r2 == r1 - r2 - 1,
             * which does not require an extra temporary.
             */
            tcg_r1 = load_gpr(ctx, a->r1);
            tcg_gen_sub_i64(tmp, tcg_r1, tcg_r2);
            tcg_gen_subi_i64(tmp, tmp, 1);
        }
        save_gpr(ctx, a->t, tmp);
        ctx->null_cond = cond_make_f();
        return true;
    }

    nullify_over(ctx);
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    tmp = tcg_temp_new_i64();
    tcg_gen_not_i64(tmp, tcg_r2);
    do_unit_addsub(ctx, a->t, tcg_r1, tmp, a->cf, a->d, is_tc, true);
    return nullify_end(ctx);
}

static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf_d *a)
{
    return do_uaddcm(ctx, a, false);
}

static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf_d *a)
{
    return do_uaddcm(ctx, a, true);
}

static bool do_dcor(DisasContext *ctx, arg_rr_cf_d *a, bool is_i)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i64();
    tcg_gen_extract2_i64(tmp, cpu_psw_cb, cpu_psw_cb_msb, 4);
    if (!is_i) {
        tcg_gen_not_i64(tmp, tmp);
    }
    tcg_gen_andi_i64(tmp, tmp, (uint64_t)0x1111111111111111ull);
    tcg_gen_muli_i64(tmp, tmp, 6);
    do_unit_addsub(ctx, a->t, load_gpr(ctx, a->r), tmp,
                   a->cf, a->d, false, is_i);
    return nullify_end(ctx);
}

static bool trans_dcor(DisasContext *ctx, arg_rr_cf_d *a)
{
    return do_dcor(ctx, a, false);
}

static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf_d *a)
{
    return do_dcor(ctx, a, true);
}

static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a)
{
    TCGv_i64 dest, add1, add2, addc, in1, in2;

    nullify_over(ctx);

    in1 = load_gpr(ctx, a->r1);
    in2 = load_gpr(ctx, a->r2);

    add1 = tcg_temp_new_i64();
    add2 = tcg_temp_new_i64();
    addc = tcg_temp_new_i64();
    dest = tcg_temp_new_i64();

    /* Form R1 << 1 | PSW[CB]{8}.  */
    tcg_gen_add_i64(add1, in1, in1);
    tcg_gen_add_i64(add1, add1, get_psw_carry(ctx, false));

    /*
     * Add or subtract R2, depending on PSW[V].  Proper computation of
     * carry requires that we subtract via + ~R2 + 1, as described in
     * the manual.  By extracting and masking V, we can produce the
     * proper inputs to the addition without movcond.
     */
    tcg_gen_sextract_i64(addc, cpu_psw_v, 31, 1);
    tcg_gen_xor_i64(add2, in2, addc);
    tcg_gen_andi_i64(addc, addc, 1);

    tcg_gen_addcio_i64(dest, cpu_psw_cb_msb, add1, add2, addc);

    /* Write back the result register.  */
    save_gpr(ctx, a->t, dest);

    /* Write back PSW[CB].  */
    tcg_gen_xor_i64(cpu_psw_cb, add1, add2);
    tcg_gen_xor_i64(cpu_psw_cb, cpu_psw_cb, dest);

    /*
     * Write back PSW[V] for the division step.
     * Shift cb{8} from where it lives in bit 32 to bit 31,
     * so that it overlaps r2{32} in bit 31.
     */
    tcg_gen_shri_i64(cpu_psw_v, cpu_psw_cb, 1);
    tcg_gen_xor_i64(cpu_psw_v, cpu_psw_v, in2);

    /* Install the new nullification.  */
    if (a->cf) {
        TCGv_i64 sv = NULL, uv = NULL;
        if (cond_need_sv(a->cf >> 1)) {
            sv = do_add_sv(ctx, dest, add1, add2, in1, 1, false);
        } else if (cond_need_cb(a->cf >> 1)) {
            uv = do_add_uv(ctx, cpu_psw_cb, NULL, in1, 1, false);
        }
        ctx->null_cond = do_cond(ctx, a->cf, false, dest, uv, sv);
    }

    return nullify_end(ctx);
}
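
/*
 * Usage sketch (the classic PA-RISC software-division idiom,
 * paraphrased rather than quoted from the manual): a divide routine
 * seeds PSW[V] with an initial subtraction, then issues one DS per
 * quotient bit -- 32 of them for a 32-bit divide -- each step shifting
 * a quotient bit in through the carry while conditionally adding or
 * subtracting the divisor held in r2, exactly the add1/add2/addc
 * dance implemented above.
 */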

static bool trans_addi(DisasContext *ctx, arg_rri_cf *a)
{
    return do_add_imm(ctx, a, false, false);
}

static bool trans_addi_tsv(DisasContext *ctx, arg_rri_cf *a)
{
    return do_add_imm(ctx, a, true, false);
}

static bool trans_addi_tc(DisasContext *ctx, arg_rri_cf *a)
{
    return do_add_imm(ctx, a, false, true);
}

static bool trans_addi_tc_tsv(DisasContext *ctx, arg_rri_cf *a)
{
    return do_add_imm(ctx, a, true, true);
}

static bool trans_subi(DisasContext *ctx, arg_rri_cf *a)
{
    return do_sub_imm(ctx, a, false);
}

static bool trans_subi_tsv(DisasContext *ctx, arg_rri_cf *a)
{
    return do_sub_imm(ctx, a, true);
}

static bool trans_cmpiclr(DisasContext *ctx, arg_rri_cf_d *a)
{
    TCGv_i64 tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }

    tcg_im = tcg_constant_i64(a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    do_cmpclr(ctx, a->t, tcg_im, tcg_r2, a->cf, a->d);

    return nullify_end(ctx);
}

static bool do_multimedia(DisasContext *ctx, arg_rrr *a,
                          void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 r1, r2, dest;

    if (!ctx->is_pa20) {
        return false;
    }

    nullify_over(ctx);

    r1 = load_gpr(ctx, a->r1);
    r2 = load_gpr(ctx, a->r2);
    dest = dest_gpr(ctx, a->t);

    fn(dest, r1, r2);
    save_gpr(ctx, a->t, dest);

    return nullify_end(ctx);
}

static bool do_multimedia_sh(DisasContext *ctx, arg_rri *a,
                             void (*fn)(TCGv_i64, TCGv_i64, int64_t))
{
    TCGv_i64 r, dest;

    if (!ctx->is_pa20) {
        return false;
    }

    nullify_over(ctx);

    r = load_gpr(ctx, a->r);
    dest = dest_gpr(ctx, a->t);

    fn(dest, r, a->i);
    save_gpr(ctx, a->t, dest);

    return nullify_end(ctx);
}

static bool do_multimedia_shadd(DisasContext *ctx, arg_rrr_sh *a,
                                void (*fn)(TCGv_i64, TCGv_i64,
                                           TCGv_i64, TCGv_i32))
{
    TCGv_i64 r1, r2, dest;

    if (!ctx->is_pa20) {
        return false;
    }

    nullify_over(ctx);

    r1 = load_gpr(ctx, a->r1);
    r2 = load_gpr(ctx, a->r2);
    dest = dest_gpr(ctx, a->t);

    fn(dest, r1, r2, tcg_constant_i32(a->sh));
    save_gpr(ctx, a->t, dest);

    return nullify_end(ctx);
}

static bool trans_hadd(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, tcg_gen_vec_add16_i64);
}

static bool trans_hadd_ss(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, gen_helper_hadd_ss);
}

static bool trans_hadd_us(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, gen_helper_hadd_us);
}

static bool trans_havg(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, gen_helper_havg);
}

static bool trans_hshl(DisasContext *ctx, arg_rri *a)
{
    return do_multimedia_sh(ctx, a, tcg_gen_vec_shl16i_i64);
}

static bool trans_hshr_s(DisasContext *ctx, arg_rri *a)
{
    return do_multimedia_sh(ctx, a, tcg_gen_vec_sar16i_i64);
}

static bool trans_hshr_u(DisasContext *ctx, arg_rri *a)
{
    return do_multimedia_sh(ctx, a, tcg_gen_vec_shr16i_i64);
}

static bool trans_hshladd(DisasContext *ctx, arg_rrr_sh *a)
{
    return do_multimedia_shadd(ctx, a, gen_helper_hshladd);
}

static bool trans_hshradd(DisasContext *ctx, arg_rrr_sh *a)
{
    return do_multimedia_shadd(ctx, a, gen_helper_hshradd);
}

static bool trans_hsub(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, tcg_gen_vec_sub16_i64);
}

static bool trans_hsub_ss(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, gen_helper_hsub_ss);
}

static bool trans_hsub_us(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, gen_helper_hsub_us);
}

static void gen_mixh_l(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
{
    uint64_t mask = 0xffff0000ffff0000ull;
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_andi_i64(tmp, r2, mask);
    tcg_gen_andi_i64(dst, r1, mask);
    tcg_gen_shri_i64(tmp, tmp, 16);
    tcg_gen_or_i64(dst, dst, tmp);
}

static bool trans_mixh_l(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, gen_mixh_l);
}

static void gen_mixh_r(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
{
    uint64_t mask = 0x0000ffff0000ffffull;
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_andi_i64(tmp, r1, mask);
    tcg_gen_andi_i64(dst, r2, mask);
    tcg_gen_shli_i64(tmp, tmp, 16);
    tcg_gen_or_i64(dst, dst, tmp);
}

static bool trans_mixh_r(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, gen_mixh_r);
}

static void gen_mixw_l(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_shri_i64(tmp, r2, 32);
    tcg_gen_deposit_i64(dst, r1, tmp, 0, 32);
}

static bool trans_mixw_l(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, gen_mixw_l);
}

static void gen_mixw_r(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
{
    tcg_gen_deposit_i64(dst, r2, r1, 32, 32);
}

static bool trans_mixw_r(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, gen_mixw_r);
}
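
/*
 * Lane diagram for the helpers above (our own illustration): with
 * halfword lanes {a0,a1,a2,a3} in r1 and {b0,b1,b2,b3} in r2, numbered
 * from the most significant end, mixh,l yields {a0,b0,a2,b2} and
 * mixh,r yields {a1,b1,a3,b3}; likewise mixw,l gives {a0,b0} and
 * mixw,r gives {a1,b1} on the two word lanes.
 */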

static bool trans_permh(DisasContext *ctx, arg_permh *a)
{
    TCGv_i64 r, t0, t1, t2, t3;

    if (!ctx->is_pa20) {
        return false;
    }

    nullify_over(ctx);

    r = load_gpr(ctx, a->r1);
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    t3 = tcg_temp_new_i64();

    tcg_gen_extract_i64(t0, r, (3 - a->c0) * 16, 16);
    tcg_gen_extract_i64(t1, r, (3 - a->c1) * 16, 16);
    tcg_gen_extract_i64(t2, r, (3 - a->c2) * 16, 16);
    tcg_gen_extract_i64(t3, r, (3 - a->c3) * 16, 16);

    tcg_gen_deposit_i64(t0, t1, t0, 16, 48);
    tcg_gen_deposit_i64(t2, t3, t2, 16, 48);
    tcg_gen_deposit_i64(t0, t2, t0, 32, 32);

    save_gpr(ctx, a->t, t0);
    return nullify_end(ctx);
}

static bool trans_ld(DisasContext *ctx, arg_ldst *a)
{
    if (ctx->is_pa20) {
        /*
         * With pa20, LDB, LDH, LDW, LDD to %g0 are prefetches.
         * Any base modification still occurs.
         */
        if (a->t == 0) {
            return trans_nop_addrx(ctx, a);
        }
    } else if (a->size > MO_32) {
        return gen_illegal(ctx);
    }
    return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0,
                   a->disp, a->sp, a->m, a->size);
}

static bool trans_st(DisasContext *ctx, arg_ldst *a)
{
    assert(a->x == 0 && a->scale == 0);
    if (!ctx->is_pa20 && a->size > MO_32) {
        return gen_illegal(ctx);
    }
    return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size);
}

static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
{
    MemOp mop = mo_endian(ctx) | MO_ALIGN | a->size;
    TCGv_i64 dest, ofs;
    TCGv_i64 addr;

    if (!ctx->is_pa20 && a->size > MO_32) {
        return gen_illegal(ctx);
    }

    nullify_over(ctx);

    if (a->m) {
        /* Base register modification.  Make sure if RT == RB,
           we see the result of the load.  */
        dest = tcg_temp_new_i64();
    } else {
        dest = dest_gpr(ctx, a->t);
    }

    form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? 3 : 0,
             a->disp, a->sp, a->m, MMU_DISABLED(ctx));

    /*
     * For hppa1.1, LDCW is undefined unless aligned mod 16.
     * However actual hardware succeeds with aligned mod 4.
     * Detect this case and log a GUEST_ERROR.
     *
     * TODO: HPPA64 relaxes the over-alignment requirement
     * with the ,co completer.
     */
    gen_helper_ldc_check(addr);

    tcg_gen_atomic_xchg_i64(dest, addr, ctx->zero, ctx->mmu_idx, mop);

    if (a->m) {
        save_gpr(ctx, a->b, ofs);
    }
    save_gpr(ctx, a->t, dest);

    return nullify_end(ctx);
}
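
/*
 * Note on LDCW/LDCD above (our own gloss): the atomic exchange with
 * zero both returns the old memory word and clears it, which is the
 * parisc spinlock-acquire primitive -- a lock is free when nonzero
 * and taken when zero, so a nonzero result means the lock was won.
 */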

static bool trans_stby(DisasContext *ctx, arg_stby *a)
{
    TCGv_i64 ofs, val;
    TCGv_i64 addr;

    nullify_over(ctx);

    form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
             MMU_DISABLED(ctx));
    val = load_gpr(ctx, a->r);
    if (a->a) {
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            gen_helper_stby_e_parallel(tcg_env, addr, val);
        } else {
            gen_helper_stby_e(tcg_env, addr, val);
        }
    } else {
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            gen_helper_stby_b_parallel(tcg_env, addr, val);
        } else {
            gen_helper_stby_b(tcg_env, addr, val);
        }
    }
    if (a->m) {
        tcg_gen_andi_i64(ofs, ofs, ~3);
        save_gpr(ctx, a->b, ofs);
    }

    return nullify_end(ctx);
}

static bool trans_stdby(DisasContext *ctx, arg_stby *a)
{
    TCGv_i64 ofs, val;
    TCGv_i64 addr;

    if (!ctx->is_pa20) {
        return false;
    }
    nullify_over(ctx);

    form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
             MMU_DISABLED(ctx));
    val = load_gpr(ctx, a->r);
    if (a->a) {
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            gen_helper_stdby_e_parallel(tcg_env, addr, val);
        } else {
            gen_helper_stdby_e(tcg_env, addr, val);
        }
    } else {
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            gen_helper_stdby_b_parallel(tcg_env, addr, val);
        } else {
            gen_helper_stdby_b(tcg_env, addr, val);
        }
    }
    if (a->m) {
        tcg_gen_andi_i64(ofs, ofs, ~7);
        save_gpr(ctx, a->b, ofs);
    }

    return nullify_end(ctx);
}

static bool trans_lda(DisasContext *ctx, arg_ldst *a)
{
    int hold_mmu_idx = ctx->mmu_idx;

    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    ctx->mmu_idx = ctx->tb_flags & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX;
    trans_ld(ctx, a);
    ctx->mmu_idx = hold_mmu_idx;
    return true;
}

static bool trans_sta(DisasContext *ctx, arg_ldst *a)
{
    int hold_mmu_idx = ctx->mmu_idx;

    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    ctx->mmu_idx = ctx->tb_flags & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX;
    trans_st(ctx, a);
    ctx->mmu_idx = hold_mmu_idx;
    return true;
}

static bool trans_ldil(DisasContext *ctx, arg_ldil *a)
{
    TCGv_i64 tcg_rt = dest_gpr(ctx, a->t);

    tcg_gen_movi_i64(tcg_rt, a->i);
    save_gpr(ctx, a->t, tcg_rt);
    ctx->null_cond = cond_make_f();
    return true;
}

static bool trans_addil(DisasContext *ctx, arg_addil *a)
{
    TCGv_i64 tcg_rt = load_gpr(ctx, a->r);
    TCGv_i64 tcg_r1 = dest_gpr(ctx, 1);

    tcg_gen_addi_i64(tcg_r1, tcg_rt, a->i);
    save_gpr(ctx, 1, tcg_r1);
    ctx->null_cond = cond_make_f();
    return true;
}

static bool trans_ldo(DisasContext *ctx, arg_ldo *a)
{
    TCGv_i64 tcg_rt = dest_gpr(ctx, a->t);

    /* Special case rb == 0, for the LDI pseudo-op.
       The COPY pseudo-op is handled for free within tcg_gen_addi_i64.  */
    if (a->b == 0) {
        tcg_gen_movi_i64(tcg_rt, a->i);
    } else {
        tcg_gen_addi_i64(tcg_rt, cpu_gr[a->b], a->i);
    }
    save_gpr(ctx, a->t, tcg_rt);
    ctx->null_cond = cond_make_f();
    return true;
}

static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_i64 in1,
                    unsigned c, unsigned f, bool d, unsigned n, int disp)
{
    TCGv_i64 dest, in2, sv;
    DisasCond cond;

    in2 = load_gpr(ctx, r);
    dest = tcg_temp_new_i64();

    tcg_gen_sub_i64(dest, in1, in2);

    sv = NULL;
    if (cond_need_sv(c)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    cond = do_sub_cond(ctx, c * 2 + f, d, dest, in1, in2, sv);
    return do_cbranch(ctx, disp, n, &cond);
}

static bool trans_cmpb(DisasContext *ctx, arg_cmpb *a)
{
    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    nullify_over(ctx);
    return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1),
                   a->c, a->f, a->d, a->n, a->disp);
}

static bool trans_cmpbi(DisasContext *ctx, arg_cmpbi *a)
{
    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    nullify_over(ctx);
    return do_cmpb(ctx, a->r, tcg_constant_i64(a->i),
                   a->c, a->f, a->d, a->n, a->disp);
}

static bool do_addb(DisasContext *ctx, unsigned r, TCGv_i64 in1,
                    unsigned c, unsigned f, unsigned n, int disp)
{
    TCGv_i64 dest, in2, sv, cb_cond;
    DisasCond cond;
    bool d = false;

    /*
     * For hppa64, the ADDB conditions change with PSW.W,
     * dropping ZNV, SV, OD in favor of double-word EQ, LT, LE.
     */
    if (ctx->tb_flags & PSW_W) {
        d = c >= 5;
        if (d) {
            c &= 3;
        }
    }

    in2 = load_gpr(ctx, r);
    dest = tcg_temp_new_i64();
    sv = NULL;
    cb_cond = NULL;

    if (cond_need_cb(c)) {
        TCGv_i64 cb = tcg_temp_new_i64();
        TCGv_i64 cb_msb = tcg_temp_new_i64();

        tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero, in2, ctx->zero);
        tcg_gen_xor_i64(cb, in1, in2);
        tcg_gen_xor_i64(cb, cb, dest);
        cb_cond = get_carry(ctx, d, cb, cb_msb);
    } else {
        tcg_gen_add_i64(dest, in1, in2);
    }
    if (cond_need_sv(c)) {
        sv = do_add_sv(ctx, dest, in1, in2, in1, 0, d);
    }

    cond = do_cond(ctx, c * 2 + f, d, dest, cb_cond, sv);
    save_gpr(ctx, r, dest);
    return do_cbranch(ctx, disp, n, &cond);
}

static bool trans_addb(DisasContext *ctx, arg_addb *a)
{
    nullify_over(ctx);
    return do_addb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
}

static bool trans_addbi(DisasContext *ctx, arg_addbi *a)
{
    nullify_over(ctx);
    return do_addb(ctx, a->r, tcg_constant_i64(a->i), a->c, a->f, a->n, a->disp);
}

static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a)
{
    TCGv_i64 tmp, tcg_r;
    DisasCond cond;

    nullify_over(ctx);

    tmp = tcg_temp_new_i64();
    tcg_r = load_gpr(ctx, a->r);
    if (a->d) {
        tcg_gen_shl_i64(tmp, tcg_r, cpu_sar);
    } else {
        /* Force shift into [32,63] */
        tcg_gen_ori_i64(tmp, cpu_sar, 32);
        tcg_gen_shl_i64(tmp, tcg_r, tmp);
    }

    cond = cond_make_ti(a->c ? TCG_COND_GE : TCG_COND_LT, tmp, 0);
    return do_cbranch(ctx, a->disp, a->n, &cond);
}

static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a)
{
    DisasCond cond;
    int p = a->p | (a->d ? 0 : 32);

    nullify_over(ctx);
    cond = cond_make_vi(a->c ? TCG_COND_TSTEQ : TCG_COND_TSTNE,
                        load_gpr(ctx, a->r), 1ull << (63 - p));
    return do_cbranch(ctx, a->disp, a->n, &cond);
}

static bool trans_movb(DisasContext *ctx, arg_movb *a)
{
    TCGv_i64 dest;
    DisasCond cond;

    nullify_over(ctx);

    dest = dest_gpr(ctx, a->r2);
    if (a->r1 == 0) {
        tcg_gen_movi_i64(dest, 0);
    } else {
        tcg_gen_mov_i64(dest, cpu_gr[a->r1]);
    }

    /* All MOVB conditions are 32-bit. */
    cond = do_sed_cond(ctx, a->c, false, dest);
    return do_cbranch(ctx, a->disp, a->n, &cond);
}

static bool trans_movbi(DisasContext *ctx, arg_movbi *a)
{
    TCGv_i64 dest;
    DisasCond cond;

    nullify_over(ctx);

    dest = dest_gpr(ctx, a->r);
    tcg_gen_movi_i64(dest, a->i);

    /* All MOVBI conditions are 32-bit.  */
    cond = do_sed_cond(ctx, a->c, false, dest);
    return do_cbranch(ctx, a->disp, a->n, &cond);
}

static bool trans_shrp_sar(DisasContext *ctx, arg_shrp_sar *a)
{
    TCGv_i64 dest, src2;

    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    if (a->c) {
        nullify_over(ctx);
    }

    dest = dest_gpr(ctx, a->t);
    src2 = load_gpr(ctx, a->r2);
    if (a->r1 == 0) {
        if (a->d) {
            tcg_gen_shr_i64(dest, src2, cpu_sar);
        } else {
            TCGv_i64 tmp = tcg_temp_new_i64();

            tcg_gen_ext32u_i64(dest, src2);
            tcg_gen_andi_i64(tmp, cpu_sar, 31);
            tcg_gen_shr_i64(dest, dest, tmp);
        }
    } else if (a->r1 == a->r2) {
        if (a->d) {
            tcg_gen_rotr_i64(dest, src2, cpu_sar);
        } else {
            TCGv_i32 t32 = tcg_temp_new_i32();
            TCGv_i32 s32 = tcg_temp_new_i32();

            tcg_gen_extrl_i64_i32(t32, src2);
            tcg_gen_extrl_i64_i32(s32, cpu_sar);
            tcg_gen_andi_i32(s32, s32, 31);
            tcg_gen_rotr_i32(t32, t32, s32);
            tcg_gen_extu_i32_i64(dest, t32);
        }
    } else {
        TCGv_i64 src1 = load_gpr(ctx, a->r1);

        if (a->d) {
            TCGv_i64 t = tcg_temp_new_i64();
            TCGv_i64 n = tcg_temp_new_i64();

            tcg_gen_xori_i64(n, cpu_sar, 63);
            tcg_gen_shl_i64(t, src1, n);
            tcg_gen_shli_i64(t, t, 1);
            tcg_gen_shr_i64(dest, src2, cpu_sar);
            tcg_gen_or_i64(dest, dest, t);
        } else {
            TCGv_i64 t = tcg_temp_new_i64();
            TCGv_i64 s = tcg_temp_new_i64();

            tcg_gen_concat32_i64(t, src2, src1);
            tcg_gen_andi_i64(s, cpu_sar, 31);
            tcg_gen_shr_i64(dest, t, s);
        }
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification.  */
    ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
    return nullify_end(ctx);
}
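
/*
 * SHRP is a funnel shift: conceptually the pair r1:r2 is shifted
 * right and the low word kept.  For example (our own arithmetic, for
 * the 32-bit immediate form below with sa = 8): the result is
 * (r1 << 24) | (r2 >> 8), i.e. r1's low byte enters at the top.
 */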

static bool trans_shrp_imm(DisasContext *ctx, arg_shrp_imm *a)
{
    unsigned width, sa;
    TCGv_i64 dest, t2;

    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    if (a->c) {
        nullify_over(ctx);
    }

    width = a->d ? 64 : 32;
    sa = width - 1 - a->cpos;

    dest = dest_gpr(ctx, a->t);
    t2 = load_gpr(ctx, a->r2);
    if (a->r1 == 0) {
        tcg_gen_extract_i64(dest, t2, sa, width - sa);
    } else if (width == TARGET_LONG_BITS) {
        tcg_gen_extract2_i64(dest, t2, cpu_gr[a->r1], sa);
    } else {
        assert(!a->d);
        if (a->r1 == a->r2) {
            TCGv_i32 t32 = tcg_temp_new_i32();
            tcg_gen_extrl_i64_i32(t32, t2);
            tcg_gen_rotri_i32(t32, t32, sa);
            tcg_gen_extu_i32_i64(dest, t32);
        } else {
            tcg_gen_concat32_i64(dest, t2, cpu_gr[a->r1]);
            tcg_gen_extract_i64(dest, dest, sa, 32);
        }
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification.  */
    ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
    return nullify_end(ctx);
}

static bool trans_extr_sar(DisasContext *ctx, arg_extr_sar *a)
{
    unsigned widthm1 = a->d ? 63 : 31;
    TCGv_i64 dest, src, tmp;

    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    if (a->c) {
        nullify_over(ctx);
    }

    dest = dest_gpr(ctx, a->t);
    src = load_gpr(ctx, a->r);
    tmp = tcg_temp_new_i64();

    /* Recall that SAR is using big-endian bit numbering.  */
    tcg_gen_andi_i64(tmp, cpu_sar, widthm1);
    tcg_gen_xori_i64(tmp, tmp, widthm1);

    if (a->se) {
        if (!a->d) {
            tcg_gen_ext32s_i64(dest, src);
            src = dest;
        }
        tcg_gen_sar_i64(dest, src, tmp);
        tcg_gen_sextract_i64(dest, dest, 0, a->len);
    } else {
        if (!a->d) {
            tcg_gen_ext32u_i64(dest, src);
            src = dest;
        }
        tcg_gen_shr_i64(dest, src, tmp);
        tcg_gen_extract_i64(dest, dest, 0, a->len);
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification.  */
    ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
    return nullify_end(ctx);
}

static bool trans_extr_imm(DisasContext *ctx, arg_extr_imm *a)
{
    unsigned len, cpos, width;
    TCGv_i64 dest, src;

    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    if (a->c) {
        nullify_over(ctx);
    }

    len = a->len;
    width = a->d ? 64 : 32;
    cpos = width - 1 - a->pos;
    if (cpos + len > width) {
        len = width - cpos;
    }

    dest = dest_gpr(ctx, a->t);
    src = load_gpr(ctx, a->r);
    if (a->se) {
        tcg_gen_sextract_i64(dest, src, cpos, len);
    } else {
        tcg_gen_extract_i64(dest, src, cpos, len);
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification.  */
    ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
    return nullify_end(ctx);
}

static bool trans_depi_imm(DisasContext *ctx, arg_depi_imm *a)
{
    unsigned len, width;
    uint64_t mask0, mask1;
    TCGv_i64 dest;

    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    if (a->c) {
        nullify_over(ctx);
    }

    len = a->len;
    width = a->d ? 64 : 32;
    if (a->cpos + len > width) {
        len = width - a->cpos;
    }

    dest = dest_gpr(ctx, a->t);
    mask0 = deposit64(0, a->cpos, len, a->i);
    mask1 = deposit64(-1, a->cpos, len, a->i);

    if (a->nz) {
        TCGv_i64 src = load_gpr(ctx, a->t);
        tcg_gen_andi_i64(dest, src, mask1);
        tcg_gen_ori_i64(dest, dest, mask0);
    } else {
        tcg_gen_movi_i64(dest, mask0);
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification.  */
    ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
    return nullify_end(ctx);
}

static bool trans_dep_imm(DisasContext *ctx, arg_dep_imm *a)
{
    unsigned rs = a->nz ? a->t : 0;
    unsigned len, width;
    TCGv_i64 dest, val;

    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    if (a->c) {
        nullify_over(ctx);
    }

    len = a->len;
    width = a->d ? 64 : 32;
    if (a->cpos + len > width) {
        len = width - a->cpos;
    }

    dest = dest_gpr(ctx, a->t);
    val = load_gpr(ctx, a->r);
    if (rs == 0) {
        tcg_gen_deposit_z_i64(dest, val, a->cpos, len);
    } else {
        tcg_gen_deposit_i64(dest, cpu_gr[rs], val, a->cpos, len);
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification.  */
    ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
    return nullify_end(ctx);
}

static bool do_dep_sar(DisasContext *ctx, unsigned rt, unsigned c,
                       bool d, bool nz, unsigned len, TCGv_i64 val)
{
    unsigned rs = nz ? rt : 0;
    unsigned widthm1 = d ? 63 : 31;
    TCGv_i64 mask, tmp, shift, dest;
    uint64_t msb = 1ULL << (len - 1);

    dest = dest_gpr(ctx, rt);
    shift = tcg_temp_new_i64();
    tmp = tcg_temp_new_i64();

    /* Convert big-endian bit numbering in SAR to left-shift.  */
    tcg_gen_andi_i64(shift, cpu_sar, widthm1);
    tcg_gen_xori_i64(shift, shift, widthm1);

    mask = tcg_temp_new_i64();
    tcg_gen_movi_i64(mask, msb + (msb - 1));
    tcg_gen_and_i64(tmp, val, mask);
    if (rs) {
        tcg_gen_shl_i64(mask, mask, shift);
        tcg_gen_shl_i64(tmp, tmp, shift);
        tcg_gen_andc_i64(dest, cpu_gr[rs], mask);
        tcg_gen_or_i64(dest, dest, tmp);
    } else {
        tcg_gen_shl_i64(dest, tmp, shift);
    }
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    ctx->null_cond = do_sed_cond(ctx, c, d, dest);
    return nullify_end(ctx);
}

static bool trans_dep_sar(DisasContext *ctx, arg_dep_sar *a)
{
    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    if (a->c) {
        nullify_over(ctx);
    }
    return do_dep_sar(ctx, a->t, a->c, a->d, a->nz, a->len,
                      load_gpr(ctx, a->r));
}

static bool trans_depi_sar(DisasContext *ctx, arg_depi_sar *a)
{
    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    if (a->c) {
        nullify_over(ctx);
    }
    return do_dep_sar(ctx, a->t, a->c, a->d, a->nz, a->len,
                      tcg_constant_i64(a->i));
}

static bool trans_be(DisasContext *ctx, arg_be *a)
{
#ifndef CONFIG_USER_ONLY
    ctx->iaq_j.space = tcg_temp_new_i64();
    load_spr(ctx, ctx->iaq_j.space, a->sp);
#endif

    ctx->iaq_j.base = tcg_temp_new_i64();
    ctx->iaq_j.disp = 0;

    tcg_gen_addi_i64(ctx->iaq_j.base, load_gpr(ctx, a->b), a->disp);
    ctx->iaq_j.base = do_ibranch_priv(ctx, ctx->iaq_j.base);

    return do_ibranch(ctx, a->l, true, a->n);
}

static bool trans_bl(DisasContext *ctx, arg_bl *a)
{
    return do_dbranch(ctx, a->disp, a->l, a->n);
}

static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a)
{
    int64_t disp = a->disp;
    bool indirect = false;

    /* Trap if PSW[B] is set. */
    if (ctx->psw_xb & PSW_B) {
        return gen_illegal(ctx);
    }

    nullify_over(ctx);

#ifndef CONFIG_USER_ONLY
    if (ctx->privilege == 0) {
        /* Privilege cannot decrease.  */
    } else if (!(ctx->tb_flags & PSW_C)) {
        /* With paging disabled, priv becomes 0.  */
        disp -= ctx->privilege;
    } else {
        /* Adjust the dest offset for the privilege change from the PTE.  */
        TCGv_i64 off = tcg_temp_new_i64();

        copy_iaoq_entry(ctx, off, &ctx->iaq_f);
        gen_helper_b_gate_priv(off, tcg_env, off);

        ctx->iaq_j.base = off;
        ctx->iaq_j.disp = disp + 8;
        indirect = true;
    }
#endif

    if (a->l) {
        TCGv_i64 tmp = dest_gpr(ctx, a->l);
        if (ctx->privilege < 3) {
            tcg_gen_andi_i64(tmp, tmp, -4);
        }
        tcg_gen_ori_i64(tmp, tmp, ctx->privilege);
        save_gpr(ctx, a->l, tmp);
    }

    if (indirect) {
        return do_ibranch(ctx, 0, false, a->n);
    }
    return do_dbranch(ctx, disp, 0, a->n);
}

static bool trans_blr(DisasContext *ctx, arg_blr *a)
{
    if (a->x) {
        DisasIAQE next = iaqe_incr(&ctx->iaq_f, 8);
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();

        /* The computation here never changes privilege level.  */
        copy_iaoq_entry(ctx, t0, &next);
        tcg_gen_shli_i64(t1, load_gpr(ctx, a->x), 3);
        tcg_gen_add_i64(t0, t0, t1);

        ctx->iaq_j = iaqe_next_absv(ctx, t0);
        return do_ibranch(ctx, a->l, false, a->n);
    } else {
        /* BLR R0,RX is a good way to load PC+8 into RX.  */
        return do_dbranch(ctx, 0, a->l, a->n);
    }
}

static bool trans_bv(DisasContext *ctx, arg_bv *a)
{
    TCGv_i64 dest;

    if (a->x == 0) {
        dest = load_gpr(ctx, a->b);
    } else {
        dest = tcg_temp_new_i64();
        tcg_gen_shli_i64(dest, load_gpr(ctx, a->x), 3);
        tcg_gen_add_i64(dest, dest, load_gpr(ctx, a->b));
    }
    dest = do_ibranch_priv(ctx, dest);
    ctx->iaq_j = iaqe_next_absv(ctx, dest);

    return do_ibranch(ctx, 0, false, a->n);
}

static bool trans_bve(DisasContext *ctx, arg_bve *a)
{
    TCGv_i64 b = load_gpr(ctx, a->b);

#ifndef CONFIG_USER_ONLY
    ctx->iaq_j.space = space_select(ctx, 0, b);
#endif
    ctx->iaq_j.base = do_ibranch_priv(ctx, b);
    ctx->iaq_j.disp = 0;

    return do_ibranch(ctx, a->l, false, a->n);
}
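
/*
 * Note (well-known parisc calling convention, not defined in this
 * file): "bv %r0(%rp)" is the standard procedure return, so the
 * do_ibranch_priv clamp applied in trans_bv is what keeps a return
 * from raising the privilege level.
 */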

static bool trans_nopbts(DisasContext *ctx, arg_nopbts *a)
{
    /* All branch target stack instructions are implemented as nops.  */
    return ctx->is_pa20;
}

/*
 * Float class 0
 */

static void gen_fcpy_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_mov_i32(dst, src);
}

static bool trans_fid_f(DisasContext *ctx, arg_fid_f *a)
{
    uint64_t ret;

    if (ctx->is_pa20) {
        ret = 0x13080000000000ULL; /* PA8700 (PCX-W2) */
    } else {
        ret = 0x0f080000000000ULL; /* PA7300LC (PCX-L2) */
    }

    nullify_over(ctx);
    save_frd(0, tcg_constant_i64(ret));
    return nullify_end(ctx);
}

static bool trans_fcpy_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fcpy_f);
}

static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_mov_i64(dst, src);
}

static bool trans_fcpy_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fcpy_d);
}

static void gen_fabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_andi_i32(dst, src, INT32_MAX);
}

static bool trans_fabs_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fabs_f);
}

static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_andi_i64(dst, src, INT64_MAX);
}

static bool trans_fabs_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fabs_d);
}

static bool trans_fsqrt_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fsqrt_s);
}

static bool trans_fsqrt_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fsqrt_d);
}

static bool trans_frnd_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_frnd_s);
}

static bool trans_frnd_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_frnd_d);
}

static void gen_fneg_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_xori_i32(dst, src, INT32_MIN);
}

static bool trans_fneg_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fneg_f);
}

static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_xori_i64(dst, src, INT64_MIN);
}

static bool trans_fneg_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fneg_d);
}

static void gen_fnegabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_ori_i32(dst, src, INT32_MIN);
}

static bool trans_fnegabs_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fnegabs_f);
}

static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_ori_i64(dst, src, INT64_MIN);
}

static bool trans_fnegabs_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fnegabs_d);
}

/*
 * Float class 1
 */

static bool trans_fcnv_d_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_s);
}

static bool trans_fcnv_f_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_d);
}
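
/*
 * Naming note for the conversions in this class (our own summary of
 * the trans/helper names): f/d are single/double floats, w and q
 * (helper suffix dw) are signed word/doubleword integers, uw/uq
 * (helper udw) their unsigned counterparts, and the _t_ variants
 * truncate toward zero instead of using the current rounding mode.
 */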

static bool trans_fcnv_w_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_w_s);
}

static bool trans_fcnv_q_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_dw_s);
}

static bool trans_fcnv_w_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_w_d);
}

static bool trans_fcnv_q_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_dw_d);
}

static bool trans_fcnv_f_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_w);
}

static bool trans_fcnv_d_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_w);
}

static bool trans_fcnv_f_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_dw);
}

static bool trans_fcnv_d_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_dw);
}

static bool trans_fcnv_t_f_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_w);
}

static bool trans_fcnv_t_d_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_w);
}

static bool trans_fcnv_t_f_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_dw);
}

static bool trans_fcnv_t_d_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_dw);
}

static bool trans_fcnv_uw_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_uw_s);
}

static bool trans_fcnv_uq_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_udw_s);
}

static bool trans_fcnv_uw_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_uw_d);
}

static bool trans_fcnv_uq_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_udw_d);
}

static bool trans_fcnv_f_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_uw);
}

static bool trans_fcnv_d_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_uw);
}

static bool trans_fcnv_f_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_udw);
}

static bool trans_fcnv_d_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_udw);
}

static bool trans_fcnv_t_f_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_uw);
}

static bool trans_fcnv_t_d_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_uw);
}

static bool trans_fcnv_t_f_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_udw);
}

static bool trans_fcnv_t_d_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_udw);
}
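
/*
 * In the conversion helper names above, 'w' and 'dw' are signed 32-bit
 * and 64-bit integers, 'uw' and 'udw' their unsigned counterparts, and
 * 's' and 'd' single and double precision floats.  The _t_ variants
 * convert to integer with truncation toward zero rather than using the
 * current FPSR rounding mode.
 */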

/*
 * Float class 2
 */

static bool trans_fcmp_f(DisasContext *ctx, arg_fclass2 *a)
{
    TCGv_i32 ta, tb, tc, ty;

    nullify_over(ctx);

    ta = load_frw0_i32(a->r1);
    tb = load_frw0_i32(a->r2);
    ty = tcg_constant_i32(a->y);
    tc = tcg_constant_i32(a->c);

    gen_helper_fcmp_s(tcg_env, ta, tb, ty, tc);

    return nullify_end(ctx);
}

static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a)
{
    TCGv_i64 ta, tb;
    TCGv_i32 tc, ty;

    nullify_over(ctx);

    ta = load_frd0(a->r1);
    tb = load_frd0(a->r2);
    ty = tcg_constant_i32(a->y);
    tc = tcg_constant_i32(a->c);

    gen_helper_fcmp_d(tcg_env, ta, tb, ty, tc);

    return nullify_end(ctx);
}

static bool trans_ftest(DisasContext *ctx, arg_ftest *a)
{
    TCGCond tc = TCG_COND_TSTNE;
    uint32_t mask;
    TCGv_i64 t;

    nullify_over(ctx);

    t = tcg_temp_new_i64();
    tcg_gen_ld32u_i64(t, tcg_env, offsetof(CPUHPPAState, fr0_shadow));

    if (a->y == 1) {
        switch (a->c) {
        case 0: /* simple */
            mask = R_FPSR_C_MASK;
            break;
        case 2: /* rej */
            tc = TCG_COND_TSTEQ;
            /* fallthru */
        case 1: /* acc */
            mask = R_FPSR_C_MASK | R_FPSR_CQ_MASK;
            break;
        case 6: /* rej8 */
            tc = TCG_COND_TSTEQ;
            /* fallthru */
        case 5: /* acc8 */
            mask = R_FPSR_C_MASK | R_FPSR_CQ0_6_MASK;
            break;
        case 9: /* acc6 */
            mask = R_FPSR_C_MASK | R_FPSR_CQ0_4_MASK;
            break;
        case 13: /* acc4 */
            mask = R_FPSR_C_MASK | R_FPSR_CQ0_2_MASK;
            break;
        case 17: /* acc2 */
            mask = R_FPSR_C_MASK | R_FPSR_CQ0_MASK;
            break;
        default:
            gen_illegal(ctx);
            return true;
        }
    } else {
        unsigned cbit = (a->y ^ 1) - 1;
        mask = R_FPSR_CA0_MASK >> cbit;
    }

    ctx->null_cond = cond_make_ti(tc, t, mask);
    return nullify_end(ctx);
}

/*
 * Float class 3
 */

static bool trans_fadd_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fadd_s);
}

static bool trans_fadd_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fadd_d);
}

static bool trans_fsub_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fsub_s);
}

static bool trans_fsub_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fsub_d);
}

static bool trans_fmpy_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_s);
}

static bool trans_fmpy_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_d);
}

static bool trans_fdiv_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_s);
}

static bool trans_fdiv_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_d);
}
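
/*
 * XMPYU is the unsigned fixed-point multiply held in FP registers: the
 * two 32-bit operands are zero-extended to 64 bits by load_frw0_i64, so
 * the i64 multiply below yields the full 32x32->64 product.
 */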

static bool trans_xmpyu(DisasContext *ctx, arg_xmpyu *a)
{
    TCGv_i64 x, y;

    nullify_over(ctx);

    x = load_frw0_i64(a->r1);
    y = load_frw0_i64(a->r2);
    tcg_gen_mul_i64(x, x, y);
    save_frd(a->t, x);

    return nullify_end(ctx);
}

/* Convert the fmpyadd single-precision register encodings to standard. */
static inline int fmpyadd_s_reg(unsigned r)
{
    return (r & 16) * 2 + 16 + (r & 15);
}

static bool do_fmpyadd_s(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
{
    int tm = fmpyadd_s_reg(a->tm);
    int ra = fmpyadd_s_reg(a->ra);
    int ta = fmpyadd_s_reg(a->ta);
    int rm2 = fmpyadd_s_reg(a->rm2);
    int rm1 = fmpyadd_s_reg(a->rm1);

    nullify_over(ctx);

    do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
    do_fop_weww(ctx, ta, ta, ra,
                is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);

    return nullify_end(ctx);
}

static bool trans_fmpyadd_f(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_s(ctx, a, false);
}

static bool trans_fmpysub_f(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_s(ctx, a, true);
}

static bool do_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
{
    nullify_over(ctx);

    do_fop_dedd(ctx, a->tm, a->rm1, a->rm2, gen_helper_fmpy_d);
    do_fop_dedd(ctx, a->ta, a->ta, a->ra,
                is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);

    return nullify_end(ctx);
}

static bool trans_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_d(ctx, a, false);
}

static bool trans_fmpysub_d(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_d(ctx, a, true);
}

static bool trans_fmpyfadd_f(DisasContext *ctx, arg_fmpyfadd_f *a)
{
    TCGv_i32 x, y, z;

    nullify_over(ctx);
    x = load_frw0_i32(a->rm1);
    y = load_frw0_i32(a->rm2);
    z = load_frw0_i32(a->ra3);

    if (a->neg) {
        gen_helper_fmpynfadd_s(x, tcg_env, x, y, z);
    } else {
        gen_helper_fmpyfadd_s(x, tcg_env, x, y, z);
    }

    save_frw_i32(a->t, x);
    return nullify_end(ctx);
}

static bool trans_fmpyfadd_d(DisasContext *ctx, arg_fmpyfadd_d *a)
{
    TCGv_i64 x, y, z;

    nullify_over(ctx);
    x = load_frd0(a->rm1);
    y = load_frd0(a->rm2);
    z = load_frd0(a->ra3);

    if (a->neg) {
        gen_helper_fmpynfadd_d(x, tcg_env, x, y, z);
    } else {
        gen_helper_fmpyfadd_d(x, tcg_env, x, y, z);
    }

    save_frd(a->t, x);
    return nullify_end(ctx);
}

/* Emulate PDC BTLB, called by SeaBIOS-hppa */
static bool trans_diag_btlb(DisasContext *ctx, arg_diag_btlb *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);
    gen_helper_diag_btlb(tcg_env);
    return nullify_end(ctx);
#endif
}

/* Print char in %r26 to first serial console, used by SeaBIOS-hppa */
static bool trans_diag_cout(DisasContext *ctx, arg_diag_cout *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);
    gen_helper_diag_console_output(tcg_env);
    return nullify_end(ctx);
#endif
}

static bool trans_diag_getshadowregs_pa1(DisasContext *ctx, arg_empty *a)
{
    return !ctx->is_pa20 && do_getshadowregs(ctx);
}

static bool trans_diag_putshadowregs_pa1(DisasContext *ctx, arg_empty *a)
{
    return !ctx->is_pa20 && do_putshadowregs(ctx);
}
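
/*
 * MFDIAG and MTDIAG access the implementation-defined diagnose
 * registers, kept in env->dr[].  On PA2.0, %dr2 affects how guest
 * virtual addresses are masked, so a write to it must also refresh
 * gva_offset_mask and end the TB (see below).
 */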

static bool trans_diag_mfdiag(DisasContext *ctx, arg_diag_mfdiag *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    nullify_over(ctx);
    TCGv_i64 dest = dest_gpr(ctx, a->rt);
    tcg_gen_ld_i64(dest, tcg_env,
                   offsetof(CPUHPPAState, dr[a->dr]));
    save_gpr(ctx, a->rt, dest);
    return nullify_end(ctx);
}

static bool trans_diag_mtdiag(DisasContext *ctx, arg_diag_mtdiag *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    nullify_over(ctx);
    tcg_gen_st_i64(load_gpr(ctx, a->r1), tcg_env,
                   offsetof(CPUHPPAState, dr[a->dr]));
#ifndef CONFIG_USER_ONLY
    if (ctx->is_pa20 && (a->dr == 2)) {
        /* Update gva_offset_mask from the new value of %dr2 */
        gen_helper_update_gva_offset_mask(tcg_env);
        /* Exit to capture the new value for the next TB. */
        ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
    }
#endif
    return nullify_end(ctx);
}

static bool trans_diag_unimp(DisasContext *ctx, arg_diag_unimp *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    qemu_log_mask(LOG_UNIMP, "DIAG opcode 0x%04x ignored\n", a->i);
    return true;
}

static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    uint64_t cs_base;
    int bound;

    ctx->cs = cs;
    ctx->tb_flags = ctx->base.tb->flags;
    ctx->is_pa20 = hppa_is_pa20(cpu_env(cs));
    ctx->psw_xb = ctx->tb_flags & (PSW_X | PSW_B);
    ctx->gva_offset_mask = cpu_env(cs)->gva_offset_mask;

#ifdef CONFIG_USER_ONLY
    ctx->privilege = PRIV_USER;
    ctx->mmu_idx = MMU_USER_IDX;
    ctx->unalign = (ctx->tb_flags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
#else
    ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
    ctx->mmu_idx = (ctx->tb_flags & PSW_D
                    ? PRIV_P_TO_MMU_IDX(ctx->privilege, ctx->tb_flags & PSW_P)
                    : ctx->tb_flags & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX);
#endif

    cs_base = ctx->base.tb->cs_base;
    ctx->iaoq_first = ctx->base.pc_first + ctx->privilege;

    if (unlikely(cs_base & CS_BASE_DIFFSPACE)) {
        ctx->iaq_b.space = cpu_iasq_b;
        ctx->iaq_b.base = cpu_iaoq_b;
    } else if (unlikely(cs_base & CS_BASE_DIFFPAGE)) {
        ctx->iaq_b.base = cpu_iaoq_b;
    } else {
        uint64_t iaoq_f_pgofs = ctx->iaoq_first & ~TARGET_PAGE_MASK;
        uint64_t iaoq_b_pgofs = cs_base & ~TARGET_PAGE_MASK;
        ctx->iaq_b.disp = iaoq_b_pgofs - iaoq_f_pgofs;
    }

    ctx->zero = tcg_constant_i64(0);

    /* Bound the number of instructions by those left on the page. */
    bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
    ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
}
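
/*
 * For the bound computed above: TARGET_PAGE_MASK is sign-extended, so
 * -(pc_first | TARGET_PAGE_MASK) is the number of bytes remaining on
 * the page.  E.g. with 4 KiB pages and pc_first ending in 0xffc, four
 * bytes remain and max_insns is clamped to a single insn.
 */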

static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    /* Seed the nullification status from PSW[N], as saved in TB->FLAGS. */
    ctx->null_cond = cond_make_f();
    ctx->psw_n_nonzero = false;
    if (ctx->tb_flags & PSW_N) {
        ctx->null_cond.c = TCG_COND_ALWAYS;
        ctx->psw_n_nonzero = true;
    }
    ctx->null_lab = NULL;
}

static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    uint64_t iaoq_f, iaoq_b;
    int64_t diff;

    tcg_debug_assert(!iaqe_variable(&ctx->iaq_f));

    iaoq_f = ctx->iaoq_first + ctx->iaq_f.disp;
    if (iaqe_variable(&ctx->iaq_b)) {
        diff = INT32_MIN;
    } else {
        iaoq_b = ctx->iaoq_first + ctx->iaq_b.disp;
        diff = iaoq_b - iaoq_f;
        /* Direct branches can only produce a 24-bit displacement. */
        tcg_debug_assert(diff == (int32_t)diff);
        tcg_debug_assert(diff != INT32_MIN);
    }

    tcg_gen_insn_start(iaoq_f & ~TARGET_PAGE_MASK, diff, 0);
    ctx->insn_start_updated = false;
}

static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUHPPAState *env = cpu_env(cs);
    DisasJumpType ret;

    /* Execute one insn. */
#ifdef CONFIG_USER_ONLY
    if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
        do_page_zero(ctx);
        ret = ctx->base.is_jmp;
        assert(ret != DISAS_NEXT);
    } else
#endif
    {
        /* Always fetch the insn, even if nullified, so that we check
           the page permissions for execute. */
        uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);

        /*
         * Set up the IA queue for the next insn.
         * This will be overwritten by a branch.
         */
        ctx->iaq_n = NULL;
        memset(&ctx->iaq_j, 0, sizeof(ctx->iaq_j));
        ctx->psw_b_next = false;

        if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
            ctx->null_cond.c = TCG_COND_NEVER;
            ret = DISAS_NEXT;
        } else {
            ctx->insn = insn;
            if (!decode(ctx, insn)) {
                gen_illegal(ctx);
            }
            ret = ctx->base.is_jmp;
            assert(ctx->null_lab == NULL);
        }

        if (ret != DISAS_NORETURN) {
            set_psw_xb(ctx, ctx->psw_b_next ? PSW_B : 0);
        }
    }

    /* If the TranslationBlock must end, do so. */
    ctx->base.pc_next += 4;
    if (ret != DISAS_NEXT) {
        return;
    }
    /* Note this also detects a priority change. */
    if (iaqe_variable(&ctx->iaq_b)
        || ctx->iaq_b.disp != ctx->iaq_f.disp + 4) {
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
        return;
    }

    /*
     * Advance the insn queue.
     * The only exit now is DISAS_TOO_MANY from the translator loop.
     */
    ctx->iaq_f.disp = ctx->iaq_b.disp;
    if (!ctx->iaq_n) {
        ctx->iaq_b.disp += 4;
        return;
    }
    /*
     * If IAQ_Next is variable in any way, we need to copy into the
     * IAQ_Back globals, in case the next insn raises an exception.
     */
    if (ctx->iaq_n->base) {
        copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaq_n);
        ctx->iaq_b.base = cpu_iaoq_b;
        ctx->iaq_b.disp = 0;
    } else {
        ctx->iaq_b.disp = ctx->iaq_n->disp;
    }
    if (ctx->iaq_n->space) {
        tcg_gen_mov_i64(cpu_iasq_b, ctx->iaq_n->space);
        ctx->iaq_b.space = cpu_iasq_b;
    }
}
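
/*
 * At TB end, install the IAQ pair for the next insn to execute.  Only
 * DISAS_TOO_MANY reaches tb_stop after the queue advance at the bottom
 * of translate_insn, where iaq_f/iaq_b already name that pair; the
 * other exits happen before the advance, so the next front is iaq_b
 * and the next back is iaq_n.
 */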

static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    DisasJumpType is_jmp = ctx->base.is_jmp;
    /* Assume the insn queue has not been advanced. */
    DisasIAQE *f = &ctx->iaq_b;
    DisasIAQE *b = ctx->iaq_n;

    switch (is_jmp) {
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
        /* The insn queue has advanced; use the new front and back. */
        f = &ctx->iaq_f;
        b = &ctx->iaq_b;
        /* FALLTHRU */
    case DISAS_IAQ_N_STALE:
        if (use_goto_tb(ctx, f, b)
            && (ctx->null_cond.c == TCG_COND_NEVER
                || ctx->null_cond.c == TCG_COND_ALWAYS)) {
            nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
            gen_goto_tb(ctx, 0, f, b);
            break;
        }
        /* FALLTHRU */
    case DISAS_IAQ_N_STALE_EXIT:
        install_iaq_entries(ctx, f, b);
        nullify_save(ctx);
        if (is_jmp == DISAS_IAQ_N_STALE_EXIT) {
            tcg_gen_exit_tb(NULL, 0);
            break;
        }
        /* FALLTHRU */
    case DISAS_IAQ_N_UPDATED:
        tcg_gen_lookup_and_goto_ptr();
        break;
    case DISAS_EXIT:
        tcg_gen_exit_tb(NULL, 0);
        break;
    default:
        g_assert_not_reached();
    }

    for (DisasDelayException *e = ctx->delay_excp_list; e ; e = e->next) {
        gen_set_label(e->lab);
        if (e->set_n >= 0) {
            tcg_gen_movi_i64(cpu_psw_n, e->set_n);
        }
        if (e->set_iir) {
            tcg_gen_st_i64(tcg_constant_i64(e->insn), tcg_env,
                           offsetof(CPUHPPAState, cr[CR_IIR]));
        }
        install_iaq_entries(ctx, &e->iaq_f, &e->iaq_b);
        gen_excp_1(e->excp);
    }
}

#ifdef CONFIG_USER_ONLY
static bool hppa_tr_disas_log(const DisasContextBase *dcbase,
                              CPUState *cs, FILE *logfile)
{
    target_ulong pc = dcbase->pc_first;

    switch (pc) {
    case 0x00:
        fprintf(logfile, "IN:\n0x00000000: (null)\n");
        return true;
    case 0xb0:
        fprintf(logfile, "IN:\n0x000000b0: light-weight-syscall\n");
        return true;
    case 0xe0:
        fprintf(logfile, "IN:\n0x000000e0: set-thread-pointer-syscall\n");
        return true;
    case 0x100:
        fprintf(logfile, "IN:\n0x00000100: syscall\n");
        return true;
    }
    return false;
}
#endif

static const TranslatorOps hppa_tr_ops = {
    .init_disas_context = hppa_tr_init_disas_context,
    .tb_start           = hppa_tr_tb_start,
    .insn_start         = hppa_tr_insn_start,
    .translate_insn     = hppa_tr_translate_insn,
    .tb_stop            = hppa_tr_tb_stop,
#ifdef CONFIG_USER_ONLY
    .disas_log          = hppa_tr_disas_log,
#endif
};

void hppa_translate_code(CPUState *cs, TranslationBlock *tb,
                         int *max_insns, vaddr pc, void *host_pc)
{
    DisasContext ctx = { };
    translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);
}