/*
 * HPPA emulation cpu translation for qemu.
 *
 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/log.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H

/* Choose to use explicit sizes within this file. */
#undef tcg_temp_new

typedef struct DisasCond {
    TCGCond c;
    TCGv_i64 a0, a1;
} DisasCond;

typedef struct DisasContext {
    DisasContextBase base;
    CPUState *cs;
    TCGOp *insn_start;

    uint64_t iaoq_f;
    uint64_t iaoq_b;
    uint64_t iaoq_n;
    TCGv_i64 iaoq_n_var;

    DisasCond null_cond;
    TCGLabel *null_lab;

    TCGv_i64 zero;

    uint32_t insn;
    uint32_t tb_flags;
    int mmu_idx;
    int privilege;
    bool psw_n_nonzero;
    bool is_pa20;

#ifdef CONFIG_USER_ONLY
    MemOp unalign;
#endif
} DisasContext;

#ifdef CONFIG_USER_ONLY
#define UNALIGN(C)       (C)->unalign
#define MMU_DISABLED(C)  false
#else
#define UNALIGN(C)       MO_ALIGN
#define MMU_DISABLED(C)  MMU_IDX_MMU_DISABLED((C)->mmu_idx)
#endif

/* Note that ssm/rsm instructions number PSW_W and PSW_E differently. */
static int expand_sm_imm(DisasContext *ctx, int val)
{
    /* Keep unimplemented bits disabled -- see cpu_hppa_put_psw. */
    if (ctx->is_pa20) {
        if (val & PSW_SM_W) {
            val |= PSW_W;
        }
        val &= ~(PSW_SM_W | PSW_SM_E | PSW_G);
    } else {
        val &= ~(PSW_SM_W | PSW_SM_E | PSW_O);
    }
    return val;
}

/* Inverted space register indicates 0 means sr0 not inferred from base. */
static int expand_sr3x(DisasContext *ctx, int val)
{
    return ~val;
}

/* Convert the M:A bits within a memory insn to the tri-state value
   we use for the final M.  */
static int ma_to_m(DisasContext *ctx, int val)
{
    return val & 2 ? (val & 1 ? -1 : 1) : 0;
}

/* Convert the sign of the displacement to a pre or post-modify.  */
static int pos_to_m(DisasContext *ctx, int val)
{
    return val ? 1 : -1;
}

static int neg_to_m(DisasContext *ctx, int val)
{
    return val ? -1 : 1;
}

/* Used for branch targets and fp memory ops.  */
static int expand_shl2(DisasContext *ctx, int val)
{
    return val << 2;
}

/* Used for fp memory ops.  */
static int expand_shl3(DisasContext *ctx, int val)
{
    return val << 3;
}
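/*
 * Note: the expand_* and *_to_m helpers above (and below) are field
 * translation functions: the auto-generated decoder included further
 * down applies them to immediate fields while decoding, before any
 * trans_* callback runs.
 */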
/* Used for assemble_21.  */
static int expand_shl11(DisasContext *ctx, int val)
{
    return val << 11;
}

static int assemble_6(DisasContext *ctx, int val)
{
    /*
     * Officially, 32 * x + 32 - y.
     * Here, x is already in bit 5, and y is [4:0].
     * Since -y = ~y + 1, in 5 bits 32 - y => y ^ 31 + 1,
     * with the overflow from bit 4 summing with x.
     */
    return (val ^ 31) + 1;
}

/* Translate CMPI doubleword conditions to standard. */
static int cmpbid_c(DisasContext *ctx, int val)
{
    return val ? val : 4; /* 0 == "*<<" */
}


/* Include the auto-generated decoder. */
#include "decode-insns.c.inc"

/* We are not using a goto_tb (for whatever reason), but have updated
   the iaq (for whatever reason), so don't do it again on exit. */
#define DISAS_IAQ_N_UPDATED  DISAS_TARGET_0

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the iaq for the next instruction to be executed. */
#define DISAS_IAQ_N_STALE    DISAS_TARGET_1

/* Similarly, but we want to return to the main loop immediately
   to recognize unmasked interrupts. */
#define DISAS_IAQ_N_STALE_EXIT      DISAS_TARGET_2
#define DISAS_EXIT                  DISAS_TARGET_3

/* global register indexes */
static TCGv_i64 cpu_gr[32];
static TCGv_i64 cpu_sr[4];
static TCGv_i64 cpu_srH;
static TCGv_i64 cpu_iaoq_f;
static TCGv_i64 cpu_iaoq_b;
static TCGv_i64 cpu_iasq_f;
static TCGv_i64 cpu_iasq_b;
static TCGv_i64 cpu_sar;
static TCGv_i64 cpu_psw_n;
static TCGv_i64 cpu_psw_v;
static TCGv_i64 cpu_psw_cb;
static TCGv_i64 cpu_psw_cb_msb;

void hppa_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUHPPAState, V) }

    typedef struct { TCGv_i64 *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
        DEF_VAR(psw_n),
        DEF_VAR(psw_v),
        DEF_VAR(psw_cb),
        DEF_VAR(psw_cb_msb),
        DEF_VAR(iaoq_f),
        DEF_VAR(iaoq_b),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler. */
    static const char gr_names[32][4] = {
        "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
        "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
    };
    /* SR[4-7] are not global registers so that we can index them.  */
    static const char sr_names[5][4] = {
        "sr0", "sr1", "sr2", "sr3", "srH"
    };

    int i;

    cpu_gr[0] = NULL;
    for (i = 1; i < 32; i++) {
        cpu_gr[i] = tcg_global_mem_new(tcg_env,
                                       offsetof(CPUHPPAState, gr[i]),
                                       gr_names[i]);
    }
    for (i = 0; i < 4; i++) {
        cpu_sr[i] = tcg_global_mem_new_i64(tcg_env,
                                           offsetof(CPUHPPAState, sr[i]),
                                           sr_names[i]);
    }
    cpu_srH = tcg_global_mem_new_i64(tcg_env,
                                     offsetof(CPUHPPAState, sr[4]),
                                     sr_names[4]);

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new(tcg_env, v->ofs, v->name);
    }

    cpu_iasq_f = tcg_global_mem_new_i64(tcg_env,
                                        offsetof(CPUHPPAState, iasq_f),
                                        "iasq_f");
    cpu_iasq_b = tcg_global_mem_new_i64(tcg_env,
                                        offsetof(CPUHPPAState, iasq_b),
                                        "iasq_b");
}

static void set_insn_breg(DisasContext *ctx, int breg)
{
    assert(ctx->insn_start != NULL);
    tcg_set_insn_start_param(ctx->insn_start, 2, breg);
    ctx->insn_start = NULL;
}

static DisasCond cond_make_f(void)
{
    return (DisasCond){
        .c = TCG_COND_NEVER,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_t(void)
{
    return (DisasCond){
        .c = TCG_COND_ALWAYS,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_n(void)
{
    return (DisasCond){
        .c = TCG_COND_NE,
        .a0 = cpu_psw_n,
        .a1 = tcg_constant_i64(0)
    };
}

static DisasCond cond_make_tmp(TCGCond c, TCGv_i64 a0, TCGv_i64 a1)
{
    assert(c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    return (DisasCond){ .c = c, .a0 = a0, .a1 = a1 };
}

static DisasCond cond_make_0_tmp(TCGCond c, TCGv_i64 a0)
{
    return cond_make_tmp(c, a0, tcg_constant_i64(0));
}

static DisasCond cond_make_0(TCGCond c, TCGv_i64 a0)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    tcg_gen_mov_i64(tmp, a0);
    return cond_make_0_tmp(c, tmp);
}

static DisasCond cond_make(TCGCond c, TCGv_i64 a0, TCGv_i64 a1)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    tcg_gen_mov_i64(t0, a0);
    tcg_gen_mov_i64(t1, a1);
    return cond_make_tmp(c, t0, t1);
}

static void cond_free(DisasCond *cond)
{
    switch (cond->c) {
    default:
        cond->a0 = NULL;
        cond->a1 = NULL;
        /* fallthru */
    case TCG_COND_ALWAYS:
        cond->c = TCG_COND_NEVER;
        break;
    case TCG_COND_NEVER:
        break;
    }
}

static TCGv_i64 load_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0) {
        return ctx->zero;
    } else {
        return cpu_gr[reg];
    }
}

static TCGv_i64 dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
        return tcg_temp_new_i64();
    } else {
        return cpu_gr[reg];
    }
}

static void save_or_nullify(DisasContext *ctx, TCGv_i64 dest, TCGv_i64 t)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        tcg_gen_movcond_i64(ctx->null_cond.c, dest, ctx->null_cond.a0,
                            ctx->null_cond.a1, dest, t);
    } else {
        tcg_gen_mov_i64(dest, t);
    }
}

static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_i64 t)
{
    if (reg != 0) {
        save_or_nullify(ctx, cpu_gr[reg], t);
    }
}

#if HOST_BIG_ENDIAN
# define HI_OFS  0
# define LO_OFS  4
#else
# define HI_OFS  4
# define LO_OFS  0
#endif
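/*
 * The 64-bit fr[] slots hold the doubleword FP registers; a 32-bit
 * access selects the left or right half of a slot via bit 5 of RT,
 * with HI_OFS/LO_OFS adjusting for host endianness.
 */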
static TCGv_i32 load_frw_i32(unsigned rt)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, tcg_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
    return ret;
}

static TCGv_i32 load_frw0_i32(unsigned rt)
{
    if (rt == 0) {
        TCGv_i32 ret = tcg_temp_new_i32();
        tcg_gen_movi_i32(ret, 0);
        return ret;
    } else {
        return load_frw_i32(rt);
    }
}

static TCGv_i64 load_frw0_i64(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    if (rt == 0) {
        tcg_gen_movi_i64(ret, 0);
    } else {
        tcg_gen_ld32u_i64(ret, tcg_env,
                          offsetof(CPUHPPAState, fr[rt & 31])
                          + (rt & 32 ? LO_OFS : HI_OFS));
    }
    return ret;
}

static void save_frw_i32(unsigned rt, TCGv_i32 val)
{
    tcg_gen_st_i32(val, tcg_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
}

#undef HI_OFS
#undef LO_OFS

static TCGv_i64 load_frd(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    tcg_gen_ld_i64(ret, tcg_env, offsetof(CPUHPPAState, fr[rt]));
    return ret;
}

static TCGv_i64 load_frd0(unsigned rt)
{
    if (rt == 0) {
        TCGv_i64 ret = tcg_temp_new_i64();
        tcg_gen_movi_i64(ret, 0);
        return ret;
    } else {
        return load_frd(rt);
    }
}

static void save_frd(unsigned rt, TCGv_i64 val)
{
    tcg_gen_st_i64(val, tcg_env, offsetof(CPUHPPAState, fr[rt]));
}

static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
{
#ifdef CONFIG_USER_ONLY
    tcg_gen_movi_i64(dest, 0);
#else
    if (reg < 4) {
        tcg_gen_mov_i64(dest, cpu_sr[reg]);
    } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        tcg_gen_mov_i64(dest, cpu_srH);
    } else {
        tcg_gen_ld_i64(dest, tcg_env, offsetof(CPUHPPAState, sr[reg]));
    }
#endif
}

/* Skip over the implementation of an insn that has been nullified.
   Use this when the insn is too complex for a conditional move.  */
static void nullify_over(DisasContext *ctx)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        /* The always condition should have been handled in the main loop. */
        assert(ctx->null_cond.c != TCG_COND_ALWAYS);

        ctx->null_lab = gen_new_label();

        /* If we're using PSW[N], copy it to a temp because... */
        if (ctx->null_cond.a0 == cpu_psw_n) {
            ctx->null_cond.a0 = tcg_temp_new_i64();
            tcg_gen_mov_i64(ctx->null_cond.a0, cpu_psw_n);
        }
        /* ... we clear it before branching over the implementation,
           so that (1) it's clear after nullifying this insn and
           (2) if this insn nullifies the next, PSW[N] is valid.  */
        if (ctx->psw_n_nonzero) {
            ctx->psw_n_nonzero = false;
            tcg_gen_movi_i64(cpu_psw_n, 0);
        }

        tcg_gen_brcond_i64(ctx->null_cond.c, ctx->null_cond.a0,
                           ctx->null_cond.a1, ctx->null_lab);
        cond_free(&ctx->null_cond);
    }
}

/* Save the current nullification state to PSW[N]. */
static void nullify_save(DisasContext *ctx)
{
    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (ctx->psw_n_nonzero) {
            tcg_gen_movi_i64(cpu_psw_n, 0);
        }
        return;
    }
    if (ctx->null_cond.a0 != cpu_psw_n) {
        tcg_gen_setcond_i64(ctx->null_cond.c, cpu_psw_n,
                            ctx->null_cond.a0, ctx->null_cond.a1);
        ctx->psw_n_nonzero = true;
    }
    cond_free(&ctx->null_cond);
}
/* Set a PSW[N] to X.  The intention is that this is used immediately
   before a goto_tb/exit_tb, so that there is no fallthru path to other
   code within the TB.  Therefore we do not update psw_n_nonzero.  */
static void nullify_set(DisasContext *ctx, bool x)
{
    if (ctx->psw_n_nonzero || x) {
        tcg_gen_movi_i64(cpu_psw_n, x);
    }
}

/* Mark the end of an instruction that may have been nullified.
   This is the pair to nullify_over.  Always returns true so that
   it may be tail-called from a translate function.  */
static bool nullify_end(DisasContext *ctx)
{
    TCGLabel *null_lab = ctx->null_lab;
    DisasJumpType status = ctx->base.is_jmp;

    /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
       For UPDATED, we cannot update on the nullified path.  */
    assert(status != DISAS_IAQ_N_UPDATED);

    if (likely(null_lab == NULL)) {
        /* The current insn wasn't conditional or handled the condition
           applied to it without a branch, so the (new) setting of
           NULL_COND can be applied directly to the next insn.  */
        return true;
    }
    ctx->null_lab = NULL;

    if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
        /* The next instruction will be unconditional,
           and NULL_COND already reflects that.  */
        gen_set_label(null_lab);
    } else {
        /* The insn that we just executed is itself nullifying the next
           instruction.  Store the condition in the PSW[N] global.
           We asserted PSW[N] = 0 in nullify_over, so that after the
           label we have the proper value in place.  */
        nullify_save(ctx);
        gen_set_label(null_lab);
        ctx->null_cond = cond_make_n();
    }
    if (status == DISAS_NORETURN) {
        ctx->base.is_jmp = DISAS_NEXT;
    }
    return true;
}

static uint64_t gva_offset_mask(DisasContext *ctx)
{
    return (ctx->tb_flags & PSW_W
            ? MAKE_64BIT_MASK(0, 62)
            : MAKE_64BIT_MASK(0, 32));
}
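/*
 * All IAOQ values and GVA offsets are kept masked to the current offset
 * width: 62 bits with PSW_W set (pa2.0 wide mode), 32 bits otherwise.
 */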
static void copy_iaoq_entry(DisasContext *ctx, TCGv_i64 dest,
                            uint64_t ival, TCGv_i64 vval)
{
    uint64_t mask = gva_offset_mask(ctx);

    if (ival != -1) {
        tcg_gen_movi_i64(dest, ival & mask);
        return;
    }
    tcg_debug_assert(vval != NULL);

    /*
     * We know that the IAOQ is already properly masked.
     * This optimization is primarily for "iaoq_f = iaoq_b".
     */
    if (vval == cpu_iaoq_f || vval == cpu_iaoq_b) {
        tcg_gen_mov_i64(dest, vval);
    } else {
        tcg_gen_andi_i64(dest, vval, mask);
    }
}

static inline uint64_t iaoq_dest(DisasContext *ctx, int64_t disp)
{
    return ctx->iaoq_f + disp + 8;
}

static void gen_excp_1(int exception)
{
    gen_helper_excp(tcg_env, tcg_constant_i32(exception));
}

static void gen_excp(DisasContext *ctx, int exception)
{
    copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
    copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
    nullify_save(ctx);
    gen_excp_1(exception);
    ctx->base.is_jmp = DISAS_NORETURN;
}

static bool gen_excp_iir(DisasContext *ctx, int exc)
{
    nullify_over(ctx);
    tcg_gen_st_i64(tcg_constant_i64(ctx->insn),
                   tcg_env, offsetof(CPUHPPAState, cr[CR_IIR]));
    gen_excp(ctx, exc);
    return nullify_end(ctx);
}

static bool gen_illegal(DisasContext *ctx)
{
    return gen_excp_iir(ctx, EXCP_ILL);
}

#ifdef CONFIG_USER_ONLY
#define CHECK_MOST_PRIVILEGED(EXCP) \
    return gen_excp_iir(ctx, EXCP)
#else
#define CHECK_MOST_PRIVILEGED(EXCP) \
    do {                                     \
        if (ctx->privilege != 0) {           \
            return gen_excp_iir(ctx, EXCP);  \
        }                                    \
    } while (0)
#endif

static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    return translator_use_goto_tb(&ctx->base, dest);
}

/* If the next insn is to be nullified, and it's on the same page,
   and we're not attempting to set a breakpoint on it, then we can
   totally skip the nullified insn.  This avoids creating and
   executing a TB that merely branches to the next TB.  */
static bool use_nullify_skip(DisasContext *ctx)
{
    return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
            && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
}

static void gen_goto_tb(DisasContext *ctx, int which,
                        uint64_t f, uint64_t b)
{
    if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
        tcg_gen_goto_tb(which);
        copy_iaoq_entry(ctx, cpu_iaoq_f, f, NULL);
        copy_iaoq_entry(ctx, cpu_iaoq_b, b, NULL);
        tcg_gen_exit_tb(ctx->base.tb, which);
    } else {
        copy_iaoq_entry(ctx, cpu_iaoq_f, f, cpu_iaoq_b);
        copy_iaoq_entry(ctx, cpu_iaoq_b, b, ctx->iaoq_n_var);
        tcg_gen_lookup_and_goto_ptr();
    }
}

static bool cond_need_sv(int c)
{
    return c == 2 || c == 3 || c == 6;
}

static bool cond_need_cb(int c)
{
    return c == 4 || c == 5;
}

/* Need extensions from TCGv_i32 to TCGv_i64. */
static bool cond_need_ext(DisasContext *ctx, bool d)
{
    return !(ctx->is_pa20 && d);
}
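/*
 * D is the instruction's doubleword-condition bit.  It is always zero
 * for pa1.x, so cond_need_ext is true there and conditions are always
 * evaluated on the low 32 bits of the 64-bit TCG values.
 */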
/*
 * Compute conditional for arithmetic.  See Page 5-3, Table 5-1, of
 * the Parisc 1.1 Architecture Reference Manual for details.
 */

static DisasCond do_cond(DisasContext *ctx, unsigned cf, bool d,
                         TCGv_i64 res, TCGv_i64 cb_msb, TCGv_i64 sv)
{
    DisasCond cond;
    TCGv_i64 tmp;

    switch (cf >> 1) {
    case 0: /* Never / TR    (0 / 1) */
        cond = cond_make_f();
        break;
    case 1: /* = / <>        (Z / !Z) */
        if (cond_need_ext(ctx, d)) {
            tmp = tcg_temp_new_i64();
            tcg_gen_ext32u_i64(tmp, res);
            res = tmp;
        }
        cond = cond_make_0(TCG_COND_EQ, res);
        break;
    case 2: /* < / >=        (N ^ V / !(N ^ V) */
        tmp = tcg_temp_new_i64();
        tcg_gen_xor_i64(tmp, res, sv);
        if (cond_need_ext(ctx, d)) {
            tcg_gen_ext32s_i64(tmp, tmp);
        }
        cond = cond_make_0_tmp(TCG_COND_LT, tmp);
        break;
    case 3: /* <= / >        (N ^ V) | Z / !((N ^ V) | Z) */
        /*
         * Simplify:
         *   (N ^ V) | Z
         *   ((res < 0) ^ (sv < 0)) | !res
         *   ((res ^ sv) < 0) | !res
         *   (~(res ^ sv) >= 0) | !res
         *   !(~(res ^ sv) >> 31) | !res
         *   !(~(res ^ sv) >> 31 & res)
         */
        tmp = tcg_temp_new_i64();
        tcg_gen_eqv_i64(tmp, res, sv);
        if (cond_need_ext(ctx, d)) {
            tcg_gen_sextract_i64(tmp, tmp, 31, 1);
            tcg_gen_and_i64(tmp, tmp, res);
            tcg_gen_ext32u_i64(tmp, tmp);
        } else {
            tcg_gen_sari_i64(tmp, tmp, 63);
            tcg_gen_and_i64(tmp, tmp, res);
        }
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 4: /* NUV / UV      (!C / C) */
        /* Only bit 0 of cb_msb is ever set. */
        cond = cond_make_0(TCG_COND_EQ, cb_msb);
        break;
    case 5: /* ZNV / VNZ     (!C | Z / C & !Z) */
        tmp = tcg_temp_new_i64();
        tcg_gen_neg_i64(tmp, cb_msb);
        tcg_gen_and_i64(tmp, tmp, res);
        if (cond_need_ext(ctx, d)) {
            tcg_gen_ext32u_i64(tmp, tmp);
        }
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 6: /* SV / NSV      (V / !V) */
        if (cond_need_ext(ctx, d)) {
            tmp = tcg_temp_new_i64();
            tcg_gen_ext32s_i64(tmp, sv);
            sv = tmp;
        }
        cond = cond_make_0(TCG_COND_LT, sv);
        break;
    case 7: /* OD / EV */
        tmp = tcg_temp_new_i64();
        tcg_gen_andi_i64(tmp, res, 1);
        cond = cond_make_0_tmp(TCG_COND_NE, tmp);
        break;
    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}
/* Similar, but for the special case of subtraction without borrow, we
   can use the inputs directly.  This can allow other computation to be
   deleted as unused.  */

static DisasCond do_sub_cond(DisasContext *ctx, unsigned cf, bool d,
                             TCGv_i64 res, TCGv_i64 in1,
                             TCGv_i64 in2, TCGv_i64 sv)
{
    TCGCond tc;
    bool ext_uns;

    switch (cf >> 1) {
    case 1: /* = / <> */
        tc = TCG_COND_EQ;
        ext_uns = true;
        break;
    case 2: /* < / >= */
        tc = TCG_COND_LT;
        ext_uns = false;
        break;
    case 3: /* <= / > */
        tc = TCG_COND_LE;
        ext_uns = false;
        break;
    case 4: /* << / >>= */
        tc = TCG_COND_LTU;
        ext_uns = true;
        break;
    case 5: /* <<= / >> */
        tc = TCG_COND_LEU;
        ext_uns = true;
        break;
    default:
        return do_cond(ctx, cf, d, res, NULL, sv);
    }

    if (cf & 1) {
        tc = tcg_invert_cond(tc);
    }
    if (cond_need_ext(ctx, d)) {
        TCGv_i64 t1 = tcg_temp_new_i64();
        TCGv_i64 t2 = tcg_temp_new_i64();

        if (ext_uns) {
            tcg_gen_ext32u_i64(t1, in1);
            tcg_gen_ext32u_i64(t2, in2);
        } else {
            tcg_gen_ext32s_i64(t1, in1);
            tcg_gen_ext32s_i64(t2, in2);
        }
        return cond_make_tmp(tc, t1, t2);
    }
    return cond_make(tc, in1, in2);
}

/*
 * Similar, but for logicals, where the carry and overflow bits are not
 * computed, and use of them is undefined.
 *
 * Undefined or not, hardware does not trap.  It seems reasonable to
 * assume hardware treats cases c={4,5,6} as if C=0 & V=0, since that's
 * how cases c={2,3} are treated.
 */

static DisasCond do_log_cond(DisasContext *ctx, unsigned cf, bool d,
                             TCGv_i64 res)
{
    TCGCond tc;
    bool ext_uns;

    switch (cf) {
    case 0:  /* never */
    case 9:  /* undef, C */
    case 11: /* undef, C & !Z */
    case 12: /* undef, V */
        return cond_make_f();

    case 1:  /* true */
    case 8:  /* undef, !C */
    case 10: /* undef, !C | Z */
    case 13: /* undef, !V */
        return cond_make_t();

    case 2:  /* == */
        tc = TCG_COND_EQ;
        ext_uns = true;
        break;
    case 3:  /* <> */
        tc = TCG_COND_NE;
        ext_uns = true;
        break;
    case 4:  /* < */
        tc = TCG_COND_LT;
        ext_uns = false;
        break;
    case 5:  /* >= */
        tc = TCG_COND_GE;
        ext_uns = false;
        break;
    case 6:  /* <= */
        tc = TCG_COND_LE;
        ext_uns = false;
        break;
    case 7:  /* > */
        tc = TCG_COND_GT;
        ext_uns = false;
        break;

    case 14: /* OD */
    case 15: /* EV */
        return do_cond(ctx, cf, d, res, NULL, NULL);

    default:
        g_assert_not_reached();
    }

    if (cond_need_ext(ctx, d)) {
        TCGv_i64 tmp = tcg_temp_new_i64();

        if (ext_uns) {
            tcg_gen_ext32u_i64(tmp, res);
        } else {
            tcg_gen_ext32s_i64(tmp, res);
        }
        return cond_make_0_tmp(tc, tmp);
    }
    return cond_make_0(tc, res);
}

/* Similar, but for shift/extract/deposit conditions.  */

static DisasCond do_sed_cond(DisasContext *ctx, unsigned orig, bool d,
                             TCGv_i64 res)
{
    unsigned c, f;

    /* Convert the compressed condition codes to standard.
       0-2 are the same as logicals (nv,<,<=), while 3 is OD.
       4-7 are the reverse of 0-3.  */
    c = orig & 3;
    if (c == 3) {
        c = 7;
    }
    f = (orig & 4) / 4;

    return do_log_cond(ctx, c * 2 + f, d, res);
}
/* Similar, but for unit conditions.  */

static DisasCond do_unit_cond(unsigned cf, bool d, TCGv_i64 res,
                              TCGv_i64 in1, TCGv_i64 in2)
{
    DisasCond cond;
    TCGv_i64 tmp, cb = NULL;
    uint64_t d_repl = d ? 0x0000000100000001ull : 1;

    if (cf & 8) {
        /* Since we want to test lots of carry-out bits all at once, do not
         * do our normal thing and compute carry-in of bit B+1 since that
         * leaves us with carry bits spread across two words.
         */
        cb = tcg_temp_new_i64();
        tmp = tcg_temp_new_i64();
        tcg_gen_or_i64(cb, in1, in2);
        tcg_gen_and_i64(tmp, in1, in2);
        tcg_gen_andc_i64(cb, cb, res);
        tcg_gen_or_i64(cb, cb, tmp);
    }

    switch (cf >> 1) {
    case 0: /* never / TR */
    case 1: /* undefined */
    case 5: /* undefined */
        cond = cond_make_f();
        break;

    case 2: /* SBZ / NBZ */
        /* See hasless(v,1) from
         * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
         */
        tmp = tcg_temp_new_i64();
        tcg_gen_subi_i64(tmp, res, d_repl * 0x01010101u);
        tcg_gen_andc_i64(tmp, tmp, res);
        tcg_gen_andi_i64(tmp, tmp, d_repl * 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        break;

    case 3: /* SHZ / NHZ */
        tmp = tcg_temp_new_i64();
        tcg_gen_subi_i64(tmp, res, d_repl * 0x00010001u);
        tcg_gen_andc_i64(tmp, tmp, res);
        tcg_gen_andi_i64(tmp, tmp, d_repl * 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        break;

    case 4: /* SDC / NDC */
        tcg_gen_andi_i64(cb, cb, d_repl * 0x88888888u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 6: /* SBC / NBC */
        tcg_gen_andi_i64(cb, cb, d_repl * 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 7: /* SHC / NHC */
        tcg_gen_andi_i64(cb, cb, d_repl * 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

static TCGv_i64 get_carry(DisasContext *ctx, bool d,
                          TCGv_i64 cb, TCGv_i64 cb_msb)
{
    if (cond_need_ext(ctx, d)) {
        TCGv_i64 t = tcg_temp_new_i64();
        tcg_gen_extract_i64(t, cb, 32, 1);
        return t;
    }
    return cb_msb;
}

static TCGv_i64 get_psw_carry(DisasContext *ctx, bool d)
{
    return get_carry(ctx, d, cpu_psw_cb, cpu_psw_cb_msb);
}

/* Compute signed overflow for addition. */
static TCGv_i64 do_add_sv(DisasContext *ctx, TCGv_i64 res,
                          TCGv_i64 in1, TCGv_i64 in2)
{
    TCGv_i64 sv = tcg_temp_new_i64();
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_xor_i64(sv, res, in1);
    tcg_gen_xor_i64(tmp, in1, in2);
    tcg_gen_andc_i64(sv, sv, tmp);

    return sv;
}
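/*
 * Above, SV = (res ^ in1) & ~(in1 ^ in2): overflow iff the addends agree
 * in sign while the result does not, tested via the sign bit.  For
 * subtraction the operands must instead differ in sign, hence the AND
 * rather than ANDC below.
 */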
/* Compute signed overflow for subtraction. */
static TCGv_i64 do_sub_sv(DisasContext *ctx, TCGv_i64 res,
                          TCGv_i64 in1, TCGv_i64 in2)
{
    TCGv_i64 sv = tcg_temp_new_i64();
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_xor_i64(sv, res, in1);
    tcg_gen_xor_i64(tmp, in1, in2);
    tcg_gen_and_i64(sv, sv, tmp);

    return sv;
}

static void do_add(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                   TCGv_i64 in2, unsigned shift, bool is_l,
                   bool is_tsv, bool is_tc, bool is_c, unsigned cf, bool d)
{
    TCGv_i64 dest, cb, cb_msb, cb_cond, sv, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new_i64();
    cb = NULL;
    cb_msb = NULL;
    cb_cond = NULL;

    if (shift) {
        tmp = tcg_temp_new_i64();
        tcg_gen_shli_i64(tmp, in1, shift);
        in1 = tmp;
    }

    if (!is_l || cond_need_cb(c)) {
        cb_msb = tcg_temp_new_i64();
        cb = tcg_temp_new_i64();

        tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero, in2, ctx->zero);
        if (is_c) {
            tcg_gen_add2_i64(dest, cb_msb, dest, cb_msb,
                             get_psw_carry(ctx, d), ctx->zero);
        }
        tcg_gen_xor_i64(cb, in1, in2);
        tcg_gen_xor_i64(cb, cb, dest);
        if (cond_need_cb(c)) {
            cb_cond = get_carry(ctx, d, cb, cb_msb);
        }
    } else {
        tcg_gen_add_i64(dest, in1, in2);
        if (is_c) {
            tcg_gen_add_i64(dest, dest, get_psw_carry(ctx, d));
        }
    }

    /* Compute signed overflow if required. */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_add_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            /* ??? Need to include overflow from shift. */
            gen_helper_tsv(tcg_env, sv);
        }
    }

    /* Emit any conditional trap before any writeback.  */
    cond = do_cond(ctx, cf, d, dest, cb_cond, sv);
    if (is_tc) {
        tmp = tcg_temp_new_i64();
        tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(tcg_env, tmp);
    }

    /* Write back the result.  */
    if (!is_l) {
        save_or_nullify(ctx, cpu_psw_cb, cb);
        save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    }
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_d_sh *a,
                       bool is_l, bool is_tsv, bool is_tc, bool is_c)
{
    TCGv_i64 tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_add(ctx, a->t, tcg_r1, tcg_r2, a->sh, is_l,
           is_tsv, is_tc, is_c, a->cf, a->d);
    return nullify_end(ctx);
}

static bool do_add_imm(DisasContext *ctx, arg_rri_cf *a,
                       bool is_tsv, bool is_tc)
{
    TCGv_i64 tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = tcg_constant_i64(a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    /* All ADDI conditions are 32-bit. */
    do_add(ctx, a->t, tcg_im, tcg_r2, 0, 0, is_tsv, is_tc, 0, a->cf, false);
    return nullify_end(ctx);
}
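/*
 * Flags for do_sub: is_tsv traps on signed overflow, is_tc traps when
 * the condition is satisfied, and is_b computes IN1 + ~IN2 + PSW[C]
 * (subtract with borrow) instead of IN1 + ~IN2 + 1.
 */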
static void do_sub(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                   TCGv_i64 in2, bool is_tsv, bool is_b,
                   bool is_tc, unsigned cf, bool d)
{
    TCGv_i64 dest, sv, cb, cb_msb, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new_i64();
    cb = tcg_temp_new_i64();
    cb_msb = tcg_temp_new_i64();

    if (is_b) {
        /* DEST,C = IN1 + ~IN2 + C.  */
        tcg_gen_not_i64(cb, in2);
        tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero,
                         get_psw_carry(ctx, d), ctx->zero);
        tcg_gen_add2_i64(dest, cb_msb, dest, cb_msb, cb, ctx->zero);
        tcg_gen_xor_i64(cb, cb, in1);
        tcg_gen_xor_i64(cb, cb, dest);
    } else {
        /*
         * DEST,C = IN1 + ~IN2 + 1.  We can produce the same result in fewer
         * operations by seeding the high word with 1 and subtracting.
         */
        TCGv_i64 one = tcg_constant_i64(1);
        tcg_gen_sub2_i64(dest, cb_msb, in1, one, in2, ctx->zero);
        tcg_gen_eqv_i64(cb, in1, in2);
        tcg_gen_xor_i64(cb, cb, dest);
    }

    /* Compute signed overflow if required. */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            gen_helper_tsv(tcg_env, sv);
        }
    }

    /* Compute the condition.  We cannot use the special case for borrow.  */
    if (!is_b) {
        cond = do_sub_cond(ctx, cf, d, dest, in1, in2, sv);
    } else {
        cond = do_cond(ctx, cf, d, dest, get_carry(ctx, d, cb, cb_msb), sv);
    }

    /* Emit any conditional trap before any writeback.  */
    if (is_tc) {
        tmp = tcg_temp_new_i64();
        tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(tcg_env, tmp);
    }

    /* Write back the result.  */
    save_or_nullify(ctx, cpu_psw_cb, cb);
    save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf_d *a,
                       bool is_tsv, bool is_b, bool is_tc)
{
    TCGv_i64 tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_sub(ctx, a->t, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, a->cf, a->d);
    return nullify_end(ctx);
}

static bool do_sub_imm(DisasContext *ctx, arg_rri_cf *a, bool is_tsv)
{
    TCGv_i64 tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = tcg_constant_i64(a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    /* All SUBI conditions are 32-bit. */
    do_sub(ctx, a->t, tcg_im, tcg_r2, is_tsv, 0, 0, a->cf, false);
    return nullify_end(ctx);
}

static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                      TCGv_i64 in2, unsigned cf, bool d)
{
    TCGv_i64 dest, sv;
    DisasCond cond;

    dest = tcg_temp_new_i64();
    tcg_gen_sub_i64(dest, in1, in2);

    /* Compute signed overflow if required. */
    sv = NULL;
    if (cond_need_sv(cf >> 1)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    /* Form the condition for the compare.  */
    cond = do_sub_cond(ctx, cf, d, dest, in1, in2, sv);

    /* Clear.  */
    tcg_gen_movi_i64(dest, 0);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}
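/*
 * The logical instructions funnel through do_log; with CF zero no
 * condition is computed and the result can go straight to the
 * (possibly nullified) destination via dest_gpr.
 */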
static void do_log(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                   TCGv_i64 in2, unsigned cf, bool d,
                   void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dest = dest_gpr(ctx, rt);

    /* Perform the operation, and writeback.  */
    fn(dest, in1, in2);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (cf) {
        ctx->null_cond = do_log_cond(ctx, cf, d, dest);
    }
}

static bool do_log_reg(DisasContext *ctx, arg_rrr_cf_d *a,
                       void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d, fn);
    return nullify_end(ctx);
}

static void do_unit(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                    TCGv_i64 in2, unsigned cf, bool d, bool is_tc,
                    void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dest;
    DisasCond cond;

    if (cf == 0) {
        dest = dest_gpr(ctx, rt);
        fn(dest, in1, in2);
        save_gpr(ctx, rt, dest);
        cond_free(&ctx->null_cond);
    } else {
        dest = tcg_temp_new_i64();
        fn(dest, in1, in2);

        cond = do_unit_cond(cf, d, dest, in1, in2);

        if (is_tc) {
            TCGv_i64 tmp = tcg_temp_new_i64();
            tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1);
            gen_helper_tcond(tcg_env, tmp);
        }
        save_gpr(ctx, rt, dest);

        cond_free(&ctx->null_cond);
        ctx->null_cond = cond;
    }
}

#ifndef CONFIG_USER_ONLY
/* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
   from the top 2 bits of the base register.  There are a few system
   instructions that have a 3-bit space specifier, for which SR0 is
   not special.  To handle this, pass ~SP.  */
static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_i64 base)
{
    TCGv_ptr ptr;
    TCGv_i64 tmp;
    TCGv_i64 spc;

    if (sp != 0) {
        if (sp < 0) {
            sp = ~sp;
        }
        spc = tcg_temp_new_i64();
        load_spr(ctx, spc, sp);
        return spc;
    }
    if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        return cpu_srH;
    }

    ptr = tcg_temp_new_ptr();
    tmp = tcg_temp_new_i64();
    spc = tcg_temp_new_i64();

    /* Extract top 2 bits of the address, shift left 3 for uint64_t index. */
    tcg_gen_shri_i64(tmp, base, (ctx->tb_flags & PSW_W ? 64 : 32) - 5);
    tcg_gen_andi_i64(tmp, tmp, 030);
    tcg_gen_trunc_i64_ptr(ptr, tmp);

    tcg_gen_add_ptr(ptr, ptr, tcg_env);
    tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));

    return spc;
}
#endif

static void form_gva(DisasContext *ctx, TCGv_i64 *pgva, TCGv_i64 *pofs,
                     unsigned rb, unsigned rx, int scale, int64_t disp,
                     unsigned sp, int modify, bool is_phys)
{
    TCGv_i64 base = load_gpr(ctx, rb);
    TCGv_i64 ofs;
    TCGv_i64 addr;

    set_insn_breg(ctx, rb);

    /* Note that RX is mutually exclusive with DISP.  */
    if (rx) {
        ofs = tcg_temp_new_i64();
        tcg_gen_shli_i64(ofs, cpu_gr[rx], scale);
        tcg_gen_add_i64(ofs, ofs, base);
    } else if (disp || modify) {
        ofs = tcg_temp_new_i64();
        tcg_gen_addi_i64(ofs, base, disp);
    } else {
        ofs = base;
    }

    *pofs = ofs;
    *pgva = addr = tcg_temp_new_i64();
    tcg_gen_andi_i64(addr, modify <= 0 ? ofs : base, gva_offset_mask(ctx));
#ifndef CONFIG_USER_ONLY
    if (!is_phys) {
        tcg_gen_or_i64(addr, addr, space_select(ctx, sp, base));
    }
#endif
}
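/*
 * form_gva returns two values: *pgva is the full global virtual address
 * (space bits included outside of user mode), while *pofs is the raw
 * offset to be written back to GR[b] when the insn modifies the base.
 */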
/* Emit a memory load.  The modify parameter should be
 * < 0 for pre-modify,
 * > 0 for post-modify,
 * = 0 for no base register update.
 */
static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
                       unsigned rx, int scale, int64_t disp,
                       unsigned sp, int modify, MemOp mop)
{
    TCGv_i64 ofs;
    TCGv_i64 addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             MMU_DISABLED(ctx));
    tcg_gen_qemu_ld_i32(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
                       unsigned rx, int scale, int64_t disp,
                       unsigned sp, int modify, MemOp mop)
{
    TCGv_i64 ofs;
    TCGv_i64 addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             MMU_DISABLED(ctx));
    tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
                        unsigned rx, int scale, int64_t disp,
                        unsigned sp, int modify, MemOp mop)
{
    TCGv_i64 ofs;
    TCGv_i64 addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             MMU_DISABLED(ctx));
    tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
                        unsigned rx, int scale, int64_t disp,
                        unsigned sp, int modify, MemOp mop)
{
    TCGv_i64 ofs;
    TCGv_i64 addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             MMU_DISABLED(ctx));
    tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
                    unsigned rx, int scale, int64_t disp,
                    unsigned sp, int modify, MemOp mop)
{
    TCGv_i64 dest;

    nullify_over(ctx);

    if (modify == 0) {
        /* No base register update.  */
        dest = dest_gpr(ctx, rt);
    } else {
        /* Make sure if RT == RB, we see the result of the load.  */
        dest = tcg_temp_new_i64();
    }
    do_load_64(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
    save_gpr(ctx, rt, dest);

    return nullify_end(ctx);
}

static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, int64_t disp,
                      unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i32();
    do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
    save_frw_i32(rt, tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(tcg_env);
    }

    return nullify_end(ctx);
}
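/*
 * For the indexed FP forms, a->scale selects shift-by-operand-size
 * (2 for words, 3 for doublewords).  A load into fr0 rewrites the FP
 * status register, so gen_helper_loaded_fr0 re-derives env state from it.
 */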
static bool trans_fldw(DisasContext *ctx, arg_ldst *a)
{
    return do_floadw(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
                     a->disp, a->sp, a->m);
}

static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, int64_t disp,
                      unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i64();
    do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
    save_frd(rt, tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(tcg_env);
    }

    return nullify_end(ctx);
}

static bool trans_fldd(DisasContext *ctx, arg_ldst *a)
{
    return do_floadd(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
                     a->disp, a->sp, a->m);
}

static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb,
                     int64_t disp, unsigned sp,
                     int modify, MemOp mop)
{
    nullify_over(ctx);
    do_store_64(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
    return nullify_end(ctx);
}

static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, int64_t disp,
                       unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = load_frw_i32(rt);
    do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);

    return nullify_end(ctx);
}

static bool trans_fstw(DisasContext *ctx, arg_ldst *a)
{
    return do_fstorew(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
                      a->disp, a->sp, a->m);
}

static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, int64_t disp,
                       unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = load_frd(rt);
    do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);

    return nullify_end(ctx);
}
static bool trans_fstd(DisasContext *ctx, arg_ldst *a)
{
    return do_fstored(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
                      a->disp, a->sp, a->m);
}

static bool do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
{
    TCGv_i32 tmp;

    nullify_over(ctx);
    tmp = load_frw0_i32(ra);

    func(tmp, tcg_env, tmp);

    save_frw_i32(rt, tmp);
    return nullify_end(ctx);
}

static bool do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    nullify_over(ctx);
    src = load_frd(ra);
    dst = tcg_temp_new_i32();

    func(dst, tcg_env, src);

    save_frw_i32(rt, dst);
    return nullify_end(ctx);
}

static bool do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
{
    TCGv_i64 tmp;

    nullify_over(ctx);
    tmp = load_frd0(ra);

    func(tmp, tcg_env, tmp);

    save_frd(rt, tmp);
    return nullify_end(ctx);
}

static bool do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
{
    TCGv_i32 src;
    TCGv_i64 dst;

    nullify_over(ctx);
    src = load_frw0_i32(ra);
    dst = tcg_temp_new_i64();

    func(dst, tcg_env, src);

    save_frd(rt, dst);
    return nullify_end(ctx);
}

static bool do_fop_weww(DisasContext *ctx, unsigned rt,
                        unsigned ra, unsigned rb,
                        void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
{
    TCGv_i32 a, b;

    nullify_over(ctx);
    a = load_frw0_i32(ra);
    b = load_frw0_i32(rb);

    func(a, tcg_env, a, b);

    save_frw_i32(rt, a);
    return nullify_end(ctx);
}

static bool do_fop_dedd(DisasContext *ctx, unsigned rt,
                        unsigned ra, unsigned rb,
                        void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
{
    TCGv_i64 a, b;

    nullify_over(ctx);
    a = load_frd0(ra);
    b = load_frd0(rb);

    func(a, tcg_env, a, b);

    save_frd(rt, a);
    return nullify_end(ctx);
}

/* Emit an unconditional branch to a direct target, which may or may not
   have already had nullification handled.  */
static bool do_dbranch(DisasContext *ctx, uint64_t dest,
                       unsigned link, bool is_n)
{
    if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
        if (link != 0) {
            copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        ctx->iaoq_n = dest;
        if (is_n) {
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
    } else {
        nullify_over(ctx);

        if (link != 0) {
            copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }

        if (is_n && use_nullify_skip(ctx)) {
            nullify_set(ctx, 0);
            gen_goto_tb(ctx, 0, dest, dest + 4);
        } else {
            nullify_set(ctx, is_n);
            gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
        }

        nullify_end(ctx);

        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
        ctx->base.is_jmp = DISAS_NORETURN;
    }
    return true;
}
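/*
 * For conditional branches with the ,N completer, the delay slot is
 * nullified when a forward branch is taken or a backward branch is not
 * taken; do_cbranch below applies is_n per direction accordingly.
 */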
/* Emit a conditional branch to a direct target.  If the branch itself
   is nullified, we should have already used nullify_over.  */
static bool do_cbranch(DisasContext *ctx, int64_t disp, bool is_n,
                       DisasCond *cond)
{
    uint64_t dest = iaoq_dest(ctx, disp);
    TCGLabel *taken = NULL;
    TCGCond c = cond->c;
    bool n;

    assert(ctx->null_cond.c == TCG_COND_NEVER);

    /* Handle TRUE and NEVER as direct branches.  */
    if (c == TCG_COND_ALWAYS) {
        return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
    }
    if (c == TCG_COND_NEVER) {
        return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
    }

    taken = gen_new_label();
    tcg_gen_brcond_i64(c, cond->a0, cond->a1, taken);
    cond_free(cond);

    /* Not taken: Condition not satisfied; nullify on backward branches. */
    n = is_n && disp < 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
    } else {
        if (!n && ctx->null_lab) {
            gen_set_label(ctx->null_lab);
            ctx->null_lab = NULL;
        }
        nullify_set(ctx, n);
        if (ctx->iaoq_n == -1) {
            /* The temporary iaoq_n_var died at the branch above.
               Regenerate it here instead of saving it.  */
            tcg_gen_addi_i64(ctx->iaoq_n_var, cpu_iaoq_b, 4);
        }
        gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
    }

    gen_set_label(taken);

    /* Taken: Condition satisfied; nullify on forward branches.  */
    n = is_n && disp >= 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, dest, dest + 4);
    } else {
        nullify_set(ctx, n);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
    }

    /* Not taken: the branch itself was nullified.  */
    if (ctx->null_lab) {
        gen_set_label(ctx->null_lab);
        ctx->null_lab = NULL;
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    } else {
        ctx->base.is_jmp = DISAS_NORETURN;
    }
    return true;
}
/* Emit an unconditional branch to an indirect target.  This handles
   nullification of the branch itself.  */
static bool do_ibranch(DisasContext *ctx, TCGv_i64 dest,
                       unsigned link, bool is_n)
{
    TCGv_i64 a0, a1, next, tmp;
    TCGCond c;

    assert(ctx->null_lab == NULL);

    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (link != 0) {
            copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        next = tcg_temp_new_i64();
        tcg_gen_mov_i64(next, dest);
        if (is_n) {
            if (use_nullify_skip(ctx)) {
                copy_iaoq_entry(ctx, cpu_iaoq_f, -1, next);
                tcg_gen_addi_i64(next, next, 4);
                copy_iaoq_entry(ctx, cpu_iaoq_b, -1, next);
                nullify_set(ctx, 0);
                ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
                return true;
            }
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
        ctx->iaoq_n = -1;
        ctx->iaoq_n_var = next;
    } else if (is_n && use_nullify_skip(ctx)) {
        /* The (conditional) branch, B, nullifies the next insn, N,
           and we're allowed to skip execution of N (no single-step or
           tracepoint in effect).  Since the goto_ptr that we must use
           for the indirect branch consumes no special resources, we
           can (conditionally) skip B and continue execution.  */
        /* The use_nullify_skip test implies we have a known control path. */
        tcg_debug_assert(ctx->iaoq_b != -1);
        tcg_debug_assert(ctx->iaoq_n != -1);

        /* We do have to handle the non-local temporary, DEST, before
           branching.  Since IAOQ_F is not really live at this point, we
           can simply store DEST optimistically.  Similarly with IAOQ_B.  */
        copy_iaoq_entry(ctx, cpu_iaoq_f, -1, dest);
        next = tcg_temp_new_i64();
        tcg_gen_addi_i64(next, dest, 4);
        copy_iaoq_entry(ctx, cpu_iaoq_b, -1, next);

        nullify_over(ctx);
        if (link != 0) {
            copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        tcg_gen_lookup_and_goto_ptr();
        return nullify_end(ctx);
    } else {
        c = ctx->null_cond.c;
        a0 = ctx->null_cond.a0;
        a1 = ctx->null_cond.a1;

        tmp = tcg_temp_new_i64();
        next = tcg_temp_new_i64();

        copy_iaoq_entry(ctx, tmp, ctx->iaoq_n, ctx->iaoq_n_var);
        tcg_gen_movcond_i64(c, next, a0, a1, tmp, dest);
        ctx->iaoq_n = -1;
        ctx->iaoq_n_var = next;

        if (link != 0) {
            tcg_gen_movcond_i64(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
        }

        if (is_n) {
            /* The branch nullifies the next insn, which means the state of N
               after the branch is the inverse of the state of N that applied
               to the branch.  */
            tcg_gen_setcond_i64(tcg_invert_cond(c), cpu_psw_n, a0, a1);
            cond_free(&ctx->null_cond);
            ctx->null_cond = cond_make_n();
            ctx->psw_n_nonzero = true;
        } else {
            cond_free(&ctx->null_cond);
        }
    }
    return true;
}

/* Implement
 *    if (IAOQ_Front{30..31} < GR[b]{30..31})
 *      IAOQ_Next{30..31} ← GR[b]{30..31};
 *    else
 *      IAOQ_Next{30..31} ← IAOQ_Front{30..31};
 * which keeps the privilege level from being increased.
 */
static TCGv_i64 do_ibranch_priv(DisasContext *ctx, TCGv_i64 offset)
{
    TCGv_i64 dest;
    switch (ctx->privilege) {
    case 0:
        /* Privilege 0 is maximum and is allowed to decrease.  */
        return offset;
    case 3:
        /* Privilege 3 is minimum and is never allowed to increase.  */
        dest = tcg_temp_new_i64();
        tcg_gen_ori_i64(dest, offset, 3);
        break;
    default:
        dest = tcg_temp_new_i64();
        tcg_gen_andi_i64(dest, offset, -4);
        tcg_gen_ori_i64(dest, dest, ctx->privilege);
        tcg_gen_movcond_i64(TCG_COND_GTU, dest, dest, offset, dest, offset);
        break;
    }
    return dest;
}
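/*
 * The page-zero entry points handled below match the Linux hppa gateway
 * page ABI: 0xb0 for light-weight syscalls, 0xe0 for set_thread_pointer,
 * 0x100 for the standard syscall entry.
 */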
#ifdef CONFIG_USER_ONLY
/* On Linux, page zero is normally marked execute only + gateway.
   Therefore normal read or write is supposed to fail, but specific
   offsets have kernel code mapped to raise permissions to implement
   system calls.  Handling this via an explicit check here, rather
   than in the "be disp(sr2,r0)" instruction that probably sent us
   here, is the easiest way to handle the branch delay slot on the
   aforementioned BE.  */
static void do_page_zero(DisasContext *ctx)
{
    TCGv_i64 tmp;

    /* If by some means we get here with PSW[N]=1, that implies that
       the B,GATE instruction would be skipped, and we'd fault on the
       next insn within the privileged page.  */
    switch (ctx->null_cond.c) {
    case TCG_COND_NEVER:
        break;
    case TCG_COND_ALWAYS:
        tcg_gen_movi_i64(cpu_psw_n, 0);
        goto do_sigill;
    default:
        /* Since this is always the first (and only) insn within the
           TB, we should know the state of PSW[N] from TB->FLAGS.  */
        g_assert_not_reached();
    }

    /* Check that we didn't arrive here via some means that allowed
       non-sequential instruction execution.  Normally the PSW[B] bit
       detects this by disallowing the B,GATE instruction to execute
       under such conditions.  */
    if (ctx->iaoq_b != ctx->iaoq_f + 4) {
        goto do_sigill;
    }

    switch (ctx->iaoq_f & -4) {
    case 0x00: /* Null pointer call */
        gen_excp_1(EXCP_IMP);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    case 0xb0: /* LWS */
        gen_excp_1(EXCP_SYSCALL_LWS);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    case 0xe0: /* SET_THREAD_POINTER */
        tcg_gen_st_i64(cpu_gr[26], tcg_env, offsetof(CPUHPPAState, cr[27]));
        tmp = tcg_temp_new_i64();
        tcg_gen_ori_i64(tmp, cpu_gr[31], 3);
        copy_iaoq_entry(ctx, cpu_iaoq_f, -1, tmp);
        tcg_gen_addi_i64(tmp, tmp, 4);
        copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
        ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
        break;

    case 0x100: /* SYSCALL */
        gen_excp_1(EXCP_SYSCALL);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    default:
    do_sigill:
        gen_excp_1(EXCP_ILL);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;
    }
}
#endif

static bool trans_nop(DisasContext *ctx, arg_nop *a)
{
    cond_free(&ctx->null_cond);
    return true;
}

static bool trans_break(DisasContext *ctx, arg_break *a)
{
    return gen_excp_iir(ctx, EXCP_BREAK);
}

static bool trans_sync(DisasContext *ctx, arg_sync *a)
{
    /* No point in nullifying the memory barrier.  */
    tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);

    cond_free(&ctx->null_cond);
    return true;
}

static bool trans_mfia(DisasContext *ctx, arg_mfia *a)
{
    unsigned rt = a->t;
    TCGv_i64 tmp = dest_gpr(ctx, rt);
    tcg_gen_movi_i64(tmp, ctx->iaoq_f);
    save_gpr(ctx, rt, tmp);

    cond_free(&ctx->null_cond);
    return true;
}

static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a)
{
    unsigned rt = a->t;
    unsigned rs = a->sp;
    TCGv_i64 t0 = tcg_temp_new_i64();

    load_spr(ctx, t0, rs);
    tcg_gen_shri_i64(t0, t0, 32);

    save_gpr(ctx, rt, t0);

    cond_free(&ctx->null_cond);
    return true;
}
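/*
 * Space register values are kept in the high 32 bits of their 64-bit
 * slots so they can be ORed directly into a global virtual address:
 * hence the shift right in trans_mfsp above and the shift left in
 * trans_mtsp below.
 */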
static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a)
{
    unsigned rt = a->t;
    unsigned ctl = a->r;
    TCGv_i64 tmp;

    switch (ctl) {
    case CR_SAR:
        if (a->e == 0) {
            /* MFSAR without ,W masks low 5 bits.  */
            tmp = dest_gpr(ctx, rt);
            tcg_gen_andi_i64(tmp, cpu_sar, 31);
            save_gpr(ctx, rt, tmp);
            goto done;
        }
        save_gpr(ctx, rt, cpu_sar);
        goto done;
    case CR_IT: /* Interval Timer */
        /* FIXME: Respect PSW_S bit.  */
        nullify_over(ctx);
        tmp = dest_gpr(ctx, rt);
        if (translator_io_start(&ctx->base)) {
            gen_helper_read_interval_timer(tmp);
            ctx->base.is_jmp = DISAS_IAQ_N_STALE;
        } else {
            gen_helper_read_interval_timer(tmp);
        }
        save_gpr(ctx, rt, tmp);
        return nullify_end(ctx);
    case 26:
    case 27:
        break;
    default:
        /* All other control registers are privileged.  */
        CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
        break;
    }

    tmp = tcg_temp_new_i64();
    tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
    save_gpr(ctx, rt, tmp);

 done:
    cond_free(&ctx->null_cond);
    return true;
}

static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a)
{
    unsigned rr = a->r;
    unsigned rs = a->sp;
    TCGv_i64 tmp;

    if (rs >= 5) {
        CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
    }
    nullify_over(ctx);

    tmp = tcg_temp_new_i64();
    tcg_gen_shli_i64(tmp, load_gpr(ctx, rr), 32);

    if (rs >= 4) {
        tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUHPPAState, sr[rs]));
        ctx->tb_flags &= ~TB_FLAG_SR_SAME;
    } else {
        tcg_gen_mov_i64(cpu_sr[rs], tmp);
    }

    return nullify_end(ctx);
}

static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
{
    unsigned ctl = a->t;
    TCGv_i64 reg;
    TCGv_i64 tmp;

    if (ctl == CR_SAR) {
        reg = load_gpr(ctx, a->r);
        tmp = tcg_temp_new_i64();
        tcg_gen_andi_i64(tmp, reg, ctx->is_pa20 ? 63 : 31);
        save_or_nullify(ctx, cpu_sar, tmp);

        cond_free(&ctx->null_cond);
        return true;
    }

    /* All other control registers are privileged or read-only.  */
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);

#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);

    if (ctx->is_pa20) {
        reg = load_gpr(ctx, a->r);
    } else {
        reg = tcg_temp_new_i64();
        tcg_gen_ext32u_i64(reg, load_gpr(ctx, a->r));
    }

    switch (ctl) {
    case CR_IT:
        gen_helper_write_interval_timer(tcg_env, reg);
        break;
    case CR_EIRR:
        gen_helper_write_eirr(tcg_env, reg);
        break;
    case CR_EIEM:
        gen_helper_write_eiem(tcg_env, reg);
        ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
        break;

    case CR_IIASQ:
    case CR_IIAOQ:
        /* FIXME: Respect PSW_Q bit */
        /* The write advances the queue and stores to the back element.  */
        tmp = tcg_temp_new_i64();
        tcg_gen_ld_i64(tmp, tcg_env,
                       offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
        tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
        tcg_gen_st_i64(reg, tcg_env,
                       offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
        break;

    case CR_PID1:
    case CR_PID2:
    case CR_PID3:
    case CR_PID4:
        tcg_gen_st_i64(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
#ifndef CONFIG_USER_ONLY
        gen_helper_change_prot_id(tcg_env);
#endif
        break;

    default:
        tcg_gen_st_i64(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
        break;
    }
    return nullify_end(ctx);
#endif
}

static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_not_i64(tmp, load_gpr(ctx, a->r));
    tcg_gen_andi_i64(tmp, tmp, ctx->is_pa20 ? 63 : 31);
    save_or_nullify(ctx, cpu_sar, tmp);

    cond_free(&ctx->null_cond);
    return true;
}
*/ 2146 tcg_gen_movi_i64(dest, 0); 2147 #else 2148 tcg_gen_mov_i64(dest, space_select(ctx, a->sp, load_gpr(ctx, a->b))); 2149 tcg_gen_shri_i64(dest, dest, 32); 2150 #endif 2151 save_gpr(ctx, a->t, dest); 2152 2153 cond_free(&ctx->null_cond); 2154 return true; 2155 } 2156 2157 static bool trans_rsm(DisasContext *ctx, arg_rsm *a) 2158 { 2159 #ifdef CONFIG_USER_ONLY 2160 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); 2161 #else 2162 TCGv_i64 tmp; 2163 2164 /* HP-UX 11i and HP ODE use rsm for read-access to PSW */ 2165 if (a->i) { 2166 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); 2167 } 2168 2169 nullify_over(ctx); 2170 2171 tmp = tcg_temp_new_i64(); 2172 tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, psw)); 2173 tcg_gen_andi_i64(tmp, tmp, ~a->i); 2174 gen_helper_swap_system_mask(tmp, tcg_env, tmp); 2175 save_gpr(ctx, a->t, tmp); 2176 2177 /* Exit the TB to recognize new interrupts, e.g. PSW_M. */ 2178 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT; 2179 return nullify_end(ctx); 2180 #endif 2181 } 2182 2183 static bool trans_ssm(DisasContext *ctx, arg_ssm *a) 2184 { 2185 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); 2186 #ifndef CONFIG_USER_ONLY 2187 TCGv_i64 tmp; 2188 2189 nullify_over(ctx); 2190 2191 tmp = tcg_temp_new_i64(); 2192 tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, psw)); 2193 tcg_gen_ori_i64(tmp, tmp, a->i); 2194 gen_helper_swap_system_mask(tmp, tcg_env, tmp); 2195 save_gpr(ctx, a->t, tmp); 2196 2197 /* Exit the TB to recognize new interrupts, e.g. PSW_I. */ 2198 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT; 2199 return nullify_end(ctx); 2200 #endif 2201 } 2202 2203 static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a) 2204 { 2205 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); 2206 #ifndef CONFIG_USER_ONLY 2207 TCGv_i64 tmp, reg; 2208 nullify_over(ctx); 2209 2210 reg = load_gpr(ctx, a->r); 2211 tmp = tcg_temp_new_i64(); 2212 gen_helper_swap_system_mask(tmp, tcg_env, reg); 2213 2214 /* Exit the TB to recognize new interrupts. */ 2215 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT; 2216 return nullify_end(ctx); 2217 #endif 2218 } 2219 2220 static bool do_rfi(DisasContext *ctx, bool rfi_r) 2221 { 2222 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); 2223 #ifndef CONFIG_USER_ONLY 2224 nullify_over(ctx); 2225 2226 if (rfi_r) { 2227 gen_helper_rfi_r(tcg_env); 2228 } else { 2229 gen_helper_rfi(tcg_env); 2230 } 2231 /* Exit the TB to recognize new interrupts. 
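       The rfi helper has already reloaded the front and back of the
       IA queue, so exit to the main loop rather than chaining.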
*/ 2232 tcg_gen_exit_tb(NULL, 0); 2233 ctx->base.is_jmp = DISAS_NORETURN; 2234 2235 return nullify_end(ctx); 2236 #endif 2237 } 2238 2239 static bool trans_rfi(DisasContext *ctx, arg_rfi *a) 2240 { 2241 return do_rfi(ctx, false); 2242 } 2243 2244 static bool trans_rfi_r(DisasContext *ctx, arg_rfi_r *a) 2245 { 2246 return do_rfi(ctx, true); 2247 } 2248 2249 static bool trans_halt(DisasContext *ctx, arg_halt *a) 2250 { 2251 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); 2252 #ifndef CONFIG_USER_ONLY 2253 nullify_over(ctx); 2254 gen_helper_halt(tcg_env); 2255 ctx->base.is_jmp = DISAS_NORETURN; 2256 return nullify_end(ctx); 2257 #endif 2258 } 2259 2260 static bool trans_reset(DisasContext *ctx, arg_reset *a) 2261 { 2262 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); 2263 #ifndef CONFIG_USER_ONLY 2264 nullify_over(ctx); 2265 gen_helper_reset(tcg_env); 2266 ctx->base.is_jmp = DISAS_NORETURN; 2267 return nullify_end(ctx); 2268 #endif 2269 } 2270 2271 static bool trans_getshadowregs(DisasContext *ctx, arg_getshadowregs *a) 2272 { 2273 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); 2274 #ifndef CONFIG_USER_ONLY 2275 nullify_over(ctx); 2276 gen_helper_getshadowregs(tcg_env); 2277 return nullify_end(ctx); 2278 #endif 2279 } 2280 2281 static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a) 2282 { 2283 if (a->m) { 2284 TCGv_i64 dest = dest_gpr(ctx, a->b); 2285 TCGv_i64 src1 = load_gpr(ctx, a->b); 2286 TCGv_i64 src2 = load_gpr(ctx, a->x); 2287 2288 /* The only thing we need to do is the base register modification. */ 2289 tcg_gen_add_i64(dest, src1, src2); 2290 save_gpr(ctx, a->b, dest); 2291 } 2292 cond_free(&ctx->null_cond); 2293 return true; 2294 } 2295 2296 static bool trans_probe(DisasContext *ctx, arg_probe *a) 2297 { 2298 TCGv_i64 dest, ofs; 2299 TCGv_i32 level, want; 2300 TCGv_i64 addr; 2301 2302 nullify_over(ctx); 2303 2304 dest = dest_gpr(ctx, a->t); 2305 form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false); 2306 2307 if (a->imm) { 2308 level = tcg_constant_i32(a->ri & 3); 2309 } else { 2310 level = tcg_temp_new_i32(); 2311 tcg_gen_extrl_i64_i32(level, load_gpr(ctx, a->ri)); 2312 tcg_gen_andi_i32(level, level, 3); 2313 } 2314 want = tcg_constant_i32(a->write ? PAGE_WRITE : PAGE_READ); 2315 2316 gen_helper_probe(dest, tcg_env, addr, level, want); 2317 2318 save_gpr(ctx, a->t, dest); 2319 return nullify_end(ctx); 2320 } 2321 2322 static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a) 2323 { 2324 if (ctx->is_pa20) { 2325 return false; 2326 } 2327 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); 2328 #ifndef CONFIG_USER_ONLY 2329 TCGv_i64 addr; 2330 TCGv_i64 ofs, reg; 2331 2332 nullify_over(ctx); 2333 2334 form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false); 2335 reg = load_gpr(ctx, a->r); 2336 if (a->addr) { 2337 gen_helper_itlba_pa11(tcg_env, addr, reg); 2338 } else { 2339 gen_helper_itlbp_pa11(tcg_env, addr, reg); 2340 } 2341 2342 /* Exit TB for TLB change if mmu is enabled. */ 2343 if (ctx->tb_flags & PSW_C) { 2344 ctx->base.is_jmp = DISAS_IAQ_N_STALE; 2345 } 2346 return nullify_end(ctx); 2347 #endif 2348 } 2349 2350 static bool do_pxtlb(DisasContext *ctx, arg_ldst *a, bool local) 2351 { 2352 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); 2353 #ifndef CONFIG_USER_ONLY 2354 TCGv_i64 addr; 2355 TCGv_i64 ofs; 2356 2357 nullify_over(ctx); 2358 2359 form_gva(ctx, &addr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false); 2360 2361 /* 2362 * Page align now, rather than later, so that we can add in the 2363 * page_size field from pa2.0 from the low 4 bits of GR[b]. 
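     * The deposit below then re-inserts GR[b] bits [3:0] into the
     * just-cleared low bits of the page-aligned address.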
2364 */ 2365 tcg_gen_andi_i64(addr, addr, TARGET_PAGE_MASK); 2366 if (ctx->is_pa20) { 2367 tcg_gen_deposit_i64(addr, addr, load_gpr(ctx, a->b), 0, 4); 2368 } 2369 2370 if (local) { 2371 gen_helper_ptlb_l(tcg_env, addr); 2372 } else { 2373 gen_helper_ptlb(tcg_env, addr); 2374 } 2375 2376 if (a->m) { 2377 save_gpr(ctx, a->b, ofs); 2378 } 2379 2380 /* Exit TB for TLB change if mmu is enabled. */ 2381 if (ctx->tb_flags & PSW_C) { 2382 ctx->base.is_jmp = DISAS_IAQ_N_STALE; 2383 } 2384 return nullify_end(ctx); 2385 #endif 2386 } 2387 2388 static bool trans_pxtlb(DisasContext *ctx, arg_ldst *a) 2389 { 2390 return do_pxtlb(ctx, a, false); 2391 } 2392 2393 static bool trans_pxtlb_l(DisasContext *ctx, arg_ldst *a) 2394 { 2395 return ctx->is_pa20 && do_pxtlb(ctx, a, true); 2396 } 2397 2398 static bool trans_pxtlbe(DisasContext *ctx, arg_ldst *a) 2399 { 2400 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); 2401 #ifndef CONFIG_USER_ONLY 2402 nullify_over(ctx); 2403 2404 trans_nop_addrx(ctx, a); 2405 gen_helper_ptlbe(tcg_env); 2406 2407 /* Exit TB for TLB change if mmu is enabled. */ 2408 if (ctx->tb_flags & PSW_C) { 2409 ctx->base.is_jmp = DISAS_IAQ_N_STALE; 2410 } 2411 return nullify_end(ctx); 2412 #endif 2413 } 2414 2415 /* 2416 * Implement the pcxl and pcxl2 Fast TLB Insert instructions. 2417 * See 2418 * https://parisc.wiki.kernel.org/images-parisc/a/a9/Pcxl2_ers.pdf 2419 * page 13-9 (195/206) 2420 */ 2421 static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a) 2422 { 2423 if (ctx->is_pa20) { 2424 return false; 2425 } 2426 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); 2427 #ifndef CONFIG_USER_ONLY 2428 TCGv_i64 addr, atl, stl; 2429 TCGv_i64 reg; 2430 2431 nullify_over(ctx); 2432 2433 /* 2434 * FIXME: 2435 * if (not (pcxl or pcxl2)) 2436 * return gen_illegal(ctx); 2437 */ 2438 2439 atl = tcg_temp_new_i64(); 2440 stl = tcg_temp_new_i64(); 2441 addr = tcg_temp_new_i64(); 2442 2443 tcg_gen_ld32u_i64(stl, tcg_env, 2444 a->data ? offsetof(CPUHPPAState, cr[CR_ISR]) 2445 : offsetof(CPUHPPAState, cr[CR_IIASQ])); 2446 tcg_gen_ld32u_i64(atl, tcg_env, 2447 a->data ? offsetof(CPUHPPAState, cr[CR_IOR]) 2448 : offsetof(CPUHPPAState, cr[CR_IIAOQ])); 2449 tcg_gen_shli_i64(stl, stl, 32); 2450 tcg_gen_or_i64(addr, atl, stl); 2451 2452 reg = load_gpr(ctx, a->r); 2453 if (a->addr) { 2454 gen_helper_itlba_pa11(tcg_env, addr, reg); 2455 } else { 2456 gen_helper_itlbp_pa11(tcg_env, addr, reg); 2457 } 2458 2459 /* Exit TB for TLB change if mmu is enabled. */ 2460 if (ctx->tb_flags & PSW_C) { 2461 ctx->base.is_jmp = DISAS_IAQ_N_STALE; 2462 } 2463 return nullify_end(ctx); 2464 #endif 2465 } 2466 2467 static bool trans_ixtlbt(DisasContext *ctx, arg_ixtlbt *a) 2468 { 2469 if (!ctx->is_pa20) { 2470 return false; 2471 } 2472 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); 2473 #ifndef CONFIG_USER_ONLY 2474 nullify_over(ctx); 2475 { 2476 TCGv_i64 src1 = load_gpr(ctx, a->r1); 2477 TCGv_i64 src2 = load_gpr(ctx, a->r2); 2478 2479 if (a->data) { 2480 gen_helper_idtlbt_pa20(tcg_env, src1, src2); 2481 } else { 2482 gen_helper_iitlbt_pa20(tcg_env, src1, src2); 2483 } 2484 } 2485 /* Exit TB for TLB change if mmu is enabled. 
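       The purge may have removed the translation under which the
       currently executing code itself runs.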
*/ 2486 if (ctx->tb_flags & PSW_C) { 2487 ctx->base.is_jmp = DISAS_IAQ_N_STALE; 2488 } 2489 return nullify_end(ctx); 2490 #endif 2491 } 2492 2493 static bool trans_lpa(DisasContext *ctx, arg_ldst *a) 2494 { 2495 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); 2496 #ifndef CONFIG_USER_ONLY 2497 TCGv_i64 vaddr; 2498 TCGv_i64 ofs, paddr; 2499 2500 nullify_over(ctx); 2501 2502 form_gva(ctx, &vaddr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false); 2503 2504 paddr = tcg_temp_new_i64(); 2505 gen_helper_lpa(paddr, tcg_env, vaddr); 2506 2507 /* Note that physical address result overrides base modification. */ 2508 if (a->m) { 2509 save_gpr(ctx, a->b, ofs); 2510 } 2511 save_gpr(ctx, a->t, paddr); 2512 2513 return nullify_end(ctx); 2514 #endif 2515 } 2516 2517 static bool trans_lci(DisasContext *ctx, arg_lci *a) 2518 { 2519 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); 2520 2521 /* The Coherence Index is an implementation-defined function of the 2522 physical address. Two addresses with the same CI have a coherent 2523 view of the cache. Our implementation is to return 0 for all, 2524 since the entire address space is coherent. */ 2525 save_gpr(ctx, a->t, ctx->zero); 2526 2527 cond_free(&ctx->null_cond); 2528 return true; 2529 } 2530 2531 static bool trans_add(DisasContext *ctx, arg_rrr_cf_d_sh *a) 2532 { 2533 return do_add_reg(ctx, a, false, false, false, false); 2534 } 2535 2536 static bool trans_add_l(DisasContext *ctx, arg_rrr_cf_d_sh *a) 2537 { 2538 return do_add_reg(ctx, a, true, false, false, false); 2539 } 2540 2541 static bool trans_add_tsv(DisasContext *ctx, arg_rrr_cf_d_sh *a) 2542 { 2543 return do_add_reg(ctx, a, false, true, false, false); 2544 } 2545 2546 static bool trans_add_c(DisasContext *ctx, arg_rrr_cf_d_sh *a) 2547 { 2548 return do_add_reg(ctx, a, false, false, false, true); 2549 } 2550 2551 static bool trans_add_c_tsv(DisasContext *ctx, arg_rrr_cf_d_sh *a) 2552 { 2553 return do_add_reg(ctx, a, false, true, false, true); 2554 } 2555 2556 static bool trans_sub(DisasContext *ctx, arg_rrr_cf_d *a) 2557 { 2558 return do_sub_reg(ctx, a, false, false, false); 2559 } 2560 2561 static bool trans_sub_tsv(DisasContext *ctx, arg_rrr_cf_d *a) 2562 { 2563 return do_sub_reg(ctx, a, true, false, false); 2564 } 2565 2566 static bool trans_sub_tc(DisasContext *ctx, arg_rrr_cf_d *a) 2567 { 2568 return do_sub_reg(ctx, a, false, false, true); 2569 } 2570 2571 static bool trans_sub_tsv_tc(DisasContext *ctx, arg_rrr_cf_d *a) 2572 { 2573 return do_sub_reg(ctx, a, true, false, true); 2574 } 2575 2576 static bool trans_sub_b(DisasContext *ctx, arg_rrr_cf_d *a) 2577 { 2578 return do_sub_reg(ctx, a, false, true, false); 2579 } 2580 2581 static bool trans_sub_b_tsv(DisasContext *ctx, arg_rrr_cf_d *a) 2582 { 2583 return do_sub_reg(ctx, a, true, true, false); 2584 } 2585 2586 static bool trans_andcm(DisasContext *ctx, arg_rrr_cf_d *a) 2587 { 2588 return do_log_reg(ctx, a, tcg_gen_andc_i64); 2589 } 2590 2591 static bool trans_and(DisasContext *ctx, arg_rrr_cf_d *a) 2592 { 2593 return do_log_reg(ctx, a, tcg_gen_and_i64); 2594 } 2595 2596 static bool trans_or(DisasContext *ctx, arg_rrr_cf_d *a) 2597 { 2598 if (a->cf == 0) { 2599 unsigned r2 = a->r2; 2600 unsigned r1 = a->r1; 2601 unsigned rt = a->t; 2602 2603 if (rt == 0) { /* NOP */ 2604 cond_free(&ctx->null_cond); 2605 return true; 2606 } 2607 if (r2 == 0) { /* COPY */ 2608 if (r1 == 0) { 2609 TCGv_i64 dest = dest_gpr(ctx, rt); 2610 tcg_gen_movi_i64(dest, 0); 2611 save_gpr(ctx, rt, dest); 2612 } else { 2613 save_gpr(ctx, rt, cpu_gr[r1]); 2614 } 2615 cond_free(&ctx->null_cond); 2616 
return true; 2617 } 2618 #ifndef CONFIG_USER_ONLY 2619 /* These are QEMU extensions and are nops in the real architecture: 2620 * 2621 * or %r10,%r10,%r10 -- idle loop; wait for interrupt 2622 * or %r31,%r31,%r31 -- death loop; offline cpu 2623 * currently implemented as idle. 2624 */ 2625 if ((rt == 10 || rt == 31) && r1 == rt && r2 == rt) { /* PAUSE */ 2626 /* No need to check for supervisor, as userland can only pause 2627 until the next timer interrupt. */ 2628 nullify_over(ctx); 2629 2630 /* Advance the instruction queue. */ 2631 copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b); 2632 copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var); 2633 nullify_set(ctx, 0); 2634 2635 /* Tell the qemu main loop to halt until this cpu has work. */ 2636 tcg_gen_st_i32(tcg_constant_i32(1), tcg_env, 2637 offsetof(CPUState, halted) - offsetof(HPPACPU, env)); 2638 gen_excp_1(EXCP_HALTED); 2639 ctx->base.is_jmp = DISAS_NORETURN; 2640 2641 return nullify_end(ctx); 2642 } 2643 #endif 2644 } 2645 return do_log_reg(ctx, a, tcg_gen_or_i64); 2646 } 2647 2648 static bool trans_xor(DisasContext *ctx, arg_rrr_cf_d *a) 2649 { 2650 return do_log_reg(ctx, a, tcg_gen_xor_i64); 2651 } 2652 2653 static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf_d *a) 2654 { 2655 TCGv_i64 tcg_r1, tcg_r2; 2656 2657 if (a->cf) { 2658 nullify_over(ctx); 2659 } 2660 tcg_r1 = load_gpr(ctx, a->r1); 2661 tcg_r2 = load_gpr(ctx, a->r2); 2662 do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d); 2663 return nullify_end(ctx); 2664 } 2665 2666 static bool trans_uxor(DisasContext *ctx, arg_rrr_cf_d *a) 2667 { 2668 TCGv_i64 tcg_r1, tcg_r2; 2669 2670 if (a->cf) { 2671 nullify_over(ctx); 2672 } 2673 tcg_r1 = load_gpr(ctx, a->r1); 2674 tcg_r2 = load_gpr(ctx, a->r2); 2675 do_unit(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d, false, tcg_gen_xor_i64); 2676 return nullify_end(ctx); 2677 } 2678 2679 static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf_d *a, bool is_tc) 2680 { 2681 TCGv_i64 tcg_r1, tcg_r2, tmp; 2682 2683 if (a->cf) { 2684 nullify_over(ctx); 2685 } 2686 tcg_r1 = load_gpr(ctx, a->r1); 2687 tcg_r2 = load_gpr(ctx, a->r2); 2688 tmp = tcg_temp_new_i64(); 2689 tcg_gen_not_i64(tmp, tcg_r2); 2690 do_unit(ctx, a->t, tcg_r1, tmp, a->cf, a->d, is_tc, tcg_gen_add_i64); 2691 return nullify_end(ctx); 2692 } 2693 2694 static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf_d *a) 2695 { 2696 return do_uaddcm(ctx, a, false); 2697 } 2698 2699 static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf_d *a) 2700 { 2701 return do_uaddcm(ctx, a, true); 2702 } 2703 2704 static bool do_dcor(DisasContext *ctx, arg_rr_cf_d *a, bool is_i) 2705 { 2706 TCGv_i64 tmp; 2707 2708 nullify_over(ctx); 2709 2710 tmp = tcg_temp_new_i64(); 2711 tcg_gen_shri_i64(tmp, cpu_psw_cb, 3); 2712 if (!is_i) { 2713 tcg_gen_not_i64(tmp, tmp); 2714 } 2715 tcg_gen_andi_i64(tmp, tmp, (uint64_t)0x1111111111111111ull); 2716 tcg_gen_muli_i64(tmp, tmp, 6); 2717 do_unit(ctx, a->t, load_gpr(ctx, a->r), tmp, a->cf, a->d, false, 2718 is_i ? 
tcg_gen_add_i64 : tcg_gen_sub_i64); 2719 return nullify_end(ctx); 2720 } 2721 2722 static bool trans_dcor(DisasContext *ctx, arg_rr_cf_d *a) 2723 { 2724 return do_dcor(ctx, a, false); 2725 } 2726 2727 static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf_d *a) 2728 { 2729 return do_dcor(ctx, a, true); 2730 } 2731 2732 static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a) 2733 { 2734 TCGv_i64 dest, add1, add2, addc, in1, in2; 2735 TCGv_i64 cout; 2736 2737 nullify_over(ctx); 2738 2739 in1 = load_gpr(ctx, a->r1); 2740 in2 = load_gpr(ctx, a->r2); 2741 2742 add1 = tcg_temp_new_i64(); 2743 add2 = tcg_temp_new_i64(); 2744 addc = tcg_temp_new_i64(); 2745 dest = tcg_temp_new_i64(); 2746 2747 /* Form R1 << 1 | PSW[CB]{8}. */ 2748 tcg_gen_add_i64(add1, in1, in1); 2749 tcg_gen_add_i64(add1, add1, get_psw_carry(ctx, false)); 2750 2751 /* 2752 * Add or subtract R2, depending on PSW[V]. Proper computation of 2753 * carry requires that we subtract via + ~R2 + 1, as described in 2754 * the manual. By extracting and masking V, we can produce the 2755 * proper inputs to the addition without movcond. 2756 */ 2757 tcg_gen_sextract_i64(addc, cpu_psw_v, 31, 1); 2758 tcg_gen_xor_i64(add2, in2, addc); 2759 tcg_gen_andi_i64(addc, addc, 1); 2760 2761 tcg_gen_add2_i64(dest, cpu_psw_cb_msb, add1, ctx->zero, add2, ctx->zero); 2762 tcg_gen_add2_i64(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, 2763 addc, ctx->zero); 2764 2765 /* Write back the result register. */ 2766 save_gpr(ctx, a->t, dest); 2767 2768 /* Write back PSW[CB]. */ 2769 tcg_gen_xor_i64(cpu_psw_cb, add1, add2); 2770 tcg_gen_xor_i64(cpu_psw_cb, cpu_psw_cb, dest); 2771 2772 /* Write back PSW[V] for the division step. */ 2773 cout = get_psw_carry(ctx, false); 2774 tcg_gen_neg_i64(cpu_psw_v, cout); 2775 tcg_gen_xor_i64(cpu_psw_v, cpu_psw_v, in2); 2776 2777 /* Install the new nullification. */ 2778 if (a->cf) { 2779 TCGv_i64 sv = NULL; 2780 if (cond_need_sv(a->cf >> 1)) { 2781 /* ??? The lshift is supposed to contribute to overflow. 
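           As written, do_add_sv sees only the post-shift addend, so
           any overflow from the shift itself goes undetected.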
*/ 2782 sv = do_add_sv(ctx, dest, add1, add2); 2783 } 2784 ctx->null_cond = do_cond(ctx, a->cf, false, dest, cout, sv); 2785 } 2786 2787 return nullify_end(ctx); 2788 } 2789 2790 static bool trans_addi(DisasContext *ctx, arg_rri_cf *a) 2791 { 2792 return do_add_imm(ctx, a, false, false); 2793 } 2794 2795 static bool trans_addi_tsv(DisasContext *ctx, arg_rri_cf *a) 2796 { 2797 return do_add_imm(ctx, a, true, false); 2798 } 2799 2800 static bool trans_addi_tc(DisasContext *ctx, arg_rri_cf *a) 2801 { 2802 return do_add_imm(ctx, a, false, true); 2803 } 2804 2805 static bool trans_addi_tc_tsv(DisasContext *ctx, arg_rri_cf *a) 2806 { 2807 return do_add_imm(ctx, a, true, true); 2808 } 2809 2810 static bool trans_subi(DisasContext *ctx, arg_rri_cf *a) 2811 { 2812 return do_sub_imm(ctx, a, false); 2813 } 2814 2815 static bool trans_subi_tsv(DisasContext *ctx, arg_rri_cf *a) 2816 { 2817 return do_sub_imm(ctx, a, true); 2818 } 2819 2820 static bool trans_cmpiclr(DisasContext *ctx, arg_rri_cf_d *a) 2821 { 2822 TCGv_i64 tcg_im, tcg_r2; 2823 2824 if (a->cf) { 2825 nullify_over(ctx); 2826 } 2827 2828 tcg_im = tcg_constant_i64(a->i); 2829 tcg_r2 = load_gpr(ctx, a->r); 2830 do_cmpclr(ctx, a->t, tcg_im, tcg_r2, a->cf, a->d); 2831 2832 return nullify_end(ctx); 2833 } 2834 2835 static bool do_multimedia(DisasContext *ctx, arg_rrr *a, 2836 void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64)) 2837 { 2838 TCGv_i64 r1, r2, dest; 2839 2840 if (!ctx->is_pa20) { 2841 return false; 2842 } 2843 2844 nullify_over(ctx); 2845 2846 r1 = load_gpr(ctx, a->r1); 2847 r2 = load_gpr(ctx, a->r2); 2848 dest = dest_gpr(ctx, a->t); 2849 2850 fn(dest, r1, r2); 2851 save_gpr(ctx, a->t, dest); 2852 2853 return nullify_end(ctx); 2854 } 2855 2856 static bool do_multimedia_sh(DisasContext *ctx, arg_rri *a, 2857 void (*fn)(TCGv_i64, TCGv_i64, int64_t)) 2858 { 2859 TCGv_i64 r, dest; 2860 2861 if (!ctx->is_pa20) { 2862 return false; 2863 } 2864 2865 nullify_over(ctx); 2866 2867 r = load_gpr(ctx, a->r); 2868 dest = dest_gpr(ctx, a->t); 2869 2870 fn(dest, r, a->i); 2871 save_gpr(ctx, a->t, dest); 2872 2873 return nullify_end(ctx); 2874 } 2875 2876 static bool do_multimedia_shadd(DisasContext *ctx, arg_rrr_sh *a, 2877 void (*fn)(TCGv_i64, TCGv_i64, 2878 TCGv_i64, TCGv_i32)) 2879 { 2880 TCGv_i64 r1, r2, dest; 2881 2882 if (!ctx->is_pa20) { 2883 return false; 2884 } 2885 2886 nullify_over(ctx); 2887 2888 r1 = load_gpr(ctx, a->r1); 2889 r2 = load_gpr(ctx, a->r2); 2890 dest = dest_gpr(ctx, a->t); 2891 2892 fn(dest, r1, r2, tcg_constant_i32(a->sh)); 2893 save_gpr(ctx, a->t, dest); 2894 2895 return nullify_end(ctx); 2896 } 2897 2898 static bool trans_hadd(DisasContext *ctx, arg_rrr *a) 2899 { 2900 return do_multimedia(ctx, a, tcg_gen_vec_add16_i64); 2901 } 2902 2903 static bool trans_hadd_ss(DisasContext *ctx, arg_rrr *a) 2904 { 2905 return do_multimedia(ctx, a, gen_helper_hadd_ss); 2906 } 2907 2908 static bool trans_hadd_us(DisasContext *ctx, arg_rrr *a) 2909 { 2910 return do_multimedia(ctx, a, gen_helper_hadd_us); 2911 } 2912 2913 static bool trans_havg(DisasContext *ctx, arg_rrr *a) 2914 { 2915 return do_multimedia(ctx, a, gen_helper_havg); 2916 } 2917 2918 static bool trans_hshl(DisasContext *ctx, arg_rri *a) 2919 { 2920 return do_multimedia_sh(ctx, a, tcg_gen_vec_shl16i_i64); 2921 } 2922 2923 static bool trans_hshr_s(DisasContext *ctx, arg_rri *a) 2924 { 2925 return do_multimedia_sh(ctx, a, tcg_gen_vec_sar16i_i64); 2926 } 2927 2928 static bool trans_hshr_u(DisasContext *ctx, arg_rri *a) 2929 { 2930 return do_multimedia_sh(ctx, a, tcg_gen_vec_shr16i_i64); 
}

static bool trans_hshladd(DisasContext *ctx, arg_rrr_sh *a)
{
    return do_multimedia_shadd(ctx, a, gen_helper_hshladd);
}

static bool trans_hshradd(DisasContext *ctx, arg_rrr_sh *a)
{
    return do_multimedia_shadd(ctx, a, gen_helper_hshradd);
}

static bool trans_hsub(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, tcg_gen_vec_sub16_i64);
}

static bool trans_hsub_ss(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, gen_helper_hsub_ss);
}

static bool trans_hsub_us(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, gen_helper_hsub_us);
}

static void gen_mixh_l(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
{
    uint64_t mask = 0xffff0000ffff0000ull;
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_andi_i64(tmp, r2, mask);
    tcg_gen_andi_i64(dst, r1, mask);
    tcg_gen_shri_i64(tmp, tmp, 16);
    tcg_gen_or_i64(dst, dst, tmp);
}

static bool trans_mixh_l(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, gen_mixh_l);
}

static void gen_mixh_r(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
{
    uint64_t mask = 0x0000ffff0000ffffull;
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_andi_i64(tmp, r1, mask);
    tcg_gen_andi_i64(dst, r2, mask);
    tcg_gen_shli_i64(tmp, tmp, 16);
    tcg_gen_or_i64(dst, dst, tmp);
}

static bool trans_mixh_r(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, gen_mixh_r);
}

static void gen_mixw_l(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_shri_i64(tmp, r2, 32);
    tcg_gen_deposit_i64(dst, r1, tmp, 0, 32);
}

static bool trans_mixw_l(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, gen_mixw_l);
}

static void gen_mixw_r(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
{
    tcg_gen_deposit_i64(dst, r2, r1, 32, 32);
}

static bool trans_mixw_r(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, gen_mixw_r);
}

static bool trans_permh(DisasContext *ctx, arg_permh *a)
{
    TCGv_i64 r, t0, t1, t2, t3;

    if (!ctx->is_pa20) {
        return false;
    }

    nullify_over(ctx);

    r = load_gpr(ctx, a->r1);
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    t3 = tcg_temp_new_i64();

    tcg_gen_extract_i64(t0, r, (3 - a->c0) * 16, 16);
    tcg_gen_extract_i64(t1, r, (3 - a->c1) * 16, 16);
    tcg_gen_extract_i64(t2, r, (3 - a->c2) * 16, 16);
    tcg_gen_extract_i64(t3, r, (3 - a->c3) * 16, 16);

    tcg_gen_deposit_i64(t0, t1, t0, 16, 48);
    tcg_gen_deposit_i64(t2, t3, t2, 16, 48);
    tcg_gen_deposit_i64(t0, t2, t0, 32, 32);

    save_gpr(ctx, a->t, t0);
    return nullify_end(ctx);
}

static bool trans_ld(DisasContext *ctx, arg_ldst *a)
{
    if (ctx->is_pa20) {
        /*
         * With pa20, LDB, LDH, LDW, LDD to %r0 are prefetches.
         * Any base modification still occurs.
         */
        if (a->t == 0) {
            return trans_nop_addrx(ctx, a);
        }
    } else if (a->size > MO_32) {
        return gen_illegal(ctx);
    }
    return do_load(ctx, a->t, a->b, a->x, a->scale ?
a->size : 0, 3056 a->disp, a->sp, a->m, a->size | MO_TE); 3057 } 3058 3059 static bool trans_st(DisasContext *ctx, arg_ldst *a) 3060 { 3061 assert(a->x == 0 && a->scale == 0); 3062 if (!ctx->is_pa20 && a->size > MO_32) { 3063 return gen_illegal(ctx); 3064 } 3065 return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE); 3066 } 3067 3068 static bool trans_ldc(DisasContext *ctx, arg_ldst *a) 3069 { 3070 MemOp mop = MO_TE | MO_ALIGN | a->size; 3071 TCGv_i64 dest, ofs; 3072 TCGv_i64 addr; 3073 3074 if (!ctx->is_pa20 && a->size > MO_32) { 3075 return gen_illegal(ctx); 3076 } 3077 3078 nullify_over(ctx); 3079 3080 if (a->m) { 3081 /* Base register modification. Make sure if RT == RB, 3082 we see the result of the load. */ 3083 dest = tcg_temp_new_i64(); 3084 } else { 3085 dest = dest_gpr(ctx, a->t); 3086 } 3087 3088 form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? a->size : 0, 3089 a->disp, a->sp, a->m, MMU_DISABLED(ctx)); 3090 3091 /* 3092 * For hppa1.1, LDCW is undefined unless aligned mod 16. 3093 * However actual hardware succeeds with aligned mod 4. 3094 * Detect this case and log a GUEST_ERROR. 3095 * 3096 * TODO: HPPA64 relaxes the over-alignment requirement 3097 * with the ,co completer. 3098 */ 3099 gen_helper_ldc_check(addr); 3100 3101 tcg_gen_atomic_xchg_i64(dest, addr, ctx->zero, ctx->mmu_idx, mop); 3102 3103 if (a->m) { 3104 save_gpr(ctx, a->b, ofs); 3105 } 3106 save_gpr(ctx, a->t, dest); 3107 3108 return nullify_end(ctx); 3109 } 3110 3111 static bool trans_stby(DisasContext *ctx, arg_stby *a) 3112 { 3113 TCGv_i64 ofs, val; 3114 TCGv_i64 addr; 3115 3116 nullify_over(ctx); 3117 3118 form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m, 3119 MMU_DISABLED(ctx)); 3120 val = load_gpr(ctx, a->r); 3121 if (a->a) { 3122 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) { 3123 gen_helper_stby_e_parallel(tcg_env, addr, val); 3124 } else { 3125 gen_helper_stby_e(tcg_env, addr, val); 3126 } 3127 } else { 3128 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) { 3129 gen_helper_stby_b_parallel(tcg_env, addr, val); 3130 } else { 3131 gen_helper_stby_b(tcg_env, addr, val); 3132 } 3133 } 3134 if (a->m) { 3135 tcg_gen_andi_i64(ofs, ofs, ~3); 3136 save_gpr(ctx, a->b, ofs); 3137 } 3138 3139 return nullify_end(ctx); 3140 } 3141 3142 static bool trans_stdby(DisasContext *ctx, arg_stby *a) 3143 { 3144 TCGv_i64 ofs, val; 3145 TCGv_i64 addr; 3146 3147 if (!ctx->is_pa20) { 3148 return false; 3149 } 3150 nullify_over(ctx); 3151 3152 form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m, 3153 MMU_DISABLED(ctx)); 3154 val = load_gpr(ctx, a->r); 3155 if (a->a) { 3156 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) { 3157 gen_helper_stdby_e_parallel(tcg_env, addr, val); 3158 } else { 3159 gen_helper_stdby_e(tcg_env, addr, val); 3160 } 3161 } else { 3162 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) { 3163 gen_helper_stdby_b_parallel(tcg_env, addr, val); 3164 } else { 3165 gen_helper_stdby_b(tcg_env, addr, val); 3166 } 3167 } 3168 if (a->m) { 3169 tcg_gen_andi_i64(ofs, ofs, ~7); 3170 save_gpr(ctx, a->b, ofs); 3171 } 3172 3173 return nullify_end(ctx); 3174 } 3175 3176 static bool trans_lda(DisasContext *ctx, arg_ldst *a) 3177 { 3178 int hold_mmu_idx = ctx->mmu_idx; 3179 3180 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); 3181 ctx->mmu_idx = ctx->tb_flags & PSW_W ? 
MMU_ABS_W_IDX : MMU_ABS_IDX; 3182 trans_ld(ctx, a); 3183 ctx->mmu_idx = hold_mmu_idx; 3184 return true; 3185 } 3186 3187 static bool trans_sta(DisasContext *ctx, arg_ldst *a) 3188 { 3189 int hold_mmu_idx = ctx->mmu_idx; 3190 3191 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); 3192 ctx->mmu_idx = ctx->tb_flags & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX; 3193 trans_st(ctx, a); 3194 ctx->mmu_idx = hold_mmu_idx; 3195 return true; 3196 } 3197 3198 static bool trans_ldil(DisasContext *ctx, arg_ldil *a) 3199 { 3200 TCGv_i64 tcg_rt = dest_gpr(ctx, a->t); 3201 3202 tcg_gen_movi_i64(tcg_rt, a->i); 3203 save_gpr(ctx, a->t, tcg_rt); 3204 cond_free(&ctx->null_cond); 3205 return true; 3206 } 3207 3208 static bool trans_addil(DisasContext *ctx, arg_addil *a) 3209 { 3210 TCGv_i64 tcg_rt = load_gpr(ctx, a->r); 3211 TCGv_i64 tcg_r1 = dest_gpr(ctx, 1); 3212 3213 tcg_gen_addi_i64(tcg_r1, tcg_rt, a->i); 3214 save_gpr(ctx, 1, tcg_r1); 3215 cond_free(&ctx->null_cond); 3216 return true; 3217 } 3218 3219 static bool trans_ldo(DisasContext *ctx, arg_ldo *a) 3220 { 3221 TCGv_i64 tcg_rt = dest_gpr(ctx, a->t); 3222 3223 /* Special case rb == 0, for the LDI pseudo-op. 3224 The COPY pseudo-op is handled for free within tcg_gen_addi_i64. */ 3225 if (a->b == 0) { 3226 tcg_gen_movi_i64(tcg_rt, a->i); 3227 } else { 3228 tcg_gen_addi_i64(tcg_rt, cpu_gr[a->b], a->i); 3229 } 3230 save_gpr(ctx, a->t, tcg_rt); 3231 cond_free(&ctx->null_cond); 3232 return true; 3233 } 3234 3235 static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_i64 in1, 3236 unsigned c, unsigned f, bool d, unsigned n, int disp) 3237 { 3238 TCGv_i64 dest, in2, sv; 3239 DisasCond cond; 3240 3241 in2 = load_gpr(ctx, r); 3242 dest = tcg_temp_new_i64(); 3243 3244 tcg_gen_sub_i64(dest, in1, in2); 3245 3246 sv = NULL; 3247 if (cond_need_sv(c)) { 3248 sv = do_sub_sv(ctx, dest, in1, in2); 3249 } 3250 3251 cond = do_sub_cond(ctx, c * 2 + f, d, dest, in1, in2, sv); 3252 return do_cbranch(ctx, disp, n, &cond); 3253 } 3254 3255 static bool trans_cmpb(DisasContext *ctx, arg_cmpb *a) 3256 { 3257 if (!ctx->is_pa20 && a->d) { 3258 return false; 3259 } 3260 nullify_over(ctx); 3261 return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1), 3262 a->c, a->f, a->d, a->n, a->disp); 3263 } 3264 3265 static bool trans_cmpbi(DisasContext *ctx, arg_cmpbi *a) 3266 { 3267 if (!ctx->is_pa20 && a->d) { 3268 return false; 3269 } 3270 nullify_over(ctx); 3271 return do_cmpb(ctx, a->r, tcg_constant_i64(a->i), 3272 a->c, a->f, a->d, a->n, a->disp); 3273 } 3274 3275 static bool do_addb(DisasContext *ctx, unsigned r, TCGv_i64 in1, 3276 unsigned c, unsigned f, unsigned n, int disp) 3277 { 3278 TCGv_i64 dest, in2, sv, cb_cond; 3279 DisasCond cond; 3280 bool d = false; 3281 3282 /* 3283 * For hppa64, the ADDB conditions change with PSW.W, 3284 * dropping ZNV, SV, OD in favor of double-word EQ, LT, LE. 
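     * Thus condition values 5-7 are remapped below onto the
     * doubleword forms of conditions 1-3 (=, <, <=).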
3285 */ 3286 if (ctx->tb_flags & PSW_W) { 3287 d = c >= 5; 3288 if (d) { 3289 c &= 3; 3290 } 3291 } 3292 3293 in2 = load_gpr(ctx, r); 3294 dest = tcg_temp_new_i64(); 3295 sv = NULL; 3296 cb_cond = NULL; 3297 3298 if (cond_need_cb(c)) { 3299 TCGv_i64 cb = tcg_temp_new_i64(); 3300 TCGv_i64 cb_msb = tcg_temp_new_i64(); 3301 3302 tcg_gen_movi_i64(cb_msb, 0); 3303 tcg_gen_add2_i64(dest, cb_msb, in1, cb_msb, in2, cb_msb); 3304 tcg_gen_xor_i64(cb, in1, in2); 3305 tcg_gen_xor_i64(cb, cb, dest); 3306 cb_cond = get_carry(ctx, d, cb, cb_msb); 3307 } else { 3308 tcg_gen_add_i64(dest, in1, in2); 3309 } 3310 if (cond_need_sv(c)) { 3311 sv = do_add_sv(ctx, dest, in1, in2); 3312 } 3313 3314 cond = do_cond(ctx, c * 2 + f, d, dest, cb_cond, sv); 3315 save_gpr(ctx, r, dest); 3316 return do_cbranch(ctx, disp, n, &cond); 3317 } 3318 3319 static bool trans_addb(DisasContext *ctx, arg_addb *a) 3320 { 3321 nullify_over(ctx); 3322 return do_addb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp); 3323 } 3324 3325 static bool trans_addbi(DisasContext *ctx, arg_addbi *a) 3326 { 3327 nullify_over(ctx); 3328 return do_addb(ctx, a->r, tcg_constant_i64(a->i), a->c, a->f, a->n, a->disp); 3329 } 3330 3331 static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a) 3332 { 3333 TCGv_i64 tmp, tcg_r; 3334 DisasCond cond; 3335 3336 nullify_over(ctx); 3337 3338 tmp = tcg_temp_new_i64(); 3339 tcg_r = load_gpr(ctx, a->r); 3340 if (cond_need_ext(ctx, a->d)) { 3341 /* Force shift into [32,63] */ 3342 tcg_gen_ori_i64(tmp, cpu_sar, 32); 3343 tcg_gen_shl_i64(tmp, tcg_r, tmp); 3344 } else { 3345 tcg_gen_shl_i64(tmp, tcg_r, cpu_sar); 3346 } 3347 3348 cond = cond_make_0_tmp(a->c ? TCG_COND_GE : TCG_COND_LT, tmp); 3349 return do_cbranch(ctx, a->disp, a->n, &cond); 3350 } 3351 3352 static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a) 3353 { 3354 TCGv_i64 tmp, tcg_r; 3355 DisasCond cond; 3356 int p; 3357 3358 nullify_over(ctx); 3359 3360 tmp = tcg_temp_new_i64(); 3361 tcg_r = load_gpr(ctx, a->r); 3362 p = a->p | (cond_need_ext(ctx, a->d) ? 32 : 0); 3363 tcg_gen_shli_i64(tmp, tcg_r, p); 3364 3365 cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp); 3366 return do_cbranch(ctx, a->disp, a->n, &cond); 3367 } 3368 3369 static bool trans_movb(DisasContext *ctx, arg_movb *a) 3370 { 3371 TCGv_i64 dest; 3372 DisasCond cond; 3373 3374 nullify_over(ctx); 3375 3376 dest = dest_gpr(ctx, a->r2); 3377 if (a->r1 == 0) { 3378 tcg_gen_movi_i64(dest, 0); 3379 } else { 3380 tcg_gen_mov_i64(dest, cpu_gr[a->r1]); 3381 } 3382 3383 /* All MOVB conditions are 32-bit. */ 3384 cond = do_sed_cond(ctx, a->c, false, dest); 3385 return do_cbranch(ctx, a->disp, a->n, &cond); 3386 } 3387 3388 static bool trans_movbi(DisasContext *ctx, arg_movbi *a) 3389 { 3390 TCGv_i64 dest; 3391 DisasCond cond; 3392 3393 nullify_over(ctx); 3394 3395 dest = dest_gpr(ctx, a->r); 3396 tcg_gen_movi_i64(dest, a->i); 3397 3398 /* All MOVBI conditions are 32-bit. 
*/ 3399 cond = do_sed_cond(ctx, a->c, false, dest); 3400 return do_cbranch(ctx, a->disp, a->n, &cond); 3401 } 3402 3403 static bool trans_shrp_sar(DisasContext *ctx, arg_shrp_sar *a) 3404 { 3405 TCGv_i64 dest, src2; 3406 3407 if (!ctx->is_pa20 && a->d) { 3408 return false; 3409 } 3410 if (a->c) { 3411 nullify_over(ctx); 3412 } 3413 3414 dest = dest_gpr(ctx, a->t); 3415 src2 = load_gpr(ctx, a->r2); 3416 if (a->r1 == 0) { 3417 if (a->d) { 3418 tcg_gen_shr_i64(dest, src2, cpu_sar); 3419 } else { 3420 TCGv_i64 tmp = tcg_temp_new_i64(); 3421 3422 tcg_gen_ext32u_i64(dest, src2); 3423 tcg_gen_andi_i64(tmp, cpu_sar, 31); 3424 tcg_gen_shr_i64(dest, dest, tmp); 3425 } 3426 } else if (a->r1 == a->r2) { 3427 if (a->d) { 3428 tcg_gen_rotr_i64(dest, src2, cpu_sar); 3429 } else { 3430 TCGv_i32 t32 = tcg_temp_new_i32(); 3431 TCGv_i32 s32 = tcg_temp_new_i32(); 3432 3433 tcg_gen_extrl_i64_i32(t32, src2); 3434 tcg_gen_extrl_i64_i32(s32, cpu_sar); 3435 tcg_gen_andi_i32(s32, s32, 31); 3436 tcg_gen_rotr_i32(t32, t32, s32); 3437 tcg_gen_extu_i32_i64(dest, t32); 3438 } 3439 } else { 3440 TCGv_i64 src1 = load_gpr(ctx, a->r1); 3441 3442 if (a->d) { 3443 TCGv_i64 t = tcg_temp_new_i64(); 3444 TCGv_i64 n = tcg_temp_new_i64(); 3445 3446 tcg_gen_xori_i64(n, cpu_sar, 63); 3447 tcg_gen_shl_i64(t, src1, n); 3448 tcg_gen_shli_i64(t, t, 1); 3449 tcg_gen_shr_i64(dest, src2, cpu_sar); 3450 tcg_gen_or_i64(dest, dest, t); 3451 } else { 3452 TCGv_i64 t = tcg_temp_new_i64(); 3453 TCGv_i64 s = tcg_temp_new_i64(); 3454 3455 tcg_gen_concat32_i64(t, src2, src1); 3456 tcg_gen_andi_i64(s, cpu_sar, 31); 3457 tcg_gen_shr_i64(dest, t, s); 3458 } 3459 } 3460 save_gpr(ctx, a->t, dest); 3461 3462 /* Install the new nullification. */ 3463 cond_free(&ctx->null_cond); 3464 if (a->c) { 3465 ctx->null_cond = do_sed_cond(ctx, a->c, false, dest); 3466 } 3467 return nullify_end(ctx); 3468 } 3469 3470 static bool trans_shrp_imm(DisasContext *ctx, arg_shrp_imm *a) 3471 { 3472 unsigned width, sa; 3473 TCGv_i64 dest, t2; 3474 3475 if (!ctx->is_pa20 && a->d) { 3476 return false; 3477 } 3478 if (a->c) { 3479 nullify_over(ctx); 3480 } 3481 3482 width = a->d ? 64 : 32; 3483 sa = width - 1 - a->cpos; 3484 3485 dest = dest_gpr(ctx, a->t); 3486 t2 = load_gpr(ctx, a->r2); 3487 if (a->r1 == 0) { 3488 tcg_gen_extract_i64(dest, t2, sa, width - sa); 3489 } else if (width == TARGET_LONG_BITS) { 3490 tcg_gen_extract2_i64(dest, t2, cpu_gr[a->r1], sa); 3491 } else { 3492 assert(!a->d); 3493 if (a->r1 == a->r2) { 3494 TCGv_i32 t32 = tcg_temp_new_i32(); 3495 tcg_gen_extrl_i64_i32(t32, t2); 3496 tcg_gen_rotri_i32(t32, t32, sa); 3497 tcg_gen_extu_i32_i64(dest, t32); 3498 } else { 3499 tcg_gen_concat32_i64(dest, t2, cpu_gr[a->r1]); 3500 tcg_gen_extract_i64(dest, dest, sa, 32); 3501 } 3502 } 3503 save_gpr(ctx, a->t, dest); 3504 3505 /* Install the new nullification. */ 3506 cond_free(&ctx->null_cond); 3507 if (a->c) { 3508 ctx->null_cond = do_sed_cond(ctx, a->c, false, dest); 3509 } 3510 return nullify_end(ctx); 3511 } 3512 3513 static bool trans_extr_sar(DisasContext *ctx, arg_extr_sar *a) 3514 { 3515 unsigned widthm1 = a->d ? 63 : 31; 3516 TCGv_i64 dest, src, tmp; 3517 3518 if (!ctx->is_pa20 && a->d) { 3519 return false; 3520 } 3521 if (a->c) { 3522 nullify_over(ctx); 3523 } 3524 3525 dest = dest_gpr(ctx, a->t); 3526 src = load_gpr(ctx, a->r); 3527 tmp = tcg_temp_new_i64(); 3528 3529 /* Recall that SAR is using big-endian bit numbering. 
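       Masking with width-1 and xoring with width-1 computes
       (width - 1) - pos; e.g. SAR = 3 in a 32-bit extract yields
       a right shift of 31 - 3 = 28.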
*/ 3530 tcg_gen_andi_i64(tmp, cpu_sar, widthm1); 3531 tcg_gen_xori_i64(tmp, tmp, widthm1); 3532 3533 if (a->se) { 3534 if (!a->d) { 3535 tcg_gen_ext32s_i64(dest, src); 3536 src = dest; 3537 } 3538 tcg_gen_sar_i64(dest, src, tmp); 3539 tcg_gen_sextract_i64(dest, dest, 0, a->len); 3540 } else { 3541 if (!a->d) { 3542 tcg_gen_ext32u_i64(dest, src); 3543 src = dest; 3544 } 3545 tcg_gen_shr_i64(dest, src, tmp); 3546 tcg_gen_extract_i64(dest, dest, 0, a->len); 3547 } 3548 save_gpr(ctx, a->t, dest); 3549 3550 /* Install the new nullification. */ 3551 cond_free(&ctx->null_cond); 3552 if (a->c) { 3553 ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest); 3554 } 3555 return nullify_end(ctx); 3556 } 3557 3558 static bool trans_extr_imm(DisasContext *ctx, arg_extr_imm *a) 3559 { 3560 unsigned len, cpos, width; 3561 TCGv_i64 dest, src; 3562 3563 if (!ctx->is_pa20 && a->d) { 3564 return false; 3565 } 3566 if (a->c) { 3567 nullify_over(ctx); 3568 } 3569 3570 len = a->len; 3571 width = a->d ? 64 : 32; 3572 cpos = width - 1 - a->pos; 3573 if (cpos + len > width) { 3574 len = width - cpos; 3575 } 3576 3577 dest = dest_gpr(ctx, a->t); 3578 src = load_gpr(ctx, a->r); 3579 if (a->se) { 3580 tcg_gen_sextract_i64(dest, src, cpos, len); 3581 } else { 3582 tcg_gen_extract_i64(dest, src, cpos, len); 3583 } 3584 save_gpr(ctx, a->t, dest); 3585 3586 /* Install the new nullification. */ 3587 cond_free(&ctx->null_cond); 3588 if (a->c) { 3589 ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest); 3590 } 3591 return nullify_end(ctx); 3592 } 3593 3594 static bool trans_depi_imm(DisasContext *ctx, arg_depi_imm *a) 3595 { 3596 unsigned len, width; 3597 uint64_t mask0, mask1; 3598 TCGv_i64 dest; 3599 3600 if (!ctx->is_pa20 && a->d) { 3601 return false; 3602 } 3603 if (a->c) { 3604 nullify_over(ctx); 3605 } 3606 3607 len = a->len; 3608 width = a->d ? 64 : 32; 3609 if (a->cpos + len > width) { 3610 len = width - a->cpos; 3611 } 3612 3613 dest = dest_gpr(ctx, a->t); 3614 mask0 = deposit64(0, a->cpos, len, a->i); 3615 mask1 = deposit64(-1, a->cpos, len, a->i); 3616 3617 if (a->nz) { 3618 TCGv_i64 src = load_gpr(ctx, a->t); 3619 tcg_gen_andi_i64(dest, src, mask1); 3620 tcg_gen_ori_i64(dest, dest, mask0); 3621 } else { 3622 tcg_gen_movi_i64(dest, mask0); 3623 } 3624 save_gpr(ctx, a->t, dest); 3625 3626 /* Install the new nullification. */ 3627 cond_free(&ctx->null_cond); 3628 if (a->c) { 3629 ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest); 3630 } 3631 return nullify_end(ctx); 3632 } 3633 3634 static bool trans_dep_imm(DisasContext *ctx, arg_dep_imm *a) 3635 { 3636 unsigned rs = a->nz ? a->t : 0; 3637 unsigned len, width; 3638 TCGv_i64 dest, val; 3639 3640 if (!ctx->is_pa20 && a->d) { 3641 return false; 3642 } 3643 if (a->c) { 3644 nullify_over(ctx); 3645 } 3646 3647 len = a->len; 3648 width = a->d ? 64 : 32; 3649 if (a->cpos + len > width) { 3650 len = width - a->cpos; 3651 } 3652 3653 dest = dest_gpr(ctx, a->t); 3654 val = load_gpr(ctx, a->r); 3655 if (rs == 0) { 3656 tcg_gen_deposit_z_i64(dest, val, a->cpos, len); 3657 } else { 3658 tcg_gen_deposit_i64(dest, cpu_gr[rs], val, a->cpos, len); 3659 } 3660 save_gpr(ctx, a->t, dest); 3661 3662 /* Install the new nullification. */ 3663 cond_free(&ctx->null_cond); 3664 if (a->c) { 3665 ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest); 3666 } 3667 return nullify_end(ctx); 3668 } 3669 3670 static bool do_dep_sar(DisasContext *ctx, unsigned rt, unsigned c, 3671 bool d, bool nz, unsigned len, TCGv_i64 val) 3672 { 3673 unsigned rs = nz ? rt : 0; 3674 unsigned widthm1 = d ? 
63 : 31;
    TCGv_i64 mask, tmp, shift, dest;
    uint64_t msb = 1ULL << (len - 1);

    dest = dest_gpr(ctx, rt);
    shift = tcg_temp_new_i64();
    tmp = tcg_temp_new_i64();

    /* Convert big-endian bit numbering in SAR to left-shift. */
    tcg_gen_andi_i64(shift, cpu_sar, widthm1);
    tcg_gen_xori_i64(shift, shift, widthm1);

    mask = tcg_temp_new_i64();
    tcg_gen_movi_i64(mask, msb + (msb - 1));
    tcg_gen_and_i64(tmp, val, mask);
    if (rs) {
        tcg_gen_shl_i64(mask, mask, shift);
        tcg_gen_shl_i64(tmp, tmp, shift);
        tcg_gen_andc_i64(dest, cpu_gr[rs], mask);
        tcg_gen_or_i64(dest, dest, tmp);
    } else {
        tcg_gen_shl_i64(dest, tmp, shift);
    }
    save_gpr(ctx, rt, dest);

    /* Install the new nullification. */
    cond_free(&ctx->null_cond);
    if (c) {
        ctx->null_cond = do_sed_cond(ctx, c, d, dest);
    }
    return nullify_end(ctx);
}

static bool trans_dep_sar(DisasContext *ctx, arg_dep_sar *a)
{
    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    if (a->c) {
        nullify_over(ctx);
    }
    return do_dep_sar(ctx, a->t, a->c, a->d, a->nz, a->len,
                      load_gpr(ctx, a->r));
}

static bool trans_depi_sar(DisasContext *ctx, arg_depi_sar *a)
{
    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    if (a->c) {
        nullify_over(ctx);
    }
    return do_dep_sar(ctx, a->t, a->c, a->d, a->nz, a->len,
                      tcg_constant_i64(a->i));
}

static bool trans_be(DisasContext *ctx, arg_be *a)
{
    TCGv_i64 tmp;

#ifdef CONFIG_USER_ONLY
    /* ??? It seems like there should be a good way of using
       "be disp(sr2, r0)", the canonical gateway entry mechanism
       to our advantage.  But that appears to be inconvenient to
       manage alongside branch delay slots.  Therefore we handle
       entry into the gateway page via absolute address.  */
    /* Since we don't implement spaces, just branch.  Do notice the special
       case of "be disp(*,r0)" using a direct branch to disp, so that we can
       goto_tb to the TB containing the syscall.  */
    if (a->b == 0) {
        return do_dbranch(ctx, a->disp, a->l, a->n);
    }
#else
    nullify_over(ctx);
#endif

    tmp = tcg_temp_new_i64();
    tcg_gen_addi_i64(tmp, load_gpr(ctx, a->b), a->disp);
    tmp = do_ibranch_priv(ctx, tmp);

#ifdef CONFIG_USER_ONLY
    return do_ibranch(ctx, tmp, a->l, a->n);
#else
    TCGv_i64 new_spc = tcg_temp_new_i64();

    load_spr(ctx, new_spc, a->sp);
    if (a->l) {
        copy_iaoq_entry(ctx, cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var);
        tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_f);
    }
    if (a->n && use_nullify_skip(ctx)) {
        copy_iaoq_entry(ctx, cpu_iaoq_f, -1, tmp);
        tcg_gen_addi_i64(tmp, tmp, 4);
        copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
        tcg_gen_mov_i64(cpu_iasq_f, new_spc);
        tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f);
    } else {
        copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
        if (ctx->iaoq_b == -1) {
            tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
        }
        copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
        tcg_gen_mov_i64(cpu_iasq_b, new_spc);
        nullify_set(ctx, a->n);
    }
    tcg_gen_lookup_and_goto_ptr();
    ctx->base.is_jmp = DISAS_NORETURN;
    return nullify_end(ctx);
#endif
}

static bool trans_bl(DisasContext *ctx, arg_bl *a)
{
    return do_dbranch(ctx, iaoq_dest(ctx, a->disp), a->l, a->n);
}

static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a)
{
    uint64_t dest = iaoq_dest(ctx, a->disp);

    nullify_over(ctx);

    /* Make sure the caller hasn't done something weird with the queue.
     * ??? This is not quite the same as the PSW[B] bit, which would be
     * expensive to track.  Real hardware will trap for
     *    b  gateway
     *    b  gateway+4  (in delay slot of first branch)
     * However, checking for a non-sequential instruction queue *will*
     * diagnose the security hole
     *    b  gateway
     *    b  evil
     * in which instructions at evil would run with increased privs.
     */
    if (ctx->iaoq_b == -1 || ctx->iaoq_b != ctx->iaoq_f + 4) {
        return gen_illegal(ctx);
    }

#ifndef CONFIG_USER_ONLY
    if (ctx->tb_flags & PSW_C) {
        int type = hppa_artype_for_page(cpu_env(ctx->cs), ctx->base.pc_next);
        /* If we could not find a TLB entry, then we need to generate an
           ITLB miss exception so the kernel will provide it.
           The resulting TLB fill operation will invalidate this TB and
           we will re-translate, at which point we *will* be able to find
           the TLB entry and determine if this is in fact a gateway page.  */
        if (type < 0) {
            gen_excp(ctx, EXCP_ITLB_MISS);
            return true;
        }
        /* No change for non-gateway pages or for priv decrease.  */
        if (type >= 4 && type - 4 < ctx->privilege) {
            /* dest is a 64-bit offset: use deposit64 so that the upper
               half is not truncated in wide mode.  */
            dest = deposit64(dest, 0, 2, type - 4);
        }
    } else {
        dest &= -4;  /* priv = 0 */
    }
#endif

    if (a->l) {
        TCGv_i64 tmp = dest_gpr(ctx, a->l);
        if (ctx->privilege < 3) {
            tcg_gen_andi_i64(tmp, tmp, -4);
        }
        tcg_gen_ori_i64(tmp, tmp, ctx->privilege);
        save_gpr(ctx, a->l, tmp);
    }

    return do_dbranch(ctx, dest, 0, a->n);
}

static bool trans_blr(DisasContext *ctx, arg_blr *a)
{
    if (a->x) {
        TCGv_i64 tmp = tcg_temp_new_i64();
        tcg_gen_shli_i64(tmp, load_gpr(ctx, a->x), 3);
        tcg_gen_addi_i64(tmp, tmp, ctx->iaoq_f + 8);
        /* The computation here never changes privilege level,
           since the shifted index cannot affect the low two
           (privilege) bits of iaoq_f + 8.  */
        return do_ibranch(ctx, tmp, a->l, a->n);
    } else {
        /* BLR R0,RX is a good way to load PC+8 into RX.  */
        return do_dbranch(ctx, ctx->iaoq_f + 8, a->l, a->n);
    }
}

static bool trans_bv(DisasContext *ctx, arg_bv *a)
{
    TCGv_i64 dest;

    if (a->x == 0) {
        dest = load_gpr(ctx, a->b);
    } else {
        dest = tcg_temp_new_i64();
        tcg_gen_shli_i64(dest, load_gpr(ctx, a->x), 3);
        tcg_gen_add_i64(dest, dest, load_gpr(ctx, a->b));
    }
    dest = do_ibranch_priv(ctx, dest);
    return do_ibranch(ctx, dest, 0, a->n);
}

static bool trans_bve(DisasContext *ctx, arg_bve *a)
{
    TCGv_i64 dest;

#ifdef CONFIG_USER_ONLY
    dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
    return do_ibranch(ctx, dest, a->l, a->n);
#else
    nullify_over(ctx);
    dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));

    copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
    if (ctx->iaoq_b == -1) {
        tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
    }
    copy_iaoq_entry(ctx, cpu_iaoq_b, -1, dest);
    tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest));
    if (a->l) {
        copy_iaoq_entry(ctx, cpu_gr[a->l], ctx->iaoq_n, ctx->iaoq_n_var);
    }
    nullify_set(ctx, a->n);
    tcg_gen_lookup_and_goto_ptr();
    ctx->base.is_jmp = DISAS_NORETURN;
    return nullify_end(ctx);
#endif
}

static bool trans_nopbts(DisasContext *ctx, arg_nopbts *a)
{
    /* All branch target stack instructions are implemented as nops.  */
    return ctx->is_pa20;
}

/*
 * Float class 0
 */

static void gen_fcpy_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_mov_i32(dst, src);
}

static bool trans_fid_f(DisasContext *ctx, arg_fid_f *a)
{
    uint64_t ret;

    if (ctx->is_pa20) {
        ret = 0x13080000000000ULL; /* PA8700 (PCX-W2) */
    } else {
        ret = 0x0f080000000000ULL; /* PA7300LC (PCX-L2) */
    }

    nullify_over(ctx);
    save_frd(0, tcg_constant_i64(ret));
    return nullify_end(ctx);
}

static bool trans_fcpy_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fcpy_f);
}

static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_mov_i64(dst, src);
}

static bool trans_fcpy_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fcpy_d);
}

static void gen_fabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_andi_i32(dst, src, INT32_MAX);
}

static bool trans_fabs_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fabs_f);
}

static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_andi_i64(dst, src, INT64_MAX);
}

static bool trans_fabs_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fabs_d);
}

static bool trans_fsqrt_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fsqrt_s);
}

static bool trans_fsqrt_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fsqrt_d);
}

static bool trans_frnd_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_frnd_s);
}

static bool
trans_frnd_d(DisasContext *ctx, arg_fclass01 *a) 3982 { 3983 return do_fop_ded(ctx, a->t, a->r, gen_helper_frnd_d); 3984 } 3985 3986 static void gen_fneg_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src) 3987 { 3988 tcg_gen_xori_i32(dst, src, INT32_MIN); 3989 } 3990 3991 static bool trans_fneg_f(DisasContext *ctx, arg_fclass01 *a) 3992 { 3993 return do_fop_wew(ctx, a->t, a->r, gen_fneg_f); 3994 } 3995 3996 static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src) 3997 { 3998 tcg_gen_xori_i64(dst, src, INT64_MIN); 3999 } 4000 4001 static bool trans_fneg_d(DisasContext *ctx, arg_fclass01 *a) 4002 { 4003 return do_fop_ded(ctx, a->t, a->r, gen_fneg_d); 4004 } 4005 4006 static void gen_fnegabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src) 4007 { 4008 tcg_gen_ori_i32(dst, src, INT32_MIN); 4009 } 4010 4011 static bool trans_fnegabs_f(DisasContext *ctx, arg_fclass01 *a) 4012 { 4013 return do_fop_wew(ctx, a->t, a->r, gen_fnegabs_f); 4014 } 4015 4016 static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src) 4017 { 4018 tcg_gen_ori_i64(dst, src, INT64_MIN); 4019 } 4020 4021 static bool trans_fnegabs_d(DisasContext *ctx, arg_fclass01 *a) 4022 { 4023 return do_fop_ded(ctx, a->t, a->r, gen_fnegabs_d); 4024 } 4025 4026 /* 4027 * Float class 1 4028 */ 4029 4030 static bool trans_fcnv_d_f(DisasContext *ctx, arg_fclass01 *a) 4031 { 4032 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_s); 4033 } 4034 4035 static bool trans_fcnv_f_d(DisasContext *ctx, arg_fclass01 *a) 4036 { 4037 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_d); 4038 } 4039 4040 static bool trans_fcnv_w_f(DisasContext *ctx, arg_fclass01 *a) 4041 { 4042 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_w_s); 4043 } 4044 4045 static bool trans_fcnv_q_f(DisasContext *ctx, arg_fclass01 *a) 4046 { 4047 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_dw_s); 4048 } 4049 4050 static bool trans_fcnv_w_d(DisasContext *ctx, arg_fclass01 *a) 4051 { 4052 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_w_d); 4053 } 4054 4055 static bool trans_fcnv_q_d(DisasContext *ctx, arg_fclass01 *a) 4056 { 4057 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_dw_d); 4058 } 4059 4060 static bool trans_fcnv_f_w(DisasContext *ctx, arg_fclass01 *a) 4061 { 4062 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_w); 4063 } 4064 4065 static bool trans_fcnv_d_w(DisasContext *ctx, arg_fclass01 *a) 4066 { 4067 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_w); 4068 } 4069 4070 static bool trans_fcnv_f_q(DisasContext *ctx, arg_fclass01 *a) 4071 { 4072 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_dw); 4073 } 4074 4075 static bool trans_fcnv_d_q(DisasContext *ctx, arg_fclass01 *a) 4076 { 4077 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_dw); 4078 } 4079 4080 static bool trans_fcnv_t_f_w(DisasContext *ctx, arg_fclass01 *a) 4081 { 4082 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_w); 4083 } 4084 4085 static bool trans_fcnv_t_d_w(DisasContext *ctx, arg_fclass01 *a) 4086 { 4087 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_w); 4088 } 4089 4090 static bool trans_fcnv_t_f_q(DisasContext *ctx, arg_fclass01 *a) 4091 { 4092 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_dw); 4093 } 4094 4095 static bool trans_fcnv_t_d_q(DisasContext *ctx, arg_fclass01 *a) 4096 { 4097 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_dw); 4098 } 4099 4100 static bool trans_fcnv_uw_f(DisasContext *ctx, arg_fclass01 *a) 4101 { 4102 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_uw_s); 4103 } 4104 4105 static 
bool trans_fcnv_uq_f(DisasContext *ctx, arg_fclass01 *a) 4106 { 4107 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_udw_s); 4108 } 4109 4110 static bool trans_fcnv_uw_d(DisasContext *ctx, arg_fclass01 *a) 4111 { 4112 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_uw_d); 4113 } 4114 4115 static bool trans_fcnv_uq_d(DisasContext *ctx, arg_fclass01 *a) 4116 { 4117 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_udw_d); 4118 } 4119 4120 static bool trans_fcnv_f_uw(DisasContext *ctx, arg_fclass01 *a) 4121 { 4122 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_uw); 4123 } 4124 4125 static bool trans_fcnv_d_uw(DisasContext *ctx, arg_fclass01 *a) 4126 { 4127 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_uw); 4128 } 4129 4130 static bool trans_fcnv_f_uq(DisasContext *ctx, arg_fclass01 *a) 4131 { 4132 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_udw); 4133 } 4134 4135 static bool trans_fcnv_d_uq(DisasContext *ctx, arg_fclass01 *a) 4136 { 4137 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_udw); 4138 } 4139 4140 static bool trans_fcnv_t_f_uw(DisasContext *ctx, arg_fclass01 *a) 4141 { 4142 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_uw); 4143 } 4144 4145 static bool trans_fcnv_t_d_uw(DisasContext *ctx, arg_fclass01 *a) 4146 { 4147 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_uw); 4148 } 4149 4150 static bool trans_fcnv_t_f_uq(DisasContext *ctx, arg_fclass01 *a) 4151 { 4152 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_udw); 4153 } 4154 4155 static bool trans_fcnv_t_d_uq(DisasContext *ctx, arg_fclass01 *a) 4156 { 4157 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_udw); 4158 } 4159 4160 /* 4161 * Float class 2 4162 */ 4163 4164 static bool trans_fcmp_f(DisasContext *ctx, arg_fclass2 *a) 4165 { 4166 TCGv_i32 ta, tb, tc, ty; 4167 4168 nullify_over(ctx); 4169 4170 ta = load_frw0_i32(a->r1); 4171 tb = load_frw0_i32(a->r2); 4172 ty = tcg_constant_i32(a->y); 4173 tc = tcg_constant_i32(a->c); 4174 4175 gen_helper_fcmp_s(tcg_env, ta, tb, ty, tc); 4176 4177 return nullify_end(ctx); 4178 } 4179 4180 static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a) 4181 { 4182 TCGv_i64 ta, tb; 4183 TCGv_i32 tc, ty; 4184 4185 nullify_over(ctx); 4186 4187 ta = load_frd0(a->r1); 4188 tb = load_frd0(a->r2); 4189 ty = tcg_constant_i32(a->y); 4190 tc = tcg_constant_i32(a->c); 4191 4192 gen_helper_fcmp_d(tcg_env, ta, tb, ty, tc); 4193 4194 return nullify_end(ctx); 4195 } 4196 4197 static bool trans_ftest(DisasContext *ctx, arg_ftest *a) 4198 { 4199 TCGv_i64 t; 4200 4201 nullify_over(ctx); 4202 4203 t = tcg_temp_new_i64(); 4204 tcg_gen_ld32u_i64(t, tcg_env, offsetof(CPUHPPAState, fr0_shadow)); 4205 4206 if (a->y == 1) { 4207 int mask; 4208 bool inv = false; 4209 4210 switch (a->c) { 4211 case 0: /* simple */ 4212 tcg_gen_andi_i64(t, t, 0x4000000); 4213 ctx->null_cond = cond_make_0(TCG_COND_NE, t); 4214 goto done; 4215 case 2: /* rej */ 4216 inv = true; 4217 /* fallthru */ 4218 case 1: /* acc */ 4219 mask = 0x43ff800; 4220 break; 4221 case 6: /* rej8 */ 4222 inv = true; 4223 /* fallthru */ 4224 case 5: /* acc8 */ 4225 mask = 0x43f8000; 4226 break; 4227 case 9: /* acc6 */ 4228 mask = 0x43e0000; 4229 break; 4230 case 13: /* acc4 */ 4231 mask = 0x4380000; 4232 break; 4233 case 17: /* acc2 */ 4234 mask = 0x4200000; 4235 break; 4236 default: 4237 gen_illegal(ctx); 4238 return true; 4239 } 4240 if (inv) { 4241 TCGv_i64 c = tcg_constant_i64(mask); 4242 tcg_gen_or_i64(t, t, c); 4243 ctx->null_cond = cond_make(TCG_COND_EQ, t, c); 4244 } else { 4245 
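            /* Accept: nullify iff all of the selected status bits are clear. */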
tcg_gen_andi_i64(t, t, mask);
            ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
        }
    } else {
        unsigned cbit = (a->y ^ 1) - 1;

        tcg_gen_extract_i64(t, t, 21 - cbit, 1);
        ctx->null_cond = cond_make_0(TCG_COND_NE, t);
    }

 done:
    return nullify_end(ctx);
}

/*
 * Float class 3
 */

static bool trans_fadd_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fadd_s);
}

static bool trans_fadd_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fadd_d);
}

static bool trans_fsub_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fsub_s);
}

static bool trans_fsub_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fsub_d);
}

static bool trans_fmpy_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_s);
}

static bool trans_fmpy_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_d);
}

static bool trans_fdiv_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_s);
}

static bool trans_fdiv_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_d);
}

static bool trans_xmpyu(DisasContext *ctx, arg_xmpyu *a)
{
    TCGv_i64 x, y;

    nullify_over(ctx);

    x = load_frw0_i64(a->r1);
    y = load_frw0_i64(a->r2);
    tcg_gen_mul_i64(x, x, y);
    save_frd(a->t, x);

    return nullify_end(ctx);
}

/* Convert the fmpyadd single-precision register encodings to standard. */
static inline int fmpyadd_s_reg(unsigned r)
{
    return (r & 16) * 2 + 16 + (r & 15);
}

static bool do_fmpyadd_s(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
{
    int tm = fmpyadd_s_reg(a->tm);
    int ra = fmpyadd_s_reg(a->ra);
    int ta = fmpyadd_s_reg(a->ta);
    int rm2 = fmpyadd_s_reg(a->rm2);
    int rm1 = fmpyadd_s_reg(a->rm1);

    nullify_over(ctx);

    do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
    do_fop_weww(ctx, ta, ta, ra,
                is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);

    return nullify_end(ctx);
}

static bool trans_fmpyadd_f(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_s(ctx, a, false);
}

static bool trans_fmpysub_f(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_s(ctx, a, true);
}

static bool do_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
{
    nullify_over(ctx);

    do_fop_dedd(ctx, a->tm, a->rm1, a->rm2, gen_helper_fmpy_d);
    do_fop_dedd(ctx, a->ta, a->ta, a->ra,
                is_sub ?
static bool do_fmpyadd_s(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
{
    int tm = fmpyadd_s_reg(a->tm);
    int ra = fmpyadd_s_reg(a->ra);
    int ta = fmpyadd_s_reg(a->ta);
    int rm2 = fmpyadd_s_reg(a->rm2);
    int rm1 = fmpyadd_s_reg(a->rm1);

    nullify_over(ctx);

    do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
    do_fop_weww(ctx, ta, ta, ra,
                is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);

    return nullify_end(ctx);
}

static bool trans_fmpyadd_f(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_s(ctx, a, false);
}

static bool trans_fmpysub_f(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_s(ctx, a, true);
}

static bool do_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
{
    nullify_over(ctx);

    do_fop_dedd(ctx, a->tm, a->rm1, a->rm2, gen_helper_fmpy_d);
    do_fop_dedd(ctx, a->ta, a->ta, a->ra,
                is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);

    return nullify_end(ctx);
}

static bool trans_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_d(ctx, a, false);
}

static bool trans_fmpysub_d(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_d(ctx, a, true);
}

static bool trans_fmpyfadd_f(DisasContext *ctx, arg_fmpyfadd_f *a)
{
    TCGv_i32 x, y, z;

    nullify_over(ctx);
    x = load_frw0_i32(a->rm1);
    y = load_frw0_i32(a->rm2);
    z = load_frw0_i32(a->ra3);

    if (a->neg) {
        gen_helper_fmpynfadd_s(x, tcg_env, x, y, z);
    } else {
        gen_helper_fmpyfadd_s(x, tcg_env, x, y, z);
    }

    save_frw_i32(a->t, x);
    return nullify_end(ctx);
}

static bool trans_fmpyfadd_d(DisasContext *ctx, arg_fmpyfadd_d *a)
{
    TCGv_i64 x, y, z;

    nullify_over(ctx);
    x = load_frd0(a->rm1);
    y = load_frd0(a->rm2);
    z = load_frd0(a->ra3);

    if (a->neg) {
        gen_helper_fmpynfadd_d(x, tcg_env, x, y, z);
    } else {
        gen_helper_fmpyfadd_d(x, tcg_env, x, y, z);
    }

    save_frd(a->t, x);
    return nullify_end(ctx);
}
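/*
 * Note that a->neg selects the FMPYNFADD forms, which -- judging by
 * the fmpynfadd helpers -- negate the product, not the final result:
 * t = -(rm1 * rm2) + ra3.
 */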
static bool trans_diag(DisasContext *ctx, arg_diag *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    if (a->i == 0x100) {
        /* emulate PDC BTLB, called by SeaBIOS-hppa */
        nullify_over(ctx);
        gen_helper_diag_btlb(tcg_env);
        return nullify_end(ctx);
    }
    if (a->i == 0x101) {
        /* print char in %r26 to first serial console, used by SeaBIOS-hppa */
        nullify_over(ctx);
        gen_helper_diag_console_output(tcg_env);
        return nullify_end(ctx);
    }
#endif
    qemu_log_mask(LOG_UNIMP, "DIAG opcode 0x%04x ignored\n", a->i);
    return true;
}

static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    int bound;

    ctx->cs = cs;
    ctx->tb_flags = ctx->base.tb->flags;
    ctx->is_pa20 = hppa_is_pa20(cpu_env(cs));

#ifdef CONFIG_USER_ONLY
    ctx->privilege = MMU_IDX_TO_PRIV(MMU_USER_IDX);
    ctx->mmu_idx = MMU_USER_IDX;
    ctx->iaoq_f = ctx->base.pc_first | ctx->privilege;
    ctx->iaoq_b = ctx->base.tb->cs_base | ctx->privilege;
    ctx->unalign = (ctx->tb_flags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
#else
    ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
    ctx->mmu_idx = (ctx->tb_flags & PSW_D
                    ? PRIV_P_TO_MMU_IDX(ctx->privilege, ctx->tb_flags & PSW_P)
                    : ctx->tb_flags & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX);

    /*
     * Recover the IAOQ values from the GVA + PRIV.  cs_base packs
     * IASQ_Front in the high 32 bits and the signed offset from
     * IAOQ_Front to IAOQ_Back in the low 32 bits, with 0 meaning
     * that the back of the queue is unknown.
     */
    uint64_t cs_base = ctx->base.tb->cs_base;
    uint64_t iasq_f = cs_base & ~0xffffffffull;
    int32_t diff = cs_base;

    ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
    ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1);
#endif
    ctx->iaoq_n = -1;
    ctx->iaoq_n_var = NULL;

    ctx->zero = tcg_constant_i64(0);

    /* Bound the number of instructions by those left on the page. */
    bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
    ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
}

static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    /* Seed the nullification status from PSW[N], as saved in TB->FLAGS. */
    ctx->null_cond = cond_make_f();
    ctx->psw_n_nonzero = false;
    if (ctx->tb_flags & PSW_N) {
        ctx->null_cond.c = TCG_COND_ALWAYS;
        ctx->psw_n_nonzero = true;
    }
    ctx->null_lab = NULL;
}
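/*
 * The three values recorded by tcg_gen_insn_start below are, in
 * order, IAOQ_Front, IAOQ_Back, and a third slot that starts as 0;
 * saving the op in ctx->insn_start presumably allows that slot to be
 * patched later with the base register needed for unwinding.  All
 * three come back through restore_state_to_opcode when an exception
 * must re-synchronize the instruction queue.
 */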
static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b, 0);
    ctx->insn_start = tcg_last_op();
}

static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUHPPAState *env = cpu_env(cs);
    DisasJumpType ret;

    /* Execute one insn. */
#ifdef CONFIG_USER_ONLY
    if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
        do_page_zero(ctx);
        ret = ctx->base.is_jmp;
        assert(ret != DISAS_NEXT);
    } else
#endif
    {
        /* Always fetch the insn, even if nullified, so that we check
           the page permissions for execute. */
        uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);

        /* Set up the IA queue for the next insn.
           This will be overwritten by a branch. */
        if (ctx->iaoq_b == -1) {
            /* The back of the queue is only known at runtime. */
            ctx->iaoq_n = -1;
            ctx->iaoq_n_var = tcg_temp_new_i64();
            tcg_gen_addi_i64(ctx->iaoq_n_var, cpu_iaoq_b, 4);
        } else {
            ctx->iaoq_n = ctx->iaoq_b + 4;
            ctx->iaoq_n_var = NULL;
        }

        if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
            ctx->null_cond.c = TCG_COND_NEVER;
            ret = DISAS_NEXT;
        } else {
            ctx->insn = insn;
            if (!decode(ctx, insn)) {
                gen_illegal(ctx);
            }
            ret = ctx->base.is_jmp;
            assert(ctx->null_lab == NULL);
        }
    }

    /* Advance the insn queue.  Note that this check also detects
       a priority change within the instruction queue. */
    if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
        if (ctx->iaoq_b != -1 && ctx->iaoq_n != -1
            && use_goto_tb(ctx, ctx->iaoq_b)
            && (ctx->null_cond.c == TCG_COND_NEVER
                || ctx->null_cond.c == TCG_COND_ALWAYS)) {
            nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
            gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
            ctx->base.is_jmp = ret = DISAS_NORETURN;
        } else {
            ctx->base.is_jmp = ret = DISAS_IAQ_N_STALE;
        }
    }
    ctx->iaoq_f = ctx->iaoq_b;
    ctx->iaoq_b = ctx->iaoq_n;
    ctx->base.pc_next += 4;

    switch (ret) {
    case DISAS_NORETURN:
    case DISAS_IAQ_N_UPDATED:
        break;

    case DISAS_NEXT:
    case DISAS_IAQ_N_STALE:
    case DISAS_IAQ_N_STALE_EXIT:
        if (ctx->iaoq_f == -1) {
            copy_iaoq_entry(ctx, cpu_iaoq_f, -1, cpu_iaoq_b);
            copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
#ifndef CONFIG_USER_ONLY
            tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
#endif
            nullify_save(ctx);
            ctx->base.is_jmp = (ret == DISAS_IAQ_N_STALE_EXIT
                                ? DISAS_EXIT
                                : DISAS_IAQ_N_UPDATED);
        } else if (ctx->iaoq_b == -1) {
            copy_iaoq_entry(ctx, cpu_iaoq_b, -1, ctx->iaoq_n_var);
        }
        break;

    default:
        g_assert_not_reached();
    }
}

static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    DisasJumpType is_jmp = ctx->base.is_jmp;

    switch (is_jmp) {
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
    case DISAS_IAQ_N_STALE:
    case DISAS_IAQ_N_STALE_EXIT:
        copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
        copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
        nullify_save(ctx);
        /* FALLTHRU */
    case DISAS_IAQ_N_UPDATED:
        if (is_jmp != DISAS_IAQ_N_STALE_EXIT) {
            tcg_gen_lookup_and_goto_ptr();
            break;
        }
        /* FALLTHRU */
    case DISAS_EXIT:
        tcg_gen_exit_tb(NULL, 0);
        break;
    default:
        g_assert_not_reached();
    }
}

static void hppa_tr_disas_log(const DisasContextBase *dcbase,
                              CPUState *cs, FILE *logfile)
{
    target_ulong pc = dcbase->pc_first;

#ifdef CONFIG_USER_ONLY
    switch (pc) {
    case 0x00:
        fprintf(logfile, "IN:\n0x00000000:  (null)\n");
        return;
    case 0xb0:
        fprintf(logfile, "IN:\n0x000000b0:  light-weight-syscall\n");
        return;
    case 0xe0:
        fprintf(logfile, "IN:\n0x000000e0:  set-thread-pointer-syscall\n");
        return;
    case 0x100:
        fprintf(logfile, "IN:\n0x00000100:  syscall\n");
        return;
    }
#endif

    fprintf(logfile, "IN: %s\n", lookup_symbol(pc));
    target_disas(logfile, cs, pc, dcbase->tb->size);
}

static const TranslatorOps hppa_tr_ops = {
    .init_disas_context = hppa_tr_init_disas_context,
    .tb_start = hppa_tr_tb_start,
    .insn_start = hppa_tr_insn_start,
    .translate_insn = hppa_tr_translate_insn,
    .tb_stop = hppa_tr_tb_stop,
    .disas_log = hppa_tr_disas_log,
};

void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           vaddr pc, void *host_pc)
{
    DisasContext ctx;
    translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);
}