/*
 * PowerPC emulation for qemu: main translation routines.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 * Copyright (C) 2011 Freescale Semiconductor, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "internal.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "qemu/host-utils.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "exec/translator.h"
#include "exec/log.h"
#include "qemu/atomic128.h"
#include "spr_common.h"
#include "power8-pmu.h"

#include "qemu/qemu-print.h"
#include "qapi/error.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H

/* Single-step state bits kept in DisasContext::singlestep_enabled */
#define CPU_SINGLE_STEP 0x1
#define CPU_BRANCH_STEP 0x2

/* Include definitions for instructions classes and implementations flags */
/* #define PPC_DEBUG_DISAS */

#ifdef PPC_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
/*****************************************************************************/
/* Code translation helpers                                                  */

/*
 * Global register indexes.
 *
 * cpu_reg_names is a flat buffer holding the NUL-terminated names of
 * all registered TCG globals; the size expression mirrors the string
 * lengths produced by the snprintf calls in ppc_translate_init()
 * ("r0".."r31", "r0H".."r31H", "crf0".."crf7", each including NUL).
 */
static char cpu_reg_names[10 * 3 + 22 * 4 /* GPR */
                          + 10 * 4 + 22 * 5 /* SPE GPRh */
                          + 8 * 5 /* CRF */];
static TCGv cpu_gpr[32];
static TCGv cpu_gprh[32];
static TCGv_i32 cpu_crf[8];
static TCGv cpu_nip;
static TCGv cpu_msr;
static TCGv cpu_ctr;
static TCGv cpu_lr;
#if defined(TARGET_PPC64)
static TCGv cpu_cfar;
#endif
static TCGv cpu_xer, cpu_so, cpu_ov, cpu_ca, cpu_ov32, cpu_ca32;
static TCGv cpu_reserve;
static TCGv cpu_reserve_length;
static TCGv cpu_reserve_val;
#if defined(TARGET_PPC64)
static TCGv cpu_reserve_val2;
#endif
static TCGv cpu_fpscr;
static TCGv_i32 cpu_access_type;

/*
 * Register TCG global views of the architected CPU state kept in
 * CPUPPCState: GPRs, SPE GPR high halves, CR fields, nip/msr/ctr/lr,
 * the split-out XER bits, the load-reserve state, fpscr and the
 * access type.  The 3/4/5 pointer strides below match the lengths of
 * the names formatted just above them (including the NUL byte).
 */
void ppc_translate_init(void)
{
    int i;
    char *p;
    size_t cpu_reg_names_size;

    p = cpu_reg_names;
    cpu_reg_names_size = sizeof(cpu_reg_names);

    for (i = 0; i < 8; i++) {
        snprintf(p, cpu_reg_names_size, "crf%d", i);
        cpu_crf[i] = tcg_global_mem_new_i32(tcg_env,
                                            offsetof(CPUPPCState, crf[i]), p);
        p += 5;
        cpu_reg_names_size -= 5;
    }

    for (i = 0; i < 32; i++) {
        snprintf(p, cpu_reg_names_size, "r%d", i);
        cpu_gpr[i] = tcg_global_mem_new(tcg_env,
                                        offsetof(CPUPPCState, gpr[i]), p);
        p += (i < 10) ? 3 : 4;
        cpu_reg_names_size -= (i < 10) ? 3 : 4;
        snprintf(p, cpu_reg_names_size, "r%dH", i);
        cpu_gprh[i] = tcg_global_mem_new(tcg_env,
                                         offsetof(CPUPPCState, gprh[i]), p);
        p += (i < 10) ? 4 : 5;
        cpu_reg_names_size -= (i < 10) ? 4 : 5;
    }

    cpu_nip = tcg_global_mem_new(tcg_env,
                                 offsetof(CPUPPCState, nip), "nip");

    cpu_msr = tcg_global_mem_new(tcg_env,
                                 offsetof(CPUPPCState, msr), "msr");

    cpu_ctr = tcg_global_mem_new(tcg_env,
                                 offsetof(CPUPPCState, ctr), "ctr");

    cpu_lr = tcg_global_mem_new(tcg_env,
                                offsetof(CPUPPCState, lr), "lr");

#if defined(TARGET_PPC64)
    cpu_cfar = tcg_global_mem_new(tcg_env,
                                  offsetof(CPUPPCState, cfar), "cfar");
#endif

    cpu_xer = tcg_global_mem_new(tcg_env,
                                 offsetof(CPUPPCState, xer), "xer");
    cpu_so = tcg_global_mem_new(tcg_env,
                                offsetof(CPUPPCState, so), "SO");
    cpu_ov = tcg_global_mem_new(tcg_env,
                                offsetof(CPUPPCState, ov), "OV");
    cpu_ca = tcg_global_mem_new(tcg_env,
                                offsetof(CPUPPCState, ca), "CA");
    cpu_ov32 = tcg_global_mem_new(tcg_env,
                                  offsetof(CPUPPCState, ov32), "OV32");
    cpu_ca32 = tcg_global_mem_new(tcg_env,
                                  offsetof(CPUPPCState, ca32), "CA32");

    cpu_reserve = tcg_global_mem_new(tcg_env,
                                     offsetof(CPUPPCState, reserve_addr),
                                     "reserve_addr");
    cpu_reserve_length = tcg_global_mem_new(tcg_env,
                                            offsetof(CPUPPCState,
                                                     reserve_length),
                                            "reserve_length");
    cpu_reserve_val = tcg_global_mem_new(tcg_env,
                                         offsetof(CPUPPCState, reserve_val),
                                         "reserve_val");
#if defined(TARGET_PPC64)
    cpu_reserve_val2 = tcg_global_mem_new(tcg_env,
                                          offsetof(CPUPPCState, reserve_val2),
                                          "reserve_val2");
#endif

    cpu_fpscr = tcg_global_mem_new(tcg_env,
                                   offsetof(CPUPPCState, fpscr), "fpscr");

    cpu_access_type = tcg_global_mem_new_i32(tcg_env,
                                             offsetof(CPUPPCState, access_type),
                                             "access_type");
}

/* internal defines */
struct DisasContext {
    DisasContextBase base;
    target_ulong cia;                      /* current instruction address */
    uint32_t opcode;
    /* Routine used to access memory */
    bool pr, hv, dr, le_mode;
    bool lazy_tlb_flush;
    bool need_access_type;
    int mem_idx;
    int
access_type;
    /* Translation flags */
    MemOp default_tcg_memop_mask;
#if defined(TARGET_PPC64)
    bool sf_mode;
    bool has_cfar;
#endif
    bool fpu_enabled;
    bool altivec_enabled;
    bool vsx_enabled;
    bool spe_enabled;
    bool tm_enabled;
    bool gtse;
    bool hr;
    bool mmcr0_pmcc0;
    bool mmcr0_pmcc1;
    bool mmcr0_pmcjce;
    bool pmc_other;
    bool pmu_insn_cnt;
    ppc_spr_t *spr_cb; /* Needed to check rights for mfspr/mtspr */
    int singlestep_enabled;
    uint32_t flags;
    uint64_t insns_flags;
    uint64_t insns_flags2;
};

/* Translator stop reasons (DisasContextBase::is_jmp values) */
#define DISAS_EXIT         DISAS_TARGET_0  /* exit to main loop, pc updated */
#define DISAS_EXIT_UPDATE  DISAS_TARGET_1  /* exit to main loop, pc stale */
#define DISAS_CHAIN        DISAS_TARGET_2  /* lookup next tb, pc updated */
#define DISAS_CHAIN_UPDATE DISAS_TARGET_3  /* lookup next tb, pc stale */

/* Return true iff byteswap is needed in a scalar memop */
static inline bool need_byteswap(const DisasContext *ctx)
{
#if TARGET_BIG_ENDIAN
    return ctx->le_mode;
#else
    return !ctx->le_mode;
#endif
}

/* True when active word size < size of target_long. */
#ifdef TARGET_PPC64
# define NARROW_MODE(C)  (!(C)->sf_mode)
#else
# define NARROW_MODE(C)  0
#endif

struct opc_handler_t {
    /* invalid bits for instruction 1 (Rc(opcode) == 0) */
    uint32_t inval1;
    /* invalid bits for instruction 2 (Rc(opcode) == 1) */
    uint32_t inval2;
    /* instruction type */
    uint64_t type;
    /* extended instruction type */
    uint64_t type2;
    /* handler */
    void (*handler)(DisasContext *ctx);
};

/*
 * If this TB may run in parallel with other vCPUs (CF_PARALLEL), end
 * translation and restart the instruction under the exclusive lock.
 * Returns false when the caller must stop emitting code.
 */
static inline bool gen_serialize(DisasContext *ctx)
{
    if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
        /* Restart with exclusive lock.  */
        gen_helper_exit_atomic(tcg_env);
        ctx->base.is_jmp = DISAS_NORETURN;
        return false;
    }
    return true;
}

#if !defined(CONFIG_USER_ONLY)
#if defined(TARGET_PPC64)
/* Serialize only when the core has multiple SMT threads */
static inline bool gen_serialize_core(DisasContext *ctx)
{
    if (ctx->flags & POWERPC_FLAG_SMT) {
        return gen_serialize(ctx);
    }
    return true;
}
#endif

/* Serialize only when multiple SMT threads share one LPAR */
static inline bool gen_serialize_core_lpar(DisasContext *ctx)
{
#if defined(TARGET_PPC64)
    if (ctx->flags & POWERPC_FLAG_SMT_1LPAR) {
        return gen_serialize(ctx);
    }
#endif
    return true;
}
#endif

/* SPR load/store helpers: copy between a TCG value and env->spr[reg] */
static inline void gen_load_spr(TCGv t, int reg)
{
    tcg_gen_ld_tl(t, tcg_env, offsetof(CPUPPCState, spr[reg]));
}

static inline void gen_store_spr(int reg, TCGv t)
{
    tcg_gen_st_tl(t, tcg_env, offsetof(CPUPPCState, spr[reg]));
}

/* Update env->access_type only when it actually changes */
static inline void gen_set_access_type(DisasContext *ctx, int access_type)
{
    if (ctx->need_access_type && ctx->access_type != access_type) {
        tcg_gen_movi_i32(cpu_access_type, access_type);
        ctx->access_type = access_type;
    }
}

/* Set nip, truncated to 32 bits when not in 64-bit mode */
static inline void gen_update_nip(DisasContext *ctx, target_ulong nip)
{
    if (NARROW_MODE(ctx)) {
        nip = (uint32_t)nip;
    }
    tcg_gen_movi_tl(cpu_nip, nip);
}

/* Raise exception 'excp' with error code 'error' at address 'nip' */
static void gen_exception_err_nip(DisasContext *ctx, uint32_t excp,
                                  uint32_t error, target_ulong nip)
{
    TCGv_i32 t0, t1;

    gen_update_nip(ctx, nip);
    t0 = tcg_constant_i32(excp);
    t1 = tcg_constant_i32(error);
    gen_helper_raise_exception_err(tcg_env, t0, t1);
    ctx->base.is_jmp = DISAS_NORETURN;
}

static inline void gen_exception_err(DisasContext *ctx, uint32_t excp,
                                     uint32_t error)
{
    /*
     * These are all synchronous exceptions, we set the PC back to the
     * faulting instruction
     */
    gen_exception_err_nip(ctx, excp, error, ctx->cia);
}

static void
gen_exception_nip(DisasContext *ctx, uint32_t excp,
                  target_ulong nip)
{
    TCGv_i32 t0;

    gen_update_nip(ctx, nip);
    t0 = tcg_constant_i32(excp);
    gen_helper_raise_exception(tcg_env, t0);
    ctx->base.is_jmp = DISAS_NORETURN;
}

static inline void gen_exception(DisasContext *ctx, uint32_t excp)
{
    /*
     * These are all synchronous exceptions, we set the PC back to the
     * faulting instruction
     */
    gen_exception_nip(ctx, excp, ctx->cia);
}

#if !defined(CONFIG_USER_ONLY)
static void gen_ppc_maybe_interrupt(DisasContext *ctx)
{
    translator_io_start(&ctx->base);
    gen_helper_ppc_maybe_interrupt(tcg_env);
}
#endif

/*
 * Tells the caller what is the appropriate exception to generate and prepares
 * SPR registers for this exception.
 *
 * The exception can be either POWERPC_EXCP_TRACE (on most PowerPCs) or
 * POWERPC_EXCP_DEBUG (on BookE).
 */
static void gen_debug_exception(DisasContext *ctx, bool rfi_type)
{
#if !defined(CONFIG_USER_ONLY)
    if (ctx->flags & POWERPC_FLAG_DE) {
        /* BookE path: record the cause in DBSR, then raise DEBUG */
        target_ulong dbsr = 0;
        if (ctx->singlestep_enabled & CPU_SINGLE_STEP) {
            dbsr = DBCR0_ICMP;
        } else {
            /* Must have been branch */
            dbsr = DBCR0_BRT;
        }
        TCGv t0 = tcg_temp_new();
        gen_load_spr(t0, SPR_BOOKE_DBSR);
        tcg_gen_ori_tl(t0, t0, dbsr);
        gen_store_spr(SPR_BOOKE_DBSR, t0);
        gen_helper_raise_exception(tcg_env,
                                   tcg_constant_i32(POWERPC_EXCP_DEBUG));
        ctx->base.is_jmp = DISAS_NORETURN;
    } else {
        if (!rfi_type) { /* BookS does not single step rfi type instructions */
            TCGv t0 = tcg_temp_new();
            tcg_gen_movi_tl(t0, ctx->cia);
            gen_helper_book3s_trace(tcg_env, t0);
            ctx->base.is_jmp = DISAS_NORETURN;
        }
    }
#endif
}

static inline void gen_inval_exception(DisasContext *ctx, uint32_t error)
{
    /* Will be converted to program check if needed */
    gen_exception_err(ctx, POWERPC_EXCP_HV_EMU, POWERPC_EXCP_INVAL | error);
}

static inline void gen_priv_exception(DisasContext *ctx, uint32_t error)
{
    gen_exception_err(ctx, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_PRIV | error);
}

static inline void gen_hvpriv_exception(DisasContext *ctx, uint32_t error)
{
    /* Will be converted to program check if needed */
    gen_exception_err(ctx, POWERPC_EXCP_HV_EMU, POWERPC_EXCP_PRIV | error);
}

/*****************************************************************************/
/* SPR READ/WRITE CALLBACKS */

void spr_noaccess(DisasContext *ctx, int gprn, int sprn)
{
#if 0
    sprn = ((sprn >> 5) & 0x1F) | ((sprn & 0x1F) << 5);
    printf("ERROR: try to access SPR %d !\n", sprn);
#endif
}

/* #define PPC_DUMP_SPR_ACCESSES */

/*
 * Generic callbacks:
 * do nothing but store/retrieve spr value
 */
static void spr_load_dump_spr(int sprn)
{
#ifdef PPC_DUMP_SPR_ACCESSES
    TCGv_i32 t0 = tcg_constant_i32(sprn);
    gen_helper_load_dump_spr(tcg_env, t0);
#endif
}

void spr_read_generic(DisasContext *ctx, int gprn, int sprn)
{
    gen_load_spr(cpu_gpr[gprn], sprn);
    spr_load_dump_spr(sprn);
}

static void spr_store_dump_spr(int sprn)
{
#ifdef PPC_DUMP_SPR_ACCESSES
    TCGv_i32 t0 = tcg_constant_i32(sprn);
    gen_helper_store_dump_spr(tcg_env, t0);
#endif
}

void spr_write_generic(DisasContext *ctx, int sprn, int gprn)
{
    gen_store_spr(sprn, cpu_gpr[gprn]);
    spr_store_dump_spr(sprn);
}

/* As spr_write_generic, but the stored value is zero-extended to 32 bits */
void spr_write_generic32(DisasContext *ctx, int sprn, int gprn)
{
#ifdef TARGET_PPC64
    TCGv t0 = tcg_temp_new();
    tcg_gen_ext32u_tl(t0, cpu_gpr[gprn]);
    gen_store_spr(sprn, t0);
    spr_store_dump_spr(sprn);
#else
    spr_write_generic(ctx, sprn, gprn);
#endif
}

void spr_core_write_generic(DisasContext *ctx, int sprn, int gprn)
{
    if (!(ctx->flags &
POWERPC_FLAG_SMT)) { 461 spr_write_generic(ctx, sprn, gprn); 462 return; 463 } 464 465 if (!gen_serialize(ctx)) { 466 return; 467 } 468 469 gen_helper_spr_core_write_generic(tcg_env, tcg_constant_i32(sprn), 470 cpu_gpr[gprn]); 471 spr_store_dump_spr(sprn); 472 } 473 474 static void spr_write_CTRL_ST(DisasContext *ctx, int sprn, int gprn) 475 { 476 /* This does not implement >1 thread */ 477 TCGv t0 = tcg_temp_new(); 478 TCGv t1 = tcg_temp_new(); 479 tcg_gen_extract_tl(t0, cpu_gpr[gprn], 0, 1); /* Extract RUN field */ 480 tcg_gen_shli_tl(t1, t0, 8); /* Duplicate the bit in TS */ 481 tcg_gen_or_tl(t1, t1, t0); 482 gen_store_spr(sprn, t1); 483 } 484 485 void spr_write_CTRL(DisasContext *ctx, int sprn, int gprn) 486 { 487 if (!(ctx->flags & POWERPC_FLAG_SMT_1LPAR)) { 488 /* CTRL behaves as 1-thread in LPAR-per-thread mode */ 489 spr_write_CTRL_ST(ctx, sprn, gprn); 490 goto out; 491 } 492 493 if (!gen_serialize(ctx)) { 494 return; 495 } 496 497 gen_helper_spr_write_CTRL(tcg_env, tcg_constant_i32(sprn), 498 cpu_gpr[gprn]); 499 out: 500 spr_store_dump_spr(sprn); 501 502 /* 503 * SPR_CTRL writes must force a new translation block, 504 * allowing the PMU to calculate the run latch events with 505 * more accuracy. 
506 */ 507 ctx->base.is_jmp = DISAS_EXIT_UPDATE; 508 } 509 510 #if !defined(CONFIG_USER_ONLY) 511 void spr_write_clear(DisasContext *ctx, int sprn, int gprn) 512 { 513 TCGv t0 = tcg_temp_new(); 514 TCGv t1 = tcg_temp_new(); 515 gen_load_spr(t0, sprn); 516 tcg_gen_neg_tl(t1, cpu_gpr[gprn]); 517 tcg_gen_and_tl(t0, t0, t1); 518 gen_store_spr(sprn, t0); 519 } 520 521 void spr_access_nop(DisasContext *ctx, int sprn, int gprn) 522 { 523 } 524 525 #endif 526 527 /* SPR common to all PowerPC */ 528 /* XER */ 529 void spr_read_xer(DisasContext *ctx, int gprn, int sprn) 530 { 531 TCGv dst = cpu_gpr[gprn]; 532 TCGv t0 = tcg_temp_new(); 533 TCGv t1 = tcg_temp_new(); 534 TCGv t2 = tcg_temp_new(); 535 tcg_gen_mov_tl(dst, cpu_xer); 536 tcg_gen_shli_tl(t0, cpu_so, XER_SO); 537 tcg_gen_shli_tl(t1, cpu_ov, XER_OV); 538 tcg_gen_shli_tl(t2, cpu_ca, XER_CA); 539 tcg_gen_or_tl(t0, t0, t1); 540 tcg_gen_or_tl(dst, dst, t2); 541 tcg_gen_or_tl(dst, dst, t0); 542 if (is_isa300(ctx)) { 543 tcg_gen_shli_tl(t0, cpu_ov32, XER_OV32); 544 tcg_gen_or_tl(dst, dst, t0); 545 tcg_gen_shli_tl(t0, cpu_ca32, XER_CA32); 546 tcg_gen_or_tl(dst, dst, t0); 547 } 548 } 549 550 void spr_write_xer(DisasContext *ctx, int sprn, int gprn) 551 { 552 TCGv src = cpu_gpr[gprn]; 553 /* Write all flags, while reading back check for isa300 */ 554 tcg_gen_andi_tl(cpu_xer, src, 555 ~((1u << XER_SO) | 556 (1u << XER_OV) | (1u << XER_OV32) | 557 (1u << XER_CA) | (1u << XER_CA32))); 558 tcg_gen_extract_tl(cpu_ov32, src, XER_OV32, 1); 559 tcg_gen_extract_tl(cpu_ca32, src, XER_CA32, 1); 560 tcg_gen_extract_tl(cpu_so, src, XER_SO, 1); 561 tcg_gen_extract_tl(cpu_ov, src, XER_OV, 1); 562 tcg_gen_extract_tl(cpu_ca, src, XER_CA, 1); 563 } 564 565 /* LR */ 566 void spr_read_lr(DisasContext *ctx, int gprn, int sprn) 567 { 568 tcg_gen_mov_tl(cpu_gpr[gprn], cpu_lr); 569 } 570 571 void spr_write_lr(DisasContext *ctx, int sprn, int gprn) 572 { 573 tcg_gen_mov_tl(cpu_lr, cpu_gpr[gprn]); 574 } 575 576 #if defined(TARGET_PPC64) && 
!defined(CONFIG_USER_ONLY) 577 /* Debug facilities */ 578 /* CFAR */ 579 void spr_read_cfar(DisasContext *ctx, int gprn, int sprn) 580 { 581 tcg_gen_mov_tl(cpu_gpr[gprn], cpu_cfar); 582 } 583 584 void spr_write_cfar(DisasContext *ctx, int sprn, int gprn) 585 { 586 tcg_gen_mov_tl(cpu_cfar, cpu_gpr[gprn]); 587 } 588 589 /* Breakpoint */ 590 void spr_write_ciabr(DisasContext *ctx, int sprn, int gprn) 591 { 592 translator_io_start(&ctx->base); 593 gen_helper_store_ciabr(tcg_env, cpu_gpr[gprn]); 594 } 595 596 /* Watchpoint */ 597 void spr_write_dawr0(DisasContext *ctx, int sprn, int gprn) 598 { 599 translator_io_start(&ctx->base); 600 gen_helper_store_dawr0(tcg_env, cpu_gpr[gprn]); 601 } 602 603 void spr_write_dawrx0(DisasContext *ctx, int sprn, int gprn) 604 { 605 translator_io_start(&ctx->base); 606 gen_helper_store_dawrx0(tcg_env, cpu_gpr[gprn]); 607 } 608 #endif /* defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) */ 609 610 /* CTR */ 611 void spr_read_ctr(DisasContext *ctx, int gprn, int sprn) 612 { 613 tcg_gen_mov_tl(cpu_gpr[gprn], cpu_ctr); 614 } 615 616 void spr_write_ctr(DisasContext *ctx, int sprn, int gprn) 617 { 618 tcg_gen_mov_tl(cpu_ctr, cpu_gpr[gprn]); 619 } 620 621 /* User read access to SPR */ 622 /* USPRx */ 623 /* UMMCRx */ 624 /* UPMCx */ 625 /* USIA */ 626 /* UDECR */ 627 void spr_read_ureg(DisasContext *ctx, int gprn, int sprn) 628 { 629 gen_load_spr(cpu_gpr[gprn], sprn + 0x10); 630 } 631 632 #if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) 633 void spr_write_ureg(DisasContext *ctx, int sprn, int gprn) 634 { 635 gen_store_spr(sprn + 0x10, cpu_gpr[gprn]); 636 } 637 #endif 638 639 /* SPR common to all non-embedded PowerPC */ 640 /* DECR */ 641 #if !defined(CONFIG_USER_ONLY) 642 void spr_read_decr(DisasContext *ctx, int gprn, int sprn) 643 { 644 translator_io_start(&ctx->base); 645 gen_helper_load_decr(cpu_gpr[gprn], tcg_env); 646 } 647 648 void spr_write_decr(DisasContext *ctx, int sprn, int gprn) 649 { 650 translator_io_start(&ctx->base); 
gen_helper_store_decr(tcg_env, cpu_gpr[gprn]);
}
#endif

/* SPR common to all non-embedded PowerPC, except 601 */
/* Time base */
/*
 * Time-base accesses go through helpers; translator_io_start() is
 * called first where the helper touches the timer state.
 */
void spr_read_tbl(DisasContext *ctx, int gprn, int sprn)
{
    translator_io_start(&ctx->base);
    gen_helper_load_tbl(cpu_gpr[gprn], tcg_env);
}

void spr_read_tbu(DisasContext *ctx, int gprn, int sprn)
{
    translator_io_start(&ctx->base);
    gen_helper_load_tbu(cpu_gpr[gprn], tcg_env);
}

void spr_read_atbl(DisasContext *ctx, int gprn, int sprn)
{
    gen_helper_load_atbl(cpu_gpr[gprn], tcg_env);
}

void spr_read_atbu(DisasContext *ctx, int gprn, int sprn)
{
    gen_helper_load_atbu(cpu_gpr[gprn], tcg_env);
}

#if !defined(CONFIG_USER_ONLY)
/* Time-base writes serialize when SMT threads share one LPAR */
void spr_write_tbl(DisasContext *ctx, int sprn, int gprn)
{
    if (!gen_serialize_core_lpar(ctx)) {
        return;
    }

    translator_io_start(&ctx->base);
    gen_helper_store_tbl(tcg_env, cpu_gpr[gprn]);
}

void spr_write_tbu(DisasContext *ctx, int sprn, int gprn)
{
    if (!gen_serialize_core_lpar(ctx)) {
        return;
    }

    translator_io_start(&ctx->base);
    gen_helper_store_tbu(tcg_env, cpu_gpr[gprn]);
}

void spr_write_atbl(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_atbl(tcg_env, cpu_gpr[gprn]);
}

void spr_write_atbu(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_atbu(tcg_env, cpu_gpr[gprn]);
}

#if defined(TARGET_PPC64)
void spr_read_purr(DisasContext *ctx, int gprn, int sprn)
{
    translator_io_start(&ctx->base);
    gen_helper_load_purr(cpu_gpr[gprn], tcg_env);
}

void spr_write_purr(DisasContext *ctx, int sprn, int gprn)
{
    if (!gen_serialize_core_lpar(ctx)) {
        return;
    }
    translator_io_start(&ctx->base);
    gen_helper_store_purr(tcg_env, cpu_gpr[gprn]);
}

/* HDECR */
void spr_read_hdecr(DisasContext *ctx, int gprn, int sprn)
{
    translator_io_start(&ctx->base);
    gen_helper_load_hdecr(cpu_gpr[gprn], tcg_env);
}

void spr_write_hdecr(DisasContext *ctx, int sprn, int gprn)
{
    if (!gen_serialize_core_lpar(ctx)) {
        return;
    }
    translator_io_start(&ctx->base);
    gen_helper_store_hdecr(tcg_env, cpu_gpr[gprn]);
}

void spr_read_vtb(DisasContext *ctx, int gprn, int sprn)
{
    translator_io_start(&ctx->base);
    gen_helper_load_vtb(cpu_gpr[gprn], tcg_env);
}

void spr_write_vtb(DisasContext *ctx, int sprn, int gprn)
{
    if (!gen_serialize_core_lpar(ctx)) {
        return;
    }
    translator_io_start(&ctx->base);
    gen_helper_store_vtb(tcg_env, cpu_gpr[gprn]);
}

void spr_write_tbu40(DisasContext *ctx, int sprn, int gprn)
{
    if (!gen_serialize_core_lpar(ctx)) {
        return;
    }
    translator_io_start(&ctx->base);
    gen_helper_store_tbu40(tcg_env, cpu_gpr[gprn]);
}

#endif
#endif

#if !defined(CONFIG_USER_ONLY)
/* IBAT0U...IBAT0U */
/* IBAT0L...IBAT7L */
/*
 * BAT SPR numbers interleave upper/lower halves: bit 0 of sprn picks
 * the U/L half, (sprn - base) / 2 picks the BAT index.
 */
void spr_read_ibat(DisasContext *ctx, int gprn, int sprn)
{
    tcg_gen_ld_tl(cpu_gpr[gprn], tcg_env,
                  offsetof(CPUPPCState,
                           IBAT[sprn & 1][(sprn - SPR_IBAT0U) / 2]));
}

void spr_read_ibat_h(DisasContext *ctx, int gprn, int sprn)
{
    tcg_gen_ld_tl(cpu_gpr[gprn], tcg_env,
                  offsetof(CPUPPCState,
                           IBAT[sprn & 1][((sprn - SPR_IBAT4U) / 2) + 4]));
}

void spr_write_ibatu(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_constant_i32((sprn - SPR_IBAT0U) / 2);
    gen_helper_store_ibatu(tcg_env, t0, cpu_gpr[gprn]);
}

void spr_write_ibatu_h(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_constant_i32(((sprn - SPR_IBAT4U) / 2) + 4);
    gen_helper_store_ibatu(tcg_env, t0, cpu_gpr[gprn]);
}

void spr_write_ibatl(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_constant_i32((sprn - SPR_IBAT0L) / 2);
gen_helper_store_ibatl(tcg_env, t0, cpu_gpr[gprn]);
}

void spr_write_ibatl_h(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_constant_i32(((sprn - SPR_IBAT4L) / 2) + 4);
    gen_helper_store_ibatl(tcg_env, t0, cpu_gpr[gprn]);
}

/* DBAT0U...DBAT7U */
/* DBAT0L...DBAT7L */
/* Same U/L-interleaved SPR numbering as the IBATs above */
void spr_read_dbat(DisasContext *ctx, int gprn, int sprn)
{
    tcg_gen_ld_tl(cpu_gpr[gprn], tcg_env,
                  offsetof(CPUPPCState,
                           DBAT[sprn & 1][(sprn - SPR_DBAT0U) / 2]));
}

void spr_read_dbat_h(DisasContext *ctx, int gprn, int sprn)
{
    tcg_gen_ld_tl(cpu_gpr[gprn], tcg_env,
                  offsetof(CPUPPCState,
                           DBAT[sprn & 1][((sprn - SPR_DBAT4U) / 2) + 4]));
}

void spr_write_dbatu(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_constant_i32((sprn - SPR_DBAT0U) / 2);
    gen_helper_store_dbatu(tcg_env, t0, cpu_gpr[gprn]);
}

void spr_write_dbatu_h(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_constant_i32(((sprn - SPR_DBAT4U) / 2) + 4);
    gen_helper_store_dbatu(tcg_env, t0, cpu_gpr[gprn]);
}

void spr_write_dbatl(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_constant_i32((sprn - SPR_DBAT0L) / 2);
    gen_helper_store_dbatl(tcg_env, t0, cpu_gpr[gprn]);
}

void spr_write_dbatl_h(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_constant_i32(((sprn - SPR_DBAT4L) / 2) + 4);
    gen_helper_store_dbatl(tcg_env, t0, cpu_gpr[gprn]);
}

/* SDR1 */
void spr_write_sdr1(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_sdr1(tcg_env, cpu_gpr[gprn]);
}

#if defined(TARGET_PPC64)
/* 64 bits PowerPC specific SPRs */
/* PIDR */
void spr_write_pidr(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_pidr(tcg_env, cpu_gpr[gprn]);
}

void spr_write_lpidr(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_lpidr(tcg_env, cpu_gpr[gprn]);
}

/* HIOR is backed by env->excp_prefix */
void spr_read_hior(DisasContext *ctx, int gprn, int sprn)
{
    tcg_gen_ld_tl(cpu_gpr[gprn], tcg_env, offsetof(CPUPPCState, excp_prefix));
}

void spr_write_hior(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_andi_tl(t0, cpu_gpr[gprn], 0x3FFFFF00000ULL);
    tcg_gen_st_tl(t0, tcg_env, offsetof(CPUPPCState, excp_prefix));
}

void spr_write_ptcr(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_ptcr(tcg_env, cpu_gpr[gprn]);
}

void spr_write_pcr(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_pcr(tcg_env, cpu_gpr[gprn]);
}

/* DPDES */
/* Doorbell state is shared per-LPAR, hence the serialization check */
void spr_read_dpdes(DisasContext *ctx, int gprn, int sprn)
{
    if (!gen_serialize_core_lpar(ctx)) {
        return;
    }

    gen_helper_load_dpdes(cpu_gpr[gprn], tcg_env);
}

void spr_write_dpdes(DisasContext *ctx, int sprn, int gprn)
{
    if (!gen_serialize_core_lpar(ctx)) {
        return;
    }

    gen_helper_store_dpdes(tcg_env, cpu_gpr[gprn]);
}
#endif
#endif

/* PowerPC 40x specific registers */
#if !defined(CONFIG_USER_ONLY)
void spr_read_40x_pit(DisasContext *ctx, int gprn, int sprn)
{
    translator_io_start(&ctx->base);
    gen_helper_load_40x_pit(cpu_gpr[gprn], tcg_env);
}

void spr_write_40x_pit(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_40x_pit(tcg_env, cpu_gpr[gprn]);
}

void spr_write_40x_dbcr0(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_store_spr(sprn, cpu_gpr[gprn]);
    gen_helper_store_40x_dbcr0(tcg_env, cpu_gpr[gprn]);
    /* We must stop translation as we may have rebooted */
    ctx->base.is_jmp = DISAS_EXIT_UPDATE;
}

void spr_write_40x_sler(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_40x_sler(tcg_env, cpu_gpr[gprn]);
}
/* Write 40x TCR (timer control) via helper; timer access, so I/O section. */
void spr_write_40x_tcr(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_40x_tcr(tcg_env, cpu_gpr[gprn]);
}

/* Write 40x TSR (timer status) via helper; timer access, so I/O section. */
void spr_write_40x_tsr(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_40x_tsr(tcg_env, cpu_gpr[gprn]);
}

/* Write 40x PID: only the low 8 bits are implemented. */
void spr_write_40x_pid(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_andi_tl(t0, cpu_gpr[gprn], 0xFF);
    gen_helper_store_40x_pid(tcg_env, t0);
}

/* Write BookE TCR via helper; timer access, so I/O section. */
void spr_write_booke_tcr(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_booke_tcr(tcg_env, cpu_gpr[gprn]);
}

/* Write BookE TSR via helper; timer access, so I/O section. */
void spr_write_booke_tsr(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_booke_tsr(tcg_env, cpu_gpr[gprn]);
}
#endif

/* PIR */
#if !defined(CONFIG_USER_ONLY)
/* Write PIR: only the low 4 bits are writable here. */
void spr_write_pir(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_andi_tl(t0, cpu_gpr[gprn], 0xF);
    gen_store_spr(SPR_PIR, t0);
}
#endif

/* SPE specific registers */
/* Read SPEFSCR: zero-extend the 32-bit env field into gprn. */
void spr_read_spefscr(DisasContext *ctx, int gprn, int sprn)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    tcg_gen_ld_i32(t0, tcg_env, offsetof(CPUPPCState, spe_fscr));
    tcg_gen_extu_i32_tl(cpu_gpr[gprn], t0);
}

/* Write SPEFSCR: truncate gprn to 32 bits and store into env. */
void spr_write_spefscr(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(t0, cpu_gpr[gprn]);
    tcg_gen_st_i32(t0, tcg_env, offsetof(CPUPPCState, spe_fscr));
}

#if !defined(CONFIG_USER_ONLY)
/* Callback used to write the exception vector base */
void spr_write_excp_prefix(DisasContext *ctx, int sprn, int gprn)
{
    /* Mask the new value with ivpr_mask, then update both env and the SPR. */
    TCGv t0 = tcg_temp_new();
    tcg_gen_ld_tl(t0, tcg_env, offsetof(CPUPPCState, ivpr_mask));
    tcg_gen_and_tl(t0, t0, cpu_gpr[gprn]);
    tcg_gen_st_tl(t0, tcg_env, offsetof(CPUPPCState, excp_prefix));
    gen_store_spr(sprn, t0);
}

/* Write one of the BookE IVORn exception vector offsets. */
void spr_write_excp_vector(DisasContext *ctx, int sprn, int gprn)
{
    int sprn_offs;

    /* The IVOR SPR numbers are split over three discontiguous ranges. */
    if (sprn >= SPR_BOOKE_IVOR0 && sprn <= SPR_BOOKE_IVOR15) {
        sprn_offs = sprn - SPR_BOOKE_IVOR0;
    } else if (sprn >= SPR_BOOKE_IVOR32 && sprn <= SPR_BOOKE_IVOR37) {
        sprn_offs = sprn - SPR_BOOKE_IVOR32 + 32;
    } else if (sprn >= SPR_BOOKE_IVOR38 && sprn <= SPR_BOOKE_IVOR42) {
        sprn_offs = sprn - SPR_BOOKE_IVOR38 + 38;
    } else {
        qemu_log_mask(LOG_GUEST_ERROR, "Trying to write an unknown exception"
                      " vector 0x%03x\n", sprn);
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        return;
    }

    /* Mask with ivor_mask, update the env vector table and the SPR itself. */
    TCGv t0 = tcg_temp_new();
    tcg_gen_ld_tl(t0, tcg_env, offsetof(CPUPPCState, ivor_mask));
    tcg_gen_and_tl(t0, t0, cpu_gpr[gprn]);
    tcg_gen_st_tl(t0, tcg_env, offsetof(CPUPPCState, excp_vectors[sprn_offs]));
    gen_store_spr(sprn, t0);
}
#endif

#ifdef TARGET_PPC64
#ifndef CONFIG_USER_ONLY
/* Write AMR: only bits permitted by UAMOR (PR) or AMOR (HV=0) may change. */
void spr_write_amr(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    TCGv t2 = tcg_temp_new();

    /*
     * Note, the HV=1 PR=0 case is handled earlier by simply using
     * spr_write_generic for HV mode in the SPR table
     */

    /* Build insertion mask into t1 based on context */
    if (ctx->pr) {
        gen_load_spr(t1, SPR_UAMOR);
    } else {
        gen_load_spr(t1, SPR_AMOR);
    }

    /* Mask new bits into t2 */
    tcg_gen_and_tl(t2, t1, cpu_gpr[gprn]);

    /* Load AMR and clear new bits in t0 */
    gen_load_spr(t0, SPR_AMR);
    tcg_gen_andc_tl(t0, t0, t1);

    /* Or'in new bits and write it out */
    tcg_gen_or_tl(t0, t0, t2);
    gen_store_spr(SPR_AMR, t0);
    spr_store_dump_spr(SPR_AMR);
}

/* Write UAMOR: only bits permitted by AMOR may change. */
void spr_write_uamor(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    TCGv t2 = tcg_temp_new();

    /*
     * Note, the HV=1 case is handled earlier by simply using
     * spr_write_generic for HV mode in the SPR table
     */

    /* Build insertion mask into t1 based on context */
    gen_load_spr(t1, SPR_AMOR);

    /* Mask new bits into t2 */
    tcg_gen_and_tl(t2, t1, cpu_gpr[gprn]);

    /* Load AMR and clear new bits in t0 */
    gen_load_spr(t0, SPR_UAMOR);
    tcg_gen_andc_tl(t0, t0, t1);

    /* Or'in new bits and write it out */
    tcg_gen_or_tl(t0, t0, t2);
    gen_store_spr(SPR_UAMOR, t0);
    spr_store_dump_spr(SPR_UAMOR);
}

/* Write IAMR: only bits permitted by AMOR may change. */
void spr_write_iamr(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    TCGv t2 = tcg_temp_new();

    /*
     * Note, the HV=1 case is handled earlier by simply using
     * spr_write_generic for HV mode in the SPR table
     */

    /* Build insertion mask into t1 based on context */
    gen_load_spr(t1, SPR_AMOR);

    /* Mask new bits into t2 */
    tcg_gen_and_tl(t2, t1, cpu_gpr[gprn]);

    /* Load AMR and clear new bits in t0 */
    gen_load_spr(t0, SPR_IAMR);
    tcg_gen_andc_tl(t0, t0, t1);

    /* Or'in new bits and write it out */
    tcg_gen_or_tl(t0, t0, t2);
    gen_store_spr(SPR_IAMR, t0);
    spr_store_dump_spr(SPR_IAMR);
}
#endif
#endif

#ifndef CONFIG_USER_ONLY
/* Read a thermal management SPR, letting the helper refresh it first. */
void spr_read_thrm(DisasContext *ctx, int gprn, int sprn)
{
    gen_helper_fixup_thrm(tcg_env);
    gen_load_spr(cpu_gpr[gprn], sprn);
    spr_load_dump_spr(sprn);
}
#endif /* !CONFIG_USER_ONLY */

#if !defined(CONFIG_USER_ONLY)
/* Write e500 L1CSR0: keep only the DCE/CPE bits. */
void spr_write_e500_l1csr0(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();

    tcg_gen_andi_tl(t0, cpu_gpr[gprn], L1CSR0_DCE | L1CSR0_CPE);
    gen_store_spr(sprn, t0);
}

/* Write e500 L1CSR1: keep only the ICE/CPE bits. */
void spr_write_e500_l1csr1(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();

    tcg_gen_andi_tl(t0, cpu_gpr[gprn], L1CSR1_ICE | L1CSR1_CPE);
    gen_store_spr(sprn, t0);
}

/* Write e500 L2CSR0: the flash-invalidate/lock-clear bits self-clear. */
void spr_write_e500_l2csr0(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();

    tcg_gen_andi_tl(t0, cpu_gpr[gprn],
                    ~(E500_L2CSR0_L2FI | E500_L2CSR0_L2FL | E500_L2CSR0_L2LFC));
    gen_store_spr(sprn, t0);
}

/* Write BookE 2.06 MMUCSR0: triggers a TLB flush via helper. */
void spr_write_booke206_mmucsr0(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_booke206_tlbflush(tcg_env, cpu_gpr[gprn]);
}

/* Write a BookE PID register; the helper needs the SPR number too. */
void spr_write_booke_pid(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_constant_i32(sprn);
    gen_helper_booke_setpid(tcg_env, t0, cpu_gpr[gprn]);
}

/* Write EPLC (external PID load context) via helper. */
void spr_write_eplc(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_booke_set_eplc(tcg_env, cpu_gpr[gprn]);
}

/* Write EPSC (external PID store context) via helper. */
void spr_write_epsc(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_booke_set_epsc(tcg_env, cpu_gpr[gprn]);
}

#endif

#if !defined(CONFIG_USER_ONLY)
/* Write MAS7_MAS3: split the 64-bit value across MAS3 (low) and MAS7 (high). */
void spr_write_mas73(DisasContext *ctx, int sprn, int gprn)
{
    TCGv val = tcg_temp_new();
    tcg_gen_ext32u_tl(val, cpu_gpr[gprn]);
    gen_store_spr(SPR_BOOKE_MAS3, val);
    tcg_gen_shri_tl(val, cpu_gpr[gprn], 32);
    gen_store_spr(SPR_BOOKE_MAS7, val);
}

/* Read MAS7_MAS3: recombine MAS7 (high 32 bits) and MAS3 (low 32 bits). */
void spr_read_mas73(DisasContext *ctx, int gprn, int sprn)
{
    TCGv mas7 = tcg_temp_new();
    TCGv mas3 = tcg_temp_new();
    gen_load_spr(mas7, SPR_BOOKE_MAS7);
    tcg_gen_shli_tl(mas7, mas7, 32);
    gen_load_spr(mas3, SPR_BOOKE_MAS3);
    tcg_gen_or_tl(cpu_gpr[gprn], mas3, mas7);
}

#endif

#ifdef TARGET_PPC64
/* Emit an FSCR facility-availability check for an SPR access. */
static void gen_fscr_facility_check(DisasContext *ctx, int facility_sprn,
                                    int bit, int sprn, int cause)
{
    TCGv_i32 t1 = tcg_constant_i32(bit);
    TCGv_i32 t2 = tcg_constant_i32(sprn);
    TCGv_i32 t3 = tcg_constant_i32(cause);

    gen_helper_fscr_facility_check(tcg_env, t1, t2, t3);
}

/* Emit an MSR facility-availability check for an SPR access. */
static void gen_msr_facility_check(DisasContext *ctx, int facility_sprn,
                                   int bit, int sprn, int cause)
{
    TCGv_i32 t1 = tcg_constant_i32(bit);
    TCGv_i32 t2 = tcg_constant_i32(sprn);
    TCGv_i32 t3 = tcg_constant_i32(cause);

    gen_helper_msr_facility_check(tcg_env, t1, t2, t3);
}

/* Read the upper 32 bits of the SPR numbered one below sprn. */
void spr_read_prev_upper32(DisasContext *ctx, int gprn, int sprn)
{
    TCGv spr_up = tcg_temp_new();
    TCGv spr = tcg_temp_new();

    gen_load_spr(spr, sprn - 1);
    tcg_gen_shri_tl(spr_up, spr, 32);
    tcg_gen_ext32u_tl(cpu_gpr[gprn], spr_up);
}

/* Write the upper 32 bits of the SPR numbered one below sprn. */
void spr_write_prev_upper32(DisasContext *ctx, int sprn, int gprn)
{
    TCGv spr = tcg_temp_new();

    gen_load_spr(spr, sprn - 1);
    tcg_gen_deposit_tl(spr, spr, cpu_gpr[gprn], 32, 32);
    gen_store_spr(sprn - 1, spr);
}

#if !defined(CONFIG_USER_ONLY)
/* Write HMER: bits can only be cleared (new value is ANDed with old). */
void spr_write_hmer(DisasContext *ctx, int sprn, int gprn)
{
    TCGv hmer = tcg_temp_new();

    gen_load_spr(hmer, sprn);
    tcg_gen_and_tl(hmer, cpu_gpr[gprn], hmer);
    gen_store_spr(sprn, hmer);
    spr_store_dump_spr(sprn);
}

void spr_read_tfmr(DisasContext *ctx, int gprn, int sprn)
{
    /* Reading TFMR can cause it to be updated, so serialize threads here too */
    if (!gen_serialize_core(ctx)) {
        return;
    }
    gen_helper_load_tfmr(cpu_gpr[gprn], tcg_env);
}

/* Write TFMR; requires serializing the threads of the core. */
void spr_write_tfmr(DisasContext *ctx, int sprn, int gprn)
{
    if (!gen_serialize_core(ctx)) {
        return;
    }
    gen_helper_store_tfmr(tcg_env, cpu_gpr[gprn]);
}

/* Write LPCR via helper; may affect interrupts, so begin an I/O section. */
void spr_write_lpcr(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_lpcr(tcg_env, cpu_gpr[gprn]);
}
#endif /* !defined(CONFIG_USER_ONLY) */

/* Read TAR after checking FSCR facility availability. */
void spr_read_tar(DisasContext *ctx, int gprn, int sprn)
{
    gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_TAR, sprn, FSCR_IC_TAR);
    spr_read_generic(ctx, gprn, sprn);
}

/* Write TAR after checking FSCR facility availability. */
void spr_write_tar(DisasContext *ctx, int sprn, int gprn)
{
    gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_TAR, sprn, FSCR_IC_TAR);
    spr_write_generic(ctx, sprn, gprn);
}

/* Read a TM SPR after checking MSR[TM] availability. */
void spr_read_tm(DisasContext *ctx, int gprn, int sprn)
{
    gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM);
    spr_read_generic(ctx, gprn, sprn);
}

/* Write a TM SPR after checking MSR[TM] availability. */
void spr_write_tm(DisasContext *ctx, int sprn, int gprn)
{
    gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM);
    spr_write_generic(ctx, sprn, gprn);
}

/* Read the upper half of a TM SPR pair after the MSR[TM] check. */
void spr_read_tm_upper32(DisasContext *ctx, int gprn, int sprn)
{
    gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM);
    spr_read_prev_upper32(ctx, gprn, sprn);
}

/* Write the upper half of a TM SPR pair after the MSR[TM] check. */
void spr_write_tm_upper32(DisasContext *ctx, int sprn, int gprn)
{
    gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM);
    spr_write_prev_upper32(ctx, sprn, gprn);
}

/* Read an EBB SPR after checking FSCR[EBB] availability. */
void spr_read_ebb(DisasContext *ctx, int gprn, int sprn)
{
    gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB);
    spr_read_generic(ctx, gprn, sprn);
}

/* Write an EBB SPR after checking FSCR[EBB] availability. */
void spr_write_ebb(DisasContext *ctx, int sprn, int gprn)
{
    gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB);
    spr_write_generic(ctx, sprn, gprn);
}

/* Read the upper half of an EBB SPR pair after the FSCR[EBB] check. */
void spr_read_ebb_upper32(DisasContext *ctx, int gprn, int sprn)
{
    gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB);
    spr_read_prev_upper32(ctx, gprn, sprn);
}

/* Write the upper half of an EBB SPR pair after the FSCR[EBB] check. */
void spr_write_ebb_upper32(DisasContext *ctx, int sprn, int gprn)
{
    gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB);
    spr_write_prev_upper32(ctx, sprn, gprn);
}

/* Problem-state read of the (H)DEXCR: low 32 bits only. */
void spr_read_dexcr_ureg(DisasContext *ctx, int gprn, int sprn)
{
    TCGv t0 = tcg_temp_new();

    /*
     * Access to the (H)DEXCR in problem state is done using separated
     * SPR indexes which are 16 below the SPR indexes which have full
     * access to the (H)DEXCR in privileged state. Problem state can
     * only read bits 32:63, bits 0:31 return 0.
     *
     * See section 9.3.1-9.3.2 of PowerISA v3.1B
     */

    gen_load_spr(t0, sprn + 16);
    tcg_gen_ext32u_tl(cpu_gpr[gprn], t0);
}
#endif

/* Convenience wrappers around the GEN_OPCODE* table-entry builders. */
#define GEN_HANDLER(name, opc1, opc2, opc3, inval, type)                      \
GEN_OPCODE(name, opc1, opc2, opc3, inval, type, PPC_NONE)

#define GEN_HANDLER_E(name, opc1, opc2, opc3, inval, type, type2)             \
GEN_OPCODE(name, opc1, opc2, opc3, inval, type, type2)

#define GEN_HANDLER2(name, onam, opc1, opc2, opc3, inval, type)               \
GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, PPC_NONE)

#define GEN_HANDLER2_E(name, onam, opc1, opc2, opc3, inval, type, type2)      \
GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, type2)

#define GEN_HANDLER_E_2(name, opc1, opc2, opc3, opc4, inval, type, type2)     \
GEN_OPCODE3(name, opc1, opc2, opc3, opc4, inval, type, type2)

#define GEN_HANDLER2_E_2(name, onam, opc1, opc2, opc3, opc4, inval, typ, typ2) \
GEN_OPCODE4(name, onam, opc1, opc2, opc3, opc4, inval, typ, typ2)

/* One entry of the opcode dispatch table. */
typedef struct opcode_t {
    unsigned char opc1, opc2, opc3, opc4;
#if HOST_LONG_BITS == 64 /* Explicitly align to 64 bits */
    unsigned char pad[4];
#endif
    opc_handler_t handler;
    const char *oname;
} opcode_t;

/* Raise a privileged-opcode program interrupt. */
static void gen_priv_opc(DisasContext *ctx)
{
    gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC);
}

/* Helpers for priv.
check */
#define GEN_PRIV(CTX)              \
    do {                           \
        gen_priv_opc(CTX); return; \
    } while (0)

#if defined(CONFIG_USER_ONLY)
/* In user mode every privileged access simply traps. */
#define CHK_HV(CTX) GEN_PRIV(CTX)
#define CHK_SV(CTX) GEN_PRIV(CTX)
#define CHK_HVRM(CTX) GEN_PRIV(CTX)
#else
/* Hypervisor-only instruction: fail in problem state or without HV. */
#define CHK_HV(CTX)                         \
    do {                                    \
        if (unlikely(ctx->pr || !ctx->hv)) {\
            GEN_PRIV(CTX);                  \
        }                                   \
    } while (0)
/* Supervisor instruction: fail in problem state. */
#define CHK_SV(CTX)              \
    do {                         \
        if (unlikely(ctx->pr)) { \
            GEN_PRIV(CTX);       \
        }                        \
    } while (0)
/* Hypervisor real-mode instruction: additionally fail with DR set. */
#define CHK_HVRM(CTX)                                   \
    do {                                                \
        if (unlikely(ctx->pr || !ctx->hv || ctx->dr)) { \
            GEN_PRIV(CTX);                              \
        }                                               \
    } while (0)
#endif

#define CHK_NONE(CTX)

/*****************************************************************************/
/* PowerPC instructions table */

#define GEN_OPCODE(name, op1, op2, op3, invl, _typ, _typ2)                    \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .opc4 = 0xff,                                                             \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = stringify(name),                                                 \
}
#define GEN_OPCODE_DUAL(name, op1, op2, op3, invl1, invl2, _typ, _typ2)       \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .opc4 = 0xff,                                                             \
    .handler = {                                                              \
        .inval1  = invl1,                                                     \
        .inval2  = invl2,                                                     \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = stringify(name),                                                 \
}
#define GEN_OPCODE2(name, onam, op1, op2, op3, invl, _typ, _typ2)             \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .opc4 = 0xff,                                                             \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = onam,                                                            \
}
#define GEN_OPCODE3(name, op1, op2, op3, op4, invl, _typ, _typ2)              \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .opc4 = op4,                                                              \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = stringify(name),                                                 \
}
#define GEN_OPCODE4(name, onam, op1, op2, op3, op4, invl, _typ, _typ2)        \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .opc4 = op4,                                                              \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = onam,                                                            \
}

/* Invalid instruction */
static void gen_invalid(DisasContext *ctx)
{
    gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
}

/* Fallback handler used for undecodable opcodes. */
static opc_handler_t invalid_handler = {
    .inval1  = 0xFFFFFFFF,
    .inval2  = 0xFFFFFFFF,
    .type = PPC_NONE,
    .type2 = PPC_NONE,
    .handler = gen_invalid,
};

/*** Integer comparison ***/

/* Compare arg0 and arg1 (signed if s) and set CR field crf (with SO). */
static inline void gen_op_cmp(TCGv arg0, TCGv arg1, int s, int crf)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    TCGv_i32 t = tcg_temp_new_i32();

    /* Start from EQ, then override with LT or GT as appropriate. */
    tcg_gen_movi_tl(t0, CRF_EQ);
    tcg_gen_movi_tl(t1, CRF_LT);
    tcg_gen_movcond_tl((s ? TCG_COND_LT : TCG_COND_LTU),
                       t0, arg0, arg1, t1, t0);
    tcg_gen_movi_tl(t1, CRF_GT);
    tcg_gen_movcond_tl((s ? TCG_COND_GT : TCG_COND_GTU),
                       t0, arg0, arg1, t1, t0);

    /* Merge in the SO bit from XER. */
    tcg_gen_trunc_tl_i32(t, t0);
    tcg_gen_trunc_tl_i32(cpu_crf[crf], cpu_so);
    tcg_gen_or_i32(cpu_crf[crf], cpu_crf[crf], t);
}

/* Compare arg0 against an immediate. */
static inline void gen_op_cmpi(TCGv arg0, target_ulong arg1, int s, int crf)
{
    TCGv t0 = tcg_constant_tl(arg1);
    gen_op_cmp(arg0, t0, s, crf);
}

/* 32-bit compare: extend both operands to 32-bit semantics first. */
static inline void gen_op_cmp32(TCGv arg0, TCGv arg1, int s, int crf)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    if (s) {
        tcg_gen_ext32s_tl(t0, arg0);
        tcg_gen_ext32s_tl(t1, arg1);
    } else {
        tcg_gen_ext32u_tl(t0, arg0);
        tcg_gen_ext32u_tl(t1, arg1);
    }
    gen_op_cmp(t0, t1, s, crf);
}

/* 32-bit compare against an immediate. */
static inline void gen_op_cmpi32(TCGv arg0, target_ulong arg1, int s, int crf)
{
    TCGv t0 = tcg_constant_tl(arg1);
    gen_op_cmp32(arg0, t0, s, crf);
}

/* Record-form update: signed compare of reg against 0 into CR0. */
static inline void gen_set_Rc0(DisasContext *ctx, TCGv reg)
{
    if (NARROW_MODE(ctx)) {
        gen_op_cmpi32(reg, 0, 1, 0);
    } else {
        gen_op_cmpi(reg, 0, 1, 0);
    }
}

/* cmprb - range comparison: isupper, isalpha, islower */
static void gen_cmprb(DisasContext *ctx)
{
    TCGv_i32 src1 = tcg_temp_new_i32();
    TCGv_i32 src2 = tcg_temp_new_i32();
    TCGv_i32 src2lo = tcg_temp_new_i32();
    TCGv_i32 src2hi = tcg_temp_new_i32();
    TCGv_i32 crf = cpu_crf[crfD(ctx->opcode)];

    tcg_gen_trunc_tl_i32(src1, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_trunc_tl_i32(src2, cpu_gpr[rB(ctx->opcode)]);

    /* First range: bytes 0 (lo bound) and 1 (hi bound) of src2. */
    tcg_gen_andi_i32(src1, src1, 0xFF);
    tcg_gen_ext8u_i32(src2lo, src2);
    tcg_gen_shri_i32(src2, src2, 8);
    tcg_gen_ext8u_i32(src2hi, src2);

    tcg_gen_setcond_i32(TCG_COND_LEU, src2lo, src2lo, src1);
    tcg_gen_setcond_i32(TCG_COND_LEU, src2hi, src1, src2hi);
    tcg_gen_and_i32(crf, src2lo, src2hi);

    /* With L=1, also test the second range in bytes 2 and 3. */
    if (ctx->opcode & 0x00200000) {
        tcg_gen_shri_i32(src2, src2, 8);
        tcg_gen_ext8u_i32(src2lo, src2);
        tcg_gen_shri_i32(src2, src2, 8);
        tcg_gen_ext8u_i32(src2hi, src2);
        tcg_gen_setcond_i32(TCG_COND_LEU, src2lo, src2lo, src1);
        tcg_gen_setcond_i32(TCG_COND_LEU, src2hi, src1, src2hi);
        tcg_gen_and_i32(src2lo, src2lo, src2hi);
        tcg_gen_or_i32(crf, crf, src2lo);
    }
    tcg_gen_shli_i32(crf, crf, CRF_GT_BIT);
}

#if defined(TARGET_PPC64)
/* cmpeqb */
static void gen_cmpeqb(DisasContext *ctx)
{
    gen_helper_cmpeqb(cpu_crf[crfD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                      cpu_gpr[rB(ctx->opcode)]);
}
#endif

/* isel (PowerPC 2.03 specification) */
static void gen_isel(DisasContext *ctx)
{
    uint32_t bi = rC(ctx->opcode);
    uint32_t mask = 0x08 >> (bi & 0x03);
    TCGv t0 = tcg_temp_new();
    TCGv zr;

    /* Extract the selected CR bit, then pick rA (or 0 for rA=0) or rB. */
    tcg_gen_extu_i32_tl(t0, cpu_crf[bi >> 2]);
    tcg_gen_andi_tl(t0, t0, mask);

    zr = tcg_constant_tl(0);
    tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr[rD(ctx->opcode)], t0, zr,
                       rA(ctx->opcode) ? cpu_gpr[rA(ctx->opcode)] : zr,
                       cpu_gpr[rB(ctx->opcode)]);
}

/* cmpb: PowerPC 2.05 specification */
static void gen_cmpb(DisasContext *ctx)
{
    gen_helper_cmpb(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                    cpu_gpr[rB(ctx->opcode)]);
}

/*** Integer arithmetic ***/

/* Compute OV/OV32/SO after an add (sub=0) or subtract (sub=1). */
static inline void gen_op_arith_compute_ov(DisasContext *ctx, TCGv arg0,
                                           TCGv arg1, TCGv arg2, int sub)
{
    TCGv t0 = tcg_temp_new();

    /* Overflow iff the operand signs allow it and the result sign flipped. */
    tcg_gen_xor_tl(cpu_ov, arg0, arg2);
    tcg_gen_xor_tl(t0, arg1, arg2);
    if (sub) {
        tcg_gen_and_tl(cpu_ov, cpu_ov, t0);
    } else {
        tcg_gen_andc_tl(cpu_ov, cpu_ov, t0);
    }
    if (NARROW_MODE(ctx)) {
        tcg_gen_extract_tl(cpu_ov, cpu_ov, 31, 1);
        if (is_isa300(ctx)) {
            tcg_gen_mov_tl(cpu_ov32, cpu_ov);
        }
    } else {
        if (is_isa300(ctx)) {
            tcg_gen_extract_tl(cpu_ov32, cpu_ov, 31, 1);
        }
        tcg_gen_extract_tl(cpu_ov, cpu_ov, TARGET_LONG_BITS - 1, 1);
    }
    tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
}

/* Compute CA32 (ISA 3.00+) for an add (sub=0) or subtract (sub=1). */
static inline void gen_op_arith_compute_ca32(DisasContext *ctx,
                                             TCGv res, TCGv arg0, TCGv arg1,
                                             TCGv ca32, int sub)
{
    TCGv t0;

    if (!is_isa300(ctx)) {
        return;
    }

    /* Carry out of bit 32 recovered from operands and result. */
    t0 = tcg_temp_new();
    if (sub) {
        tcg_gen_eqv_tl(t0, arg0, arg1);
    } else {
        tcg_gen_xor_tl(t0, arg0, arg1);
    }
    tcg_gen_xor_tl(t0, t0, res);
    tcg_gen_extract_tl(ca32, t0, 32, 1);
}

/* Common add function */
static inline void gen_op_arith_add(DisasContext *ctx, TCGv ret, TCGv arg1,
                                    TCGv arg2, TCGv ca, TCGv ca32,
                                    bool add_ca, bool compute_ca,
                                    bool compute_ov, bool compute_rc0)
{
    TCGv t0 = ret;

    /* Use a scratch so flag computation can still read the inputs. */
    if (compute_ca || compute_ov) {
        t0 = tcg_temp_new();
    }

    if (compute_ca) {
        if (NARROW_MODE(ctx)) {
            /*
             * Caution: a non-obvious corner case of the spec is that
             * we must produce the *entire* 64-bit addition, but
             * produce the carry into bit 32.
             */
            TCGv t1 = tcg_temp_new();
            tcg_gen_xor_tl(t1, arg1, arg2);         /* add without carry */
            tcg_gen_add_tl(t0, arg1, arg2);
            if (add_ca) {
                tcg_gen_add_tl(t0, t0, ca);
            }
            tcg_gen_xor_tl(ca, t0, t1);             /* bits changed w/ carry */
            tcg_gen_extract_tl(ca, ca, 32, 1);
            if (is_isa300(ctx)) {
                tcg_gen_mov_tl(ca32, ca);
            }
        } else {
            TCGv zero = tcg_constant_tl(0);
            if (add_ca) {
                tcg_gen_add2_tl(t0, ca, arg1, zero, ca, zero);
                tcg_gen_add2_tl(t0, ca, t0, ca, arg2, zero);
            } else {
                tcg_gen_add2_tl(t0, ca, arg1, zero, arg2, zero);
            }
            gen_op_arith_compute_ca32(ctx, t0, arg1, arg2, ca32, 0);
        }
    } else {
        tcg_gen_add_tl(t0, arg1, arg2);
        if (add_ca) {
            tcg_gen_add_tl(t0, t0, ca);
        }
    }

    if (compute_ov) {
        gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 0);
    }
    if (unlikely(compute_rc0)) {
        gen_set_Rc0(ctx, t0);
    }

    if (t0 != ret) {
        tcg_gen_mov_tl(ret, t0);
    }
}

/* 32-bit divide; guards against divide-by-zero and INT_MIN / -1. */
static inline void gen_op_arith_divw(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int sign, int compute_ov)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();
    TCGv_i32 t3 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t0, arg1);
    tcg_gen_trunc_tl_i32(t1, arg2);
    if (sign) {
        /* t2 = 1 on the undefined cases; force divisor to a safe value. */
        tcg_gen_setcondi_i32(TCG_COND_EQ, t2, t0, INT_MIN);
        tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, -1);
        tcg_gen_and_i32(t2, t2, t3);
        tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, 0);
        tcg_gen_or_i32(t2, t2, t3);
        tcg_gen_movi_i32(t3, 0);
        tcg_gen_movcond_i32(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_div_i32(t3, t0, t1);
        tcg_gen_extu_i32_tl(ret, t3);
    } else {
        tcg_gen_setcondi_i32(TCG_COND_EQ, t2, t1, 0);
        tcg_gen_movi_i32(t3, 0);
        tcg_gen_movcond_i32(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_divu_i32(t3, t0, t1);
        tcg_gen_extu_i32_tl(ret, t3);
    }
    if (compute_ov) {
        /* t2 already flags the overflow/undefined cases. */
        tcg_gen_extu_i32_tl(cpu_ov, t2);
        if (is_isa300(ctx)) {
            tcg_gen_extu_i32_tl(cpu_ov32, t2);
        }
        tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
    }

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, ret);
    }
}
/* Div functions */
#define GEN_INT_ARITH_DIVW(name, opc3, sign, compute_ov)                      \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_divw(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],     \
                      sign, compute_ov);                                      \
}
/* divwu divwu. divwuo divwuo. */
GEN_INT_ARITH_DIVW(divwu, 0x0E, 0, 0);
GEN_INT_ARITH_DIVW(divwuo, 0x1E, 0, 1);
/* divw divw. divwo divwo. */
GEN_INT_ARITH_DIVW(divw, 0x0F, 1, 0);
GEN_INT_ARITH_DIVW(divwo, 0x1F, 1, 1);

/* div[wd]eu[o][.] */
#define GEN_DIVE(name, hlpr, compute_ov)                                      \
static void gen_##name(DisasContext *ctx)                                     \
{                                                                             \
    TCGv_i32 t0 = tcg_constant_i32(compute_ov);                               \
    gen_helper_##hlpr(cpu_gpr[rD(ctx->opcode)], tcg_env,                      \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0); \
    if (unlikely(Rc(ctx->opcode) != 0)) {                                     \
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);                           \
    }                                                                         \
}

GEN_DIVE(divweu, divweu, 0);
GEN_DIVE(divweuo, divweu, 1);
GEN_DIVE(divwe, divwe, 0);
GEN_DIVE(divweo, divwe, 1);

#if defined(TARGET_PPC64)
/* 64-bit divide; guards against divide-by-zero and INT64_MIN / -1. */
static inline void gen_op_arith_divd(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int sign, int compute_ov)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();

    tcg_gen_mov_i64(t0, arg1);
    tcg_gen_mov_i64(t1, arg2);
    if (sign) {
        /* t2 = 1 on the undefined cases; force divisor to a safe value. */
        tcg_gen_setcondi_i64(TCG_COND_EQ, t2, t0, INT64_MIN);
        tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, -1);
        tcg_gen_and_i64(t2, t2, t3);
        tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, 0);
        tcg_gen_or_i64(t2, t2, t3);
        tcg_gen_movi_i64(t3, 0);
        tcg_gen_movcond_i64(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_div_i64(ret, t0, t1);
    } else {
        tcg_gen_setcondi_i64(TCG_COND_EQ, t2, t1, 0);
        tcg_gen_movi_i64(t3, 0);
        tcg_gen_movcond_i64(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_divu_i64(ret, t0, t1);
    }
    if (compute_ov) {
        /* t2 already flags the overflow/undefined cases. */
        tcg_gen_mov_tl(cpu_ov, t2);
        if (is_isa300(ctx)) {
            tcg_gen_mov_tl(cpu_ov32, t2);
        }
        tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
    }

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, ret);
    }
}

#define GEN_INT_ARITH_DIVD(name, opc3, sign, compute_ov)                      \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_divd(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],     \
                      sign, compute_ov);                                      \
}
/* divdu divdu. divduo divduo. */
GEN_INT_ARITH_DIVD(divdu, 0x0E, 0, 0);
GEN_INT_ARITH_DIVD(divduo, 0x1E, 0, 1);
/* divd divd. divdo divdo.
 */
GEN_INT_ARITH_DIVD(divd, 0x0F, 1, 0);
GEN_INT_ARITH_DIVD(divdo, 0x1F, 1, 1);

GEN_DIVE(divdeu, divdeu, 0);
GEN_DIVE(divdeuo, divdeu, 1);
GEN_DIVE(divde, divde, 0);
GEN_DIVE(divdeo, divde, 1);
#endif

/* 32-bit remainder; guards against divide-by-zero and INT_MIN % -1. */
static inline void gen_op_arith_modw(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int sign)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t0, arg1);
    tcg_gen_trunc_tl_i32(t1, arg2);
    if (sign) {
        /* Replace the divisor with 0 on the undefined cases. */
        TCGv_i32 t2 = tcg_temp_new_i32();
        TCGv_i32 t3 = tcg_temp_new_i32();
        tcg_gen_setcondi_i32(TCG_COND_EQ, t2, t0, INT_MIN);
        tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, -1);
        tcg_gen_and_i32(t2, t2, t3);
        tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, 0);
        tcg_gen_or_i32(t2, t2, t3);
        tcg_gen_movi_i32(t3, 0);
        tcg_gen_movcond_i32(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_rem_i32(t3, t0, t1);
        tcg_gen_ext_i32_tl(ret, t3);
    } else {
        /* Replace a zero divisor with 1. */
        TCGv_i32 t2 = tcg_constant_i32(1);
        TCGv_i32 t3 = tcg_constant_i32(0);
        tcg_gen_movcond_i32(TCG_COND_EQ, t1, t1, t3, t2, t1);
        tcg_gen_remu_i32(t0, t0, t1);
        tcg_gen_extu_i32_tl(ret, t0);
    }
}

#define GEN_INT_ARITH_MODW(name, opc3, sign)                                  \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_modw(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],     \
                      sign);                                                  \
}

GEN_INT_ARITH_MODW(moduw, 0x08, 0);
GEN_INT_ARITH_MODW(modsw, 0x18, 1);

#if defined(TARGET_PPC64)
/* 64-bit remainder; guards against divide-by-zero and INT64_MIN % -1. */
static inline void gen_op_arith_modd(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int sign)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    tcg_gen_mov_i64(t0, arg1);
    tcg_gen_mov_i64(t1, arg2);
    if (sign) {
        /* Replace the divisor with 0 on the undefined cases. */
        TCGv_i64 t2 = tcg_temp_new_i64();
        TCGv_i64 t3 = tcg_temp_new_i64();
        tcg_gen_setcondi_i64(TCG_COND_EQ, t2, t0, INT64_MIN);
        tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, -1);
        tcg_gen_and_i64(t2, t2, t3);
        tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, 0);
        tcg_gen_or_i64(t2, t2, t3);
        tcg_gen_movi_i64(t3, 0);
        tcg_gen_movcond_i64(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_rem_i64(ret, t0, t1);
    } else {
        /* Replace a zero divisor with 1. */
        TCGv_i64 t2 = tcg_constant_i64(1);
        TCGv_i64 t3 = tcg_constant_i64(0);
        tcg_gen_movcond_i64(TCG_COND_EQ, t1, t1, t3, t2, t1);
        tcg_gen_remu_i64(ret, t0, t1);
    }
}

#define GEN_INT_ARITH_MODD(name, opc3, sign)                                  \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_modd(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],     \
                      sign);                                                  \
}

GEN_INT_ARITH_MODD(modud, 0x08, 0);
GEN_INT_ARITH_MODD(modsd, 0x18, 1);
#endif

/* mulhw mulhw. */
static void gen_mulhw(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();

    /* High 32 bits of the signed 32x32 product. */
    tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_muls2_i32(t0, t1, t0, t1);
    tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}

/* mulhwu mulhwu. */
static void gen_mulhwu(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();

    /* High 32 bits of the unsigned 32x32 product. */
    tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_mulu2_i32(t0, t1, t0, t1);
    tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}

/* mullw mullw.
 */
static void gen_mullw(DisasContext *ctx)
{
#if defined(TARGET_PPC64)
    /* On 64-bit targets the full 64-bit product of the sign-extended
     * low words is written to rD. */
    TCGv_i64 t0, t1;
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    tcg_gen_ext32s_tl(t0, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_ext32s_tl(t1, cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_mul_i64(cpu_gpr[rD(ctx->opcode)], t0, t1);
#else
    tcg_gen_mul_i32(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                    cpu_gpr[rB(ctx->opcode)]);
#endif
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}

/* mullwo mullwo. */
static void gen_mullwo(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_muls2_i32(t0, t1, t0, t1);
#if defined(TARGET_PPC64)
    tcg_gen_concat_i32_i64(cpu_gpr[rD(ctx->opcode)], t0, t1);
#else
    tcg_gen_mov_i32(cpu_gpr[rD(ctx->opcode)], t0);
#endif

    /* Overflow iff the high half is not the sign extension of the low. */
    tcg_gen_sari_i32(t0, t0, 31);
    tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t1);
    tcg_gen_extu_i32_tl(cpu_ov, t0);
    if (is_isa300(ctx)) {
        tcg_gen_mov_tl(cpu_ov32, cpu_ov);
    }
    tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}

/* mulli */
static void gen_mulli(DisasContext *ctx)
{
    tcg_gen_muli_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                    SIMM(ctx->opcode));
}

#if defined(TARGET_PPC64)
/* mulhd mulhd. */
static void gen_mulhd(DisasContext *ctx)
{
    /* High 64 bits of the signed 64x64 product; low half discarded. */
    TCGv lo = tcg_temp_new();
    tcg_gen_muls2_tl(lo, cpu_gpr[rD(ctx->opcode)],
                     cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}

/* mulhdu mulhdu.
 */
static void gen_mulhdu(DisasContext *ctx)
{
    TCGv lo = tcg_temp_new();  /* low half is discarded */
    tcg_gen_mulu2_tl(lo, cpu_gpr[rD(ctx->opcode)],
                     cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}

/* mulld mulld. - rD = low 64 bits of rA * rB */
static void gen_mulld(DisasContext *ctx)
{
    tcg_gen_mul_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                   cpu_gpr[rB(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}

/* mulldo mulldo. - low 64-bit multiply, setting OV on signed overflow */
static void gen_mulldo(DisasContext *ctx)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    tcg_gen_muls2_i64(t0, t1, cpu_gpr[rA(ctx->opcode)],
                      cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_mov_i64(cpu_gpr[rD(ctx->opcode)], t0);

    /* OV iff the 128-bit product does not fit in a signed 64-bit value. */
    tcg_gen_sari_i64(t0, t0, 63);
    tcg_gen_setcond_i64(TCG_COND_NE, cpu_ov, t0, t1);
    if (is_isa300(ctx)) {
        tcg_gen_mov_tl(cpu_ov32, cpu_ov);
    }
    tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);  /* SO is sticky */

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}
#endif

/*
 * Common subf function: ret = ~arg1 + arg2 [+ CA], optionally updating
 * CA/CA32, OV/OV32/SO and CR0.  When flags are computed the result is
 * built in a temporary so that ret may alias arg1/arg2.
 */
static inline void gen_op_arith_subf(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, bool add_ca, bool compute_ca,
                                     bool compute_ov, bool compute_rc0)
{
    TCGv t0 = ret;

    if (compute_ca || compute_ov) {
        t0 = tcg_temp_new();
    }

    if (compute_ca) {
        /* dest = ~arg1 + arg2 [+ ca]. */
        if (NARROW_MODE(ctx)) {
            /*
             * Caution: a non-obvious corner case of the spec is that
             * we must produce the *entire* 64-bit addition, but
             * produce the carry into bit 32.
             */
            TCGv inv1 = tcg_temp_new();
            TCGv t1 = tcg_temp_new();
            tcg_gen_not_tl(inv1, arg1);
            if (add_ca) {
                tcg_gen_add_tl(t0, arg2, cpu_ca);
            } else {
                tcg_gen_addi_tl(t0, arg2, 1);
            }
            tcg_gen_xor_tl(t1, arg2, inv1);    /* add without carry */
            tcg_gen_add_tl(t0, t0, inv1);
            tcg_gen_xor_tl(cpu_ca, t0, t1);    /* bits changed by carry */
            tcg_gen_extract_tl(cpu_ca, cpu_ca, 32, 1);
            if (is_isa300(ctx)) {
                tcg_gen_mov_tl(cpu_ca32, cpu_ca);
            }
        } else if (add_ca) {
            /* Full-width subfe: two add2 steps to chain the carry in. */
            TCGv zero, inv1 = tcg_temp_new();
            tcg_gen_not_tl(inv1, arg1);
            zero = tcg_constant_tl(0);
            tcg_gen_add2_tl(t0, cpu_ca, arg2, zero, cpu_ca, zero);
            tcg_gen_add2_tl(t0, cpu_ca, t0, cpu_ca, inv1, zero);
            gen_op_arith_compute_ca32(ctx, t0, inv1, arg2, cpu_ca32, 0);
        } else {
            /* Plain subf with carry: CA = (arg2 >= arg1), unsigned. */
            tcg_gen_setcond_tl(TCG_COND_GEU, cpu_ca, arg2, arg1);
            tcg_gen_sub_tl(t0, arg2, arg1);
            gen_op_arith_compute_ca32(ctx, t0, arg1, arg2, cpu_ca32, 1);
        }
    } else if (add_ca) {
        /*
         * Since we're ignoring carry-out, we can simplify the
         * standard ~arg1 + arg2 + ca to arg2 - arg1 + ca - 1.
         */
        tcg_gen_sub_tl(t0, arg2, arg1);
        tcg_gen_add_tl(t0, t0, cpu_ca);
        tcg_gen_subi_tl(t0, t0, 1);
    } else {
        tcg_gen_sub_tl(t0, arg2, arg1);
    }

    if (compute_ov) {
        gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 1);
    }
    if (unlikely(compute_rc0)) {
        gen_set_Rc0(ctx, t0);
    }

    if (t0 != ret) {
        tcg_gen_mov_tl(ret, t0);
    }
}

/* neg neg. nego nego.
 */
/* neg is subf from zero: rD = 0 - rA, optionally with OV tracking. */
static inline void gen_op_arith_neg(DisasContext *ctx, bool compute_ov)
{
    TCGv zero = tcg_constant_tl(0);
    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                      zero, 0, 0, compute_ov, Rc(ctx->opcode));
}

static void gen_neg(DisasContext *ctx)
{
    tcg_gen_neg_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode))) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}

static void gen_nego(DisasContext *ctx)
{
    gen_op_arith_neg(ctx, 1);
}

/*** Integer logical ***/
/* Two-source logical op: rA = rS <op> rB, optional CR0 update. */
#define GEN_LOGICAL2(name, tcg_op, opc, type)                                 \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],                \
           cpu_gpr[rB(ctx->opcode)]);                                         \
    if (unlikely(Rc(ctx->opcode) != 0))                                       \
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);                           \
}

/* One-source logical op: rA = <op>(rS), optional CR0 update. */
#define GEN_LOGICAL1(name, tcg_op, opc, type)                                 \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);               \
    if (unlikely(Rc(ctx->opcode) != 0))                                       \
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);                           \
}

/* and & and. */
GEN_LOGICAL2(and, tcg_gen_and_tl, 0x00, PPC_INTEGER);
/* andc & andc. */
GEN_LOGICAL2(andc, tcg_gen_andc_tl, 0x01, PPC_INTEGER);

/* andi. - always records CR0 (the instruction has no non-Rc form) */
static void gen_andi_(DisasContext *ctx)
{
    tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                    UIMM(ctx->opcode));
    gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}

/* andis.
 */
/* andis. - AND with the immediate shifted into the upper halfword. */
static void gen_andis_(DisasContext *ctx)
{
    tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                    UIMM(ctx->opcode) << 16);
    gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}

/* cntlzw - count leading zeros of the low 32 bits; 32 when rS is zero */
static void gen_cntlzw(DisasContext *ctx)
{
    TCGv_i32 t = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t, cpu_gpr[rS(ctx->opcode)]);
    tcg_gen_clzi_i32(t, t, 32);
    tcg_gen_extu_i32_tl(cpu_gpr[rA(ctx->opcode)], t);

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
    }
}

/* cnttzw - count trailing zeros of the low 32 bits; 32 when rS is zero */
static void gen_cnttzw(DisasContext *ctx)
{
    TCGv_i32 t = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t, cpu_gpr[rS(ctx->opcode)]);
    tcg_gen_ctzi_i32(t, t, 32);
    tcg_gen_extu_i32_tl(cpu_gpr[rA(ctx->opcode)], t);

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
    }
}

/* eqv & eqv. */
GEN_LOGICAL2(eqv, tcg_gen_eqv_tl, 0x08, PPC_INTEGER);
/* extsb & extsb. */
GEN_LOGICAL1(extsb, tcg_gen_ext8s_tl, 0x1D, PPC_INTEGER);
/* extsh & extsh. */
GEN_LOGICAL1(extsh, tcg_gen_ext16s_tl, 0x1C, PPC_INTEGER);
/* nand & nand. */
GEN_LOGICAL2(nand, tcg_gen_nand_tl, 0x0E, PPC_INTEGER);
/* nor & nor. */
GEN_LOGICAL2(nor, tcg_gen_nor_tl, 0x03, PPC_INTEGER);

#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
/* Halt this vCPU and end the TB so other vCPUs get to run. */
static void gen_pause(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_constant_i32(0);
    /*
     * tcg_env points at the CPUPPCState embedded in PowerPCCPU; back up
     * to the containing CPUState to clear (store 0 to) its halted field.
     */
    tcg_gen_st_i32(t0, tcg_env,
                   -offsetof(PowerPCCPU, env) + offsetof(CPUState, halted));

    /* Stop translation, this gives other CPUs a chance to run */
    gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
}
#endif /* defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) */

/* or & or.
 */
/*
 * or is overloaded: besides the plain OR / mr forms, "or rN,rN,rN"
 * with rN != 0 encodes a thread-priority hint written to SPR_PPR.
 */
static void gen_or(DisasContext *ctx)
{
    int rs, ra, rb;

    rs = rS(ctx->opcode);
    ra = rA(ctx->opcode);
    rb = rB(ctx->opcode);
    /* Optimisation for mr. ri case */
    if (rs != ra || rs != rb) {
        if (rs != rb) {
            tcg_gen_or_tl(cpu_gpr[ra], cpu_gpr[rs], cpu_gpr[rb]);
        } else {
            tcg_gen_mov_tl(cpu_gpr[ra], cpu_gpr[rs]);
        }
        if (unlikely(Rc(ctx->opcode) != 0)) {
            gen_set_Rc0(ctx, cpu_gpr[ra]);
        }
    } else if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rs]);
#if defined(TARGET_PPC64)
    } else if (rs != 0) { /* 0 is nop */
        int prio = 0;

        switch (rs) {
        case 1:
            /* Set process priority to low */
            prio = 2;
            break;
        case 6:
            /* Set process priority to medium-low */
            prio = 3;
            break;
        case 2:
            /* Set process priority to normal */
            prio = 4;
            break;
#if !defined(CONFIG_USER_ONLY)
        case 31:
            if (!ctx->pr) {
                /* Set process priority to very low */
                prio = 1;
            }
            break;
        case 5:
            if (!ctx->pr) {
                /* Set process priority to medium-high */
                prio = 5;
            }
            break;
        case 3:
            if (!ctx->pr) {
                /* Set process priority to high */
                prio = 6;
            }
            break;
        case 7:
            if (ctx->hv && !ctx->pr) {
                /* Set process priority to very high */
                prio = 7;
            }
            break;
#endif
        default:
            break;
        }
        if (prio) {
            /* Insert prio into PPR[11:13] (bits 50-52 counting from 0). */
            TCGv t0 = tcg_temp_new();
            gen_load_spr(t0, SPR_PPR);
            tcg_gen_andi_tl(t0, t0, ~0x001C000000000000ULL);
            tcg_gen_ori_tl(t0, t0, ((uint64_t)prio) << 50);
            gen_store_spr(SPR_PPR, t0);
        }
#if !defined(CONFIG_USER_ONLY)
        /*
         * Pause out of TCG otherwise spin loops with smt_low eat too
         * much CPU and the kernel hangs. This applies to all
         * encodings other than no-op, e.g., miso(rs=26), yield(27),
         * mdoio(29), mdoom(30), and all currently undefined.
         */
        gen_pause(ctx);
#endif
#endif
    }
}
/* orc & orc. */
GEN_LOGICAL2(orc, tcg_gen_orc_tl, 0x0C, PPC_INTEGER);

/* xor & xor. */
static void gen_xor(DisasContext *ctx)
{
    /* Optimisation for "set to zero" case */
    if (rS(ctx->opcode) != rB(ctx->opcode)) {
        tcg_gen_xor_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                       cpu_gpr[rB(ctx->opcode)]);
    } else {
        tcg_gen_movi_tl(cpu_gpr[rA(ctx->opcode)], 0);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
    }
}

/* ori - ori 0,0,0 is the canonical no-op, so emit nothing for it */
static void gen_ori(DisasContext *ctx)
{
    target_ulong uimm = UIMM(ctx->opcode);

    if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
        return;
    }
    tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm);
}

/* oris */
static void gen_oris(DisasContext *ctx)
{
    target_ulong uimm = UIMM(ctx->opcode);

    if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
        /* NOP */
        return;
    }
    tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                   uimm << 16);
}

/* xori */
static void gen_xori(DisasContext *ctx)
{
    target_ulong uimm = UIMM(ctx->opcode);

    if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
        /* NOP */
        return;
    }
    tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm);
}

/* xoris */
static void gen_xoris(DisasContext *ctx)
{
    target_ulong uimm = UIMM(ctx->opcode);

    if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
        /* NOP */
        return;
    }
    tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                    uimm << 16);
}

/* popcntb : PowerPC 2.03 specification */
static void gen_popcntb(DisasContext *ctx)
{
    gen_helper_popcntb(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
}

static void gen_popcntw(DisasContext *ctx)
{
#if defined(TARGET_PPC64)
    gen_helper_popcntw(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
#else
    tcg_gen_ctpop_i32(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
#endif
}

#if defined(TARGET_PPC64)
/* popcntd: PowerPC 2.06 specification */
static void gen_popcntd(DisasContext *ctx)
{
    tcg_gen_ctpop_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
}
#endif

/* prtyw: PowerPC 2.05 specification */
/* Fold each word's parity down into its bit 0 by xor-shifting. */
static void gen_prtyw(DisasContext *ctx)
{
    TCGv ra = cpu_gpr[rA(ctx->opcode)];
    TCGv rs = cpu_gpr[rS(ctx->opcode)];
    TCGv t0 = tcg_temp_new();
    tcg_gen_shri_tl(t0, rs, 16);
    tcg_gen_xor_tl(ra, rs, t0);
    tcg_gen_shri_tl(t0, ra, 8);
    tcg_gen_xor_tl(ra, ra, t0);
    tcg_gen_andi_tl(ra, ra, (target_ulong)0x100000001ULL);
}

#if defined(TARGET_PPC64)
/* prtyd: PowerPC 2.05 specification */
/* Same folding as prtyw, but across the full doubleword into bit 0. */
static void gen_prtyd(DisasContext *ctx)
{
    TCGv ra = cpu_gpr[rA(ctx->opcode)];
    TCGv rs = cpu_gpr[rS(ctx->opcode)];
    TCGv t0 = tcg_temp_new();
    tcg_gen_shri_tl(t0, rs, 32);
    tcg_gen_xor_tl(ra, rs, t0);
    tcg_gen_shri_tl(t0, ra, 16);
    tcg_gen_xor_tl(ra, ra, t0);
    tcg_gen_shri_tl(t0, ra, 8);
    tcg_gen_xor_tl(ra, ra, t0);
    tcg_gen_andi_tl(ra, ra, 1);
}
#endif

#if defined(TARGET_PPC64)
/* bpermd */
static void gen_bpermd(DisasContext *ctx)
{
    gen_helper_bpermd(cpu_gpr[rA(ctx->opcode)],
                      cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
}
#endif

#if defined(TARGET_PPC64)
/* extsw & extsw.
 */
GEN_LOGICAL1(extsw, tcg_gen_ext32s_tl, 0x1E, PPC_64B);

/* cntlzd - count leading zeros, 64-bit; 64 when rS is zero */
static void gen_cntlzd(DisasContext *ctx)
{
    tcg_gen_clzi_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], 64);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
    }
}

/* cnttzd - count trailing zeros, 64-bit; 64 when rS is zero */
static void gen_cnttzd(DisasContext *ctx)
{
    tcg_gen_ctzi_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], 64);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
    }
}

/* darn - deliver a random number; L selects the format */
static void gen_darn(DisasContext *ctx)
{
    int l = L(ctx->opcode);

    if (l > 2) {
        /* Reserved L values: deliver the error indication (-1). */
        tcg_gen_movi_i64(cpu_gpr[rD(ctx->opcode)], -1);
    } else {
        translator_io_start(&ctx->base);
        if (l == 0) {
            gen_helper_darn32(cpu_gpr[rD(ctx->opcode)]);
        } else {
            /* Return 64-bit random for both CRN and RRN */
            gen_helper_darn64(cpu_gpr[rD(ctx->opcode)]);
        }
    }
}
#endif

/*** Integer rotate ***/

/* rlwimi & rlwimi.
 */
/*
 * Rotate-left-word-immediate-then-mask-insert: rotate rS left by sh and
 * insert the bits selected by MASK(mb, me) into rA.  The common
 * contiguous case maps directly onto a TCG deposit.
 */
static void gen_rlwimi(DisasContext *ctx)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    uint32_t sh = SH(ctx->opcode);
    uint32_t mb = MB(ctx->opcode);
    uint32_t me = ME(ctx->opcode);

    if (sh == (31 - me) && mb <= me) {
        /* The rotated field lands exactly on the mask: a plain deposit. */
        tcg_gen_deposit_tl(t_ra, t_ra, t_rs, sh, me - mb + 1);
    } else {
        target_ulong mask;
        bool mask_in_32b = true;
        TCGv t1;

#if defined(TARGET_PPC64)
        /* MASK() numbers bits from the MSB of a 64-bit value. */
        mb += 32;
        me += 32;
#endif
        mask = MASK(mb, me);

#if defined(TARGET_PPC64)
        /* A wrapping mask (mb > me) spills into the upper 32 bits. */
        if (mask > 0xffffffffu) {
            mask_in_32b = false;
        }
#endif
        t1 = tcg_temp_new();
        if (mask_in_32b) {
            /* 32-bit rotate of the low word, then widen zero-extended. */
            TCGv_i32 t0 = tcg_temp_new_i32();
            tcg_gen_trunc_tl_i32(t0, t_rs);
            tcg_gen_rotli_i32(t0, t0, sh);
            tcg_gen_extu_i32_tl(t1, t0);
        } else {
#if defined(TARGET_PPC64)
            /* Replicate the word into both halves, then 64-bit rotate. */
            tcg_gen_deposit_i64(t1, t_rs, t_rs, 32, 32);
            tcg_gen_rotli_i64(t1, t1, sh);
#else
            g_assert_not_reached();
#endif
        }

        /* Merge: masked rotated bits into the preserved rA bits. */
        tcg_gen_andi_tl(t1, t1, mask);
        tcg_gen_andi_tl(t_ra, t_ra, ~mask);
        tcg_gen_or_tl(t_ra, t_ra, t1);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}

/* rlwinm & rlwinm.
 */
/*
 * Rotate-left-word-immediate-then-AND-with-mask.  The shift-left and
 * extract special cases map onto single TCG ops; otherwise rotate and
 * mask explicitly.
 */
static void gen_rlwinm(DisasContext *ctx)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    int sh = SH(ctx->opcode);
    int mb = MB(ctx->opcode);
    int me = ME(ctx->opcode);
    int len = me - mb + 1;
    int rsh = (32 - sh) & 31;

    if (sh != 0 && len > 0 && me == (31 - sh)) {
        /* Field shifted left with zero fill: deposit into zero. */
        tcg_gen_deposit_z_tl(t_ra, t_rs, sh, len);
    } else if (me == 31 && rsh + len <= 32) {
        /* Right-justified field: a plain extract. */
        tcg_gen_extract_tl(t_ra, t_rs, rsh, len);
    } else {
        target_ulong mask;
        bool mask_in_32b = true;
#if defined(TARGET_PPC64)
        /* MASK() numbers bits from the MSB of a 64-bit value. */
        mb += 32;
        me += 32;
#endif
        mask = MASK(mb, me);
#if defined(TARGET_PPC64)
        /* A wrapping mask (mb > me) spills into the upper 32 bits. */
        if (mask > 0xffffffffu) {
            mask_in_32b = false;
        }
#endif
        if (mask_in_32b) {
            if (sh == 0) {
                tcg_gen_andi_tl(t_ra, t_rs, mask);
            } else {
                /* 32-bit rotate then mask, widened zero-extended. */
                TCGv_i32 t0 = tcg_temp_new_i32();
                tcg_gen_trunc_tl_i32(t0, t_rs);
                tcg_gen_rotli_i32(t0, t0, sh);
                tcg_gen_andi_i32(t0, t0, mask);
                tcg_gen_extu_i32_tl(t_ra, t0);
            }
        } else {
#if defined(TARGET_PPC64)
            /* Replicate the word into both halves, 64-bit rotate, mask. */
            tcg_gen_deposit_i64(t_ra, t_rs, t_rs, 32, 32);
            tcg_gen_rotli_i64(t_ra, t_ra, sh);
            tcg_gen_andi_i64(t_ra, t_ra, mask);
#else
            g_assert_not_reached();
#endif
        }
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}

/* rlwnm & rlwnm.
 */
/* Rotate-left-word-then-AND-with-mask, rotate amount from rB[0:4]. */
static void gen_rlwnm(DisasContext *ctx)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    TCGv t_rb = cpu_gpr[rB(ctx->opcode)];
    uint32_t mb = MB(ctx->opcode);
    uint32_t me = ME(ctx->opcode);
    target_ulong mask;
    bool mask_in_32b = true;

#if defined(TARGET_PPC64)
    /* MASK() numbers bits from the MSB of a 64-bit value. */
    mb += 32;
    me += 32;
#endif
    mask = MASK(mb, me);

#if defined(TARGET_PPC64)
    /* A wrapping mask (mb > me) spills into the upper 32 bits. */
    if (mask > 0xffffffffu) {
        mask_in_32b = false;
    }
#endif
    if (mask_in_32b) {
        /* 32-bit variable rotate of the low word. */
        TCGv_i32 t0 = tcg_temp_new_i32();
        TCGv_i32 t1 = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(t0, t_rb);
        tcg_gen_trunc_tl_i32(t1, t_rs);
        tcg_gen_andi_i32(t0, t0, 0x1f);
        tcg_gen_rotl_i32(t1, t1, t0);
        tcg_gen_extu_i32_tl(t_ra, t1);
    } else {
#if defined(TARGET_PPC64)
        /* Replicate the word into both halves, then 64-bit rotate. */
        TCGv_i64 t0 = tcg_temp_new_i64();
        tcg_gen_andi_i64(t0, t_rb, 0x1f);
        tcg_gen_deposit_i64(t_ra, t_rs, t_rs, 32, 32);
        tcg_gen_rotl_i64(t_ra, t_ra, t0);
#else
        g_assert_not_reached();
#endif
    }

    tcg_gen_andi_tl(t_ra, t_ra, mask);

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}

#if defined(TARGET_PPC64)
/* Expand the 1-bit (R2) or 2-bit (R4) split opcode fields of the 64-bit
   rotate instructions into one entry point per encoding. */
#define GEN_PPC64_R2(name, opc1, opc2)                                        \
static void glue(gen_, name##0)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 0);                                                       \
}                                                                             \
                                                                              \
static void glue(gen_, name##1)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 1);                                                       \
}
#define GEN_PPC64_R4(name, opc1, opc2)                                        \
static void glue(gen_, name##0)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 0, 0);                                                    \
}                                                                             \
                                                                              \
static void glue(gen_, name##1)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 0, 1);                                                    \
}                                                                             \
                                                                              \
static void glue(gen_, name##2)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 1, 0);                                                    \
}                                                                             \
                                                                              \
static void glue(gen_, name##3)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 1, 1);                                                    \
}

/* Common 64-bit rotate-immediate-and-mask body for rldicl/rldicr/rldic. */
static void gen_rldinm(DisasContext *ctx, int mb, int me, int sh)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    int len = me - mb + 1;
    int rsh = (64 - sh) & 63;

    if (sh != 0 && len > 0 && me == (63 - sh)) {
        /* Field shifted left with zero fill: deposit into zero. */
        tcg_gen_deposit_z_tl(t_ra, t_rs, sh, len);
    } else if (me == 63 && rsh + len <= 64) {
        /* Right-justified field: a plain extract. */
        tcg_gen_extract_tl(t_ra, t_rs, rsh, len);
    } else {
        tcg_gen_rotli_tl(t_ra, t_rs, sh);
        tcg_gen_andi_tl(t_ra, t_ra, MASK(mb, me));
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}

/* rldicl - rldicl. */
static inline void gen_rldicl(DisasContext *ctx, int mbn, int shn)
{
    uint32_t sh, mb;

    /* mbn/shn are the split high bits of the 6-bit fields. */
    sh = SH(ctx->opcode) | (shn << 5);
    mb = MB(ctx->opcode) | (mbn << 5);
    gen_rldinm(ctx, mb, 63, sh);
}
GEN_PPC64_R4(rldicl, 0x1E, 0x00);

/* rldicr - rldicr. */
static inline void gen_rldicr(DisasContext *ctx, int men, int shn)
{
    uint32_t sh, me;

    sh = SH(ctx->opcode) | (shn << 5);
    me = MB(ctx->opcode) | (men << 5);
    gen_rldinm(ctx, 0, me, sh);
}
GEN_PPC64_R4(rldicr, 0x1E, 0x02);

/* rldic - rldic.
 */
static inline void gen_rldic(DisasContext *ctx, int mbn, int shn)
{
    uint32_t sh, mb;

    /* mbn/shn are the split high bits of the 6-bit fields. */
    sh = SH(ctx->opcode) | (shn << 5);
    mb = MB(ctx->opcode) | (mbn << 5);
    gen_rldinm(ctx, mb, 63 - sh, sh);
}
GEN_PPC64_R4(rldic, 0x1E, 0x04);

/* Common 64-bit variable-rotate-and-mask body for rldcl/rldcr. */
static void gen_rldnm(DisasContext *ctx, int mb, int me)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    TCGv t_rb = cpu_gpr[rB(ctx->opcode)];
    TCGv t0;

    t0 = tcg_temp_new();
    tcg_gen_andi_tl(t0, t_rb, 0x3f);  /* rotate amount is rB mod 64 */
    tcg_gen_rotl_tl(t_ra, t_rs, t0);

    tcg_gen_andi_tl(t_ra, t_ra, MASK(mb, me));
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}

/* rldcl - rldcl. */
static inline void gen_rldcl(DisasContext *ctx, int mbn)
{
    uint32_t mb;

    mb = MB(ctx->opcode) | (mbn << 5);
    gen_rldnm(ctx, mb, 63);
}
GEN_PPC64_R2(rldcl, 0x1E, 0x08);

/* rldcr - rldcr. */
static inline void gen_rldcr(DisasContext *ctx, int men)
{
    uint32_t me;

    me = MB(ctx->opcode) | (men << 5);
    gen_rldnm(ctx, 0, me);
}
GEN_PPC64_R2(rldcr, 0x1E, 0x09);

/* rldimi - rldimi.
 */
/*
 * Rotate-left-doubleword-immediate-then-mask-insert: rotate rS by sh and
 * insert the bits selected by MASK(mb, 63 - sh) into rA.
 */
static void gen_rldimi(DisasContext *ctx, int mbn, int shn)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    uint32_t sh = SH(ctx->opcode) | (shn << 5);
    uint32_t mb = MB(ctx->opcode) | (mbn << 5);
    uint32_t me = 63 - sh;

    if (mb <= me) {
        /* Contiguous field: a plain deposit. */
        tcg_gen_deposit_tl(t_ra, t_ra, t_rs, sh, me - mb + 1);
    } else {
        /* Wrapping mask: rotate, mask, and merge by hand. */
        target_ulong mask = MASK(mb, me);
        TCGv t1 = tcg_temp_new();

        tcg_gen_rotli_tl(t1, t_rs, sh);
        tcg_gen_andi_tl(t1, t1, mask);
        tcg_gen_andi_tl(t_ra, t_ra, ~mask);
        tcg_gen_or_tl(t_ra, t_ra, t1);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}
GEN_PPC64_R4(rldimi, 0x1E, 0x06);
#endif

/*** Integer shift ***/

/* slw & slw. */
static void gen_slw(DisasContext *ctx)
{
    TCGv t0, t1;

    t0 = tcg_temp_new();
    /* AND rS with a mask that is 0 when rB >= 0x20 */
#if defined(TARGET_PPC64)
    /* Move rB bit 5 up to the sign bit and smear it over the word. */
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3a);
    tcg_gen_sari_tl(t0, t0, 0x3f);
#else
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1a);
    tcg_gen_sari_tl(t0, t0, 0x1f);
#endif
    tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1f);
    tcg_gen_shl_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
    tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
    }
}

/* sraw & sraw. - helper call, as it must also compute CA */
static void gen_sraw(DisasContext *ctx)
{
    gen_helper_sraw(cpu_gpr[rA(ctx->opcode)], tcg_env,
                    cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
    }
}

/* srawi & srawi.
 */
/*
 * Shift-right-algebraic-word-immediate.  CA is set iff the shifted-out
 * bits are non-zero AND the (32-bit) source was negative.
 */
static void gen_srawi(DisasContext *ctx)
{
    int sh = SH(ctx->opcode);
    TCGv dst = cpu_gpr[rA(ctx->opcode)];
    TCGv src = cpu_gpr[rS(ctx->opcode)];
    if (sh == 0) {
        /* Shift by zero only sign-extends; no bits can be lost. */
        tcg_gen_ext32s_tl(dst, src);
        tcg_gen_movi_tl(cpu_ca, 0);
        if (is_isa300(ctx)) {
            tcg_gen_movi_tl(cpu_ca32, 0);
        }
    } else {
        TCGv t0;
        tcg_gen_ext32s_tl(dst, src);
        tcg_gen_andi_tl(cpu_ca, dst, (1ULL << sh) - 1);  /* shifted-out bits */
        t0 = tcg_temp_new();
        tcg_gen_sari_tl(t0, dst, TARGET_LONG_BITS - 1);  /* sign mask */
        tcg_gen_and_tl(cpu_ca, cpu_ca, t0);
        tcg_gen_setcondi_tl(TCG_COND_NE, cpu_ca, cpu_ca, 0);
        if (is_isa300(ctx)) {
            tcg_gen_mov_tl(cpu_ca32, cpu_ca);
        }
        tcg_gen_sari_tl(dst, dst, sh);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, dst);
    }
}

/* srw & srw. */
static void gen_srw(DisasContext *ctx)
{
    TCGv t0, t1;

    t0 = tcg_temp_new();
    /* AND rS with a mask that is 0 when rB >= 0x20 */
#if defined(TARGET_PPC64)
    /* Move rB bit 5 up to the sign bit and smear it over the word. */
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3a);
    tcg_gen_sari_tl(t0, t0, 0x3f);
#else
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1a);
    tcg_gen_sari_tl(t0, t0, 0x1f);
#endif
    tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    tcg_gen_ext32u_tl(t0, t0);  /* logical shift sees only the low word */
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1f);
    tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
    }
}

#if defined(TARGET_PPC64)
/* sld & sld.
 */
static void gen_sld(DisasContext *ctx)
{
    TCGv t0, t1;

    t0 = tcg_temp_new();
    /* AND rS with a mask that is 0 when rB >= 0x40 */
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x39);
    tcg_gen_sari_tl(t0, t0, 0x3f);  /* smear rB bit 6 over the register */
    tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x3f);
    tcg_gen_shl_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
    }
}

/* srad & srad. - helper call, as it must also compute CA */
static void gen_srad(DisasContext *ctx)
{
    gen_helper_srad(cpu_gpr[rA(ctx->opcode)], tcg_env,
                    cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
    }
}
/* sradi & sradi. */
/*
 * Shift-right-algebraic-doubleword-immediate; n is the split high bit
 * of the 6-bit shift count.  CA is set iff the shifted-out bits are
 * non-zero AND the source was negative.
 */
static inline void gen_sradi(DisasContext *ctx, int n)
{
    int sh = SH(ctx->opcode) + (n << 5);
    TCGv dst = cpu_gpr[rA(ctx->opcode)];
    TCGv src = cpu_gpr[rS(ctx->opcode)];
    if (sh == 0) {
        tcg_gen_mov_tl(dst, src);
        tcg_gen_movi_tl(cpu_ca, 0);
        if (is_isa300(ctx)) {
            tcg_gen_movi_tl(cpu_ca32, 0);
        }
    } else {
        TCGv t0;
        tcg_gen_andi_tl(cpu_ca, src, (1ULL << sh) - 1);  /* shifted-out bits */
        t0 = tcg_temp_new();
        tcg_gen_sari_tl(t0, src, TARGET_LONG_BITS - 1);  /* sign mask */
        tcg_gen_and_tl(cpu_ca, cpu_ca, t0);
        tcg_gen_setcondi_tl(TCG_COND_NE, cpu_ca, cpu_ca, 0);
        if (is_isa300(ctx)) {
            tcg_gen_mov_tl(cpu_ca32, cpu_ca);
        }
        tcg_gen_sari_tl(dst, src, sh);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, dst);
    }
}

static void gen_sradi0(DisasContext *ctx)
{
    gen_sradi(ctx, 0);
}

static void gen_sradi1(DisasContext *ctx)
{
    gen_sradi(ctx, 1);
}

/* extswsli & extswsli.
 */
/* Sign-extend the low word of rS, then shift left by sh (split field n). */
static inline void gen_extswsli(DisasContext *ctx, int n)
{
    int sh = SH(ctx->opcode) + (n << 5);
    TCGv dst = cpu_gpr[rA(ctx->opcode)];
    TCGv src = cpu_gpr[rS(ctx->opcode)];

    tcg_gen_ext32s_tl(dst, src);
    tcg_gen_shli_tl(dst, dst, sh);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, dst);
    }
}

static void gen_extswsli0(DisasContext *ctx)
{
    gen_extswsli(ctx, 0);
}

static void gen_extswsli1(DisasContext *ctx)
{
    gen_extswsli(ctx, 1);
}

/* srd & srd. */
static void gen_srd(DisasContext *ctx)
{
    TCGv t0, t1;

    t0 = tcg_temp_new();
    /* AND rS with a mask that is 0 when rB >= 0x40 */
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x39);
    tcg_gen_sari_tl(t0, t0, 0x3f);  /* smear rB bit 6 over the register */
    tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x3f);
    tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
    }
}
#endif

/*** Addressing modes ***/
/* Register indirect with immediate index : EA = (rA|0) + SIMM */
static inline void gen_addr_imm_index(DisasContext *ctx, TCGv EA,
                                      target_long maskl)
{
    target_long simm = SIMM(ctx->opcode);

    simm &= ~maskl;  /* callers clear low bits for aligned-form insns */
    if (rA(ctx->opcode) == 0) {
        if (NARROW_MODE(ctx)) {
            /* 32-bit mode: effective addresses are truncated to 32 bits. */
            simm = (uint32_t)simm;
        }
        tcg_gen_movi_tl(EA, simm);
    } else if (likely(simm != 0)) {
        tcg_gen_addi_tl(EA, cpu_gpr[rA(ctx->opcode)], simm);
        if (NARROW_MODE(ctx)) {
            tcg_gen_ext32u_tl(EA, EA);
        }
    } else {
        if (NARROW_MODE(ctx)) {
            tcg_gen_ext32u_tl(EA, cpu_gpr[rA(ctx->opcode)]);
        } else {
            tcg_gen_mov_tl(EA, cpu_gpr[rA(ctx->opcode)]);
        }
    }
}

/* Register indirect with index : EA = (rA|0) + rB */
static inline void gen_addr_reg_index(DisasContext *ctx, TCGv EA)
{
    if (rA(ctx->opcode) == 0) {
        if (NARROW_MODE(ctx)) {
            tcg_gen_ext32u_tl(EA, cpu_gpr[rB(ctx->opcode)]);
        } else {
            tcg_gen_mov_tl(EA, cpu_gpr[rB(ctx->opcode)]);
        }
    } else {
        tcg_gen_add_tl(EA, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
        if (NARROW_MODE(ctx)) {
            tcg_gen_ext32u_tl(EA, EA);
        }
    }
}

/* Register indirect : EA = (rA|0) */
static inline void gen_addr_register(DisasContext *ctx, TCGv EA)
{
    if (rA(ctx->opcode) == 0) {
        tcg_gen_movi_tl(EA, 0);
    } else if (NARROW_MODE(ctx)) {
        tcg_gen_ext32u_tl(EA, cpu_gpr[rA(ctx->opcode)]);
    } else {
        tcg_gen_mov_tl(EA, cpu_gpr[rA(ctx->opcode)]);
    }
}

/* ret = arg1 + val, truncated to 32 bits in narrow mode. */
static inline void gen_addr_add(DisasContext *ctx, TCGv ret, TCGv arg1,
                                target_long val)
{
    tcg_gen_addi_tl(ret, arg1, val);
    if (NARROW_MODE(ctx)) {
        tcg_gen_ext32u_tl(ret, ret);
    }
}

/* Raise an alignment interrupt encoding the faulting opcode fields. */
static inline void gen_align_no_le(DisasContext *ctx)
{
    gen_exception_err(ctx, POWERPC_EXCP_ALIGN,
                      (ctx->opcode & 0x03FF0000) | POWERPC_EXCP_ALIGN_LE);
}

/* EA = (ra ? gpr[ra] : 0) + displ, in a fresh temp. */
static TCGv do_ea_calc(DisasContext *ctx, int ra, TCGv displ)
{
    TCGv ea = tcg_temp_new();
    if (ra) {
        tcg_gen_add_tl(ea, cpu_gpr[ra], displ);
    } else {
        tcg_gen_mov_tl(ea, displ);
    }
    if (NARROW_MODE(ctx)) {
        tcg_gen_ext32u_tl(ea, ea);
    }
    return ea;
}

/*** Integer load ***/
/* Memop in guest endianness / explicitly byte-swapped, respectively. */
#define DEF_MEMOP(op) ((op) | ctx->default_tcg_memop_mask)
#define BSWAP_MEMOP(op) ((op) | (ctx->default_tcg_memop_mask ^ MO_BSWAP))

#define GEN_QEMU_LOAD_TL(ldop, op)                                            \
static void glue(gen_qemu_, ldop)(DisasContext *ctx,                          \
                                  TCGv val,                                   \
                                  TCGv addr)                                  \
{                                                                             \
    tcg_gen_qemu_ld_tl(val, addr, ctx->mem_idx, op);                          \
}

GEN_QEMU_LOAD_TL(ld8u,  DEF_MEMOP(MO_UB))
GEN_QEMU_LOAD_TL(ld16u, DEF_MEMOP(MO_UW))
GEN_QEMU_LOAD_TL(ld16s, DEF_MEMOP(MO_SW))
GEN_QEMU_LOAD_TL(ld32u, DEF_MEMOP(MO_UL))
GEN_QEMU_LOAD_TL(ld32s, DEF_MEMOP(MO_SL))

/* "r" variants: byte-reversed with respect to guest endianness. */
GEN_QEMU_LOAD_TL(ld16ur, BSWAP_MEMOP(MO_UW))
GEN_QEMU_LOAD_TL(ld32ur, BSWAP_MEMOP(MO_UL))

#define GEN_QEMU_LOAD_64(ldop, op)                                            \
static void glue(gen_qemu_, glue(ldop, _i64))(DisasContext *ctx,              \
                                              TCGv_i64 val,                   \
                                              TCGv addr)                      \
{                                                                             \
    tcg_gen_qemu_ld_i64(val, addr, ctx->mem_idx, op);                         \
}

GEN_QEMU_LOAD_64(ld8u,  DEF_MEMOP(MO_UB))
GEN_QEMU_LOAD_64(ld16u, DEF_MEMOP(MO_UW))
GEN_QEMU_LOAD_64(ld32u, DEF_MEMOP(MO_UL))
GEN_QEMU_LOAD_64(ld32s, DEF_MEMOP(MO_SL))
GEN_QEMU_LOAD_64(ld64,  DEF_MEMOP(MO_UQ))

#if defined(TARGET_PPC64)
GEN_QEMU_LOAD_64(ld64ur, BSWAP_MEMOP(MO_UQ))
#endif

#define GEN_QEMU_STORE_TL(stop, op)                                           \
static void glue(gen_qemu_, stop)(DisasContext *ctx,                          \
                                  TCGv val,                                   \
                                  TCGv addr)                                  \
{                                                                             \
    tcg_gen_qemu_st_tl(val, addr, ctx->mem_idx, op);                          \
}

#if defined(TARGET_PPC64) || !defined(CONFIG_USER_ONLY)
GEN_QEMU_STORE_TL(st8,  DEF_MEMOP(MO_UB))
#endif
GEN_QEMU_STORE_TL(st16, DEF_MEMOP(MO_UW))
GEN_QEMU_STORE_TL(st32, DEF_MEMOP(MO_UL))

/* "r" variants: byte-reversed with respect to guest endianness. */
GEN_QEMU_STORE_TL(st16r, BSWAP_MEMOP(MO_UW))
GEN_QEMU_STORE_TL(st32r, BSWAP_MEMOP(MO_UL))

#define GEN_QEMU_STORE_64(stop, op)                                           \
static void glue(gen_qemu_, glue(stop, _i64))(DisasContext *ctx,              \
                                              TCGv_i64 val,                   \
                                              TCGv addr)                      \
{                                                                             \
    tcg_gen_qemu_st_i64(val, addr, ctx->mem_idx, op);                         \
}

GEN_QEMU_STORE_64(st8,  DEF_MEMOP(MO_UB))
GEN_QEMU_STORE_64(st16, DEF_MEMOP(MO_UW))
GEN_QEMU_STORE_64(st32, DEF_MEMOP(MO_UL))
GEN_QEMU_STORE_64(st64, DEF_MEMOP(MO_UQ))

#if defined(TARGET_PPC64)
GEN_QEMU_STORE_64(st64r, BSWAP_MEMOP(MO_UQ))
#endif

/* Indexed load: rD = mem[(rA|0) + rB], with privilege check "chk". */
#define GEN_LDX_E(name, ldop, opc2, opc3, type, type2, chk)                   \
static void glue(gen_, name##x)(DisasContext *ctx)                            \
{                                                                             \
    TCGv EA;                                                                  \
    chk(ctx);                                                                 \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA);                       \
}

#define GEN_LDX(name, ldop, opc2, opc3, type)                                 \
    GEN_LDX_E(name, ldop, opc2, opc3, type, PPC_NONE, CHK_NONE)

#define GEN_LDX_HVRM(name, ldop, opc2, opc3, type)                            \
    GEN_LDX_E(name, ldop, opc2, opc3, type, PPC_NONE, CHK_HVRM)

/* External-PID load: supervisor-only, uses the EPID MMU index. */
#define GEN_LDEPX(name, ldop, opc2, opc3)                                     \
static void glue(gen_, name##epx)(DisasContext *ctx)                          \
{                                                                             \
    TCGv EA;                                                                  \
    CHK_SV(ctx);                                                              \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    tcg_gen_qemu_ld_tl(cpu_gpr[rD(ctx->opcode)], EA, PPC_TLB_EPID_LOAD, ldop);\
}

GEN_LDEPX(lb, DEF_MEMOP(MO_UB), 0x1F, 0x02)
GEN_LDEPX(lh, DEF_MEMOP(MO_UW), 0x1F, 0x08)
GEN_LDEPX(lw, DEF_MEMOP(MO_UL), 0x1F, 0x00)
#if defined(TARGET_PPC64)
GEN_LDEPX(ld, DEF_MEMOP(MO_UQ), 0x1D, 0x00)
#endif

#if defined(TARGET_PPC64)
/* CI load/store variants */
GEN_LDX_HVRM(ldcix, ld64_i64, 0x15, 0x1b, PPC_CILDST)
GEN_LDX_HVRM(lwzcix, ld32u, 0x15, 0x15, PPC_CILDST)
GEN_LDX_HVRM(lhzcix, ld16u, 0x15, 0x19, PPC_CILDST)
GEN_LDX_HVRM(lbzcix, ld8u, 0x15, 0x1a, PPC_CILDST)
#endif

/*** Integer store ***/
/* Indexed store: mem[(rA|0) + rB] = rS, with privilege check "chk". */
#define GEN_STX_E(name, stop, opc2, opc3, type, type2, chk)                   \
static void glue(gen_, name##x)(DisasContext *ctx)                            \
{                                                                             \
    TCGv EA;                                                                  \
    chk(ctx);                                                                 \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA);                       \
}
#define GEN_STX(name, stop, opc2, opc3, type)                                 \
    GEN_STX_E(name, stop, opc2, opc3, type, PPC_NONE, CHK_NONE)

#define GEN_STX_HVRM(name, stop, opc2, opc3, type)                            \
    GEN_STX_E(name, stop, opc2, opc3, type, PPC_NONE, CHK_HVRM)

/* External-PID store: supervisor-only, uses the EPID MMU index.
   NOTE(review): reads cpu_gpr[rD(...)] as the store source; rD/rS
   presumably decode the same opcode bits -- verify against internal.h. */
#define GEN_STEPX(name, stop, opc2, opc3)                                     \
static void glue(gen_, name##epx)(DisasContext *ctx)                          \
{                                                                             \
    TCGv EA;                                                                  \
    CHK_SV(ctx);                                                              \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    tcg_gen_qemu_st_tl(                                                       \
        cpu_gpr[rD(ctx->opcode)], EA, PPC_TLB_EPID_STORE, stop);              \
}

GEN_STEPX(stb, DEF_MEMOP(MO_UB), 0x1F, 0x06)
GEN_STEPX(sth, DEF_MEMOP(MO_UW), 0x1F, 0x0C)
GEN_STEPX(stw, DEF_MEMOP(MO_UL), 0x1F, 0x04)
#if defined(TARGET_PPC64)
GEN_STEPX(std, DEF_MEMOP(MO_UQ), 0x1d, 0x04)
#endif

#if defined(TARGET_PPC64)
GEN_STX_HVRM(stdcix, st64_i64, 0x15, 0x1f, PPC_CILDST)
GEN_STX_HVRM(stwcix, st32, 0x15, 0x1c, PPC_CILDST)
GEN_STX_HVRM(sthcix, st16, 0x15, 0x1d, PPC_CILDST)
GEN_STX_HVRM(stbcix, st8, 0x15, 0x1e, PPC_CILDST)
#endif
/*** Integer load and store with byte reverse ***/

/* lhbrx */
GEN_LDX(lhbr, ld16ur, 0x16, 0x18, PPC_INTEGER);

/* lwbrx */
GEN_LDX(lwbr, ld32ur, 0x16, 0x10, PPC_INTEGER);

#if defined(TARGET_PPC64)
/* ldbrx */
GEN_LDX_E(ldbr, ld64ur_i64, 0x14, 0x10, PPC_NONE, PPC2_DBRX, CHK_NONE);
/* stdbrx */
GEN_STX_E(stdbr, st64r_i64, 0x14, 0x14, PPC_NONE, PPC2_DBRX, CHK_NONE);
#endif  /* TARGET_PPC64 */

/* sthbrx */
GEN_STX(sthbr, st16r, 0x16, 0x1C, PPC_INTEGER);
/* stwbrx */
GEN_STX(stwbr, st32r, 0x16, 0x14, PPC_INTEGER);

/*** Integer load and store multiple ***/

/* lmw - load multiple words rD..r31; helper-based, big-endian only */
static void gen_lmw(DisasContext *ctx)
{
    TCGv t0;
    TCGv_i32 t1;

    if (ctx->le_mode) {
        /* lmw is an alignment interrupt in little-endian mode. */
        gen_align_no_le(ctx);
        return;
    }
    gen_set_access_type(ctx, ACCESS_INT);
    t0 = tcg_temp_new();
    t1 = tcg_constant_i32(rD(ctx->opcode));
    gen_addr_imm_index(ctx, t0, 0);
    gen_helper_lmw(tcg_env, t0, t1);
}

/* stmw */
static void gen_stmw(DisasContext *ctx)
{
    TCGv t0;
    TCGv_i32 t1;

    if (ctx->le_mode) {
gen_align_no_le(ctx); 3317 return; 3318 } 3319 gen_set_access_type(ctx, ACCESS_INT); 3320 t0 = tcg_temp_new(); 3321 t1 = tcg_constant_i32(rS(ctx->opcode)); 3322 gen_addr_imm_index(ctx, t0, 0); 3323 gen_helper_stmw(tcg_env, t0, t1); 3324 } 3325 3326 /*** Integer load and store strings ***/ 3327 3328 /* lswi */ 3329 /* 3330 * PowerPC32 specification says we must generate an exception if rA is 3331 * in the range of registers to be loaded. In an other hand, IBM says 3332 * this is valid, but rA won't be loaded. For now, I'll follow the 3333 * spec... 3334 */ 3335 static void gen_lswi(DisasContext *ctx) 3336 { 3337 TCGv t0; 3338 TCGv_i32 t1, t2; 3339 int nb = NB(ctx->opcode); 3340 int start = rD(ctx->opcode); 3341 int ra = rA(ctx->opcode); 3342 int nr; 3343 3344 if (ctx->le_mode) { 3345 gen_align_no_le(ctx); 3346 return; 3347 } 3348 if (nb == 0) { 3349 nb = 32; 3350 } 3351 nr = DIV_ROUND_UP(nb, 4); 3352 if (unlikely(lsw_reg_in_range(start, nr, ra))) { 3353 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_LSWX); 3354 return; 3355 } 3356 gen_set_access_type(ctx, ACCESS_INT); 3357 t0 = tcg_temp_new(); 3358 gen_addr_register(ctx, t0); 3359 t1 = tcg_constant_i32(nb); 3360 t2 = tcg_constant_i32(start); 3361 gen_helper_lsw(tcg_env, t0, t1, t2); 3362 } 3363 3364 /* lswx */ 3365 static void gen_lswx(DisasContext *ctx) 3366 { 3367 TCGv t0; 3368 TCGv_i32 t1, t2, t3; 3369 3370 if (ctx->le_mode) { 3371 gen_align_no_le(ctx); 3372 return; 3373 } 3374 gen_set_access_type(ctx, ACCESS_INT); 3375 t0 = tcg_temp_new(); 3376 gen_addr_reg_index(ctx, t0); 3377 t1 = tcg_constant_i32(rD(ctx->opcode)); 3378 t2 = tcg_constant_i32(rA(ctx->opcode)); 3379 t3 = tcg_constant_i32(rB(ctx->opcode)); 3380 gen_helper_lswx(tcg_env, t0, t1, t2, t3); 3381 } 3382 3383 /* stswi */ 3384 static void gen_stswi(DisasContext *ctx) 3385 { 3386 TCGv t0; 3387 TCGv_i32 t1, t2; 3388 int nb = NB(ctx->opcode); 3389 3390 if (ctx->le_mode) { 3391 gen_align_no_le(ctx); 3392 return; 3393 } 3394 gen_set_access_type(ctx, ACCESS_INT); 
3395 t0 = tcg_temp_new(); 3396 gen_addr_register(ctx, t0); 3397 if (nb == 0) { 3398 nb = 32; 3399 } 3400 t1 = tcg_constant_i32(nb); 3401 t2 = tcg_constant_i32(rS(ctx->opcode)); 3402 gen_helper_stsw(tcg_env, t0, t1, t2); 3403 } 3404 3405 /* stswx */ 3406 static void gen_stswx(DisasContext *ctx) 3407 { 3408 TCGv t0; 3409 TCGv_i32 t1, t2; 3410 3411 if (ctx->le_mode) { 3412 gen_align_no_le(ctx); 3413 return; 3414 } 3415 gen_set_access_type(ctx, ACCESS_INT); 3416 t0 = tcg_temp_new(); 3417 gen_addr_reg_index(ctx, t0); 3418 t1 = tcg_temp_new_i32(); 3419 tcg_gen_trunc_tl_i32(t1, cpu_xer); 3420 tcg_gen_andi_i32(t1, t1, 0x7F); 3421 t2 = tcg_constant_i32(rS(ctx->opcode)); 3422 gen_helper_stsw(tcg_env, t0, t1, t2); 3423 } 3424 3425 /*** Memory synchronisation ***/ 3426 /* eieio */ 3427 static void gen_eieio(DisasContext *ctx) 3428 { 3429 TCGBar bar = TCG_MO_ALL; 3430 3431 /* 3432 * eieio has complex semanitcs. It provides memory ordering between 3433 * operations in the set: 3434 * - loads from CI memory. 3435 * - stores to CI memory. 3436 * - stores to WT memory. 3437 * 3438 * It separately also orders memory for operations in the set: 3439 * - stores to cacheble memory. 3440 * 3441 * It also serializes instructions: 3442 * - dcbt and dcbst. 3443 * 3444 * It separately serializes: 3445 * - tlbie and tlbsync. 3446 * 3447 * And separately serializes: 3448 * - slbieg, slbiag, and slbsync. 3449 * 3450 * The end result is that CI memory ordering requires TCG_MO_ALL 3451 * and it is not possible to special-case more relaxed ordering for 3452 * cacheable accesses. TCG_BAR_SC is required to provide this 3453 * serialization. 3454 */ 3455 3456 /* 3457 * POWER9 has a eieio instruction variant using bit 6 as a hint to 3458 * tell the CPU it is a store-forwarding barrier. 3459 */ 3460 if (ctx->opcode & 0x2000000) { 3461 /* 3462 * ISA says that "Reserved fields in instructions are ignored 3463 * by the processor". 
So ignore the bit 6 on non-POWER9 CPU but 3464 * as this is not an instruction software should be using, 3465 * complain to the user. 3466 */ 3467 if (!(ctx->insns_flags2 & PPC2_ISA300)) { 3468 qemu_log_mask(LOG_GUEST_ERROR, "invalid eieio using bit 6 at @" 3469 TARGET_FMT_lx "\n", ctx->cia); 3470 } else { 3471 bar = TCG_MO_ST_LD; 3472 } 3473 } 3474 3475 tcg_gen_mb(bar | TCG_BAR_SC); 3476 } 3477 3478 #if !defined(CONFIG_USER_ONLY) 3479 static inline void gen_check_tlb_flush(DisasContext *ctx, bool global) 3480 { 3481 TCGv_i32 t; 3482 TCGLabel *l; 3483 3484 if (!ctx->lazy_tlb_flush) { 3485 return; 3486 } 3487 l = gen_new_label(); 3488 t = tcg_temp_new_i32(); 3489 tcg_gen_ld_i32(t, tcg_env, offsetof(CPUPPCState, tlb_need_flush)); 3490 tcg_gen_brcondi_i32(TCG_COND_EQ, t, 0, l); 3491 if (global) { 3492 gen_helper_check_tlb_flush_global(tcg_env); 3493 } else { 3494 gen_helper_check_tlb_flush_local(tcg_env); 3495 } 3496 gen_set_label(l); 3497 } 3498 #else 3499 static inline void gen_check_tlb_flush(DisasContext *ctx, bool global) { } 3500 #endif 3501 3502 /* isync */ 3503 static void gen_isync(DisasContext *ctx) 3504 { 3505 /* 3506 * We need to check for a pending TLB flush. 
This can only happen in 3507 * kernel mode however so check MSR_PR 3508 */ 3509 if (!ctx->pr) { 3510 gen_check_tlb_flush(ctx, false); 3511 } 3512 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC); 3513 ctx->base.is_jmp = DISAS_EXIT_UPDATE; 3514 } 3515 3516 #define MEMOP_GET_SIZE(x) (1 << ((x) & MO_SIZE)) 3517 3518 static void gen_load_locked(DisasContext *ctx, MemOp memop) 3519 { 3520 TCGv gpr = cpu_gpr[rD(ctx->opcode)]; 3521 TCGv t0 = tcg_temp_new(); 3522 3523 gen_set_access_type(ctx, ACCESS_RES); 3524 gen_addr_reg_index(ctx, t0); 3525 tcg_gen_qemu_ld_tl(gpr, t0, ctx->mem_idx, memop | MO_ALIGN); 3526 tcg_gen_mov_tl(cpu_reserve, t0); 3527 tcg_gen_movi_tl(cpu_reserve_length, memop_size(memop)); 3528 tcg_gen_mov_tl(cpu_reserve_val, gpr); 3529 } 3530 3531 #define LARX(name, memop) \ 3532 static void gen_##name(DisasContext *ctx) \ 3533 { \ 3534 gen_load_locked(ctx, memop); \ 3535 } 3536 3537 /* lwarx */ 3538 LARX(lbarx, DEF_MEMOP(MO_UB)) 3539 LARX(lharx, DEF_MEMOP(MO_UW)) 3540 LARX(lwarx, DEF_MEMOP(MO_UL)) 3541 3542 static void gen_fetch_inc_conditional(DisasContext *ctx, MemOp memop, 3543 TCGv EA, TCGCond cond, int addend) 3544 { 3545 TCGv t = tcg_temp_new(); 3546 TCGv t2 = tcg_temp_new(); 3547 TCGv u = tcg_temp_new(); 3548 3549 tcg_gen_qemu_ld_tl(t, EA, ctx->mem_idx, memop); 3550 tcg_gen_addi_tl(t2, EA, MEMOP_GET_SIZE(memop)); 3551 tcg_gen_qemu_ld_tl(t2, t2, ctx->mem_idx, memop); 3552 tcg_gen_addi_tl(u, t, addend); 3553 3554 /* E.g. for fetch and increment bounded... */ 3555 /* mem(EA,s) = (t != t2 ? u = t + 1 : t) */ 3556 tcg_gen_movcond_tl(cond, u, t, t2, u, t); 3557 tcg_gen_qemu_st_tl(u, EA, ctx->mem_idx, memop); 3558 3559 /* RT = (t != t2 ? 
t : u = 1<<(s*8-1)) */ 3560 tcg_gen_movi_tl(u, 1 << (MEMOP_GET_SIZE(memop) * 8 - 1)); 3561 tcg_gen_movcond_tl(cond, cpu_gpr[rD(ctx->opcode)], t, t2, t, u); 3562 } 3563 3564 static void gen_ld_atomic(DisasContext *ctx, MemOp memop) 3565 { 3566 uint32_t gpr_FC = FC(ctx->opcode); 3567 TCGv EA = tcg_temp_new(); 3568 int rt = rD(ctx->opcode); 3569 bool need_serial; 3570 TCGv src, dst; 3571 3572 gen_addr_register(ctx, EA); 3573 dst = cpu_gpr[rt]; 3574 src = cpu_gpr[(rt + 1) & 31]; 3575 3576 need_serial = false; 3577 memop |= MO_ALIGN; 3578 switch (gpr_FC) { 3579 case 0: /* Fetch and add */ 3580 tcg_gen_atomic_fetch_add_tl(dst, EA, src, ctx->mem_idx, memop); 3581 break; 3582 case 1: /* Fetch and xor */ 3583 tcg_gen_atomic_fetch_xor_tl(dst, EA, src, ctx->mem_idx, memop); 3584 break; 3585 case 2: /* Fetch and or */ 3586 tcg_gen_atomic_fetch_or_tl(dst, EA, src, ctx->mem_idx, memop); 3587 break; 3588 case 3: /* Fetch and 'and' */ 3589 tcg_gen_atomic_fetch_and_tl(dst, EA, src, ctx->mem_idx, memop); 3590 break; 3591 case 4: /* Fetch and max unsigned */ 3592 tcg_gen_atomic_fetch_umax_tl(dst, EA, src, ctx->mem_idx, memop); 3593 break; 3594 case 5: /* Fetch and max signed */ 3595 tcg_gen_atomic_fetch_smax_tl(dst, EA, src, ctx->mem_idx, memop); 3596 break; 3597 case 6: /* Fetch and min unsigned */ 3598 tcg_gen_atomic_fetch_umin_tl(dst, EA, src, ctx->mem_idx, memop); 3599 break; 3600 case 7: /* Fetch and min signed */ 3601 tcg_gen_atomic_fetch_smin_tl(dst, EA, src, ctx->mem_idx, memop); 3602 break; 3603 case 8: /* Swap */ 3604 tcg_gen_atomic_xchg_tl(dst, EA, src, ctx->mem_idx, memop); 3605 break; 3606 3607 case 16: /* Compare and swap not equal */ 3608 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) { 3609 need_serial = true; 3610 } else { 3611 TCGv t0 = tcg_temp_new(); 3612 TCGv t1 = tcg_temp_new(); 3613 3614 tcg_gen_qemu_ld_tl(t0, EA, ctx->mem_idx, memop); 3615 if ((memop & MO_SIZE) == MO_64 || TARGET_LONG_BITS == 32) { 3616 tcg_gen_mov_tl(t1, src); 3617 } else { 3618 
tcg_gen_ext32u_tl(t1, src); 3619 } 3620 tcg_gen_movcond_tl(TCG_COND_NE, t1, t0, t1, 3621 cpu_gpr[(rt + 2) & 31], t0); 3622 tcg_gen_qemu_st_tl(t1, EA, ctx->mem_idx, memop); 3623 tcg_gen_mov_tl(dst, t0); 3624 } 3625 break; 3626 3627 case 24: /* Fetch and increment bounded */ 3628 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) { 3629 need_serial = true; 3630 } else { 3631 gen_fetch_inc_conditional(ctx, memop, EA, TCG_COND_NE, 1); 3632 } 3633 break; 3634 case 25: /* Fetch and increment equal */ 3635 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) { 3636 need_serial = true; 3637 } else { 3638 gen_fetch_inc_conditional(ctx, memop, EA, TCG_COND_EQ, 1); 3639 } 3640 break; 3641 case 28: /* Fetch and decrement bounded */ 3642 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) { 3643 need_serial = true; 3644 } else { 3645 gen_fetch_inc_conditional(ctx, memop, EA, TCG_COND_NE, -1); 3646 } 3647 break; 3648 3649 default: 3650 /* invoke data storage error handler */ 3651 gen_exception_err(ctx, POWERPC_EXCP_DSI, POWERPC_EXCP_INVAL); 3652 } 3653 3654 if (need_serial) { 3655 /* Restart with exclusive lock. 
*/ 3656 gen_helper_exit_atomic(tcg_env); 3657 ctx->base.is_jmp = DISAS_NORETURN; 3658 } 3659 } 3660 3661 static void gen_lwat(DisasContext *ctx) 3662 { 3663 gen_ld_atomic(ctx, DEF_MEMOP(MO_UL)); 3664 } 3665 3666 #ifdef TARGET_PPC64 3667 static void gen_ldat(DisasContext *ctx) 3668 { 3669 gen_ld_atomic(ctx, DEF_MEMOP(MO_UQ)); 3670 } 3671 #endif 3672 3673 static void gen_st_atomic(DisasContext *ctx, MemOp memop) 3674 { 3675 uint32_t gpr_FC = FC(ctx->opcode); 3676 TCGv EA = tcg_temp_new(); 3677 TCGv src, discard; 3678 3679 gen_addr_register(ctx, EA); 3680 src = cpu_gpr[rD(ctx->opcode)]; 3681 discard = tcg_temp_new(); 3682 3683 memop |= MO_ALIGN; 3684 switch (gpr_FC) { 3685 case 0: /* add and Store */ 3686 tcg_gen_atomic_add_fetch_tl(discard, EA, src, ctx->mem_idx, memop); 3687 break; 3688 case 1: /* xor and Store */ 3689 tcg_gen_atomic_xor_fetch_tl(discard, EA, src, ctx->mem_idx, memop); 3690 break; 3691 case 2: /* Or and Store */ 3692 tcg_gen_atomic_or_fetch_tl(discard, EA, src, ctx->mem_idx, memop); 3693 break; 3694 case 3: /* 'and' and Store */ 3695 tcg_gen_atomic_and_fetch_tl(discard, EA, src, ctx->mem_idx, memop); 3696 break; 3697 case 4: /* Store max unsigned */ 3698 tcg_gen_atomic_umax_fetch_tl(discard, EA, src, ctx->mem_idx, memop); 3699 break; 3700 case 5: /* Store max signed */ 3701 tcg_gen_atomic_smax_fetch_tl(discard, EA, src, ctx->mem_idx, memop); 3702 break; 3703 case 6: /* Store min unsigned */ 3704 tcg_gen_atomic_umin_fetch_tl(discard, EA, src, ctx->mem_idx, memop); 3705 break; 3706 case 7: /* Store min signed */ 3707 tcg_gen_atomic_smin_fetch_tl(discard, EA, src, ctx->mem_idx, memop); 3708 break; 3709 case 24: /* Store twin */ 3710 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) { 3711 /* Restart with exclusive lock. 
*/ 3712 gen_helper_exit_atomic(tcg_env); 3713 ctx->base.is_jmp = DISAS_NORETURN; 3714 } else { 3715 TCGv t = tcg_temp_new(); 3716 TCGv t2 = tcg_temp_new(); 3717 TCGv s = tcg_temp_new(); 3718 TCGv s2 = tcg_temp_new(); 3719 TCGv ea_plus_s = tcg_temp_new(); 3720 3721 tcg_gen_qemu_ld_tl(t, EA, ctx->mem_idx, memop); 3722 tcg_gen_addi_tl(ea_plus_s, EA, MEMOP_GET_SIZE(memop)); 3723 tcg_gen_qemu_ld_tl(t2, ea_plus_s, ctx->mem_idx, memop); 3724 tcg_gen_movcond_tl(TCG_COND_EQ, s, t, t2, src, t); 3725 tcg_gen_movcond_tl(TCG_COND_EQ, s2, t, t2, src, t2); 3726 tcg_gen_qemu_st_tl(s, EA, ctx->mem_idx, memop); 3727 tcg_gen_qemu_st_tl(s2, ea_plus_s, ctx->mem_idx, memop); 3728 } 3729 break; 3730 default: 3731 /* invoke data storage error handler */ 3732 gen_exception_err(ctx, POWERPC_EXCP_DSI, POWERPC_EXCP_INVAL); 3733 } 3734 } 3735 3736 static void gen_stwat(DisasContext *ctx) 3737 { 3738 gen_st_atomic(ctx, DEF_MEMOP(MO_UL)); 3739 } 3740 3741 #ifdef TARGET_PPC64 3742 static void gen_stdat(DisasContext *ctx) 3743 { 3744 gen_st_atomic(ctx, DEF_MEMOP(MO_UQ)); 3745 } 3746 #endif 3747 3748 static void gen_conditional_store(DisasContext *ctx, MemOp memop) 3749 { 3750 TCGLabel *lfail; 3751 TCGv EA; 3752 TCGv cr0; 3753 TCGv t0; 3754 int rs = rS(ctx->opcode); 3755 3756 lfail = gen_new_label(); 3757 EA = tcg_temp_new(); 3758 cr0 = tcg_temp_new(); 3759 t0 = tcg_temp_new(); 3760 3761 tcg_gen_mov_tl(cr0, cpu_so); 3762 gen_set_access_type(ctx, ACCESS_RES); 3763 gen_addr_reg_index(ctx, EA); 3764 tcg_gen_brcond_tl(TCG_COND_NE, EA, cpu_reserve, lfail); 3765 tcg_gen_brcondi_tl(TCG_COND_NE, cpu_reserve_length, memop_size(memop), lfail); 3766 3767 tcg_gen_atomic_cmpxchg_tl(t0, cpu_reserve, cpu_reserve_val, 3768 cpu_gpr[rs], ctx->mem_idx, 3769 DEF_MEMOP(memop) | MO_ALIGN); 3770 tcg_gen_setcond_tl(TCG_COND_EQ, t0, t0, cpu_reserve_val); 3771 tcg_gen_shli_tl(t0, t0, CRF_EQ_BIT); 3772 tcg_gen_or_tl(cr0, cr0, t0); 3773 3774 gen_set_label(lfail); 3775 tcg_gen_trunc_tl_i32(cpu_crf[0], cr0); 3776 
tcg_gen_movi_tl(cpu_reserve, -1); 3777 } 3778 3779 #define STCX(name, memop) \ 3780 static void gen_##name(DisasContext *ctx) \ 3781 { \ 3782 gen_conditional_store(ctx, memop); \ 3783 } 3784 3785 STCX(stbcx_, DEF_MEMOP(MO_UB)) 3786 STCX(sthcx_, DEF_MEMOP(MO_UW)) 3787 STCX(stwcx_, DEF_MEMOP(MO_UL)) 3788 3789 #if defined(TARGET_PPC64) 3790 /* ldarx */ 3791 LARX(ldarx, DEF_MEMOP(MO_UQ)) 3792 /* stdcx. */ 3793 STCX(stdcx_, DEF_MEMOP(MO_UQ)) 3794 3795 /* lqarx */ 3796 static void gen_lqarx(DisasContext *ctx) 3797 { 3798 int rd = rD(ctx->opcode); 3799 TCGv EA, hi, lo; 3800 TCGv_i128 t16; 3801 3802 if (unlikely((rd & 1) || (rd == rA(ctx->opcode)) || 3803 (rd == rB(ctx->opcode)))) { 3804 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); 3805 return; 3806 } 3807 3808 gen_set_access_type(ctx, ACCESS_RES); 3809 EA = tcg_temp_new(); 3810 gen_addr_reg_index(ctx, EA); 3811 3812 /* Note that the low part is always in RD+1, even in LE mode. */ 3813 lo = cpu_gpr[rd + 1]; 3814 hi = cpu_gpr[rd]; 3815 3816 t16 = tcg_temp_new_i128(); 3817 tcg_gen_qemu_ld_i128(t16, EA, ctx->mem_idx, DEF_MEMOP(MO_128 | MO_ALIGN)); 3818 tcg_gen_extr_i128_i64(lo, hi, t16); 3819 3820 tcg_gen_mov_tl(cpu_reserve, EA); 3821 tcg_gen_movi_tl(cpu_reserve_length, 16); 3822 tcg_gen_st_tl(hi, tcg_env, offsetof(CPUPPCState, reserve_val)); 3823 tcg_gen_st_tl(lo, tcg_env, offsetof(CPUPPCState, reserve_val2)); 3824 } 3825 3826 /* stqcx. 
*/ 3827 static void gen_stqcx_(DisasContext *ctx) 3828 { 3829 TCGLabel *lfail; 3830 TCGv EA, t0, t1; 3831 TCGv cr0; 3832 TCGv_i128 cmp, val; 3833 int rs = rS(ctx->opcode); 3834 3835 if (unlikely(rs & 1)) { 3836 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); 3837 return; 3838 } 3839 3840 lfail = gen_new_label(); 3841 EA = tcg_temp_new(); 3842 cr0 = tcg_temp_new(); 3843 3844 tcg_gen_mov_tl(cr0, cpu_so); 3845 gen_set_access_type(ctx, ACCESS_RES); 3846 gen_addr_reg_index(ctx, EA); 3847 tcg_gen_brcond_tl(TCG_COND_NE, EA, cpu_reserve, lfail); 3848 tcg_gen_brcondi_tl(TCG_COND_NE, cpu_reserve_length, 16, lfail); 3849 3850 cmp = tcg_temp_new_i128(); 3851 val = tcg_temp_new_i128(); 3852 3853 tcg_gen_concat_i64_i128(cmp, cpu_reserve_val2, cpu_reserve_val); 3854 3855 /* Note that the low part is always in RS+1, even in LE mode. */ 3856 tcg_gen_concat_i64_i128(val, cpu_gpr[rs + 1], cpu_gpr[rs]); 3857 3858 tcg_gen_atomic_cmpxchg_i128(val, cpu_reserve, cmp, val, ctx->mem_idx, 3859 DEF_MEMOP(MO_128 | MO_ALIGN)); 3860 3861 t0 = tcg_temp_new(); 3862 t1 = tcg_temp_new(); 3863 tcg_gen_extr_i128_i64(t1, t0, val); 3864 3865 tcg_gen_xor_tl(t1, t1, cpu_reserve_val2); 3866 tcg_gen_xor_tl(t0, t0, cpu_reserve_val); 3867 tcg_gen_or_tl(t0, t0, t1); 3868 3869 tcg_gen_setcondi_tl(TCG_COND_EQ, t0, t0, 0); 3870 tcg_gen_shli_tl(t0, t0, CRF_EQ_BIT); 3871 tcg_gen_or_tl(cr0, cr0, t0); 3872 3873 gen_set_label(lfail); 3874 tcg_gen_trunc_tl_i32(cpu_crf[0], cr0); 3875 tcg_gen_movi_tl(cpu_reserve, -1); 3876 } 3877 #endif /* defined(TARGET_PPC64) */ 3878 3879 /* sync */ 3880 static void gen_sync(DisasContext *ctx) 3881 { 3882 TCGBar bar = TCG_MO_ALL; 3883 uint32_t l = (ctx->opcode >> 21) & 3; 3884 3885 if ((l == 1) && (ctx->insns_flags2 & PPC2_MEM_LWSYNC)) { 3886 bar = TCG_MO_LD_LD | TCG_MO_LD_ST | TCG_MO_ST_ST; 3887 } 3888 3889 /* 3890 * We may need to check for a pending TLB flush. 3891 * 3892 * We do this on ptesync (l == 2) on ppc64 and any sync pn ppc32. 
3893 * 3894 * Additionally, this can only happen in kernel mode however so 3895 * check MSR_PR as well. 3896 */ 3897 if (((l == 2) || !(ctx->insns_flags & PPC_64B)) && !ctx->pr) { 3898 gen_check_tlb_flush(ctx, true); 3899 } 3900 3901 tcg_gen_mb(bar | TCG_BAR_SC); 3902 } 3903 3904 /* wait */ 3905 static void gen_wait(DisasContext *ctx) 3906 { 3907 uint32_t wc; 3908 3909 if (ctx->insns_flags & PPC_WAIT) { 3910 /* v2.03-v2.07 define an older incompatible 'wait' encoding. */ 3911 3912 if (ctx->insns_flags2 & PPC2_PM_ISA206) { 3913 /* v2.06 introduced the WC field. WC > 0 may be treated as no-op. */ 3914 wc = WC(ctx->opcode); 3915 } else { 3916 wc = 0; 3917 } 3918 3919 } else if (ctx->insns_flags2 & PPC2_ISA300) { 3920 /* v3.0 defines a new 'wait' encoding. */ 3921 wc = WC(ctx->opcode); 3922 if (ctx->insns_flags2 & PPC2_ISA310) { 3923 uint32_t pl = PL(ctx->opcode); 3924 3925 /* WC 1,2 may be treated as no-op. WC 3 is reserved. */ 3926 if (wc == 3) { 3927 gen_invalid(ctx); 3928 return; 3929 } 3930 3931 /* PL 1-3 are reserved. If WC=2 then the insn is treated as noop. */ 3932 if (pl > 0 && wc != 2) { 3933 gen_invalid(ctx); 3934 return; 3935 } 3936 3937 } else { /* ISA300 */ 3938 /* WC 1-3 are reserved */ 3939 if (wc > 0) { 3940 gen_invalid(ctx); 3941 return; 3942 } 3943 } 3944 3945 } else { 3946 warn_report("wait instruction decoded with wrong ISA flags."); 3947 gen_invalid(ctx); 3948 return; 3949 } 3950 3951 /* 3952 * wait without WC field or with WC=0 waits for an exception / interrupt 3953 * to occur. 3954 */ 3955 if (wc == 0) { 3956 TCGv_i32 t0 = tcg_constant_i32(1); 3957 tcg_gen_st_i32(t0, tcg_env, 3958 -offsetof(PowerPCCPU, env) + offsetof(CPUState, halted)); 3959 /* Stop translation, as the CPU is supposed to sleep from now */ 3960 gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next); 3961 } 3962 3963 /* 3964 * Other wait types must not just wait until an exception occurs because 3965 * ignoring their other wake-up conditions could cause a hang. 
3966 * 3967 * For v2.06 and 2.07, wc=1,2,3 are architected but may be implemented as 3968 * no-ops. 3969 * 3970 * wc=1 and wc=3 explicitly allow the instruction to be treated as a no-op. 3971 * 3972 * wc=2 waits for an implementation-specific condition, such could be 3973 * always true, so it can be implemented as a no-op. 3974 * 3975 * For v3.1, wc=1,2 are architected but may be implemented as no-ops. 3976 * 3977 * wc=1 (waitrsv) waits for an exception or a reservation to be lost. 3978 * Reservation-loss may have implementation-specific conditions, so it 3979 * can be implemented as a no-op. 3980 * 3981 * wc=2 waits for an exception or an amount of time to pass. This 3982 * amount is implementation-specific so it can be implemented as a 3983 * no-op. 3984 * 3985 * ISA v3.1 allows for execution to resume "in the rare case of 3986 * an implementation-dependent event", so in any case software must 3987 * not depend on the architected resumption condition to become 3988 * true, so no-op implementations should be architecturally correct 3989 * (if suboptimal). 
3990 */ 3991 } 3992 3993 #if defined(TARGET_PPC64) 3994 static void gen_doze(DisasContext *ctx) 3995 { 3996 #if defined(CONFIG_USER_ONLY) 3997 GEN_PRIV(ctx); 3998 #else 3999 TCGv_i32 t; 4000 4001 CHK_HV(ctx); 4002 translator_io_start(&ctx->base); 4003 t = tcg_constant_i32(PPC_PM_DOZE); 4004 gen_helper_pminsn(tcg_env, t); 4005 /* Stop translation, as the CPU is supposed to sleep from now */ 4006 gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next); 4007 #endif /* defined(CONFIG_USER_ONLY) */ 4008 } 4009 4010 static void gen_nap(DisasContext *ctx) 4011 { 4012 #if defined(CONFIG_USER_ONLY) 4013 GEN_PRIV(ctx); 4014 #else 4015 TCGv_i32 t; 4016 4017 CHK_HV(ctx); 4018 translator_io_start(&ctx->base); 4019 t = tcg_constant_i32(PPC_PM_NAP); 4020 gen_helper_pminsn(tcg_env, t); 4021 /* Stop translation, as the CPU is supposed to sleep from now */ 4022 gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next); 4023 #endif /* defined(CONFIG_USER_ONLY) */ 4024 } 4025 4026 static void gen_stop(DisasContext *ctx) 4027 { 4028 #if defined(CONFIG_USER_ONLY) 4029 GEN_PRIV(ctx); 4030 #else 4031 TCGv_i32 t; 4032 4033 CHK_HV(ctx); 4034 translator_io_start(&ctx->base); 4035 t = tcg_constant_i32(PPC_PM_STOP); 4036 gen_helper_pminsn(tcg_env, t); 4037 /* Stop translation, as the CPU is supposed to sleep from now */ 4038 gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next); 4039 #endif /* defined(CONFIG_USER_ONLY) */ 4040 } 4041 4042 static void gen_sleep(DisasContext *ctx) 4043 { 4044 #if defined(CONFIG_USER_ONLY) 4045 GEN_PRIV(ctx); 4046 #else 4047 TCGv_i32 t; 4048 4049 CHK_HV(ctx); 4050 translator_io_start(&ctx->base); 4051 t = tcg_constant_i32(PPC_PM_SLEEP); 4052 gen_helper_pminsn(tcg_env, t); 4053 /* Stop translation, as the CPU is supposed to sleep from now */ 4054 gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next); 4055 #endif /* defined(CONFIG_USER_ONLY) */ 4056 } 4057 4058 static void gen_rvwinkle(DisasContext *ctx) 4059 { 4060 #if defined(CONFIG_USER_ONLY) 4061 GEN_PRIV(ctx); 4062 #else 
4063 TCGv_i32 t; 4064 4065 CHK_HV(ctx); 4066 translator_io_start(&ctx->base); 4067 t = tcg_constant_i32(PPC_PM_RVWINKLE); 4068 gen_helper_pminsn(tcg_env, t); 4069 /* Stop translation, as the CPU is supposed to sleep from now */ 4070 gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next); 4071 #endif /* defined(CONFIG_USER_ONLY) */ 4072 } 4073 #endif /* #if defined(TARGET_PPC64) */ 4074 4075 static inline void gen_update_cfar(DisasContext *ctx, target_ulong nip) 4076 { 4077 #if defined(TARGET_PPC64) 4078 if (ctx->has_cfar) { 4079 tcg_gen_movi_tl(cpu_cfar, nip); 4080 } 4081 #endif 4082 } 4083 4084 #if defined(TARGET_PPC64) 4085 static void pmu_count_insns(DisasContext *ctx) 4086 { 4087 /* 4088 * Do not bother calling the helper if the PMU isn't counting 4089 * instructions. 4090 */ 4091 if (!ctx->pmu_insn_cnt) { 4092 return; 4093 } 4094 4095 #if !defined(CONFIG_USER_ONLY) 4096 TCGLabel *l; 4097 TCGv t0; 4098 4099 /* 4100 * The PMU insns_inc() helper stops the internal PMU timer if a 4101 * counter overflows happens. In that case, if the guest is 4102 * running with icount and we do not handle it beforehand, 4103 * the helper can trigger a 'bad icount read'. 4104 */ 4105 translator_io_start(&ctx->base); 4106 4107 /* Avoid helper calls when only PMC5-6 are enabled. */ 4108 if (!ctx->pmc_other) { 4109 l = gen_new_label(); 4110 t0 = tcg_temp_new(); 4111 4112 gen_load_spr(t0, SPR_POWER_PMC5); 4113 tcg_gen_addi_tl(t0, t0, ctx->base.num_insns); 4114 gen_store_spr(SPR_POWER_PMC5, t0); 4115 /* Check for overflow, if it's enabled */ 4116 if (ctx->mmcr0_pmcjce) { 4117 tcg_gen_brcondi_tl(TCG_COND_LT, t0, PMC_COUNTER_NEGATIVE_VAL, l); 4118 gen_helper_handle_pmc5_overflow(tcg_env); 4119 } 4120 4121 gen_set_label(l); 4122 } else { 4123 gen_helper_insns_inc(tcg_env, tcg_constant_i32(ctx->base.num_insns)); 4124 } 4125 #else 4126 /* 4127 * User mode can read (but not write) PMC5 and start/stop 4128 * the PMU via MMCR0_FC. In this case just increment 4129 * PMC5 with base.num_insns. 
4130 */ 4131 TCGv t0 = tcg_temp_new(); 4132 4133 gen_load_spr(t0, SPR_POWER_PMC5); 4134 tcg_gen_addi_tl(t0, t0, ctx->base.num_insns); 4135 gen_store_spr(SPR_POWER_PMC5, t0); 4136 #endif /* #if !defined(CONFIG_USER_ONLY) */ 4137 } 4138 #else 4139 static void pmu_count_insns(DisasContext *ctx) 4140 { 4141 return; 4142 } 4143 #endif /* #if defined(TARGET_PPC64) */ 4144 4145 static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest) 4146 { 4147 if (unlikely(ctx->singlestep_enabled)) { 4148 return false; 4149 } 4150 return translator_use_goto_tb(&ctx->base, dest); 4151 } 4152 4153 static void gen_lookup_and_goto_ptr(DisasContext *ctx) 4154 { 4155 if (unlikely(ctx->singlestep_enabled)) { 4156 gen_debug_exception(ctx, false); 4157 } else { 4158 /* 4159 * tcg_gen_lookup_and_goto_ptr will exit the TB if 4160 * CF_NO_GOTO_PTR is set. Count insns now. 4161 */ 4162 if (ctx->base.tb->flags & CF_NO_GOTO_PTR) { 4163 pmu_count_insns(ctx); 4164 } 4165 4166 tcg_gen_lookup_and_goto_ptr(); 4167 } 4168 } 4169 4170 /*** Branch ***/ 4171 static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest) 4172 { 4173 if (NARROW_MODE(ctx)) { 4174 dest = (uint32_t) dest; 4175 } 4176 if (use_goto_tb(ctx, dest)) { 4177 pmu_count_insns(ctx); 4178 tcg_gen_goto_tb(n); 4179 tcg_gen_movi_tl(cpu_nip, dest & ~3); 4180 tcg_gen_exit_tb(ctx->base.tb, n); 4181 } else { 4182 tcg_gen_movi_tl(cpu_nip, dest & ~3); 4183 gen_lookup_and_goto_ptr(ctx); 4184 } 4185 } 4186 4187 static inline void gen_setlr(DisasContext *ctx, target_ulong nip) 4188 { 4189 if (NARROW_MODE(ctx)) { 4190 nip = (uint32_t)nip; 4191 } 4192 tcg_gen_movi_tl(cpu_lr, nip); 4193 } 4194 4195 /* b ba bl bla */ 4196 static void gen_b(DisasContext *ctx) 4197 { 4198 target_ulong li, target; 4199 4200 /* sign extend LI */ 4201 li = LI(ctx->opcode); 4202 li = (li ^ 0x02000000) - 0x02000000; 4203 if (likely(AA(ctx->opcode) == 0)) { 4204 target = ctx->cia + li; 4205 } else { 4206 target = li; 4207 } 4208 if (LK(ctx->opcode)) { 4209 
gen_setlr(ctx, ctx->base.pc_next); 4210 } 4211 gen_update_cfar(ctx, ctx->cia); 4212 gen_goto_tb(ctx, 0, target); 4213 ctx->base.is_jmp = DISAS_NORETURN; 4214 } 4215 4216 #define BCOND_IM 0 4217 #define BCOND_LR 1 4218 #define BCOND_CTR 2 4219 #define BCOND_TAR 3 4220 4221 static void gen_bcond(DisasContext *ctx, int type) 4222 { 4223 uint32_t bo = BO(ctx->opcode); 4224 TCGLabel *l1; 4225 TCGv target; 4226 4227 if (type == BCOND_LR || type == BCOND_CTR || type == BCOND_TAR) { 4228 target = tcg_temp_new(); 4229 if (type == BCOND_CTR) { 4230 tcg_gen_mov_tl(target, cpu_ctr); 4231 } else if (type == BCOND_TAR) { 4232 gen_load_spr(target, SPR_TAR); 4233 } else { 4234 tcg_gen_mov_tl(target, cpu_lr); 4235 } 4236 } else { 4237 target = NULL; 4238 } 4239 if (LK(ctx->opcode)) { 4240 gen_setlr(ctx, ctx->base.pc_next); 4241 } 4242 l1 = gen_new_label(); 4243 if ((bo & 0x4) == 0) { 4244 /* Decrement and test CTR */ 4245 TCGv temp = tcg_temp_new(); 4246 4247 if (type == BCOND_CTR) { 4248 /* 4249 * All ISAs up to v3 describe this form of bcctr as invalid but 4250 * some processors, ie. 64-bit server processors compliant with 4251 * arch 2.x, do implement a "test and decrement" logic instead, 4252 * as described in their respective UMs. This logic involves CTR 4253 * to act as both the branch target and a counter, which makes 4254 * it basically useless and thus never used in real code. 4255 * 4256 * This form was hence chosen to trigger extra micro-architectural 4257 * side-effect on real HW needed for the Spectre v2 workaround. 4258 * It is up to guests that implement such workaround, ie. linux, to 4259 * use this form in a way it just triggers the side-effect without 4260 * doing anything else harmful. 
4261 */ 4262 if (unlikely(!is_book3s_arch2x(ctx))) { 4263 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); 4264 return; 4265 } 4266 4267 if (NARROW_MODE(ctx)) { 4268 tcg_gen_ext32u_tl(temp, cpu_ctr); 4269 } else { 4270 tcg_gen_mov_tl(temp, cpu_ctr); 4271 } 4272 if (bo & 0x2) { 4273 tcg_gen_brcondi_tl(TCG_COND_NE, temp, 0, l1); 4274 } else { 4275 tcg_gen_brcondi_tl(TCG_COND_EQ, temp, 0, l1); 4276 } 4277 tcg_gen_subi_tl(cpu_ctr, cpu_ctr, 1); 4278 } else { 4279 tcg_gen_subi_tl(cpu_ctr, cpu_ctr, 1); 4280 if (NARROW_MODE(ctx)) { 4281 tcg_gen_ext32u_tl(temp, cpu_ctr); 4282 } else { 4283 tcg_gen_mov_tl(temp, cpu_ctr); 4284 } 4285 if (bo & 0x2) { 4286 tcg_gen_brcondi_tl(TCG_COND_NE, temp, 0, l1); 4287 } else { 4288 tcg_gen_brcondi_tl(TCG_COND_EQ, temp, 0, l1); 4289 } 4290 } 4291 } 4292 if ((bo & 0x10) == 0) { 4293 /* Test CR */ 4294 uint32_t bi = BI(ctx->opcode); 4295 uint32_t mask = 0x08 >> (bi & 0x03); 4296 TCGv_i32 temp = tcg_temp_new_i32(); 4297 4298 if (bo & 0x8) { 4299 tcg_gen_andi_i32(temp, cpu_crf[bi >> 2], mask); 4300 tcg_gen_brcondi_i32(TCG_COND_EQ, temp, 0, l1); 4301 } else { 4302 tcg_gen_andi_i32(temp, cpu_crf[bi >> 2], mask); 4303 tcg_gen_brcondi_i32(TCG_COND_NE, temp, 0, l1); 4304 } 4305 } 4306 gen_update_cfar(ctx, ctx->cia); 4307 if (type == BCOND_IM) { 4308 target_ulong li = (target_long)((int16_t)(BD(ctx->opcode))); 4309 if (likely(AA(ctx->opcode) == 0)) { 4310 gen_goto_tb(ctx, 0, ctx->cia + li); 4311 } else { 4312 gen_goto_tb(ctx, 0, li); 4313 } 4314 } else { 4315 if (NARROW_MODE(ctx)) { 4316 tcg_gen_andi_tl(cpu_nip, target, (uint32_t)~3); 4317 } else { 4318 tcg_gen_andi_tl(cpu_nip, target, ~3); 4319 } 4320 gen_lookup_and_goto_ptr(ctx); 4321 } 4322 if ((bo & 0x14) != 0x14) { 4323 /* fallthrough case */ 4324 gen_set_label(l1); 4325 gen_goto_tb(ctx, 1, ctx->base.pc_next); 4326 } 4327 ctx->base.is_jmp = DISAS_NORETURN; 4328 } 4329 4330 static void gen_bc(DisasContext *ctx) 4331 { 4332 gen_bcond(ctx, BCOND_IM); 4333 } 4334 4335 static void 
gen_bcctr(DisasContext *ctx)
{
    gen_bcond(ctx, BCOND_CTR);
}

/* bclr: conditional branch to LR */
static void gen_bclr(DisasContext *ctx)
{
    gen_bcond(ctx, BCOND_LR);
}

/* bctar: conditional branch to TAR */
static void gen_bctar(DisasContext *ctx)
{
    gen_bcond(ctx, BCOND_TAR);
}

/*** Condition register logical ***/
/*
 * Emit a CR-bit logical operation crbD = crbA <tcg_op> crbB.
 * Each 4-bit CR field lives in its own cpu_crf[] global, so the source
 * bits are first shifted within their field to line up with crbD's bit
 * position, combined with tcg_op, masked down to the single destination
 * bit, and merged back into crbD's field.
 */
#define GEN_CRLOGIC(name, tcg_op, opc)                                        \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    uint8_t bitmask;                                                          \
    int sh;                                                                   \
    TCGv_i32 t0, t1;                                                          \
    sh = (crbD(ctx->opcode) & 0x03) - (crbA(ctx->opcode) & 0x03);             \
    t0 = tcg_temp_new_i32();                                                  \
    if (sh > 0)                                                               \
        tcg_gen_shri_i32(t0, cpu_crf[crbA(ctx->opcode) >> 2], sh);            \
    else if (sh < 0)                                                          \
        tcg_gen_shli_i32(t0, cpu_crf[crbA(ctx->opcode) >> 2], -sh);           \
    else                                                                      \
        tcg_gen_mov_i32(t0, cpu_crf[crbA(ctx->opcode) >> 2]);                 \
    t1 = tcg_temp_new_i32();                                                  \
    sh = (crbD(ctx->opcode) & 0x03) - (crbB(ctx->opcode) & 0x03);             \
    if (sh > 0)                                                               \
        tcg_gen_shri_i32(t1, cpu_crf[crbB(ctx->opcode) >> 2], sh);            \
    else if (sh < 0)                                                          \
        tcg_gen_shli_i32(t1, cpu_crf[crbB(ctx->opcode) >> 2], -sh);           \
    else                                                                      \
        tcg_gen_mov_i32(t1, cpu_crf[crbB(ctx->opcode) >> 2]);                 \
    tcg_op(t0, t0, t1);                                                       \
    bitmask = 0x08 >> (crbD(ctx->opcode) & 0x03);                             \
    tcg_gen_andi_i32(t0, t0, bitmask);                                        \
    tcg_gen_andi_i32(t1, cpu_crf[crbD(ctx->opcode) >> 2], ~bitmask);          \
    tcg_gen_or_i32(cpu_crf[crbD(ctx->opcode) >> 2], t0, t1);                  \
}

/* crand */
GEN_CRLOGIC(crand, tcg_gen_and_i32, 0x08);
/* crandc */
GEN_CRLOGIC(crandc, tcg_gen_andc_i32, 0x04);
/* creqv */
GEN_CRLOGIC(creqv, tcg_gen_eqv_i32, 0x09);
/* crnand */
GEN_CRLOGIC(crnand, tcg_gen_nand_i32, 0x07);
/* crnor */
GEN_CRLOGIC(crnor, tcg_gen_nor_i32, 0x01);
/* cror */
GEN_CRLOGIC(cror, tcg_gen_or_i32, 0x0E);
/* crorc */
GEN_CRLOGIC(crorc, tcg_gen_orc_i32, 0x0D);
/* crxor */
GEN_CRLOGIC(crxor, tcg_gen_xor_i32, 0x06);

/* mcrf
 */
/* mcrf: copy one 4-bit CR field to another */
static void gen_mcrf(DisasContext *ctx)
{
    tcg_gen_mov_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfS(ctx->opcode)]);
}

/*** System linkage ***/

/* rfi (supervisor only) */
static void gen_rfi(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    /*
     * This instruction doesn't exist anymore on 64-bit server
     * processors compliant with arch 2.x
     */
    if (is_book3s_arch2x(ctx)) {
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        return;
    }
    /* Restore CPU state */
    CHK_SV(ctx);
    translator_io_start(&ctx->base);
    gen_update_cfar(ctx, ctx->cia);
    gen_helper_rfi(tcg_env);
    /* MSR may have changed; leave the translation loop */
    ctx->base.is_jmp = DISAS_EXIT;
#endif
}

#if defined(TARGET_PPC64)
/* rfid (supervisor only): return from interrupt, 64-bit form */
static void gen_rfid(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    /* Restore CPU state */
    CHK_SV(ctx);
    translator_io_start(&ctx->base);
    gen_update_cfar(ctx, ctx->cia);
    gen_helper_rfid(tcg_env);
    ctx->base.is_jmp = DISAS_EXIT;
#endif
}

#if !defined(CONFIG_USER_ONLY)
/* rfscv: return from system call vectored */
static void gen_rfscv(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    /* Restore CPU state */
    CHK_SV(ctx);
    translator_io_start(&ctx->base);
    gen_update_cfar(ctx, ctx->cia);
    gen_helper_rfscv(tcg_env);
    ctx->base.is_jmp = DISAS_EXIT;
#endif
}
#endif

/* hrfid (hypervisor only) */
static void gen_hrfid(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    /* Restore CPU state */
    CHK_HV(ctx);
    translator_io_start(&ctx->base);
    gen_helper_hrfid(tcg_env);
    ctx->base.is_jmp = DISAS_EXIT;
#endif
}
#endif

/* sc */
#if defined(CONFIG_USER_ONLY)
#define POWERPC_SYSCALL POWERPC_EXCP_SYSCALL_USER
#else
#define POWERPC_SYSCALL POWERPC_EXCP_SYSCALL
#endif
/* sc: system call, raises POWERPC_SYSCALL with LEV as error code */
static void gen_sc(DisasContext *ctx)
{
    uint32_t lev;

    /*
     * LEV is a 7-bit field, but the top 6 bits are treated as a reserved
     * field (i.e., ignored). ISA v3.1 changes that to 5 bits, but that is
     * for Ultravisor which TCG does not support, so just ignore the top 6.
     */
    lev = (ctx->opcode >> 5) & 0x1;
    gen_exception_err(ctx, POWERPC_SYSCALL, lev);
}

#if defined(TARGET_PPC64)
#if !defined(CONFIG_USER_ONLY)
/* scv: system call vectored; full 7-bit LEV selects the vector */
static void gen_scv(DisasContext *ctx)
{
    uint32_t lev = (ctx->opcode >> 5) & 0x7F;

    /* Set the PC back to the faulting instruction. */
    gen_update_nip(ctx, ctx->cia);
    gen_helper_scv(tcg_env, tcg_constant_i32(lev));

    ctx->base.is_jmp = DISAS_NORETURN;
}
#endif
#endif

/*** Trap ***/

/*
 * Check for unconditional traps (always or never).
 * Returns true when the trap was fully handled at translation time:
 * TO=0 never traps (nop), TO=31 always traps (exception emitted here).
 */
static bool check_unconditional_trap(DisasContext *ctx)
{
    /* Trap never */
    if (TO(ctx->opcode) == 0) {
        return true;
    }
    /* Trap always */
    if (TO(ctx->opcode) == 31) {
        gen_exception_err(ctx, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
        return true;
    }
    return false;
}

/* tw: trap word, conditional on rA vs rB per TO field */
static void gen_tw(DisasContext *ctx)
{
    TCGv_i32 t0;

    if (check_unconditional_trap(ctx)) {
        return;
    }
    t0 = tcg_constant_i32(TO(ctx->opcode));
    gen_helper_tw(tcg_env, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
                  t0);
}

/* twi: trap word immediate, rA vs sign-extended SIMM */
static void gen_twi(DisasContext *ctx)
{
    TCGv t0;
    TCGv_i32 t1;

    if (check_unconditional_trap(ctx)) {
        return;
    }
    t0 = tcg_constant_tl(SIMM(ctx->opcode));
    t1 = tcg_constant_i32(TO(ctx->opcode));
    gen_helper_tw(tcg_env, cpu_gpr[rA(ctx->opcode)], t0, t1);
}

#if defined(TARGET_PPC64)
/* td: trap doubleword */
static void gen_td(DisasContext *ctx)
{
    TCGv_i32 t0;

    if
    (check_unconditional_trap(ctx)) {
        return;
    }
    t0 = tcg_constant_i32(TO(ctx->opcode));
    gen_helper_td(tcg_env, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
                  t0);
}

/* tdi: trap doubleword immediate */
static void gen_tdi(DisasContext *ctx)
{
    TCGv t0;
    TCGv_i32 t1;

    if (check_unconditional_trap(ctx)) {
        return;
    }
    t0 = tcg_constant_tl(SIMM(ctx->opcode));
    t1 = tcg_constant_i32(TO(ctx->opcode));
    gen_helper_td(tcg_env, cpu_gpr[rA(ctx->opcode)], t0, t1);
}
#endif

/*** Processor control ***/

/*
 * mcrxr: pack SO/OV/CA into bits 3/2/1 of the destination CR field,
 * then clear the three XER bits.
 */
static void gen_mcrxr(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 dst = cpu_crf[crfD(ctx->opcode)];

    tcg_gen_trunc_tl_i32(t0, cpu_so);
    tcg_gen_trunc_tl_i32(t1, cpu_ov);
    tcg_gen_trunc_tl_i32(dst, cpu_ca);
    tcg_gen_shli_i32(t0, t0, 3);
    tcg_gen_shli_i32(t1, t1, 2);
    tcg_gen_shli_i32(dst, dst, 1);
    tcg_gen_or_i32(dst, dst, t0);
    tcg_gen_or_i32(dst, dst, t1);

    tcg_gen_movi_tl(cpu_so, 0);
    tcg_gen_movi_tl(cpu_ov, 0);
    tcg_gen_movi_tl(cpu_ca, 0);
}

#ifdef TARGET_PPC64
/* mcrxrx: pack OV:OV32:CA:CA32 into the destination CR field (no clear) */
static void gen_mcrxrx(DisasContext *ctx)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    TCGv_i32 dst = cpu_crf[crfD(ctx->opcode)];

    /* copy OV and OV32 */
    tcg_gen_shli_tl(t0, cpu_ov, 1);
    tcg_gen_or_tl(t0, t0, cpu_ov32);
    tcg_gen_shli_tl(t0, t0, 2);
    /* copy CA and CA32 */
    tcg_gen_shli_tl(t1, cpu_ca, 1);
    tcg_gen_or_tl(t1, t1, cpu_ca32);
    tcg_gen_or_tl(t0, t0, t1);
    tcg_gen_trunc_tl_i32(dst, t0);
}
#endif

/*
 * mfcr / mfocrf: with the one-field form (FXM with a single bit set),
 * read just that CR field; otherwise assemble all 8 fields into rD.
 */
static void gen_mfcr(DisasContext *ctx)
{
    uint32_t crm, crn;

    if (likely(ctx->opcode & 0x00100000)) {
        crm = CRM(ctx->opcode);
        /* mfocrf: exactly one bit of FXM must be set (power of two) */
        if (likely(crm && ((crm & (crm - 1)) == 0))) {
            crn = ctz32(crm);
            /* CR fields are stored high-to-low: field crn lives in crf[7-crn] */
            tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], cpu_crf[7 - crn]);
            tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)],
                            cpu_gpr[rD(ctx->opcode)], crn * 4);
        }
    } else {
        /* mfcr: concatenate all 8 CR fields, crf[0] in the high nibble */
        TCGv_i32 t0 = tcg_temp_new_i32();
        tcg_gen_mov_i32(t0, cpu_crf[0]);
        tcg_gen_shli_i32(t0, t0, 4);
        tcg_gen_or_i32(t0, t0, cpu_crf[1]);
        tcg_gen_shli_i32(t0, t0, 4);
        tcg_gen_or_i32(t0, t0, cpu_crf[2]);
        tcg_gen_shli_i32(t0, t0, 4);
        tcg_gen_or_i32(t0, t0, cpu_crf[3]);
        tcg_gen_shli_i32(t0, t0, 4);
        tcg_gen_or_i32(t0, t0, cpu_crf[4]);
        tcg_gen_shli_i32(t0, t0, 4);
        tcg_gen_or_i32(t0, t0, cpu_crf[5]);
        tcg_gen_shli_i32(t0, t0, 4);
        tcg_gen_or_i32(t0, t0, cpu_crf[6]);
        tcg_gen_shli_i32(t0, t0, 4);
        tcg_gen_or_i32(t0, t0, cpu_crf[7]);
        tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t0);
    }
}

/* mfmsr (supervisor only) */
static void gen_mfmsr(DisasContext *ctx)
{
    CHK_SV(ctx);
    tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_msr);
}

/*
 * mfspr: dispatch through the per-SPR read callback table, selecting
 * the user (uea), hypervisor (hea) or supervisor (oea) accessor based
 * on the current privilege level.
 */
static inline void gen_op_mfspr(DisasContext *ctx)
{
    void (*read_cb)(DisasContext *ctx, int gprn, int sprn);
    uint32_t sprn = SPR(ctx->opcode);

#if defined(CONFIG_USER_ONLY)
    read_cb = ctx->spr_cb[sprn].uea_read;
#else
    if (ctx->pr) {
        read_cb = ctx->spr_cb[sprn].uea_read;
    } else if (ctx->hv) {
        read_cb = ctx->spr_cb[sprn].hea_read;
    } else {
        read_cb = ctx->spr_cb[sprn].oea_read;
    }
#endif
    if (likely(read_cb != NULL)) {
        if (likely(read_cb != SPR_NOACCESS)) {
            (*read_cb)(ctx, rD(ctx->opcode), sprn);
        } else {
            /* Privilege exception */
            /*
             * This is a hack to avoid warnings when running Linux:
             * this OS breaks the PowerPC virtualisation model,
             * allowing userland application to read the PVR
             */
            if (sprn != SPR_PVR) {
                qemu_log_mask(LOG_GUEST_ERROR, "Trying to read privileged spr "
                              "%d (0x%03x) at " TARGET_FMT_lx "\n", sprn, sprn,
                              ctx->cia);
            }
            gen_priv_exception(ctx, POWERPC_EXCP_PRIV_REG);
        }
    } else {
        /* ISA 2.07 defines these as no-ops */
        if ((ctx->insns_flags2 & PPC2_ISA207S) &&
            (sprn >= 808 && sprn <= 811)) {
            /* This is a nop */
            return;
        }
        /* Not defined */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Trying to read invalid spr %d (0x%03x) at "
                      TARGET_FMT_lx "\n", sprn, sprn, ctx->cia);

        /*
         * The behaviour depends on MSR:PR and SPR# bit 0x10, it can
         * generate a priv, a hv emu or a no-op
         */
        if (sprn & 0x10) {
            if (ctx->pr) {
                gen_priv_exception(ctx, POWERPC_EXCP_PRIV_REG);
            }
        } else {
            if (ctx->pr || sprn == 0 || sprn == 4 || sprn == 5 || sprn == 6) {
                gen_hvpriv_exception(ctx, POWERPC_EXCP_PRIV_REG);
            }
        }
    }
}

/* mfspr */
static void gen_mfspr(DisasContext *ctx)
{
    gen_op_mfspr(ctx);
}

/* mftb: time base reads share the SPR read path */
static void gen_mftb(DisasContext *ctx)
{
    gen_op_mfspr(ctx);
}

/*
 * mtcrf / mtocrf: write CR fields from rS. The one-field form updates a
 * single field; otherwise every field whose CRM bit is set is updated.
 */
static void gen_mtcrf(DisasContext *ctx)
{
    uint32_t crm, crn;

    crm = CRM(ctx->opcode);
    if (likely((ctx->opcode & 0x00100000))) {
        /* mtocrf: requires exactly one CRM bit set */
        if (crm && ((crm & (crm - 1)) == 0)) {
            TCGv_i32 temp = tcg_temp_new_i32();
            crn = ctz32(crm);
            tcg_gen_trunc_tl_i32(temp, cpu_gpr[rS(ctx->opcode)]);
            tcg_gen_shri_i32(temp, temp, crn * 4);
            tcg_gen_andi_i32(cpu_crf[7 - crn], temp, 0xf);
        }
    } else {
        TCGv_i32 temp = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(temp, cpu_gpr[rS(ctx->opcode)]);
        for (crn = 0 ; crn < 8 ; crn++) {
            if (crm & (1 << crn)) {
                tcg_gen_shri_i32(cpu_crf[7 - crn], temp, crn * 4);
                tcg_gen_andi_i32(cpu_crf[7 - crn], cpu_crf[7 - crn], 0xf);
            }
        }
    }
}

/* mtmsr */
#if defined(TARGET_PPC64)
/* mtmsrd (64-bit, book3s arch 2.x only) */
static void gen_mtmsrd(DisasContext *ctx)
{
    if (unlikely(!is_book3s_arch2x(ctx))) {
        gen_invalid(ctx);
        return;
    }

    CHK_SV(ctx);

#if !defined(CONFIG_USER_ONLY)
    TCGv t0, t1;
    target_ulong mask;

    t0 = tcg_temp_new();
    t1 = tcg_temp_new();

    translator_io_start(&ctx->base);

    if (ctx->opcode & 0x00010000) {
        /* L=1 form only updates EE and RI */
        mask = (1ULL << MSR_RI) | (1ULL << MSR_EE);
    } else {
        /* mtmsrd does not alter HV, S, ME, or LE */
        mask = ~((1ULL << MSR_LE) | (1ULL << MSR_ME) | (1ULL << MSR_S) |
                 (1ULL << MSR_HV));
        /*
         * XXX: we need to update nip before the store if we enter
         * power saving mode, we will exit the loop directly from
         * ppc_store_msr
         */
        gen_update_nip(ctx, ctx->base.pc_next);
    }

    /* New MSR = (rS & mask) | (old MSR & ~mask) */
    tcg_gen_andi_tl(t0, cpu_gpr[rS(ctx->opcode)], mask);
    tcg_gen_andi_tl(t1, cpu_msr, ~mask);
    tcg_gen_or_tl(t0, t0, t1);

    gen_helper_store_msr(tcg_env, t0);

    /* Must stop the translation as machine state (may have) changed */
    ctx->base.is_jmp = DISAS_EXIT_UPDATE;
#endif /* !defined(CONFIG_USER_ONLY) */
}
#endif /* defined(TARGET_PPC64) */

/* mtmsr: 32-bit form, only the low 32 MSR bits are writable */
static void gen_mtmsr(DisasContext *ctx)
{
    CHK_SV(ctx);

#if !defined(CONFIG_USER_ONLY)
    TCGv t0, t1;
    target_ulong mask = 0xFFFFFFFF;

    t0 = tcg_temp_new();
    t1 = tcg_temp_new();

    translator_io_start(&ctx->base);
    if (ctx->opcode & 0x00010000) {
        /* L=1 form only updates EE and RI */
        mask &= (1ULL << MSR_RI) | (1ULL << MSR_EE);
    } else {
        /* mtmsr does not alter S, ME, or LE */
        mask &= ~((1ULL << MSR_LE) | (1ULL << MSR_ME) | (1ULL << MSR_S));

        /*
         * XXX: we need to update nip before the store if we enter
         * power saving mode, we will exit the loop directly from
         * ppc_store_msr
         */
        gen_update_nip(ctx, ctx->base.pc_next);
    }

    /* New MSR = (rS & mask) | (old MSR & ~mask) */
    tcg_gen_andi_tl(t0, cpu_gpr[rS(ctx->opcode)], mask);
    tcg_gen_andi_tl(t1, cpu_msr, ~mask);
    tcg_gen_or_tl(t0, t0,
                  t1);

    gen_helper_store_msr(tcg_env, t0);

    /* Must stop the translation as machine state (may have) changed */
    ctx->base.is_jmp = DISAS_EXIT_UPDATE;
#endif
}

/*
 * mtspr: dispatch through the per-SPR write callback table, selecting
 * the user/hypervisor/supervisor accessor by privilege level (mirror of
 * gen_op_mfspr above).
 */
static void gen_mtspr(DisasContext *ctx)
{
    void (*write_cb)(DisasContext *ctx, int sprn, int gprn);
    uint32_t sprn = SPR(ctx->opcode);

#if defined(CONFIG_USER_ONLY)
    write_cb = ctx->spr_cb[sprn].uea_write;
#else
    if (ctx->pr) {
        write_cb = ctx->spr_cb[sprn].uea_write;
    } else if (ctx->hv) {
        write_cb = ctx->spr_cb[sprn].hea_write;
    } else {
        write_cb = ctx->spr_cb[sprn].oea_write;
    }
#endif
    if (likely(write_cb != NULL)) {
        if (likely(write_cb != SPR_NOACCESS)) {
            (*write_cb)(ctx, sprn, rS(ctx->opcode));
        } else {
            /* Privilege exception */
            qemu_log_mask(LOG_GUEST_ERROR, "Trying to write privileged spr "
                          "%d (0x%03x) at " TARGET_FMT_lx "\n", sprn, sprn,
                          ctx->cia);
            gen_priv_exception(ctx, POWERPC_EXCP_PRIV_REG);
        }
    } else {
        /* ISA 2.07 defines these as no-ops */
        if ((ctx->insns_flags2 & PPC2_ISA207S) &&
            (sprn >= 808 && sprn <= 811)) {
            /* This is a nop */
            return;
        }

        /* Not defined */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Trying to write invalid spr %d (0x%03x) at "
                      TARGET_FMT_lx "\n", sprn, sprn, ctx->cia);


        /*
         * The behaviour depends on MSR:PR and SPR# bit 0x10, it can
         * generate a priv, a hv emu or a no-op
         */
        if (sprn & 0x10) {
            if (ctx->pr) {
                gen_priv_exception(ctx, POWERPC_EXCP_PRIV_REG);
            }
        } else {
            if (ctx->pr || sprn == 0) {
                gen_hvpriv_exception(ctx, POWERPC_EXCP_PRIV_REG);
            }
        }
    }
}

#if defined(TARGET_PPC64)
/* setb: set rD to -1, 1 or 0 from the LT/GT bits of CR field crfS */
static void gen_setb(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t8 = tcg_constant_i32(8);
    TCGv_i32 tm1 = tcg_constant_i32(-1);
    int crf = crfS(ctx->opcode);

    /* t0 = (crf >= 4) i.e. GT bit set -> 1; then LT bit (>= 8) -> -1 */
    tcg_gen_setcondi_i32(TCG_COND_GEU, t0, cpu_crf[crf], 4);
    tcg_gen_movcond_i32(TCG_COND_GEU, t0, cpu_crf[crf], t8, tm1, t0);
    tcg_gen_ext_i32_tl(cpu_gpr[rD(ctx->opcode)], t0);
}
#endif

/*** Cache management ***/

/* dcbf: modelled as a byte load to trigger MMU access checks */
static void gen_dcbf(DisasContext *ctx)
{
    /* XXX: specification says this is treated as a load by the MMU */
    TCGv t0;
    gen_set_access_type(ctx, ACCESS_CACHE);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    gen_qemu_ld8u(ctx, t0, t0);
}

/* dcbfep (external PID dcbf) */
static void gen_dcbfep(DisasContext *ctx)
{
    /* XXX: specification says this is treated as a load by the MMU */
    TCGv t0;
    CHK_SV(ctx);
    gen_set_access_type(ctx, ACCESS_CACHE);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    tcg_gen_qemu_ld_tl(t0, t0, PPC_TLB_EPID_LOAD, DEF_MEMOP(MO_UB));
}

/* dcbi (Supervisor only) */
static void gen_dcbi(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv EA, val;

    CHK_SV(ctx);
    EA = tcg_temp_new();
    gen_set_access_type(ctx, ACCESS_CACHE);
    gen_addr_reg_index(ctx, EA);
    val = tcg_temp_new();
    /* XXX: specification says this should be treated as a store by the MMU */
    gen_qemu_ld8u(ctx, val, EA);
    gen_qemu_st8(ctx, val, EA);
#endif /* defined(CONFIG_USER_ONLY) */
}

/* dcbst */
static void gen_dcbst(DisasContext *ctx)
{
    /* XXX: specification says this is treated as a load by the MMU */
    TCGv t0;
    gen_set_access_type(ctx, ACCESS_CACHE);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    gen_qemu_ld8u(ctx, t0, t0);
}

/* dcbstep (dcbstep External PID version) */
static void gen_dcbstep(DisasContext *ctx)
{
    /* XXX: specification says this is treated as a load by the MMU */
    TCGv t0;
    gen_set_access_type(ctx, ACCESS_CACHE);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    tcg_gen_qemu_ld_tl(t0, t0, PPC_TLB_EPID_LOAD, DEF_MEMOP(MO_UB));
}

/* dcbt */
static void gen_dcbt(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     * XXX: specification say this is treated as a load by the MMU but
     * does not generate any exception
     */
}

/* dcbtep */
static void gen_dcbtep(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     * XXX: specification say this is treated as a load by the MMU but
     * does not generate any exception
     */
}

/* dcbtst */
static void gen_dcbtst(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     * XXX: specification say this is treated as a load by the MMU but
     * does not generate any exception
     */
}

/* dcbtstep */
static void gen_dcbtstep(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     * XXX: specification say this is treated as a load by the MMU but
     * does not generate any exception
     */
}

/* dcbtls: lock attempt always fails, reported via L1CSR0[CUL] */
static void gen_dcbtls(DisasContext *ctx)
{
    /* Always fails locking the cache */
    TCGv t0 = tcg_temp_new();
    gen_load_spr(t0, SPR_Exxx_L1CSR0);
    tcg_gen_ori_tl(t0, t0, L1CSR0_CUL);
    gen_store_spr(SPR_Exxx_L1CSR0, t0);
}

/* dcblc */
static void gen_dcblc(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     */
}

/* dcbz: zero a cache line; the opcode bits let the helper pick the size */
static void gen_dcbz(DisasContext *ctx)
{
    TCGv tcgv_addr;
    TCGv_i32 tcgv_op;

    gen_set_access_type(ctx, ACCESS_CACHE);
    tcgv_addr = tcg_temp_new();
    tcgv_op = tcg_constant_i32(ctx->opcode & 0x03FF000);
    gen_addr_reg_index(ctx, tcgv_addr);
    gen_helper_dcbz(tcg_env, tcgv_addr, tcgv_op);
}

/* dcbzep */
static void gen_dcbzep(DisasContext *ctx)
{
    TCGv tcgv_addr;
    TCGv_i32 tcgv_op;

    gen_set_access_type(ctx, ACCESS_CACHE);
    tcgv_addr = tcg_temp_new();
    tcgv_op = tcg_constant_i32(ctx->opcode & 0x03FF000);
    gen_addr_reg_index(ctx, tcgv_addr);
    gen_helper_dcbzep(tcg_env, tcgv_addr, tcgv_op);
}

/* dst / dstt: data stream touch, invalid when rA is 0 */
static void gen_dst(DisasContext *ctx)
{
    if (rA(ctx->opcode) == 0) {
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
    } else {
        /* interpreted as no-op */
    }
}

/* dstst / dststt */
static void gen_dstst(DisasContext *ctx)
{
    if (rA(ctx->opcode) == 0) {
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
    } else {
        /* interpreted as no-op */
    }

}

/* dss / dssall */
static void gen_dss(DisasContext *ctx)
{
    /* interpreted as no-op */
}

/* icbi: instruction cache block invalidate, done in the helper */
static void gen_icbi(DisasContext *ctx)
{
    TCGv t0;
    gen_set_access_type(ctx, ACCESS_CACHE);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    gen_helper_icbi(tcg_env, t0);
}

/* icbiep */
static void gen_icbiep(DisasContext *ctx)
{
    TCGv t0;
    gen_set_access_type(ctx, ACCESS_CACHE);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    gen_helper_icbiep(tcg_env, t0);
}

/* Optional: */
/* dcba */
static void gen_dcba(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     * XXX: specification say this is treated as a store by the MMU
     * but does not generate any exception
     */
}

/*** Segment register manipulation ***/
/* Supervisor only: */

/* mfsr: read segment register SR into rD */
static void gen_mfsr(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_constant_tl(SR(ctx->opcode));
    gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], tcg_env, t0);
#endif /* defined(CONFIG_USER_ONLY) */
}

/* mfsrin: segment register number comes from bits 28..31 of rB */
static void gen_mfsrin(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_temp_new();
    tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4);
    gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], tcg_env, t0);
#endif /* defined(CONFIG_USER_ONLY) */
}

/* mtsr: write rS into segment register SR */
static void gen_mtsr(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_constant_tl(SR(ctx->opcode));
    gen_helper_store_sr(tcg_env, t0, cpu_gpr[rS(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}

/* mtsrin */
static void gen_mtsrin(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;
    CHK_SV(ctx);

    t0 = tcg_temp_new();
    tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4);
    /*
     * NOTE(review): rD is used here while the 64-bit bridge variant below
     * uses rS; rD and rS decode the same opcode field, so the generated
     * code is identical — confirm which name is intended for clarity.
     */
    gen_helper_store_sr(tcg_env, t0, cpu_gpr[rD(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}

#if defined(TARGET_PPC64)
/* Specific implementation for PowerPC 64 "bridge" emulation using SLB */

/* mfsr */
static void gen_mfsr_64b(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_constant_tl(SR(ctx->opcode));
    gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], tcg_env, t0);
#endif /* defined(CONFIG_USER_ONLY) */
}

/* mfsrin */
static void gen_mfsrin_64b(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_temp_new();
    tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4);
    gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], tcg_env, t0);
#endif /* defined(CONFIG_USER_ONLY) */
}

/* mtsr */
static void gen_mtsr_64b(DisasContext
                          *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_constant_tl(SR(ctx->opcode));
    gen_helper_store_sr(tcg_env, t0, cpu_gpr[rS(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}

/* mtsrin */
static void gen_mtsrin_64b(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_temp_new();
    tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4);
    gen_helper_store_sr(tcg_env, t0, cpu_gpr[rS(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}

#endif /* defined(TARGET_PPC64) */

/*** Lookaside buffer management ***/
/* Optional & supervisor only: */

/* tlbia: invalidate all TLB entries (hypervisor privileged) */
static void gen_tlbia(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_HV(ctx);

    gen_helper_tlbia(tcg_env);
#endif /* defined(CONFIG_USER_ONLY) */
}

/* tlbsync */
static void gen_tlbsync(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else

    if (ctx->gtse) {
        CHK_SV(ctx); /* If gtse is set then tlbsync is supervisor privileged */
    } else {
        CHK_HV(ctx); /* Else hypervisor privileged */
    }

    /* BookS does both ptesync and tlbsync make tlbsync a nop for server */
    if (ctx->insns_flags & PPC_BOOKE) {
        gen_check_tlb_flush(ctx, true);
    }
#endif /* defined(CONFIG_USER_ONLY) */
}

/*** External control ***/
/* Optional: */

/* eciwx: external control in word indexed */
static void gen_eciwx(DisasContext *ctx)
{
    TCGv t0;
    /* Should check EAR[E] !
     */
    gen_set_access_type(ctx, ACCESS_EXT);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    /* Aligned 32-bit load from EA into rD */
    tcg_gen_qemu_ld_tl(cpu_gpr[rD(ctx->opcode)], t0, ctx->mem_idx,
                       DEF_MEMOP(MO_UL | MO_ALIGN));
}

/* ecowx: external control out word indexed */
static void gen_ecowx(DisasContext *ctx)
{
    TCGv t0;
    /* Should check EAR[E] ! */
    gen_set_access_type(ctx, ACCESS_EXT);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    /* Aligned 32-bit store of rD (source field) to EA */
    tcg_gen_qemu_st_tl(cpu_gpr[rD(ctx->opcode)], t0, ctx->mem_idx,
                       DEF_MEMOP(MO_UL | MO_ALIGN));
}

/* 602 - 603 - G2 TLB management */

/* tlbld: load data TLB entry for the EA in rB */
static void gen_tlbld_6xx(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    gen_helper_6xx_tlbd(tcg_env, cpu_gpr[rB(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}

/* tlbli: load instruction TLB entry for the EA in rB */
static void gen_tlbli_6xx(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    gen_helper_6xx_tlbi(tcg_env, cpu_gpr[rB(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}

/* BookE specific instructions */

/* XXX: not implemented on 440 ? */
static void gen_mfapidi(DisasContext *ctx)
{
    /* XXX: TODO */
    gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
}

/* XXX: not implemented on 440 ?
 */
/* tlbiva: invalidate TLB entry by virtual address */
static void gen_tlbiva(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    /*
     * NOTE(review): t0 holds the computed rA+rB effective address but is
     * never used — the helper only receives rB, so a non-zero rA is
     * ignored. Looks suspicious; confirm against the intended semantics
     * before changing, since the helper signature takes a single address.
     */
    gen_helper_tlbiva(tcg_env, cpu_gpr[rB(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}

/*
 * All 405 MAC instructions are translated here.
 * opc3 low bits select the half-word operands and signedness; opc2 bit
 * 0x04 selects accumulate, bit 0x02 selects negative accumulate; Rc
 * requests CR0 update.
 */
static inline void gen_405_mulladd_insn(DisasContext *ctx, int opc2, int opc3,
                                        int ra, int rb, int rt, int Rc)
{
    TCGv t0, t1;

    t0 = tcg_temp_new();
    t1 = tcg_temp_new();

    switch (opc3 & 0x0D) {
    case 0x05:
        /* macchw - macchw. - macchwo - macchwo. */
        /* macchws - macchws. - macchwso - macchwso. */
        /* nmacchw - nmacchw. - nmacchwo - nmacchwo. */
        /* nmacchws - nmacchws. - nmacchwso - nmacchwso. */
        /* mulchw - mulchw. */
        /* Signed: low half of rA x high half of rB */
        tcg_gen_ext16s_tl(t0, cpu_gpr[ra]);
        tcg_gen_sari_tl(t1, cpu_gpr[rb], 16);
        tcg_gen_ext16s_tl(t1, t1);
        break;
    case 0x04:
        /* macchwu - macchwu. - macchwuo - macchwuo. */
        /* macchwsu - macchwsu. - macchwsuo - macchwsuo. */
        /* mulchwu - mulchwu. */
        /* Unsigned: low half of rA x high half of rB */
        tcg_gen_ext16u_tl(t0, cpu_gpr[ra]);
        tcg_gen_shri_tl(t1, cpu_gpr[rb], 16);
        tcg_gen_ext16u_tl(t1, t1);
        break;
    case 0x01:
        /* machhw - machhw. - machhwo - machhwo. */
        /* machhws - machhws. - machhwso - machhwso. */
        /* nmachhw - nmachhw. - nmachhwo - nmachhwo. */
        /* nmachhws - nmachhws. - nmachhwso - nmachhwso. */
        /* mulhhw - mulhhw. */
        /* Signed: high halves of rA and rB */
        tcg_gen_sari_tl(t0, cpu_gpr[ra], 16);
        tcg_gen_ext16s_tl(t0, t0);
        tcg_gen_sari_tl(t1, cpu_gpr[rb], 16);
        tcg_gen_ext16s_tl(t1, t1);
        break;
    case 0x00:
        /* machhwu - machhwu. - machhwuo - machhwuo. */
        /* machhwsu - machhwsu. - machhwsuo - machhwsuo. */
        /* mulhhwu - mulhhwu.
         */
        /* Unsigned: high halves of rA and rB */
        tcg_gen_shri_tl(t0, cpu_gpr[ra], 16);
        tcg_gen_ext16u_tl(t0, t0);
        tcg_gen_shri_tl(t1, cpu_gpr[rb], 16);
        tcg_gen_ext16u_tl(t1, t1);
        break;
    case 0x0D:
        /* maclhw - maclhw. - maclhwo - maclhwo. */
        /* maclhws - maclhws. - maclhwso - maclhwso. */
        /* nmaclhw - nmaclhw. - nmaclhwo - nmaclhwo. */
        /* nmaclhws - nmaclhws. - nmaclhwso - nmaclhwso. */
        /* mullhw - mullhw. */
        /* Signed: low halves of rA and rB */
        tcg_gen_ext16s_tl(t0, cpu_gpr[ra]);
        tcg_gen_ext16s_tl(t1, cpu_gpr[rb]);
        break;
    case 0x0C:
        /* maclhwu - maclhwu. - maclhwuo - maclhwuo. */
        /* maclhwsu - maclhwsu. - maclhwsuo - maclhwsuo. */
        /* mullhwu - mullhwu. */
        /* Unsigned: low halves of rA and rB */
        tcg_gen_ext16u_tl(t0, cpu_gpr[ra]);
        tcg_gen_ext16u_tl(t1, cpu_gpr[rb]);
        break;
    }
    if (opc2 & 0x04) {
        /* (n)multiply-and-accumulate (0x0C / 0x0E) */
        tcg_gen_mul_tl(t1, t0, t1);
        if (opc2 & 0x02) {
            /* nmultiply-and-accumulate (0x0E) */
            tcg_gen_sub_tl(t0, cpu_gpr[rt], t1);
        } else {
            /* multiply-and-accumulate (0x0C) */
            tcg_gen_add_tl(t0, cpu_gpr[rt], t1);
        }

        if (opc3 & 0x12) {
            /* Check overflow and/or saturate */
            TCGLabel *l1 = gen_new_label();

            if (opc3 & 0x10) {
                /* Start with XER OV disabled, the most likely case */
                tcg_gen_movi_tl(cpu_ov, 0);
            }
            if (opc3 & 0x01) {
                /* Signed */
                /* Overflow iff operands share sign but result differs */
                tcg_gen_xor_tl(t1, cpu_gpr[rt], t1);
                tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1);
                tcg_gen_xor_tl(t1, cpu_gpr[rt], t0);
                tcg_gen_brcondi_tl(TCG_COND_LT, t1, 0, l1);
                if (opc3 & 0x02) {
                    /* Saturate */
                    tcg_gen_sari_tl(t0, cpu_gpr[rt], 31);
                    tcg_gen_xori_tl(t0, t0, 0x7fffffff);
                }
            } else {
                /* Unsigned */
                tcg_gen_brcond_tl(TCG_COND_GEU, t0, t1, l1);
                if (opc3 & 0x02) {
                    /* Saturate */
                    tcg_gen_movi_tl(t0, UINT32_MAX);
                }
            }
            if (opc3 & 0x10) {
                /* Check overflow */
                tcg_gen_movi_tl(cpu_ov, 1);
                tcg_gen_movi_tl(cpu_so, 1);
            }
gen_set_label(l1); 5471 tcg_gen_mov_tl(cpu_gpr[rt], t0); 5472 } 5473 } else { 5474 tcg_gen_mul_tl(cpu_gpr[rt], t0, t1); 5475 } 5476 if (unlikely(Rc) != 0) { 5477 /* Update Rc0 */ 5478 gen_set_Rc0(ctx, cpu_gpr[rt]); 5479 } 5480 } 5481 5482 #define GEN_MAC_HANDLER(name, opc2, opc3) \ 5483 static void glue(gen_, name)(DisasContext *ctx) \ 5484 { \ 5485 gen_405_mulladd_insn(ctx, opc2, opc3, rA(ctx->opcode), rB(ctx->opcode), \ 5486 rD(ctx->opcode), Rc(ctx->opcode)); \ 5487 } 5488 5489 /* macchw - macchw. */ 5490 GEN_MAC_HANDLER(macchw, 0x0C, 0x05); 5491 /* macchwo - macchwo. */ 5492 GEN_MAC_HANDLER(macchwo, 0x0C, 0x15); 5493 /* macchws - macchws. */ 5494 GEN_MAC_HANDLER(macchws, 0x0C, 0x07); 5495 /* macchwso - macchwso. */ 5496 GEN_MAC_HANDLER(macchwso, 0x0C, 0x17); 5497 /* macchwsu - macchwsu. */ 5498 GEN_MAC_HANDLER(macchwsu, 0x0C, 0x06); 5499 /* macchwsuo - macchwsuo. */ 5500 GEN_MAC_HANDLER(macchwsuo, 0x0C, 0x16); 5501 /* macchwu - macchwu. */ 5502 GEN_MAC_HANDLER(macchwu, 0x0C, 0x04); 5503 /* macchwuo - macchwuo. */ 5504 GEN_MAC_HANDLER(macchwuo, 0x0C, 0x14); 5505 /* machhw - machhw. */ 5506 GEN_MAC_HANDLER(machhw, 0x0C, 0x01); 5507 /* machhwo - machhwo. */ 5508 GEN_MAC_HANDLER(machhwo, 0x0C, 0x11); 5509 /* machhws - machhws. */ 5510 GEN_MAC_HANDLER(machhws, 0x0C, 0x03); 5511 /* machhwso - machhwso. */ 5512 GEN_MAC_HANDLER(machhwso, 0x0C, 0x13); 5513 /* machhwsu - machhwsu. */ 5514 GEN_MAC_HANDLER(machhwsu, 0x0C, 0x02); 5515 /* machhwsuo - machhwsuo. */ 5516 GEN_MAC_HANDLER(machhwsuo, 0x0C, 0x12); 5517 /* machhwu - machhwu. */ 5518 GEN_MAC_HANDLER(machhwu, 0x0C, 0x00); 5519 /* machhwuo - machhwuo. */ 5520 GEN_MAC_HANDLER(machhwuo, 0x0C, 0x10); 5521 /* maclhw - maclhw. */ 5522 GEN_MAC_HANDLER(maclhw, 0x0C, 0x0D); 5523 /* maclhwo - maclhwo. */ 5524 GEN_MAC_HANDLER(maclhwo, 0x0C, 0x1D); 5525 /* maclhws - maclhws. */ 5526 GEN_MAC_HANDLER(maclhws, 0x0C, 0x0F); 5527 /* maclhwso - maclhwso. */ 5528 GEN_MAC_HANDLER(maclhwso, 0x0C, 0x1F); 5529 /* maclhwu - maclhwu. 
*/ 5530 GEN_MAC_HANDLER(maclhwu, 0x0C, 0x0C); 5531 /* maclhwuo - maclhwuo. */ 5532 GEN_MAC_HANDLER(maclhwuo, 0x0C, 0x1C); 5533 /* maclhwsu - maclhwsu. */ 5534 GEN_MAC_HANDLER(maclhwsu, 0x0C, 0x0E); 5535 /* maclhwsuo - maclhwsuo. */ 5536 GEN_MAC_HANDLER(maclhwsuo, 0x0C, 0x1E); 5537 /* nmacchw - nmacchw. */ 5538 GEN_MAC_HANDLER(nmacchw, 0x0E, 0x05); 5539 /* nmacchwo - nmacchwo. */ 5540 GEN_MAC_HANDLER(nmacchwo, 0x0E, 0x15); 5541 /* nmacchws - nmacchws. */ 5542 GEN_MAC_HANDLER(nmacchws, 0x0E, 0x07); 5543 /* nmacchwso - nmacchwso. */ 5544 GEN_MAC_HANDLER(nmacchwso, 0x0E, 0x17); 5545 /* nmachhw - nmachhw. */ 5546 GEN_MAC_HANDLER(nmachhw, 0x0E, 0x01); 5547 /* nmachhwo - nmachhwo. */ 5548 GEN_MAC_HANDLER(nmachhwo, 0x0E, 0x11); 5549 /* nmachhws - nmachhws. */ 5550 GEN_MAC_HANDLER(nmachhws, 0x0E, 0x03); 5551 /* nmachhwso - nmachhwso. */ 5552 GEN_MAC_HANDLER(nmachhwso, 0x0E, 0x13); 5553 /* nmaclhw - nmaclhw. */ 5554 GEN_MAC_HANDLER(nmaclhw, 0x0E, 0x0D); 5555 /* nmaclhwo - nmaclhwo. */ 5556 GEN_MAC_HANDLER(nmaclhwo, 0x0E, 0x1D); 5557 /* nmaclhws - nmaclhws. */ 5558 GEN_MAC_HANDLER(nmaclhws, 0x0E, 0x0F); 5559 /* nmaclhwso - nmaclhwso. */ 5560 GEN_MAC_HANDLER(nmaclhwso, 0x0E, 0x1F); 5561 5562 /* mulchw - mulchw. */ 5563 GEN_MAC_HANDLER(mulchw, 0x08, 0x05); 5564 /* mulchwu - mulchwu. */ 5565 GEN_MAC_HANDLER(mulchwu, 0x08, 0x04); 5566 /* mulhhw - mulhhw. */ 5567 GEN_MAC_HANDLER(mulhhw, 0x08, 0x01); 5568 /* mulhhwu - mulhhwu. */ 5569 GEN_MAC_HANDLER(mulhhwu, 0x08, 0x00); 5570 /* mullhw - mullhw. */ 5571 GEN_MAC_HANDLER(mullhw, 0x08, 0x0D); 5572 /* mullhwu - mullhwu. 
*/
GEN_MAC_HANDLER(mullhwu, 0x08, 0x0C);

/*
 * mfdcr: move from Device Control Register.  The DCR number is encoded in
 * the SPR field of the instruction.  Privileged; in user-only builds it
 * raises a privilege exception.
 */
static void gen_mfdcr(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv dcrn;

    CHK_SV(ctx);
    dcrn = tcg_constant_tl(SPR(ctx->opcode));
    gen_helper_load_dcr(cpu_gpr[rD(ctx->opcode)], tcg_env, dcrn);
#endif /* defined(CONFIG_USER_ONLY) */
}

/* mtdcr: move to Device Control Register (DCR number from the SPR field). */
static void gen_mtdcr(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv dcrn;

    CHK_SV(ctx);
    dcrn = tcg_constant_tl(SPR(ctx->opcode));
    gen_helper_store_dcr(tcg_env, dcrn, cpu_gpr[rS(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}

/* mfdcrx: indexed form — the DCR number is taken from rA at run time. */
/* XXX: not implemented on 440 ? */
static void gen_mfdcrx(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    gen_helper_load_dcr(cpu_gpr[rD(ctx->opcode)], tcg_env,
                        cpu_gpr[rA(ctx->opcode)]);
    /* Note: Rc update flag set leads to undefined state of Rc0 */
#endif /* defined(CONFIG_USER_ONLY) */
}

/* mtdcrx: indexed form — the DCR number is taken from rA at run time. */
/* XXX: not implemented on 440 ? */
static void gen_mtdcrx(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    gen_helper_store_dcr(tcg_env, cpu_gpr[rA(ctx->opcode)],
                         cpu_gpr[rS(ctx->opcode)]);
    /* Note: Rc update flag set leads to undefined state of Rc0 */
#endif /* defined(CONFIG_USER_ONLY) */
}

/* dccci: data cache congruence-class invalidate; privileged. */
static void gen_dccci(DisasContext *ctx)
{
    CHK_SV(ctx);
    /* interpreted as no-op */
}

/*
 * dcread: data cache read (4xx debug insn).  Emits the load so the MMU
 * sees the access, but the loaded value is discarded; rD receives the
 * effective address.
 */
static void gen_dcread(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv EA, val;

    CHK_SV(ctx);
    gen_set_access_type(ctx, ACCESS_CACHE);
    EA = tcg_temp_new();
    gen_addr_reg_index(ctx, EA);
    val = tcg_temp_new();
    gen_qemu_ld32u(ctx, val, EA);
    tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], EA);
#endif /* defined(CONFIG_USER_ONLY) */
}

/* icbt */
static void gen_icbt_40x(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     * XXX: specification say this is treated as a load by the MMU but
     * does not generate any exception
     */
}

/* iccci: instruction cache invalidate; privileged no-op here. */
static void gen_iccci(DisasContext *ctx)
{
    CHK_SV(ctx);
    /* interpreted as no-op */
}

/* icread: instruction cache read; privileged no-op here. */
static void gen_icread(DisasContext *ctx)
{
    CHK_SV(ctx);
    /* interpreted as no-op */
}

/* rfci (supervisor only): 40x return from critical interrupt. */
static void gen_rfci_40x(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    /* Restore CPU state */
    gen_helper_40x_rfci(tcg_env);
    /* MSR/NIP changed: end the translation block */
    ctx->base.is_jmp = DISAS_EXIT;
#endif /* defined(CONFIG_USER_ONLY) */
}

/* rfci: BookE return from critical interrupt. */
static void gen_rfci(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    /* Restore CPU state */
    gen_helper_rfci(tcg_env);
    ctx->base.is_jmp = DISAS_EXIT;
#endif /* defined(CONFIG_USER_ONLY) */
}

/* BookE specific */

/* rfdi: return from debug interrupt. */
/* XXX: not implemented on 440 ? */
static void gen_rfdi(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    /* Restore CPU state */
    gen_helper_rfdi(tcg_env);
    ctx->base.is_jmp = DISAS_EXIT;
#endif /* defined(CONFIG_USER_ONLY) */
}

/* rfmci: return from machine-check interrupt. */
/* XXX: not implemented on 440 ? */
static void gen_rfmci(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    /* Restore CPU state */
    gen_helper_rfmci(tcg_env);
    ctx->base.is_jmp = DISAS_EXIT;
#endif /* defined(CONFIG_USER_ONLY) */
}

/* TLB management - PowerPC 405 implementation */

/* tlbre: read TLB entry; rB selects which word (0 = hi, 1 = lo). */
static void gen_tlbre_40x(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    switch (rB(ctx->opcode)) {
    case 0:
        gen_helper_4xx_tlbre_hi(cpu_gpr[rD(ctx->opcode)], tcg_env,
                                cpu_gpr[rA(ctx->opcode)]);
        break;
    case 1:
        gen_helper_4xx_tlbre_lo(cpu_gpr[rD(ctx->opcode)], tcg_env,
                                cpu_gpr[rA(ctx->opcode)]);
        break;
    default:
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        break;
    }
#endif /* defined(CONFIG_USER_ONLY) */
}

/* tlbsx - tlbsx.
*/
/*
 * tlbsx[.]: search the TLB for the effective address in (rA|0)+rB and put
 * the matching entry index in rD (-1 from the helper means "not found").
 * With Rc set, CR0 is built from XER.SO and CR0[EQ] is set iff a match
 * was found (rD != -1).
 */
static void gen_tlbsx_40x(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    gen_helper_4xx_tlbsx(cpu_gpr[rD(ctx->opcode)], tcg_env, t0);
    if (Rc(ctx->opcode)) {
        TCGLabel *l1 = gen_new_label();
        tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);
        tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_gpr[rD(ctx->opcode)], -1, l1);
        tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], 0x02);
        gen_set_label(l1);
    }
#endif /* defined(CONFIG_USER_ONLY) */
}

/* tlbwe: write TLB entry word selected by rB (0 = hi, 1 = lo). */
static void gen_tlbwe_40x(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);

    switch (rB(ctx->opcode)) {
    case 0:
        gen_helper_4xx_tlbwe_hi(tcg_env, cpu_gpr[rA(ctx->opcode)],
                                cpu_gpr[rS(ctx->opcode)]);
        break;
    case 1:
        gen_helper_4xx_tlbwe_lo(tcg_env, cpu_gpr[rA(ctx->opcode)],
                                cpu_gpr[rS(ctx->opcode)]);
        break;
    default:
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        break;
    }
#endif /* defined(CONFIG_USER_ONLY) */
}

/* TLB management - PowerPC 440 implementation */

/* tlbre: read TLB entry; rB selects one of three entry words (0..2). */
static void gen_tlbre_440(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);

    switch (rB(ctx->opcode)) {
    case 0:
    case 1:
    case 2:
        {
            TCGv_i32 t0 = tcg_constant_i32(rB(ctx->opcode));
            gen_helper_440_tlbre(cpu_gpr[rD(ctx->opcode)], tcg_env,
                                 t0, cpu_gpr[rA(ctx->opcode)]);
        }
        break;
    default:
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        break;
    }
#endif /* defined(CONFIG_USER_ONLY) */
}

/* tlbsx - tlbsx. */
/* Same CR0 convention as the 40x variant above. */
static void gen_tlbsx_440(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    gen_helper_440_tlbsx(cpu_gpr[rD(ctx->opcode)], tcg_env, t0);
    if (Rc(ctx->opcode)) {
        TCGLabel *l1 = gen_new_label();
        tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);
        tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_gpr[rD(ctx->opcode)], -1, l1);
        tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], 0x02);
        gen_set_label(l1);
    }
#endif /* defined(CONFIG_USER_ONLY) */
}

/* tlbwe: write TLB entry word selected by rB (0..2). */
static void gen_tlbwe_440(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    switch (rB(ctx->opcode)) {
    case 0:
    case 1:
    case 2:
        {
            TCGv_i32 t0 = tcg_constant_i32(rB(ctx->opcode));
            gen_helper_440_tlbwe(tcg_env, t0, cpu_gpr[rA(ctx->opcode)],
                                 cpu_gpr[rS(ctx->opcode)]);
        }
        break;
    default:
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        break;
    }
#endif /* defined(CONFIG_USER_ONLY) */
}

/* TLB management - PowerPC BookE 2.06 implementation */

/* tlbre: entry selection comes from MAS registers, so no operands here. */
static void gen_tlbre_booke206(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    gen_helper_booke206_tlbre(tcg_env);
#endif /* defined(CONFIG_USER_ONLY) */
}

/* tlbsx - tlbsx. */
/* EA is (rA|0)+rB; results are reported through the MAS registers. */
static void gen_tlbsx_booke206(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    if (rA(ctx->opcode)) {
        t0 = tcg_temp_new();
        tcg_gen_add_tl(t0, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
    } else {
        /* rA == 0 means a zero base, i.e. EA = rB */
        t0 = cpu_gpr[rB(ctx->opcode)];
    }
    gen_helper_booke206_tlbsx(tcg_env, t0);
#endif /* defined(CONFIG_USER_ONLY) */
}

/* tlbwe: write the entry described by the MAS registers. */
static void gen_tlbwe_booke206(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    gen_helper_booke206_tlbwe(tcg_env);
#endif /* defined(CONFIG_USER_ONLY) */
}

/* tlbivax: invalidate TLB entries matching the EA. */
static void gen_tlbivax_booke206(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    gen_helper_booke206_tlbivax(tcg_env, t0);
#endif /* defined(CONFIG_USER_ONLY) */
}

/* tlbilx: local TLB invalidate; bits 21-22 of the opcode select the scope. */
static void gen_tlbilx_booke206(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);

    switch ((ctx->opcode >> 21) & 0x3) {
    case 0:
        gen_helper_booke206_tlbilx0(tcg_env, t0);
        break;
    case 1:
        gen_helper_booke206_tlbilx1(tcg_env, t0);
        break;
    case 3:
        gen_helper_booke206_tlbilx3(tcg_env, t0);
        break;
    default:
        /* T == 2 is reserved */
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        break;
    }
#endif /* defined(CONFIG_USER_ONLY) */
}

/* wrtee: copy bit MSR_EE of rD into MSR[EE], leaving other bits alone. */
static void gen_wrtee(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_temp_new();
    tcg_gen_andi_tl(t0, cpu_gpr[rD(ctx->opcode)], (1 << MSR_EE));
    tcg_gen_andi_tl(cpu_msr, cpu_msr, ~(1 <<
MSR_EE));
    tcg_gen_or_tl(cpu_msr, cpu_msr, t0);
    gen_ppc_maybe_interrupt(ctx);
    /*
     * Stop translation to have a chance to raise an exception if we
     * just set msr_ee to 1
     */
    ctx->base.is_jmp = DISAS_EXIT_UPDATE;
#endif /* defined(CONFIG_USER_ONLY) */
}

/* wrteei: set or clear MSR[EE] from the E bit (0x8000) of the opcode. */
static void gen_wrteei(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    if (ctx->opcode & 0x00008000) {
        tcg_gen_ori_tl(cpu_msr, cpu_msr, (1 << MSR_EE));
        gen_ppc_maybe_interrupt(ctx);
        /* Stop translation to have a chance to raise an exception */
        ctx->base.is_jmp = DISAS_EXIT_UPDATE;
    } else {
        /* Clearing EE cannot unmask an interrupt: no TB exit needed */
        tcg_gen_andi_tl(cpu_msr, cpu_msr, ~(1 << MSR_EE));
    }
#endif /* defined(CONFIG_USER_ONLY) */
}

/* PowerPC 440 specific instructions */

/* dlmzb: determine leftmost zero byte of rS:rB; all work in the helper. */
static void gen_dlmzb(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_constant_i32(Rc(ctx->opcode));
    gen_helper_dlmzb(cpu_gpr[rA(ctx->opcode)], tcg_env,
                     cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0);
}

/* mbar replaces eieio on 440 */
static void gen_mbar(DisasContext *ctx)
{
    /* interpreted as no-op */
}

/* msync replaces sync on 440 */
static void gen_msync_4xx(DisasContext *ctx)
{
    /* Only e500 seems to treat reserved bits as invalid */
    if ((ctx->insns_flags2 & PPC2_BOOKE206) &&
        (ctx->opcode & 0x03FFF801)) {
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
    }
    /* otherwise interpreted as no-op */
}

/* icbt */
static void gen_icbt_440(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     * XXX: specification say this is treated as a load by the MMU but
     * does not generate any exception
     */
}

#if defined(TARGET_PPC64)
/* maddld: rD = low 64 bits of (rA * rB) + rC. */
static void gen_maddld(DisasContext *ctx)
{
    TCGv_i64 t1 = tcg_temp_new_i64();

    tcg_gen_mul_i64(t1, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_add_i64(cpu_gpr[rD(ctx->opcode)], t1, cpu_gpr[rC(ctx->opcode)]);
}

/* maddhd maddhdu */
/*
 * rD = high 64 bits of (rA * rB) + rC using a 128-bit intermediate.
 * The Rc opcode bit distinguishes the unsigned form (maddhdu); for the
 * signed form rC is sign-extended into the high half before the add.
 */
static void gen_maddhd_maddhdu(DisasContext *ctx)
{
    TCGv_i64 lo = tcg_temp_new_i64();
    TCGv_i64 hi = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    if (Rc(ctx->opcode)) {
        tcg_gen_mulu2_i64(lo, hi, cpu_gpr[rA(ctx->opcode)],
                          cpu_gpr[rB(ctx->opcode)]);
        tcg_gen_movi_i64(t1, 0);
    } else {
        tcg_gen_muls2_i64(lo, hi, cpu_gpr[rA(ctx->opcode)],
                          cpu_gpr[rB(ctx->opcode)]);
        tcg_gen_sari_i64(t1, cpu_gpr[rC(ctx->opcode)], 63);
    }
    tcg_gen_add2_i64(t1, cpu_gpr[rD(ctx->opcode)], lo, hi,
                     cpu_gpr[rC(ctx->opcode)], t1);
}
#endif /* defined(TARGET_PPC64) */

/* tbegin: raises facility-unavailable if TM is disabled, else helper. */
static void gen_tbegin(DisasContext *ctx)
{
    if (unlikely(!ctx->tm_enabled)) {
        gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_TM);
        return;
    }
    gen_helper_tbegin(tcg_env);
}

#define GEN_TM_NOOP(name)                                                     \
static inline void gen_##name(DisasContext *ctx)                              \
{                                                                             \
    if (unlikely(!ctx->tm_enabled)) {                                         \
        gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_TM);                  \
        return;                                                               \
    }                                                                         \
    /*                                                                        \
     * Because tbegin always fails in QEMU, these user                        \
     * space instructions all have a simple implementation:                   \
     *                                                                        \
     *     CR[0] = 0b0 || MSR[TS] || 0b0                                      \
     *           = 0b0 || 0b00    || 0b0                                      \
     */                                                                       \
    tcg_gen_movi_i32(cpu_crf[0], 0);                                          \
}

GEN_TM_NOOP(tend);
GEN_TM_NOOP(tabort);
GEN_TM_NOOP(tabortwc);
GEN_TM_NOOP(tabortwci);
GEN_TM_NOOP(tabortdc);
GEN_TM_NOOP(tabortdci);
GEN_TM_NOOP(tsr);

static inline void gen_cp_abort(DisasContext *ctx)
{
    /* Do Nothing */
}

#define GEN_CP_PASTE_NOOP(name)                                               \
static inline void gen_##name(DisasContext *ctx)                              \
{                                                                             \
    /*                                                                        \
     * Generate invalid exception until we have an                            \
     * implementation of the copy paste facility                              \
     */                                                                       \
    gen_invalid(ctx);                                                         \
}

GEN_CP_PASTE_NOOP(copy)
GEN_CP_PASTE_NOOP(paste)

static void gen_tcheck(DisasContext *ctx)
{
    if (unlikely(!ctx->tm_enabled)) {
        gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_TM);
        return;
    }
    /*
     * Because tbegin always fails, the tcheck implementation is
     * simple:
     *
     * CR[CRF] = TDOOMED || MSR[TS] || 0b0
     *         = 0b1 || 0b00 || 0b0
     */
    tcg_gen_movi_i32(cpu_crf[crfD(ctx->opcode)], 0x8);
}

#if defined(CONFIG_USER_ONLY)
#define GEN_TM_PRIV_NOOP(name)                                                \
static inline void gen_##name(DisasContext *ctx)                              \
{                                                                             \
    gen_priv_opc(ctx);                                                        \
}

#else

#define GEN_TM_PRIV_NOOP(name)                                                \
static inline void gen_##name(DisasContext *ctx)                              \
{                                                                             \
    CHK_SV(ctx);                                                              \
    if (unlikely(!ctx->tm_enabled)) {                                         \
        gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_TM);                  \
        return;                                                               \
    }                                                                         \
    /*                                                                        \
     * Because tbegin always fails, the implementation is                     \
     * simple:                                                                \
     *                                                                        \
     *     CR[0] = 0b0 || MSR[TS] || 0b0                                      \
     *           = 0b0 || 0b00 |   0b0                                        \
     */                                                                       \
    tcg_gen_movi_i32(cpu_crf[0], 0);                                          \
}

#endif

GEN_TM_PRIV_NOOP(treclaim);
GEN_TM_PRIV_NOOP(trechkpt);

/* Load FPR regno from env into dst. */
static inline void get_fpr(TCGv_i64 dst, int regno)
{
    tcg_gen_ld_i64(dst, tcg_env, fpr_offset(regno));
}

/* Store src into FPR regno, zeroing doubleword 1 of the backing VSR. */
static inline void set_fpr(int regno, TCGv_i64 src)
{
    tcg_gen_st_i64(src, tcg_env, fpr_offset(regno));
    /*
     * Before PowerISA v3.1 the result of doubleword 1 of the VSR
     * corresponding to the target FPR was undefined. However,
     * most (if not all) real hardware were setting the result to 0.
     * Starting at ISA v3.1, the result for doubleword 1 is now defined
     * to be 0.
     */
    tcg_gen_st_i64(tcg_constant_i64(0), tcg_env, vsr64_offset(regno, false));
}

/* Load the high or low doubleword of AVR regno into dst. */
static inline void get_avr64(TCGv_i64 dst, int regno, bool high)
{
    tcg_gen_ld_i64(dst, tcg_env, avr64_offset(regno, high));
}

/* Store src into the high or low doubleword of AVR regno. */
static inline void set_avr64(int regno, TCGv_i64 src, bool high)
{
    tcg_gen_st_i64(src, tcg_env, avr64_offset(regno, high));
}

/*
 * Helpers for decodetree used by !function for decoding arguments.
 */
static int times_2(DisasContext *ctx, int x)
{
    return x * 2;
}

static int times_4(DisasContext *ctx, int x)
{
    return x * 4;
}

static int times_16(DisasContext *ctx, int x)
{
    return x * 16;
}

/*
 * Compose an effective-address constant: the 6-bit field x is deposited
 * into bits 3..8 of 0xfffffffffffffe00 (a negative, 8-byte-aligned
 * displacement; presumably for the hash-store/check insns — confirm at
 * the decodetree use site).
 */
static int64_t dw_compose_ea(DisasContext *ctx, int x)
{
    return deposit64(0xfffffffffffffe00, 3, 6, x);
}

/*
 * Helpers for trans_* functions to check for specific insns flags.
 * Use token pasting to ensure that we use the proper flag with the
 * proper variable.
 */
#define REQUIRE_INSNS_FLAGS(CTX, NAME)                                        \
    do {                                                                      \
        if (((CTX)->insns_flags & PPC_##NAME) == 0) {                         \
            return false;                                                     \
        }                                                                     \
    } while (0)

#define REQUIRE_INSNS_FLAGS2(CTX, NAME)                                       \
    do {                                                                      \
        if (((CTX)->insns_flags2 & PPC2_##NAME) == 0) {                       \
            return false;                                                     \
        }                                                                     \
    } while (0)

/* Then special-case the check for 64-bit so that we elide code for ppc32. */
#if TARGET_LONG_BITS == 32
# define REQUIRE_64BIT(CTX)  return false
#else
# define REQUIRE_64BIT(CTX)  REQUIRE_INSNS_FLAGS(CTX, 64B)
#endif

#define REQUIRE_VECTOR(CTX)                                                   \
    do {                                                                      \
        if (unlikely(!(CTX)->altivec_enabled)) {                              \
            gen_exception((CTX), POWERPC_EXCP_VPU);                           \
            return true;                                                      \
        }                                                                     \
    } while (0)

#define REQUIRE_VSX(CTX)                                                      \
    do {                                                                      \
        if (unlikely(!(CTX)->vsx_enabled)) {                                  \
            gen_exception((CTX), POWERPC_EXCP_VSXU);                          \
            return true;                                                      \
        }                                                                     \
    } while (0)

#define REQUIRE_FPU(ctx)                                                      \
    do {                                                                      \
        if (unlikely(!(ctx)->fpu_enabled)) {                                  \
            gen_exception((ctx), POWERPC_EXCP_FPU);                           \
            return true;                                                      \
        }                                                                     \
    } while (0)

#if !defined(CONFIG_USER_ONLY)
#define REQUIRE_SV(CTX)                                                       \
    do {                                                                      \
        if (unlikely((CTX)->pr)) {                                            \
            gen_priv_opc(CTX);                                                \
            return true;                                                      \
        }                                                                     \
    } while (0)

#define REQUIRE_HV(CTX)                                                       \
    do {                                                                      \
        if (unlikely((CTX)->pr || !(CTX)->hv)) {                              \
            gen_priv_opc(CTX);                                                \
            return true;                                                      \
        }                                                                     \
    } while (0)
#else
#define REQUIRE_SV(CTX) do { gen_priv_opc(CTX); return true; } while (0)
#define REQUIRE_HV(CTX) do { gen_priv_opc(CTX); return true; } while (0)
#endif

/*
 * Helpers for implementing sets of trans_* functions.
 * Defer the implementation of NAME to FUNC, with optional extra arguments.
 */
#define TRANS(NAME, FUNC, ...)                                                \
    static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a)                \
    { return FUNC(ctx, a, __VA_ARGS__); }
#define TRANS_FLAGS(FLAGS, NAME, FUNC, ...)                                   \
    static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a)                \
    {                                                                         \
        REQUIRE_INSNS_FLAGS(ctx, FLAGS);                                      \
        return FUNC(ctx, a, __VA_ARGS__);                                     \
    }
#define TRANS_FLAGS2(FLAGS2, NAME, FUNC, ...)                                 \
    static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a)                \
    {                                                                         \
        REQUIRE_INSNS_FLAGS2(ctx, FLAGS2);                                    \
        return FUNC(ctx, a, __VA_ARGS__);                                     \
    }

#define TRANS64(NAME, FUNC, ...)                                              \
    static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a)                \
    { REQUIRE_64BIT(ctx); return FUNC(ctx, a, __VA_ARGS__); }
#define TRANS64_FLAGS2(FLAGS2, NAME, FUNC, ...)                               \
    static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a)                \
    {                                                                         \
        REQUIRE_64BIT(ctx);                                                   \
        REQUIRE_INSNS_FLAGS2(ctx, FLAGS2);                                    \
        return FUNC(ctx, a, __VA_ARGS__);                                     \
    }

/* TODO: More TRANS* helpers for extra insn_flags checks. */


#include "decode-insn32.c.inc"
#include "decode-insn64.c.inc"
#include "power8-pmu-regs.c.inc"

/*
 * Incorporate CIA into the constant when R=1.
 * Validate that when R=1, RA=0.
 */
static bool resolve_PLS_D(DisasContext *ctx, arg_D *d, arg_PLS_D *a)
{
    d->rt = a->rt;
    d->ra = a->ra;
    d->si = a->si;
    if (a->r) {
        if (unlikely(a->ra != 0)) {
            gen_invalid(ctx);
            return false;
        }
        /* PC-relative addressing: fold the current insn address in */
        d->si += ctx->cia;
    }
    return true;
}

#include "translate/fixedpoint-impl.c.inc"

#include "translate/fp-impl.c.inc"

#include "translate/vmx-impl.c.inc"

#include "translate/vsx-impl.c.inc"

#include "translate/dfp-impl.c.inc"

#include "translate/spe-impl.c.inc"

#include "translate/branch-impl.c.inc"

#include "translate/processor-ctrl-impl.c.inc"

#include "translate/storage-ctrl-impl.c.inc"

/* Handles lfdp */
/* Opcode 0x39 is shared; the low 2 bits select the actual insn. */
static void gen_dform39(DisasContext *ctx)
{
    if ((ctx->opcode & 0x3) == 0) {
        if (ctx->insns_flags2 & PPC2_ISA205) {
            return gen_lfdp(ctx);
        }
    }
    return gen_invalid(ctx);
}

/* Handles stfdp */
static void gen_dform3D(DisasContext *ctx)
{
    if ((ctx->opcode & 3) == 0) { /*
DS-FORM */
        /* stfdp */
        if (ctx->insns_flags2 & PPC2_ISA205) {
            return gen_stfdp(ctx);
        }
    }
    return gen_invalid(ctx);
}

#if defined(TARGET_PPC64)
/* brd: byte-reverse doubleword (rA = bswap64(rS)). */
static void gen_brd(DisasContext *ctx)
{
    tcg_gen_bswap64_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
}

/*
 * brw: byte-reverse each word.  A full bswap64 reverses bytes AND swaps
 * the two words; the 32-bit rotate puts the words back in place.
 */
static void gen_brw(DisasContext *ctx)
{
    tcg_gen_bswap64_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
    tcg_gen_rotli_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 32);

}

/*
 * brh: byte-reverse each halfword — swap adjacent bytes by masking the
 * even/odd byte lanes, shifting each by 8, and OR-ing them together.
 */
static void gen_brh(DisasContext *ctx)
{
    TCGv_i64 mask = tcg_constant_i64(0x00ff00ff00ff00ffull);
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_shri_i64(t1, cpu_gpr[rS(ctx->opcode)], 8);
    tcg_gen_and_i64(t2, t1, mask);
    tcg_gen_and_i64(t1, cpu_gpr[rS(ctx->opcode)], mask);
    tcg_gen_shli_i64(t1, t1, 8);
    tcg_gen_or_i64(cpu_gpr[rA(ctx->opcode)], t1, t2);
}
#endif

/*
 * Opcode table (continues below): one entry per insn giving its opcode
 * fields, an invalid-bits mask, and the insn-flags that gate it.
 */
static opcode_t opcodes[] = {
#if defined(TARGET_PPC64)
GEN_HANDLER_E(brd, 0x1F, 0x1B, 0x05, 0x0000F801, PPC_NONE, PPC2_ISA310),
GEN_HANDLER_E(brw, 0x1F, 0x1B, 0x04, 0x0000F801, PPC_NONE, PPC2_ISA310),
GEN_HANDLER_E(brh, 0x1F, 0x1B, 0x06, 0x0000F801, PPC_NONE, PPC2_ISA310),
#endif
GEN_HANDLER(invalid, 0x00, 0x00, 0x00, 0xFFFFFFFF, PPC_NONE),
#if defined(TARGET_PPC64)
GEN_HANDLER_E(cmpeqb, 0x1F, 0x00, 0x07, 0x00600000, PPC_NONE, PPC2_ISA300),
#endif
GEN_HANDLER_E(cmpb, 0x1F, 0x1C, 0x0F, 0x00000001, PPC_NONE, PPC2_ISA205),
GEN_HANDLER_E(cmprb, 0x1F, 0x00, 0x06, 0x00400001, PPC_NONE, PPC2_ISA300),
GEN_HANDLER(isel, 0x1F, 0x0F, 0xFF, 0x00000001, PPC_ISEL),
GEN_HANDLER(mulhw, 0x1F, 0x0B, 0x02, 0x00000400, PPC_INTEGER),
GEN_HANDLER(mulhwu, 0x1F, 0x0B, 0x00, 0x00000400, PPC_INTEGER),
GEN_HANDLER(mullw, 0x1F, 0x0B, 0x07, 0x00000000, PPC_INTEGER),
GEN_HANDLER(mullwo, 0x1F, 0x0B, 0x17, 0x00000000, PPC_INTEGER), 6436 GEN_HANDLER(mulli, 0x07, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), 6437 #if defined(TARGET_PPC64) 6438 GEN_HANDLER(mulld, 0x1F, 0x09, 0x07, 0x00000000, PPC_64B), 6439 #endif 6440 GEN_HANDLER(neg, 0x1F, 0x08, 0x03, 0x0000F800, PPC_INTEGER), 6441 GEN_HANDLER(nego, 0x1F, 0x08, 0x13, 0x0000F800, PPC_INTEGER), 6442 GEN_HANDLER2(andi_, "andi.", 0x1C, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), 6443 GEN_HANDLER2(andis_, "andis.", 0x1D, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), 6444 GEN_HANDLER(cntlzw, 0x1F, 0x1A, 0x00, 0x00000000, PPC_INTEGER), 6445 GEN_HANDLER_E(cnttzw, 0x1F, 0x1A, 0x10, 0x00000000, PPC_NONE, PPC2_ISA300), 6446 GEN_HANDLER_E(copy, 0x1F, 0x06, 0x18, 0x03C00001, PPC_NONE, PPC2_ISA300), 6447 GEN_HANDLER_E(cp_abort, 0x1F, 0x06, 0x1A, 0x03FFF801, PPC_NONE, PPC2_ISA300), 6448 GEN_HANDLER_E(paste, 0x1F, 0x06, 0x1C, 0x03C00000, PPC_NONE, PPC2_ISA300), 6449 GEN_HANDLER(or, 0x1F, 0x1C, 0x0D, 0x00000000, PPC_INTEGER), 6450 GEN_HANDLER(xor, 0x1F, 0x1C, 0x09, 0x00000000, PPC_INTEGER), 6451 GEN_HANDLER(ori, 0x18, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), 6452 GEN_HANDLER(oris, 0x19, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), 6453 GEN_HANDLER(xori, 0x1A, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), 6454 GEN_HANDLER(xoris, 0x1B, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), 6455 GEN_HANDLER(popcntb, 0x1F, 0x1A, 0x03, 0x0000F801, PPC_POPCNTB), 6456 GEN_HANDLER(popcntw, 0x1F, 0x1A, 0x0b, 0x0000F801, PPC_POPCNTWD), 6457 GEN_HANDLER_E(prtyw, 0x1F, 0x1A, 0x04, 0x0000F801, PPC_NONE, PPC2_ISA205), 6458 #if defined(TARGET_PPC64) 6459 GEN_HANDLER(popcntd, 0x1F, 0x1A, 0x0F, 0x0000F801, PPC_POPCNTWD), 6460 GEN_HANDLER(cntlzd, 0x1F, 0x1A, 0x01, 0x00000000, PPC_64B), 6461 GEN_HANDLER_E(cnttzd, 0x1F, 0x1A, 0x11, 0x00000000, PPC_NONE, PPC2_ISA300), 6462 GEN_HANDLER_E(darn, 0x1F, 0x13, 0x17, 0x001CF801, PPC_NONE, PPC2_ISA300), 6463 GEN_HANDLER_E(prtyd, 0x1F, 0x1A, 0x05, 0x0000F801, PPC_NONE, PPC2_ISA205), 6464 GEN_HANDLER_E(bpermd, 0x1F, 0x1C, 0x07, 
0x00000001, PPC_NONE, PPC2_PERM_ISA206), 6465 #endif 6466 GEN_HANDLER(rlwimi, 0x14, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), 6467 GEN_HANDLER(rlwinm, 0x15, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), 6468 GEN_HANDLER(rlwnm, 0x17, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), 6469 GEN_HANDLER(slw, 0x1F, 0x18, 0x00, 0x00000000, PPC_INTEGER), 6470 GEN_HANDLER(sraw, 0x1F, 0x18, 0x18, 0x00000000, PPC_INTEGER), 6471 GEN_HANDLER(srawi, 0x1F, 0x18, 0x19, 0x00000000, PPC_INTEGER), 6472 GEN_HANDLER(srw, 0x1F, 0x18, 0x10, 0x00000000, PPC_INTEGER), 6473 #if defined(TARGET_PPC64) 6474 GEN_HANDLER(sld, 0x1F, 0x1B, 0x00, 0x00000000, PPC_64B), 6475 GEN_HANDLER(srad, 0x1F, 0x1A, 0x18, 0x00000000, PPC_64B), 6476 GEN_HANDLER2(sradi0, "sradi", 0x1F, 0x1A, 0x19, 0x00000000, PPC_64B), 6477 GEN_HANDLER2(sradi1, "sradi", 0x1F, 0x1B, 0x19, 0x00000000, PPC_64B), 6478 GEN_HANDLER(srd, 0x1F, 0x1B, 0x10, 0x00000000, PPC_64B), 6479 GEN_HANDLER2_E(extswsli0, "extswsli", 0x1F, 0x1A, 0x1B, 0x00000000, 6480 PPC_NONE, PPC2_ISA300), 6481 GEN_HANDLER2_E(extswsli1, "extswsli", 0x1F, 0x1B, 0x1B, 0x00000000, 6482 PPC_NONE, PPC2_ISA300), 6483 #endif 6484 /* handles lfdp, lxsd, lxssp */ 6485 GEN_HANDLER_E(dform39, 0x39, 0xFF, 0xFF, 0x00000000, PPC_NONE, PPC2_ISA205), 6486 /* handles stfdp, stxsd, stxssp */ 6487 GEN_HANDLER_E(dform3D, 0x3D, 0xFF, 0xFF, 0x00000000, PPC_NONE, PPC2_ISA205), 6488 GEN_HANDLER(lmw, 0x2E, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), 6489 GEN_HANDLER(stmw, 0x2F, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), 6490 GEN_HANDLER(lswi, 0x1F, 0x15, 0x12, 0x00000001, PPC_STRING), 6491 GEN_HANDLER(lswx, 0x1F, 0x15, 0x10, 0x00000001, PPC_STRING), 6492 GEN_HANDLER(stswi, 0x1F, 0x15, 0x16, 0x00000001, PPC_STRING), 6493 GEN_HANDLER(stswx, 0x1F, 0x15, 0x14, 0x00000001, PPC_STRING), 6494 GEN_HANDLER(eieio, 0x1F, 0x16, 0x1A, 0x01FFF801, PPC_MEM_EIEIO), 6495 GEN_HANDLER(isync, 0x13, 0x16, 0x04, 0x03FFF801, PPC_MEM), 6496 GEN_HANDLER_E(lbarx, 0x1F, 0x14, 0x01, 0, PPC_NONE, PPC2_ATOMIC_ISA206), 6497 GEN_HANDLER_E(lharx, 0x1F, 0x14, 
0x03, 0, PPC_NONE, PPC2_ATOMIC_ISA206), 6498 GEN_HANDLER(lwarx, 0x1F, 0x14, 0x00, 0x00000000, PPC_RES), 6499 GEN_HANDLER_E(lwat, 0x1F, 0x06, 0x12, 0x00000001, PPC_NONE, PPC2_ISA300), 6500 GEN_HANDLER_E(stwat, 0x1F, 0x06, 0x16, 0x00000001, PPC_NONE, PPC2_ISA300), 6501 GEN_HANDLER_E(stbcx_, 0x1F, 0x16, 0x15, 0, PPC_NONE, PPC2_ATOMIC_ISA206), 6502 GEN_HANDLER_E(sthcx_, 0x1F, 0x16, 0x16, 0, PPC_NONE, PPC2_ATOMIC_ISA206), 6503 GEN_HANDLER2(stwcx_, "stwcx.", 0x1F, 0x16, 0x04, 0x00000000, PPC_RES), 6504 #if defined(TARGET_PPC64) 6505 GEN_HANDLER_E(ldat, 0x1F, 0x06, 0x13, 0x00000001, PPC_NONE, PPC2_ISA300), 6506 GEN_HANDLER_E(stdat, 0x1F, 0x06, 0x17, 0x00000001, PPC_NONE, PPC2_ISA300), 6507 GEN_HANDLER(ldarx, 0x1F, 0x14, 0x02, 0x00000000, PPC_64B), 6508 GEN_HANDLER_E(lqarx, 0x1F, 0x14, 0x08, 0, PPC_NONE, PPC2_LSQ_ISA207), 6509 GEN_HANDLER2(stdcx_, "stdcx.", 0x1F, 0x16, 0x06, 0x00000000, PPC_64B), 6510 GEN_HANDLER_E(stqcx_, 0x1F, 0x16, 0x05, 0, PPC_NONE, PPC2_LSQ_ISA207), 6511 #endif 6512 GEN_HANDLER(sync, 0x1F, 0x16, 0x12, 0x039FF801, PPC_MEM_SYNC), 6513 /* ISA v3.0 changed the extended opcode from 62 to 30 */ 6514 GEN_HANDLER(wait, 0x1F, 0x1E, 0x01, 0x039FF801, PPC_WAIT), 6515 GEN_HANDLER_E(wait, 0x1F, 0x1E, 0x00, 0x039CF801, PPC_NONE, PPC2_ISA300), 6516 GEN_HANDLER(b, 0x12, 0xFF, 0xFF, 0x00000000, PPC_FLOW), 6517 GEN_HANDLER(bc, 0x10, 0xFF, 0xFF, 0x00000000, PPC_FLOW), 6518 GEN_HANDLER(bcctr, 0x13, 0x10, 0x10, 0x00000000, PPC_FLOW), 6519 GEN_HANDLER(bclr, 0x13, 0x10, 0x00, 0x00000000, PPC_FLOW), 6520 GEN_HANDLER_E(bctar, 0x13, 0x10, 0x11, 0x0000E000, PPC_NONE, PPC2_BCTAR_ISA207), 6521 GEN_HANDLER(mcrf, 0x13, 0x00, 0xFF, 0x00000001, PPC_INTEGER), 6522 GEN_HANDLER(rfi, 0x13, 0x12, 0x01, 0x03FF8001, PPC_FLOW), 6523 #if defined(TARGET_PPC64) 6524 GEN_HANDLER(rfid, 0x13, 0x12, 0x00, 0x03FF8001, PPC_64B), 6525 #if !defined(CONFIG_USER_ONLY) 6526 /* Top bit of opc2 corresponds with low bit of LEV, so use two handlers */ 6527 GEN_HANDLER_E(scv, 0x11, 0x10, 0xFF, 0x03FFF01E, 
PPC_NONE, PPC2_ISA300), 6528 GEN_HANDLER_E(scv, 0x11, 0x00, 0xFF, 0x03FFF01E, PPC_NONE, PPC2_ISA300), 6529 GEN_HANDLER_E(rfscv, 0x13, 0x12, 0x02, 0x03FF8001, PPC_NONE, PPC2_ISA300), 6530 #endif 6531 GEN_HANDLER_E(stop, 0x13, 0x12, 0x0b, 0x03FFF801, PPC_NONE, PPC2_ISA300), 6532 GEN_HANDLER_E(doze, 0x13, 0x12, 0x0c, 0x03FFF801, PPC_NONE, PPC2_PM_ISA206), 6533 GEN_HANDLER_E(nap, 0x13, 0x12, 0x0d, 0x03FFF801, PPC_NONE, PPC2_PM_ISA206), 6534 GEN_HANDLER_E(sleep, 0x13, 0x12, 0x0e, 0x03FFF801, PPC_NONE, PPC2_PM_ISA206), 6535 GEN_HANDLER_E(rvwinkle, 0x13, 0x12, 0x0f, 0x03FFF801, PPC_NONE, PPC2_PM_ISA206), 6536 GEN_HANDLER(hrfid, 0x13, 0x12, 0x08, 0x03FF8001, PPC_64H), 6537 #endif 6538 /* Top bit of opc2 corresponds with low bit of LEV, so use two handlers */ 6539 GEN_HANDLER(sc, 0x11, 0x11, 0xFF, 0x03FFF01D, PPC_FLOW), 6540 GEN_HANDLER(sc, 0x11, 0x01, 0xFF, 0x03FFF01D, PPC_FLOW), 6541 GEN_HANDLER(tw, 0x1F, 0x04, 0x00, 0x00000001, PPC_FLOW), 6542 GEN_HANDLER(twi, 0x03, 0xFF, 0xFF, 0x00000000, PPC_FLOW), 6543 #if defined(TARGET_PPC64) 6544 GEN_HANDLER(td, 0x1F, 0x04, 0x02, 0x00000001, PPC_64B), 6545 GEN_HANDLER(tdi, 0x02, 0xFF, 0xFF, 0x00000000, PPC_64B), 6546 #endif 6547 GEN_HANDLER(mcrxr, 0x1F, 0x00, 0x10, 0x007FF801, PPC_MISC), 6548 GEN_HANDLER(mfcr, 0x1F, 0x13, 0x00, 0x00000801, PPC_MISC), 6549 GEN_HANDLER(mfmsr, 0x1F, 0x13, 0x02, 0x001FF801, PPC_MISC), 6550 GEN_HANDLER(mfspr, 0x1F, 0x13, 0x0A, 0x00000001, PPC_MISC), 6551 GEN_HANDLER(mftb, 0x1F, 0x13, 0x0B, 0x00000001, PPC_MFTB), 6552 GEN_HANDLER(mtcrf, 0x1F, 0x10, 0x04, 0x00000801, PPC_MISC), 6553 #if defined(TARGET_PPC64) 6554 GEN_HANDLER(mtmsrd, 0x1F, 0x12, 0x05, 0x001EF801, PPC_64B), 6555 GEN_HANDLER_E(setb, 0x1F, 0x00, 0x04, 0x0003F801, PPC_NONE, PPC2_ISA300), 6556 GEN_HANDLER_E(mcrxrx, 0x1F, 0x00, 0x12, 0x007FF801, PPC_NONE, PPC2_ISA300), 6557 #endif 6558 GEN_HANDLER(mtmsr, 0x1F, 0x12, 0x04, 0x001EF801, PPC_MISC), 6559 GEN_HANDLER(mtspr, 0x1F, 0x13, 0x0E, 0x00000000, PPC_MISC), 6560 GEN_HANDLER(dcbf, 0x1F, 0x16, 
0x02, 0x03C00001, PPC_CACHE), 6561 GEN_HANDLER_E(dcbfep, 0x1F, 0x1F, 0x03, 0x03C00001, PPC_NONE, PPC2_BOOKE206), 6562 GEN_HANDLER(dcbi, 0x1F, 0x16, 0x0E, 0x03E00001, PPC_CACHE), 6563 GEN_HANDLER(dcbst, 0x1F, 0x16, 0x01, 0x03E00001, PPC_CACHE), 6564 GEN_HANDLER_E(dcbstep, 0x1F, 0x1F, 0x01, 0x03E00001, PPC_NONE, PPC2_BOOKE206), 6565 GEN_HANDLER(dcbt, 0x1F, 0x16, 0x08, 0x00000001, PPC_CACHE), 6566 GEN_HANDLER_E(dcbtep, 0x1F, 0x1F, 0x09, 0x00000001, PPC_NONE, PPC2_BOOKE206), 6567 GEN_HANDLER(dcbtst, 0x1F, 0x16, 0x07, 0x00000001, PPC_CACHE), 6568 GEN_HANDLER_E(dcbtstep, 0x1F, 0x1F, 0x07, 0x00000001, PPC_NONE, PPC2_BOOKE206), 6569 GEN_HANDLER_E(dcbtls, 0x1F, 0x06, 0x05, 0x02000001, PPC_BOOKE, PPC2_BOOKE206), 6570 GEN_HANDLER_E(dcblc, 0x1F, 0x06, 0x0c, 0x02000001, PPC_BOOKE, PPC2_BOOKE206), 6571 GEN_HANDLER(dcbz, 0x1F, 0x16, 0x1F, 0x03C00001, PPC_CACHE_DCBZ), 6572 GEN_HANDLER_E(dcbzep, 0x1F, 0x1F, 0x1F, 0x03C00001, PPC_NONE, PPC2_BOOKE206), 6573 GEN_HANDLER(dst, 0x1F, 0x16, 0x0A, 0x01800001, PPC_ALTIVEC), 6574 GEN_HANDLER(dstst, 0x1F, 0x16, 0x0B, 0x01800001, PPC_ALTIVEC), 6575 GEN_HANDLER(dss, 0x1F, 0x16, 0x19, 0x019FF801, PPC_ALTIVEC), 6576 GEN_HANDLER(icbi, 0x1F, 0x16, 0x1E, 0x03E00001, PPC_CACHE_ICBI), 6577 GEN_HANDLER_E(icbiep, 0x1F, 0x1F, 0x1E, 0x03E00001, PPC_NONE, PPC2_BOOKE206), 6578 GEN_HANDLER(dcba, 0x1F, 0x16, 0x17, 0x03E00001, PPC_CACHE_DCBA), 6579 GEN_HANDLER(mfsr, 0x1F, 0x13, 0x12, 0x0010F801, PPC_SEGMENT), 6580 GEN_HANDLER(mfsrin, 0x1F, 0x13, 0x14, 0x001F0001, PPC_SEGMENT), 6581 GEN_HANDLER(mtsr, 0x1F, 0x12, 0x06, 0x0010F801, PPC_SEGMENT), 6582 GEN_HANDLER(mtsrin, 0x1F, 0x12, 0x07, 0x001F0001, PPC_SEGMENT), 6583 #if defined(TARGET_PPC64) 6584 GEN_HANDLER2(mfsr_64b, "mfsr", 0x1F, 0x13, 0x12, 0x0010F801, PPC_SEGMENT_64B), 6585 GEN_HANDLER2(mfsrin_64b, "mfsrin", 0x1F, 0x13, 0x14, 0x001F0001, 6586 PPC_SEGMENT_64B), 6587 GEN_HANDLER2(mtsr_64b, "mtsr", 0x1F, 0x12, 0x06, 0x0010F801, PPC_SEGMENT_64B), 6588 GEN_HANDLER2(mtsrin_64b, "mtsrin", 0x1F, 0x12, 0x07, 
0x001F0001, 6589 PPC_SEGMENT_64B), 6590 #endif 6591 GEN_HANDLER(tlbia, 0x1F, 0x12, 0x0B, 0x03FFFC01, PPC_MEM_TLBIA), 6592 /* 6593 * XXX Those instructions will need to be handled differently for 6594 * different ISA versions 6595 */ 6596 GEN_HANDLER(tlbsync, 0x1F, 0x16, 0x11, 0x03FFF801, PPC_MEM_TLBSYNC), 6597 GEN_HANDLER(eciwx, 0x1F, 0x16, 0x0D, 0x00000001, PPC_EXTERN), 6598 GEN_HANDLER(ecowx, 0x1F, 0x16, 0x09, 0x00000001, PPC_EXTERN), 6599 GEN_HANDLER2(tlbld_6xx, "tlbld", 0x1F, 0x12, 0x1E, 0x03FF0001, PPC_6xx_TLB), 6600 GEN_HANDLER2(tlbli_6xx, "tlbli", 0x1F, 0x12, 0x1F, 0x03FF0001, PPC_6xx_TLB), 6601 GEN_HANDLER(mfapidi, 0x1F, 0x13, 0x08, 0x0000F801, PPC_MFAPIDI), 6602 GEN_HANDLER(tlbiva, 0x1F, 0x12, 0x18, 0x03FFF801, PPC_TLBIVA), 6603 GEN_HANDLER(mfdcr, 0x1F, 0x03, 0x0A, 0x00000001, PPC_DCR), 6604 GEN_HANDLER(mtdcr, 0x1F, 0x03, 0x0E, 0x00000001, PPC_DCR), 6605 GEN_HANDLER(mfdcrx, 0x1F, 0x03, 0x08, 0x00000000, PPC_DCRX), 6606 GEN_HANDLER(mtdcrx, 0x1F, 0x03, 0x0C, 0x00000000, PPC_DCRX), 6607 GEN_HANDLER(dccci, 0x1F, 0x06, 0x0E, 0x03E00001, PPC_4xx_COMMON), 6608 GEN_HANDLER(dcread, 0x1F, 0x06, 0x0F, 0x00000001, PPC_4xx_COMMON), 6609 GEN_HANDLER2(icbt_40x, "icbt", 0x1F, 0x06, 0x08, 0x03E00001, PPC_40x_ICBT), 6610 GEN_HANDLER(iccci, 0x1F, 0x06, 0x1E, 0x00000001, PPC_4xx_COMMON), 6611 GEN_HANDLER(icread, 0x1F, 0x06, 0x1F, 0x03E00001, PPC_4xx_COMMON), 6612 GEN_HANDLER2(rfci_40x, "rfci", 0x13, 0x13, 0x01, 0x03FF8001, PPC_40x_EXCP), 6613 GEN_HANDLER_E(rfci, 0x13, 0x13, 0x01, 0x03FF8001, PPC_BOOKE, PPC2_BOOKE206), 6614 GEN_HANDLER(rfdi, 0x13, 0x07, 0x01, 0x03FF8001, PPC_RFDI), 6615 GEN_HANDLER(rfmci, 0x13, 0x06, 0x01, 0x03FF8001, PPC_RFMCI), 6616 GEN_HANDLER2(tlbre_40x, "tlbre", 0x1F, 0x12, 0x1D, 0x00000001, PPC_40x_TLB), 6617 GEN_HANDLER2(tlbsx_40x, "tlbsx", 0x1F, 0x12, 0x1C, 0x00000000, PPC_40x_TLB), 6618 GEN_HANDLER2(tlbwe_40x, "tlbwe", 0x1F, 0x12, 0x1E, 0x00000001, PPC_40x_TLB), 6619 GEN_HANDLER2(tlbre_440, "tlbre", 0x1F, 0x12, 0x1D, 0x00000001, PPC_BOOKE), 6620 
GEN_HANDLER2(tlbsx_440, "tlbsx", 0x1F, 0x12, 0x1C, 0x00000000, PPC_BOOKE), 6621 GEN_HANDLER2(tlbwe_440, "tlbwe", 0x1F, 0x12, 0x1E, 0x00000001, PPC_BOOKE), 6622 GEN_HANDLER2_E(tlbre_booke206, "tlbre", 0x1F, 0x12, 0x1D, 0x00000001, 6623 PPC_NONE, PPC2_BOOKE206), 6624 GEN_HANDLER2_E(tlbsx_booke206, "tlbsx", 0x1F, 0x12, 0x1C, 0x00000000, 6625 PPC_NONE, PPC2_BOOKE206), 6626 GEN_HANDLER2_E(tlbwe_booke206, "tlbwe", 0x1F, 0x12, 0x1E, 0x00000001, 6627 PPC_NONE, PPC2_BOOKE206), 6628 GEN_HANDLER2_E(tlbivax_booke206, "tlbivax", 0x1F, 0x12, 0x18, 0x00000001, 6629 PPC_NONE, PPC2_BOOKE206), 6630 GEN_HANDLER2_E(tlbilx_booke206, "tlbilx", 0x1F, 0x12, 0x00, 0x03800001, 6631 PPC_NONE, PPC2_BOOKE206), 6632 GEN_HANDLER(wrtee, 0x1F, 0x03, 0x04, 0x000FFC01, PPC_WRTEE), 6633 GEN_HANDLER(wrteei, 0x1F, 0x03, 0x05, 0x000E7C01, PPC_WRTEE), 6634 GEN_HANDLER(dlmzb, 0x1F, 0x0E, 0x02, 0x00000000, PPC_440_SPEC), 6635 GEN_HANDLER_E(mbar, 0x1F, 0x16, 0x1a, 0x001FF801, 6636 PPC_BOOKE, PPC2_BOOKE206), 6637 GEN_HANDLER(msync_4xx, 0x1F, 0x16, 0x12, 0x039FF801, PPC_BOOKE), 6638 GEN_HANDLER2_E(icbt_440, "icbt", 0x1F, 0x16, 0x00, 0x03E00001, 6639 PPC_BOOKE, PPC2_BOOKE206), 6640 GEN_HANDLER2(icbt_440, "icbt", 0x1F, 0x06, 0x08, 0x03E00001, 6641 PPC_440_SPEC), 6642 GEN_HANDLER(lvsl, 0x1f, 0x06, 0x00, 0x00000001, PPC_ALTIVEC), 6643 GEN_HANDLER(lvsr, 0x1f, 0x06, 0x01, 0x00000001, PPC_ALTIVEC), 6644 GEN_HANDLER(mfvscr, 0x04, 0x2, 0x18, 0x001ff800, PPC_ALTIVEC), 6645 GEN_HANDLER(mtvscr, 0x04, 0x2, 0x19, 0x03ff0000, PPC_ALTIVEC), 6646 #if defined(TARGET_PPC64) 6647 GEN_HANDLER_E(maddhd_maddhdu, 0x04, 0x18, 0xFF, 0x00000000, PPC_NONE, 6648 PPC2_ISA300), 6649 GEN_HANDLER_E(maddld, 0x04, 0x19, 0xFF, 0x00000000, PPC_NONE, PPC2_ISA300), 6650 #endif 6651 6652 #undef GEN_INT_ARITH_DIVW 6653 #define GEN_INT_ARITH_DIVW(name, opc3, sign, compute_ov) \ 6654 GEN_HANDLER(name, 0x1F, 0x0B, opc3, 0x00000000, PPC_INTEGER) 6655 GEN_INT_ARITH_DIVW(divwu, 0x0E, 0, 0), 6656 GEN_INT_ARITH_DIVW(divwuo, 0x1E, 0, 1), 6657 
GEN_INT_ARITH_DIVW(divw, 0x0F, 1, 0), 6658 GEN_INT_ARITH_DIVW(divwo, 0x1F, 1, 1), 6659 GEN_HANDLER_E(divwe, 0x1F, 0x0B, 0x0D, 0, PPC_NONE, PPC2_DIVE_ISA206), 6660 GEN_HANDLER_E(divweo, 0x1F, 0x0B, 0x1D, 0, PPC_NONE, PPC2_DIVE_ISA206), 6661 GEN_HANDLER_E(divweu, 0x1F, 0x0B, 0x0C, 0, PPC_NONE, PPC2_DIVE_ISA206), 6662 GEN_HANDLER_E(divweuo, 0x1F, 0x0B, 0x1C, 0, PPC_NONE, PPC2_DIVE_ISA206), 6663 GEN_HANDLER_E(modsw, 0x1F, 0x0B, 0x18, 0x00000001, PPC_NONE, PPC2_ISA300), 6664 GEN_HANDLER_E(moduw, 0x1F, 0x0B, 0x08, 0x00000001, PPC_NONE, PPC2_ISA300), 6665 6666 #if defined(TARGET_PPC64) 6667 #undef GEN_INT_ARITH_DIVD 6668 #define GEN_INT_ARITH_DIVD(name, opc3, sign, compute_ov) \ 6669 GEN_HANDLER(name, 0x1F, 0x09, opc3, 0x00000000, PPC_64B) 6670 GEN_INT_ARITH_DIVD(divdu, 0x0E, 0, 0), 6671 GEN_INT_ARITH_DIVD(divduo, 0x1E, 0, 1), 6672 GEN_INT_ARITH_DIVD(divd, 0x0F, 1, 0), 6673 GEN_INT_ARITH_DIVD(divdo, 0x1F, 1, 1), 6674 6675 GEN_HANDLER_E(divdeu, 0x1F, 0x09, 0x0C, 0, PPC_NONE, PPC2_DIVE_ISA206), 6676 GEN_HANDLER_E(divdeuo, 0x1F, 0x09, 0x1C, 0, PPC_NONE, PPC2_DIVE_ISA206), 6677 GEN_HANDLER_E(divde, 0x1F, 0x09, 0x0D, 0, PPC_NONE, PPC2_DIVE_ISA206), 6678 GEN_HANDLER_E(divdeo, 0x1F, 0x09, 0x1D, 0, PPC_NONE, PPC2_DIVE_ISA206), 6679 GEN_HANDLER_E(modsd, 0x1F, 0x09, 0x18, 0x00000001, PPC_NONE, PPC2_ISA300), 6680 GEN_HANDLER_E(modud, 0x1F, 0x09, 0x08, 0x00000001, PPC_NONE, PPC2_ISA300), 6681 6682 #undef GEN_INT_ARITH_MUL_HELPER 6683 #define GEN_INT_ARITH_MUL_HELPER(name, opc3) \ 6684 GEN_HANDLER(name, 0x1F, 0x09, opc3, 0x00000000, PPC_64B) 6685 GEN_INT_ARITH_MUL_HELPER(mulhdu, 0x00), 6686 GEN_INT_ARITH_MUL_HELPER(mulhd, 0x02), 6687 GEN_INT_ARITH_MUL_HELPER(mulldo, 0x17), 6688 #endif 6689 6690 #undef GEN_LOGICAL1 6691 #undef GEN_LOGICAL2 6692 #define GEN_LOGICAL2(name, tcg_op, opc, type) \ 6693 GEN_HANDLER(name, 0x1F, 0x1C, opc, 0x00000000, type) 6694 #define GEN_LOGICAL1(name, tcg_op, opc, type) \ 6695 GEN_HANDLER(name, 0x1F, 0x1A, opc, 0x00000000, type) 6696 GEN_LOGICAL2(and, 
tcg_gen_and_tl, 0x00, PPC_INTEGER), 6697 GEN_LOGICAL2(andc, tcg_gen_andc_tl, 0x01, PPC_INTEGER), 6698 GEN_LOGICAL2(eqv, tcg_gen_eqv_tl, 0x08, PPC_INTEGER), 6699 GEN_LOGICAL1(extsb, tcg_gen_ext8s_tl, 0x1D, PPC_INTEGER), 6700 GEN_LOGICAL1(extsh, tcg_gen_ext16s_tl, 0x1C, PPC_INTEGER), 6701 GEN_LOGICAL2(nand, tcg_gen_nand_tl, 0x0E, PPC_INTEGER), 6702 GEN_LOGICAL2(nor, tcg_gen_nor_tl, 0x03, PPC_INTEGER), 6703 GEN_LOGICAL2(orc, tcg_gen_orc_tl, 0x0C, PPC_INTEGER), 6704 #if defined(TARGET_PPC64) 6705 GEN_LOGICAL1(extsw, tcg_gen_ext32s_tl, 0x1E, PPC_64B), 6706 #endif 6707 6708 #if defined(TARGET_PPC64) 6709 #undef GEN_PPC64_R2 6710 #undef GEN_PPC64_R4 6711 #define GEN_PPC64_R2(name, opc1, opc2) \ 6712 GEN_HANDLER2(name##0, stringify(name), opc1, opc2, 0xFF, 0x00000000, PPC_64B),\ 6713 GEN_HANDLER2(name##1, stringify(name), opc1, opc2 | 0x10, 0xFF, 0x00000000, \ 6714 PPC_64B) 6715 #define GEN_PPC64_R4(name, opc1, opc2) \ 6716 GEN_HANDLER2(name##0, stringify(name), opc1, opc2, 0xFF, 0x00000000, PPC_64B),\ 6717 GEN_HANDLER2(name##1, stringify(name), opc1, opc2 | 0x01, 0xFF, 0x00000000, \ 6718 PPC_64B), \ 6719 GEN_HANDLER2(name##2, stringify(name), opc1, opc2 | 0x10, 0xFF, 0x00000000, \ 6720 PPC_64B), \ 6721 GEN_HANDLER2(name##3, stringify(name), opc1, opc2 | 0x11, 0xFF, 0x00000000, \ 6722 PPC_64B) 6723 GEN_PPC64_R4(rldicl, 0x1E, 0x00), 6724 GEN_PPC64_R4(rldicr, 0x1E, 0x02), 6725 GEN_PPC64_R4(rldic, 0x1E, 0x04), 6726 GEN_PPC64_R2(rldcl, 0x1E, 0x08), 6727 GEN_PPC64_R2(rldcr, 0x1E, 0x09), 6728 GEN_PPC64_R4(rldimi, 0x1E, 0x06), 6729 #endif 6730 6731 #undef GEN_LDX_E 6732 #define GEN_LDX_E(name, ldop, opc2, opc3, type, type2, chk) \ 6733 GEN_HANDLER_E(name##x, 0x1F, opc2, opc3, 0x00000001, type, type2), 6734 6735 #if defined(TARGET_PPC64) 6736 GEN_LDX_E(ldbr, ld64ur_i64, 0x14, 0x10, PPC_NONE, PPC2_DBRX, CHK_NONE) 6737 6738 /* HV/P7 and later only */ 6739 GEN_LDX_HVRM(ldcix, ld64_i64, 0x15, 0x1b, PPC_CILDST) 6740 GEN_LDX_HVRM(lwzcix, ld32u, 0x15, 0x18, PPC_CILDST) 6741 
GEN_LDX_HVRM(lhzcix, ld16u, 0x15, 0x19, PPC_CILDST) 6742 GEN_LDX_HVRM(lbzcix, ld8u, 0x15, 0x1a, PPC_CILDST) 6743 #endif 6744 GEN_LDX(lhbr, ld16ur, 0x16, 0x18, PPC_INTEGER) 6745 GEN_LDX(lwbr, ld32ur, 0x16, 0x10, PPC_INTEGER) 6746 6747 /* External PID based load */ 6748 #undef GEN_LDEPX 6749 #define GEN_LDEPX(name, ldop, opc2, opc3) \ 6750 GEN_HANDLER_E(name##epx, 0x1F, opc2, opc3, \ 6751 0x00000001, PPC_NONE, PPC2_BOOKE206), 6752 6753 GEN_LDEPX(lb, DEF_MEMOP(MO_UB), 0x1F, 0x02) 6754 GEN_LDEPX(lh, DEF_MEMOP(MO_UW), 0x1F, 0x08) 6755 GEN_LDEPX(lw, DEF_MEMOP(MO_UL), 0x1F, 0x00) 6756 #if defined(TARGET_PPC64) 6757 GEN_LDEPX(ld, DEF_MEMOP(MO_UQ), 0x1D, 0x00) 6758 #endif 6759 6760 #undef GEN_STX_E 6761 #define GEN_STX_E(name, stop, opc2, opc3, type, type2, chk) \ 6762 GEN_HANDLER_E(name##x, 0x1F, opc2, opc3, 0x00000000, type, type2), 6763 6764 #if defined(TARGET_PPC64) 6765 GEN_STX_E(stdbr, st64r_i64, 0x14, 0x14, PPC_NONE, PPC2_DBRX, CHK_NONE) 6766 GEN_STX_HVRM(stdcix, st64_i64, 0x15, 0x1f, PPC_CILDST) 6767 GEN_STX_HVRM(stwcix, st32, 0x15, 0x1c, PPC_CILDST) 6768 GEN_STX_HVRM(sthcix, st16, 0x15, 0x1d, PPC_CILDST) 6769 GEN_STX_HVRM(stbcix, st8, 0x15, 0x1e, PPC_CILDST) 6770 #endif 6771 GEN_STX(sthbr, st16r, 0x16, 0x1C, PPC_INTEGER) 6772 GEN_STX(stwbr, st32r, 0x16, 0x14, PPC_INTEGER) 6773 6774 #undef GEN_STEPX 6775 #define GEN_STEPX(name, ldop, opc2, opc3) \ 6776 GEN_HANDLER_E(name##epx, 0x1F, opc2, opc3, \ 6777 0x00000001, PPC_NONE, PPC2_BOOKE206), 6778 6779 GEN_STEPX(stb, DEF_MEMOP(MO_UB), 0x1F, 0x06) 6780 GEN_STEPX(sth, DEF_MEMOP(MO_UW), 0x1F, 0x0C) 6781 GEN_STEPX(stw, DEF_MEMOP(MO_UL), 0x1F, 0x04) 6782 #if defined(TARGET_PPC64) 6783 GEN_STEPX(std, DEF_MEMOP(MO_UQ), 0x1D, 0x04) 6784 #endif 6785 6786 #undef GEN_CRLOGIC 6787 #define GEN_CRLOGIC(name, tcg_op, opc) \ 6788 GEN_HANDLER(name, 0x13, 0x01, opc, 0x00000001, PPC_INTEGER) 6789 GEN_CRLOGIC(crand, tcg_gen_and_i32, 0x08), 6790 GEN_CRLOGIC(crandc, tcg_gen_andc_i32, 0x04), 6791 GEN_CRLOGIC(creqv, tcg_gen_eqv_i32, 0x09), 
6792 GEN_CRLOGIC(crnand, tcg_gen_nand_i32, 0x07), 6793 GEN_CRLOGIC(crnor, tcg_gen_nor_i32, 0x01), 6794 GEN_CRLOGIC(cror, tcg_gen_or_i32, 0x0E), 6795 GEN_CRLOGIC(crorc, tcg_gen_orc_i32, 0x0D), 6796 GEN_CRLOGIC(crxor, tcg_gen_xor_i32, 0x06), 6797 6798 #undef GEN_MAC_HANDLER 6799 #define GEN_MAC_HANDLER(name, opc2, opc3) \ 6800 GEN_HANDLER(name, 0x04, opc2, opc3, 0x00000000, PPC_405_MAC) 6801 GEN_MAC_HANDLER(macchw, 0x0C, 0x05), 6802 GEN_MAC_HANDLER(macchwo, 0x0C, 0x15), 6803 GEN_MAC_HANDLER(macchws, 0x0C, 0x07), 6804 GEN_MAC_HANDLER(macchwso, 0x0C, 0x17), 6805 GEN_MAC_HANDLER(macchwsu, 0x0C, 0x06), 6806 GEN_MAC_HANDLER(macchwsuo, 0x0C, 0x16), 6807 GEN_MAC_HANDLER(macchwu, 0x0C, 0x04), 6808 GEN_MAC_HANDLER(macchwuo, 0x0C, 0x14), 6809 GEN_MAC_HANDLER(machhw, 0x0C, 0x01), 6810 GEN_MAC_HANDLER(machhwo, 0x0C, 0x11), 6811 GEN_MAC_HANDLER(machhws, 0x0C, 0x03), 6812 GEN_MAC_HANDLER(machhwso, 0x0C, 0x13), 6813 GEN_MAC_HANDLER(machhwsu, 0x0C, 0x02), 6814 GEN_MAC_HANDLER(machhwsuo, 0x0C, 0x12), 6815 GEN_MAC_HANDLER(machhwu, 0x0C, 0x00), 6816 GEN_MAC_HANDLER(machhwuo, 0x0C, 0x10), 6817 GEN_MAC_HANDLER(maclhw, 0x0C, 0x0D), 6818 GEN_MAC_HANDLER(maclhwo, 0x0C, 0x1D), 6819 GEN_MAC_HANDLER(maclhws, 0x0C, 0x0F), 6820 GEN_MAC_HANDLER(maclhwso, 0x0C, 0x1F), 6821 GEN_MAC_HANDLER(maclhwu, 0x0C, 0x0C), 6822 GEN_MAC_HANDLER(maclhwuo, 0x0C, 0x1C), 6823 GEN_MAC_HANDLER(maclhwsu, 0x0C, 0x0E), 6824 GEN_MAC_HANDLER(maclhwsuo, 0x0C, 0x1E), 6825 GEN_MAC_HANDLER(nmacchw, 0x0E, 0x05), 6826 GEN_MAC_HANDLER(nmacchwo, 0x0E, 0x15), 6827 GEN_MAC_HANDLER(nmacchws, 0x0E, 0x07), 6828 GEN_MAC_HANDLER(nmacchwso, 0x0E, 0x17), 6829 GEN_MAC_HANDLER(nmachhw, 0x0E, 0x01), 6830 GEN_MAC_HANDLER(nmachhwo, 0x0E, 0x11), 6831 GEN_MAC_HANDLER(nmachhws, 0x0E, 0x03), 6832 GEN_MAC_HANDLER(nmachhwso, 0x0E, 0x13), 6833 GEN_MAC_HANDLER(nmaclhw, 0x0E, 0x0D), 6834 GEN_MAC_HANDLER(nmaclhwo, 0x0E, 0x1D), 6835 GEN_MAC_HANDLER(nmaclhws, 0x0E, 0x0F), 6836 GEN_MAC_HANDLER(nmaclhwso, 0x0E, 0x1F), 6837 GEN_MAC_HANDLER(mulchw, 0x08, 
                0x05),
GEN_MAC_HANDLER(mulchwu, 0x08, 0x04),
GEN_MAC_HANDLER(mulhhw, 0x08, 0x01),
GEN_MAC_HANDLER(mulhhwu, 0x08, 0x00),
GEN_MAC_HANDLER(mullhw, 0x08, 0x0D),
GEN_MAC_HANDLER(mullhwu, 0x08, 0x0C),

/* Transactional Memory handlers (gated by PPC2_TM) */
GEN_HANDLER2_E(tbegin, "tbegin", 0x1F, 0x0E, 0x14, 0x01DFF800, \
               PPC_NONE, PPC2_TM),
GEN_HANDLER2_E(tend, "tend", 0x1F, 0x0E, 0x15, 0x01FFF800, \
               PPC_NONE, PPC2_TM),
GEN_HANDLER2_E(tabort, "tabort", 0x1F, 0x0E, 0x1C, 0x03E0F800, \
               PPC_NONE, PPC2_TM),
GEN_HANDLER2_E(tabortwc, "tabortwc", 0x1F, 0x0E, 0x18, 0x00000000, \
               PPC_NONE, PPC2_TM),
GEN_HANDLER2_E(tabortwci, "tabortwci", 0x1F, 0x0E, 0x1A, 0x00000000, \
               PPC_NONE, PPC2_TM),
GEN_HANDLER2_E(tabortdc, "tabortdc", 0x1F, 0x0E, 0x19, 0x00000000, \
               PPC_NONE, PPC2_TM),
GEN_HANDLER2_E(tabortdci, "tabortdci", 0x1F, 0x0E, 0x1B, 0x00000000, \
               PPC_NONE, PPC2_TM),
GEN_HANDLER2_E(tsr, "tsr", 0x1F, 0x0E, 0x17, 0x03DFF800, \
               PPC_NONE, PPC2_TM),
GEN_HANDLER2_E(tcheck, "tcheck", 0x1F, 0x0E, 0x16, 0x007FF800, \
               PPC_NONE, PPC2_TM),
GEN_HANDLER2_E(treclaim, "treclaim", 0x1F, 0x0E, 0x1D, 0x03E0F800, \
               PPC_NONE, PPC2_TM),
GEN_HANDLER2_E(trechkpt, "trechkpt", 0x1F, 0x0E, 0x1F, 0x03FFF800, \
               PPC_NONE, PPC2_TM),

#include "translate/fp-ops.c.inc"

#include "translate/vmx-ops.c.inc"

#include "translate/vsx-ops.c.inc"

#include "translate/spe-ops.c.inc"
};

/*****************************************************************************/
/* Opcode types */
enum {
    PPC_DIRECT   = 0, /* Opcode routine */
    PPC_INDIRECT = 1, /* Indirect opcode table */
};

/*
 * Table entries are tagged pointers: the two low bits of the stored
 * opc_handler_t* encode PPC_DIRECT or PPC_INDIRECT (relies on handler
 * structures being at least 4-byte aligned).
 */
#define PPC_OPCODE_MASK 0x3

/* True if a table slot holds a nested sub-table rather than a handler. */
static inline int is_indirect_opcode(void *handler)
{
    return ((uintptr_t)handler & PPC_OPCODE_MASK) == PPC_INDIRECT;
}

/* Strip the tag bits to recover the nested opc_handler_t* array. */
static inline opc_handler_t **ind_table(void *handler)
{
    return (opc_handler_t **)((uintptr_t)handler & ~PPC_OPCODE_MASK);
}

/* Instruction table creation */
/* Opcodes tables creation */

/* Point every slot of a (sub)table at the shared invalid_handler sentinel. */
static void fill_new_table(opc_handler_t **table, int len)
{
    int i;

    for (i = 0; i < len; i++) {
        table[i] = &invalid_handler;
    }
}

/*
 * Allocate a fresh indirect sub-table and install it at table[idx],
 * tagged with PPC_INDIRECT in the low pointer bits.  g_new() aborts on
 * OOM, so this always returns 0.
 */
static int create_new_table(opc_handler_t **table, unsigned char idx)
{
    opc_handler_t **tmp;

    tmp = g_new(opc_handler_t *, PPC_CPU_INDIRECT_OPCODES_LEN);
    fill_new_table(tmp, PPC_CPU_INDIRECT_OPCODES_LEN);
    table[idx] = (opc_handler_t *)((uintptr_t)tmp | PPC_INDIRECT);

    return 0;
}

/* Install handler at table[idx]; returns -1 if the slot is already taken. */
static int insert_in_table(opc_handler_t **table, unsigned char idx,
                           opc_handler_t *handler)
{
    if (table[idx] != &invalid_handler) {
        return -1;
    }
    table[idx] = handler;

    return 0;
}

/* Register a single-level (opc1-only) instruction in the main table. */
static int register_direct_insn(opc_handler_t **ppc_opcodes,
                                unsigned char idx, opc_handler_t *handler)
{
    if (insert_in_table(ppc_opcodes, idx, handler) < 0) {
        printf("*** ERROR: opcode %02x already assigned in main "
               "opcode table\n", idx);
        return -1;
    }

    return 0;
}

/*
 * Ensure table[idx1] is an indirect sub-table (creating it on demand) and,
 * when handler != NULL, install handler at its idx2 slot.  A NULL handler
 * only materializes the intermediate table.
 */
static int register_ind_in_table(opc_handler_t **table,
                                 unsigned char idx1, unsigned char idx2,
                                 opc_handler_t *handler)
{
    if (table[idx1] == &invalid_handler) {
        if (create_new_table(table, idx1) < 0) {
            printf("*** ERROR: unable to create indirect table "
                   "idx=%02x\n", idx1);
            return -1;
        }
    } else {
        if (!is_indirect_opcode(table[idx1])) {
            printf("*** ERROR: idx %02x already assigned to a direct "
                   "opcode\n", idx1);
            return -1;
        }
    }
    if (handler != NULL &&
        insert_in_table(ind_table(table[idx1]), idx2, handler) < 0) {
        printf("*** ERROR: opcode %02x already assigned in "
               "opcode table %02x\n", idx2, idx1);
        return -1;
    }

    return 0;
}

static int
register_ind_insn(opc_handler_t **ppc_opcodes,
                  unsigned char idx1, unsigned char idx2,
                  opc_handler_t *handler)
{
    /* Register a two-level instruction: opc1 -> opc2. */
    return register_ind_in_table(ppc_opcodes, idx1, idx2, handler);
}

/* Register a three-level instruction: opc1 -> opc2 -> opc3. */
static int register_dblind_insn(opc_handler_t **ppc_opcodes,
                                unsigned char idx1, unsigned char idx2,
                                unsigned char idx3, opc_handler_t *handler)
{
    /* First pass with NULL just materializes the intermediate table. */
    if (register_ind_in_table(ppc_opcodes, idx1, idx2, NULL) < 0) {
        printf("*** ERROR: unable to join indirect table idx "
               "[%02x-%02x]\n", idx1, idx2);
        return -1;
    }
    if (register_ind_in_table(ind_table(ppc_opcodes[idx1]), idx2, idx3,
                              handler) < 0) {
        printf("*** ERROR: unable to insert opcode "
               "[%02x-%02x-%02x]\n", idx1, idx2, idx3);
        return -1;
    }

    return 0;
}

/* Register a four-level instruction: opc1 -> opc2 -> opc3 -> opc4. */
static int register_trplind_insn(opc_handler_t **ppc_opcodes,
                                 unsigned char idx1, unsigned char idx2,
                                 unsigned char idx3, unsigned char idx4,
                                 opc_handler_t *handler)
{
    opc_handler_t **table;

    /* Materialize each intermediate level before the final insert. */
    if (register_ind_in_table(ppc_opcodes, idx1, idx2, NULL) < 0) {
        printf("*** ERROR: unable to join indirect table idx "
               "[%02x-%02x]\n", idx1, idx2);
        return -1;
    }
    table = ind_table(ppc_opcodes[idx1]);
    if (register_ind_in_table(table, idx2, idx3, NULL) < 0) {
        printf("*** ERROR: unable to join 2nd-level indirect table idx "
               "[%02x-%02x-%02x]\n", idx1, idx2, idx3);
        return -1;
    }
    table = ind_table(table[idx2]);
    if (register_ind_in_table(table, idx3, idx4, handler) < 0) {
        printf("*** ERROR: unable to insert opcode "
               "[%02x-%02x-%02x-%02x]\n", idx1, idx2, idx3, idx4);
        return -1;
    }
    return 0;
}

/*
 * Register one opcode_t entry, choosing the table depth from which of
 * opc2/opc3/opc4 are used (0xFF marks an unused level).
 */
static int register_insn(opc_handler_t **ppc_opcodes, opcode_t *insn)
{
    if (insn->opc2 != 0xFF) {
        if (insn->opc3 != 0xFF) {
            if (insn->opc4 != 0xFF) {
                if (register_trplind_insn(ppc_opcodes, insn->opc1, insn->opc2,
                                          insn->opc3, insn->opc4,
                                          &insn->handler) < 0) {
                    return -1;
                }
            } else {
                if (register_dblind_insn(ppc_opcodes, insn->opc1, insn->opc2,
                                         insn->opc3, &insn->handler) < 0) {
                    return -1;
                }
            }
        } else {
            if (register_ind_insn(ppc_opcodes, insn->opc1,
                                  insn->opc2, &insn->handler) < 0) {
                return -1;
            }
        }
    } else {
        if (register_direct_insn(ppc_opcodes, insn->opc1, &insn->handler) < 0) {
            return -1;
        }
    }

    return 0;
}

/*
 * Recursively count the valid handlers below a table, freeing and resetting
 * any indirect sub-table that turned out to be completely empty.
 * Returns the number of populated slots.
 */
static int test_opcode_table(opc_handler_t **table, int len)
{
    int i, count, tmp;

    for (i = 0, count = 0; i < len; i++) {
        /* Consistency fixup */
        if (table[i] == NULL) {
            table[i] = &invalid_handler;
        }
        if (table[i] != &invalid_handler) {
            if (is_indirect_opcode(table[i])) {
                tmp = test_opcode_table(ind_table(table[i]),
                                        PPC_CPU_INDIRECT_OPCODES_LEN);
                if (tmp == 0) {
                    /* Empty sub-table: release it and mark slot invalid. */
                    g_free(table[i]);
                    table[i] = &invalid_handler;
                } else {
                    count++;
                }
            } else {
                count++;
            }
        }
    }

    return count;
}

/* Prune empty sub-tables; warn if the whole table ended up empty. */
static void fix_opcode_tables(opc_handler_t **ppc_opcodes)
{
    if (test_opcode_table(ppc_opcodes, PPC_CPU_OPCODES_LEN) == 0) {
        printf("*** WARNING: no opcode defined !\n");
    }
}

/*****************************************************************************/
/*
 * Build the per-CPU dispatch tables from the static opcodes[] array,
 * keeping only the entries enabled by this CPU class' insns_flags /
 * insns_flags2.  Sets errp on a conflicting registration.
 */
void create_ppc_opcodes(PowerPCCPU *cpu, Error **errp)
{
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
    opcode_t *opc;

    fill_new_table(cpu->opcodes, PPC_CPU_OPCODES_LEN);
    for (opc = opcodes; opc < &opcodes[ARRAY_SIZE(opcodes)]; opc++) {
        if (((opc->handler.type & pcc->insns_flags) != 0) ||
            ((opc->handler.type2 & pcc->insns_flags2) != 0)) {
            if (register_insn(cpu->opcodes, opc) < 0) {
                error_setg(errp, "ERROR initializing PowerPC instruction "
                           "0x%02x 0x%02x 0x%02x", opc->opc1, opc->opc2,
                           opc->opc3);
                return;
            }
        }
    }
    fix_opcode_tables(cpu->opcodes);
    fflush(stdout);
    fflush(stderr);
}

/*
 * Free the (up to three) levels of indirect sub-tables hanging off
 * cpu->opcodes.  Entries pointing at &invalid_handler or at direct
 * handlers are not owned by the table and are left alone.
 */
void destroy_ppc_opcodes(PowerPCCPU *cpu)
{
    opc_handler_t **table, **table_2;
    int i, j, k;

    for (i = 0; i < PPC_CPU_OPCODES_LEN; i++) {
        if (cpu->opcodes[i] == &invalid_handler) {
            continue;
        }
        if (is_indirect_opcode(cpu->opcodes[i])) {
            table = ind_table(cpu->opcodes[i]);
            for (j = 0; j < PPC_CPU_INDIRECT_OPCODES_LEN; j++) {
                if (table[j] == &invalid_handler) {
                    continue;
                }
                if (is_indirect_opcode(table[j])) {
                    table_2 = ind_table(table[j]);
                    for (k = 0; k < PPC_CPU_INDIRECT_OPCODES_LEN; k++) {
                        if (table_2[k] != &invalid_handler &&
                            is_indirect_opcode(table_2[k])) {
                            /* Untag before freeing the allocation. */
                            g_free((opc_handler_t *)((uintptr_t)table_2[k] &
                                                     ~PPC_INDIRECT));
                        }
                    }
                    g_free((opc_handler_t *)((uintptr_t)table[j] &
                                             ~PPC_INDIRECT));
                }
            }
            g_free((opc_handler_t *)((uintptr_t)cpu->opcodes[i] &
                                     ~PPC_INDIRECT));
        }
    }
}

/* Restrict the CPU's advertised instruction sets to what TCG emulates. */
int ppc_fixup_cpu(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;

    /*
     * TCG doesn't (yet) emulate some groups of instructions that are
     * implemented on some otherwise supported CPUs (e.g. VSX and
     * decimal floating point instructions on POWER7). We remove
     * unsupported instruction groups from the cpu state's instruction
     * masks and hope the guest can cope. For at least the pseries
     * machine, the unavailability of these instructions can be
     * advertised to the guest via the device tree.
     */
    if ((env->insns_flags & ~PPC_TCG_INSNS)
        || (env->insns_flags2 & ~PPC_TCG_INSNS2)) {
        warn_report("Disabling some instructions which are not "
                    "emulated by TCG (0x%" PRIx64 ", 0x%" PRIx64 ")",
                    env->insns_flags & ~PPC_TCG_INSNS,
                    env->insns_flags2 & ~PPC_TCG_INSNS2);
    }
    env->insns_flags &= PPC_TCG_INSNS;
    env->insns_flags2 &= PPC_TCG_INSNS2;
    return 0;
}

/*
 * Decode one instruction via the legacy opcode tables, walking up to
 * four table levels (opc1 .. opc4).  Returns false if the opcode is
 * invalid or has reserved bits set, true after running its handler.
 */
static bool decode_legacy(PowerPCCPU *cpu, DisasContext *ctx, uint32_t insn)
{
    opc_handler_t **table, *handler;
    uint32_t inval;

    ctx->opcode = insn;

    LOG_DISAS("translate opcode %08x (%02x %02x %02x %02x) (%s)\n",
              insn, opc1(insn), opc2(insn), opc3(insn), opc4(insn),
              ctx->le_mode ? "little" : "big");

    table = cpu->opcodes;
    handler = table[opc1(insn)];
    if (is_indirect_opcode(handler)) {
        table = ind_table(handler);
        handler = table[opc2(insn)];
        if (is_indirect_opcode(handler)) {
            table = ind_table(handler);
            handler = table[opc3(insn)];
            if (is_indirect_opcode(handler)) {
                table = ind_table(handler);
                handler = table[opc4(insn)];
            }
        }
    }

    /* Is opcode *REALLY* valid ?
     */
    if (unlikely(handler->handler == &gen_invalid)) {
        qemu_log_mask(LOG_GUEST_ERROR, "invalid/unsupported opcode: "
                      "%02x - %02x - %02x - %02x (%08x) "
                      TARGET_FMT_lx "\n",
                      opc1(insn), opc2(insn), opc3(insn), opc4(insn),
                      insn, ctx->cia);
        return false;
    }

    /* SPE instructions with Rc set use the alternate invalid-bits mask. */
    if (unlikely(handler->type & (PPC_SPE | PPC_SPE_SINGLE | PPC_SPE_DOUBLE)
                 && Rc(insn))) {
        inval = handler->inval2;
    } else {
        inval = handler->inval1;
    }

    /* Reject encodings with reserved bits set. */
    if (unlikely((insn & inval) != 0)) {
        qemu_log_mask(LOG_GUEST_ERROR, "invalid bits: %08x for opcode: "
                      "%02x - %02x - %02x - %02x (%08x) "
                      TARGET_FMT_lx "\n", insn & inval,
                      opc1(insn), opc2(insn), opc3(insn), opc4(insn),
                      insn, ctx->cia);
        return false;
    }

    handler->handler(ctx);
    return true;
}

/*
 * Unpack the TB's hflags word (built at hflags-compute time) into the
 * per-TB DisasContext fields used during translation.
 */
static void ppc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUPPCState *env = cpu_env(cs);
    uint32_t hflags = ctx->base.tb->flags;

    ctx->spr_cb = env->spr_cb;
    ctx->pr = (hflags >> HFLAGS_PR) & 1;
    ctx->mem_idx = (hflags >> HFLAGS_DMMU_IDX) & 7;
    ctx->dr = (hflags >> HFLAGS_DR) & 1;
    ctx->hv = (hflags >> HFLAGS_HV) & 1;
    ctx->insns_flags = env->insns_flags;
    ctx->insns_flags2 = env->insns_flags2;
    ctx->access_type = -1;
    ctx->need_access_type = !mmu_is_64bit(env->mmu_model);
    ctx->le_mode = (hflags >> HFLAGS_LE) & 1;
    ctx->default_tcg_memop_mask = ctx->le_mode ? MO_LE : MO_BE;
    ctx->flags = env->flags;
#if defined(TARGET_PPC64)
    ctx->sf_mode = (hflags >> HFLAGS_64) & 1;
    ctx->has_cfar = !!(env->flags & POWERPC_FLAG_CFAR);
#endif
    ctx->lazy_tlb_flush = env->mmu_model == POWERPC_MMU_32B
        || env->mmu_model & POWERPC_MMU_64;

    ctx->fpu_enabled = (hflags >> HFLAGS_FP) & 1;
    ctx->spe_enabled = (hflags >> HFLAGS_SPE) & 1;
    ctx->altivec_enabled = (hflags >> HFLAGS_VR) & 1;
    ctx->vsx_enabled = (hflags >> HFLAGS_VSX) & 1;
    ctx->tm_enabled = (hflags >> HFLAGS_TM) & 1;
    ctx->gtse = (hflags >> HFLAGS_GTSE) & 1;
    ctx->hr = (hflags >> HFLAGS_HR) & 1;
    ctx->mmcr0_pmcc0 = (hflags >> HFLAGS_PMCC0) & 1;
    ctx->mmcr0_pmcc1 = (hflags >> HFLAGS_PMCC1) & 1;
    ctx->mmcr0_pmcjce = (hflags >> HFLAGS_PMCJCE) & 1;
    ctx->pmc_other = (hflags >> HFLAGS_PMC_OTHER) & 1;
    ctx->pmu_insn_cnt = (hflags >> HFLAGS_INSN_CNT) & 1;

    ctx->singlestep_enabled = 0;
    if ((hflags >> HFLAGS_SE) & 1) {
        ctx->singlestep_enabled |= CPU_SINGLE_STEP;
        /* One instruction per TB while single stepping. */
        ctx->base.max_insns = 1;
    }
    if ((hflags >> HFLAGS_BE) & 1) {
        ctx->singlestep_enabled |= CPU_BRANCH_STEP;
    }
}

static void ppc_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}

static void ppc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    tcg_gen_insn_start(dcbase->pc_next);
}

/* Prefixed instructions (opc1 == 1) exist only from ISA v3.1 on. */
static bool is_prefix_insn(DisasContext *ctx, uint32_t insn)
{
    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    return opc1(insn) == 1;
}

/*
 * Fetch and translate one (possibly prefixed) instruction: try the
 * decodetree decoder first, then the legacy opcode tables.
 */
static void ppc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = cpu_env(cs);
    target_ulong pc;
    uint32_t insn;
    bool ok;

    LOG_DISAS("----------------\n");
    LOG_DISAS("nip=" TARGET_FMT_lx " super=%d ir=%d\n",
              ctx->base.pc_next, ctx->mem_idx,
(int)msr_ir); 7298 7299 ctx->cia = pc = ctx->base.pc_next; 7300 insn = translator_ldl_swap(env, dcbase, pc, need_byteswap(ctx)); 7301 ctx->base.pc_next = pc += 4; 7302 7303 if (!is_prefix_insn(ctx, insn)) { 7304 ok = (decode_insn32(ctx, insn) || 7305 decode_legacy(cpu, ctx, insn)); 7306 } else if ((pc & 63) == 0) { 7307 /* 7308 * Power v3.1, section 1.9 Exceptions: 7309 * attempt to execute a prefixed instruction that crosses a 7310 * 64-byte address boundary (system alignment error). 7311 */ 7312 gen_exception_err(ctx, POWERPC_EXCP_ALIGN, POWERPC_EXCP_ALIGN_INSN); 7313 ok = true; 7314 } else { 7315 uint32_t insn2 = translator_ldl_swap(env, dcbase, pc, 7316 need_byteswap(ctx)); 7317 ctx->base.pc_next = pc += 4; 7318 ok = decode_insn64(ctx, deposit64(insn2, 32, 32, insn)); 7319 } 7320 if (!ok) { 7321 gen_invalid(ctx); 7322 } 7323 7324 /* End the TB when crossing a page boundary. */ 7325 if (ctx->base.is_jmp == DISAS_NEXT && !(pc & ~TARGET_PAGE_MASK)) { 7326 ctx->base.is_jmp = DISAS_TOO_MANY; 7327 } 7328 } 7329 7330 static void ppc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs) 7331 { 7332 DisasContext *ctx = container_of(dcbase, DisasContext, base); 7333 DisasJumpType is_jmp = ctx->base.is_jmp; 7334 target_ulong nip = ctx->base.pc_next; 7335 7336 if (is_jmp == DISAS_NORETURN) { 7337 /* We have already exited the TB. */ 7338 return; 7339 } 7340 7341 /* Honor single stepping. */ 7342 if (unlikely(ctx->singlestep_enabled & CPU_SINGLE_STEP)) { 7343 bool rfi_type = false; 7344 7345 switch (is_jmp) { 7346 case DISAS_TOO_MANY: 7347 case DISAS_EXIT_UPDATE: 7348 case DISAS_CHAIN_UPDATE: 7349 gen_update_nip(ctx, nip); 7350 break; 7351 case DISAS_EXIT: 7352 case DISAS_CHAIN: 7353 /* 7354 * This is a heuristic, to put it kindly. The rfi class of 7355 * instructions are among the few outside branches that change 7356 * NIP without taking an interrupt. Single step trace interrupts 7357 * do not fire on completion of these instructions. 
7358 */ 7359 rfi_type = true; 7360 break; 7361 default: 7362 g_assert_not_reached(); 7363 } 7364 7365 gen_debug_exception(ctx, rfi_type); 7366 return; 7367 } 7368 7369 switch (is_jmp) { 7370 case DISAS_TOO_MANY: 7371 if (use_goto_tb(ctx, nip)) { 7372 pmu_count_insns(ctx); 7373 tcg_gen_goto_tb(0); 7374 gen_update_nip(ctx, nip); 7375 tcg_gen_exit_tb(ctx->base.tb, 0); 7376 break; 7377 } 7378 /* fall through */ 7379 case DISAS_CHAIN_UPDATE: 7380 gen_update_nip(ctx, nip); 7381 /* fall through */ 7382 case DISAS_CHAIN: 7383 /* 7384 * tcg_gen_lookup_and_goto_ptr will exit the TB if 7385 * CF_NO_GOTO_PTR is set. Count insns now. 7386 */ 7387 if (ctx->base.tb->flags & CF_NO_GOTO_PTR) { 7388 pmu_count_insns(ctx); 7389 } 7390 7391 tcg_gen_lookup_and_goto_ptr(); 7392 break; 7393 7394 case DISAS_EXIT_UPDATE: 7395 gen_update_nip(ctx, nip); 7396 /* fall through */ 7397 case DISAS_EXIT: 7398 pmu_count_insns(ctx); 7399 tcg_gen_exit_tb(NULL, 0); 7400 break; 7401 7402 default: 7403 g_assert_not_reached(); 7404 } 7405 } 7406 7407 static const TranslatorOps ppc_tr_ops = { 7408 .init_disas_context = ppc_tr_init_disas_context, 7409 .tb_start = ppc_tr_tb_start, 7410 .insn_start = ppc_tr_insn_start, 7411 .translate_insn = ppc_tr_translate_insn, 7412 .tb_stop = ppc_tr_tb_stop, 7413 }; 7414 7415 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns, 7416 vaddr pc, void *host_pc) 7417 { 7418 DisasContext ctx; 7419 7420 translator_loop(cs, tb, max_insns, pc, host_pc, &ppc_tr_ops, &ctx.base); 7421 } 7422