/*
 * PowerPC emulation for qemu: main translation routines.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 * Copyright (C) 2011 Freescale Semiconductor, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "internal.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "qemu/host-utils.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "exec/translator.h"
#include "exec/log.h"
#include "qemu/atomic128.h"
#include "spr_common.h"
#include "power8-pmu.h"

#include "qemu/qemu-print.h"
#include "qapi/error.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef HELPER_H

/* Single-step flavours tracked in DisasContext::singlestep_enabled */
#define CPU_SINGLE_STEP 0x1
#define CPU_BRANCH_STEP 0x2

/* Include definitions for instructions classes and implementations flags */
/* #define PPC_DEBUG_DISAS */

#ifdef PPC_DEBUG_DISAS
# define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
# define LOG_DISAS(...) do { } while (0)
#endif
/*****************************************************************************/
/* Code translation helpers */

/*
 * Global register indexes.
 *
 * cpu_reg_names is a flat backing buffer for the NUL-terminated names of
 * the TCG globals created below.  Its size accounts for:
 *   - "r0".."r9"   (3 bytes each) and "r10".."r31"  (4 bytes each)  GPR
 *   - "r0H".."r9H" (4 bytes each) and "r10H".."r31H" (5 bytes each) SPE GPRh
 *   - "crf0".."crf7" (5 bytes each)                                 CRF
 */
static char cpu_reg_names[10 * 3 + 22 * 4 /* GPR */
                          + 10 * 4 + 22 * 5 /* SPE GPRh */
                          + 8 * 5 /* CRF */];
static TCGv cpu_gpr[32];
static TCGv cpu_gprh[32];
static TCGv_i32 cpu_crf[8];
static TCGv cpu_nip;
static TCGv cpu_msr;
static TCGv cpu_ctr;
static TCGv cpu_lr;
#if defined(TARGET_PPC64)
static TCGv cpu_cfar;
#endif
static TCGv cpu_xer, cpu_so, cpu_ov, cpu_ca, cpu_ov32, cpu_ca32;
static TCGv cpu_reserve;
static TCGv cpu_reserve_length;
static TCGv cpu_reserve_val;
static TCGv cpu_reserve_val2;
static TCGv cpu_fpscr;
static TCGv_i32 cpu_access_type;

/*
 * Create the TCG globals that mirror CPUPPCState fields (GPRs, CR fields,
 * NIP/MSR/CTR/LR, XER and its split-out flag bits, the load-reserve state,
 * FPSCR and the access-type tracking word).  Called once at startup.
 */
void ppc_translate_init(void)
{
    int i;
    char *p;
    size_t cpu_reg_names_size;

    p = cpu_reg_names;
    cpu_reg_names_size = sizeof(cpu_reg_names);

    for (i = 0; i < 8; i++) {
        snprintf(p, cpu_reg_names_size, "crf%d", i);
        cpu_crf[i] = tcg_global_mem_new_i32(cpu_env,
                                            offsetof(CPUPPCState, crf[i]), p);
        /* "crfN" + NUL is always 5 bytes */
        p += 5;
        cpu_reg_names_size -= 5;
    }

    for (i = 0; i < 32; i++) {
        snprintf(p, cpu_reg_names_size, "r%d", i);
        cpu_gpr[i] = tcg_global_mem_new(cpu_env,
                                        offsetof(CPUPPCState, gpr[i]), p);
        /* one-digit names take one byte less than two-digit ones */
        p += (i < 10) ? 3 : 4;
        cpu_reg_names_size -= (i < 10) ? 3 : 4;
        snprintf(p, cpu_reg_names_size, "r%dH", i);
        cpu_gprh[i] = tcg_global_mem_new(cpu_env,
                                         offsetof(CPUPPCState, gprh[i]), p);
        p += (i < 10) ? 4 : 5;
        cpu_reg_names_size -= (i < 10) ? 4 : 5;
    }

    cpu_nip = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUPPCState, nip), "nip");

    cpu_msr = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUPPCState, msr), "msr");

    cpu_ctr = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUPPCState, ctr), "ctr");

    cpu_lr = tcg_global_mem_new(cpu_env,
                                offsetof(CPUPPCState, lr), "lr");

#if defined(TARGET_PPC64)
    cpu_cfar = tcg_global_mem_new(cpu_env,
                                  offsetof(CPUPPCState, cfar), "cfar");
#endif

    cpu_xer = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUPPCState, xer), "xer");
    cpu_so = tcg_global_mem_new(cpu_env,
                                offsetof(CPUPPCState, so), "SO");
    cpu_ov = tcg_global_mem_new(cpu_env,
                                offsetof(CPUPPCState, ov), "OV");
    cpu_ca = tcg_global_mem_new(cpu_env,
                                offsetof(CPUPPCState, ca), "CA");
    cpu_ov32 = tcg_global_mem_new(cpu_env,
                                  offsetof(CPUPPCState, ov32), "OV32");
    cpu_ca32 = tcg_global_mem_new(cpu_env,
                                  offsetof(CPUPPCState, ca32), "CA32");

    cpu_reserve = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUPPCState, reserve_addr),
                                     "reserve_addr");
    cpu_reserve_length = tcg_global_mem_new(cpu_env,
                                            offsetof(CPUPPCState,
                                                     reserve_length),
                                            "reserve_length");
    cpu_reserve_val = tcg_global_mem_new(cpu_env,
                                         offsetof(CPUPPCState, reserve_val),
                                         "reserve_val");
    cpu_reserve_val2 = tcg_global_mem_new(cpu_env,
                                          offsetof(CPUPPCState, reserve_val2),
                                          "reserve_val2");

    cpu_fpscr = tcg_global_mem_new(cpu_env,
                                   offsetof(CPUPPCState, fpscr), "fpscr");

    cpu_access_type = tcg_global_mem_new_i32(cpu_env,
                                             offsetof(CPUPPCState, access_type),
                                             "access_type");
}

/* internal defines */
struct DisasContext {
    DisasContextBase base;
    target_ulong cia;  /* current instruction address */
    uint32_t opcode;
    /* Routine used to access memory */
    bool pr, hv, dr, le_mode;
    bool lazy_tlb_flush;
    bool need_access_type;
    int mem_idx;
    int access_type;
    /* Translation flags */
    MemOp default_tcg_memop_mask;
#if defined(TARGET_PPC64)
    bool sf_mode;
    bool has_cfar;
#endif
    bool fpu_enabled;
    bool altivec_enabled;
    bool vsx_enabled;
    bool spe_enabled;
    bool tm_enabled;
    bool gtse;
    bool hr;
    bool mmcr0_pmcc0;
    bool mmcr0_pmcc1;
    bool mmcr0_pmcjce;
    bool pmc_other;
    bool pmu_insn_cnt;
    ppc_spr_t *spr_cb; /* Needed to check rights for mfspr/mtspr */
    int singlestep_enabled;
    uint32_t flags;
    uint64_t insns_flags;
    uint64_t insns_flags2;
};

#define DISAS_EXIT         DISAS_TARGET_0  /* exit to main loop, pc updated */
#define DISAS_EXIT_UPDATE  DISAS_TARGET_1  /* exit to main loop, pc stale */
#define DISAS_CHAIN        DISAS_TARGET_2  /* lookup next tb, pc updated */
#define DISAS_CHAIN_UPDATE DISAS_TARGET_3  /* lookup next tb, pc stale */

/* Return true iff byteswap is needed in a scalar memop */
static inline bool need_byteswap(const DisasContext *ctx)
{
#if TARGET_BIG_ENDIAN
    return ctx->le_mode;
#else
    return !ctx->le_mode;
#endif
}

/* True when active word size < size of target_long. */
#ifdef TARGET_PPC64
# define NARROW_MODE(C)  (!(C)->sf_mode)
#else
# define NARROW_MODE(C)  0
#endif

struct opc_handler_t {
    /* invalid bits for instruction 1 (Rc(opcode) == 0) */
    uint32_t inval1;
    /* invalid bits for instruction 2 (Rc(opcode) == 1) */
    uint32_t inval2;
    /* instruction type */
    uint64_t type;
    /* extended instruction type */
    uint64_t type2;
    /* handler */
    void (*handler)(DisasContext *ctx);
};

/*
 * Force serialized (exclusive) execution when translating with parallel
 * cpus.  Returns false (and ends the TB) if the caller must bail out and
 * retry under the exclusive lock; returns true when already serialized.
 */
static inline bool gen_serialize(DisasContext *ctx)
{
    if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
        /* Restart with exclusive lock. */
        gen_helper_exit_atomic(cpu_env);
        ctx->base.is_jmp = DISAS_NORETURN;
        return false;
    }
    return true;
}

#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
/* Serialize only when all threads of the core share one LPAR. */
static inline bool gen_serialize_core_lpar(DisasContext *ctx)
{
    if (ctx->flags & POWERPC_FLAG_SMT_1LPAR) {
        return gen_serialize(ctx);
    }

    return true;
}
#endif

/* SPR load/store helpers */
static inline void gen_load_spr(TCGv t, int reg)
{
    tcg_gen_ld_tl(t, cpu_env, offsetof(CPUPPCState, spr[reg]));
}

static inline void gen_store_spr(int reg, TCGv t)
{
    tcg_gen_st_tl(t, cpu_env, offsetof(CPUPPCState, spr[reg]));
}

/*
 * Record the current memory access type in env->access_type, emitting a
 * store only when it actually changes (and only for CPUs that need it).
 */
static inline void gen_set_access_type(DisasContext *ctx, int access_type)
{
    if (ctx->need_access_type && ctx->access_type != access_type) {
        tcg_gen_movi_i32(cpu_access_type, access_type);
        ctx->access_type = access_type;
    }
}

/* Set env->nip; in narrow (32-bit) mode the address is truncated. */
static inline void gen_update_nip(DisasContext *ctx, target_ulong nip)
{
    if (NARROW_MODE(ctx)) {
        nip = (uint32_t)nip;
    }
    tcg_gen_movi_tl(cpu_nip, nip);
}

static void gen_exception_err(DisasContext *ctx, uint32_t excp, uint32_t error)
{
    TCGv_i32 t0, t1;

    /*
     * These are all synchronous exceptions, we set the PC back to the
     * faulting instruction
     */
    gen_update_nip(ctx, ctx->cia);
    t0 = tcg_constant_i32(excp);
    t1 = tcg_constant_i32(error);
    gen_helper_raise_exception_err(cpu_env, t0, t1);
    ctx->base.is_jmp = DISAS_NORETURN;
}

static void gen_exception(DisasContext *ctx, uint32_t excp)
{
    TCGv_i32 t0;

    /*
     * These are all synchronous exceptions, we set the PC back to the
     * faulting instruction
     */
    gen_update_nip(ctx, ctx->cia);
    t0 = tcg_constant_i32(excp);
    gen_helper_raise_exception(cpu_env, t0);
    ctx->base.is_jmp = DISAS_NORETURN;
}

/* Raise an exception with the PC set to an explicit address. */
static void gen_exception_nip(DisasContext *ctx, uint32_t excp,
                              target_ulong nip)
{
    TCGv_i32 t0;

    gen_update_nip(ctx, nip);
    t0 = tcg_constant_i32(excp);
    gen_helper_raise_exception(cpu_env, t0);
    ctx->base.is_jmp = DISAS_NORETURN;
}

#if !defined(CONFIG_USER_ONLY)
static void gen_ppc_maybe_interrupt(DisasContext *ctx)
{
    translator_io_start(&ctx->base);
    gen_helper_ppc_maybe_interrupt(cpu_env);
}
#endif

/*
 * Tells the caller what is the appropriate exception to generate and prepares
 * SPR registers for this exception.
 *
 * The exception can be either POWERPC_EXCP_TRACE (on most PowerPCs) or
 * POWERPC_EXCP_DEBUG (on BookE).
 */
static void gen_debug_exception(DisasContext *ctx, bool rfi_type)
{
#if !defined(CONFIG_USER_ONLY)
    if (ctx->flags & POWERPC_FLAG_DE) {
        target_ulong dbsr = 0;
        if (ctx->singlestep_enabled & CPU_SINGLE_STEP) {
            dbsr = DBCR0_ICMP;
        } else {
            /* Must have been branch */
            dbsr = DBCR0_BRT;
        }
        TCGv t0 = tcg_temp_new();
        gen_load_spr(t0, SPR_BOOKE_DBSR);
        tcg_gen_ori_tl(t0, t0, dbsr);
        gen_store_spr(SPR_BOOKE_DBSR, t0);
        gen_helper_raise_exception(cpu_env,
                                   tcg_constant_i32(POWERPC_EXCP_DEBUG));
        ctx->base.is_jmp = DISAS_NORETURN;
    } else {
        if (!rfi_type) { /* BookS does not single step rfi type instructions */
            TCGv t0 = tcg_temp_new();
            tcg_gen_movi_tl(t0, ctx->cia);
            gen_helper_book3s_trace(cpu_env, t0);
            ctx->base.is_jmp = DISAS_NORETURN;
        }
    }
#endif
}

static inline void gen_inval_exception(DisasContext *ctx, uint32_t error)
{
    /* Will be converted to program check if needed */
    gen_exception_err(ctx, POWERPC_EXCP_HV_EMU, POWERPC_EXCP_INVAL | error);
}

static inline void gen_priv_exception(DisasContext *ctx, uint32_t error)
{
    gen_exception_err(ctx, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_PRIV | error);
}

static inline void gen_hvpriv_exception(DisasContext *ctx, uint32_t error)
{
    /* Will be converted to program check if needed */
    gen_exception_err(ctx, POWERPC_EXCP_HV_EMU, POWERPC_EXCP_PRIV | error);
}

/*****************************************************************************/
/* SPR READ/WRITE CALLBACKS */

void spr_noaccess(DisasContext *ctx, int gprn, int sprn)
{
#if 0
    sprn = ((sprn >> 5) & 0x1F) | ((sprn & 0x1F) << 5);
    printf("ERROR: try to access SPR %d !\n", sprn);
#endif
}

/* #define PPC_DUMP_SPR_ACCESSES */

/*
 * Generic callbacks:
 * do nothing but store/retrieve spr value
 */
static void spr_load_dump_spr(int sprn)
{
#ifdef PPC_DUMP_SPR_ACCESSES
    TCGv_i32 t0 = tcg_constant_i32(sprn);
    gen_helper_load_dump_spr(cpu_env, t0);
#endif
}

void spr_read_generic(DisasContext *ctx, int gprn, int sprn)
{
    gen_load_spr(cpu_gpr[gprn], sprn);
    spr_load_dump_spr(sprn);
}

static void spr_store_dump_spr(int sprn)
{
#ifdef PPC_DUMP_SPR_ACCESSES
    TCGv_i32 t0 = tcg_constant_i32(sprn);
    gen_helper_store_dump_spr(cpu_env, t0);
#endif
}

void spr_write_generic(DisasContext *ctx, int sprn, int gprn)
{
    gen_store_spr(sprn, cpu_gpr[gprn]);
    spr_store_dump_spr(sprn);
}

/* As spr_write_generic, but zero-extend the source to 32 bits on ppc64. */
void spr_write_generic32(DisasContext *ctx, int sprn, int gprn)
{
#ifdef TARGET_PPC64
    TCGv t0 = tcg_temp_new();
    tcg_gen_ext32u_tl(t0, cpu_gpr[gprn]);
    gen_store_spr(sprn, t0);
    spr_store_dump_spr(sprn);
#else
    spr_write_generic(ctx, sprn, gprn);
#endif
}

/*
 * Write a core-shared SPR: when SMT is active, the store must go through a
 * serialized helper so all threads of the core observe the same value.
 */
void spr_core_write_generic(DisasContext *ctx, int sprn, int gprn)
{
    if (!(ctx->flags & POWERPC_FLAG_SMT)) {
        spr_write_generic(ctx, sprn, gprn);
        return;
    }

    if (!gen_serialize(ctx)) {
        return;
    }

    gen_helper_spr_core_write_generic(cpu_env, tcg_constant_i32(sprn),
                                      cpu_gpr[gprn]);
    spr_store_dump_spr(sprn);
}

static void spr_write_CTRL_ST(DisasContext *ctx, int sprn, int gprn)
{
    /* This does not implement >1 thread */
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    tcg_gen_extract_tl(t0, cpu_gpr[gprn], 0, 1); /* Extract RUN field */
    tcg_gen_shli_tl(t1, t0, 8); /* Duplicate the bit in TS */
    tcg_gen_or_tl(t1, t1, t0);
    gen_store_spr(sprn, t1);
}

void spr_write_CTRL(DisasContext *ctx, int sprn, int gprn)
{
    if (!(ctx->flags & POWERPC_FLAG_SMT_1LPAR)) {
        /* CTRL behaves as 1-thread in LPAR-per-thread mode */
        spr_write_CTRL_ST(ctx, sprn, gprn);
        goto out;
    }

    if (!gen_serialize(ctx)) {
        return;
    }

    gen_helper_spr_write_CTRL(cpu_env, tcg_constant_i32(sprn),
                              cpu_gpr[gprn]);
out:
    spr_store_dump_spr(sprn);

    /*
     * SPR_CTRL writes must force a new translation block,
     * allowing the PMU to calculate the run latch events with
     * more accuracy.
     */
    ctx->base.is_jmp = DISAS_EXIT_UPDATE;
}

#if !defined(CONFIG_USER_ONLY)
/*
 * Clear SPR bits selected by the source GPR.
 * NOTE(review): the mask is built with tcg_gen_neg_tl (arithmetic negation)
 * rather than tcg_gen_not_tl (bitwise complement) before the AND — confirm
 * this is the intended write-one-to-clear semantics for the SPRs using it.
 */
void spr_write_clear(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    gen_load_spr(t0, sprn);
    tcg_gen_neg_tl(t1, cpu_gpr[gprn]);
    tcg_gen_and_tl(t0, t0, t1);
    gen_store_spr(sprn, t0);
}

void spr_access_nop(DisasContext *ctx, int sprn, int gprn)
{
}

#endif

/* SPR common to all PowerPC */
/* XER */
/*
 * Reassemble architectural XER from the split-out SO/OV/CA (and, on ISA
 * v3.00+, OV32/CA32) flag globals plus the residual cpu_xer bits.
 */
void spr_read_xer(DisasContext *ctx, int gprn, int sprn)
{
    TCGv dst = cpu_gpr[gprn];
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    TCGv t2 = tcg_temp_new();
    tcg_gen_mov_tl(dst, cpu_xer);
    tcg_gen_shli_tl(t0, cpu_so, XER_SO);
    tcg_gen_shli_tl(t1, cpu_ov, XER_OV);
    tcg_gen_shli_tl(t2, cpu_ca, XER_CA);
    tcg_gen_or_tl(t0, t0, t1);
    tcg_gen_or_tl(dst, dst, t2);
    tcg_gen_or_tl(dst, dst, t0);
    if (is_isa300(ctx)) {
        tcg_gen_shli_tl(t0, cpu_ov32, XER_OV32);
        tcg_gen_or_tl(dst, dst, t0);
        tcg_gen_shli_tl(t0, cpu_ca32, XER_CA32);
        tcg_gen_or_tl(dst, dst, t0);
    }
}

/* Split an architectural XER value back into cpu_xer plus the flag globals. */
void spr_write_xer(DisasContext *ctx, int sprn, int gprn)
{
    TCGv src = cpu_gpr[gprn];
    /* Write all flags, while reading back check for isa300 */
    tcg_gen_andi_tl(cpu_xer, src,
                    ~((1u << XER_SO) |
                      (1u << XER_OV) | (1u << XER_OV32) |
                      (1u << XER_CA) | (1u << XER_CA32)));
    tcg_gen_extract_tl(cpu_ov32, src, XER_OV32, 1);
    tcg_gen_extract_tl(cpu_ca32, src, XER_CA32, 1);
    tcg_gen_extract_tl(cpu_so, src, XER_SO, 1);
    tcg_gen_extract_tl(cpu_ov, src, XER_OV, 1);
    tcg_gen_extract_tl(cpu_ca, src, XER_CA, 1);
}

/* LR */
void spr_read_lr(DisasContext *ctx, int gprn, int sprn)
{
    tcg_gen_mov_tl(cpu_gpr[gprn], cpu_lr);
}

void spr_write_lr(DisasContext *ctx, int sprn, int gprn)
{
    tcg_gen_mov_tl(cpu_lr, cpu_gpr[gprn]);
}

#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
/* Debug facilities */
/* CFAR */
void spr_read_cfar(DisasContext *ctx, int gprn, int sprn)
{
    tcg_gen_mov_tl(cpu_gpr[gprn], cpu_cfar);
}

void spr_write_cfar(DisasContext *ctx, int sprn, int gprn)
{
    tcg_gen_mov_tl(cpu_cfar, cpu_gpr[gprn]);
}

/* Breakpoint */
void spr_write_ciabr(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_ciabr(cpu_env, cpu_gpr[gprn]);
}
#endif /* defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) */

/* CTR */
void spr_read_ctr(DisasContext *ctx, int gprn, int sprn)
{
    tcg_gen_mov_tl(cpu_gpr[gprn], cpu_ctr);
}

void spr_write_ctr(DisasContext *ctx, int sprn, int gprn)
{
    tcg_gen_mov_tl(cpu_ctr, cpu_gpr[gprn]);
}

/* User read access to SPR */
/* USPRx */
/* UMMCRx */
/* UPMCx */
/* USIA */
/* UDECR */
/* User-mode SPRs alias the privileged SPR sixteen numbers higher. */
void spr_read_ureg(DisasContext *ctx, int gprn, int sprn)
{
    gen_load_spr(cpu_gpr[gprn], sprn + 0x10);
}

#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
void spr_write_ureg(DisasContext *ctx, int sprn, int gprn)
{
    gen_store_spr(sprn + 0x10, cpu_gpr[gprn]);
}
#endif

/* SPR common to all non-embedded PowerPC */
/* DECR */
#if !defined(CONFIG_USER_ONLY)
void spr_read_decr(DisasContext *ctx, int gprn, int sprn)
{
    translator_io_start(&ctx->base);
    gen_helper_load_decr(cpu_gpr[gprn], cpu_env);
}

void spr_write_decr(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_decr(cpu_env, cpu_gpr[gprn]);
}
#endif

/* SPR common to all non-embedded PowerPC, except 601 */
/* Time base */
void spr_read_tbl(DisasContext *ctx, int gprn, int sprn)
{
    translator_io_start(&ctx->base);
    gen_helper_load_tbl(cpu_gpr[gprn], cpu_env);
}

void spr_read_tbu(DisasContext *ctx, int gprn, int sprn)
{
    translator_io_start(&ctx->base);
    gen_helper_load_tbu(cpu_gpr[gprn], cpu_env);
}

void spr_read_atbl(DisasContext *ctx, int gprn, int sprn)
{
    gen_helper_load_atbl(cpu_gpr[gprn], cpu_env);
}

void spr_read_atbu(DisasContext *ctx, int gprn, int sprn)
{
    gen_helper_load_atbu(cpu_gpr[gprn], cpu_env);
}

#if !defined(CONFIG_USER_ONLY)
void spr_write_tbl(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_tbl(cpu_env, cpu_gpr[gprn]);
}

void spr_write_tbu(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_tbu(cpu_env, cpu_gpr[gprn]);
}

void spr_write_atbl(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_atbl(cpu_env, cpu_gpr[gprn]);
}

void spr_write_atbu(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_atbu(cpu_env, cpu_gpr[gprn]);
}

#if defined(TARGET_PPC64)
void spr_read_purr(DisasContext *ctx, int
                   gprn, int sprn)
{
    translator_io_start(&ctx->base);
    gen_helper_load_purr(cpu_gpr[gprn], cpu_env);
}

void spr_write_purr(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_purr(cpu_env, cpu_gpr[gprn]);
}

/* HDECR */
void spr_read_hdecr(DisasContext *ctx, int gprn, int sprn)
{
    translator_io_start(&ctx->base);
    gen_helper_load_hdecr(cpu_gpr[gprn], cpu_env);
}

void spr_write_hdecr(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_hdecr(cpu_env, cpu_gpr[gprn]);
}

void spr_read_vtb(DisasContext *ctx, int gprn, int sprn)
{
    translator_io_start(&ctx->base);
    gen_helper_load_vtb(cpu_gpr[gprn], cpu_env);
}

void spr_write_vtb(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_vtb(cpu_env, cpu_gpr[gprn]);
}

void spr_write_tbu40(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_tbu40(cpu_env, cpu_gpr[gprn]);
}

#endif
#endif

#if !defined(CONFIG_USER_ONLY)
/* IBAT0U...IBAT0U */
/* IBAT0L...IBAT7L */
/* Even SPR numbers are the U half, odd the L half; pairs map to IBAT[][]. */
void spr_read_ibat(DisasContext *ctx, int gprn, int sprn)
{
    tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env,
                  offsetof(CPUPPCState,
                           IBAT[sprn & 1][(sprn - SPR_IBAT0U) / 2]));
}

void spr_read_ibat_h(DisasContext *ctx, int gprn, int sprn)
{
    tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env,
                  offsetof(CPUPPCState,
                           IBAT[sprn & 1][((sprn - SPR_IBAT4U) / 2) + 4]));
}

void spr_write_ibatu(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_constant_i32((sprn - SPR_IBAT0U) / 2);
    gen_helper_store_ibatu(cpu_env, t0, cpu_gpr[gprn]);
}

void spr_write_ibatu_h(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_constant_i32(((sprn - SPR_IBAT4U) / 2) + 4);
    gen_helper_store_ibatu(cpu_env, t0, cpu_gpr[gprn]);
}

void spr_write_ibatl(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_constant_i32((sprn - SPR_IBAT0L) / 2);
    gen_helper_store_ibatl(cpu_env, t0, cpu_gpr[gprn]);
}

void spr_write_ibatl_h(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_constant_i32(((sprn - SPR_IBAT4L) / 2) + 4);
    gen_helper_store_ibatl(cpu_env, t0, cpu_gpr[gprn]);
}

/* DBAT0U...DBAT7U */
/* DBAT0L...DBAT7L */
void spr_read_dbat(DisasContext *ctx, int gprn, int sprn)
{
    tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env,
                  offsetof(CPUPPCState,
                           DBAT[sprn & 1][(sprn - SPR_DBAT0U) / 2]));
}

void spr_read_dbat_h(DisasContext *ctx, int gprn, int sprn)
{
    tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env,
                  offsetof(CPUPPCState,
                           DBAT[sprn & 1][((sprn - SPR_DBAT4U) / 2) + 4]));
}

void spr_write_dbatu(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_constant_i32((sprn - SPR_DBAT0U) / 2);
    gen_helper_store_dbatu(cpu_env, t0, cpu_gpr[gprn]);
}

void spr_write_dbatu_h(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_constant_i32(((sprn - SPR_DBAT4U) / 2) + 4);
    gen_helper_store_dbatu(cpu_env, t0, cpu_gpr[gprn]);
}

void spr_write_dbatl(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_constant_i32((sprn - SPR_DBAT0L) / 2);
    gen_helper_store_dbatl(cpu_env, t0, cpu_gpr[gprn]);
}

void spr_write_dbatl_h(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_constant_i32(((sprn - SPR_DBAT4L) / 2) + 4);
    gen_helper_store_dbatl(cpu_env, t0, cpu_gpr[gprn]);
}

/* SDR1 */
void spr_write_sdr1(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_sdr1(cpu_env, cpu_gpr[gprn]);
}

#if defined(TARGET_PPC64)
/* 64 bits PowerPC specific SPRs */
/* PIDR */
void spr_write_pidr(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_pidr(cpu_env, cpu_gpr[gprn]);
}

void spr_write_lpidr(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_lpidr(cpu_env, cpu_gpr[gprn]);
}

void spr_read_hior(DisasContext *ctx, int gprn, int sprn)
{
    tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, offsetof(CPUPPCState, excp_prefix));
}

void spr_write_hior(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();
    /* Only the interrupt-prefix bits of HIOR are writable */
    tcg_gen_andi_tl(t0, cpu_gpr[gprn], 0x3FFFFF00000ULL);
    tcg_gen_st_tl(t0, cpu_env, offsetof(CPUPPCState, excp_prefix));
}
void spr_write_ptcr(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_ptcr(cpu_env, cpu_gpr[gprn]);
}

void spr_write_pcr(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_pcr(cpu_env, cpu_gpr[gprn]);
}

/* DPDES */
void spr_read_dpdes(DisasContext *ctx, int gprn, int sprn)
{
    if (!gen_serialize_core_lpar(ctx)) {
        return;
    }

    gen_helper_load_dpdes(cpu_gpr[gprn], cpu_env);
}

void spr_write_dpdes(DisasContext *ctx, int sprn, int gprn)
{
    if (!gen_serialize_core_lpar(ctx)) {
        return;
    }

    gen_helper_store_dpdes(cpu_env, cpu_gpr[gprn]);
}
#endif
#endif

/* PowerPC 40x specific registers */
#if !defined(CONFIG_USER_ONLY)
void spr_read_40x_pit(DisasContext *ctx, int gprn, int sprn)
{
    translator_io_start(&ctx->base);
    gen_helper_load_40x_pit(cpu_gpr[gprn], cpu_env);
}

void spr_write_40x_pit(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_40x_pit(cpu_env, cpu_gpr[gprn]);
}

void spr_write_40x_dbcr0(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_store_spr(sprn, cpu_gpr[gprn]);
    gen_helper_store_40x_dbcr0(cpu_env, cpu_gpr[gprn]);
    /* We must stop translation as we may have rebooted */
    ctx->base.is_jmp = DISAS_EXIT_UPDATE;
}

void spr_write_40x_sler(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_40x_sler(cpu_env, cpu_gpr[gprn]);
}

void spr_write_40x_tcr(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_40x_tcr(cpu_env, cpu_gpr[gprn]);
}

void spr_write_40x_tsr(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_40x_tsr(cpu_env, cpu_gpr[gprn]);
}

void spr_write_40x_pid(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();
    /* PID is only 8 bits wide on 40x */
    tcg_gen_andi_tl(t0, cpu_gpr[gprn], 0xFF);
    gen_helper_store_40x_pid(cpu_env, t0);
}

void spr_write_booke_tcr(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_booke_tcr(cpu_env, cpu_gpr[gprn]);
}

void spr_write_booke_tsr(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_booke_tsr(cpu_env, cpu_gpr[gprn]);
}
#endif

/* PIR */
#if !defined(CONFIG_USER_ONLY)
void spr_write_pir(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();
    /* Only the low 4 bits of PIR are writable here */
    tcg_gen_andi_tl(t0, cpu_gpr[gprn], 0xF);
    gen_store_spr(SPR_PIR, t0);
}
#endif

/* SPE specific registers */
void spr_read_spefscr(DisasContext *ctx, int gprn, int sprn)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    tcg_gen_ld_i32(t0, cpu_env, offsetof(CPUPPCState, spe_fscr));
    tcg_gen_extu_i32_tl(cpu_gpr[gprn], t0);
}

void spr_write_spefscr(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(t0, cpu_gpr[gprn]);
    tcg_gen_st_i32(t0, cpu_env, offsetof(CPUPPCState, spe_fscr));
}

#if !defined(CONFIG_USER_ONLY)
/* Callback used to write the exception vector base */
void spr_write_excp_prefix(DisasContext *ctx,
                           int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();
    /* Mask the new prefix with the CPU's IVPR mask before storing */
    tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUPPCState, ivpr_mask));
    tcg_gen_and_tl(t0, t0, cpu_gpr[gprn]);
    tcg_gen_st_tl(t0, cpu_env, offsetof(CPUPPCState, excp_prefix));
    gen_store_spr(sprn, t0);
}

/* Map a BookE IVORn SPR onto its excp_vectors[] slot and store the vector. */
void spr_write_excp_vector(DisasContext *ctx, int sprn, int gprn)
{
    int sprn_offs;

    if (sprn >= SPR_BOOKE_IVOR0 && sprn <= SPR_BOOKE_IVOR15) {
        sprn_offs = sprn - SPR_BOOKE_IVOR0;
    } else if (sprn >= SPR_BOOKE_IVOR32 && sprn <= SPR_BOOKE_IVOR37) {
        sprn_offs = sprn - SPR_BOOKE_IVOR32 + 32;
    } else if (sprn >= SPR_BOOKE_IVOR38 && sprn <= SPR_BOOKE_IVOR42) {
        sprn_offs = sprn - SPR_BOOKE_IVOR38 + 38;
    } else {
        qemu_log_mask(LOG_GUEST_ERROR, "Trying to write an unknown exception"
                      " vector 0x%03x\n", sprn);
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        return;
    }

    TCGv t0 = tcg_temp_new();
    tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUPPCState, ivor_mask));
    tcg_gen_and_tl(t0, t0, cpu_gpr[gprn]);
    tcg_gen_st_tl(t0, cpu_env, offsetof(CPUPPCState, excp_vectors[sprn_offs]));
    gen_store_spr(sprn, t0);
}
#endif

#ifdef TARGET_PPC64
#ifndef CONFIG_USER_ONLY
void spr_write_amr(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    TCGv t2 = tcg_temp_new();

    /*
     * Note, the HV=1 PR=0 case is handled earlier by simply using
     * spr_write_generic for HV mode in the SPR table
     */

    /* Build insertion mask into t1 based on context */
    if (ctx->pr) {
        gen_load_spr(t1, SPR_UAMOR);
    } else {
        gen_load_spr(t1, SPR_AMOR);
    }

    /* Mask new bits into t2 */
    tcg_gen_and_tl(t2, t1, cpu_gpr[gprn]);

    /* Load AMR and clear new bits in t0 */
    gen_load_spr(t0, SPR_AMR);
    tcg_gen_andc_tl(t0, t0, t1);

    /* Or'in new bits and write it out */
    tcg_gen_or_tl(t0, t0, t2);
    gen_store_spr(SPR_AMR, t0);
    spr_store_dump_spr(SPR_AMR);
}

void spr_write_uamor(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    TCGv t2 = tcg_temp_new();

    /*
     * Note, the HV=1 case is handled earlier by simply using
     * spr_write_generic for HV mode in the SPR table
     */

    /* Build insertion mask into t1 based on context */
    gen_load_spr(t1, SPR_AMOR);

    /* Mask new bits into t2 */
    tcg_gen_and_tl(t2, t1, cpu_gpr[gprn]);

    /* Load AMR and clear new bits in t0 */
    gen_load_spr(t0, SPR_UAMOR);
    tcg_gen_andc_tl(t0, t0, t1);

    /* Or'in new bits and write it out */
    tcg_gen_or_tl(t0, t0, t2);
    gen_store_spr(SPR_UAMOR, t0);
    spr_store_dump_spr(SPR_UAMOR);
}

void spr_write_iamr(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    TCGv t2 = tcg_temp_new();

    /*
     * Note, the HV=1 case is handled earlier by simply using
     * spr_write_generic for HV mode in the SPR table
     */

    /* Build insertion mask into t1 based on context */
    gen_load_spr(t1, SPR_AMOR);

    /* Mask new bits into t2 */
    tcg_gen_and_tl(t2, t1, cpu_gpr[gprn]);

    /* Load AMR and clear new bits in t0 */
    gen_load_spr(t0, SPR_IAMR);
    tcg_gen_andc_tl(t0, t0, t1);

    /* Or'in new bits and write it out */
    tcg_gen_or_tl(t0, t0, t2);
    gen_store_spr(SPR_IAMR, t0);
    spr_store_dump_spr(SPR_IAMR);
}
#endif
#endif

#ifndef CONFIG_USER_ONLY
void spr_read_thrm(DisasContext *ctx, int gprn, int sprn)
{
    gen_helper_fixup_thrm(cpu_env);
    gen_load_spr(cpu_gpr[gprn], sprn);
    spr_load_dump_spr(sprn);
}
#endif /* !CONFIG_USER_ONLY */

#if !defined(CONFIG_USER_ONLY)
void spr_write_e500_l1csr0(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();

    /* Only the DCE and CPE bits of L1CSR0 are kept */
    tcg_gen_andi_tl(t0, cpu_gpr[gprn], L1CSR0_DCE | L1CSR0_CPE);
    gen_store_spr(sprn, t0);
}

void spr_write_e500_l1csr1(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();

    /* Only the ICE and CPE bits of L1CSR1 are kept */
    tcg_gen_andi_tl(t0, cpu_gpr[gprn], L1CSR1_ICE | L1CSR1_CPE);
    gen_store_spr(sprn, t0);
}

void spr_write_e500_l2csr0(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();

    /* The flash-invalidate/lock-clear command bits are not stored back */
    tcg_gen_andi_tl(t0, cpu_gpr[gprn],
                    ~(E500_L2CSR0_L2FI | E500_L2CSR0_L2FL | E500_L2CSR0_L2LFC));
    gen_store_spr(sprn, t0);
}

void spr_write_booke206_mmucsr0(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_booke206_tlbflush(cpu_env, cpu_gpr[gprn]);
}

void spr_write_booke_pid(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_constant_i32(sprn);
    gen_helper_booke_setpid(cpu_env, t0, cpu_gpr[gprn]);
}

void spr_write_eplc(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_booke_set_eplc(cpu_env, cpu_gpr[gprn]);
}

void spr_write_epsc(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_booke_set_epsc(cpu_env, cpu_gpr[gprn]);
}

#endif

#if !defined(CONFIG_USER_ONLY)
/* Split a 64-bit MAS7_MAS3 write into the MAS7 (high) and MAS3 (low) SPRs. */
void spr_write_mas73(DisasContext *ctx, int sprn, int gprn)
{
    TCGv val = tcg_temp_new();
    tcg_gen_ext32u_tl(val, cpu_gpr[gprn]);
    gen_store_spr(SPR_BOOKE_MAS3, val);
    tcg_gen_shri_tl(val, cpu_gpr[gprn], 32);
    gen_store_spr(SPR_BOOKE_MAS7, val);
}

/* Reassemble MAS7 (high 32 bits) and MAS3 (low 32 bits) into one value. */
void spr_read_mas73(DisasContext *ctx, int gprn, int sprn)
{
    TCGv mas7 = tcg_temp_new();
    TCGv mas3 = tcg_temp_new();
    gen_load_spr(mas7, SPR_BOOKE_MAS7);
    tcg_gen_shli_tl(mas7, mas7, 32);
    gen_load_spr(mas3, SPR_BOOKE_MAS3);
    tcg_gen_or_tl(cpu_gpr[gprn], mas3, mas7);
}

#endif

#ifdef TARGET_PPC64
static void gen_fscr_facility_check(DisasContext *ctx, int facility_sprn,
                                    int bit, int sprn, int cause)
{
    TCGv_i32 t1 = tcg_constant_i32(bit);
    TCGv_i32 t2 = tcg_constant_i32(sprn);
    TCGv_i32 t3 = tcg_constant_i32(cause);

    gen_helper_fscr_facility_check(cpu_env, t1, t2, t3);
}

static void gen_msr_facility_check(DisasContext *ctx, int facility_sprn,
                                   int bit, int sprn, int cause)
{
    TCGv_i32 t1 = tcg_constant_i32(bit);
    TCGv_i32 t2 = tcg_constant_i32(sprn);
    TCGv_i32 t3 = tcg_constant_i32(cause);

    gen_helper_msr_facility_check(cpu_env, t1, t2, t3);
}

/* Read the upper 32 bits of the SPR numbered one below this one. */
void spr_read_prev_upper32(DisasContext *ctx, int gprn, int sprn)
{
    TCGv spr_up = tcg_temp_new();
    TCGv spr = tcg_temp_new();

    gen_load_spr(spr, sprn - 1);
    tcg_gen_shri_tl(spr_up, spr, 32);
    tcg_gen_ext32u_tl(cpu_gpr[gprn], spr_up);
}

/* Write the upper 32 bits of the SPR numbered one below this one. */
void spr_write_prev_upper32(DisasContext *ctx, int sprn, int gprn)
{
    TCGv spr = tcg_temp_new();

    gen_load_spr(spr, sprn - 1);
    tcg_gen_deposit_tl(spr, spr, cpu_gpr[gprn], 32, 32);
    gen_store_spr(sprn - 1, spr);
}

#if !defined(CONFIG_USER_ONLY)
/* HMER is write-AND: stores can only clear bits, never set them. */
void spr_write_hmer(DisasContext *ctx, int sprn, int gprn)
{
    TCGv hmer = tcg_temp_new();

    gen_load_spr(hmer, sprn);
    tcg_gen_and_tl(hmer, cpu_gpr[gprn], hmer);
    gen_store_spr(sprn, hmer);
    spr_store_dump_spr(sprn);
}

void spr_read_tfmr(DisasContext *ctx, int gprn, int sprn)
{
    gen_helper_load_tfmr(cpu_gpr[gprn], cpu_env);
}

void spr_write_tfmr(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_tfmr(cpu_env, cpu_gpr[gprn]);
}

void spr_write_lpcr(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_lpcr(cpu_env, cpu_gpr[gprn]);
}
#endif /* !defined(CONFIG_USER_ONLY) */

void spr_read_tar(DisasContext *ctx, int gprn, int sprn)
{
    gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_TAR,
sprn, FSCR_IC_TAR); 1224 spr_read_generic(ctx, gprn, sprn); 1225 } 1226 1227 void spr_write_tar(DisasContext *ctx, int sprn, int gprn) 1228 { 1229 gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_TAR, sprn, FSCR_IC_TAR); 1230 spr_write_generic(ctx, sprn, gprn); 1231 } 1232 1233 void spr_read_tm(DisasContext *ctx, int gprn, int sprn) 1234 { 1235 gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM); 1236 spr_read_generic(ctx, gprn, sprn); 1237 } 1238 1239 void spr_write_tm(DisasContext *ctx, int sprn, int gprn) 1240 { 1241 gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM); 1242 spr_write_generic(ctx, sprn, gprn); 1243 } 1244 1245 void spr_read_tm_upper32(DisasContext *ctx, int gprn, int sprn) 1246 { 1247 gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM); 1248 spr_read_prev_upper32(ctx, gprn, sprn); 1249 } 1250 1251 void spr_write_tm_upper32(DisasContext *ctx, int sprn, int gprn) 1252 { 1253 gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM); 1254 spr_write_prev_upper32(ctx, sprn, gprn); 1255 } 1256 1257 void spr_read_ebb(DisasContext *ctx, int gprn, int sprn) 1258 { 1259 gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB); 1260 spr_read_generic(ctx, gprn, sprn); 1261 } 1262 1263 void spr_write_ebb(DisasContext *ctx, int sprn, int gprn) 1264 { 1265 gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB); 1266 spr_write_generic(ctx, sprn, gprn); 1267 } 1268 1269 void spr_read_ebb_upper32(DisasContext *ctx, int gprn, int sprn) 1270 { 1271 gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB); 1272 spr_read_prev_upper32(ctx, gprn, sprn); 1273 } 1274 1275 void spr_write_ebb_upper32(DisasContext *ctx, int sprn, int gprn) 1276 { 1277 gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB); 1278 spr_write_prev_upper32(ctx, sprn, gprn); 1279 } 1280 1281 void spr_read_dexcr_ureg(DisasContext *ctx, int gprn, int sprn) 1282 { 1283 TCGv t0 = tcg_temp_new(); 1284 1285 /* 1286 * 
Access to the (H)DEXCR in problem state is done using separated 1287 * SPR indexes which are 16 below the SPR indexes which have full 1288 * access to the (H)DEXCR in privileged state. Problem state can 1289 * only read bits 32:63, bits 0:31 return 0. 1290 * 1291 * See section 9.3.1-9.3.2 of PowerISA v3.1B 1292 */ 1293 1294 gen_load_spr(t0, sprn + 16); 1295 tcg_gen_ext32u_tl(cpu_gpr[gprn], t0); 1296 } 1297 #endif 1298 1299 #define GEN_HANDLER(name, opc1, opc2, opc3, inval, type) \ 1300 GEN_OPCODE(name, opc1, opc2, opc3, inval, type, PPC_NONE) 1301 1302 #define GEN_HANDLER_E(name, opc1, opc2, opc3, inval, type, type2) \ 1303 GEN_OPCODE(name, opc1, opc2, opc3, inval, type, type2) 1304 1305 #define GEN_HANDLER2(name, onam, opc1, opc2, opc3, inval, type) \ 1306 GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, PPC_NONE) 1307 1308 #define GEN_HANDLER2_E(name, onam, opc1, opc2, opc3, inval, type, type2) \ 1309 GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, type2) 1310 1311 #define GEN_HANDLER_E_2(name, opc1, opc2, opc3, opc4, inval, type, type2) \ 1312 GEN_OPCODE3(name, opc1, opc2, opc3, opc4, inval, type, type2) 1313 1314 #define GEN_HANDLER2_E_2(name, onam, opc1, opc2, opc3, opc4, inval, typ, typ2) \ 1315 GEN_OPCODE4(name, onam, opc1, opc2, opc3, opc4, inval, typ, typ2) 1316 1317 typedef struct opcode_t { 1318 unsigned char opc1, opc2, opc3, opc4; 1319 #if HOST_LONG_BITS == 64 /* Explicitly align to 64 bits */ 1320 unsigned char pad[4]; 1321 #endif 1322 opc_handler_t handler; 1323 const char *oname; 1324 } opcode_t; 1325 1326 static void gen_priv_opc(DisasContext *ctx) 1327 { 1328 gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC); 1329 } 1330 1331 /* Helpers for priv. 
   check */
/* Emit the privilege exception and abandon translation of this insn */
#define GEN_PRIV(CTX)              \
    do {                           \
        gen_priv_opc(CTX); return; \
    } while (0)

#if defined(CONFIG_USER_ONLY)
/* User-only emulation: every privileged access faults */
#define CHK_HV(CTX) GEN_PRIV(CTX)
#define CHK_SV(CTX) GEN_PRIV(CTX)
#define CHK_HVRM(CTX) GEN_PRIV(CTX)
#else
/* Requires hypervisor state (HV set, not problem state) */
#define CHK_HV(CTX)                         \
    do {                                    \
        if (unlikely(ctx->pr || !ctx->hv)) {\
            GEN_PRIV(CTX);                  \
        }                                   \
    } while (0)
/* Requires supervisor state (not problem state) */
#define CHK_SV(CTX)              \
    do {                         \
        if (unlikely(ctx->pr)) { \
            GEN_PRIV(CTX);       \
        }                        \
    } while (0)
/* Requires hypervisor real mode (data relocation off) */
#define CHK_HVRM(CTX)                                   \
    do {                                                \
        if (unlikely(ctx->pr || !ctx->hv || ctx->dr)) { \
            GEN_PRIV(CTX);                              \
        }                                               \
    } while (0)
#endif

#define CHK_NONE(CTX)

/*****************************************************************************/
/* PowerPC instructions table */

#define GEN_OPCODE(name, op1, op2, op3, invl, _typ, _typ2)                    \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .opc4 = 0xff,                                                             \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = stringify(name),                                                 \
}
#define GEN_OPCODE_DUAL(name, op1, op2, op3, invl1, invl2, _typ, _typ2)       \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .opc4 = 0xff,                                                             \
    .handler = {                                                              \
        .inval1  = invl1,                                                     \
        .inval2  = invl2,                                                     \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = stringify(name),                                                 \
}
#define GEN_OPCODE2(name, onam, op1, op2, op3, invl, _typ, _typ2)             \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .opc4 = 0xff,                                                             \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = onam,                                                            \
}
#define GEN_OPCODE3(name, op1, op2, op3, op4, invl, _typ, _typ2)              \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .opc4 = op4,                                                              \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = stringify(name),                                                 \
}
#define GEN_OPCODE4(name, onam, op1, op2, op3, op4, invl, _typ, _typ2)        \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .opc4 = op4,                                                              \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = onam,                                                            \
}

/* Invalid instruction */
static void gen_invalid(DisasContext *ctx)
{
    gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
}

/* Fallback handler for table slots with no real instruction */
static opc_handler_t invalid_handler = {
    .inval1  = 0xFFFFFFFF,
    .inval2  = 0xFFFFFFFF,
    .type    = PPC_NONE,
    .type2   = PPC_NONE,
    .handler = gen_invalid,
};

/*** Integer comparison ***/

/* Compare arg0 with arg1 (signed if s) and set CR field crf (LT/GT/EQ|SO) */
static inline void gen_op_cmp(TCGv arg0, TCGv arg1, int s, int crf)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    TCGv_i32 t = tcg_temp_new_i32();

    tcg_gen_movi_tl(t0, CRF_EQ);
    tcg_gen_movi_tl(t1, CRF_LT);
    tcg_gen_movcond_tl((s ? TCG_COND_LT : TCG_COND_LTU),
                       t0, arg0, arg1, t1, t0);
    tcg_gen_movi_tl(t1, CRF_GT);
    tcg_gen_movcond_tl((s ?
TCG_COND_GT : TCG_COND_GTU), 1467 t0, arg0, arg1, t1, t0); 1468 1469 tcg_gen_trunc_tl_i32(t, t0); 1470 tcg_gen_trunc_tl_i32(cpu_crf[crf], cpu_so); 1471 tcg_gen_or_i32(cpu_crf[crf], cpu_crf[crf], t); 1472 } 1473 1474 static inline void gen_op_cmpi(TCGv arg0, target_ulong arg1, int s, int crf) 1475 { 1476 TCGv t0 = tcg_constant_tl(arg1); 1477 gen_op_cmp(arg0, t0, s, crf); 1478 } 1479 1480 static inline void gen_op_cmp32(TCGv arg0, TCGv arg1, int s, int crf) 1481 { 1482 TCGv t0, t1; 1483 t0 = tcg_temp_new(); 1484 t1 = tcg_temp_new(); 1485 if (s) { 1486 tcg_gen_ext32s_tl(t0, arg0); 1487 tcg_gen_ext32s_tl(t1, arg1); 1488 } else { 1489 tcg_gen_ext32u_tl(t0, arg0); 1490 tcg_gen_ext32u_tl(t1, arg1); 1491 } 1492 gen_op_cmp(t0, t1, s, crf); 1493 } 1494 1495 static inline void gen_op_cmpi32(TCGv arg0, target_ulong arg1, int s, int crf) 1496 { 1497 TCGv t0 = tcg_constant_tl(arg1); 1498 gen_op_cmp32(arg0, t0, s, crf); 1499 } 1500 1501 static inline void gen_set_Rc0(DisasContext *ctx, TCGv reg) 1502 { 1503 if (NARROW_MODE(ctx)) { 1504 gen_op_cmpi32(reg, 0, 1, 0); 1505 } else { 1506 gen_op_cmpi(reg, 0, 1, 0); 1507 } 1508 } 1509 1510 /* cmprb - range comparison: isupper, isaplha, islower*/ 1511 static void gen_cmprb(DisasContext *ctx) 1512 { 1513 TCGv_i32 src1 = tcg_temp_new_i32(); 1514 TCGv_i32 src2 = tcg_temp_new_i32(); 1515 TCGv_i32 src2lo = tcg_temp_new_i32(); 1516 TCGv_i32 src2hi = tcg_temp_new_i32(); 1517 TCGv_i32 crf = cpu_crf[crfD(ctx->opcode)]; 1518 1519 tcg_gen_trunc_tl_i32(src1, cpu_gpr[rA(ctx->opcode)]); 1520 tcg_gen_trunc_tl_i32(src2, cpu_gpr[rB(ctx->opcode)]); 1521 1522 tcg_gen_andi_i32(src1, src1, 0xFF); 1523 tcg_gen_ext8u_i32(src2lo, src2); 1524 tcg_gen_shri_i32(src2, src2, 8); 1525 tcg_gen_ext8u_i32(src2hi, src2); 1526 1527 tcg_gen_setcond_i32(TCG_COND_LEU, src2lo, src2lo, src1); 1528 tcg_gen_setcond_i32(TCG_COND_LEU, src2hi, src1, src2hi); 1529 tcg_gen_and_i32(crf, src2lo, src2hi); 1530 1531 if (ctx->opcode & 0x00200000) { 1532 tcg_gen_shri_i32(src2, src2, 8); 
1533 tcg_gen_ext8u_i32(src2lo, src2); 1534 tcg_gen_shri_i32(src2, src2, 8); 1535 tcg_gen_ext8u_i32(src2hi, src2); 1536 tcg_gen_setcond_i32(TCG_COND_LEU, src2lo, src2lo, src1); 1537 tcg_gen_setcond_i32(TCG_COND_LEU, src2hi, src1, src2hi); 1538 tcg_gen_and_i32(src2lo, src2lo, src2hi); 1539 tcg_gen_or_i32(crf, crf, src2lo); 1540 } 1541 tcg_gen_shli_i32(crf, crf, CRF_GT_BIT); 1542 } 1543 1544 #if defined(TARGET_PPC64) 1545 /* cmpeqb */ 1546 static void gen_cmpeqb(DisasContext *ctx) 1547 { 1548 gen_helper_cmpeqb(cpu_crf[crfD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 1549 cpu_gpr[rB(ctx->opcode)]); 1550 } 1551 #endif 1552 1553 /* isel (PowerPC 2.03 specification) */ 1554 static void gen_isel(DisasContext *ctx) 1555 { 1556 uint32_t bi = rC(ctx->opcode); 1557 uint32_t mask = 0x08 >> (bi & 0x03); 1558 TCGv t0 = tcg_temp_new(); 1559 TCGv zr; 1560 1561 tcg_gen_extu_i32_tl(t0, cpu_crf[bi >> 2]); 1562 tcg_gen_andi_tl(t0, t0, mask); 1563 1564 zr = tcg_constant_tl(0); 1565 tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr[rD(ctx->opcode)], t0, zr, 1566 rA(ctx->opcode) ? 
cpu_gpr[rA(ctx->opcode)] : zr, 1567 cpu_gpr[rB(ctx->opcode)]); 1568 } 1569 1570 /* cmpb: PowerPC 2.05 specification */ 1571 static void gen_cmpb(DisasContext *ctx) 1572 { 1573 gen_helper_cmpb(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], 1574 cpu_gpr[rB(ctx->opcode)]); 1575 } 1576 1577 /*** Integer arithmetic ***/ 1578 1579 static inline void gen_op_arith_compute_ov(DisasContext *ctx, TCGv arg0, 1580 TCGv arg1, TCGv arg2, int sub) 1581 { 1582 TCGv t0 = tcg_temp_new(); 1583 1584 tcg_gen_xor_tl(cpu_ov, arg0, arg2); 1585 tcg_gen_xor_tl(t0, arg1, arg2); 1586 if (sub) { 1587 tcg_gen_and_tl(cpu_ov, cpu_ov, t0); 1588 } else { 1589 tcg_gen_andc_tl(cpu_ov, cpu_ov, t0); 1590 } 1591 if (NARROW_MODE(ctx)) { 1592 tcg_gen_extract_tl(cpu_ov, cpu_ov, 31, 1); 1593 if (is_isa300(ctx)) { 1594 tcg_gen_mov_tl(cpu_ov32, cpu_ov); 1595 } 1596 } else { 1597 if (is_isa300(ctx)) { 1598 tcg_gen_extract_tl(cpu_ov32, cpu_ov, 31, 1); 1599 } 1600 tcg_gen_extract_tl(cpu_ov, cpu_ov, TARGET_LONG_BITS - 1, 1); 1601 } 1602 tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov); 1603 } 1604 1605 static inline void gen_op_arith_compute_ca32(DisasContext *ctx, 1606 TCGv res, TCGv arg0, TCGv arg1, 1607 TCGv ca32, int sub) 1608 { 1609 TCGv t0; 1610 1611 if (!is_isa300(ctx)) { 1612 return; 1613 } 1614 1615 t0 = tcg_temp_new(); 1616 if (sub) { 1617 tcg_gen_eqv_tl(t0, arg0, arg1); 1618 } else { 1619 tcg_gen_xor_tl(t0, arg0, arg1); 1620 } 1621 tcg_gen_xor_tl(t0, t0, res); 1622 tcg_gen_extract_tl(ca32, t0, 32, 1); 1623 } 1624 1625 /* Common add function */ 1626 static inline void gen_op_arith_add(DisasContext *ctx, TCGv ret, TCGv arg1, 1627 TCGv arg2, TCGv ca, TCGv ca32, 1628 bool add_ca, bool compute_ca, 1629 bool compute_ov, bool compute_rc0) 1630 { 1631 TCGv t0 = ret; 1632 1633 if (compute_ca || compute_ov) { 1634 t0 = tcg_temp_new(); 1635 } 1636 1637 if (compute_ca) { 1638 if (NARROW_MODE(ctx)) { 1639 /* 1640 * Caution: a non-obvious corner case of the spec is that 1641 * we must produce the *entire* 64-bit addition, 
but 1642 * produce the carry into bit 32. 1643 */ 1644 TCGv t1 = tcg_temp_new(); 1645 tcg_gen_xor_tl(t1, arg1, arg2); /* add without carry */ 1646 tcg_gen_add_tl(t0, arg1, arg2); 1647 if (add_ca) { 1648 tcg_gen_add_tl(t0, t0, ca); 1649 } 1650 tcg_gen_xor_tl(ca, t0, t1); /* bits changed w/ carry */ 1651 tcg_gen_extract_tl(ca, ca, 32, 1); 1652 if (is_isa300(ctx)) { 1653 tcg_gen_mov_tl(ca32, ca); 1654 } 1655 } else { 1656 TCGv zero = tcg_constant_tl(0); 1657 if (add_ca) { 1658 tcg_gen_add2_tl(t0, ca, arg1, zero, ca, zero); 1659 tcg_gen_add2_tl(t0, ca, t0, ca, arg2, zero); 1660 } else { 1661 tcg_gen_add2_tl(t0, ca, arg1, zero, arg2, zero); 1662 } 1663 gen_op_arith_compute_ca32(ctx, t0, arg1, arg2, ca32, 0); 1664 } 1665 } else { 1666 tcg_gen_add_tl(t0, arg1, arg2); 1667 if (add_ca) { 1668 tcg_gen_add_tl(t0, t0, ca); 1669 } 1670 } 1671 1672 if (compute_ov) { 1673 gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 0); 1674 } 1675 if (unlikely(compute_rc0)) { 1676 gen_set_Rc0(ctx, t0); 1677 } 1678 1679 if (t0 != ret) { 1680 tcg_gen_mov_tl(ret, t0); 1681 } 1682 } 1683 /* Add functions with two operands */ 1684 #define GEN_INT_ARITH_ADD(name, opc3, ca, add_ca, compute_ca, compute_ov) \ 1685 static void glue(gen_, name)(DisasContext *ctx) \ 1686 { \ 1687 gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], \ 1688 cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \ 1689 ca, glue(ca, 32), \ 1690 add_ca, compute_ca, compute_ov, Rc(ctx->opcode)); \ 1691 } 1692 /* Add functions with one operand and one immediate */ 1693 #define GEN_INT_ARITH_ADD_CONST(name, opc3, const_val, ca, \ 1694 add_ca, compute_ca, compute_ov) \ 1695 static void glue(gen_, name)(DisasContext *ctx) \ 1696 { \ 1697 TCGv t0 = tcg_constant_tl(const_val); \ 1698 gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], \ 1699 cpu_gpr[rA(ctx->opcode)], t0, \ 1700 ca, glue(ca, 32), \ 1701 add_ca, compute_ca, compute_ov, Rc(ctx->opcode)); \ 1702 } 1703 1704 /* add add. addo addo. 
*/ 1705 GEN_INT_ARITH_ADD(add, 0x08, cpu_ca, 0, 0, 0) 1706 GEN_INT_ARITH_ADD(addo, 0x18, cpu_ca, 0, 0, 1) 1707 /* addc addc. addco addco. */ 1708 GEN_INT_ARITH_ADD(addc, 0x00, cpu_ca, 0, 1, 0) 1709 GEN_INT_ARITH_ADD(addco, 0x10, cpu_ca, 0, 1, 1) 1710 /* adde adde. addeo addeo. */ 1711 GEN_INT_ARITH_ADD(adde, 0x04, cpu_ca, 1, 1, 0) 1712 GEN_INT_ARITH_ADD(addeo, 0x14, cpu_ca, 1, 1, 1) 1713 /* addme addme. addmeo addmeo. */ 1714 GEN_INT_ARITH_ADD_CONST(addme, 0x07, -1LL, cpu_ca, 1, 1, 0) 1715 GEN_INT_ARITH_ADD_CONST(addmeo, 0x17, -1LL, cpu_ca, 1, 1, 1) 1716 /* addex */ 1717 GEN_INT_ARITH_ADD(addex, 0x05, cpu_ov, 1, 1, 0); 1718 /* addze addze. addzeo addzeo.*/ 1719 GEN_INT_ARITH_ADD_CONST(addze, 0x06, 0, cpu_ca, 1, 1, 0) 1720 GEN_INT_ARITH_ADD_CONST(addzeo, 0x16, 0, cpu_ca, 1, 1, 1) 1721 /* addic addic.*/ 1722 static inline void gen_op_addic(DisasContext *ctx, bool compute_rc0) 1723 { 1724 TCGv c = tcg_constant_tl(SIMM(ctx->opcode)); 1725 gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 1726 c, cpu_ca, cpu_ca32, 0, 1, 0, compute_rc0); 1727 } 1728 1729 static void gen_addic(DisasContext *ctx) 1730 { 1731 gen_op_addic(ctx, 0); 1732 } 1733 1734 static void gen_addic_(DisasContext *ctx) 1735 { 1736 gen_op_addic(ctx, 1); 1737 } 1738 1739 static inline void gen_op_arith_divw(DisasContext *ctx, TCGv ret, TCGv arg1, 1740 TCGv arg2, int sign, int compute_ov) 1741 { 1742 TCGv_i32 t0 = tcg_temp_new_i32(); 1743 TCGv_i32 t1 = tcg_temp_new_i32(); 1744 TCGv_i32 t2 = tcg_temp_new_i32(); 1745 TCGv_i32 t3 = tcg_temp_new_i32(); 1746 1747 tcg_gen_trunc_tl_i32(t0, arg1); 1748 tcg_gen_trunc_tl_i32(t1, arg2); 1749 if (sign) { 1750 tcg_gen_setcondi_i32(TCG_COND_EQ, t2, t0, INT_MIN); 1751 tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, -1); 1752 tcg_gen_and_i32(t2, t2, t3); 1753 tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, 0); 1754 tcg_gen_or_i32(t2, t2, t3); 1755 tcg_gen_movi_i32(t3, 0); 1756 tcg_gen_movcond_i32(TCG_COND_NE, t1, t2, t3, t2, t1); 1757 tcg_gen_div_i32(t3, t0, 
t1); 1758 tcg_gen_extu_i32_tl(ret, t3); 1759 } else { 1760 tcg_gen_setcondi_i32(TCG_COND_EQ, t2, t1, 0); 1761 tcg_gen_movi_i32(t3, 0); 1762 tcg_gen_movcond_i32(TCG_COND_NE, t1, t2, t3, t2, t1); 1763 tcg_gen_divu_i32(t3, t0, t1); 1764 tcg_gen_extu_i32_tl(ret, t3); 1765 } 1766 if (compute_ov) { 1767 tcg_gen_extu_i32_tl(cpu_ov, t2); 1768 if (is_isa300(ctx)) { 1769 tcg_gen_extu_i32_tl(cpu_ov32, t2); 1770 } 1771 tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov); 1772 } 1773 1774 if (unlikely(Rc(ctx->opcode) != 0)) { 1775 gen_set_Rc0(ctx, ret); 1776 } 1777 } 1778 /* Div functions */ 1779 #define GEN_INT_ARITH_DIVW(name, opc3, sign, compute_ov) \ 1780 static void glue(gen_, name)(DisasContext *ctx) \ 1781 { \ 1782 gen_op_arith_divw(ctx, cpu_gpr[rD(ctx->opcode)], \ 1783 cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \ 1784 sign, compute_ov); \ 1785 } 1786 /* divwu divwu. divwuo divwuo. */ 1787 GEN_INT_ARITH_DIVW(divwu, 0x0E, 0, 0); 1788 GEN_INT_ARITH_DIVW(divwuo, 0x1E, 0, 1); 1789 /* divw divw. divwo divwo. */ 1790 GEN_INT_ARITH_DIVW(divw, 0x0F, 1, 0); 1791 GEN_INT_ARITH_DIVW(divwo, 0x1F, 1, 1); 1792 1793 /* div[wd]eu[o][.] 
 */
/* Extended divide: delegates to a helper, optionally setting OV */
#define GEN_DIVE(name, hlpr, compute_ov)                                       \
static void gen_##name(DisasContext *ctx)                                      \
{                                                                              \
    TCGv_i32 t0 = tcg_constant_i32(compute_ov);                                \
    gen_helper_##hlpr(cpu_gpr[rD(ctx->opcode)], cpu_env,                       \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0); \
    if (unlikely(Rc(ctx->opcode) != 0)) {                                      \
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);                            \
    }                                                                          \
}

GEN_DIVE(divweu, divweu, 0);
GEN_DIVE(divweuo, divweu, 1);
GEN_DIVE(divwe, divwe, 0);
GEN_DIVE(divweo, divwe, 1);

#if defined(TARGET_PPC64)
/* 64-bit divide; t2 flags the invalid cases (INT64_MIN / -1, or / 0) */
static inline void gen_op_arith_divd(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int sign, int compute_ov)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();

    tcg_gen_mov_i64(t0, arg1);
    tcg_gen_mov_i64(t1, arg2);
    if (sign) {
        tcg_gen_setcondi_i64(TCG_COND_EQ, t2, t0, INT64_MIN);
        tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, -1);
        tcg_gen_and_i64(t2, t2, t3);
        tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, 0);
        tcg_gen_or_i64(t2, t2, t3);
        tcg_gen_movi_i64(t3, 0);
        /* force the divisor to a safe value when the divide would trap */
        tcg_gen_movcond_i64(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_div_i64(ret, t0, t1);
    } else {
        tcg_gen_setcondi_i64(TCG_COND_EQ, t2, t1, 0);
        tcg_gen_movi_i64(t3, 0);
        tcg_gen_movcond_i64(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_divu_i64(ret, t0, t1);
    }
    if (compute_ov) {
        tcg_gen_mov_tl(cpu_ov, t2);
        if (is_isa300(ctx)) {
            tcg_gen_mov_tl(cpu_ov32, t2);
        }
        tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
    }

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, ret);
    }
}

#define GEN_INT_ARITH_DIVD(name, opc3, sign, compute_ov)                      \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_divd(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],     \
                      sign, compute_ov);                                      \
}
/* divdu divdu. divduo divduo. */
GEN_INT_ARITH_DIVD(divdu, 0x0E, 0, 0);
GEN_INT_ARITH_DIVD(divduo, 0x1E, 0, 1);
/* divd divd. divdo divdo. */
GEN_INT_ARITH_DIVD(divd, 0x0F, 1, 0);
GEN_INT_ARITH_DIVD(divdo, 0x1F, 1, 1);

GEN_DIVE(divdeu, divdeu, 0);
GEN_DIVE(divdeuo, divdeu, 1);
GEN_DIVE(divde, divde, 0);
GEN_DIVE(divdeo, divde, 1);
#endif

/* 32-bit modulo; divisor forced to a safe value when it would trap */
static inline void gen_op_arith_modw(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int sign)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t0, arg1);
    tcg_gen_trunc_tl_i32(t1, arg2);
    if (sign) {
        TCGv_i32 t2 = tcg_temp_new_i32();
        TCGv_i32 t3 = tcg_temp_new_i32();
        tcg_gen_setcondi_i32(TCG_COND_EQ, t2, t0, INT_MIN);
        tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, -1);
        tcg_gen_and_i32(t2, t2, t3);
        tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, 0);
        tcg_gen_or_i32(t2, t2, t3);
        tcg_gen_movi_i32(t3, 0);
        tcg_gen_movcond_i32(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_rem_i32(t3, t0, t1);
        tcg_gen_ext_i32_tl(ret, t3);
    } else {
        TCGv_i32 t2 = tcg_constant_i32(1);
        TCGv_i32 t3 = tcg_constant_i32(0);
        tcg_gen_movcond_i32(TCG_COND_EQ, t1, t1, t3, t2, t1);
        tcg_gen_remu_i32(t0, t0, t1);
        tcg_gen_extu_i32_tl(ret, t0);
    }
}

#define GEN_INT_ARITH_MODW(name, opc3, sign)                                  \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_modw(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],     \
                      sign);                                                  \
}

GEN_INT_ARITH_MODW(moduw, 0x08, 0);
GEN_INT_ARITH_MODW(modsw, 0x18, 1);

#if defined(TARGET_PPC64)
/* 64-bit modulo; divisor forced to a safe value when it would trap */
static inline void gen_op_arith_modd(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int sign)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
TCGv_i64 t1 = tcg_temp_new_i64(); 1915 1916 tcg_gen_mov_i64(t0, arg1); 1917 tcg_gen_mov_i64(t1, arg2); 1918 if (sign) { 1919 TCGv_i64 t2 = tcg_temp_new_i64(); 1920 TCGv_i64 t3 = tcg_temp_new_i64(); 1921 tcg_gen_setcondi_i64(TCG_COND_EQ, t2, t0, INT64_MIN); 1922 tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, -1); 1923 tcg_gen_and_i64(t2, t2, t3); 1924 tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, 0); 1925 tcg_gen_or_i64(t2, t2, t3); 1926 tcg_gen_movi_i64(t3, 0); 1927 tcg_gen_movcond_i64(TCG_COND_NE, t1, t2, t3, t2, t1); 1928 tcg_gen_rem_i64(ret, t0, t1); 1929 } else { 1930 TCGv_i64 t2 = tcg_constant_i64(1); 1931 TCGv_i64 t3 = tcg_constant_i64(0); 1932 tcg_gen_movcond_i64(TCG_COND_EQ, t1, t1, t3, t2, t1); 1933 tcg_gen_remu_i64(ret, t0, t1); 1934 } 1935 } 1936 1937 #define GEN_INT_ARITH_MODD(name, opc3, sign) \ 1938 static void glue(gen_, name)(DisasContext *ctx) \ 1939 { \ 1940 gen_op_arith_modd(ctx, cpu_gpr[rD(ctx->opcode)], \ 1941 cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \ 1942 sign); \ 1943 } 1944 1945 GEN_INT_ARITH_MODD(modud, 0x08, 0); 1946 GEN_INT_ARITH_MODD(modsd, 0x18, 1); 1947 #endif 1948 1949 /* mulhw mulhw. */ 1950 static void gen_mulhw(DisasContext *ctx) 1951 { 1952 TCGv_i32 t0 = tcg_temp_new_i32(); 1953 TCGv_i32 t1 = tcg_temp_new_i32(); 1954 1955 tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]); 1956 tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]); 1957 tcg_gen_muls2_i32(t0, t1, t0, t1); 1958 tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1); 1959 if (unlikely(Rc(ctx->opcode) != 0)) { 1960 gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); 1961 } 1962 } 1963 1964 /* mulhwu mulhwu. 
*/ 1965 static void gen_mulhwu(DisasContext *ctx) 1966 { 1967 TCGv_i32 t0 = tcg_temp_new_i32(); 1968 TCGv_i32 t1 = tcg_temp_new_i32(); 1969 1970 tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]); 1971 tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]); 1972 tcg_gen_mulu2_i32(t0, t1, t0, t1); 1973 tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1); 1974 if (unlikely(Rc(ctx->opcode) != 0)) { 1975 gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); 1976 } 1977 } 1978 1979 /* mullw mullw. */ 1980 static void gen_mullw(DisasContext *ctx) 1981 { 1982 #if defined(TARGET_PPC64) 1983 TCGv_i64 t0, t1; 1984 t0 = tcg_temp_new_i64(); 1985 t1 = tcg_temp_new_i64(); 1986 tcg_gen_ext32s_tl(t0, cpu_gpr[rA(ctx->opcode)]); 1987 tcg_gen_ext32s_tl(t1, cpu_gpr[rB(ctx->opcode)]); 1988 tcg_gen_mul_i64(cpu_gpr[rD(ctx->opcode)], t0, t1); 1989 #else 1990 tcg_gen_mul_i32(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 1991 cpu_gpr[rB(ctx->opcode)]); 1992 #endif 1993 if (unlikely(Rc(ctx->opcode) != 0)) { 1994 gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); 1995 } 1996 } 1997 1998 /* mullwo mullwo. 
*/ 1999 static void gen_mullwo(DisasContext *ctx) 2000 { 2001 TCGv_i32 t0 = tcg_temp_new_i32(); 2002 TCGv_i32 t1 = tcg_temp_new_i32(); 2003 2004 tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]); 2005 tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]); 2006 tcg_gen_muls2_i32(t0, t1, t0, t1); 2007 #if defined(TARGET_PPC64) 2008 tcg_gen_concat_i32_i64(cpu_gpr[rD(ctx->opcode)], t0, t1); 2009 #else 2010 tcg_gen_mov_i32(cpu_gpr[rD(ctx->opcode)], t0); 2011 #endif 2012 2013 tcg_gen_sari_i32(t0, t0, 31); 2014 tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t1); 2015 tcg_gen_extu_i32_tl(cpu_ov, t0); 2016 if (is_isa300(ctx)) { 2017 tcg_gen_mov_tl(cpu_ov32, cpu_ov); 2018 } 2019 tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov); 2020 2021 if (unlikely(Rc(ctx->opcode) != 0)) { 2022 gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); 2023 } 2024 } 2025 2026 /* mulli */ 2027 static void gen_mulli(DisasContext *ctx) 2028 { 2029 tcg_gen_muli_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 2030 SIMM(ctx->opcode)); 2031 } 2032 2033 #if defined(TARGET_PPC64) 2034 /* mulhd mulhd. */ 2035 static void gen_mulhd(DisasContext *ctx) 2036 { 2037 TCGv lo = tcg_temp_new(); 2038 tcg_gen_muls2_tl(lo, cpu_gpr[rD(ctx->opcode)], 2039 cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); 2040 if (unlikely(Rc(ctx->opcode) != 0)) { 2041 gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); 2042 } 2043 } 2044 2045 /* mulhdu mulhdu. */ 2046 static void gen_mulhdu(DisasContext *ctx) 2047 { 2048 TCGv lo = tcg_temp_new(); 2049 tcg_gen_mulu2_tl(lo, cpu_gpr[rD(ctx->opcode)], 2050 cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); 2051 if (unlikely(Rc(ctx->opcode) != 0)) { 2052 gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); 2053 } 2054 } 2055 2056 /* mulld mulld. 
*/ 2057 static void gen_mulld(DisasContext *ctx) 2058 { 2059 tcg_gen_mul_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 2060 cpu_gpr[rB(ctx->opcode)]); 2061 if (unlikely(Rc(ctx->opcode) != 0)) { 2062 gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); 2063 } 2064 } 2065 2066 /* mulldo mulldo. */ 2067 static void gen_mulldo(DisasContext *ctx) 2068 { 2069 TCGv_i64 t0 = tcg_temp_new_i64(); 2070 TCGv_i64 t1 = tcg_temp_new_i64(); 2071 2072 tcg_gen_muls2_i64(t0, t1, cpu_gpr[rA(ctx->opcode)], 2073 cpu_gpr[rB(ctx->opcode)]); 2074 tcg_gen_mov_i64(cpu_gpr[rD(ctx->opcode)], t0); 2075 2076 tcg_gen_sari_i64(t0, t0, 63); 2077 tcg_gen_setcond_i64(TCG_COND_NE, cpu_ov, t0, t1); 2078 if (is_isa300(ctx)) { 2079 tcg_gen_mov_tl(cpu_ov32, cpu_ov); 2080 } 2081 tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov); 2082 2083 if (unlikely(Rc(ctx->opcode) != 0)) { 2084 gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); 2085 } 2086 } 2087 #endif 2088 2089 /* Common subf function */ 2090 static inline void gen_op_arith_subf(DisasContext *ctx, TCGv ret, TCGv arg1, 2091 TCGv arg2, bool add_ca, bool compute_ca, 2092 bool compute_ov, bool compute_rc0) 2093 { 2094 TCGv t0 = ret; 2095 2096 if (compute_ca || compute_ov) { 2097 t0 = tcg_temp_new(); 2098 } 2099 2100 if (compute_ca) { 2101 /* dest = ~arg1 + arg2 [+ ca]. */ 2102 if (NARROW_MODE(ctx)) { 2103 /* 2104 * Caution: a non-obvious corner case of the spec is that 2105 * we must produce the *entire* 64-bit addition, but 2106 * produce the carry into bit 32. 
2107 */ 2108 TCGv inv1 = tcg_temp_new(); 2109 TCGv t1 = tcg_temp_new(); 2110 tcg_gen_not_tl(inv1, arg1); 2111 if (add_ca) { 2112 tcg_gen_add_tl(t0, arg2, cpu_ca); 2113 } else { 2114 tcg_gen_addi_tl(t0, arg2, 1); 2115 } 2116 tcg_gen_xor_tl(t1, arg2, inv1); /* add without carry */ 2117 tcg_gen_add_tl(t0, t0, inv1); 2118 tcg_gen_xor_tl(cpu_ca, t0, t1); /* bits changes w/ carry */ 2119 tcg_gen_extract_tl(cpu_ca, cpu_ca, 32, 1); 2120 if (is_isa300(ctx)) { 2121 tcg_gen_mov_tl(cpu_ca32, cpu_ca); 2122 } 2123 } else if (add_ca) { 2124 TCGv zero, inv1 = tcg_temp_new(); 2125 tcg_gen_not_tl(inv1, arg1); 2126 zero = tcg_constant_tl(0); 2127 tcg_gen_add2_tl(t0, cpu_ca, arg2, zero, cpu_ca, zero); 2128 tcg_gen_add2_tl(t0, cpu_ca, t0, cpu_ca, inv1, zero); 2129 gen_op_arith_compute_ca32(ctx, t0, inv1, arg2, cpu_ca32, 0); 2130 } else { 2131 tcg_gen_setcond_tl(TCG_COND_GEU, cpu_ca, arg2, arg1); 2132 tcg_gen_sub_tl(t0, arg2, arg1); 2133 gen_op_arith_compute_ca32(ctx, t0, arg1, arg2, cpu_ca32, 1); 2134 } 2135 } else if (add_ca) { 2136 /* 2137 * Since we're ignoring carry-out, we can simplify the 2138 * standard ~arg1 + arg2 + ca to arg2 - arg1 + ca - 1. 
2139 */ 2140 tcg_gen_sub_tl(t0, arg2, arg1); 2141 tcg_gen_add_tl(t0, t0, cpu_ca); 2142 tcg_gen_subi_tl(t0, t0, 1); 2143 } else { 2144 tcg_gen_sub_tl(t0, arg2, arg1); 2145 } 2146 2147 if (compute_ov) { 2148 gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 1); 2149 } 2150 if (unlikely(compute_rc0)) { 2151 gen_set_Rc0(ctx, t0); 2152 } 2153 2154 if (t0 != ret) { 2155 tcg_gen_mov_tl(ret, t0); 2156 } 2157 } 2158 /* Sub functions with Two operands functions */ 2159 #define GEN_INT_ARITH_SUBF(name, opc3, add_ca, compute_ca, compute_ov) \ 2160 static void glue(gen_, name)(DisasContext *ctx) \ 2161 { \ 2162 gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], \ 2163 cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \ 2164 add_ca, compute_ca, compute_ov, Rc(ctx->opcode)); \ 2165 } 2166 /* Sub functions with one operand and one immediate */ 2167 #define GEN_INT_ARITH_SUBF_CONST(name, opc3, const_val, \ 2168 add_ca, compute_ca, compute_ov) \ 2169 static void glue(gen_, name)(DisasContext *ctx) \ 2170 { \ 2171 TCGv t0 = tcg_constant_tl(const_val); \ 2172 gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], \ 2173 cpu_gpr[rA(ctx->opcode)], t0, \ 2174 add_ca, compute_ca, compute_ov, Rc(ctx->opcode)); \ 2175 } 2176 /* subf subf. subfo subfo. */ 2177 GEN_INT_ARITH_SUBF(subf, 0x01, 0, 0, 0) 2178 GEN_INT_ARITH_SUBF(subfo, 0x11, 0, 0, 1) 2179 /* subfc subfc. subfco subfco. */ 2180 GEN_INT_ARITH_SUBF(subfc, 0x00, 0, 1, 0) 2181 GEN_INT_ARITH_SUBF(subfco, 0x10, 0, 1, 1) 2182 /* subfe subfe. subfeo subfo. */ 2183 GEN_INT_ARITH_SUBF(subfe, 0x04, 1, 1, 0) 2184 GEN_INT_ARITH_SUBF(subfeo, 0x14, 1, 1, 1) 2185 /* subfme subfme. subfmeo subfmeo. */ 2186 GEN_INT_ARITH_SUBF_CONST(subfme, 0x07, -1LL, 1, 1, 0) 2187 GEN_INT_ARITH_SUBF_CONST(subfmeo, 0x17, -1LL, 1, 1, 1) 2188 /* subfze subfze. 
subfzeo subfzeo.*/ 2189 GEN_INT_ARITH_SUBF_CONST(subfze, 0x06, 0, 1, 1, 0) 2190 GEN_INT_ARITH_SUBF_CONST(subfzeo, 0x16, 0, 1, 1, 1) 2191 2192 /* subfic */ 2193 static void gen_subfic(DisasContext *ctx) 2194 { 2195 TCGv c = tcg_constant_tl(SIMM(ctx->opcode)); 2196 gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 2197 c, 0, 1, 0, 0); 2198 } 2199 2200 /* neg neg. nego nego. */ 2201 static inline void gen_op_arith_neg(DisasContext *ctx, bool compute_ov) 2202 { 2203 TCGv zero = tcg_constant_tl(0); 2204 gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 2205 zero, 0, 0, compute_ov, Rc(ctx->opcode)); 2206 } 2207 2208 static void gen_neg(DisasContext *ctx) 2209 { 2210 tcg_gen_neg_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); 2211 if (unlikely(Rc(ctx->opcode))) { 2212 gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); 2213 } 2214 } 2215 2216 static void gen_nego(DisasContext *ctx) 2217 { 2218 gen_op_arith_neg(ctx, 1); 2219 } 2220 2221 /*** Integer logical ***/ 2222 #define GEN_LOGICAL2(name, tcg_op, opc, type) \ 2223 static void glue(gen_, name)(DisasContext *ctx) \ 2224 { \ 2225 tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], \ 2226 cpu_gpr[rB(ctx->opcode)]); \ 2227 if (unlikely(Rc(ctx->opcode) != 0)) \ 2228 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); \ 2229 } 2230 2231 #define GEN_LOGICAL1(name, tcg_op, opc, type) \ 2232 static void glue(gen_, name)(DisasContext *ctx) \ 2233 { \ 2234 tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]); \ 2235 if (unlikely(Rc(ctx->opcode) != 0)) \ 2236 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); \ 2237 } 2238 2239 /* and & and. */ 2240 GEN_LOGICAL2(and, tcg_gen_and_tl, 0x00, PPC_INTEGER); 2241 /* andc & andc. */ 2242 GEN_LOGICAL2(andc, tcg_gen_andc_tl, 0x01, PPC_INTEGER); 2243 2244 /* andi. 
 */
static void gen_andi_(DisasContext *ctx)
{
    tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                    UIMM(ctx->opcode));
    /* andi. always records into CR0; there is no non-record form */
    gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}

/* andis. */
static void gen_andis_(DisasContext *ctx)
{
    tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                    UIMM(ctx->opcode) << 16);
    /* andis. always records into CR0; there is no non-record form */
    gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}

/* cntlzw: count leading zeros of the low 32 bits of rS into rA */
static void gen_cntlzw(DisasContext *ctx)
{
    TCGv_i32 t = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t, cpu_gpr[rS(ctx->opcode)]);
    /* clzi bound of 32 is the result for a zero input, as the ISA requires */
    tcg_gen_clzi_i32(t, t, 32);
    tcg_gen_extu_i32_tl(cpu_gpr[rA(ctx->opcode)], t);

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
    }
}

/* cnttzw: count trailing zeros of the low 32 bits of rS into rA */
static void gen_cnttzw(DisasContext *ctx)
{
    TCGv_i32 t = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t, cpu_gpr[rS(ctx->opcode)]);
    /* ctzi bound of 32 is the result for a zero input, as the ISA requires */
    tcg_gen_ctzi_i32(t, t, 32);
    tcg_gen_extu_i32_tl(cpu_gpr[rA(ctx->opcode)], t);

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
    }
}

/* eqv & eqv. */
GEN_LOGICAL2(eqv, tcg_gen_eqv_tl, 0x08, PPC_INTEGER);
/* extsb & extsb. */
GEN_LOGICAL1(extsb, tcg_gen_ext8s_tl, 0x1D, PPC_INTEGER);
/* extsh & extsh. */
GEN_LOGICAL1(extsh, tcg_gen_ext16s_tl, 0x1C, PPC_INTEGER);
/* nand & nand. */
GEN_LOGICAL2(nand, tcg_gen_nand_tl, 0x0E, PPC_INTEGER);
/* nor & nor.
 */
GEN_LOGICAL2(nor, tcg_gen_nor_tl, 0x03, PPC_INTEGER);

#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
/* Exit the TB with EXCP_HLT so that other vCPUs get a chance to run */
static void gen_pause(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_constant_i32(0);
    tcg_gen_st_i32(t0, cpu_env,
                   -offsetof(PowerPCCPU, env) + offsetof(CPUState, halted));

    /* Stop translation, this gives other CPUs a chance to run */
    gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
}
#endif /* defined(TARGET_PPC64) */

/*
 * or & or.  The "or rN,rN,rN" forms double as thread-priority hints
 * (mapped to SPR_PPR below), so the no-op encodings need special care.
 */
static void gen_or(DisasContext *ctx)
{
    int rs, ra, rb;

    rs = rS(ctx->opcode);
    ra = rA(ctx->opcode);
    rb = rB(ctx->opcode);
    /* Optimisation for mr. ri case */
    if (rs != ra || rs != rb) {
        if (rs != rb) {
            tcg_gen_or_tl(cpu_gpr[ra], cpu_gpr[rs], cpu_gpr[rb]);
        } else {
            /* rs == rb: "or" degenerates to a register move (mr) */
            tcg_gen_mov_tl(cpu_gpr[ra], cpu_gpr[rs]);
        }
        if (unlikely(Rc(ctx->opcode) != 0)) {
            gen_set_Rc0(ctx, cpu_gpr[ra]);
        }
    } else if (unlikely(Rc(ctx->opcode) != 0)) {
        /* "or. rN,rN,rN": only CR0 is updated */
        gen_set_Rc0(ctx, cpu_gpr[rs]);
#if defined(TARGET_PPC64)
    } else if (rs != 0) { /* 0 is nop */
        int prio = 0;

        switch (rs) {
        case 1:
            /* Set process priority to low */
            prio = 2;
            break;
        case 6:
            /* Set process priority to medium-low */
            prio = 3;
            break;
        case 2:
            /* Set process priority to normal */
            prio = 4;
            break;
#if !defined(CONFIG_USER_ONLY)
        case 31:
            if (!ctx->pr) {
                /* Set process priority to very low */
                prio = 1;
            }
            break;
        case 5:
            if (!ctx->pr) {
                /* Set process priority to medium-high */
                prio = 5;
            }
            break;
        case 3:
            if (!ctx->pr) {
                /* Set process priority to high */
                prio = 6;
            }
            break;
        case 7:
            if (ctx->hv && !ctx->pr) {
                /* Set process priority to very high */
                prio = 7;
            }
            break;
#endif
        default:
            break;
        }
        if (prio) {
            /* Write the priority into the PPR priority field */
            TCGv t0 = tcg_temp_new();
            gen_load_spr(t0, SPR_PPR);
            tcg_gen_andi_tl(t0, t0, ~0x001C000000000000ULL);
            tcg_gen_ori_tl(t0, t0, ((uint64_t)prio) << 50);
            gen_store_spr(SPR_PPR, t0);
        }
#if !defined(CONFIG_USER_ONLY)
        /*
         * Pause out of TCG otherwise spin loops with smt_low eat too
         * much CPU and the kernel hangs.  This applies to all
         * encodings other than no-op, e.g., miso(rs=26), yield(27),
         * mdoio(29), mdoom(30), and all currently undefined.
         */
        gen_pause(ctx);
#endif
#endif
    }
}
/* orc & orc. */
GEN_LOGICAL2(orc, tcg_gen_orc_tl, 0x0C, PPC_INTEGER);

/* xor & xor. */
static void gen_xor(DisasContext *ctx)
{
    /* Optimisation for "set to zero" case */
    if (rS(ctx->opcode) != rB(ctx->opcode)) {
        tcg_gen_xor_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                       cpu_gpr[rB(ctx->opcode)]);
    } else {
        tcg_gen_movi_tl(cpu_gpr[rA(ctx->opcode)], 0);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
    }
}

/* ori */
static void gen_ori(DisasContext *ctx)
{
    target_ulong uimm = UIMM(ctx->opcode);

    /* rS == rA with a zero immediate is the canonical no-op; emit nothing */
    if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
        return;
    }
    tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm);
}

/* oris */
static void gen_oris(DisasContext *ctx)
{
    target_ulong uimm = UIMM(ctx->opcode);

    if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
        /* NOP */
        return;
    }
    tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                   uimm << 16);
}

/* xori */
static void gen_xori(DisasContext *ctx)
{
    target_ulong uimm = UIMM(ctx->opcode);

    if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
        /* NOP */
        return;
    }
    tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm);
} 2449 2450 /* xoris */ 2451 static void gen_xoris(DisasContext *ctx) 2452 { 2453 target_ulong uimm = UIMM(ctx->opcode); 2454 2455 if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) { 2456 /* NOP */ 2457 return; 2458 } 2459 tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], 2460 uimm << 16); 2461 } 2462 2463 /* popcntb : PowerPC 2.03 specification */ 2464 static void gen_popcntb(DisasContext *ctx) 2465 { 2466 gen_helper_popcntb(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]); 2467 } 2468 2469 static void gen_popcntw(DisasContext *ctx) 2470 { 2471 #if defined(TARGET_PPC64) 2472 gen_helper_popcntw(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]); 2473 #else 2474 tcg_gen_ctpop_i32(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]); 2475 #endif 2476 } 2477 2478 #if defined(TARGET_PPC64) 2479 /* popcntd: PowerPC 2.06 specification */ 2480 static void gen_popcntd(DisasContext *ctx) 2481 { 2482 tcg_gen_ctpop_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]); 2483 } 2484 #endif 2485 2486 /* prtyw: PowerPC 2.05 specification */ 2487 static void gen_prtyw(DisasContext *ctx) 2488 { 2489 TCGv ra = cpu_gpr[rA(ctx->opcode)]; 2490 TCGv rs = cpu_gpr[rS(ctx->opcode)]; 2491 TCGv t0 = tcg_temp_new(); 2492 tcg_gen_shri_tl(t0, rs, 16); 2493 tcg_gen_xor_tl(ra, rs, t0); 2494 tcg_gen_shri_tl(t0, ra, 8); 2495 tcg_gen_xor_tl(ra, ra, t0); 2496 tcg_gen_andi_tl(ra, ra, (target_ulong)0x100000001ULL); 2497 } 2498 2499 #if defined(TARGET_PPC64) 2500 /* prtyd: PowerPC 2.05 specification */ 2501 static void gen_prtyd(DisasContext *ctx) 2502 { 2503 TCGv ra = cpu_gpr[rA(ctx->opcode)]; 2504 TCGv rs = cpu_gpr[rS(ctx->opcode)]; 2505 TCGv t0 = tcg_temp_new(); 2506 tcg_gen_shri_tl(t0, rs, 32); 2507 tcg_gen_xor_tl(ra, rs, t0); 2508 tcg_gen_shri_tl(t0, ra, 16); 2509 tcg_gen_xor_tl(ra, ra, t0); 2510 tcg_gen_shri_tl(t0, ra, 8); 2511 tcg_gen_xor_tl(ra, ra, t0); 2512 tcg_gen_andi_tl(ra, ra, 1); 2513 } 2514 #endif 2515 2516 #if defined(TARGET_PPC64) 2517 /* bpermd */ 2518 static 
void gen_bpermd(DisasContext *ctx) 2519 { 2520 gen_helper_bpermd(cpu_gpr[rA(ctx->opcode)], 2521 cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); 2522 } 2523 #endif 2524 2525 #if defined(TARGET_PPC64) 2526 /* extsw & extsw. */ 2527 GEN_LOGICAL1(extsw, tcg_gen_ext32s_tl, 0x1E, PPC_64B); 2528 2529 /* cntlzd */ 2530 static void gen_cntlzd(DisasContext *ctx) 2531 { 2532 tcg_gen_clzi_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], 64); 2533 if (unlikely(Rc(ctx->opcode) != 0)) { 2534 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); 2535 } 2536 } 2537 2538 /* cnttzd */ 2539 static void gen_cnttzd(DisasContext *ctx) 2540 { 2541 tcg_gen_ctzi_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], 64); 2542 if (unlikely(Rc(ctx->opcode) != 0)) { 2543 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); 2544 } 2545 } 2546 2547 /* darn */ 2548 static void gen_darn(DisasContext *ctx) 2549 { 2550 int l = L(ctx->opcode); 2551 2552 if (l > 2) { 2553 tcg_gen_movi_i64(cpu_gpr[rD(ctx->opcode)], -1); 2554 } else { 2555 translator_io_start(&ctx->base); 2556 if (l == 0) { 2557 gen_helper_darn32(cpu_gpr[rD(ctx->opcode)]); 2558 } else { 2559 /* Return 64-bit random for both CRN and RRN */ 2560 gen_helper_darn64(cpu_gpr[rD(ctx->opcode)]); 2561 } 2562 } 2563 } 2564 #endif 2565 2566 /*** Integer rotate ***/ 2567 2568 /* rlwimi & rlwimi. 
 */
/*
 * rA = (ROTL32(rS, sh) & MASK(mb, me)) | (rA & ~MASK(mb, me)).
 * When the rotate and mask line up with a contiguous insertion, a single
 * deposit op is emitted instead of the generic rotate/and/or sequence.
 */
static void gen_rlwimi(DisasContext *ctx)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    uint32_t sh = SH(ctx->opcode);
    uint32_t mb = MB(ctx->opcode);
    uint32_t me = ME(ctx->opcode);

    if (sh == (31 - me) && mb <= me) {
        tcg_gen_deposit_tl(t_ra, t_ra, t_rs, sh, me - mb + 1);
    } else {
        target_ulong mask;
        bool mask_in_32b = true;
        TCGv t1;

#if defined(TARGET_PPC64)
        /* MB/ME are 32-bit bit positions; rebase them for MASK() on 64 bit */
        mb += 32;
        me += 32;
#endif
        mask = MASK(mb, me);

#if defined(TARGET_PPC64)
        /* A wrapping mask (mb > me) spills above bit 31: rotate in 64 bit */
        if (mask > 0xffffffffu) {
            mask_in_32b = false;
        }
#endif
        t1 = tcg_temp_new();
        if (mask_in_32b) {
            TCGv_i32 t0 = tcg_temp_new_i32();
            tcg_gen_trunc_tl_i32(t0, t_rs);
            tcg_gen_rotli_i32(t0, t0, sh);
            tcg_gen_extu_i32_tl(t1, t0);
        } else {
#if defined(TARGET_PPC64)
            /* Replicate the low word so a 64-bit rotate matches ROTL32 */
            tcg_gen_deposit_i64(t1, t_rs, t_rs, 32, 32);
            tcg_gen_rotli_i64(t1, t1, sh);
#else
            g_assert_not_reached();
#endif
        }

        /* Merge the rotated field into rA under the mask */
        tcg_gen_andi_tl(t1, t1, mask);
        tcg_gen_andi_tl(t_ra, t_ra, ~mask);
        tcg_gen_or_tl(t_ra, t_ra, t1);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}

/* rlwinm & rlwinm.
 */
/* rA = ROTL32(rS, sh) & MASK(mb, me), with deposit/extract fast paths */
static void gen_rlwinm(DisasContext *ctx)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    int sh = SH(ctx->opcode);
    int mb = MB(ctx->opcode);
    int me = ME(ctx->opcode);
    int len = me - mb + 1;
    int rsh = (32 - sh) & 31;

    if (sh != 0 && len > 0 && me == (31 - sh)) {
        /* Mask selects exactly the shifted-in field: a plain deposit */
        tcg_gen_deposit_z_tl(t_ra, t_rs, sh, len);
    } else if (me == 31 && rsh + len <= 32) {
        /* Right-justified field: a plain extract */
        tcg_gen_extract_tl(t_ra, t_rs, rsh, len);
    } else {
        target_ulong mask;
        bool mask_in_32b = true;
#if defined(TARGET_PPC64)
        /* MB/ME are 32-bit bit positions; rebase them for MASK() on 64 bit */
        mb += 32;
        me += 32;
#endif
        mask = MASK(mb, me);
#if defined(TARGET_PPC64)
        /* A wrapping mask (mb > me) spills above bit 31: rotate in 64 bit */
        if (mask > 0xffffffffu) {
            mask_in_32b = false;
        }
#endif
        if (mask_in_32b) {
            if (sh == 0) {
                tcg_gen_andi_tl(t_ra, t_rs, mask);
            } else {
                TCGv_i32 t0 = tcg_temp_new_i32();
                tcg_gen_trunc_tl_i32(t0, t_rs);
                tcg_gen_rotli_i32(t0, t0, sh);
                tcg_gen_andi_i32(t0, t0, mask);
                tcg_gen_extu_i32_tl(t_ra, t0);
            }
        } else {
#if defined(TARGET_PPC64)
            /* Replicate the low word so a 64-bit rotate matches ROTL32 */
            tcg_gen_deposit_i64(t_ra, t_rs, t_rs, 32, 32);
            tcg_gen_rotli_i64(t_ra, t_ra, sh);
            tcg_gen_andi_i64(t_ra, t_ra, mask);
#else
            g_assert_not_reached();
#endif
        }
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}

/* rlwnm & rlwnm.
 */
/* rA = ROTL32(rS, rB & 0x1f) & MASK(mb, me) — variable rotate count */
static void gen_rlwnm(DisasContext *ctx)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    TCGv t_rb = cpu_gpr[rB(ctx->opcode)];
    uint32_t mb = MB(ctx->opcode);
    uint32_t me = ME(ctx->opcode);
    target_ulong mask;
    bool mask_in_32b = true;

#if defined(TARGET_PPC64)
    /* MB/ME are 32-bit bit positions; rebase them for MASK() on 64 bit */
    mb += 32;
    me += 32;
#endif
    mask = MASK(mb, me);

#if defined(TARGET_PPC64)
    /* A wrapping mask (mb > me) spills above bit 31: rotate in 64 bit */
    if (mask > 0xffffffffu) {
        mask_in_32b = false;
    }
#endif
    if (mask_in_32b) {
        TCGv_i32 t0 = tcg_temp_new_i32();
        TCGv_i32 t1 = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(t0, t_rb);
        tcg_gen_trunc_tl_i32(t1, t_rs);
        tcg_gen_andi_i32(t0, t0, 0x1f);
        tcg_gen_rotl_i32(t1, t1, t0);
        tcg_gen_extu_i32_tl(t_ra, t1);
    } else {
#if defined(TARGET_PPC64)
        TCGv_i64 t0 = tcg_temp_new_i64();
        tcg_gen_andi_i64(t0, t_rb, 0x1f);
        /* Replicate the low word so a 64-bit rotate matches ROTL32 */
        tcg_gen_deposit_i64(t_ra, t_rs, t_rs, 32, 32);
        tcg_gen_rotl_i64(t_ra, t_ra, t0);
#else
        g_assert_not_reached();
#endif
    }

    tcg_gen_andi_tl(t_ra, t_ra, mask);

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}

#if defined(TARGET_PPC64)
/* Expand the two entry points for insns with one split opcode bit */
#define GEN_PPC64_R2(name, opc1, opc2)                                        \
static void glue(gen_, name##0)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 0);                                                       \
}                                                                             \
                                                                              \
static void glue(gen_, name##1)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 1);                                                       \
}
/* Expand the four entry points for insns with two split opcode bits */
#define GEN_PPC64_R4(name, opc1, opc2)                                        \
static void glue(gen_, name##0)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 0, 0);                                                    \
}                                                                             \
                                                                              \
static void glue(gen_, name##1)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 0, 1);                                                    \
}                                                                             \
                                                                              \
static void glue(gen_, name##2)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 1, 0);                                                    \
}                                                                             \
                                                                              \
static void glue(gen_, name##3)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 1, 1);                                                    \
}

/*
 * Common expansion for rotate-left-doubleword-immediate insns:
 * rA = ROTL64(rS, sh) & MASK(mb, me), with deposit/extract fast paths.
 */
static void gen_rldinm(DisasContext *ctx, int mb, int me, int sh)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    int len = me - mb + 1;
    int rsh = (64 - sh) & 63;

    if (sh != 0 && len > 0 && me == (63 - sh)) {
        /* Mask selects exactly the shifted-in field: a plain deposit */
        tcg_gen_deposit_z_tl(t_ra, t_rs, sh, len);
    } else if (me == 63 && rsh + len <= 64) {
        /* Right-justified field: a plain extract */
        tcg_gen_extract_tl(t_ra, t_rs, rsh, len);
    } else {
        tcg_gen_rotli_tl(t_ra, t_rs, sh);
        tcg_gen_andi_tl(t_ra, t_ra, MASK(mb, me));
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}

/* rldicl - rldicl. */
static inline void gen_rldicl(DisasContext *ctx, int mbn, int shn)
{
    uint32_t sh, mb;

    /* shn/mbn are the split high bits of the 6-bit SH/MB fields */
    sh = SH(ctx->opcode) | (shn << 5);
    mb = MB(ctx->opcode) | (mbn << 5);
    gen_rldinm(ctx, mb, 63, sh);
}
GEN_PPC64_R4(rldicl, 0x1E, 0x00);

/* rldicr - rldicr. */
static inline void gen_rldicr(DisasContext *ctx, int men, int shn)
{
    uint32_t sh, me;

    /* ME shares the MB field position in this encoding */
    sh = SH(ctx->opcode) | (shn << 5);
    me = MB(ctx->opcode) | (men << 5);
    gen_rldinm(ctx, 0, me, sh);
}
GEN_PPC64_R4(rldicr, 0x1E, 0x02);

/* rldic - rldic. */
static inline void gen_rldic(DisasContext *ctx, int mbn, int shn)
{
    uint32_t sh, mb;

    sh = SH(ctx->opcode) | (shn << 5);
    mb = MB(ctx->opcode) | (mbn << 5);
    gen_rldinm(ctx, mb, 63 - sh, sh);
}
GEN_PPC64_R4(rldic, 0x1E, 0x04);

/* rA = ROTL64(rS, rB & 0x3f) & MASK(mb, me) — variable rotate count */
static void gen_rldnm(DisasContext *ctx, int mb, int me)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    TCGv t_rb = cpu_gpr[rB(ctx->opcode)];
    TCGv t0;

    t0 = tcg_temp_new();
    tcg_gen_andi_tl(t0, t_rb, 0x3f);
    tcg_gen_rotl_tl(t_ra, t_rs, t0);

    tcg_gen_andi_tl(t_ra, t_ra, MASK(mb, me));
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}

/* rldcl - rldcl. */
static inline void gen_rldcl(DisasContext *ctx, int mbn)
{
    uint32_t mb;

    mb = MB(ctx->opcode) | (mbn << 5);
    gen_rldnm(ctx, mb, 63);
}
GEN_PPC64_R2(rldcl, 0x1E, 0x08);

/* rldcr - rldcr. */
static inline void gen_rldcr(DisasContext *ctx, int men)
{
    uint32_t me;

    /* ME shares the MB field position in this encoding */
    me = MB(ctx->opcode) | (men << 5);
    gen_rldnm(ctx, 0, me);
}
GEN_PPC64_R2(rldcr, 0x1E, 0x09);

/* rldimi - rldimi.
*/ 2843 static void gen_rldimi(DisasContext *ctx, int mbn, int shn) 2844 { 2845 TCGv t_ra = cpu_gpr[rA(ctx->opcode)]; 2846 TCGv t_rs = cpu_gpr[rS(ctx->opcode)]; 2847 uint32_t sh = SH(ctx->opcode) | (shn << 5); 2848 uint32_t mb = MB(ctx->opcode) | (mbn << 5); 2849 uint32_t me = 63 - sh; 2850 2851 if (mb <= me) { 2852 tcg_gen_deposit_tl(t_ra, t_ra, t_rs, sh, me - mb + 1); 2853 } else { 2854 target_ulong mask = MASK(mb, me); 2855 TCGv t1 = tcg_temp_new(); 2856 2857 tcg_gen_rotli_tl(t1, t_rs, sh); 2858 tcg_gen_andi_tl(t1, t1, mask); 2859 tcg_gen_andi_tl(t_ra, t_ra, ~mask); 2860 tcg_gen_or_tl(t_ra, t_ra, t1); 2861 } 2862 if (unlikely(Rc(ctx->opcode) != 0)) { 2863 gen_set_Rc0(ctx, t_ra); 2864 } 2865 } 2866 GEN_PPC64_R4(rldimi, 0x1E, 0x06); 2867 #endif 2868 2869 /*** Integer shift ***/ 2870 2871 /* slw & slw. */ 2872 static void gen_slw(DisasContext *ctx) 2873 { 2874 TCGv t0, t1; 2875 2876 t0 = tcg_temp_new(); 2877 /* AND rS with a mask that is 0 when rB >= 0x20 */ 2878 #if defined(TARGET_PPC64) 2879 tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3a); 2880 tcg_gen_sari_tl(t0, t0, 0x3f); 2881 #else 2882 tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1a); 2883 tcg_gen_sari_tl(t0, t0, 0x1f); 2884 #endif 2885 tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0); 2886 t1 = tcg_temp_new(); 2887 tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1f); 2888 tcg_gen_shl_tl(cpu_gpr[rA(ctx->opcode)], t0, t1); 2889 tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); 2890 if (unlikely(Rc(ctx->opcode) != 0)) { 2891 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); 2892 } 2893 } 2894 2895 /* sraw & sraw. */ 2896 static void gen_sraw(DisasContext *ctx) 2897 { 2898 gen_helper_sraw(cpu_gpr[rA(ctx->opcode)], cpu_env, 2899 cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); 2900 if (unlikely(Rc(ctx->opcode) != 0)) { 2901 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); 2902 } 2903 } 2904 2905 /* srawi & srawi. 
*/ 2906 static void gen_srawi(DisasContext *ctx) 2907 { 2908 int sh = SH(ctx->opcode); 2909 TCGv dst = cpu_gpr[rA(ctx->opcode)]; 2910 TCGv src = cpu_gpr[rS(ctx->opcode)]; 2911 if (sh == 0) { 2912 tcg_gen_ext32s_tl(dst, src); 2913 tcg_gen_movi_tl(cpu_ca, 0); 2914 if (is_isa300(ctx)) { 2915 tcg_gen_movi_tl(cpu_ca32, 0); 2916 } 2917 } else { 2918 TCGv t0; 2919 tcg_gen_ext32s_tl(dst, src); 2920 tcg_gen_andi_tl(cpu_ca, dst, (1ULL << sh) - 1); 2921 t0 = tcg_temp_new(); 2922 tcg_gen_sari_tl(t0, dst, TARGET_LONG_BITS - 1); 2923 tcg_gen_and_tl(cpu_ca, cpu_ca, t0); 2924 tcg_gen_setcondi_tl(TCG_COND_NE, cpu_ca, cpu_ca, 0); 2925 if (is_isa300(ctx)) { 2926 tcg_gen_mov_tl(cpu_ca32, cpu_ca); 2927 } 2928 tcg_gen_sari_tl(dst, dst, sh); 2929 } 2930 if (unlikely(Rc(ctx->opcode) != 0)) { 2931 gen_set_Rc0(ctx, dst); 2932 } 2933 } 2934 2935 /* srw & srw. */ 2936 static void gen_srw(DisasContext *ctx) 2937 { 2938 TCGv t0, t1; 2939 2940 t0 = tcg_temp_new(); 2941 /* AND rS with a mask that is 0 when rB >= 0x20 */ 2942 #if defined(TARGET_PPC64) 2943 tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3a); 2944 tcg_gen_sari_tl(t0, t0, 0x3f); 2945 #else 2946 tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1a); 2947 tcg_gen_sari_tl(t0, t0, 0x1f); 2948 #endif 2949 tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0); 2950 tcg_gen_ext32u_tl(t0, t0); 2951 t1 = tcg_temp_new(); 2952 tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1f); 2953 tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], t0, t1); 2954 if (unlikely(Rc(ctx->opcode) != 0)) { 2955 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); 2956 } 2957 } 2958 2959 #if defined(TARGET_PPC64) 2960 /* sld & sld. 
*/ 2961 static void gen_sld(DisasContext *ctx) 2962 { 2963 TCGv t0, t1; 2964 2965 t0 = tcg_temp_new(); 2966 /* AND rS with a mask that is 0 when rB >= 0x40 */ 2967 tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x39); 2968 tcg_gen_sari_tl(t0, t0, 0x3f); 2969 tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0); 2970 t1 = tcg_temp_new(); 2971 tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x3f); 2972 tcg_gen_shl_tl(cpu_gpr[rA(ctx->opcode)], t0, t1); 2973 if (unlikely(Rc(ctx->opcode) != 0)) { 2974 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); 2975 } 2976 } 2977 2978 /* srad & srad. */ 2979 static void gen_srad(DisasContext *ctx) 2980 { 2981 gen_helper_srad(cpu_gpr[rA(ctx->opcode)], cpu_env, 2982 cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); 2983 if (unlikely(Rc(ctx->opcode) != 0)) { 2984 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); 2985 } 2986 } 2987 /* sradi & sradi. */ 2988 static inline void gen_sradi(DisasContext *ctx, int n) 2989 { 2990 int sh = SH(ctx->opcode) + (n << 5); 2991 TCGv dst = cpu_gpr[rA(ctx->opcode)]; 2992 TCGv src = cpu_gpr[rS(ctx->opcode)]; 2993 if (sh == 0) { 2994 tcg_gen_mov_tl(dst, src); 2995 tcg_gen_movi_tl(cpu_ca, 0); 2996 if (is_isa300(ctx)) { 2997 tcg_gen_movi_tl(cpu_ca32, 0); 2998 } 2999 } else { 3000 TCGv t0; 3001 tcg_gen_andi_tl(cpu_ca, src, (1ULL << sh) - 1); 3002 t0 = tcg_temp_new(); 3003 tcg_gen_sari_tl(t0, src, TARGET_LONG_BITS - 1); 3004 tcg_gen_and_tl(cpu_ca, cpu_ca, t0); 3005 tcg_gen_setcondi_tl(TCG_COND_NE, cpu_ca, cpu_ca, 0); 3006 if (is_isa300(ctx)) { 3007 tcg_gen_mov_tl(cpu_ca32, cpu_ca); 3008 } 3009 tcg_gen_sari_tl(dst, src, sh); 3010 } 3011 if (unlikely(Rc(ctx->opcode) != 0)) { 3012 gen_set_Rc0(ctx, dst); 3013 } 3014 } 3015 3016 static void gen_sradi0(DisasContext *ctx) 3017 { 3018 gen_sradi(ctx, 0); 3019 } 3020 3021 static void gen_sradi1(DisasContext *ctx) 3022 { 3023 gen_sradi(ctx, 1); 3024 } 3025 3026 /* extswsli & extswsli. 
*/ 3027 static inline void gen_extswsli(DisasContext *ctx, int n) 3028 { 3029 int sh = SH(ctx->opcode) + (n << 5); 3030 TCGv dst = cpu_gpr[rA(ctx->opcode)]; 3031 TCGv src = cpu_gpr[rS(ctx->opcode)]; 3032 3033 tcg_gen_ext32s_tl(dst, src); 3034 tcg_gen_shli_tl(dst, dst, sh); 3035 if (unlikely(Rc(ctx->opcode) != 0)) { 3036 gen_set_Rc0(ctx, dst); 3037 } 3038 } 3039 3040 static void gen_extswsli0(DisasContext *ctx) 3041 { 3042 gen_extswsli(ctx, 0); 3043 } 3044 3045 static void gen_extswsli1(DisasContext *ctx) 3046 { 3047 gen_extswsli(ctx, 1); 3048 } 3049 3050 /* srd & srd. */ 3051 static void gen_srd(DisasContext *ctx) 3052 { 3053 TCGv t0, t1; 3054 3055 t0 = tcg_temp_new(); 3056 /* AND rS with a mask that is 0 when rB >= 0x40 */ 3057 tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x39); 3058 tcg_gen_sari_tl(t0, t0, 0x3f); 3059 tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0); 3060 t1 = tcg_temp_new(); 3061 tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x3f); 3062 tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], t0, t1); 3063 if (unlikely(Rc(ctx->opcode) != 0)) { 3064 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); 3065 } 3066 } 3067 #endif 3068 3069 /*** Addressing modes ***/ 3070 /* Register indirect with immediate index : EA = (rA|0) + SIMM */ 3071 static inline void gen_addr_imm_index(DisasContext *ctx, TCGv EA, 3072 target_long maskl) 3073 { 3074 target_long simm = SIMM(ctx->opcode); 3075 3076 simm &= ~maskl; 3077 if (rA(ctx->opcode) == 0) { 3078 if (NARROW_MODE(ctx)) { 3079 simm = (uint32_t)simm; 3080 } 3081 tcg_gen_movi_tl(EA, simm); 3082 } else if (likely(simm != 0)) { 3083 tcg_gen_addi_tl(EA, cpu_gpr[rA(ctx->opcode)], simm); 3084 if (NARROW_MODE(ctx)) { 3085 tcg_gen_ext32u_tl(EA, EA); 3086 } 3087 } else { 3088 if (NARROW_MODE(ctx)) { 3089 tcg_gen_ext32u_tl(EA, cpu_gpr[rA(ctx->opcode)]); 3090 } else { 3091 tcg_gen_mov_tl(EA, cpu_gpr[rA(ctx->opcode)]); 3092 } 3093 } 3094 } 3095 3096 static inline void gen_addr_reg_index(DisasContext *ctx, TCGv EA) 3097 { 3098 if 
(rA(ctx->opcode) == 0) { 3099 if (NARROW_MODE(ctx)) { 3100 tcg_gen_ext32u_tl(EA, cpu_gpr[rB(ctx->opcode)]); 3101 } else { 3102 tcg_gen_mov_tl(EA, cpu_gpr[rB(ctx->opcode)]); 3103 } 3104 } else { 3105 tcg_gen_add_tl(EA, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); 3106 if (NARROW_MODE(ctx)) { 3107 tcg_gen_ext32u_tl(EA, EA); 3108 } 3109 } 3110 } 3111 3112 static inline void gen_addr_register(DisasContext *ctx, TCGv EA) 3113 { 3114 if (rA(ctx->opcode) == 0) { 3115 tcg_gen_movi_tl(EA, 0); 3116 } else if (NARROW_MODE(ctx)) { 3117 tcg_gen_ext32u_tl(EA, cpu_gpr[rA(ctx->opcode)]); 3118 } else { 3119 tcg_gen_mov_tl(EA, cpu_gpr[rA(ctx->opcode)]); 3120 } 3121 } 3122 3123 static inline void gen_addr_add(DisasContext *ctx, TCGv ret, TCGv arg1, 3124 target_long val) 3125 { 3126 tcg_gen_addi_tl(ret, arg1, val); 3127 if (NARROW_MODE(ctx)) { 3128 tcg_gen_ext32u_tl(ret, ret); 3129 } 3130 } 3131 3132 static inline void gen_align_no_le(DisasContext *ctx) 3133 { 3134 gen_exception_err(ctx, POWERPC_EXCP_ALIGN, 3135 (ctx->opcode & 0x03FF0000) | POWERPC_EXCP_ALIGN_LE); 3136 } 3137 3138 static TCGv do_ea_calc(DisasContext *ctx, int ra, TCGv displ) 3139 { 3140 TCGv ea = tcg_temp_new(); 3141 if (ra) { 3142 tcg_gen_add_tl(ea, cpu_gpr[ra], displ); 3143 } else { 3144 tcg_gen_mov_tl(ea, displ); 3145 } 3146 if (NARROW_MODE(ctx)) { 3147 tcg_gen_ext32u_tl(ea, ea); 3148 } 3149 return ea; 3150 } 3151 3152 /*** Integer load ***/ 3153 #define DEF_MEMOP(op) ((op) | ctx->default_tcg_memop_mask) 3154 #define BSWAP_MEMOP(op) ((op) | (ctx->default_tcg_memop_mask ^ MO_BSWAP)) 3155 3156 #define GEN_QEMU_LOAD_TL(ldop, op) \ 3157 static void glue(gen_qemu_, ldop)(DisasContext *ctx, \ 3158 TCGv val, \ 3159 TCGv addr) \ 3160 { \ 3161 tcg_gen_qemu_ld_tl(val, addr, ctx->mem_idx, op); \ 3162 } 3163 3164 GEN_QEMU_LOAD_TL(ld8u, DEF_MEMOP(MO_UB)) 3165 GEN_QEMU_LOAD_TL(ld16u, DEF_MEMOP(MO_UW)) 3166 GEN_QEMU_LOAD_TL(ld16s, DEF_MEMOP(MO_SW)) 3167 GEN_QEMU_LOAD_TL(ld32u, DEF_MEMOP(MO_UL)) 3168 
GEN_QEMU_LOAD_TL(ld32s, DEF_MEMOP(MO_SL)) 3169 3170 GEN_QEMU_LOAD_TL(ld16ur, BSWAP_MEMOP(MO_UW)) 3171 GEN_QEMU_LOAD_TL(ld32ur, BSWAP_MEMOP(MO_UL)) 3172 3173 #define GEN_QEMU_LOAD_64(ldop, op) \ 3174 static void glue(gen_qemu_, glue(ldop, _i64))(DisasContext *ctx, \ 3175 TCGv_i64 val, \ 3176 TCGv addr) \ 3177 { \ 3178 tcg_gen_qemu_ld_i64(val, addr, ctx->mem_idx, op); \ 3179 } 3180 3181 GEN_QEMU_LOAD_64(ld8u, DEF_MEMOP(MO_UB)) 3182 GEN_QEMU_LOAD_64(ld16u, DEF_MEMOP(MO_UW)) 3183 GEN_QEMU_LOAD_64(ld32u, DEF_MEMOP(MO_UL)) 3184 GEN_QEMU_LOAD_64(ld32s, DEF_MEMOP(MO_SL)) 3185 GEN_QEMU_LOAD_64(ld64, DEF_MEMOP(MO_UQ)) 3186 3187 #if defined(TARGET_PPC64) 3188 GEN_QEMU_LOAD_64(ld64ur, BSWAP_MEMOP(MO_UQ)) 3189 #endif 3190 3191 #define GEN_QEMU_STORE_TL(stop, op) \ 3192 static void glue(gen_qemu_, stop)(DisasContext *ctx, \ 3193 TCGv val, \ 3194 TCGv addr) \ 3195 { \ 3196 tcg_gen_qemu_st_tl(val, addr, ctx->mem_idx, op); \ 3197 } 3198 3199 #if defined(TARGET_PPC64) || !defined(CONFIG_USER_ONLY) 3200 GEN_QEMU_STORE_TL(st8, DEF_MEMOP(MO_UB)) 3201 #endif 3202 GEN_QEMU_STORE_TL(st16, DEF_MEMOP(MO_UW)) 3203 GEN_QEMU_STORE_TL(st32, DEF_MEMOP(MO_UL)) 3204 3205 GEN_QEMU_STORE_TL(st16r, BSWAP_MEMOP(MO_UW)) 3206 GEN_QEMU_STORE_TL(st32r, BSWAP_MEMOP(MO_UL)) 3207 3208 #define GEN_QEMU_STORE_64(stop, op) \ 3209 static void glue(gen_qemu_, glue(stop, _i64))(DisasContext *ctx, \ 3210 TCGv_i64 val, \ 3211 TCGv addr) \ 3212 { \ 3213 tcg_gen_qemu_st_i64(val, addr, ctx->mem_idx, op); \ 3214 } 3215 3216 GEN_QEMU_STORE_64(st8, DEF_MEMOP(MO_UB)) 3217 GEN_QEMU_STORE_64(st16, DEF_MEMOP(MO_UW)) 3218 GEN_QEMU_STORE_64(st32, DEF_MEMOP(MO_UL)) 3219 GEN_QEMU_STORE_64(st64, DEF_MEMOP(MO_UQ)) 3220 3221 #if defined(TARGET_PPC64) 3222 GEN_QEMU_STORE_64(st64r, BSWAP_MEMOP(MO_UQ)) 3223 #endif 3224 3225 #define GEN_LDX_E(name, ldop, opc2, opc3, type, type2, chk) \ 3226 static void glue(gen_, name##x)(DisasContext *ctx) \ 3227 { \ 3228 TCGv EA; \ 3229 chk(ctx); \ 3230 gen_set_access_type(ctx, ACCESS_INT); \ 3231 EA 
= tcg_temp_new(); \ 3232 gen_addr_reg_index(ctx, EA); \ 3233 gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA); \ 3234 } 3235 3236 #define GEN_LDX(name, ldop, opc2, opc3, type) \ 3237 GEN_LDX_E(name, ldop, opc2, opc3, type, PPC_NONE, CHK_NONE) 3238 3239 #define GEN_LDX_HVRM(name, ldop, opc2, opc3, type) \ 3240 GEN_LDX_E(name, ldop, opc2, opc3, type, PPC_NONE, CHK_HVRM) 3241 3242 #define GEN_LDEPX(name, ldop, opc2, opc3) \ 3243 static void glue(gen_, name##epx)(DisasContext *ctx) \ 3244 { \ 3245 TCGv EA; \ 3246 CHK_SV(ctx); \ 3247 gen_set_access_type(ctx, ACCESS_INT); \ 3248 EA = tcg_temp_new(); \ 3249 gen_addr_reg_index(ctx, EA); \ 3250 tcg_gen_qemu_ld_tl(cpu_gpr[rD(ctx->opcode)], EA, PPC_TLB_EPID_LOAD, ldop);\ 3251 } 3252 3253 GEN_LDEPX(lb, DEF_MEMOP(MO_UB), 0x1F, 0x02) 3254 GEN_LDEPX(lh, DEF_MEMOP(MO_UW), 0x1F, 0x08) 3255 GEN_LDEPX(lw, DEF_MEMOP(MO_UL), 0x1F, 0x00) 3256 #if defined(TARGET_PPC64) 3257 GEN_LDEPX(ld, DEF_MEMOP(MO_UQ), 0x1D, 0x00) 3258 #endif 3259 3260 #if defined(TARGET_PPC64) 3261 /* CI load/store variants */ 3262 GEN_LDX_HVRM(ldcix, ld64_i64, 0x15, 0x1b, PPC_CILDST) 3263 GEN_LDX_HVRM(lwzcix, ld32u, 0x15, 0x15, PPC_CILDST) 3264 GEN_LDX_HVRM(lhzcix, ld16u, 0x15, 0x19, PPC_CILDST) 3265 GEN_LDX_HVRM(lbzcix, ld8u, 0x15, 0x1a, PPC_CILDST) 3266 #endif 3267 3268 /*** Integer store ***/ 3269 #define GEN_STX_E(name, stop, opc2, opc3, type, type2, chk) \ 3270 static void glue(gen_, name##x)(DisasContext *ctx) \ 3271 { \ 3272 TCGv EA; \ 3273 chk(ctx); \ 3274 gen_set_access_type(ctx, ACCESS_INT); \ 3275 EA = tcg_temp_new(); \ 3276 gen_addr_reg_index(ctx, EA); \ 3277 gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA); \ 3278 } 3279 #define GEN_STX(name, stop, opc2, opc3, type) \ 3280 GEN_STX_E(name, stop, opc2, opc3, type, PPC_NONE, CHK_NONE) 3281 3282 #define GEN_STX_HVRM(name, stop, opc2, opc3, type) \ 3283 GEN_STX_E(name, stop, opc2, opc3, type, PPC_NONE, CHK_HVRM) 3284 3285 #define GEN_STEPX(name, stop, opc2, opc3) \ 3286 static void glue(gen_, 
name##epx)(DisasContext *ctx) \ 3287 { \ 3288 TCGv EA; \ 3289 CHK_SV(ctx); \ 3290 gen_set_access_type(ctx, ACCESS_INT); \ 3291 EA = tcg_temp_new(); \ 3292 gen_addr_reg_index(ctx, EA); \ 3293 tcg_gen_qemu_st_tl( \ 3294 cpu_gpr[rD(ctx->opcode)], EA, PPC_TLB_EPID_STORE, stop); \ 3295 } 3296 3297 GEN_STEPX(stb, DEF_MEMOP(MO_UB), 0x1F, 0x06) 3298 GEN_STEPX(sth, DEF_MEMOP(MO_UW), 0x1F, 0x0C) 3299 GEN_STEPX(stw, DEF_MEMOP(MO_UL), 0x1F, 0x04) 3300 #if defined(TARGET_PPC64) 3301 GEN_STEPX(std, DEF_MEMOP(MO_UQ), 0x1d, 0x04) 3302 #endif 3303 3304 #if defined(TARGET_PPC64) 3305 GEN_STX_HVRM(stdcix, st64_i64, 0x15, 0x1f, PPC_CILDST) 3306 GEN_STX_HVRM(stwcix, st32, 0x15, 0x1c, PPC_CILDST) 3307 GEN_STX_HVRM(sthcix, st16, 0x15, 0x1d, PPC_CILDST) 3308 GEN_STX_HVRM(stbcix, st8, 0x15, 0x1e, PPC_CILDST) 3309 #endif 3310 /*** Integer load and store with byte reverse ***/ 3311 3312 /* lhbrx */ 3313 GEN_LDX(lhbr, ld16ur, 0x16, 0x18, PPC_INTEGER); 3314 3315 /* lwbrx */ 3316 GEN_LDX(lwbr, ld32ur, 0x16, 0x10, PPC_INTEGER); 3317 3318 #if defined(TARGET_PPC64) 3319 /* ldbrx */ 3320 GEN_LDX_E(ldbr, ld64ur_i64, 0x14, 0x10, PPC_NONE, PPC2_DBRX, CHK_NONE); 3321 /* stdbrx */ 3322 GEN_STX_E(stdbr, st64r_i64, 0x14, 0x14, PPC_NONE, PPC2_DBRX, CHK_NONE); 3323 #endif /* TARGET_PPC64 */ 3324 3325 /* sthbrx */ 3326 GEN_STX(sthbr, st16r, 0x16, 0x1C, PPC_INTEGER); 3327 /* stwbrx */ 3328 GEN_STX(stwbr, st32r, 0x16, 0x14, PPC_INTEGER); 3329 3330 /*** Integer load and store multiple ***/ 3331 3332 /* lmw */ 3333 static void gen_lmw(DisasContext *ctx) 3334 { 3335 TCGv t0; 3336 TCGv_i32 t1; 3337 3338 if (ctx->le_mode) { 3339 gen_align_no_le(ctx); 3340 return; 3341 } 3342 gen_set_access_type(ctx, ACCESS_INT); 3343 t0 = tcg_temp_new(); 3344 t1 = tcg_constant_i32(rD(ctx->opcode)); 3345 gen_addr_imm_index(ctx, t0, 0); 3346 gen_helper_lmw(cpu_env, t0, t1); 3347 } 3348 3349 /* stmw */ 3350 static void gen_stmw(DisasContext *ctx) 3351 { 3352 TCGv t0; 3353 TCGv_i32 t1; 3354 3355 if (ctx->le_mode) { 3356 
        gen_align_no_le(ctx);
        return;
    }
    gen_set_access_type(ctx, ACCESS_INT);
    t0 = tcg_temp_new();
    t1 = tcg_constant_i32(rS(ctx->opcode));
    gen_addr_imm_index(ctx, t0, 0);
    gen_helper_stmw(cpu_env, t0, t1);
}

/*** Integer load and store strings ***/

/* lswi */
/*
 * The PowerPC32 specification says we must generate an exception if rA
 * is in the range of registers to be loaded.  On the other hand, IBM
 * says this is valid, but rA won't be loaded.  For now, follow the
 * specification...
 */
static void gen_lswi(DisasContext *ctx)
{
    TCGv t0;
    TCGv_i32 t1, t2;
    int nb = NB(ctx->opcode);
    int start = rD(ctx->opcode);
    int ra = rA(ctx->opcode);
    int nr;

    if (ctx->le_mode) {
        gen_align_no_le(ctx);
        return;
    }
    if (nb == 0) {
        /* An NB field of zero encodes a 32-byte transfer */
        nb = 32;
    }
    nr = DIV_ROUND_UP(nb, 4);
    if (unlikely(lsw_reg_in_range(start, nr, ra))) {
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_LSWX);
        return;
    }
    gen_set_access_type(ctx, ACCESS_INT);
    t0 = tcg_temp_new();
    gen_addr_register(ctx, t0);
    t1 = tcg_constant_i32(nb);
    t2 = tcg_constant_i32(start);
    gen_helper_lsw(cpu_env, t0, t1, t2);
}

/* lswx */
static void gen_lswx(DisasContext *ctx)
{
    TCGv t0;
    TCGv_i32 t1, t2, t3;

    if (ctx->le_mode) {
        gen_align_no_le(ctx);
        return;
    }
    gen_set_access_type(ctx, ACCESS_INT);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    t1 = tcg_constant_i32(rD(ctx->opcode));
    t2 = tcg_constant_i32(rA(ctx->opcode));
    t3 = tcg_constant_i32(rB(ctx->opcode));
    gen_helper_lswx(cpu_env, t0, t1, t2, t3);
}

/* stswi */
static void gen_stswi(DisasContext *ctx)
{
    TCGv t0;
    TCGv_i32 t1, t2;
    int nb = NB(ctx->opcode);

    if (ctx->le_mode) {
        gen_align_no_le(ctx);
        return;
    }
    gen_set_access_type(ctx, ACCESS_INT);
t0 = tcg_temp_new();
    gen_addr_register(ctx, t0);
    if (nb == 0) {
        nb = 32;
    }
    t1 = tcg_constant_i32(nb);
    t2 = tcg_constant_i32(rS(ctx->opcode));
    gen_helper_stsw(cpu_env, t0, t1, t2);
}

/* stswx - Store String Word Indexed; byte count is taken from XER. */
static void gen_stswx(DisasContext *ctx)
{
    TCGv t0;
    TCGv_i32 t1, t2;

    /* String instructions are only defined for big-endian mode. */
    if (ctx->le_mode) {
        gen_align_no_le(ctx);
        return;
    }
    gen_set_access_type(ctx, ACCESS_INT);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    t1 = tcg_temp_new_i32();
    /* The byte count is the low 7 bits of XER. */
    tcg_gen_trunc_tl_i32(t1, cpu_xer);
    tcg_gen_andi_i32(t1, t1, 0x7F);
    t2 = tcg_constant_i32(rS(ctx->opcode));
    gen_helper_stsw(cpu_env, t0, t1, t2);
}

/*** Memory synchronisation ***/
/* eieio */
static void gen_eieio(DisasContext *ctx)
{
    TCGBar bar = TCG_MO_ALL;

    /*
     * eieio has complex semantics. It provides memory ordering between
     * operations in the set:
     * - loads from CI memory.
     * - stores to CI memory.
     * - stores to WT memory.
     *
     * It separately also orders memory for operations in the set:
     * - stores to cacheable memory.
     *
     * It also serializes instructions:
     * - dcbt and dcbst.
     *
     * It separately serializes:
     * - tlbie and tlbsync.
     *
     * And separately serializes:
     * - slbieg, slbiag, and slbsync.
     *
     * The end result is that CI memory ordering requires TCG_MO_ALL
     * and it is not possible to special-case more relaxed ordering for
     * cacheable accesses. TCG_BAR_SC is required to provide this
     * serialization.
     */

    /*
     * POWER9 has a eieio instruction variant using bit 6 as a hint to
     * tell the CPU it is a store-forwarding barrier.
     */
    if (ctx->opcode & 0x2000000) {
        /*
         * ISA says that "Reserved fields in instructions are ignored
         * by the processor".
So ignore the bit 6 on non-POWER9 CPU but 3504 * as this is not an instruction software should be using, 3505 * complain to the user. 3506 */ 3507 if (!(ctx->insns_flags2 & PPC2_ISA300)) { 3508 qemu_log_mask(LOG_GUEST_ERROR, "invalid eieio using bit 6 at @" 3509 TARGET_FMT_lx "\n", ctx->cia); 3510 } else { 3511 bar = TCG_MO_ST_LD; 3512 } 3513 } 3514 3515 tcg_gen_mb(bar | TCG_BAR_SC); 3516 } 3517 3518 #if !defined(CONFIG_USER_ONLY) 3519 static inline void gen_check_tlb_flush(DisasContext *ctx, bool global) 3520 { 3521 TCGv_i32 t; 3522 TCGLabel *l; 3523 3524 if (!ctx->lazy_tlb_flush) { 3525 return; 3526 } 3527 l = gen_new_label(); 3528 t = tcg_temp_new_i32(); 3529 tcg_gen_ld_i32(t, cpu_env, offsetof(CPUPPCState, tlb_need_flush)); 3530 tcg_gen_brcondi_i32(TCG_COND_EQ, t, 0, l); 3531 if (global) { 3532 gen_helper_check_tlb_flush_global(cpu_env); 3533 } else { 3534 gen_helper_check_tlb_flush_local(cpu_env); 3535 } 3536 gen_set_label(l); 3537 } 3538 #else 3539 static inline void gen_check_tlb_flush(DisasContext *ctx, bool global) { } 3540 #endif 3541 3542 /* isync */ 3543 static void gen_isync(DisasContext *ctx) 3544 { 3545 /* 3546 * We need to check for a pending TLB flush. 
This can only happen in
     * kernel mode however so check MSR_PR
     */
    if (!ctx->pr) {
        gen_check_tlb_flush(ctx, false);
    }
    /* isync acts as a full context-synchronising barrier. */
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
    ctx->base.is_jmp = DISAS_EXIT_UPDATE;
}

/* Access size in bytes encoded by a MemOp. */
#define MEMOP_GET_SIZE(x)  (1 << ((x) & MO_SIZE))

/*
 * Common code for the load-and-reserve instructions (lbarx, lharx,
 * lwarx, ldarx): perform an aligned load into RD and record the
 * reservation address, length and loaded value so a later st*cx. can
 * check them.
 */
static void gen_load_locked(DisasContext *ctx, MemOp memop)
{
    TCGv gpr = cpu_gpr[rD(ctx->opcode)];
    TCGv t0 = tcg_temp_new();

    gen_set_access_type(ctx, ACCESS_RES);
    gen_addr_reg_index(ctx, t0);
    tcg_gen_qemu_ld_tl(gpr, t0, ctx->mem_idx, memop | MO_ALIGN);
    tcg_gen_mov_tl(cpu_reserve, t0);
    tcg_gen_movi_tl(cpu_reserve_length, memop_size(memop));
    tcg_gen_mov_tl(cpu_reserve_val, gpr);
}

#define LARX(name, memop)                     \
static void gen_##name(DisasContext *ctx)     \
{                                             \
    gen_load_locked(ctx, memop);              \
}

/* lbarx, lharx, lwarx */
LARX(lbarx, DEF_MEMOP(MO_UB))
LARX(lharx, DEF_MEMOP(MO_UW))
LARX(lwarx, DEF_MEMOP(MO_UL))

/*
 * Helper for the lwat/ldat "fetch and increment/decrement" variants:
 * load values at EA and EA+size, conditionally store back the value
 * adjusted by ADDEND, and set RT from the comparison result.
 */
static void gen_fetch_inc_conditional(DisasContext *ctx, MemOp memop,
                                      TCGv EA, TCGCond cond, int addend)
{
    TCGv t = tcg_temp_new();
    TCGv t2 = tcg_temp_new();
    TCGv u = tcg_temp_new();

    tcg_gen_qemu_ld_tl(t, EA, ctx->mem_idx, memop);
    tcg_gen_addi_tl(t2, EA, MEMOP_GET_SIZE(memop));
    tcg_gen_qemu_ld_tl(t2, t2, ctx->mem_idx, memop);
    tcg_gen_addi_tl(u, t, addend);

    /* E.g. for fetch and increment bounded... */
    /* mem(EA,s) = (t != t2 ? u = t + 1 : t) */
    tcg_gen_movcond_tl(cond, u, t, t2, u, t);
    tcg_gen_qemu_st_tl(u, EA, ctx->mem_idx, memop);

    /* RT = (t != t2 ?
t : u = 1<<(s*8-1)) */ 3600 tcg_gen_movi_tl(u, 1 << (MEMOP_GET_SIZE(memop) * 8 - 1)); 3601 tcg_gen_movcond_tl(cond, cpu_gpr[rD(ctx->opcode)], t, t2, t, u); 3602 } 3603 3604 static void gen_ld_atomic(DisasContext *ctx, MemOp memop) 3605 { 3606 uint32_t gpr_FC = FC(ctx->opcode); 3607 TCGv EA = tcg_temp_new(); 3608 int rt = rD(ctx->opcode); 3609 bool need_serial; 3610 TCGv src, dst; 3611 3612 gen_addr_register(ctx, EA); 3613 dst = cpu_gpr[rt]; 3614 src = cpu_gpr[(rt + 1) & 31]; 3615 3616 need_serial = false; 3617 memop |= MO_ALIGN; 3618 switch (gpr_FC) { 3619 case 0: /* Fetch and add */ 3620 tcg_gen_atomic_fetch_add_tl(dst, EA, src, ctx->mem_idx, memop); 3621 break; 3622 case 1: /* Fetch and xor */ 3623 tcg_gen_atomic_fetch_xor_tl(dst, EA, src, ctx->mem_idx, memop); 3624 break; 3625 case 2: /* Fetch and or */ 3626 tcg_gen_atomic_fetch_or_tl(dst, EA, src, ctx->mem_idx, memop); 3627 break; 3628 case 3: /* Fetch and 'and' */ 3629 tcg_gen_atomic_fetch_and_tl(dst, EA, src, ctx->mem_idx, memop); 3630 break; 3631 case 4: /* Fetch and max unsigned */ 3632 tcg_gen_atomic_fetch_umax_tl(dst, EA, src, ctx->mem_idx, memop); 3633 break; 3634 case 5: /* Fetch and max signed */ 3635 tcg_gen_atomic_fetch_smax_tl(dst, EA, src, ctx->mem_idx, memop); 3636 break; 3637 case 6: /* Fetch and min unsigned */ 3638 tcg_gen_atomic_fetch_umin_tl(dst, EA, src, ctx->mem_idx, memop); 3639 break; 3640 case 7: /* Fetch and min signed */ 3641 tcg_gen_atomic_fetch_smin_tl(dst, EA, src, ctx->mem_idx, memop); 3642 break; 3643 case 8: /* Swap */ 3644 tcg_gen_atomic_xchg_tl(dst, EA, src, ctx->mem_idx, memop); 3645 break; 3646 3647 case 16: /* Compare and swap not equal */ 3648 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) { 3649 need_serial = true; 3650 } else { 3651 TCGv t0 = tcg_temp_new(); 3652 TCGv t1 = tcg_temp_new(); 3653 3654 tcg_gen_qemu_ld_tl(t0, EA, ctx->mem_idx, memop); 3655 if ((memop & MO_SIZE) == MO_64 || TARGET_LONG_BITS == 32) { 3656 tcg_gen_mov_tl(t1, src); 3657 } else { 3658 
tcg_gen_ext32u_tl(t1, src); 3659 } 3660 tcg_gen_movcond_tl(TCG_COND_NE, t1, t0, t1, 3661 cpu_gpr[(rt + 2) & 31], t0); 3662 tcg_gen_qemu_st_tl(t1, EA, ctx->mem_idx, memop); 3663 tcg_gen_mov_tl(dst, t0); 3664 } 3665 break; 3666 3667 case 24: /* Fetch and increment bounded */ 3668 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) { 3669 need_serial = true; 3670 } else { 3671 gen_fetch_inc_conditional(ctx, memop, EA, TCG_COND_NE, 1); 3672 } 3673 break; 3674 case 25: /* Fetch and increment equal */ 3675 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) { 3676 need_serial = true; 3677 } else { 3678 gen_fetch_inc_conditional(ctx, memop, EA, TCG_COND_EQ, 1); 3679 } 3680 break; 3681 case 28: /* Fetch and decrement bounded */ 3682 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) { 3683 need_serial = true; 3684 } else { 3685 gen_fetch_inc_conditional(ctx, memop, EA, TCG_COND_NE, -1); 3686 } 3687 break; 3688 3689 default: 3690 /* invoke data storage error handler */ 3691 gen_exception_err(ctx, POWERPC_EXCP_DSI, POWERPC_EXCP_INVAL); 3692 } 3693 3694 if (need_serial) { 3695 /* Restart with exclusive lock. 
*/ 3696 gen_helper_exit_atomic(cpu_env); 3697 ctx->base.is_jmp = DISAS_NORETURN; 3698 } 3699 } 3700 3701 static void gen_lwat(DisasContext *ctx) 3702 { 3703 gen_ld_atomic(ctx, DEF_MEMOP(MO_UL)); 3704 } 3705 3706 #ifdef TARGET_PPC64 3707 static void gen_ldat(DisasContext *ctx) 3708 { 3709 gen_ld_atomic(ctx, DEF_MEMOP(MO_UQ)); 3710 } 3711 #endif 3712 3713 static void gen_st_atomic(DisasContext *ctx, MemOp memop) 3714 { 3715 uint32_t gpr_FC = FC(ctx->opcode); 3716 TCGv EA = tcg_temp_new(); 3717 TCGv src, discard; 3718 3719 gen_addr_register(ctx, EA); 3720 src = cpu_gpr[rD(ctx->opcode)]; 3721 discard = tcg_temp_new(); 3722 3723 memop |= MO_ALIGN; 3724 switch (gpr_FC) { 3725 case 0: /* add and Store */ 3726 tcg_gen_atomic_add_fetch_tl(discard, EA, src, ctx->mem_idx, memop); 3727 break; 3728 case 1: /* xor and Store */ 3729 tcg_gen_atomic_xor_fetch_tl(discard, EA, src, ctx->mem_idx, memop); 3730 break; 3731 case 2: /* Or and Store */ 3732 tcg_gen_atomic_or_fetch_tl(discard, EA, src, ctx->mem_idx, memop); 3733 break; 3734 case 3: /* 'and' and Store */ 3735 tcg_gen_atomic_and_fetch_tl(discard, EA, src, ctx->mem_idx, memop); 3736 break; 3737 case 4: /* Store max unsigned */ 3738 tcg_gen_atomic_umax_fetch_tl(discard, EA, src, ctx->mem_idx, memop); 3739 break; 3740 case 5: /* Store max signed */ 3741 tcg_gen_atomic_smax_fetch_tl(discard, EA, src, ctx->mem_idx, memop); 3742 break; 3743 case 6: /* Store min unsigned */ 3744 tcg_gen_atomic_umin_fetch_tl(discard, EA, src, ctx->mem_idx, memop); 3745 break; 3746 case 7: /* Store min signed */ 3747 tcg_gen_atomic_smin_fetch_tl(discard, EA, src, ctx->mem_idx, memop); 3748 break; 3749 case 24: /* Store twin */ 3750 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) { 3751 /* Restart with exclusive lock. 
*/ 3752 gen_helper_exit_atomic(cpu_env); 3753 ctx->base.is_jmp = DISAS_NORETURN; 3754 } else { 3755 TCGv t = tcg_temp_new(); 3756 TCGv t2 = tcg_temp_new(); 3757 TCGv s = tcg_temp_new(); 3758 TCGv s2 = tcg_temp_new(); 3759 TCGv ea_plus_s = tcg_temp_new(); 3760 3761 tcg_gen_qemu_ld_tl(t, EA, ctx->mem_idx, memop); 3762 tcg_gen_addi_tl(ea_plus_s, EA, MEMOP_GET_SIZE(memop)); 3763 tcg_gen_qemu_ld_tl(t2, ea_plus_s, ctx->mem_idx, memop); 3764 tcg_gen_movcond_tl(TCG_COND_EQ, s, t, t2, src, t); 3765 tcg_gen_movcond_tl(TCG_COND_EQ, s2, t, t2, src, t2); 3766 tcg_gen_qemu_st_tl(s, EA, ctx->mem_idx, memop); 3767 tcg_gen_qemu_st_tl(s2, ea_plus_s, ctx->mem_idx, memop); 3768 } 3769 break; 3770 default: 3771 /* invoke data storage error handler */ 3772 gen_exception_err(ctx, POWERPC_EXCP_DSI, POWERPC_EXCP_INVAL); 3773 } 3774 } 3775 3776 static void gen_stwat(DisasContext *ctx) 3777 { 3778 gen_st_atomic(ctx, DEF_MEMOP(MO_UL)); 3779 } 3780 3781 #ifdef TARGET_PPC64 3782 static void gen_stdat(DisasContext *ctx) 3783 { 3784 gen_st_atomic(ctx, DEF_MEMOP(MO_UQ)); 3785 } 3786 #endif 3787 3788 static void gen_conditional_store(DisasContext *ctx, MemOp memop) 3789 { 3790 TCGLabel *lfail; 3791 TCGv EA; 3792 TCGv cr0; 3793 TCGv t0; 3794 int rs = rS(ctx->opcode); 3795 3796 lfail = gen_new_label(); 3797 EA = tcg_temp_new(); 3798 cr0 = tcg_temp_new(); 3799 t0 = tcg_temp_new(); 3800 3801 tcg_gen_mov_tl(cr0, cpu_so); 3802 gen_set_access_type(ctx, ACCESS_RES); 3803 gen_addr_reg_index(ctx, EA); 3804 tcg_gen_brcond_tl(TCG_COND_NE, EA, cpu_reserve, lfail); 3805 tcg_gen_brcondi_tl(TCG_COND_NE, cpu_reserve_length, memop_size(memop), lfail); 3806 3807 tcg_gen_atomic_cmpxchg_tl(t0, cpu_reserve, cpu_reserve_val, 3808 cpu_gpr[rs], ctx->mem_idx, 3809 DEF_MEMOP(memop) | MO_ALIGN); 3810 tcg_gen_setcond_tl(TCG_COND_EQ, t0, t0, cpu_reserve_val); 3811 tcg_gen_shli_tl(t0, t0, CRF_EQ_BIT); 3812 tcg_gen_or_tl(cr0, cr0, t0); 3813 3814 gen_set_label(lfail); 3815 tcg_gen_trunc_tl_i32(cpu_crf[0], cr0); 3816 
/* The reservation is always cleared, whether stcx. succeeded or not. */
    tcg_gen_movi_tl(cpu_reserve, -1);
}

#define STCX(name, memop)                     \
static void gen_##name(DisasContext *ctx)     \
{                                             \
    gen_conditional_store(ctx, memop);        \
}

/* stbcx. sthcx. stwcx. */
STCX(stbcx_, DEF_MEMOP(MO_UB))
STCX(sthcx_, DEF_MEMOP(MO_UW))
STCX(stwcx_, DEF_MEMOP(MO_UL))

#if defined(TARGET_PPC64)
/* ldarx */
LARX(ldarx, DEF_MEMOP(MO_UQ))
/* stdcx. */
STCX(stdcx_, DEF_MEMOP(MO_UQ))

/* lqarx - quadword load and reserve */
static void gen_lqarx(DisasContext *ctx)
{
    int rd = rD(ctx->opcode);
    TCGv EA, hi, lo;
    TCGv_i128 t16;

    /* RD must be even and must not overlap the address operands. */
    if (unlikely((rd & 1) || (rd == rA(ctx->opcode)) ||
                 (rd == rB(ctx->opcode)))) {
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        return;
    }

    gen_set_access_type(ctx, ACCESS_RES);
    EA = tcg_temp_new();
    gen_addr_reg_index(ctx, EA);

    /* Note that the low part is always in RD+1, even in LE mode. */
    lo = cpu_gpr[rd + 1];
    hi = cpu_gpr[rd];

    t16 = tcg_temp_new_i128();
    tcg_gen_qemu_ld_i128(t16, EA, ctx->mem_idx, DEF_MEMOP(MO_128 | MO_ALIGN));
    tcg_gen_extr_i128_i64(lo, hi, t16);

    /* Record a 16-byte reservation and both halves of the value. */
    tcg_gen_mov_tl(cpu_reserve, EA);
    tcg_gen_movi_tl(cpu_reserve_length, 16);
    tcg_gen_st_tl(hi, cpu_env, offsetof(CPUPPCState, reserve_val));
    tcg_gen_st_tl(lo, cpu_env, offsetof(CPUPPCState, reserve_val2));
}

/* stqcx.
*/ 3867 static void gen_stqcx_(DisasContext *ctx) 3868 { 3869 TCGLabel *lfail; 3870 TCGv EA, t0, t1; 3871 TCGv cr0; 3872 TCGv_i128 cmp, val; 3873 int rs = rS(ctx->opcode); 3874 3875 if (unlikely(rs & 1)) { 3876 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); 3877 return; 3878 } 3879 3880 lfail = gen_new_label(); 3881 EA = tcg_temp_new(); 3882 cr0 = tcg_temp_new(); 3883 3884 tcg_gen_mov_tl(cr0, cpu_so); 3885 gen_set_access_type(ctx, ACCESS_RES); 3886 gen_addr_reg_index(ctx, EA); 3887 tcg_gen_brcond_tl(TCG_COND_NE, EA, cpu_reserve, lfail); 3888 tcg_gen_brcondi_tl(TCG_COND_NE, cpu_reserve_length, 16, lfail); 3889 3890 cmp = tcg_temp_new_i128(); 3891 val = tcg_temp_new_i128(); 3892 3893 tcg_gen_concat_i64_i128(cmp, cpu_reserve_val2, cpu_reserve_val); 3894 3895 /* Note that the low part is always in RS+1, even in LE mode. */ 3896 tcg_gen_concat_i64_i128(val, cpu_gpr[rs + 1], cpu_gpr[rs]); 3897 3898 tcg_gen_atomic_cmpxchg_i128(val, cpu_reserve, cmp, val, ctx->mem_idx, 3899 DEF_MEMOP(MO_128 | MO_ALIGN)); 3900 3901 t0 = tcg_temp_new(); 3902 t1 = tcg_temp_new(); 3903 tcg_gen_extr_i128_i64(t1, t0, val); 3904 3905 tcg_gen_xor_tl(t1, t1, cpu_reserve_val2); 3906 tcg_gen_xor_tl(t0, t0, cpu_reserve_val); 3907 tcg_gen_or_tl(t0, t0, t1); 3908 3909 tcg_gen_setcondi_tl(TCG_COND_EQ, t0, t0, 0); 3910 tcg_gen_shli_tl(t0, t0, CRF_EQ_BIT); 3911 tcg_gen_or_tl(cr0, cr0, t0); 3912 3913 gen_set_label(lfail); 3914 tcg_gen_trunc_tl_i32(cpu_crf[0], cr0); 3915 tcg_gen_movi_tl(cpu_reserve, -1); 3916 } 3917 #endif /* defined(TARGET_PPC64) */ 3918 3919 /* sync */ 3920 static void gen_sync(DisasContext *ctx) 3921 { 3922 TCGBar bar = TCG_MO_ALL; 3923 uint32_t l = (ctx->opcode >> 21) & 3; 3924 3925 if ((l == 1) && (ctx->insns_flags2 & PPC2_MEM_LWSYNC)) { 3926 bar = TCG_MO_LD_LD | TCG_MO_LD_ST | TCG_MO_ST_ST; 3927 } 3928 3929 /* 3930 * We may need to check for a pending TLB flush. 3931 * 3932 * We do this on ptesync (l == 2) on ppc64 and any sync pn ppc32. 
3933 * 3934 * Additionally, this can only happen in kernel mode however so 3935 * check MSR_PR as well. 3936 */ 3937 if (((l == 2) || !(ctx->insns_flags & PPC_64B)) && !ctx->pr) { 3938 gen_check_tlb_flush(ctx, true); 3939 } 3940 3941 tcg_gen_mb(bar | TCG_BAR_SC); 3942 } 3943 3944 /* wait */ 3945 static void gen_wait(DisasContext *ctx) 3946 { 3947 uint32_t wc; 3948 3949 if (ctx->insns_flags & PPC_WAIT) { 3950 /* v2.03-v2.07 define an older incompatible 'wait' encoding. */ 3951 3952 if (ctx->insns_flags2 & PPC2_PM_ISA206) { 3953 /* v2.06 introduced the WC field. WC > 0 may be treated as no-op. */ 3954 wc = WC(ctx->opcode); 3955 } else { 3956 wc = 0; 3957 } 3958 3959 } else if (ctx->insns_flags2 & PPC2_ISA300) { 3960 /* v3.0 defines a new 'wait' encoding. */ 3961 wc = WC(ctx->opcode); 3962 if (ctx->insns_flags2 & PPC2_ISA310) { 3963 uint32_t pl = PL(ctx->opcode); 3964 3965 /* WC 1,2 may be treated as no-op. WC 3 is reserved. */ 3966 if (wc == 3) { 3967 gen_invalid(ctx); 3968 return; 3969 } 3970 3971 /* PL 1-3 are reserved. If WC=2 then the insn is treated as noop. */ 3972 if (pl > 0 && wc != 2) { 3973 gen_invalid(ctx); 3974 return; 3975 } 3976 3977 } else { /* ISA300 */ 3978 /* WC 1-3 are reserved */ 3979 if (wc > 0) { 3980 gen_invalid(ctx); 3981 return; 3982 } 3983 } 3984 3985 } else { 3986 warn_report("wait instruction decoded with wrong ISA flags."); 3987 gen_invalid(ctx); 3988 return; 3989 } 3990 3991 /* 3992 * wait without WC field or with WC=0 waits for an exception / interrupt 3993 * to occur. 3994 */ 3995 if (wc == 0) { 3996 TCGv_i32 t0 = tcg_constant_i32(1); 3997 tcg_gen_st_i32(t0, cpu_env, 3998 -offsetof(PowerPCCPU, env) + offsetof(CPUState, halted)); 3999 /* Stop translation, as the CPU is supposed to sleep from now */ 4000 gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next); 4001 } 4002 4003 /* 4004 * Other wait types must not just wait until an exception occurs because 4005 * ignoring their other wake-up conditions could cause a hang. 
4006 * 4007 * For v2.06 and 2.07, wc=1,2,3 are architected but may be implemented as 4008 * no-ops. 4009 * 4010 * wc=1 and wc=3 explicitly allow the instruction to be treated as a no-op. 4011 * 4012 * wc=2 waits for an implementation-specific condition, such could be 4013 * always true, so it can be implemented as a no-op. 4014 * 4015 * For v3.1, wc=1,2 are architected but may be implemented as no-ops. 4016 * 4017 * wc=1 (waitrsv) waits for an exception or a reservation to be lost. 4018 * Reservation-loss may have implementation-specific conditions, so it 4019 * can be implemented as a no-op. 4020 * 4021 * wc=2 waits for an exception or an amount of time to pass. This 4022 * amount is implementation-specific so it can be implemented as a 4023 * no-op. 4024 * 4025 * ISA v3.1 allows for execution to resume "in the rare case of 4026 * an implementation-dependent event", so in any case software must 4027 * not depend on the architected resumption condition to become 4028 * true, so no-op implementations should be architecturally correct 4029 * (if suboptimal). 
*/
}

#if defined(TARGET_PPC64)
/* doze - enter the lightest power-saving state (hypervisor only) */
static void gen_doze(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv_i32 t;

    CHK_HV(ctx);
    translator_io_start(&ctx->base);
    t = tcg_constant_i32(PPC_PM_DOZE);
    gen_helper_pminsn(cpu_env, t);
    /* Stop translation, as the CPU is supposed to sleep from now */
    gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
#endif /* defined(CONFIG_USER_ONLY) */
}

/* nap - power-saving state (hypervisor only) */
static void gen_nap(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv_i32 t;

    CHK_HV(ctx);
    translator_io_start(&ctx->base);
    t = tcg_constant_i32(PPC_PM_NAP);
    gen_helper_pminsn(cpu_env, t);
    /* Stop translation, as the CPU is supposed to sleep from now */
    gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
#endif /* defined(CONFIG_USER_ONLY) */
}

/* stop - ISA v3.0 power-saving instruction (hypervisor only) */
static void gen_stop(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv_i32 t;

    CHK_HV(ctx);
    translator_io_start(&ctx->base);
    t = tcg_constant_i32(PPC_PM_STOP);
    gen_helper_pminsn(cpu_env, t);
    /* Stop translation, as the CPU is supposed to sleep from now */
    gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
#endif /* defined(CONFIG_USER_ONLY) */
}

/* sleep - power-saving state (hypervisor only) */
static void gen_sleep(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv_i32 t;

    CHK_HV(ctx);
    translator_io_start(&ctx->base);
    t = tcg_constant_i32(PPC_PM_SLEEP);
    gen_helper_pminsn(cpu_env, t);
    /* Stop translation, as the CPU is supposed to sleep from now */
    gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
#endif /* defined(CONFIG_USER_ONLY) */
}

/* rvwinkle - deepest power-saving state (hypervisor only) */
static void gen_rvwinkle(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
4103 TCGv_i32 t; 4104 4105 CHK_HV(ctx); 4106 translator_io_start(&ctx->base); 4107 t = tcg_constant_i32(PPC_PM_RVWINKLE); 4108 gen_helper_pminsn(cpu_env, t); 4109 /* Stop translation, as the CPU is supposed to sleep from now */ 4110 gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next); 4111 #endif /* defined(CONFIG_USER_ONLY) */ 4112 } 4113 #endif /* #if defined(TARGET_PPC64) */ 4114 4115 static inline void gen_update_cfar(DisasContext *ctx, target_ulong nip) 4116 { 4117 #if defined(TARGET_PPC64) 4118 if (ctx->has_cfar) { 4119 tcg_gen_movi_tl(cpu_cfar, nip); 4120 } 4121 #endif 4122 } 4123 4124 #if defined(TARGET_PPC64) 4125 static void pmu_count_insns(DisasContext *ctx) 4126 { 4127 /* 4128 * Do not bother calling the helper if the PMU isn't counting 4129 * instructions. 4130 */ 4131 if (!ctx->pmu_insn_cnt) { 4132 return; 4133 } 4134 4135 #if !defined(CONFIG_USER_ONLY) 4136 TCGLabel *l; 4137 TCGv t0; 4138 4139 /* 4140 * The PMU insns_inc() helper stops the internal PMU timer if a 4141 * counter overflows happens. In that case, if the guest is 4142 * running with icount and we do not handle it beforehand, 4143 * the helper can trigger a 'bad icount read'. 4144 */ 4145 translator_io_start(&ctx->base); 4146 4147 /* Avoid helper calls when only PMC5-6 are enabled. */ 4148 if (!ctx->pmc_other) { 4149 l = gen_new_label(); 4150 t0 = tcg_temp_new(); 4151 4152 gen_load_spr(t0, SPR_POWER_PMC5); 4153 tcg_gen_addi_tl(t0, t0, ctx->base.num_insns); 4154 gen_store_spr(SPR_POWER_PMC5, t0); 4155 /* Check for overflow, if it's enabled */ 4156 if (ctx->mmcr0_pmcjce) { 4157 tcg_gen_brcondi_tl(TCG_COND_LT, t0, PMC_COUNTER_NEGATIVE_VAL, l); 4158 gen_helper_handle_pmc5_overflow(cpu_env); 4159 } 4160 4161 gen_set_label(l); 4162 } else { 4163 gen_helper_insns_inc(cpu_env, tcg_constant_i32(ctx->base.num_insns)); 4164 } 4165 #else 4166 /* 4167 * User mode can read (but not write) PMC5 and start/stop 4168 * the PMU via MMCR0_FC. In this case just increment 4169 * PMC5 with base.num_insns. 
4170 */ 4171 TCGv t0 = tcg_temp_new(); 4172 4173 gen_load_spr(t0, SPR_POWER_PMC5); 4174 tcg_gen_addi_tl(t0, t0, ctx->base.num_insns); 4175 gen_store_spr(SPR_POWER_PMC5, t0); 4176 #endif /* #if !defined(CONFIG_USER_ONLY) */ 4177 } 4178 #else 4179 static void pmu_count_insns(DisasContext *ctx) 4180 { 4181 return; 4182 } 4183 #endif /* #if defined(TARGET_PPC64) */ 4184 4185 static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest) 4186 { 4187 if (unlikely(ctx->singlestep_enabled)) { 4188 return false; 4189 } 4190 return translator_use_goto_tb(&ctx->base, dest); 4191 } 4192 4193 static void gen_lookup_and_goto_ptr(DisasContext *ctx) 4194 { 4195 if (unlikely(ctx->singlestep_enabled)) { 4196 gen_debug_exception(ctx, false); 4197 } else { 4198 /* 4199 * tcg_gen_lookup_and_goto_ptr will exit the TB if 4200 * CF_NO_GOTO_PTR is set. Count insns now. 4201 */ 4202 if (ctx->base.tb->flags & CF_NO_GOTO_PTR) { 4203 pmu_count_insns(ctx); 4204 } 4205 4206 tcg_gen_lookup_and_goto_ptr(); 4207 } 4208 } 4209 4210 /*** Branch ***/ 4211 static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest) 4212 { 4213 if (NARROW_MODE(ctx)) { 4214 dest = (uint32_t) dest; 4215 } 4216 if (use_goto_tb(ctx, dest)) { 4217 pmu_count_insns(ctx); 4218 tcg_gen_goto_tb(n); 4219 tcg_gen_movi_tl(cpu_nip, dest & ~3); 4220 tcg_gen_exit_tb(ctx->base.tb, n); 4221 } else { 4222 tcg_gen_movi_tl(cpu_nip, dest & ~3); 4223 gen_lookup_and_goto_ptr(ctx); 4224 } 4225 } 4226 4227 static inline void gen_setlr(DisasContext *ctx, target_ulong nip) 4228 { 4229 if (NARROW_MODE(ctx)) { 4230 nip = (uint32_t)nip; 4231 } 4232 tcg_gen_movi_tl(cpu_lr, nip); 4233 } 4234 4235 /* b ba bl bla */ 4236 static void gen_b(DisasContext *ctx) 4237 { 4238 target_ulong li, target; 4239 4240 /* sign extend LI */ 4241 li = LI(ctx->opcode); 4242 li = (li ^ 0x02000000) - 0x02000000; 4243 if (likely(AA(ctx->opcode) == 0)) { 4244 target = ctx->cia + li; 4245 } else { 4246 target = li; 4247 } 4248 if (LK(ctx->opcode)) { 4249 
gen_setlr(ctx, ctx->base.pc_next); 4250 } 4251 gen_update_cfar(ctx, ctx->cia); 4252 gen_goto_tb(ctx, 0, target); 4253 ctx->base.is_jmp = DISAS_NORETURN; 4254 } 4255 4256 #define BCOND_IM 0 4257 #define BCOND_LR 1 4258 #define BCOND_CTR 2 4259 #define BCOND_TAR 3 4260 4261 static void gen_bcond(DisasContext *ctx, int type) 4262 { 4263 uint32_t bo = BO(ctx->opcode); 4264 TCGLabel *l1; 4265 TCGv target; 4266 4267 if (type == BCOND_LR || type == BCOND_CTR || type == BCOND_TAR) { 4268 target = tcg_temp_new(); 4269 if (type == BCOND_CTR) { 4270 tcg_gen_mov_tl(target, cpu_ctr); 4271 } else if (type == BCOND_TAR) { 4272 gen_load_spr(target, SPR_TAR); 4273 } else { 4274 tcg_gen_mov_tl(target, cpu_lr); 4275 } 4276 } else { 4277 target = NULL; 4278 } 4279 if (LK(ctx->opcode)) { 4280 gen_setlr(ctx, ctx->base.pc_next); 4281 } 4282 l1 = gen_new_label(); 4283 if ((bo & 0x4) == 0) { 4284 /* Decrement and test CTR */ 4285 TCGv temp = tcg_temp_new(); 4286 4287 if (type == BCOND_CTR) { 4288 /* 4289 * All ISAs up to v3 describe this form of bcctr as invalid but 4290 * some processors, ie. 64-bit server processors compliant with 4291 * arch 2.x, do implement a "test and decrement" logic instead, 4292 * as described in their respective UMs. This logic involves CTR 4293 * to act as both the branch target and a counter, which makes 4294 * it basically useless and thus never used in real code. 4295 * 4296 * This form was hence chosen to trigger extra micro-architectural 4297 * side-effect on real HW needed for the Spectre v2 workaround. 4298 * It is up to guests that implement such workaround, ie. linux, to 4299 * use this form in a way it just triggers the side-effect without 4300 * doing anything else harmful. 
4301 */ 4302 if (unlikely(!is_book3s_arch2x(ctx))) { 4303 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); 4304 return; 4305 } 4306 4307 if (NARROW_MODE(ctx)) { 4308 tcg_gen_ext32u_tl(temp, cpu_ctr); 4309 } else { 4310 tcg_gen_mov_tl(temp, cpu_ctr); 4311 } 4312 if (bo & 0x2) { 4313 tcg_gen_brcondi_tl(TCG_COND_NE, temp, 0, l1); 4314 } else { 4315 tcg_gen_brcondi_tl(TCG_COND_EQ, temp, 0, l1); 4316 } 4317 tcg_gen_subi_tl(cpu_ctr, cpu_ctr, 1); 4318 } else { 4319 tcg_gen_subi_tl(cpu_ctr, cpu_ctr, 1); 4320 if (NARROW_MODE(ctx)) { 4321 tcg_gen_ext32u_tl(temp, cpu_ctr); 4322 } else { 4323 tcg_gen_mov_tl(temp, cpu_ctr); 4324 } 4325 if (bo & 0x2) { 4326 tcg_gen_brcondi_tl(TCG_COND_NE, temp, 0, l1); 4327 } else { 4328 tcg_gen_brcondi_tl(TCG_COND_EQ, temp, 0, l1); 4329 } 4330 } 4331 } 4332 if ((bo & 0x10) == 0) { 4333 /* Test CR */ 4334 uint32_t bi = BI(ctx->opcode); 4335 uint32_t mask = 0x08 >> (bi & 0x03); 4336 TCGv_i32 temp = tcg_temp_new_i32(); 4337 4338 if (bo & 0x8) { 4339 tcg_gen_andi_i32(temp, cpu_crf[bi >> 2], mask); 4340 tcg_gen_brcondi_i32(TCG_COND_EQ, temp, 0, l1); 4341 } else { 4342 tcg_gen_andi_i32(temp, cpu_crf[bi >> 2], mask); 4343 tcg_gen_brcondi_i32(TCG_COND_NE, temp, 0, l1); 4344 } 4345 } 4346 gen_update_cfar(ctx, ctx->cia); 4347 if (type == BCOND_IM) { 4348 target_ulong li = (target_long)((int16_t)(BD(ctx->opcode))); 4349 if (likely(AA(ctx->opcode) == 0)) { 4350 gen_goto_tb(ctx, 0, ctx->cia + li); 4351 } else { 4352 gen_goto_tb(ctx, 0, li); 4353 } 4354 } else { 4355 if (NARROW_MODE(ctx)) { 4356 tcg_gen_andi_tl(cpu_nip, target, (uint32_t)~3); 4357 } else { 4358 tcg_gen_andi_tl(cpu_nip, target, ~3); 4359 } 4360 gen_lookup_and_goto_ptr(ctx); 4361 } 4362 if ((bo & 0x14) != 0x14) { 4363 /* fallthrough case */ 4364 gen_set_label(l1); 4365 gen_goto_tb(ctx, 1, ctx->base.pc_next); 4366 } 4367 ctx->base.is_jmp = DISAS_NORETURN; 4368 } 4369 4370 static void gen_bc(DisasContext *ctx) 4371 { 4372 gen_bcond(ctx, BCOND_IM); 4373 } 4374 4375 static void 
gen_bcctr(DisasContext *ctx)
{
    gen_bcond(ctx, BCOND_CTR);
}

/* bclr, bclrl - conditional branch to the link register */
static void gen_bclr(DisasContext *ctx)
{
    gen_bcond(ctx, BCOND_LR);
}

/* bctar - conditional branch to the TAR SPR */
static void gen_bctar(DisasContext *ctx)
{
    gen_bcond(ctx, BCOND_TAR);
}

/*** Condition register logical ***/
/*
 * Each CR-logical instruction extracts single bits crbA and crbB from
 * the CR (stored as eight 4-bit crf fields), combines them with
 * tcg_op, and inserts the result at bit crbD.  The shifts align the
 * source bit position with the destination bit position within a crf.
 */
#define GEN_CRLOGIC(name, tcg_op, opc)                                        \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    uint8_t bitmask;                                                          \
    int sh;                                                                   \
    TCGv_i32 t0, t1;                                                          \
    sh = (crbD(ctx->opcode) & 0x03) - (crbA(ctx->opcode) & 0x03);             \
    t0 = tcg_temp_new_i32();                                                  \
    if (sh > 0)                                                               \
        tcg_gen_shri_i32(t0, cpu_crf[crbA(ctx->opcode) >> 2], sh);            \
    else if (sh < 0)                                                          \
        tcg_gen_shli_i32(t0, cpu_crf[crbA(ctx->opcode) >> 2], -sh);           \
    else                                                                      \
        tcg_gen_mov_i32(t0, cpu_crf[crbA(ctx->opcode) >> 2]);                 \
    t1 = tcg_temp_new_i32();                                                  \
    sh = (crbD(ctx->opcode) & 0x03) - (crbB(ctx->opcode) & 0x03);             \
    if (sh > 0)                                                               \
        tcg_gen_shri_i32(t1, cpu_crf[crbB(ctx->opcode) >> 2], sh);            \
    else if (sh < 0)                                                          \
        tcg_gen_shli_i32(t1, cpu_crf[crbB(ctx->opcode) >> 2], -sh);           \
    else                                                                      \
        tcg_gen_mov_i32(t1, cpu_crf[crbB(ctx->opcode) >> 2]);                 \
    tcg_op(t0, t0, t1);                                                       \
    bitmask = 0x08 >> (crbD(ctx->opcode) & 0x03);                             \
    tcg_gen_andi_i32(t0, t0, bitmask);                                        \
    tcg_gen_andi_i32(t1, cpu_crf[crbD(ctx->opcode) >> 2], ~bitmask);          \
    tcg_gen_or_i32(cpu_crf[crbD(ctx->opcode) >> 2], t0, t1);                  \
}

/* crand */
GEN_CRLOGIC(crand, tcg_gen_and_i32, 0x08);
/* crandc */
GEN_CRLOGIC(crandc, tcg_gen_andc_i32, 0x04);
/* creqv */
GEN_CRLOGIC(creqv, tcg_gen_eqv_i32, 0x09);
/* crnand */
GEN_CRLOGIC(crnand, tcg_gen_nand_i32, 0x07);
/* crnor */
GEN_CRLOGIC(crnor, tcg_gen_nor_i32, 0x01);
/* cror */
GEN_CRLOGIC(cror, tcg_gen_or_i32, 0x0E);
/* crorc */
GEN_CRLOGIC(crorc, tcg_gen_orc_i32, 0x0D);
/* crxor */
GEN_CRLOGIC(crxor, tcg_gen_xor_i32, 0x06);

/* mcrf
*/ 4438 static void gen_mcrf(DisasContext *ctx) 4439 { 4440 tcg_gen_mov_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfS(ctx->opcode)]); 4441 } 4442 4443 /*** System linkage ***/ 4444 4445 /* rfi (supervisor only) */ 4446 static void gen_rfi(DisasContext *ctx) 4447 { 4448 #if defined(CONFIG_USER_ONLY) 4449 GEN_PRIV(ctx); 4450 #else 4451 /* 4452 * This instruction doesn't exist anymore on 64-bit server 4453 * processors compliant with arch 2.x 4454 */ 4455 if (is_book3s_arch2x(ctx)) { 4456 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); 4457 return; 4458 } 4459 /* Restore CPU state */ 4460 CHK_SV(ctx); 4461 translator_io_start(&ctx->base); 4462 gen_update_cfar(ctx, ctx->cia); 4463 gen_helper_rfi(cpu_env); 4464 ctx->base.is_jmp = DISAS_EXIT; 4465 #endif 4466 } 4467 4468 #if defined(TARGET_PPC64) 4469 static void gen_rfid(DisasContext *ctx) 4470 { 4471 #if defined(CONFIG_USER_ONLY) 4472 GEN_PRIV(ctx); 4473 #else 4474 /* Restore CPU state */ 4475 CHK_SV(ctx); 4476 translator_io_start(&ctx->base); 4477 gen_update_cfar(ctx, ctx->cia); 4478 gen_helper_rfid(cpu_env); 4479 ctx->base.is_jmp = DISAS_EXIT; 4480 #endif 4481 } 4482 4483 #if !defined(CONFIG_USER_ONLY) 4484 static void gen_rfscv(DisasContext *ctx) 4485 { 4486 #if defined(CONFIG_USER_ONLY) 4487 GEN_PRIV(ctx); 4488 #else 4489 /* Restore CPU state */ 4490 CHK_SV(ctx); 4491 translator_io_start(&ctx->base); 4492 gen_update_cfar(ctx, ctx->cia); 4493 gen_helper_rfscv(cpu_env); 4494 ctx->base.is_jmp = DISAS_EXIT; 4495 #endif 4496 } 4497 #endif 4498 4499 static void gen_hrfid(DisasContext *ctx) 4500 { 4501 #if defined(CONFIG_USER_ONLY) 4502 GEN_PRIV(ctx); 4503 #else 4504 /* Restore CPU state */ 4505 CHK_HV(ctx); 4506 translator_io_start(&ctx->base); 4507 gen_helper_hrfid(cpu_env); 4508 ctx->base.is_jmp = DISAS_EXIT; 4509 #endif 4510 } 4511 #endif 4512 4513 /* sc */ 4514 #if defined(CONFIG_USER_ONLY) 4515 #define POWERPC_SYSCALL POWERPC_EXCP_SYSCALL_USER 4516 #else 4517 #define POWERPC_SYSCALL POWERPC_EXCP_SYSCALL 4518 
#endif
static void gen_sc(DisasContext *ctx)
{
    uint32_t lev;

    /*
     * LEV is a 7-bit field, but the top 6 bits are treated as a reserved
     * field (i.e., ignored). ISA v3.1 changes that to 5 bits, but that is
     * for Ultravisor which TCG does not support, so just ignore the top 6.
     */
    lev = (ctx->opcode >> 5) & 0x1;
    gen_exception_err(ctx, POWERPC_SYSCALL, lev);
}

#if defined(TARGET_PPC64)
#if !defined(CONFIG_USER_ONLY)
/* scv - system call vectored; LEV selects the vector */
static void gen_scv(DisasContext *ctx)
{
    uint32_t lev = (ctx->opcode >> 5) & 0x7F;

    /* Set the PC back to the faulting instruction. */
    gen_update_nip(ctx, ctx->cia);
    gen_helper_scv(cpu_env, tcg_constant_i32(lev));

    ctx->base.is_jmp = DISAS_NORETURN;
}
#endif
#endif

/*** Trap ***/

/* Check for unconditional traps (always or never) */
static bool check_unconditional_trap(DisasContext *ctx)
{
    /* Trap never */
    if (TO(ctx->opcode) == 0) {
        return true;
    }
    /* Trap always */
    if (TO(ctx->opcode) == 31) {
        gen_exception_err(ctx, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
        return true;
    }
    /* Conditional trap: caller must emit the runtime check */
    return false;
}

/* tw */
static void gen_tw(DisasContext *ctx)
{
    TCGv_i32 t0;

    if (check_unconditional_trap(ctx)) {
        return;
    }
    t0 = tcg_constant_i32(TO(ctx->opcode));
    gen_helper_tw(cpu_env, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
                  t0);
}

/* twi - trap word immediate; reuses the tw helper with a constant operand */
static void gen_twi(DisasContext *ctx)
{
    TCGv t0;
    TCGv_i32 t1;

    if (check_unconditional_trap(ctx)) {
        return;
    }
    t0 = tcg_constant_tl(SIMM(ctx->opcode));
    t1 = tcg_constant_i32(TO(ctx->opcode));
    gen_helper_tw(cpu_env, cpu_gpr[rA(ctx->opcode)], t0, t1);
}

#if defined(TARGET_PPC64)
/* td */
static void gen_td(DisasContext *ctx)
{
    TCGv_i32 t0;

    if (check_unconditional_trap(ctx)) {
        return;
    }
    t0 = tcg_constant_i32(TO(ctx->opcode));
    gen_helper_td(cpu_env, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
                  t0);
}

/* tdi */
static void gen_tdi(DisasContext *ctx)
{
    TCGv t0;
    TCGv_i32 t1;

    if (check_unconditional_trap(ctx)) {
        return;
    }
    t0 = tcg_constant_tl(SIMM(ctx->opcode));
    t1 = tcg_constant_i32(TO(ctx->opcode));
    gen_helper_td(cpu_env, cpu_gpr[rA(ctx->opcode)], t0, t1);
}
#endif

/*** Processor control ***/

/* mcrxr - pack XER SO/OV/CA into bits 3:1 of CR field crfD, then clear them */
static void gen_mcrxr(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 dst = cpu_crf[crfD(ctx->opcode)];

    tcg_gen_trunc_tl_i32(t0, cpu_so);
    tcg_gen_trunc_tl_i32(t1, cpu_ov);
    tcg_gen_trunc_tl_i32(dst, cpu_ca);
    tcg_gen_shli_i32(t0, t0, 3);
    tcg_gen_shli_i32(t1, t1, 2);
    tcg_gen_shli_i32(dst, dst, 1);
    tcg_gen_or_i32(dst, dst, t0);
    tcg_gen_or_i32(dst, dst, t1);

    /* The transferred XER bits are cleared by the instruction */
    tcg_gen_movi_tl(cpu_so, 0);
    tcg_gen_movi_tl(cpu_ov, 0);
    tcg_gen_movi_tl(cpu_ca, 0);
}

#ifdef TARGET_PPC64
/* mcrxrx - copy OV,OV32,CA,CA32 into CR field crfD (XER left unchanged) */
static void gen_mcrxrx(DisasContext *ctx)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    TCGv_i32 dst = cpu_crf[crfD(ctx->opcode)];

    /* copy OV and OV32 */
    tcg_gen_shli_tl(t0, cpu_ov, 1);
    tcg_gen_or_tl(t0, t0, cpu_ov32);
    tcg_gen_shli_tl(t0, t0, 2);
    /* copy CA and CA32 */
    tcg_gen_shli_tl(t1, cpu_ca, 1);
    tcg_gen_or_tl(t1, t1, cpu_ca32);
    tcg_gen_or_tl(t0, t0, t1);
    tcg_gen_trunc_tl_i32(dst, t0);
}
#endif

/* mfcr mfocrf */
static void gen_mfcr(DisasContext *ctx)
{
    uint32_t crm, crn;

    if (likely(ctx->opcode & 0x00100000)) {
        /* mfocrf: read one CR field selected by a single-bit CRM mask */
        crm = CRM(ctx->opcode);
        if (likely(crm && ((crm & (crm - 1)) == 0))) {
            crn = ctz32(crm);
            tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], cpu_crf[7 - crn]);
            tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)],
                            cpu_gpr[rD(ctx->opcode)], crn * 4);
        }
    } else {
        /* mfcr: concatenate all eight 4-bit CR fields into rD */
        TCGv_i32 t0 = tcg_temp_new_i32();
        tcg_gen_mov_i32(t0, cpu_crf[0]);
        tcg_gen_shli_i32(t0, t0, 4);
        tcg_gen_or_i32(t0, t0, cpu_crf[1]);
        tcg_gen_shli_i32(t0, t0, 4);
        tcg_gen_or_i32(t0, t0, cpu_crf[2]);
        tcg_gen_shli_i32(t0, t0, 4);
        tcg_gen_or_i32(t0, t0, cpu_crf[3]);
        tcg_gen_shli_i32(t0, t0, 4);
        tcg_gen_or_i32(t0, t0, cpu_crf[4]);
        tcg_gen_shli_i32(t0, t0, 4);
        tcg_gen_or_i32(t0, t0, cpu_crf[5]);
        tcg_gen_shli_i32(t0, t0, 4);
        tcg_gen_or_i32(t0, t0, cpu_crf[6]);
        tcg_gen_shli_i32(t0, t0, 4);
        tcg_gen_or_i32(t0, t0, cpu_crf[7]);
        tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t0);
    }
}

/* mfmsr (supervisor only) */
static void gen_mfmsr(DisasContext *ctx)
{
    CHK_SV(ctx);
    tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_msr);
}

/* mfspr */
static inline void gen_op_mfspr(DisasContext *ctx)
{
    void (*read_cb)(DisasContext *ctx, int gprn, int sprn);
    uint32_t sprn = SPR(ctx->opcode);

    /* Pick the read callback matching the current privilege level */
#if defined(CONFIG_USER_ONLY)
    read_cb = ctx->spr_cb[sprn].uea_read;
#else
    if (ctx->pr) {
        read_cb = ctx->spr_cb[sprn].uea_read;
    } else if (ctx->hv) {
        read_cb = ctx->spr_cb[sprn].hea_read;
    } else {
        read_cb = ctx->spr_cb[sprn].oea_read;
    }
#endif
    if (likely(read_cb != NULL)) {
        if (likely(read_cb != SPR_NOACCESS)) {
            (*read_cb)(ctx, rD(ctx->opcode), sprn);
        } else {
            /* Privilege exception */
            /*
             * This is a hack to avoid warnings when running Linux:
             * this OS breaks the PowerPC virtualisation model,
             * allowing userland application to read the PVR
             */
            if (sprn != SPR_PVR) {
                qemu_log_mask(LOG_GUEST_ERROR, "Trying to read privileged spr "
                              "%d (0x%03x) at " TARGET_FMT_lx "\n", sprn, sprn,
                              ctx->cia);
            }
            gen_priv_exception(ctx, POWERPC_EXCP_PRIV_REG);
        }
    } else {
        /* ISA 2.07 defines these as no-ops */
        if ((ctx->insns_flags2 & PPC2_ISA207S) &&
            (sprn >= 808 && sprn <= 811)) {
            /* This is a nop */
            return;
        }
        /* Not defined */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Trying to read invalid spr %d (0x%03x) at "
                      TARGET_FMT_lx "\n", sprn, sprn, ctx->cia);

        /*
         * The behaviour depends on MSR:PR and SPR# bit 0x10, it can
         * generate a priv, a hv emu or a no-op
         */
        if (sprn & 0x10) {
            if (ctx->pr) {
                gen_priv_exception(ctx, POWERPC_EXCP_PRIV_REG);
            }
        } else {
            if (ctx->pr || sprn == 0 || sprn == 4 || sprn == 5 || sprn == 6) {
                gen_hvpriv_exception(ctx, POWERPC_EXCP_PRIV_REG);
            }
        }
    }
}

static void gen_mfspr(DisasContext *ctx)
{
    gen_op_mfspr(ctx);
}

/* mftb - time base reads go through the same SPR dispatch table */
static void gen_mftb(DisasContext *ctx)
{
    gen_op_mfspr(ctx);
}

/* mtcrf mtocrf*/
static void gen_mtcrf(DisasContext *ctx)
{
    uint32_t crm, crn;

    crm = CRM(ctx->opcode);
    if (likely((ctx->opcode & 0x00100000))) {
        /* mtocrf: write a single CR field selected by a one-hot CRM */
        if (crm && ((crm & (crm - 1)) == 0)) {
            TCGv_i32 temp = tcg_temp_new_i32();
            crn = ctz32(crm);
            tcg_gen_trunc_tl_i32(temp, cpu_gpr[rS(ctx->opcode)]);
            tcg_gen_shri_i32(temp, temp, crn * 4);
            tcg_gen_andi_i32(cpu_crf[7 - crn], temp, 0xf);
        }
    } else {
        /* mtcrf: write every CR field whose CRM bit is set */
        TCGv_i32 temp = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(temp, cpu_gpr[rS(ctx->opcode)]);
        for (crn = 0 ; crn < 8 ; crn++) {
            if (crm & (1 << crn)) {
                tcg_gen_shri_i32(cpu_crf[7 - crn], temp, crn * 4);
                tcg_gen_andi_i32(cpu_crf[7 - crn], cpu_crf[7 - crn], 0xf);
            }
        }
    }
}

/* mtmsr */
#if defined(TARGET_PPC64)
static void gen_mtmsrd(DisasContext *ctx)
{
    if (unlikely(!is_book3s_arch2x(ctx))) {
        gen_invalid(ctx);
        return;
    }

    CHK_SV(ctx);

#if !defined(CONFIG_USER_ONLY)
    TCGv t0, t1;
    target_ulong mask;

    t0 = tcg_temp_new();
    t1 = tcg_temp_new();

    translator_io_start(&ctx->base);

    if (ctx->opcode & 0x00010000) {
        /* L=1 form only updates EE and RI */
        mask = (1ULL << MSR_RI) | (1ULL << MSR_EE);
    } else {
        /* mtmsrd does not alter HV, S, ME, or LE */
        mask = ~((1ULL << MSR_LE) | (1ULL << MSR_ME) | (1ULL << MSR_S) |
                 (1ULL << MSR_HV));
        /*
         * XXX: we need to update nip before the store if we enter
         *      power saving mode, we will exit the loop directly from
         *      ppc_store_msr
         */
        gen_update_nip(ctx, ctx->base.pc_next);
    }

    /* new MSR = (rS & mask) | (old MSR & ~mask) */
    tcg_gen_andi_tl(t0, cpu_gpr[rS(ctx->opcode)], mask);
    tcg_gen_andi_tl(t1, cpu_msr, ~mask);
    tcg_gen_or_tl(t0, t0, t1);

    gen_helper_store_msr(cpu_env, t0);

    /* Must stop the translation as machine state (may have) changed */
    ctx->base.is_jmp = DISAS_EXIT_UPDATE;
#endif /* !defined(CONFIG_USER_ONLY) */
}
#endif /* defined(TARGET_PPC64) */

static void gen_mtmsr(DisasContext *ctx)
{
    CHK_SV(ctx);

#if !defined(CONFIG_USER_ONLY)
    TCGv t0, t1;
    /* 32-bit form: only the low word of the MSR is writable */
    target_ulong mask = 0xFFFFFFFF;

    t0 = tcg_temp_new();
    t1 = tcg_temp_new();

    translator_io_start(&ctx->base);
    if (ctx->opcode & 0x00010000) {
        /* L=1 form only updates EE and RI */
        mask &= (1ULL << MSR_RI) | (1ULL << MSR_EE);
    } else {
        /* mtmsr does not alter S, ME, or LE */
        mask &= ~((1ULL << MSR_LE) | (1ULL << MSR_ME) | (1ULL << MSR_S));

        /*
         * XXX: we need to update nip before the store if we enter
         *      power saving mode, we will exit the loop directly from
         *      ppc_store_msr
         */
        gen_update_nip(ctx, ctx->base.pc_next);
    }

    /* new MSR = (rS & mask) | (old MSR & ~mask) */
    tcg_gen_andi_tl(t0, cpu_gpr[rS(ctx->opcode)], mask);
    tcg_gen_andi_tl(t1, cpu_msr, ~mask);
    tcg_gen_or_tl(t0, t0, t1);

    gen_helper_store_msr(cpu_env, t0);

    /* Must stop the translation as machine state (may have) changed */
    ctx->base.is_jmp = DISAS_EXIT_UPDATE;
#endif
}

/* mtspr */
static void gen_mtspr(DisasContext *ctx)
{
    void (*write_cb)(DisasContext *ctx, int sprn, int gprn);
    uint32_t sprn = SPR(ctx->opcode);

    /* Pick the write callback matching the current privilege level */
#if defined(CONFIG_USER_ONLY)
    write_cb = ctx->spr_cb[sprn].uea_write;
#else
    if (ctx->pr) {
        write_cb = ctx->spr_cb[sprn].uea_write;
    } else if (ctx->hv) {
        write_cb = ctx->spr_cb[sprn].hea_write;
    } else {
        write_cb = ctx->spr_cb[sprn].oea_write;
    }
#endif
    if (likely(write_cb != NULL)) {
        if (likely(write_cb != SPR_NOACCESS)) {
            (*write_cb)(ctx, sprn, rS(ctx->opcode));
        } else {
            /* Privilege exception */
            qemu_log_mask(LOG_GUEST_ERROR, "Trying to write privileged spr "
                          "%d (0x%03x) at " TARGET_FMT_lx "\n", sprn, sprn,
                          ctx->cia);
            gen_priv_exception(ctx, POWERPC_EXCP_PRIV_REG);
        }
    } else {
        /* ISA 2.07 defines these as no-ops */
        if ((ctx->insns_flags2 & PPC2_ISA207S) &&
            (sprn >= 808 && sprn <= 811)) {
            /* This is a nop */
            return;
        }

        /* Not defined */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Trying to write invalid spr %d (0x%03x) at "
                      TARGET_FMT_lx "\n", sprn, sprn, ctx->cia);


        /*
         * The behaviour depends on MSR:PR and SPR# bit 0x10, it can
         * generate a priv, a hv emu or a no-op
         */
        if (sprn & 0x10) {
            if (ctx->pr) {
                gen_priv_exception(ctx, POWERPC_EXCP_PRIV_REG);
            }
        } else {
            if (ctx->pr || sprn == 0) {
                gen_hvpriv_exception(ctx, POWERPC_EXCP_PRIV_REG);
            }
        }
    }
}

#if defined(TARGET_PPC64)
/* setb */
static void gen_setb(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t8 = tcg_constant_i32(8);
    TCGv_i32 tm1 = tcg_constant_i32(-1);
    int crf = crfS(ctx->opcode);

    /*
     * rD = -1 if LT (field >= 8), else 1 if GT (field >= 4), else 0,
     * exploiting the LT=8/GT=4 bit values within the 4-bit CR field.
     */
    tcg_gen_setcondi_i32(TCG_COND_GEU, t0, cpu_crf[crf], 4);
    tcg_gen_movcond_i32(TCG_COND_GEU, t0, cpu_crf[crf], t8, tm1, t0);
    tcg_gen_ext_i32_tl(cpu_gpr[rD(ctx->opcode)], t0);
}
#endif

/*** Cache management ***/

/* dcbf */
static void gen_dcbf(DisasContext *ctx)
{
    /* XXX: specification says this is treated as a load by the MMU */
    TCGv t0;
    gen_set_access_type(ctx, ACCESS_CACHE);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    /* Dummy byte load: performed only for its MMU fault side effects */
    gen_qemu_ld8u(ctx, t0, t0);
}

/* dcbfep (external PID dcbf) */
static void gen_dcbfep(DisasContext *ctx)
{
    /* XXX: specification says this is treated as a load by the MMU */
    TCGv t0;
    CHK_SV(ctx);
    gen_set_access_type(ctx, ACCESS_CACHE);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    tcg_gen_qemu_ld_tl(t0, t0, PPC_TLB_EPID_LOAD, DEF_MEMOP(MO_UB));
}

/* dcbi (Supervisor only) */
static void gen_dcbi(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv EA, val;

    CHK_SV(ctx);
    EA = tcg_temp_new();
    gen_set_access_type(ctx, ACCESS_CACHE);
    gen_addr_reg_index(ctx, EA);
    val = tcg_temp_new();
    /* XXX: specification says this should be treated as a store by the MMU */
    gen_qemu_ld8u(ctx, val, EA);
    gen_qemu_st8(ctx, val, EA);
#endif /* defined(CONFIG_USER_ONLY) */
}

/* dcdst */
static void gen_dcbst(DisasContext *ctx)
{
    /* XXX: specification say this is treated as a load by the MMU */
    TCGv t0;
    gen_set_access_type(ctx, ACCESS_CACHE);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    /* Dummy byte load: performed only for its MMU fault side effects */
    gen_qemu_ld8u(ctx, t0, t0);
}

/* dcbstep (dcbstep External PID version) */
static void gen_dcbstep(DisasContext *ctx)
{
    /* XXX: specification say this is treated as a load by the MMU */
    TCGv t0;
    gen_set_access_type(ctx, ACCESS_CACHE);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    tcg_gen_qemu_ld_tl(t0, t0, PPC_TLB_EPID_LOAD, DEF_MEMOP(MO_UB));
}

/* dcbt */
static void gen_dcbt(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     * XXX: specification say this is treated as a load by the MMU but
     *      does not generate any exception
     */
}

/* dcbtep */
static void gen_dcbtep(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     * XXX: specification say this is treated as a load by the MMU but
     *      does not generate any exception
     */
}

/* dcbtst */
static void gen_dcbtst(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     * XXX: specification say this is treated as a load by the MMU but
     *      does not generate any exception
     */
}

/* dcbtstep */
static void gen_dcbtstep(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     * XXX: specification say this is treated as a load by the MMU but
     *      does not generate any exception
     */
}

/* dcbtls */
static void gen_dcbtls(DisasContext *ctx)
{
    /* Always fails locking the cache: report it through L1CSR0[CUL] */
    TCGv t0 = tcg_temp_new();
    gen_load_spr(t0, SPR_Exxx_L1CSR0);
    tcg_gen_ori_tl(t0, t0, L1CSR0_CUL);
    gen_store_spr(SPR_Exxx_L1CSR0, t0);
}

/* dcblc */
static void gen_dcblc(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     */
}

/* dcbz */
static void gen_dcbz(DisasContext *ctx)
{
    TCGv tcgv_addr;
    TCGv_i32 tcgv_op;

    gen_set_access_type(ctx, ACCESS_CACHE);
    tcgv_addr = tcg_temp_new();
    /* Pass the opcode variant bits so the helper can pick the line size */
    tcgv_op = tcg_constant_i32(ctx->opcode & 0x03FF000);
    gen_addr_reg_index(ctx, tcgv_addr);
    gen_helper_dcbz(cpu_env, tcgv_addr, tcgv_op);
}

/* dcbzep */
static void gen_dcbzep(DisasContext *ctx)
{
    TCGv tcgv_addr;
    TCGv_i32 tcgv_op;

    gen_set_access_type(ctx, ACCESS_CACHE);
    tcgv_addr = tcg_temp_new();
    /* Pass the opcode variant bits so the helper can pick the line size */
    tcgv_op = tcg_constant_i32(ctx->opcode & 0x03FF000);
    gen_addr_reg_index(ctx, tcgv_addr);
    gen_helper_dcbzep(cpu_env, tcgv_addr, tcgv_op);
}

/* dst / dstt */
static void gen_dst(DisasContext *ctx)
{
    if (rA(ctx->opcode) == 0) {
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
    } else {
        /* interpreted as no-op */
    }
}

/* dstst /dststt */
static void gen_dstst(DisasContext *ctx)
{
    if (rA(ctx->opcode) == 0) {
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
    } else {
        /* interpreted as no-op */
    }

}

/* dss / dssall */
static void gen_dss(DisasContext *ctx)
{
    /* interpreted as no-op */
}

/* icbi */
static void gen_icbi(DisasContext *ctx)
{
    TCGv t0;
    gen_set_access_type(ctx, ACCESS_CACHE);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    gen_helper_icbi(cpu_env, t0);
}

/* icbiep */
static void gen_icbiep(DisasContext *ctx)
{
    TCGv t0;
    gen_set_access_type(ctx, ACCESS_CACHE);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    gen_helper_icbiep(cpu_env, t0);
}

/* Optional: */
/* dcba */
static void gen_dcba(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     * XXX: specification say this is treated as a store by the MMU
     *      but does not generate any exception
     */
}

/*** Segment register manipulation ***/
/* Supervisor only: */

/* mfsr */
static void gen_mfsr(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_constant_tl(SR(ctx->opcode));
    gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
#endif /* defined(CONFIG_USER_ONLY) */
}

/* mfsrin */
static void gen_mfsrin(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_temp_new();
    /* Segment register number comes from the top nibble of rB */
    tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4);
    gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
#endif /* defined(CONFIG_USER_ONLY) */
}

/* mtsr */
static void gen_mtsr(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_constant_tl(SR(ctx->opcode));
    gen_helper_store_sr(cpu_env, t0, cpu_gpr[rS(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}

/* mtsrin */
static void gen_mtsrin(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;
    CHK_SV(ctx);

    t0 = tcg_temp_new();
    tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4);
    /*
     * NOTE(review): the value comes from rD here while the 64-bit bridge
     * variant below uses rS; presumably both macros extract the same
     * opcode bit field -- confirm.
     */
    gen_helper_store_sr(cpu_env, t0, cpu_gpr[rD(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}

#if defined(TARGET_PPC64)
/* Specific implementation for PowerPC 64 "bridge" emulation using SLB */

/* mfsr */
static void gen_mfsr_64b(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_constant_tl(SR(ctx->opcode));
    gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
#endif /* defined(CONFIG_USER_ONLY) */
}

/* mfsrin */
static void gen_mfsrin_64b(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_temp_new();
    tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4);
    gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
#endif /* defined(CONFIG_USER_ONLY) */
}

/* mtsr */
static void gen_mtsr_64b(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_constant_tl(SR(ctx->opcode));
    gen_helper_store_sr(cpu_env, t0, cpu_gpr[rS(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}

/* mtsrin */
static void gen_mtsrin_64b(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_temp_new();
    tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4);
    gen_helper_store_sr(cpu_env, t0, cpu_gpr[rS(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}

#endif /* defined(TARGET_PPC64) */

/*** Lookaside buffer management ***/
/* Optional & supervisor only: */

/* tlbia */
static void gen_tlbia(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_HV(ctx);

    gen_helper_tlbia(cpu_env);
#endif /* defined(CONFIG_USER_ONLY) */
}

/* tlbsync */
static void gen_tlbsync(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else

    if (ctx->gtse) {
        CHK_SV(ctx); /* If gtse is set then tlbsync is supervisor privileged */
    } else {
        CHK_HV(ctx); /* Else hypervisor privileged */
    }

    /* BookS does both ptesync and tlbsync make tlbsync a nop for server */
    if (ctx->insns_flags & PPC_BOOKE) {
        gen_check_tlb_flush(ctx, true);
    }
#endif /* defined(CONFIG_USER_ONLY) */
}

/*** External control ***/
/* Optional: */

/* eciwx */
static void gen_eciwx(DisasContext *ctx)
{
    TCGv t0;
    /* Should check EAR[E] !
*/
    gen_set_access_type(ctx, ACCESS_EXT);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    /* Word load, alignment enforced */
    tcg_gen_qemu_ld_tl(cpu_gpr[rD(ctx->opcode)], t0, ctx->mem_idx,
                       DEF_MEMOP(MO_UL | MO_ALIGN));
}

/* ecowx */
static void gen_ecowx(DisasContext *ctx)
{
    TCGv t0;
    /* Should check EAR[E] ! */
    gen_set_access_type(ctx, ACCESS_EXT);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    /* Word store, alignment enforced */
    tcg_gen_qemu_st_tl(cpu_gpr[rD(ctx->opcode)], t0, ctx->mem_idx,
                       DEF_MEMOP(MO_UL | MO_ALIGN));
}

/* 602 - 603 - G2 TLB management */

/* tlbld */
static void gen_tlbld_6xx(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    gen_helper_6xx_tlbd(cpu_env, cpu_gpr[rB(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}

/* tlbli */
static void gen_tlbli_6xx(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    gen_helper_6xx_tlbi(cpu_env, cpu_gpr[rB(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}

/* BookE specific instructions */

/* XXX: not implemented on 440 ? */
static void gen_mfapidi(DisasContext *ctx)
{
    /* XXX: TODO */
    gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
}

/* XXX: not implemented on 440 ?
*/ 5388 static void gen_tlbiva(DisasContext *ctx) 5389 { 5390 #if defined(CONFIG_USER_ONLY) 5391 GEN_PRIV(ctx); 5392 #else 5393 TCGv t0; 5394 5395 CHK_SV(ctx); 5396 t0 = tcg_temp_new(); 5397 gen_addr_reg_index(ctx, t0); 5398 gen_helper_tlbiva(cpu_env, cpu_gpr[rB(ctx->opcode)]); 5399 #endif /* defined(CONFIG_USER_ONLY) */ 5400 } 5401 5402 /* All 405 MAC instructions are translated here */ 5403 static inline void gen_405_mulladd_insn(DisasContext *ctx, int opc2, int opc3, 5404 int ra, int rb, int rt, int Rc) 5405 { 5406 TCGv t0, t1; 5407 5408 t0 = tcg_temp_new(); 5409 t1 = tcg_temp_new(); 5410 5411 switch (opc3 & 0x0D) { 5412 case 0x05: 5413 /* macchw - macchw. - macchwo - macchwo. */ 5414 /* macchws - macchws. - macchwso - macchwso. */ 5415 /* nmacchw - nmacchw. - nmacchwo - nmacchwo. */ 5416 /* nmacchws - nmacchws. - nmacchwso - nmacchwso. */ 5417 /* mulchw - mulchw. */ 5418 tcg_gen_ext16s_tl(t0, cpu_gpr[ra]); 5419 tcg_gen_sari_tl(t1, cpu_gpr[rb], 16); 5420 tcg_gen_ext16s_tl(t1, t1); 5421 break; 5422 case 0x04: 5423 /* macchwu - macchwu. - macchwuo - macchwuo. */ 5424 /* macchwsu - macchwsu. - macchwsuo - macchwsuo. */ 5425 /* mulchwu - mulchwu. */ 5426 tcg_gen_ext16u_tl(t0, cpu_gpr[ra]); 5427 tcg_gen_shri_tl(t1, cpu_gpr[rb], 16); 5428 tcg_gen_ext16u_tl(t1, t1); 5429 break; 5430 case 0x01: 5431 /* machhw - machhw. - machhwo - machhwo. */ 5432 /* machhws - machhws. - machhwso - machhwso. */ 5433 /* nmachhw - nmachhw. - nmachhwo - nmachhwo. */ 5434 /* nmachhws - nmachhws. - nmachhwso - nmachhwso. */ 5435 /* mulhhw - mulhhw. */ 5436 tcg_gen_sari_tl(t0, cpu_gpr[ra], 16); 5437 tcg_gen_ext16s_tl(t0, t0); 5438 tcg_gen_sari_tl(t1, cpu_gpr[rb], 16); 5439 tcg_gen_ext16s_tl(t1, t1); 5440 break; 5441 case 0x00: 5442 /* machhwu - machhwu. - machhwuo - machhwuo. */ 5443 /* machhwsu - machhwsu. - machhwsuo - machhwsuo. */ 5444 /* mulhhwu - mulhhwu. 
*/ 5445 tcg_gen_shri_tl(t0, cpu_gpr[ra], 16); 5446 tcg_gen_ext16u_tl(t0, t0); 5447 tcg_gen_shri_tl(t1, cpu_gpr[rb], 16); 5448 tcg_gen_ext16u_tl(t1, t1); 5449 break; 5450 case 0x0D: 5451 /* maclhw - maclhw. - maclhwo - maclhwo. */ 5452 /* maclhws - maclhws. - maclhwso - maclhwso. */ 5453 /* nmaclhw - nmaclhw. - nmaclhwo - nmaclhwo. */ 5454 /* nmaclhws - nmaclhws. - nmaclhwso - nmaclhwso. */ 5455 /* mullhw - mullhw. */ 5456 tcg_gen_ext16s_tl(t0, cpu_gpr[ra]); 5457 tcg_gen_ext16s_tl(t1, cpu_gpr[rb]); 5458 break; 5459 case 0x0C: 5460 /* maclhwu - maclhwu. - maclhwuo - maclhwuo. */ 5461 /* maclhwsu - maclhwsu. - maclhwsuo - maclhwsuo. */ 5462 /* mullhwu - mullhwu. */ 5463 tcg_gen_ext16u_tl(t0, cpu_gpr[ra]); 5464 tcg_gen_ext16u_tl(t1, cpu_gpr[rb]); 5465 break; 5466 } 5467 if (opc2 & 0x04) { 5468 /* (n)multiply-and-accumulate (0x0C / 0x0E) */ 5469 tcg_gen_mul_tl(t1, t0, t1); 5470 if (opc2 & 0x02) { 5471 /* nmultiply-and-accumulate (0x0E) */ 5472 tcg_gen_sub_tl(t0, cpu_gpr[rt], t1); 5473 } else { 5474 /* multiply-and-accumulate (0x0C) */ 5475 tcg_gen_add_tl(t0, cpu_gpr[rt], t1); 5476 } 5477 5478 if (opc3 & 0x12) { 5479 /* Check overflow and/or saturate */ 5480 TCGLabel *l1 = gen_new_label(); 5481 5482 if (opc3 & 0x10) { 5483 /* Start with XER OV disabled, the most likely case */ 5484 tcg_gen_movi_tl(cpu_ov, 0); 5485 } 5486 if (opc3 & 0x01) { 5487 /* Signed */ 5488 tcg_gen_xor_tl(t1, cpu_gpr[rt], t1); 5489 tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1); 5490 tcg_gen_xor_tl(t1, cpu_gpr[rt], t0); 5491 tcg_gen_brcondi_tl(TCG_COND_LT, t1, 0, l1); 5492 if (opc3 & 0x02) { 5493 /* Saturate */ 5494 tcg_gen_sari_tl(t0, cpu_gpr[rt], 31); 5495 tcg_gen_xori_tl(t0, t0, 0x7fffffff); 5496 } 5497 } else { 5498 /* Unsigned */ 5499 tcg_gen_brcond_tl(TCG_COND_GEU, t0, t1, l1); 5500 if (opc3 & 0x02) { 5501 /* Saturate */ 5502 tcg_gen_movi_tl(t0, UINT32_MAX); 5503 } 5504 } 5505 if (opc3 & 0x10) { 5506 /* Check overflow */ 5507 tcg_gen_movi_tl(cpu_ov, 1); 5508 tcg_gen_movi_tl(cpu_so, 1); 5509 } 5510 
gen_set_label(l1); 5511 tcg_gen_mov_tl(cpu_gpr[rt], t0); 5512 } 5513 } else { 5514 tcg_gen_mul_tl(cpu_gpr[rt], t0, t1); 5515 } 5516 if (unlikely(Rc) != 0) { 5517 /* Update Rc0 */ 5518 gen_set_Rc0(ctx, cpu_gpr[rt]); 5519 } 5520 } 5521 5522 #define GEN_MAC_HANDLER(name, opc2, opc3) \ 5523 static void glue(gen_, name)(DisasContext *ctx) \ 5524 { \ 5525 gen_405_mulladd_insn(ctx, opc2, opc3, rA(ctx->opcode), rB(ctx->opcode), \ 5526 rD(ctx->opcode), Rc(ctx->opcode)); \ 5527 } 5528 5529 /* macchw - macchw. */ 5530 GEN_MAC_HANDLER(macchw, 0x0C, 0x05); 5531 /* macchwo - macchwo. */ 5532 GEN_MAC_HANDLER(macchwo, 0x0C, 0x15); 5533 /* macchws - macchws. */ 5534 GEN_MAC_HANDLER(macchws, 0x0C, 0x07); 5535 /* macchwso - macchwso. */ 5536 GEN_MAC_HANDLER(macchwso, 0x0C, 0x17); 5537 /* macchwsu - macchwsu. */ 5538 GEN_MAC_HANDLER(macchwsu, 0x0C, 0x06); 5539 /* macchwsuo - macchwsuo. */ 5540 GEN_MAC_HANDLER(macchwsuo, 0x0C, 0x16); 5541 /* macchwu - macchwu. */ 5542 GEN_MAC_HANDLER(macchwu, 0x0C, 0x04); 5543 /* macchwuo - macchwuo. */ 5544 GEN_MAC_HANDLER(macchwuo, 0x0C, 0x14); 5545 /* machhw - machhw. */ 5546 GEN_MAC_HANDLER(machhw, 0x0C, 0x01); 5547 /* machhwo - machhwo. */ 5548 GEN_MAC_HANDLER(machhwo, 0x0C, 0x11); 5549 /* machhws - machhws. */ 5550 GEN_MAC_HANDLER(machhws, 0x0C, 0x03); 5551 /* machhwso - machhwso. */ 5552 GEN_MAC_HANDLER(machhwso, 0x0C, 0x13); 5553 /* machhwsu - machhwsu. */ 5554 GEN_MAC_HANDLER(machhwsu, 0x0C, 0x02); 5555 /* machhwsuo - machhwsuo. */ 5556 GEN_MAC_HANDLER(machhwsuo, 0x0C, 0x12); 5557 /* machhwu - machhwu. */ 5558 GEN_MAC_HANDLER(machhwu, 0x0C, 0x00); 5559 /* machhwuo - machhwuo. */ 5560 GEN_MAC_HANDLER(machhwuo, 0x0C, 0x10); 5561 /* maclhw - maclhw. */ 5562 GEN_MAC_HANDLER(maclhw, 0x0C, 0x0D); 5563 /* maclhwo - maclhwo. */ 5564 GEN_MAC_HANDLER(maclhwo, 0x0C, 0x1D); 5565 /* maclhws - maclhws. */ 5566 GEN_MAC_HANDLER(maclhws, 0x0C, 0x0F); 5567 /* maclhwso - maclhwso. */ 5568 GEN_MAC_HANDLER(maclhwso, 0x0C, 0x1F); 5569 /* maclhwu - maclhwu. 
*/
GEN_MAC_HANDLER(maclhwu, 0x0C, 0x0C);
/* maclhwuo - maclhwuo. */
GEN_MAC_HANDLER(maclhwuo, 0x0C, 0x1C);
/* maclhwsu - maclhwsu. */
GEN_MAC_HANDLER(maclhwsu, 0x0C, 0x0E);
/* maclhwsuo - maclhwsuo. */
GEN_MAC_HANDLER(maclhwsuo, 0x0C, 0x1E);
/* nmacchw - nmacchw. */
GEN_MAC_HANDLER(nmacchw, 0x0E, 0x05);
/* nmacchwo - nmacchwo. */
GEN_MAC_HANDLER(nmacchwo, 0x0E, 0x15);
/* nmacchws - nmacchws. */
GEN_MAC_HANDLER(nmacchws, 0x0E, 0x07);
/* nmacchwso - nmacchwso. */
GEN_MAC_HANDLER(nmacchwso, 0x0E, 0x17);
/* nmachhw - nmachhw. */
GEN_MAC_HANDLER(nmachhw, 0x0E, 0x01);
/* nmachhwo - nmachhwo. */
GEN_MAC_HANDLER(nmachhwo, 0x0E, 0x11);
/* nmachhws - nmachhws. */
GEN_MAC_HANDLER(nmachhws, 0x0E, 0x03);
/* nmachhwso - nmachhwso. */
GEN_MAC_HANDLER(nmachhwso, 0x0E, 0x13);
/* nmaclhw - nmaclhw. */
GEN_MAC_HANDLER(nmaclhw, 0x0E, 0x0D);
/* nmaclhwo - nmaclhwo. */
GEN_MAC_HANDLER(nmaclhwo, 0x0E, 0x1D);
/* nmaclhws - nmaclhws. */
GEN_MAC_HANDLER(nmaclhws, 0x0E, 0x0F);
/* nmaclhwso - nmaclhwso. */
GEN_MAC_HANDLER(nmaclhwso, 0x0E, 0x1F);

/* mulchw - mulchw. */
GEN_MAC_HANDLER(mulchw, 0x08, 0x05);
/* mulchwu - mulchwu. */
GEN_MAC_HANDLER(mulchwu, 0x08, 0x04);
/* mulhhw - mulhhw. */
GEN_MAC_HANDLER(mulhhw, 0x08, 0x01);
/* mulhhwu - mulhhwu. */
GEN_MAC_HANDLER(mulhhwu, 0x08, 0x00);
/* mullhw - mullhw. */
GEN_MAC_HANDLER(mullhw, 0x08, 0x0D);
/* mullhwu - mullhwu.
*/
GEN_MAC_HANDLER(mullhwu, 0x08, 0x0C);

/* mfdcr - read a Device Control Register (supervisor only) */
static void gen_mfdcr(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv dcrn;

    CHK_SV(ctx);
    dcrn = tcg_constant_tl(SPR(ctx->opcode));
    gen_helper_load_dcr(cpu_gpr[rD(ctx->opcode)], cpu_env, dcrn);
#endif /* defined(CONFIG_USER_ONLY) */
}

/* mtdcr - write a Device Control Register (supervisor only) */
static void gen_mtdcr(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv dcrn;

    CHK_SV(ctx);
    dcrn = tcg_constant_tl(SPR(ctx->opcode));
    gen_helper_store_dcr(cpu_env, dcrn, cpu_gpr[rS(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}

/* mfdcrx - indexed form: DCR number taken from rA at runtime */
/* XXX: not implemented on 440 ? */
static void gen_mfdcrx(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    gen_helper_load_dcr(cpu_gpr[rD(ctx->opcode)], cpu_env,
                        cpu_gpr[rA(ctx->opcode)]);
    /* Note: Rc update flag set leads to undefined state of Rc0 */
#endif /* defined(CONFIG_USER_ONLY) */
}

/* mtdcrx */
/* XXX: not implemented on 440 ?
*/
static void gen_mtdcrx(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    gen_helper_store_dcr(cpu_env, cpu_gpr[rA(ctx->opcode)],
                         cpu_gpr[rS(ctx->opcode)]);
    /* Note: Rc update flag set leads to undefined state of Rc0 */
#endif /* defined(CONFIG_USER_ONLY) */
}

/* dccci */
static void gen_dccci(DisasContext *ctx)
{
    CHK_SV(ctx);
    /* interpreted as no-op */
}

/* dcread */
static void gen_dcread(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv EA, val;

    CHK_SV(ctx);
    gen_set_access_type(ctx, ACCESS_CACHE);
    EA = tcg_temp_new();
    gen_addr_reg_index(ctx, EA);
    val = tcg_temp_new();
    /*
     * The loaded value is discarded; presumably the load is done only
     * for its MMU fault side effects, and the EA is what is returned.
     */
    gen_qemu_ld32u(ctx, val, EA);
    tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], EA);
#endif /* defined(CONFIG_USER_ONLY) */
}

/* icbt */
static void gen_icbt_40x(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     * XXX: specification say this is treated as a load by the MMU but
     *      does not generate any exception
     */
}

/* iccci */
static void gen_iccci(DisasContext *ctx)
{
    CHK_SV(ctx);
    /* interpreted as no-op */
}

/* icread */
static void gen_icread(DisasContext *ctx)
{
    CHK_SV(ctx);
    /* interpreted as no-op */
}

/* rfci (supervisor only) */
static void gen_rfci_40x(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    /* Restore CPU state */
    gen_helper_40x_rfci(cpu_env);
    ctx->base.is_jmp = DISAS_EXIT;
#endif /* defined(CONFIG_USER_ONLY) */
}

static void gen_rfci(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    /* Restore CPU state */
    gen_helper_rfci(cpu_env);
    ctx->base.is_jmp = DISAS_EXIT;
#endif /* defined(CONFIG_USER_ONLY) */
}

/* BookE specific */

/* XXX: not implemented on 440 ? */
static void gen_rfdi(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    /* Restore CPU state */
    gen_helper_rfdi(cpu_env);
    ctx->base.is_jmp = DISAS_EXIT;
#endif /* defined(CONFIG_USER_ONLY) */
}

/* XXX: not implemented on 440 ? */
static void gen_rfmci(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    /* Restore CPU state */
    gen_helper_rfmci(cpu_env);
    ctx->base.is_jmp = DISAS_EXIT;
#endif /* defined(CONFIG_USER_ONLY) */
}

/* TLB management - PowerPC 405 implementation */

/* tlbre - rB selects which half of the TLB entry is read */
static void gen_tlbre_40x(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    switch (rB(ctx->opcode)) {
    case 0:
        gen_helper_4xx_tlbre_hi(cpu_gpr[rD(ctx->opcode)], cpu_env,
                                cpu_gpr[rA(ctx->opcode)]);
        break;
    case 1:
        gen_helper_4xx_tlbre_lo(cpu_gpr[rD(ctx->opcode)], cpu_env,
                                cpu_gpr[rA(ctx->opcode)]);
        break;
    default:
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        break;
    }
#endif /* defined(CONFIG_USER_ONLY) */
}

/* tlbsx - tlbsx.
*/ 5799 static void gen_tlbsx_40x(DisasContext *ctx) 5800 { 5801 #if defined(CONFIG_USER_ONLY) 5802 GEN_PRIV(ctx); 5803 #else 5804 TCGv t0; 5805 5806 CHK_SV(ctx); 5807 t0 = tcg_temp_new(); 5808 gen_addr_reg_index(ctx, t0); 5809 gen_helper_4xx_tlbsx(cpu_gpr[rD(ctx->opcode)], cpu_env, t0); 5810 if (Rc(ctx->opcode)) { 5811 TCGLabel *l1 = gen_new_label(); 5812 tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so); 5813 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_gpr[rD(ctx->opcode)], -1, l1); 5814 tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], 0x02); 5815 gen_set_label(l1); 5816 } 5817 #endif /* defined(CONFIG_USER_ONLY) */ 5818 } 5819 5820 /* tlbwe */ 5821 static void gen_tlbwe_40x(DisasContext *ctx) 5822 { 5823 #if defined(CONFIG_USER_ONLY) 5824 GEN_PRIV(ctx); 5825 #else 5826 CHK_SV(ctx); 5827 5828 switch (rB(ctx->opcode)) { 5829 case 0: 5830 gen_helper_4xx_tlbwe_hi(cpu_env, cpu_gpr[rA(ctx->opcode)], 5831 cpu_gpr[rS(ctx->opcode)]); 5832 break; 5833 case 1: 5834 gen_helper_4xx_tlbwe_lo(cpu_env, cpu_gpr[rA(ctx->opcode)], 5835 cpu_gpr[rS(ctx->opcode)]); 5836 break; 5837 default: 5838 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); 5839 break; 5840 } 5841 #endif /* defined(CONFIG_USER_ONLY) */ 5842 } 5843 5844 /* TLB management - PowerPC 440 implementation */ 5845 5846 /* tlbre */ 5847 static void gen_tlbre_440(DisasContext *ctx) 5848 { 5849 #if defined(CONFIG_USER_ONLY) 5850 GEN_PRIV(ctx); 5851 #else 5852 CHK_SV(ctx); 5853 5854 switch (rB(ctx->opcode)) { 5855 case 0: 5856 case 1: 5857 case 2: 5858 { 5859 TCGv_i32 t0 = tcg_constant_i32(rB(ctx->opcode)); 5860 gen_helper_440_tlbre(cpu_gpr[rD(ctx->opcode)], cpu_env, 5861 t0, cpu_gpr[rA(ctx->opcode)]); 5862 } 5863 break; 5864 default: 5865 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); 5866 break; 5867 } 5868 #endif /* defined(CONFIG_USER_ONLY) */ 5869 } 5870 5871 /* tlbsx - tlbsx. 
*/ 5872 static void gen_tlbsx_440(DisasContext *ctx) 5873 { 5874 #if defined(CONFIG_USER_ONLY) 5875 GEN_PRIV(ctx); 5876 #else 5877 TCGv t0; 5878 5879 CHK_SV(ctx); 5880 t0 = tcg_temp_new(); 5881 gen_addr_reg_index(ctx, t0); 5882 gen_helper_440_tlbsx(cpu_gpr[rD(ctx->opcode)], cpu_env, t0); 5883 if (Rc(ctx->opcode)) { 5884 TCGLabel *l1 = gen_new_label(); 5885 tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so); 5886 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_gpr[rD(ctx->opcode)], -1, l1); 5887 tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], 0x02); 5888 gen_set_label(l1); 5889 } 5890 #endif /* defined(CONFIG_USER_ONLY) */ 5891 } 5892 5893 /* tlbwe */ 5894 static void gen_tlbwe_440(DisasContext *ctx) 5895 { 5896 #if defined(CONFIG_USER_ONLY) 5897 GEN_PRIV(ctx); 5898 #else 5899 CHK_SV(ctx); 5900 switch (rB(ctx->opcode)) { 5901 case 0: 5902 case 1: 5903 case 2: 5904 { 5905 TCGv_i32 t0 = tcg_constant_i32(rB(ctx->opcode)); 5906 gen_helper_440_tlbwe(cpu_env, t0, cpu_gpr[rA(ctx->opcode)], 5907 cpu_gpr[rS(ctx->opcode)]); 5908 } 5909 break; 5910 default: 5911 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); 5912 break; 5913 } 5914 #endif /* defined(CONFIG_USER_ONLY) */ 5915 } 5916 5917 /* TLB management - PowerPC BookE 2.06 implementation */ 5918 5919 /* tlbre */ 5920 static void gen_tlbre_booke206(DisasContext *ctx) 5921 { 5922 #if defined(CONFIG_USER_ONLY) 5923 GEN_PRIV(ctx); 5924 #else 5925 CHK_SV(ctx); 5926 gen_helper_booke206_tlbre(cpu_env); 5927 #endif /* defined(CONFIG_USER_ONLY) */ 5928 } 5929 5930 /* tlbsx - tlbsx. 
*/ 5931 static void gen_tlbsx_booke206(DisasContext *ctx) 5932 { 5933 #if defined(CONFIG_USER_ONLY) 5934 GEN_PRIV(ctx); 5935 #else 5936 TCGv t0; 5937 5938 CHK_SV(ctx); 5939 if (rA(ctx->opcode)) { 5940 t0 = tcg_temp_new(); 5941 tcg_gen_add_tl(t0, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); 5942 } else { 5943 t0 = cpu_gpr[rB(ctx->opcode)]; 5944 } 5945 gen_helper_booke206_tlbsx(cpu_env, t0); 5946 #endif /* defined(CONFIG_USER_ONLY) */ 5947 } 5948 5949 /* tlbwe */ 5950 static void gen_tlbwe_booke206(DisasContext *ctx) 5951 { 5952 #if defined(CONFIG_USER_ONLY) 5953 GEN_PRIV(ctx); 5954 #else 5955 CHK_SV(ctx); 5956 gen_helper_booke206_tlbwe(cpu_env); 5957 #endif /* defined(CONFIG_USER_ONLY) */ 5958 } 5959 5960 static void gen_tlbivax_booke206(DisasContext *ctx) 5961 { 5962 #if defined(CONFIG_USER_ONLY) 5963 GEN_PRIV(ctx); 5964 #else 5965 TCGv t0; 5966 5967 CHK_SV(ctx); 5968 t0 = tcg_temp_new(); 5969 gen_addr_reg_index(ctx, t0); 5970 gen_helper_booke206_tlbivax(cpu_env, t0); 5971 #endif /* defined(CONFIG_USER_ONLY) */ 5972 } 5973 5974 static void gen_tlbilx_booke206(DisasContext *ctx) 5975 { 5976 #if defined(CONFIG_USER_ONLY) 5977 GEN_PRIV(ctx); 5978 #else 5979 TCGv t0; 5980 5981 CHK_SV(ctx); 5982 t0 = tcg_temp_new(); 5983 gen_addr_reg_index(ctx, t0); 5984 5985 switch ((ctx->opcode >> 21) & 0x3) { 5986 case 0: 5987 gen_helper_booke206_tlbilx0(cpu_env, t0); 5988 break; 5989 case 1: 5990 gen_helper_booke206_tlbilx1(cpu_env, t0); 5991 break; 5992 case 3: 5993 gen_helper_booke206_tlbilx3(cpu_env, t0); 5994 break; 5995 default: 5996 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); 5997 break; 5998 } 5999 #endif /* defined(CONFIG_USER_ONLY) */ 6000 } 6001 6002 /* wrtee */ 6003 static void gen_wrtee(DisasContext *ctx) 6004 { 6005 #if defined(CONFIG_USER_ONLY) 6006 GEN_PRIV(ctx); 6007 #else 6008 TCGv t0; 6009 6010 CHK_SV(ctx); 6011 t0 = tcg_temp_new(); 6012 tcg_gen_andi_tl(t0, cpu_gpr[rD(ctx->opcode)], (1 << MSR_EE)); 6013 tcg_gen_andi_tl(cpu_msr, cpu_msr, ~(1 << 
MSR_EE)); 6014 tcg_gen_or_tl(cpu_msr, cpu_msr, t0); 6015 gen_ppc_maybe_interrupt(ctx); 6016 /* 6017 * Stop translation to have a chance to raise an exception if we 6018 * just set msr_ee to 1 6019 */ 6020 ctx->base.is_jmp = DISAS_EXIT_UPDATE; 6021 #endif /* defined(CONFIG_USER_ONLY) */ 6022 } 6023 6024 /* wrteei */ 6025 static void gen_wrteei(DisasContext *ctx) 6026 { 6027 #if defined(CONFIG_USER_ONLY) 6028 GEN_PRIV(ctx); 6029 #else 6030 CHK_SV(ctx); 6031 if (ctx->opcode & 0x00008000) { 6032 tcg_gen_ori_tl(cpu_msr, cpu_msr, (1 << MSR_EE)); 6033 gen_ppc_maybe_interrupt(ctx); 6034 /* Stop translation to have a chance to raise an exception */ 6035 ctx->base.is_jmp = DISAS_EXIT_UPDATE; 6036 } else { 6037 tcg_gen_andi_tl(cpu_msr, cpu_msr, ~(1 << MSR_EE)); 6038 } 6039 #endif /* defined(CONFIG_USER_ONLY) */ 6040 } 6041 6042 /* PowerPC 440 specific instructions */ 6043 6044 /* dlmzb */ 6045 static void gen_dlmzb(DisasContext *ctx) 6046 { 6047 TCGv_i32 t0 = tcg_constant_i32(Rc(ctx->opcode)); 6048 gen_helper_dlmzb(cpu_gpr[rA(ctx->opcode)], cpu_env, 6049 cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0); 6050 } 6051 6052 /* mbar replaces eieio on 440 */ 6053 static void gen_mbar(DisasContext *ctx) 6054 { 6055 /* interpreted as no-op */ 6056 } 6057 6058 /* msync replaces sync on 440 */ 6059 static void gen_msync_4xx(DisasContext *ctx) 6060 { 6061 /* Only e500 seems to treat reserved bits as invalid */ 6062 if ((ctx->insns_flags2 & PPC2_BOOKE206) && 6063 (ctx->opcode & 0x03FFF801)) { 6064 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); 6065 } 6066 /* otherwise interpreted as no-op */ 6067 } 6068 6069 /* icbt */ 6070 static void gen_icbt_440(DisasContext *ctx) 6071 { 6072 /* 6073 * interpreted as no-op 6074 * XXX: specification say this is treated as a load by the MMU but 6075 * does not generate any exception 6076 */ 6077 } 6078 6079 #if defined(TARGET_PPC64) 6080 static void gen_maddld(DisasContext *ctx) 6081 { 6082 TCGv_i64 t1 = tcg_temp_new_i64(); 6083 6084 
tcg_gen_mul_i64(t1, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); 6085 tcg_gen_add_i64(cpu_gpr[rD(ctx->opcode)], t1, cpu_gpr[rC(ctx->opcode)]); 6086 } 6087 6088 /* maddhd maddhdu */ 6089 static void gen_maddhd_maddhdu(DisasContext *ctx) 6090 { 6091 TCGv_i64 lo = tcg_temp_new_i64(); 6092 TCGv_i64 hi = tcg_temp_new_i64(); 6093 TCGv_i64 t1 = tcg_temp_new_i64(); 6094 6095 if (Rc(ctx->opcode)) { 6096 tcg_gen_mulu2_i64(lo, hi, cpu_gpr[rA(ctx->opcode)], 6097 cpu_gpr[rB(ctx->opcode)]); 6098 tcg_gen_movi_i64(t1, 0); 6099 } else { 6100 tcg_gen_muls2_i64(lo, hi, cpu_gpr[rA(ctx->opcode)], 6101 cpu_gpr[rB(ctx->opcode)]); 6102 tcg_gen_sari_i64(t1, cpu_gpr[rC(ctx->opcode)], 63); 6103 } 6104 tcg_gen_add2_i64(t1, cpu_gpr[rD(ctx->opcode)], lo, hi, 6105 cpu_gpr[rC(ctx->opcode)], t1); 6106 } 6107 #endif /* defined(TARGET_PPC64) */ 6108 6109 static void gen_tbegin(DisasContext *ctx) 6110 { 6111 if (unlikely(!ctx->tm_enabled)) { 6112 gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_TM); 6113 return; 6114 } 6115 gen_helper_tbegin(cpu_env); 6116 } 6117 6118 #define GEN_TM_NOOP(name) \ 6119 static inline void gen_##name(DisasContext *ctx) \ 6120 { \ 6121 if (unlikely(!ctx->tm_enabled)) { \ 6122 gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_TM); \ 6123 return; \ 6124 } \ 6125 /* \ 6126 * Because tbegin always fails in QEMU, these user \ 6127 * space instructions all have a simple implementation: \ 6128 * \ 6129 * CR[0] = 0b0 || MSR[TS] || 0b0 \ 6130 * = 0b0 || 0b00 || 0b0 \ 6131 */ \ 6132 tcg_gen_movi_i32(cpu_crf[0], 0); \ 6133 } 6134 6135 GEN_TM_NOOP(tend); 6136 GEN_TM_NOOP(tabort); 6137 GEN_TM_NOOP(tabortwc); 6138 GEN_TM_NOOP(tabortwci); 6139 GEN_TM_NOOP(tabortdc); 6140 GEN_TM_NOOP(tabortdci); 6141 GEN_TM_NOOP(tsr); 6142 6143 static inline void gen_cp_abort(DisasContext *ctx) 6144 { 6145 /* Do Nothing */ 6146 } 6147 6148 #define GEN_CP_PASTE_NOOP(name) \ 6149 static inline void gen_##name(DisasContext *ctx) \ 6150 { \ 6151 /* \ 6152 * Generate invalid exception until we have an \ 
6153 * implementation of the copy paste facility \ 6154 */ \ 6155 gen_invalid(ctx); \ 6156 } 6157 6158 GEN_CP_PASTE_NOOP(copy) 6159 GEN_CP_PASTE_NOOP(paste) 6160 6161 static void gen_tcheck(DisasContext *ctx) 6162 { 6163 if (unlikely(!ctx->tm_enabled)) { 6164 gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_TM); 6165 return; 6166 } 6167 /* 6168 * Because tbegin always fails, the tcheck implementation is 6169 * simple: 6170 * 6171 * CR[CRF] = TDOOMED || MSR[TS] || 0b0 6172 * = 0b1 || 0b00 || 0b0 6173 */ 6174 tcg_gen_movi_i32(cpu_crf[crfD(ctx->opcode)], 0x8); 6175 } 6176 6177 #if defined(CONFIG_USER_ONLY) 6178 #define GEN_TM_PRIV_NOOP(name) \ 6179 static inline void gen_##name(DisasContext *ctx) \ 6180 { \ 6181 gen_priv_opc(ctx); \ 6182 } 6183 6184 #else 6185 6186 #define GEN_TM_PRIV_NOOP(name) \ 6187 static inline void gen_##name(DisasContext *ctx) \ 6188 { \ 6189 CHK_SV(ctx); \ 6190 if (unlikely(!ctx->tm_enabled)) { \ 6191 gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_TM); \ 6192 return; \ 6193 } \ 6194 /* \ 6195 * Because tbegin always fails, the implementation is \ 6196 * simple: \ 6197 * \ 6198 * CR[0] = 0b0 || MSR[TS] || 0b0 \ 6199 * = 0b0 || 0b00 | 0b0 \ 6200 */ \ 6201 tcg_gen_movi_i32(cpu_crf[0], 0); \ 6202 } 6203 6204 #endif 6205 6206 GEN_TM_PRIV_NOOP(treclaim); 6207 GEN_TM_PRIV_NOOP(trechkpt); 6208 6209 static inline void get_fpr(TCGv_i64 dst, int regno) 6210 { 6211 tcg_gen_ld_i64(dst, cpu_env, fpr_offset(regno)); 6212 } 6213 6214 static inline void set_fpr(int regno, TCGv_i64 src) 6215 { 6216 tcg_gen_st_i64(src, cpu_env, fpr_offset(regno)); 6217 /* 6218 * Before PowerISA v3.1 the result of doubleword 1 of the VSR 6219 * corresponding to the target FPR was undefined. However, 6220 * most (if not all) real hardware were setting the result to 0. 6221 * Starting at ISA v3.1, the result for doubleword 1 is now defined 6222 * to be 0. 
6223 */ 6224 tcg_gen_st_i64(tcg_constant_i64(0), cpu_env, vsr64_offset(regno, false)); 6225 } 6226 6227 static inline void get_avr64(TCGv_i64 dst, int regno, bool high) 6228 { 6229 tcg_gen_ld_i64(dst, cpu_env, avr64_offset(regno, high)); 6230 } 6231 6232 static inline void set_avr64(int regno, TCGv_i64 src, bool high) 6233 { 6234 tcg_gen_st_i64(src, cpu_env, avr64_offset(regno, high)); 6235 } 6236 6237 /* 6238 * Helpers for decodetree used by !function for decoding arguments. 6239 */ 6240 static int times_2(DisasContext *ctx, int x) 6241 { 6242 return x * 2; 6243 } 6244 6245 static int times_4(DisasContext *ctx, int x) 6246 { 6247 return x * 4; 6248 } 6249 6250 static int times_16(DisasContext *ctx, int x) 6251 { 6252 return x * 16; 6253 } 6254 6255 static int64_t dw_compose_ea(DisasContext *ctx, int x) 6256 { 6257 return deposit64(0xfffffffffffffe00, 3, 6, x); 6258 } 6259 6260 /* 6261 * Helpers for trans_* functions to check for specific insns flags. 6262 * Use token pasting to ensure that we use the proper flag with the 6263 * proper variable. 6264 */ 6265 #define REQUIRE_INSNS_FLAGS(CTX, NAME) \ 6266 do { \ 6267 if (((CTX)->insns_flags & PPC_##NAME) == 0) { \ 6268 return false; \ 6269 } \ 6270 } while (0) 6271 6272 #define REQUIRE_INSNS_FLAGS2(CTX, NAME) \ 6273 do { \ 6274 if (((CTX)->insns_flags2 & PPC2_##NAME) == 0) { \ 6275 return false; \ 6276 } \ 6277 } while (0) 6278 6279 /* Then special-case the check for 64-bit so that we elide code for ppc32. 
*/ 6280 #if TARGET_LONG_BITS == 32 6281 # define REQUIRE_64BIT(CTX) return false 6282 #else 6283 # define REQUIRE_64BIT(CTX) REQUIRE_INSNS_FLAGS(CTX, 64B) 6284 #endif 6285 6286 #define REQUIRE_VECTOR(CTX) \ 6287 do { \ 6288 if (unlikely(!(CTX)->altivec_enabled)) { \ 6289 gen_exception((CTX), POWERPC_EXCP_VPU); \ 6290 return true; \ 6291 } \ 6292 } while (0) 6293 6294 #define REQUIRE_VSX(CTX) \ 6295 do { \ 6296 if (unlikely(!(CTX)->vsx_enabled)) { \ 6297 gen_exception((CTX), POWERPC_EXCP_VSXU); \ 6298 return true; \ 6299 } \ 6300 } while (0) 6301 6302 #define REQUIRE_FPU(ctx) \ 6303 do { \ 6304 if (unlikely(!(ctx)->fpu_enabled)) { \ 6305 gen_exception((ctx), POWERPC_EXCP_FPU); \ 6306 return true; \ 6307 } \ 6308 } while (0) 6309 6310 #if !defined(CONFIG_USER_ONLY) 6311 #define REQUIRE_SV(CTX) \ 6312 do { \ 6313 if (unlikely((CTX)->pr)) { \ 6314 gen_priv_opc(CTX); \ 6315 return true; \ 6316 } \ 6317 } while (0) 6318 6319 #define REQUIRE_HV(CTX) \ 6320 do { \ 6321 if (unlikely((CTX)->pr || !(CTX)->hv)) { \ 6322 gen_priv_opc(CTX); \ 6323 return true; \ 6324 } \ 6325 } while (0) 6326 #else 6327 #define REQUIRE_SV(CTX) do { gen_priv_opc(CTX); return true; } while (0) 6328 #define REQUIRE_HV(CTX) do { gen_priv_opc(CTX); return true; } while (0) 6329 #endif 6330 6331 /* 6332 * Helpers for implementing sets of trans_* functions. 6333 * Defer the implementation of NAME to FUNC, with optional extra arguments. 6334 */ 6335 #define TRANS(NAME, FUNC, ...) \ 6336 static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \ 6337 { return FUNC(ctx, a, __VA_ARGS__); } 6338 #define TRANS_FLAGS(FLAGS, NAME, FUNC, ...) \ 6339 static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \ 6340 { \ 6341 REQUIRE_INSNS_FLAGS(ctx, FLAGS); \ 6342 return FUNC(ctx, a, __VA_ARGS__); \ 6343 } 6344 #define TRANS_FLAGS2(FLAGS2, NAME, FUNC, ...) 
\ 6345 static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \ 6346 { \ 6347 REQUIRE_INSNS_FLAGS2(ctx, FLAGS2); \ 6348 return FUNC(ctx, a, __VA_ARGS__); \ 6349 } 6350 6351 #define TRANS64(NAME, FUNC, ...) \ 6352 static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \ 6353 { REQUIRE_64BIT(ctx); return FUNC(ctx, a, __VA_ARGS__); } 6354 #define TRANS64_FLAGS2(FLAGS2, NAME, FUNC, ...) \ 6355 static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \ 6356 { \ 6357 REQUIRE_64BIT(ctx); \ 6358 REQUIRE_INSNS_FLAGS2(ctx, FLAGS2); \ 6359 return FUNC(ctx, a, __VA_ARGS__); \ 6360 } 6361 6362 /* TODO: More TRANS* helpers for extra insn_flags checks. */ 6363 6364 6365 #include "decode-insn32.c.inc" 6366 #include "decode-insn64.c.inc" 6367 #include "power8-pmu-regs.c.inc" 6368 6369 /* 6370 * Incorporate CIA into the constant when R=1. 6371 * Validate that when R=1, RA=0. 6372 */ 6373 static bool resolve_PLS_D(DisasContext *ctx, arg_D *d, arg_PLS_D *a) 6374 { 6375 d->rt = a->rt; 6376 d->ra = a->ra; 6377 d->si = a->si; 6378 if (a->r) { 6379 if (unlikely(a->ra != 0)) { 6380 gen_invalid(ctx); 6381 return false; 6382 } 6383 d->si += ctx->cia; 6384 } 6385 return true; 6386 } 6387 6388 #include "translate/fixedpoint-impl.c.inc" 6389 6390 #include "translate/fp-impl.c.inc" 6391 6392 #include "translate/vmx-impl.c.inc" 6393 6394 #include "translate/vsx-impl.c.inc" 6395 6396 #include "translate/dfp-impl.c.inc" 6397 6398 #include "translate/spe-impl.c.inc" 6399 6400 #include "translate/branch-impl.c.inc" 6401 6402 #include "translate/processor-ctrl-impl.c.inc" 6403 6404 #include "translate/storage-ctrl-impl.c.inc" 6405 6406 /* Handles lfdp */ 6407 static void gen_dform39(DisasContext *ctx) 6408 { 6409 if ((ctx->opcode & 0x3) == 0) { 6410 if (ctx->insns_flags2 & PPC2_ISA205) { 6411 return gen_lfdp(ctx); 6412 } 6413 } 6414 return gen_invalid(ctx); 6415 } 6416 6417 /* Handles stfdp */ 6418 static void gen_dform3D(DisasContext *ctx) 6419 { 6420 if ((ctx->opcode & 3) == 0) { /* 
DS-FORM */ 6421 /* stfdp */ 6422 if (ctx->insns_flags2 & PPC2_ISA205) { 6423 return gen_stfdp(ctx); 6424 } 6425 } 6426 return gen_invalid(ctx); 6427 } 6428 6429 #if defined(TARGET_PPC64) 6430 /* brd */ 6431 static void gen_brd(DisasContext *ctx) 6432 { 6433 tcg_gen_bswap64_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]); 6434 } 6435 6436 /* brw */ 6437 static void gen_brw(DisasContext *ctx) 6438 { 6439 tcg_gen_bswap64_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]); 6440 tcg_gen_rotli_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 32); 6441 6442 } 6443 6444 /* brh */ 6445 static void gen_brh(DisasContext *ctx) 6446 { 6447 TCGv_i64 mask = tcg_constant_i64(0x00ff00ff00ff00ffull); 6448 TCGv_i64 t1 = tcg_temp_new_i64(); 6449 TCGv_i64 t2 = tcg_temp_new_i64(); 6450 6451 tcg_gen_shri_i64(t1, cpu_gpr[rS(ctx->opcode)], 8); 6452 tcg_gen_and_i64(t2, t1, mask); 6453 tcg_gen_and_i64(t1, cpu_gpr[rS(ctx->opcode)], mask); 6454 tcg_gen_shli_i64(t1, t1, 8); 6455 tcg_gen_or_i64(cpu_gpr[rA(ctx->opcode)], t1, t2); 6456 } 6457 #endif 6458 6459 static opcode_t opcodes[] = { 6460 #if defined(TARGET_PPC64) 6461 GEN_HANDLER_E(brd, 0x1F, 0x1B, 0x05, 0x0000F801, PPC_NONE, PPC2_ISA310), 6462 GEN_HANDLER_E(brw, 0x1F, 0x1B, 0x04, 0x0000F801, PPC_NONE, PPC2_ISA310), 6463 GEN_HANDLER_E(brh, 0x1F, 0x1B, 0x06, 0x0000F801, PPC_NONE, PPC2_ISA310), 6464 #endif 6465 GEN_HANDLER(invalid, 0x00, 0x00, 0x00, 0xFFFFFFFF, PPC_NONE), 6466 #if defined(TARGET_PPC64) 6467 GEN_HANDLER_E(cmpeqb, 0x1F, 0x00, 0x07, 0x00600000, PPC_NONE, PPC2_ISA300), 6468 #endif 6469 GEN_HANDLER_E(cmpb, 0x1F, 0x1C, 0x0F, 0x00000001, PPC_NONE, PPC2_ISA205), 6470 GEN_HANDLER_E(cmprb, 0x1F, 0x00, 0x06, 0x00400001, PPC_NONE, PPC2_ISA300), 6471 GEN_HANDLER(isel, 0x1F, 0x0F, 0xFF, 0x00000001, PPC_ISEL), 6472 GEN_HANDLER(addic, 0x0C, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), 6473 GEN_HANDLER2(addic_, "addic.", 0x0D, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), 6474 GEN_HANDLER(mulhw, 0x1F, 0x0B, 0x02, 0x00000400, PPC_INTEGER), 
6475 GEN_HANDLER(mulhwu, 0x1F, 0x0B, 0x00, 0x00000400, PPC_INTEGER), 6476 GEN_HANDLER(mullw, 0x1F, 0x0B, 0x07, 0x00000000, PPC_INTEGER), 6477 GEN_HANDLER(mullwo, 0x1F, 0x0B, 0x17, 0x00000000, PPC_INTEGER), 6478 GEN_HANDLER(mulli, 0x07, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), 6479 #if defined(TARGET_PPC64) 6480 GEN_HANDLER(mulld, 0x1F, 0x09, 0x07, 0x00000000, PPC_64B), 6481 #endif 6482 GEN_HANDLER(neg, 0x1F, 0x08, 0x03, 0x0000F800, PPC_INTEGER), 6483 GEN_HANDLER(nego, 0x1F, 0x08, 0x13, 0x0000F800, PPC_INTEGER), 6484 GEN_HANDLER(subfic, 0x08, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), 6485 GEN_HANDLER2(andi_, "andi.", 0x1C, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), 6486 GEN_HANDLER2(andis_, "andis.", 0x1D, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), 6487 GEN_HANDLER(cntlzw, 0x1F, 0x1A, 0x00, 0x00000000, PPC_INTEGER), 6488 GEN_HANDLER_E(cnttzw, 0x1F, 0x1A, 0x10, 0x00000000, PPC_NONE, PPC2_ISA300), 6489 GEN_HANDLER_E(copy, 0x1F, 0x06, 0x18, 0x03C00001, PPC_NONE, PPC2_ISA300), 6490 GEN_HANDLER_E(cp_abort, 0x1F, 0x06, 0x1A, 0x03FFF801, PPC_NONE, PPC2_ISA300), 6491 GEN_HANDLER_E(paste, 0x1F, 0x06, 0x1C, 0x03C00000, PPC_NONE, PPC2_ISA300), 6492 GEN_HANDLER(or, 0x1F, 0x1C, 0x0D, 0x00000000, PPC_INTEGER), 6493 GEN_HANDLER(xor, 0x1F, 0x1C, 0x09, 0x00000000, PPC_INTEGER), 6494 GEN_HANDLER(ori, 0x18, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), 6495 GEN_HANDLER(oris, 0x19, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), 6496 GEN_HANDLER(xori, 0x1A, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), 6497 GEN_HANDLER(xoris, 0x1B, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), 6498 GEN_HANDLER(popcntb, 0x1F, 0x1A, 0x03, 0x0000F801, PPC_POPCNTB), 6499 GEN_HANDLER(popcntw, 0x1F, 0x1A, 0x0b, 0x0000F801, PPC_POPCNTWD), 6500 GEN_HANDLER_E(prtyw, 0x1F, 0x1A, 0x04, 0x0000F801, PPC_NONE, PPC2_ISA205), 6501 #if defined(TARGET_PPC64) 6502 GEN_HANDLER(popcntd, 0x1F, 0x1A, 0x0F, 0x0000F801, PPC_POPCNTWD), 6503 GEN_HANDLER(cntlzd, 0x1F, 0x1A, 0x01, 0x00000000, PPC_64B), 6504 GEN_HANDLER_E(cnttzd, 0x1F, 0x1A, 0x11, 0x00000000, PPC_NONE, 
PPC2_ISA300), 6505 GEN_HANDLER_E(darn, 0x1F, 0x13, 0x17, 0x001CF801, PPC_NONE, PPC2_ISA300), 6506 GEN_HANDLER_E(prtyd, 0x1F, 0x1A, 0x05, 0x0000F801, PPC_NONE, PPC2_ISA205), 6507 GEN_HANDLER_E(bpermd, 0x1F, 0x1C, 0x07, 0x00000001, PPC_NONE, PPC2_PERM_ISA206), 6508 #endif 6509 GEN_HANDLER(rlwimi, 0x14, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), 6510 GEN_HANDLER(rlwinm, 0x15, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), 6511 GEN_HANDLER(rlwnm, 0x17, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), 6512 GEN_HANDLER(slw, 0x1F, 0x18, 0x00, 0x00000000, PPC_INTEGER), 6513 GEN_HANDLER(sraw, 0x1F, 0x18, 0x18, 0x00000000, PPC_INTEGER), 6514 GEN_HANDLER(srawi, 0x1F, 0x18, 0x19, 0x00000000, PPC_INTEGER), 6515 GEN_HANDLER(srw, 0x1F, 0x18, 0x10, 0x00000000, PPC_INTEGER), 6516 #if defined(TARGET_PPC64) 6517 GEN_HANDLER(sld, 0x1F, 0x1B, 0x00, 0x00000000, PPC_64B), 6518 GEN_HANDLER(srad, 0x1F, 0x1A, 0x18, 0x00000000, PPC_64B), 6519 GEN_HANDLER2(sradi0, "sradi", 0x1F, 0x1A, 0x19, 0x00000000, PPC_64B), 6520 GEN_HANDLER2(sradi1, "sradi", 0x1F, 0x1B, 0x19, 0x00000000, PPC_64B), 6521 GEN_HANDLER(srd, 0x1F, 0x1B, 0x10, 0x00000000, PPC_64B), 6522 GEN_HANDLER2_E(extswsli0, "extswsli", 0x1F, 0x1A, 0x1B, 0x00000000, 6523 PPC_NONE, PPC2_ISA300), 6524 GEN_HANDLER2_E(extswsli1, "extswsli", 0x1F, 0x1B, 0x1B, 0x00000000, 6525 PPC_NONE, PPC2_ISA300), 6526 #endif 6527 /* handles lfdp, lxsd, lxssp */ 6528 GEN_HANDLER_E(dform39, 0x39, 0xFF, 0xFF, 0x00000000, PPC_NONE, PPC2_ISA205), 6529 /* handles stfdp, stxsd, stxssp */ 6530 GEN_HANDLER_E(dform3D, 0x3D, 0xFF, 0xFF, 0x00000000, PPC_NONE, PPC2_ISA205), 6531 GEN_HANDLER(lmw, 0x2E, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), 6532 GEN_HANDLER(stmw, 0x2F, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), 6533 GEN_HANDLER(lswi, 0x1F, 0x15, 0x12, 0x00000001, PPC_STRING), 6534 GEN_HANDLER(lswx, 0x1F, 0x15, 0x10, 0x00000001, PPC_STRING), 6535 GEN_HANDLER(stswi, 0x1F, 0x15, 0x16, 0x00000001, PPC_STRING), 6536 GEN_HANDLER(stswx, 0x1F, 0x15, 0x14, 0x00000001, PPC_STRING), 6537 GEN_HANDLER(eieio, 0x1F, 
0x16, 0x1A, 0x01FFF801, PPC_MEM_EIEIO), 6538 GEN_HANDLER(isync, 0x13, 0x16, 0x04, 0x03FFF801, PPC_MEM), 6539 GEN_HANDLER_E(lbarx, 0x1F, 0x14, 0x01, 0, PPC_NONE, PPC2_ATOMIC_ISA206), 6540 GEN_HANDLER_E(lharx, 0x1F, 0x14, 0x03, 0, PPC_NONE, PPC2_ATOMIC_ISA206), 6541 GEN_HANDLER(lwarx, 0x1F, 0x14, 0x00, 0x00000000, PPC_RES), 6542 GEN_HANDLER_E(lwat, 0x1F, 0x06, 0x12, 0x00000001, PPC_NONE, PPC2_ISA300), 6543 GEN_HANDLER_E(stwat, 0x1F, 0x06, 0x16, 0x00000001, PPC_NONE, PPC2_ISA300), 6544 GEN_HANDLER_E(stbcx_, 0x1F, 0x16, 0x15, 0, PPC_NONE, PPC2_ATOMIC_ISA206), 6545 GEN_HANDLER_E(sthcx_, 0x1F, 0x16, 0x16, 0, PPC_NONE, PPC2_ATOMIC_ISA206), 6546 GEN_HANDLER2(stwcx_, "stwcx.", 0x1F, 0x16, 0x04, 0x00000000, PPC_RES), 6547 #if defined(TARGET_PPC64) 6548 GEN_HANDLER_E(ldat, 0x1F, 0x06, 0x13, 0x00000001, PPC_NONE, PPC2_ISA300), 6549 GEN_HANDLER_E(stdat, 0x1F, 0x06, 0x17, 0x00000001, PPC_NONE, PPC2_ISA300), 6550 GEN_HANDLER(ldarx, 0x1F, 0x14, 0x02, 0x00000000, PPC_64B), 6551 GEN_HANDLER_E(lqarx, 0x1F, 0x14, 0x08, 0, PPC_NONE, PPC2_LSQ_ISA207), 6552 GEN_HANDLER2(stdcx_, "stdcx.", 0x1F, 0x16, 0x06, 0x00000000, PPC_64B), 6553 GEN_HANDLER_E(stqcx_, 0x1F, 0x16, 0x05, 0, PPC_NONE, PPC2_LSQ_ISA207), 6554 #endif 6555 GEN_HANDLER(sync, 0x1F, 0x16, 0x12, 0x039FF801, PPC_MEM_SYNC), 6556 /* ISA v3.0 changed the extended opcode from 62 to 30 */ 6557 GEN_HANDLER(wait, 0x1F, 0x1E, 0x01, 0x039FF801, PPC_WAIT), 6558 GEN_HANDLER_E(wait, 0x1F, 0x1E, 0x00, 0x039CF801, PPC_NONE, PPC2_ISA300), 6559 GEN_HANDLER(b, 0x12, 0xFF, 0xFF, 0x00000000, PPC_FLOW), 6560 GEN_HANDLER(bc, 0x10, 0xFF, 0xFF, 0x00000000, PPC_FLOW), 6561 GEN_HANDLER(bcctr, 0x13, 0x10, 0x10, 0x00000000, PPC_FLOW), 6562 GEN_HANDLER(bclr, 0x13, 0x10, 0x00, 0x00000000, PPC_FLOW), 6563 GEN_HANDLER_E(bctar, 0x13, 0x10, 0x11, 0x0000E000, PPC_NONE, PPC2_BCTAR_ISA207), 6564 GEN_HANDLER(mcrf, 0x13, 0x00, 0xFF, 0x00000001, PPC_INTEGER), 6565 GEN_HANDLER(rfi, 0x13, 0x12, 0x01, 0x03FF8001, PPC_FLOW), 6566 #if defined(TARGET_PPC64) 6567 
GEN_HANDLER(rfid, 0x13, 0x12, 0x00, 0x03FF8001, PPC_64B), 6568 #if !defined(CONFIG_USER_ONLY) 6569 /* Top bit of opc2 corresponds with low bit of LEV, so use two handlers */ 6570 GEN_HANDLER_E(scv, 0x11, 0x10, 0xFF, 0x03FFF01E, PPC_NONE, PPC2_ISA300), 6571 GEN_HANDLER_E(scv, 0x11, 0x00, 0xFF, 0x03FFF01E, PPC_NONE, PPC2_ISA300), 6572 GEN_HANDLER_E(rfscv, 0x13, 0x12, 0x02, 0x03FF8001, PPC_NONE, PPC2_ISA300), 6573 #endif 6574 GEN_HANDLER_E(stop, 0x13, 0x12, 0x0b, 0x03FFF801, PPC_NONE, PPC2_ISA300), 6575 GEN_HANDLER_E(doze, 0x13, 0x12, 0x0c, 0x03FFF801, PPC_NONE, PPC2_PM_ISA206), 6576 GEN_HANDLER_E(nap, 0x13, 0x12, 0x0d, 0x03FFF801, PPC_NONE, PPC2_PM_ISA206), 6577 GEN_HANDLER_E(sleep, 0x13, 0x12, 0x0e, 0x03FFF801, PPC_NONE, PPC2_PM_ISA206), 6578 GEN_HANDLER_E(rvwinkle, 0x13, 0x12, 0x0f, 0x03FFF801, PPC_NONE, PPC2_PM_ISA206), 6579 GEN_HANDLER(hrfid, 0x13, 0x12, 0x08, 0x03FF8001, PPC_64H), 6580 #endif 6581 /* Top bit of opc2 corresponds with low bit of LEV, so use two handlers */ 6582 GEN_HANDLER(sc, 0x11, 0x11, 0xFF, 0x03FFF01D, PPC_FLOW), 6583 GEN_HANDLER(sc, 0x11, 0x01, 0xFF, 0x03FFF01D, PPC_FLOW), 6584 GEN_HANDLER(tw, 0x1F, 0x04, 0x00, 0x00000001, PPC_FLOW), 6585 GEN_HANDLER(twi, 0x03, 0xFF, 0xFF, 0x00000000, PPC_FLOW), 6586 #if defined(TARGET_PPC64) 6587 GEN_HANDLER(td, 0x1F, 0x04, 0x02, 0x00000001, PPC_64B), 6588 GEN_HANDLER(tdi, 0x02, 0xFF, 0xFF, 0x00000000, PPC_64B), 6589 #endif 6590 GEN_HANDLER(mcrxr, 0x1F, 0x00, 0x10, 0x007FF801, PPC_MISC), 6591 GEN_HANDLER(mfcr, 0x1F, 0x13, 0x00, 0x00000801, PPC_MISC), 6592 GEN_HANDLER(mfmsr, 0x1F, 0x13, 0x02, 0x001FF801, PPC_MISC), 6593 GEN_HANDLER(mfspr, 0x1F, 0x13, 0x0A, 0x00000001, PPC_MISC), 6594 GEN_HANDLER(mftb, 0x1F, 0x13, 0x0B, 0x00000001, PPC_MFTB), 6595 GEN_HANDLER(mtcrf, 0x1F, 0x10, 0x04, 0x00000801, PPC_MISC), 6596 #if defined(TARGET_PPC64) 6597 GEN_HANDLER(mtmsrd, 0x1F, 0x12, 0x05, 0x001EF801, PPC_64B), 6598 GEN_HANDLER_E(setb, 0x1F, 0x00, 0x04, 0x0003F801, PPC_NONE, PPC2_ISA300), 6599 GEN_HANDLER_E(mcrxrx, 0x1F, 
0x00, 0x12, 0x007FF801, PPC_NONE, PPC2_ISA300), 6600 #endif 6601 GEN_HANDLER(mtmsr, 0x1F, 0x12, 0x04, 0x001EF801, PPC_MISC), 6602 GEN_HANDLER(mtspr, 0x1F, 0x13, 0x0E, 0x00000000, PPC_MISC), 6603 GEN_HANDLER(dcbf, 0x1F, 0x16, 0x02, 0x03C00001, PPC_CACHE), 6604 GEN_HANDLER_E(dcbfep, 0x1F, 0x1F, 0x03, 0x03C00001, PPC_NONE, PPC2_BOOKE206), 6605 GEN_HANDLER(dcbi, 0x1F, 0x16, 0x0E, 0x03E00001, PPC_CACHE), 6606 GEN_HANDLER(dcbst, 0x1F, 0x16, 0x01, 0x03E00001, PPC_CACHE), 6607 GEN_HANDLER_E(dcbstep, 0x1F, 0x1F, 0x01, 0x03E00001, PPC_NONE, PPC2_BOOKE206), 6608 GEN_HANDLER(dcbt, 0x1F, 0x16, 0x08, 0x00000001, PPC_CACHE), 6609 GEN_HANDLER_E(dcbtep, 0x1F, 0x1F, 0x09, 0x00000001, PPC_NONE, PPC2_BOOKE206), 6610 GEN_HANDLER(dcbtst, 0x1F, 0x16, 0x07, 0x00000001, PPC_CACHE), 6611 GEN_HANDLER_E(dcbtstep, 0x1F, 0x1F, 0x07, 0x00000001, PPC_NONE, PPC2_BOOKE206), 6612 GEN_HANDLER_E(dcbtls, 0x1F, 0x06, 0x05, 0x02000001, PPC_BOOKE, PPC2_BOOKE206), 6613 GEN_HANDLER_E(dcblc, 0x1F, 0x06, 0x0c, 0x02000001, PPC_BOOKE, PPC2_BOOKE206), 6614 GEN_HANDLER(dcbz, 0x1F, 0x16, 0x1F, 0x03C00001, PPC_CACHE_DCBZ), 6615 GEN_HANDLER_E(dcbzep, 0x1F, 0x1F, 0x1F, 0x03C00001, PPC_NONE, PPC2_BOOKE206), 6616 GEN_HANDLER(dst, 0x1F, 0x16, 0x0A, 0x01800001, PPC_ALTIVEC), 6617 GEN_HANDLER(dstst, 0x1F, 0x16, 0x0B, 0x01800001, PPC_ALTIVEC), 6618 GEN_HANDLER(dss, 0x1F, 0x16, 0x19, 0x019FF801, PPC_ALTIVEC), 6619 GEN_HANDLER(icbi, 0x1F, 0x16, 0x1E, 0x03E00001, PPC_CACHE_ICBI), 6620 GEN_HANDLER_E(icbiep, 0x1F, 0x1F, 0x1E, 0x03E00001, PPC_NONE, PPC2_BOOKE206), 6621 GEN_HANDLER(dcba, 0x1F, 0x16, 0x17, 0x03E00001, PPC_CACHE_DCBA), 6622 GEN_HANDLER(mfsr, 0x1F, 0x13, 0x12, 0x0010F801, PPC_SEGMENT), 6623 GEN_HANDLER(mfsrin, 0x1F, 0x13, 0x14, 0x001F0001, PPC_SEGMENT), 6624 GEN_HANDLER(mtsr, 0x1F, 0x12, 0x06, 0x0010F801, PPC_SEGMENT), 6625 GEN_HANDLER(mtsrin, 0x1F, 0x12, 0x07, 0x001F0001, PPC_SEGMENT), 6626 #if defined(TARGET_PPC64) 6627 GEN_HANDLER2(mfsr_64b, "mfsr", 0x1F, 0x13, 0x12, 0x0010F801, PPC_SEGMENT_64B), 6628 
GEN_HANDLER2(mfsrin_64b, "mfsrin", 0x1F, 0x13, 0x14, 0x001F0001, 6629 PPC_SEGMENT_64B), 6630 GEN_HANDLER2(mtsr_64b, "mtsr", 0x1F, 0x12, 0x06, 0x0010F801, PPC_SEGMENT_64B), 6631 GEN_HANDLER2(mtsrin_64b, "mtsrin", 0x1F, 0x12, 0x07, 0x001F0001, 6632 PPC_SEGMENT_64B), 6633 #endif 6634 GEN_HANDLER(tlbia, 0x1F, 0x12, 0x0B, 0x03FFFC01, PPC_MEM_TLBIA), 6635 /* 6636 * XXX Those instructions will need to be handled differently for 6637 * different ISA versions 6638 */ 6639 GEN_HANDLER(tlbsync, 0x1F, 0x16, 0x11, 0x03FFF801, PPC_MEM_TLBSYNC), 6640 GEN_HANDLER(eciwx, 0x1F, 0x16, 0x0D, 0x00000001, PPC_EXTERN), 6641 GEN_HANDLER(ecowx, 0x1F, 0x16, 0x09, 0x00000001, PPC_EXTERN), 6642 GEN_HANDLER2(tlbld_6xx, "tlbld", 0x1F, 0x12, 0x1E, 0x03FF0001, PPC_6xx_TLB), 6643 GEN_HANDLER2(tlbli_6xx, "tlbli", 0x1F, 0x12, 0x1F, 0x03FF0001, PPC_6xx_TLB), 6644 GEN_HANDLER(mfapidi, 0x1F, 0x13, 0x08, 0x0000F801, PPC_MFAPIDI), 6645 GEN_HANDLER(tlbiva, 0x1F, 0x12, 0x18, 0x03FFF801, PPC_TLBIVA), 6646 GEN_HANDLER(mfdcr, 0x1F, 0x03, 0x0A, 0x00000001, PPC_DCR), 6647 GEN_HANDLER(mtdcr, 0x1F, 0x03, 0x0E, 0x00000001, PPC_DCR), 6648 GEN_HANDLER(mfdcrx, 0x1F, 0x03, 0x08, 0x00000000, PPC_DCRX), 6649 GEN_HANDLER(mtdcrx, 0x1F, 0x03, 0x0C, 0x00000000, PPC_DCRX), 6650 GEN_HANDLER(dccci, 0x1F, 0x06, 0x0E, 0x03E00001, PPC_4xx_COMMON), 6651 GEN_HANDLER(dcread, 0x1F, 0x06, 0x0F, 0x00000001, PPC_4xx_COMMON), 6652 GEN_HANDLER2(icbt_40x, "icbt", 0x1F, 0x06, 0x08, 0x03E00001, PPC_40x_ICBT), 6653 GEN_HANDLER(iccci, 0x1F, 0x06, 0x1E, 0x00000001, PPC_4xx_COMMON), 6654 GEN_HANDLER(icread, 0x1F, 0x06, 0x1F, 0x03E00001, PPC_4xx_COMMON), 6655 GEN_HANDLER2(rfci_40x, "rfci", 0x13, 0x13, 0x01, 0x03FF8001, PPC_40x_EXCP), 6656 GEN_HANDLER_E(rfci, 0x13, 0x13, 0x01, 0x03FF8001, PPC_BOOKE, PPC2_BOOKE206), 6657 GEN_HANDLER(rfdi, 0x13, 0x07, 0x01, 0x03FF8001, PPC_RFDI), 6658 GEN_HANDLER(rfmci, 0x13, 0x06, 0x01, 0x03FF8001, PPC_RFMCI), 6659 GEN_HANDLER2(tlbre_40x, "tlbre", 0x1F, 0x12, 0x1D, 0x00000001, PPC_40x_TLB), 6660 
GEN_HANDLER2(tlbsx_40x, "tlbsx", 0x1F, 0x12, 0x1C, 0x00000000, PPC_40x_TLB), 6661 GEN_HANDLER2(tlbwe_40x, "tlbwe", 0x1F, 0x12, 0x1E, 0x00000001, PPC_40x_TLB), 6662 GEN_HANDLER2(tlbre_440, "tlbre", 0x1F, 0x12, 0x1D, 0x00000001, PPC_BOOKE), 6663 GEN_HANDLER2(tlbsx_440, "tlbsx", 0x1F, 0x12, 0x1C, 0x00000000, PPC_BOOKE), 6664 GEN_HANDLER2(tlbwe_440, "tlbwe", 0x1F, 0x12, 0x1E, 0x00000001, PPC_BOOKE), 6665 GEN_HANDLER2_E(tlbre_booke206, "tlbre", 0x1F, 0x12, 0x1D, 0x00000001, 6666 PPC_NONE, PPC2_BOOKE206), 6667 GEN_HANDLER2_E(tlbsx_booke206, "tlbsx", 0x1F, 0x12, 0x1C, 0x00000000, 6668 PPC_NONE, PPC2_BOOKE206), 6669 GEN_HANDLER2_E(tlbwe_booke206, "tlbwe", 0x1F, 0x12, 0x1E, 0x00000001, 6670 PPC_NONE, PPC2_BOOKE206), 6671 GEN_HANDLER2_E(tlbivax_booke206, "tlbivax", 0x1F, 0x12, 0x18, 0x00000001, 6672 PPC_NONE, PPC2_BOOKE206), 6673 GEN_HANDLER2_E(tlbilx_booke206, "tlbilx", 0x1F, 0x12, 0x00, 0x03800001, 6674 PPC_NONE, PPC2_BOOKE206), 6675 GEN_HANDLER(wrtee, 0x1F, 0x03, 0x04, 0x000FFC01, PPC_WRTEE), 6676 GEN_HANDLER(wrteei, 0x1F, 0x03, 0x05, 0x000E7C01, PPC_WRTEE), 6677 GEN_HANDLER(dlmzb, 0x1F, 0x0E, 0x02, 0x00000000, PPC_440_SPEC), 6678 GEN_HANDLER_E(mbar, 0x1F, 0x16, 0x1a, 0x001FF801, 6679 PPC_BOOKE, PPC2_BOOKE206), 6680 GEN_HANDLER(msync_4xx, 0x1F, 0x16, 0x12, 0x039FF801, PPC_BOOKE), 6681 GEN_HANDLER2_E(icbt_440, "icbt", 0x1F, 0x16, 0x00, 0x03E00001, 6682 PPC_BOOKE, PPC2_BOOKE206), 6683 GEN_HANDLER2(icbt_440, "icbt", 0x1F, 0x06, 0x08, 0x03E00001, 6684 PPC_440_SPEC), 6685 GEN_HANDLER(lvsl, 0x1f, 0x06, 0x00, 0x00000001, PPC_ALTIVEC), 6686 GEN_HANDLER(lvsr, 0x1f, 0x06, 0x01, 0x00000001, PPC_ALTIVEC), 6687 GEN_HANDLER(mfvscr, 0x04, 0x2, 0x18, 0x001ff800, PPC_ALTIVEC), 6688 GEN_HANDLER(mtvscr, 0x04, 0x2, 0x19, 0x03ff0000, PPC_ALTIVEC), 6689 #if defined(TARGET_PPC64) 6690 GEN_HANDLER_E(maddhd_maddhdu, 0x04, 0x18, 0xFF, 0x00000000, PPC_NONE, 6691 PPC2_ISA300), 6692 GEN_HANDLER_E(maddld, 0x04, 0x19, 0xFF, 0x00000000, PPC_NONE, PPC2_ISA300), 6693 #endif 6694 6695 #undef 
GEN_INT_ARITH_ADD 6696 #undef GEN_INT_ARITH_ADD_CONST 6697 #define GEN_INT_ARITH_ADD(name, opc3, add_ca, compute_ca, compute_ov) \ 6698 GEN_HANDLER(name, 0x1F, 0x0A, opc3, 0x00000000, PPC_INTEGER), 6699 #define GEN_INT_ARITH_ADD_CONST(name, opc3, const_val, \ 6700 add_ca, compute_ca, compute_ov) \ 6701 GEN_HANDLER(name, 0x1F, 0x0A, opc3, 0x0000F800, PPC_INTEGER), 6702 GEN_INT_ARITH_ADD(add, 0x08, 0, 0, 0) 6703 GEN_INT_ARITH_ADD(addo, 0x18, 0, 0, 1) 6704 GEN_INT_ARITH_ADD(addc, 0x00, 0, 1, 0) 6705 GEN_INT_ARITH_ADD(addco, 0x10, 0, 1, 1) 6706 GEN_INT_ARITH_ADD(adde, 0x04, 1, 1, 0) 6707 GEN_INT_ARITH_ADD(addeo, 0x14, 1, 1, 1) 6708 GEN_INT_ARITH_ADD_CONST(addme, 0x07, -1LL, 1, 1, 0) 6709 GEN_INT_ARITH_ADD_CONST(addmeo, 0x17, -1LL, 1, 1, 1) 6710 GEN_HANDLER_E(addex, 0x1F, 0x0A, 0x05, 0x00000000, PPC_NONE, PPC2_ISA300), 6711 GEN_INT_ARITH_ADD_CONST(addze, 0x06, 0, 1, 1, 0) 6712 GEN_INT_ARITH_ADD_CONST(addzeo, 0x16, 0, 1, 1, 1) 6713 6714 #undef GEN_INT_ARITH_DIVW 6715 #define GEN_INT_ARITH_DIVW(name, opc3, sign, compute_ov) \ 6716 GEN_HANDLER(name, 0x1F, 0x0B, opc3, 0x00000000, PPC_INTEGER) 6717 GEN_INT_ARITH_DIVW(divwu, 0x0E, 0, 0), 6718 GEN_INT_ARITH_DIVW(divwuo, 0x1E, 0, 1), 6719 GEN_INT_ARITH_DIVW(divw, 0x0F, 1, 0), 6720 GEN_INT_ARITH_DIVW(divwo, 0x1F, 1, 1), 6721 GEN_HANDLER_E(divwe, 0x1F, 0x0B, 0x0D, 0, PPC_NONE, PPC2_DIVE_ISA206), 6722 GEN_HANDLER_E(divweo, 0x1F, 0x0B, 0x1D, 0, PPC_NONE, PPC2_DIVE_ISA206), 6723 GEN_HANDLER_E(divweu, 0x1F, 0x0B, 0x0C, 0, PPC_NONE, PPC2_DIVE_ISA206), 6724 GEN_HANDLER_E(divweuo, 0x1F, 0x0B, 0x1C, 0, PPC_NONE, PPC2_DIVE_ISA206), 6725 GEN_HANDLER_E(modsw, 0x1F, 0x0B, 0x18, 0x00000001, PPC_NONE, PPC2_ISA300), 6726 GEN_HANDLER_E(moduw, 0x1F, 0x0B, 0x08, 0x00000001, PPC_NONE, PPC2_ISA300), 6727 6728 #if defined(TARGET_PPC64) 6729 #undef GEN_INT_ARITH_DIVD 6730 #define GEN_INT_ARITH_DIVD(name, opc3, sign, compute_ov) \ 6731 GEN_HANDLER(name, 0x1F, 0x09, opc3, 0x00000000, PPC_64B) 6732 GEN_INT_ARITH_DIVD(divdu, 0x0E, 0, 0), 6733 
GEN_INT_ARITH_DIVD(divduo, 0x1E, 0, 1), 6734 GEN_INT_ARITH_DIVD(divd, 0x0F, 1, 0), 6735 GEN_INT_ARITH_DIVD(divdo, 0x1F, 1, 1), 6736 6737 GEN_HANDLER_E(divdeu, 0x1F, 0x09, 0x0C, 0, PPC_NONE, PPC2_DIVE_ISA206), 6738 GEN_HANDLER_E(divdeuo, 0x1F, 0x09, 0x1C, 0, PPC_NONE, PPC2_DIVE_ISA206), 6739 GEN_HANDLER_E(divde, 0x1F, 0x09, 0x0D, 0, PPC_NONE, PPC2_DIVE_ISA206), 6740 GEN_HANDLER_E(divdeo, 0x1F, 0x09, 0x1D, 0, PPC_NONE, PPC2_DIVE_ISA206), 6741 GEN_HANDLER_E(modsd, 0x1F, 0x09, 0x18, 0x00000001, PPC_NONE, PPC2_ISA300), 6742 GEN_HANDLER_E(modud, 0x1F, 0x09, 0x08, 0x00000001, PPC_NONE, PPC2_ISA300), 6743 6744 #undef GEN_INT_ARITH_MUL_HELPER 6745 #define GEN_INT_ARITH_MUL_HELPER(name, opc3) \ 6746 GEN_HANDLER(name, 0x1F, 0x09, opc3, 0x00000000, PPC_64B) 6747 GEN_INT_ARITH_MUL_HELPER(mulhdu, 0x00), 6748 GEN_INT_ARITH_MUL_HELPER(mulhd, 0x02), 6749 GEN_INT_ARITH_MUL_HELPER(mulldo, 0x17), 6750 #endif 6751 6752 #undef GEN_INT_ARITH_SUBF 6753 #undef GEN_INT_ARITH_SUBF_CONST 6754 #define GEN_INT_ARITH_SUBF(name, opc3, add_ca, compute_ca, compute_ov) \ 6755 GEN_HANDLER(name, 0x1F, 0x08, opc3, 0x00000000, PPC_INTEGER), 6756 #define GEN_INT_ARITH_SUBF_CONST(name, opc3, const_val, \ 6757 add_ca, compute_ca, compute_ov) \ 6758 GEN_HANDLER(name, 0x1F, 0x08, opc3, 0x0000F800, PPC_INTEGER), 6759 GEN_INT_ARITH_SUBF(subf, 0x01, 0, 0, 0) 6760 GEN_INT_ARITH_SUBF(subfo, 0x11, 0, 0, 1) 6761 GEN_INT_ARITH_SUBF(subfc, 0x00, 0, 1, 0) 6762 GEN_INT_ARITH_SUBF(subfco, 0x10, 0, 1, 1) 6763 GEN_INT_ARITH_SUBF(subfe, 0x04, 1, 1, 0) 6764 GEN_INT_ARITH_SUBF(subfeo, 0x14, 1, 1, 1) 6765 GEN_INT_ARITH_SUBF_CONST(subfme, 0x07, -1LL, 1, 1, 0) 6766 GEN_INT_ARITH_SUBF_CONST(subfmeo, 0x17, -1LL, 1, 1, 1) 6767 GEN_INT_ARITH_SUBF_CONST(subfze, 0x06, 0, 1, 1, 0) 6768 GEN_INT_ARITH_SUBF_CONST(subfzeo, 0x16, 0, 1, 1, 1) 6769 6770 #undef GEN_LOGICAL1 6771 #undef GEN_LOGICAL2 6772 #define GEN_LOGICAL2(name, tcg_op, opc, type) \ 6773 GEN_HANDLER(name, 0x1F, 0x1C, opc, 0x00000000, type) 6774 #define GEN_LOGICAL1(name, 
tcg_op, opc, type) \ 6775 GEN_HANDLER(name, 0x1F, 0x1A, opc, 0x00000000, type) 6776 GEN_LOGICAL2(and, tcg_gen_and_tl, 0x00, PPC_INTEGER), 6777 GEN_LOGICAL2(andc, tcg_gen_andc_tl, 0x01, PPC_INTEGER), 6778 GEN_LOGICAL2(eqv, tcg_gen_eqv_tl, 0x08, PPC_INTEGER), 6779 GEN_LOGICAL1(extsb, tcg_gen_ext8s_tl, 0x1D, PPC_INTEGER), 6780 GEN_LOGICAL1(extsh, tcg_gen_ext16s_tl, 0x1C, PPC_INTEGER), 6781 GEN_LOGICAL2(nand, tcg_gen_nand_tl, 0x0E, PPC_INTEGER), 6782 GEN_LOGICAL2(nor, tcg_gen_nor_tl, 0x03, PPC_INTEGER), 6783 GEN_LOGICAL2(orc, tcg_gen_orc_tl, 0x0C, PPC_INTEGER), 6784 #if defined(TARGET_PPC64) 6785 GEN_LOGICAL1(extsw, tcg_gen_ext32s_tl, 0x1E, PPC_64B), 6786 #endif 6787 6788 #if defined(TARGET_PPC64) 6789 #undef GEN_PPC64_R2 6790 #undef GEN_PPC64_R4 6791 #define GEN_PPC64_R2(name, opc1, opc2) \ 6792 GEN_HANDLER2(name##0, stringify(name), opc1, opc2, 0xFF, 0x00000000, PPC_64B),\ 6793 GEN_HANDLER2(name##1, stringify(name), opc1, opc2 | 0x10, 0xFF, 0x00000000, \ 6794 PPC_64B) 6795 #define GEN_PPC64_R4(name, opc1, opc2) \ 6796 GEN_HANDLER2(name##0, stringify(name), opc1, opc2, 0xFF, 0x00000000, PPC_64B),\ 6797 GEN_HANDLER2(name##1, stringify(name), opc1, opc2 | 0x01, 0xFF, 0x00000000, \ 6798 PPC_64B), \ 6799 GEN_HANDLER2(name##2, stringify(name), opc1, opc2 | 0x10, 0xFF, 0x00000000, \ 6800 PPC_64B), \ 6801 GEN_HANDLER2(name##3, stringify(name), opc1, opc2 | 0x11, 0xFF, 0x00000000, \ 6802 PPC_64B) 6803 GEN_PPC64_R4(rldicl, 0x1E, 0x00), 6804 GEN_PPC64_R4(rldicr, 0x1E, 0x02), 6805 GEN_PPC64_R4(rldic, 0x1E, 0x04), 6806 GEN_PPC64_R2(rldcl, 0x1E, 0x08), 6807 GEN_PPC64_R2(rldcr, 0x1E, 0x09), 6808 GEN_PPC64_R4(rldimi, 0x1E, 0x06), 6809 #endif 6810 6811 #undef GEN_LDX_E 6812 #define GEN_LDX_E(name, ldop, opc2, opc3, type, type2, chk) \ 6813 GEN_HANDLER_E(name##x, 0x1F, opc2, opc3, 0x00000001, type, type2), 6814 6815 #if defined(TARGET_PPC64) 6816 GEN_LDX_E(ldbr, ld64ur_i64, 0x14, 0x10, PPC_NONE, PPC2_DBRX, CHK_NONE) 6817 6818 /* HV/P7 and later only */ 6819 GEN_LDX_HVRM(ldcix, 
ld64_i64, 0x15, 0x1b, PPC_CILDST) 6820 GEN_LDX_HVRM(lwzcix, ld32u, 0x15, 0x18, PPC_CILDST) 6821 GEN_LDX_HVRM(lhzcix, ld16u, 0x15, 0x19, PPC_CILDST) 6822 GEN_LDX_HVRM(lbzcix, ld8u, 0x15, 0x1a, PPC_CILDST) 6823 #endif 6824 GEN_LDX(lhbr, ld16ur, 0x16, 0x18, PPC_INTEGER) 6825 GEN_LDX(lwbr, ld32ur, 0x16, 0x10, PPC_INTEGER) 6826 6827 /* External PID based load */ 6828 #undef GEN_LDEPX 6829 #define GEN_LDEPX(name, ldop, opc2, opc3) \ 6830 GEN_HANDLER_E(name##epx, 0x1F, opc2, opc3, \ 6831 0x00000001, PPC_NONE, PPC2_BOOKE206), 6832 6833 GEN_LDEPX(lb, DEF_MEMOP(MO_UB), 0x1F, 0x02) 6834 GEN_LDEPX(lh, DEF_MEMOP(MO_UW), 0x1F, 0x08) 6835 GEN_LDEPX(lw, DEF_MEMOP(MO_UL), 0x1F, 0x00) 6836 #if defined(TARGET_PPC64) 6837 GEN_LDEPX(ld, DEF_MEMOP(MO_UQ), 0x1D, 0x00) 6838 #endif 6839 6840 #undef GEN_STX_E 6841 #define GEN_STX_E(name, stop, opc2, opc3, type, type2, chk) \ 6842 GEN_HANDLER_E(name##x, 0x1F, opc2, opc3, 0x00000000, type, type2), 6843 6844 #if defined(TARGET_PPC64) 6845 GEN_STX_E(stdbr, st64r_i64, 0x14, 0x14, PPC_NONE, PPC2_DBRX, CHK_NONE) 6846 GEN_STX_HVRM(stdcix, st64_i64, 0x15, 0x1f, PPC_CILDST) 6847 GEN_STX_HVRM(stwcix, st32, 0x15, 0x1c, PPC_CILDST) 6848 GEN_STX_HVRM(sthcix, st16, 0x15, 0x1d, PPC_CILDST) 6849 GEN_STX_HVRM(stbcix, st8, 0x15, 0x1e, PPC_CILDST) 6850 #endif 6851 GEN_STX(sthbr, st16r, 0x16, 0x1C, PPC_INTEGER) 6852 GEN_STX(stwbr, st32r, 0x16, 0x14, PPC_INTEGER) 6853 6854 #undef GEN_STEPX 6855 #define GEN_STEPX(name, ldop, opc2, opc3) \ 6856 GEN_HANDLER_E(name##epx, 0x1F, opc2, opc3, \ 6857 0x00000001, PPC_NONE, PPC2_BOOKE206), 6858 6859 GEN_STEPX(stb, DEF_MEMOP(MO_UB), 0x1F, 0x06) 6860 GEN_STEPX(sth, DEF_MEMOP(MO_UW), 0x1F, 0x0C) 6861 GEN_STEPX(stw, DEF_MEMOP(MO_UL), 0x1F, 0x04) 6862 #if defined(TARGET_PPC64) 6863 GEN_STEPX(std, DEF_MEMOP(MO_UQ), 0x1D, 0x04) 6864 #endif 6865 6866 #undef GEN_CRLOGIC 6867 #define GEN_CRLOGIC(name, tcg_op, opc) \ 6868 GEN_HANDLER(name, 0x13, 0x01, opc, 0x00000001, PPC_INTEGER) 6869 GEN_CRLOGIC(crand, tcg_gen_and_i32, 0x08), 6870 
GEN_CRLOGIC(crandc, tcg_gen_andc_i32, 0x04), 6871 GEN_CRLOGIC(creqv, tcg_gen_eqv_i32, 0x09), 6872 GEN_CRLOGIC(crnand, tcg_gen_nand_i32, 0x07), 6873 GEN_CRLOGIC(crnor, tcg_gen_nor_i32, 0x01), 6874 GEN_CRLOGIC(cror, tcg_gen_or_i32, 0x0E), 6875 GEN_CRLOGIC(crorc, tcg_gen_orc_i32, 0x0D), 6876 GEN_CRLOGIC(crxor, tcg_gen_xor_i32, 0x06), 6877 6878 #undef GEN_MAC_HANDLER 6879 #define GEN_MAC_HANDLER(name, opc2, opc3) \ 6880 GEN_HANDLER(name, 0x04, opc2, opc3, 0x00000000, PPC_405_MAC) 6881 GEN_MAC_HANDLER(macchw, 0x0C, 0x05), 6882 GEN_MAC_HANDLER(macchwo, 0x0C, 0x15), 6883 GEN_MAC_HANDLER(macchws, 0x0C, 0x07), 6884 GEN_MAC_HANDLER(macchwso, 0x0C, 0x17), 6885 GEN_MAC_HANDLER(macchwsu, 0x0C, 0x06), 6886 GEN_MAC_HANDLER(macchwsuo, 0x0C, 0x16), 6887 GEN_MAC_HANDLER(macchwu, 0x0C, 0x04), 6888 GEN_MAC_HANDLER(macchwuo, 0x0C, 0x14), 6889 GEN_MAC_HANDLER(machhw, 0x0C, 0x01), 6890 GEN_MAC_HANDLER(machhwo, 0x0C, 0x11), 6891 GEN_MAC_HANDLER(machhws, 0x0C, 0x03), 6892 GEN_MAC_HANDLER(machhwso, 0x0C, 0x13), 6893 GEN_MAC_HANDLER(machhwsu, 0x0C, 0x02), 6894 GEN_MAC_HANDLER(machhwsuo, 0x0C, 0x12), 6895 GEN_MAC_HANDLER(machhwu, 0x0C, 0x00), 6896 GEN_MAC_HANDLER(machhwuo, 0x0C, 0x10), 6897 GEN_MAC_HANDLER(maclhw, 0x0C, 0x0D), 6898 GEN_MAC_HANDLER(maclhwo, 0x0C, 0x1D), 6899 GEN_MAC_HANDLER(maclhws, 0x0C, 0x0F), 6900 GEN_MAC_HANDLER(maclhwso, 0x0C, 0x1F), 6901 GEN_MAC_HANDLER(maclhwu, 0x0C, 0x0C), 6902 GEN_MAC_HANDLER(maclhwuo, 0x0C, 0x1C), 6903 GEN_MAC_HANDLER(maclhwsu, 0x0C, 0x0E), 6904 GEN_MAC_HANDLER(maclhwsuo, 0x0C, 0x1E), 6905 GEN_MAC_HANDLER(nmacchw, 0x0E, 0x05), 6906 GEN_MAC_HANDLER(nmacchwo, 0x0E, 0x15), 6907 GEN_MAC_HANDLER(nmacchws, 0x0E, 0x07), 6908 GEN_MAC_HANDLER(nmacchwso, 0x0E, 0x17), 6909 GEN_MAC_HANDLER(nmachhw, 0x0E, 0x01), 6910 GEN_MAC_HANDLER(nmachhwo, 0x0E, 0x11), 6911 GEN_MAC_HANDLER(nmachhws, 0x0E, 0x03), 6912 GEN_MAC_HANDLER(nmachhwso, 0x0E, 0x13), 6913 GEN_MAC_HANDLER(nmaclhw, 0x0E, 0x0D), 6914 GEN_MAC_HANDLER(nmaclhwo, 0x0E, 0x1D), 6915 GEN_MAC_HANDLER(nmaclhws, 
0x0E, 0x0F), 6916 GEN_MAC_HANDLER(nmaclhwso, 0x0E, 0x1F), 6917 GEN_MAC_HANDLER(mulchw, 0x08, 0x05), 6918 GEN_MAC_HANDLER(mulchwu, 0x08, 0x04), 6919 GEN_MAC_HANDLER(mulhhw, 0x08, 0x01), 6920 GEN_MAC_HANDLER(mulhhwu, 0x08, 0x00), 6921 GEN_MAC_HANDLER(mullhw, 0x08, 0x0D), 6922 GEN_MAC_HANDLER(mullhwu, 0x08, 0x0C), 6923 6924 GEN_HANDLER2_E(tbegin, "tbegin", 0x1F, 0x0E, 0x14, 0x01DFF800, \ 6925 PPC_NONE, PPC2_TM), 6926 GEN_HANDLER2_E(tend, "tend", 0x1F, 0x0E, 0x15, 0x01FFF800, \ 6927 PPC_NONE, PPC2_TM), 6928 GEN_HANDLER2_E(tabort, "tabort", 0x1F, 0x0E, 0x1C, 0x03E0F800, \ 6929 PPC_NONE, PPC2_TM), 6930 GEN_HANDLER2_E(tabortwc, "tabortwc", 0x1F, 0x0E, 0x18, 0x00000000, \ 6931 PPC_NONE, PPC2_TM), 6932 GEN_HANDLER2_E(tabortwci, "tabortwci", 0x1F, 0x0E, 0x1A, 0x00000000, \ 6933 PPC_NONE, PPC2_TM), 6934 GEN_HANDLER2_E(tabortdc, "tabortdc", 0x1F, 0x0E, 0x19, 0x00000000, \ 6935 PPC_NONE, PPC2_TM), 6936 GEN_HANDLER2_E(tabortdci, "tabortdci", 0x1F, 0x0E, 0x1B, 0x00000000, \ 6937 PPC_NONE, PPC2_TM), 6938 GEN_HANDLER2_E(tsr, "tsr", 0x1F, 0x0E, 0x17, 0x03DFF800, \ 6939 PPC_NONE, PPC2_TM), 6940 GEN_HANDLER2_E(tcheck, "tcheck", 0x1F, 0x0E, 0x16, 0x007FF800, \ 6941 PPC_NONE, PPC2_TM), 6942 GEN_HANDLER2_E(treclaim, "treclaim", 0x1F, 0x0E, 0x1D, 0x03E0F800, \ 6943 PPC_NONE, PPC2_TM), 6944 GEN_HANDLER2_E(trechkpt, "trechkpt", 0x1F, 0x0E, 0x1F, 0x03FFF800, \ 6945 PPC_NONE, PPC2_TM), 6946 6947 #include "translate/fp-ops.c.inc" 6948 6949 #include "translate/vmx-ops.c.inc" 6950 6951 #include "translate/vsx-ops.c.inc" 6952 6953 #include "translate/spe-ops.c.inc" 6954 }; 6955 6956 /*****************************************************************************/ 6957 /* Opcode types */ 6958 enum { 6959 PPC_DIRECT = 0, /* Opcode routine */ 6960 PPC_INDIRECT = 1, /* Indirect opcode table */ 6961 }; 6962 6963 #define PPC_OPCODE_MASK 0x3 6964 6965 static inline int is_indirect_opcode(void *handler) 6966 { 6967 return ((uintptr_t)handler & PPC_OPCODE_MASK) == PPC_INDIRECT; 6968 } 6969 6970 static 
inline opc_handler_t **ind_table(void *handler)
{
    /* Strip the tag bits to recover the real sub-table pointer. */
    return (opc_handler_t **)((uintptr_t)handler & ~PPC_OPCODE_MASK);
}

/* Instruction table creation */
/* Opcodes tables creation */

/* Initialize every slot of an opcode table to the invalid handler. */
static void fill_new_table(opc_handler_t **table, int len)
{
    int i;

    for (i = 0; i < len; i++) {
        table[i] = &invalid_handler;
    }
}

/*
 * Allocate a fresh indirect sub-table and store it in table[idx].
 * The stored pointer is tagged with PPC_INDIRECT so that
 * is_indirect_opcode() can distinguish it from a direct handler;
 * ind_table() must be used to recover the allocation.
 */
static int create_new_table(opc_handler_t **table, unsigned char idx)
{
    opc_handler_t **tmp;

    tmp = g_new(opc_handler_t *, PPC_CPU_INDIRECT_OPCODES_LEN);
    fill_new_table(tmp, PPC_CPU_INDIRECT_OPCODES_LEN);
    table[idx] = (opc_handler_t *)((uintptr_t)tmp | PPC_INDIRECT);

    return 0;
}

/* Install a handler at table[idx]; returns -1 if the slot is taken. */
static int insert_in_table(opc_handler_t **table, unsigned char idx,
                           opc_handler_t *handler)
{
    if (table[idx] != &invalid_handler) {
        return -1;
    }
    table[idx] = handler;

    return 0;
}

/* Register a single-level (opc1-only) opcode handler. */
static int register_direct_insn(opc_handler_t **ppc_opcodes,
                                unsigned char idx, opc_handler_t *handler)
{
    if (insert_in_table(ppc_opcodes, idx, handler) < 0) {
        printf("*** ERROR: opcode %02x already assigned in main "
               "opcode table\n", idx);
        return -1;
    }

    return 0;
}

/*
 * Ensure table[idx1] is an indirect sub-table (creating it on demand),
 * then install 'handler' at sub-table slot idx2.  A NULL handler only
 * guarantees the sub-table exists ("join" mode) without inserting.
 */
static int register_ind_in_table(opc_handler_t **table,
                                 unsigned char idx1, unsigned char idx2,
                                 opc_handler_t *handler)
{
    if (table[idx1] == &invalid_handler) {
        if (create_new_table(table, idx1) < 0) {
            printf("*** ERROR: unable to create indirect table "
                   "idx=%02x\n", idx1);
            return -1;
        }
    } else {
        if (!is_indirect_opcode(table[idx1])) {
            printf("*** ERROR: idx %02x already assigned to a direct "
                   "opcode\n", idx1);
            return -1;
        }
    }
    if (handler != NULL &&
        insert_in_table(ind_table(table[idx1]), idx2, handler) < 0) {
        printf("*** ERROR: opcode %02x already assigned in "
               "opcode table %02x\n", idx2, idx1);
        return -1;
    }

    return 0;
}

/* Register a two-level (opc1/opc2) opcode handler. */
static int register_ind_insn(opc_handler_t **ppc_opcodes,
                             unsigned char idx1, unsigned char idx2,
                             opc_handler_t *handler)
{
    return register_ind_in_table(ppc_opcodes, idx1, idx2, handler);
}

/* Register a three-level (opc1/opc2/opc3) opcode handler. */
static int register_dblind_insn(opc_handler_t **ppc_opcodes,
                                unsigned char idx1, unsigned char idx2,
                                unsigned char idx3, opc_handler_t *handler)
{
    /* First make sure the intermediate table exists, then insert. */
    if (register_ind_in_table(ppc_opcodes, idx1, idx2, NULL) < 0) {
        printf("*** ERROR: unable to join indirect table idx "
               "[%02x-%02x]\n", idx1, idx2);
        return -1;
    }
    if (register_ind_in_table(ind_table(ppc_opcodes[idx1]), idx2, idx3,
                              handler) < 0) {
        printf("*** ERROR: unable to insert opcode "
               "[%02x-%02x-%02x]\n", idx1, idx2, idx3);
        return -1;
    }

    return 0;
}

/* Register a four-level (opc1/opc2/opc3/opc4) opcode handler. */
static int register_trplind_insn(opc_handler_t **ppc_opcodes,
                                 unsigned char idx1, unsigned char idx2,
                                 unsigned char idx3, unsigned char idx4,
                                 opc_handler_t *handler)
{
    opc_handler_t **table;

    /* Join (or create) each intermediate level before the final insert. */
    if (register_ind_in_table(ppc_opcodes, idx1, idx2, NULL) < 0) {
        printf("*** ERROR: unable to join indirect table idx "
               "[%02x-%02x]\n", idx1, idx2);
        return -1;
    }
    table = ind_table(ppc_opcodes[idx1]);
    if (register_ind_in_table(table, idx2, idx3, NULL) < 0) {
        printf("*** ERROR: unable to join 2nd-level indirect table idx "
               "[%02x-%02x-%02x]\n", idx1, idx2, idx3);
        return -1;
    }
    table = ind_table(table[idx2]);
    if (register_ind_in_table(table, idx3, idx4, handler) < 0) {
        printf("*** ERROR: unable to insert opcode "
               "[%02x-%02x-%02x-%02x]\n", idx1, idx2, idx3, idx4);
        return -1;
    }
    return 0;
}

/*
 * Dispatch one opcode_t to the right registration routine according to
 * how many opcode levels it uses (0xFF marks an unused level).
 */
static int register_insn(opc_handler_t **ppc_opcodes, opcode_t *insn)
{
    if (insn->opc2 != 0xFF) {
        if (insn->opc3 != 0xFF) {
            if (insn->opc4 != 0xFF) {
                if
(register_trplind_insn(ppc_opcodes, insn->opc1, insn->opc2, 7105 insn->opc3, insn->opc4, 7106 &insn->handler) < 0) { 7107 return -1; 7108 } 7109 } else { 7110 if (register_dblind_insn(ppc_opcodes, insn->opc1, insn->opc2, 7111 insn->opc3, &insn->handler) < 0) { 7112 return -1; 7113 } 7114 } 7115 } else { 7116 if (register_ind_insn(ppc_opcodes, insn->opc1, 7117 insn->opc2, &insn->handler) < 0) { 7118 return -1; 7119 } 7120 } 7121 } else { 7122 if (register_direct_insn(ppc_opcodes, insn->opc1, &insn->handler) < 0) { 7123 return -1; 7124 } 7125 } 7126 7127 return 0; 7128 } 7129 7130 static int test_opcode_table(opc_handler_t **table, int len) 7131 { 7132 int i, count, tmp; 7133 7134 for (i = 0, count = 0; i < len; i++) { 7135 /* Consistency fixup */ 7136 if (table[i] == NULL) { 7137 table[i] = &invalid_handler; 7138 } 7139 if (table[i] != &invalid_handler) { 7140 if (is_indirect_opcode(table[i])) { 7141 tmp = test_opcode_table(ind_table(table[i]), 7142 PPC_CPU_INDIRECT_OPCODES_LEN); 7143 if (tmp == 0) { 7144 free(table[i]); 7145 table[i] = &invalid_handler; 7146 } else { 7147 count++; 7148 } 7149 } else { 7150 count++; 7151 } 7152 } 7153 } 7154 7155 return count; 7156 } 7157 7158 static void fix_opcode_tables(opc_handler_t **ppc_opcodes) 7159 { 7160 if (test_opcode_table(ppc_opcodes, PPC_CPU_OPCODES_LEN) == 0) { 7161 printf("*** WARNING: no opcode defined !\n"); 7162 } 7163 } 7164 7165 /*****************************************************************************/ 7166 void create_ppc_opcodes(PowerPCCPU *cpu, Error **errp) 7167 { 7168 PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); 7169 opcode_t *opc; 7170 7171 fill_new_table(cpu->opcodes, PPC_CPU_OPCODES_LEN); 7172 for (opc = opcodes; opc < &opcodes[ARRAY_SIZE(opcodes)]; opc++) { 7173 if (((opc->handler.type & pcc->insns_flags) != 0) || 7174 ((opc->handler.type2 & pcc->insns_flags2) != 0)) { 7175 if (register_insn(cpu->opcodes, opc) < 0) { 7176 error_setg(errp, "ERROR initializing PowerPC instruction " 7177 "0x%02x 
0x%02x 0x%02x", opc->opc1, opc->opc2, 7178 opc->opc3); 7179 return; 7180 } 7181 } 7182 } 7183 fix_opcode_tables(cpu->opcodes); 7184 fflush(stdout); 7185 fflush(stderr); 7186 } 7187 7188 void destroy_ppc_opcodes(PowerPCCPU *cpu) 7189 { 7190 opc_handler_t **table, **table_2; 7191 int i, j, k; 7192 7193 for (i = 0; i < PPC_CPU_OPCODES_LEN; i++) { 7194 if (cpu->opcodes[i] == &invalid_handler) { 7195 continue; 7196 } 7197 if (is_indirect_opcode(cpu->opcodes[i])) { 7198 table = ind_table(cpu->opcodes[i]); 7199 for (j = 0; j < PPC_CPU_INDIRECT_OPCODES_LEN; j++) { 7200 if (table[j] == &invalid_handler) { 7201 continue; 7202 } 7203 if (is_indirect_opcode(table[j])) { 7204 table_2 = ind_table(table[j]); 7205 for (k = 0; k < PPC_CPU_INDIRECT_OPCODES_LEN; k++) { 7206 if (table_2[k] != &invalid_handler && 7207 is_indirect_opcode(table_2[k])) { 7208 g_free((opc_handler_t *)((uintptr_t)table_2[k] & 7209 ~PPC_INDIRECT)); 7210 } 7211 } 7212 g_free((opc_handler_t *)((uintptr_t)table[j] & 7213 ~PPC_INDIRECT)); 7214 } 7215 } 7216 g_free((opc_handler_t *)((uintptr_t)cpu->opcodes[i] & 7217 ~PPC_INDIRECT)); 7218 } 7219 } 7220 } 7221 7222 int ppc_fixup_cpu(PowerPCCPU *cpu) 7223 { 7224 CPUPPCState *env = &cpu->env; 7225 7226 /* 7227 * TCG doesn't (yet) emulate some groups of instructions that are 7228 * implemented on some otherwise supported CPUs (e.g. VSX and 7229 * decimal floating point instructions on POWER7). We remove 7230 * unsupported instruction groups from the cpu state's instruction 7231 * masks and hope the guest can cope. For at least the pseries 7232 * machine, the unavailability of these instructions can be 7233 * advertised to the guest via the device tree. 
     */
    if ((env->insns_flags & ~PPC_TCG_INSNS)
        || (env->insns_flags2 & ~PPC_TCG_INSNS2)) {
        warn_report("Disabling some instructions which are not "
                    "emulated by TCG (0x%" PRIx64 ", 0x%" PRIx64 ")",
                    env->insns_flags & ~PPC_TCG_INSNS,
                    env->insns_flags2 & ~PPC_TCG_INSNS2);
    }
    /* Mask out everything TCG cannot emulate. */
    env->insns_flags &= PPC_TCG_INSNS;
    env->insns_flags2 &= PPC_TCG_INSNS2;
    return 0;
}

/*
 * Decode one instruction through the legacy hand-built opcode tables
 * (up to four indirection levels: opc1/opc2/opc3/opc4).  Returns false
 * when the opcode is invalid/unsupported or has reserved bits set;
 * returns true once the handler has been invoked.
 */
static bool decode_legacy(PowerPCCPU *cpu, DisasContext *ctx, uint32_t insn)
{
    opc_handler_t **table, *handler;
    uint32_t inval;

    ctx->opcode = insn;

    LOG_DISAS("translate opcode %08x (%02x %02x %02x %02x) (%s)\n",
              insn, opc1(insn), opc2(insn), opc3(insn), opc4(insn),
              ctx->le_mode ? "little" : "big");

    /* Walk the (possibly nested) dispatch tables down to a handler. */
    table = cpu->opcodes;
    handler = table[opc1(insn)];
    if (is_indirect_opcode(handler)) {
        table = ind_table(handler);
        handler = table[opc2(insn)];
        if (is_indirect_opcode(handler)) {
            table = ind_table(handler);
            handler = table[opc3(insn)];
            if (is_indirect_opcode(handler)) {
                table = ind_table(handler);
                handler = table[opc4(insn)];
            }
        }
    }

    /* Is opcode *REALLY* valid ? */
    if (unlikely(handler->handler == &gen_invalid)) {
        qemu_log_mask(LOG_GUEST_ERROR, "invalid/unsupported opcode: "
                      "%02x - %02x - %02x - %02x (%08x) "
                      TARGET_FMT_lx "\n",
                      opc1(insn), opc2(insn), opc3(insn), opc4(insn),
                      insn, ctx->cia);
        return false;
    }

    /* SPE instructions use a different reserved-bits mask when Rc is set. */
    if (unlikely(handler->type & (PPC_SPE | PPC_SPE_SINGLE | PPC_SPE_DOUBLE)
                 && Rc(insn))) {
        inval = handler->inval2;
    } else {
        inval = handler->inval1;
    }

    /* Reject encodings with reserved bits set. */
    if (unlikely((insn & inval) != 0)) {
        qemu_log_mask(LOG_GUEST_ERROR, "invalid bits: %08x for opcode: "
                      "%02x - %02x - %02x - %02x (%08x) "
                      TARGET_FMT_lx "\n", insn & inval,
                      opc1(insn), opc2(insn), opc3(insn), opc4(insn),
                      insn, ctx->cia);
        return false;
    }

    /* Emit the TCG code for this instruction. */
    handler->handler(ctx);
    return true;
}

/*
 * Translator hook: pull the translation-relevant state out of the TB
 * hflags and the CPU environment into the DisasContext.
 */
static void ppc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUPPCState *env = cs->env_ptr;
    uint32_t hflags = ctx->base.tb->flags;

    ctx->spr_cb = env->spr_cb;
    ctx->pr = (hflags >> HFLAGS_PR) & 1;
    ctx->mem_idx = (hflags >> HFLAGS_DMMU_IDX) & 7;
    ctx->dr = (hflags >> HFLAGS_DR) & 1;
    ctx->hv = (hflags >> HFLAGS_HV) & 1;
    ctx->insns_flags = env->insns_flags;
    ctx->insns_flags2 = env->insns_flags2;
    /* No memory access generated yet for this instruction. */
    ctx->access_type = -1;
    ctx->need_access_type = !mmu_is_64bit(env->mmu_model);
    ctx->le_mode = (hflags >> HFLAGS_LE) & 1;
    ctx->default_tcg_memop_mask = ctx->le_mode ?
MO_LE : MO_BE;
    ctx->flags = env->flags;
#if defined(TARGET_PPC64)
    ctx->sf_mode = (hflags >> HFLAGS_64) & 1;
    ctx->has_cfar = !!(env->flags & POWERPC_FLAG_CFAR);
#endif
    /* 32B and every 64-bit MMU model batch TLB flushes lazily. */
    ctx->lazy_tlb_flush = env->mmu_model == POWERPC_MMU_32B
        || env->mmu_model & POWERPC_MMU_64;

    ctx->fpu_enabled = (hflags >> HFLAGS_FP) & 1;
    ctx->spe_enabled = (hflags >> HFLAGS_SPE) & 1;
    ctx->altivec_enabled = (hflags >> HFLAGS_VR) & 1;
    ctx->vsx_enabled = (hflags >> HFLAGS_VSX) & 1;
    ctx->tm_enabled = (hflags >> HFLAGS_TM) & 1;
    ctx->gtse = (hflags >> HFLAGS_GTSE) & 1;
    ctx->hr = (hflags >> HFLAGS_HR) & 1;
    ctx->mmcr0_pmcc0 = (hflags >> HFLAGS_PMCC0) & 1;
    ctx->mmcr0_pmcc1 = (hflags >> HFLAGS_PMCC1) & 1;
    ctx->mmcr0_pmcjce = (hflags >> HFLAGS_PMCJCE) & 1;
    ctx->pmc_other = (hflags >> HFLAGS_PMC_OTHER) & 1;
    ctx->pmu_insn_cnt = (hflags >> HFLAGS_INSN_CNT) & 1;

    ctx->singlestep_enabled = 0;
    if ((hflags >> HFLAGS_SE) & 1) {
        ctx->singlestep_enabled |= CPU_SINGLE_STEP;
        /* Single stepping: translate one instruction per TB. */
        ctx->base.max_insns = 1;
    }
    if ((hflags >> HFLAGS_BE) & 1) {
        ctx->singlestep_enabled |= CPU_BRANCH_STEP;
    }
}

/* Translator hook: nothing to do at the start of a TB. */
static void ppc_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}

/* Translator hook: record the insn start PC for exception restore. */
static void ppc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    tcg_gen_insn_start(dcbase->pc_next);
}

/* True if 'insn' is the prefix word of an ISA v3.1 prefixed instruction. */
static bool is_prefix_insn(DisasContext *ctx, uint32_t insn)
{
    /* Prefixed instructions only exist from ISA v3.1 on. */
    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    return opc1(insn) == 1;
}

/*
 * Translator hook: fetch and translate one (possibly prefixed)
 * instruction, trying the decodetree decoders first and falling back
 * to the legacy opcode tables for plain 32-bit instructions.
 */
static void ppc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = cs->env_ptr;
    target_ulong pc;
    uint32_t insn;
    bool ok;

    LOG_DISAS("----------------\n");
    LOG_DISAS("nip=" TARGET_FMT_lx " super=%d ir=%d\n",
              ctx->base.pc_next, ctx->mem_idx, (int)msr_ir);

    ctx->cia = pc = ctx->base.pc_next;
    insn = translator_ldl_swap(env, dcbase, pc, need_byteswap(ctx));
    ctx->base.pc_next = pc += 4;

    if (!is_prefix_insn(ctx, insn)) {
        /* Plain 32-bit instruction: decodetree first, legacy tables next. */
        ok = (decode_insn32(ctx, insn) ||
              decode_legacy(cpu, ctx, insn));
    } else if ((pc & 63) == 0) {
        /*
         * Power v3.1, section 1.9 Exceptions:
         * attempt to execute a prefixed instruction that crosses a
         * 64-byte address boundary (system alignment error).
         */
        gen_exception_err(ctx, POWERPC_EXCP_ALIGN, POWERPC_EXCP_ALIGN_INSN);
        ok = true;
    } else {
        /* Fetch the suffix word and decode the full 64-bit encoding. */
        uint32_t insn2 = translator_ldl_swap(env, dcbase, pc,
                                             need_byteswap(ctx));
        ctx->base.pc_next = pc += 4;
        ok = decode_insn64(ctx, deposit64(insn2, 32, 32, insn));
    }
    if (!ok) {
        gen_invalid(ctx);
    }

    /* End the TB when crossing a page boundary. */
    if (ctx->base.is_jmp == DISAS_NEXT && !(pc & ~TARGET_PAGE_MASK)) {
        ctx->base.is_jmp = DISAS_TOO_MANY;
    }
}

/*
 * Translator hook: emit the TB epilogue according to how translation
 * stopped, honoring single-step and branch-step trace modes.
 */
static void ppc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    DisasJumpType is_jmp = ctx->base.is_jmp;
    target_ulong nip = ctx->base.pc_next;

    if (is_jmp == DISAS_NORETURN) {
        /* We have already exited the TB. */
        return;
    }

    /* Honor single stepping. */
    if (unlikely(ctx->singlestep_enabled & CPU_SINGLE_STEP)) {
        bool rfi_type = false;

        switch (is_jmp) {
        case DISAS_TOO_MANY:
        case DISAS_EXIT_UPDATE:
        case DISAS_CHAIN_UPDATE:
            gen_update_nip(ctx, nip);
            break;
        case DISAS_EXIT:
        case DISAS_CHAIN:
            /*
             * This is a heuristic, to put it kindly. The rfi class of
             * instructions are among the few outside branches that change
             * NIP without taking an interrupt. Single step trace interrupts
             * do not fire on completion of these instructions.
             */
            rfi_type = true;
            break;
        default:
            g_assert_not_reached();
        }

        gen_debug_exception(ctx, rfi_type);
        return;
    }

    switch (is_jmp) {
    case DISAS_TOO_MANY:
        if (use_goto_tb(ctx, nip)) {
            pmu_count_insns(ctx);
            tcg_gen_goto_tb(0);
            gen_update_nip(ctx, nip);
            tcg_gen_exit_tb(ctx->base.tb, 0);
            break;
        }
        /* fall through */
    case DISAS_CHAIN_UPDATE:
        gen_update_nip(ctx, nip);
        /* fall through */
    case DISAS_CHAIN:
        /*
         * tcg_gen_lookup_and_goto_ptr will exit the TB if
         * CF_NO_GOTO_PTR is set. Count insns now.
         *
         * NOTE(review): CF_NO_GOTO_PTR is a compile-flags (cflags) bit,
         * but this tests tb->flags (target hflags) -- confirm whether
         * this should read tb_cflags(ctx->base.tb) instead.
         */
        if (ctx->base.tb->flags & CF_NO_GOTO_PTR) {
            pmu_count_insns(ctx);
        }

        tcg_gen_lookup_and_goto_ptr();
        break;

    case DISAS_EXIT_UPDATE:
        gen_update_nip(ctx, nip);
        /* fall through */
    case DISAS_EXIT:
        pmu_count_insns(ctx);
        tcg_gen_exit_tb(NULL, 0);
        break;

    default:
        g_assert_not_reached();
    }
}

/* Translator hook: disassemble the guest code of this TB to 'logfile'. */
static void ppc_tr_disas_log(const DisasContextBase *dcbase,
                             CPUState *cs, FILE *logfile)
{
    fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
    target_disas(logfile, cs, dcbase->pc_first, dcbase->tb->size);
}

/* Callback table wiring the PPC translator into the generic loop. */
static const TranslatorOps ppc_tr_ops = {
    .init_disas_context = ppc_tr_init_disas_context,
    .tb_start           = ppc_tr_tb_start,
    .insn_start         = ppc_tr_insn_start,
    .translate_insn     = ppc_tr_translate_insn,
    .tb_stop            = ppc_tr_tb_stop,
    .disas_log          = ppc_tr_disas_log,
};

/* Entry point: translate one TB via the generic translator loop. */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           target_ulong pc, void *host_pc)
{
    DisasContext ctx;

    translator_loop(cs, tb, max_insns, pc, host_pc, &ppc_tr_ops, &ctx.base);
}