/*
 * PowerPC exception emulation helpers for QEMU (TCG specific)
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "qemu/log.h"
#include "exec/cpu_ldst.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "system/runstate.h"

#include "helper_regs.h"
#include "hw/ppc/ppc.h"
#include "internal.h"
#include "cpu.h"
#include "trace.h"

/*****************************************************************************/
/* Exceptions processing helpers */

/*
 * Record 'exception'/'error_code' on the CPU and longjmp back to the cpu
 * loop, restoring guest state from host return address 'raddr' (raddr == 0
 * means the guest state is already up to date).  Does not return.
 */
void raise_exception_err_ra(CPUPPCState *env, uint32_t exception,
                            uint32_t error_code, uintptr_t raddr)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = exception;
    env->error_code = error_code;
    cpu_loop_exit_restore(cs, raddr);
}

/* TCG helper: raise an exception carrying an error code (no unwind needed). */
void helper_raise_exception_err(CPUPPCState *env, uint32_t exception,
                                uint32_t error_code)
{
    raise_exception_err_ra(env, exception, error_code, 0);
}

/* TCG helper: raise an exception with no error code. */
void helper_raise_exception(CPUPPCState *env, uint32_t exception)
{
    raise_exception_err_ra(env, exception, 0, 0);
}

#ifndef CONFIG_USER_ONLY

/* Internal system-mode variant: raise with error code, no state unwinding. */
static G_NORETURN void raise_exception_err(CPUPPCState *env, uint32_t exception,
                                           uint32_t error_code)
{
raise_exception_err_ra(env, exception, error_code, 0);
}

/* Internal system-mode variant: raise with no error code, no unwinding. */
static G_NORETURN void raise_exception(CPUPPCState *env, uint32_t exception)
{
    raise_exception_err_ra(env, exception, 0, 0);
}

#endif /* !CONFIG_USER_ONLY */

/*
 * tw (trap word): 'flags' is the TO field — bit 0x10 = signed less-than,
 * 0x08 = signed greater-than, 0x04 = equal, 0x02 = unsigned less-than,
 * 0x01 = unsigned greater-than.  Trap if any selected condition holds.
 */
void helper_TW(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
               uint32_t flags)
{
    if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
                  ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
                  ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
                  ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
                  ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_TRAP, GETPC());
    }
}

#ifdef TARGET_PPC64
/* td (trap doubleword): same TO encoding as helper_TW, 64-bit comparisons. */
void helper_TD(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
               uint32_t flags)
{
    if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
                  ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
                  ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
                  ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
                  ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01))))) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_TRAP, GETPC());
    }
}
#endif /* TARGET_PPC64 */

/*
 * One lane of the SIMON-like block cipher used by the hashst/hashchk
 * (ROP protection) instructions: encrypt 32-bit 'x' under 'key', with
 * 'lane' rotating which 16-bit subkey starts each round group.
 */
static uint32_t helper_SIMON_LIKE_32_64(uint32_t x, uint64_t key, uint32_t lane)
{
    const uint16_t c = 0xfffc;
    const uint64_t z0 = 0xfa2561cdf44ac398ULL;
    uint16_t z = 0, temp;
    uint16_t k[32], eff_k[32], xleft[33], xright[33], fxleft[32];

    /* Split the 64-bit key into four 16-bit words, most significant first. */
    for (int i = 3; i >= 0; i--) {
        k[i] = key & 0xffff;
        key >>= 16;
    }
    xleft[0] = x & 0xffff;
    xright[0] = (x >> 16) & 0xffff;

    /* Key schedule: expand 4 key words to 32 round keys. */
    for (int i = 0; i < 28; i++) {
        z = (z0 >> (63 - i)) & 1;
        temp = ror16(k[i + 3], 3) ^ k[i + 1];
        k[i + 4] = c ^ z ^ k[i] ^ temp ^ ror16(temp, 1);
    }

    /* Permute round keys within each group of 4 according to 'lane'. */
    for (int i = 0; i < 8; i++) {
        eff_k[4 * i + 0] = k[4 * i + ((0 + lane) % 4)];
        eff_k[4 * i + 1] = k[4 * i + ((1 +
lane) % 4)];
        eff_k[4 * i + 2] = k[4 * i + ((2 + lane) % 4)];
        eff_k[4 * i + 3] = k[4 * i + ((3 + lane) % 4)];
    }

    /* 32 Feistel rounds. */
    for (int i = 0; i < 32; i++) {
        fxleft[i] = (rol16(xleft[i], 1) &
                     rol16(xleft[i], 8)) ^ rol16(xleft[i], 2);
        xleft[i + 1] = xright[i] ^ fxleft[i] ^ eff_k[i];
        xright[i + 1] = xleft[i];
    }

    return (((uint32_t)xright[32]) << 16) | xleft[32];
}

/* Compute the 64-bit hash used by hashst/hashchk from ra, rb and the key. */
static uint64_t hash_digest(uint64_t ra, uint64_t rb, uint64_t key)
{
    uint64_t stage0_h = 0ULL, stage0_l = 0ULL;
    uint64_t stage1_h, stage1_l;

    /* Stage 0: interleave bytes of ra and rb into two 64-bit words. */
    for (int i = 0; i < 4; i++) {
        stage0_h |= ror64(rb & 0xff, 8 * (2 * i + 1));
        stage0_h |= ((ra >> 32) & 0xff) << (8 * 2 * i);
        stage0_l |= ror64((rb >> 32) & 0xff, 8 * (2 * i + 1));
        stage0_l |= (ra & 0xff) << (8 * 2 * i);
        rb >>= 8;
        ra >>= 8;
    }

    /* Stage 1: encrypt each 32-bit half with a distinct cipher lane. */
    stage1_h = (uint64_t)helper_SIMON_LIKE_32_64(stage0_h >> 32, key, 0) << 32;
    stage1_h |= helper_SIMON_LIKE_32_64(stage0_h, key, 1);
    stage1_l = (uint64_t)helper_SIMON_LIKE_32_64(stage0_l >> 32, key, 2) << 32;
    stage1_l |= helper_SIMON_LIKE_32_64(stage0_l, key, 3);

    return stage1_h ^ stage1_l;
}

/*
 * Common body for hashst/hashchk: 'store' writes the computed hash to
 * guest memory at 'ea'; otherwise reload it and trap (PROGRAM/TRAP) on
 * mismatch.
 */
static void do_hash(CPUPPCState *env, target_ulong ea, target_ulong ra,
                    target_ulong rb, uint64_t key, bool store)
{
    uint64_t calculated_hash = hash_digest(ra, rb, key), loaded_hash;

    if (store) {
        cpu_stq_data_ra(env, ea, calculated_hash, GETPC());
    } else {
        loaded_hash = cpu_ldq_data_ra(env, ea, GETPC());
        if (loaded_hash != calculated_hash) {
            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_TRAP, GETPC());
        }
    }
}

#include "qemu/guest-random.h"

#ifdef TARGET_PPC64
/*
 * Emit a hash helper; execution is gated by the DEXCR/HDEXCR aspect bits
 * appropriate to the current privilege state (see ISA 3.1B DEXCR).
 */
#define HELPER_HASH(op, key, store, dexcr_aspect)                             \
void helper_##op(CPUPPCState *env, target_ulong ea, target_ulong ra,          \
                 target_ulong rb)                                             \
{                                                                             \
    if (env->msr & R_MSR_PR_MASK) {                                           \
        if (!(env->spr[SPR_DEXCR] & R_DEXCR_PRO_##dexcr_aspect##_MASK ||      \
env->spr[SPR_HDEXCR] & R_HDEXCR_ENF_##dexcr_aspect##_MASK))                   \
            return;                                                           \
    } else if (!(env->msr & R_MSR_HV_MASK)) {                                 \
        if (!(env->spr[SPR_DEXCR] & R_DEXCR_PNH_##dexcr_aspect##_MASK ||      \
              env->spr[SPR_HDEXCR] & R_HDEXCR_ENF_##dexcr_aspect##_MASK))     \
            return;                                                           \
    } else if (!(env->msr & R_MSR_S_MASK)) {                                  \
        if (!(env->spr[SPR_HDEXCR] & R_HDEXCR_HNU_##dexcr_aspect##_MASK))     \
            return;                                                           \
    }                                                                         \
                                                                              \
    do_hash(env, ea, ra, rb, key, store);                                     \
}
#else
/* 32-bit targets have no DEXCR: the hash insns are never gated. */
#define HELPER_HASH(op, key, store, dexcr_aspect)                             \
void helper_##op(CPUPPCState *env, target_ulong ea, target_ulong ra,          \
                 target_ulong rb)                                             \
{                                                                             \
    do_hash(env, ea, ra, rb, key, store);                                     \
}
#endif /* TARGET_PPC64 */

/* Problem-state (HASHKEYR) and privileged (HASHPKEYR) hash variants. */
HELPER_HASH(HASHST, env->spr[SPR_HASHKEYR], true, NPHIE)
HELPER_HASH(HASHCHK, env->spr[SPR_HASHKEYR], false, NPHIE)
HELPER_HASH(HASHSTP, env->spr[SPR_HASHPKEYR], true, PHIE)
HELPER_HASH(HASHCHKP, env->spr[SPR_HASHPKEYR], false, PHIE)

#ifndef CONFIG_USER_ONLY

/* Deliver an alignment interrupt for an unaligned guest access. */
void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr)
{
    CPUPPCState *env = cpu_env(cs);
    uint32_t insn;

    /* Restore state and reload the insn we executed, for filling in DSISR.
     */
    cpu_restore_state(cs, retaddr);
    insn = ppc_ldl_code(env, env->nip);

    /* Which fault-address SPR to use depends on the MMU family. */
    switch (env->mmu_model) {
    case POWERPC_MMU_SOFT_4xx:
        env->spr[SPR_40x_DEAR] = vaddr;
        break;
    case POWERPC_MMU_BOOKE:
    case POWERPC_MMU_BOOKE206:
        env->spr[SPR_BOOKE_DEAR] = vaddr;
        break;
    default:
        env->spr[SPR_DAR] = vaddr;
        break;
    }

    cs->exception_index = POWERPC_EXCP_ALIGN;
    /* error_code carries the insn's RS/RA fields for DSISR. */
    env->error_code = insn & 0x03FF0000;
    cpu_loop_exit(cs);
}

/* Turn a failed bus/memory transaction into a machine check interrupt. */
void ppc_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr vaddr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr)
{
    CPUPPCState *env = cpu_env(cs);

    switch (env->excp_model) {
#if defined(TARGET_PPC64)
    case POWERPC_EXCP_POWER8:
    case POWERPC_EXCP_POWER9:
    case POWERPC_EXCP_POWER10:
    case POWERPC_EXCP_POWER11:
        /*
         * Machine check codes can be found in processor User Manual or
         * Linux or skiboot source.
         */
        if (access_type == MMU_DATA_LOAD) {
            env->spr[SPR_DAR] = vaddr;
            env->spr[SPR_DSISR] = PPC_BIT(57);
            env->error_code = PPC_BIT(42);

        } else if (access_type == MMU_DATA_STORE) {
            /*
             * MCE for stores in POWER is asynchronous so hardware does
             * not set DAR, but QEMU can do better.
             */
            env->spr[SPR_DAR] = vaddr;
            env->error_code = PPC_BIT(36) | PPC_BIT(43) | PPC_BIT(45);
            env->error_code |= PPC_BIT(42);

        } else { /* Fetch */
            /*
             * is_prefix_insn_excp() tests !PPC_BIT(42) to avoid fetching
             * the instruction, so that must always be clear for fetches.
             */
            env->error_code = PPC_BIT(36) | PPC_BIT(44) | PPC_BIT(45);
        }
        break;
#endif
    default:
        /*
         * TODO: Check behaviour for other CPUs, for now do nothing.
         * Could add a basic MCE even if real hardware ignores.
287 */ 288 return; 289 } 290 291 cs->exception_index = POWERPC_EXCP_MCHECK; 292 cpu_loop_exit_restore(cs, retaddr); 293 } 294 295 void ppc_cpu_debug_excp_handler(CPUState *cs) 296 { 297 #if defined(TARGET_PPC64) 298 CPUPPCState *env = cpu_env(cs); 299 300 if (env->insns_flags2 & PPC2_ISA207S) { 301 if (cs->watchpoint_hit) { 302 if (cs->watchpoint_hit->flags & BP_CPU) { 303 env->spr[SPR_DAR] = cs->watchpoint_hit->hitaddr; 304 env->spr[SPR_DSISR] = PPC_BIT(41); 305 cs->watchpoint_hit = NULL; 306 raise_exception(env, POWERPC_EXCP_DSI); 307 } 308 cs->watchpoint_hit = NULL; 309 } else if (cpu_breakpoint_test(cs, env->nip, BP_CPU)) { 310 raise_exception_err(env, POWERPC_EXCP_TRACE, 311 PPC_BIT(33) | PPC_BIT(43)); 312 } 313 } 314 #endif 315 } 316 317 bool ppc_cpu_debug_check_breakpoint(CPUState *cs) 318 { 319 #if defined(TARGET_PPC64) 320 CPUPPCState *env = cpu_env(cs); 321 322 if (env->insns_flags2 & PPC2_ISA207S) { 323 target_ulong priv; 324 325 priv = env->spr[SPR_CIABR] & PPC_BITMASK(62, 63); 326 switch (priv) { 327 case 0x1: /* problem */ 328 return env->msr & ((target_ulong)1 << MSR_PR); 329 case 0x2: /* supervisor */ 330 return (!(env->msr & ((target_ulong)1 << MSR_PR)) && 331 !(env->msr & ((target_ulong)1 << MSR_HV))); 332 case 0x3: /* hypervisor */ 333 return (!(env->msr & ((target_ulong)1 << MSR_PR)) && 334 (env->msr & ((target_ulong)1 << MSR_HV))); 335 default: 336 g_assert_not_reached(); 337 } 338 } 339 #endif 340 341 return false; 342 } 343 344 bool ppc_cpu_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp) 345 { 346 #if defined(TARGET_PPC64) 347 CPUPPCState *env = cpu_env(cs); 348 349 if (env->insns_flags2 & PPC2_ISA207S) { 350 if (wp == env->dawr0_watchpoint) { 351 uint32_t dawrx = env->spr[SPR_DAWRX0]; 352 bool wt = extract32(dawrx, PPC_BIT_NR(59), 1); 353 bool wti = extract32(dawrx, PPC_BIT_NR(60), 1); 354 bool hv = extract32(dawrx, PPC_BIT_NR(61), 1); 355 bool sv = extract32(dawrx, PPC_BIT_NR(62), 1); 356 bool pr = extract32(dawrx, PPC_BIT_NR(62), 
1);

            /* Match the watchpoint's privilege qualifiers against MSR. */
            if ((env->msr & ((target_ulong)1 << MSR_PR)) && !pr) {
                return false;
            } else if ((env->msr & ((target_ulong)1 << MSR_HV)) && !hv) {
                return false;
            } else if (!sv) {
                return false;
            }

            /* Unless WTI, WT must match the data-relocation state. */
            if (!wti) {
                if (env->msr & ((target_ulong)1 << MSR_DR)) {
                    if (!wt) {
                        return false;
                    }
                } else {
                    if (wt) {
                        return false;
                    }
                }
            }

            return true;
        }
    }
#endif

    return false;
}

/*
 * This stops the machine and logs CPU state without killing QEMU (like
 * cpu_abort()) because it is often a guest error as opposed to a QEMU error,
 * so the machine can still be debugged.
 */
G_NORETURN void powerpc_checkstop(CPUPPCState *env, const char *reason)
{
    CPUState *cs = env_cpu(env);
    FILE *f;

    f = qemu_log_trylock();
    if (f) {
        fprintf(f, "Entering checkstop state: %s\n", reason);
        cpu_dump_state(cs, f, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_unlock(f);
    }

    /*
     * This stops the machine and logs CPU state without killing QEMU
     * (like cpu_abort()) so the machine can still be debugged (because
     * it is often a guest error).
     */
    qemu_system_guest_panicked(NULL);
    cpu_loop_exit_noexc(cs);
}

/* Return true iff byteswap is needed to load instruction */
static inline bool insn_need_byteswap(CPUArchState *env)
{
    /* SYSTEM builds TARGET_BIG_ENDIAN.
Need to swap when MSR[LE] is set */
    return !!(env->msr & ((target_ulong)1 << MSR_LE));
}

/* Fetch one guest instruction, byteswapped if the guest runs little-endian. */
uint32_t ppc_ldl_code(CPUArchState *env, target_ulong addr)
{
    uint32_t insn = cpu_ldl_code(env, addr);

    if (insn_need_byteswap(env)) {
        insn = bswap32(insn);
    }

    return insn;
}

#if defined(TARGET_PPC64)
void helper_attn(CPUPPCState *env)
{
    /* POWER attn is unprivileged when enabled by HID, otherwise illegal */
    if ((*env->check_attn)(env)) {
        powerpc_checkstop(env, "host executed attn");
    } else {
        raise_exception_err(env, POWERPC_EXCP_HV_EMU,
                            POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
    }
}

/* scv: system call vectored, or facility-unavailable if FSCR disables it. */
void helper_scv(CPUPPCState *env, uint32_t lev)
{
    if (env->spr[SPR_FSCR] & (1ull << FSCR_SCV)) {
        raise_exception_err(env, POWERPC_EXCP_SYSCALL_VECTORED, lev);
    } else {
        raise_exception_err(env, POWERPC_EXCP_FU, FSCR_IC_SCV);
    }
}

/* Power-management insns (doze/nap/sleep/stop): halt the vCPU. */
void helper_pminsn(CPUPPCState *env, uint32_t insn)
{
    CPUState *cs = env_cpu(env);

    cs->halted = 1;

    /* Condition for waking up at 0x100 */
    env->resume_as_sreset = (insn != PPC_PM_STOP) ||
        (env->spr[SPR_PSSCR] & PSSCR_EC);

    /* HDECR is not to wake from PM state, it may have already fired */
    if (env->resume_as_sreset) {
        PowerPCCPU *cpu = env_archcpu(env);
        ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 0);
    }

    ppc_maybe_interrupt(env);
}

#endif /* TARGET_PPC64 */
/* mtmsr: store MSR; raise the exception hreg_store_msr() reports, if any. */
void helper_store_msr(CPUPPCState *env, target_ulong val)
{
    uint32_t excp = hreg_store_msr(env, val, 0);

    if (excp != 0) {
        cpu_interrupt_exittb(env_cpu(env));
        raise_exception(env, excp);
    }
}

void helper_ppc_maybe_interrupt(CPUPPCState *env)
{
    ppc_maybe_interrupt(env);
}

/* Common return-from-interrupt: set nip/MSR and resynchronize state. */
static void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr)
{
    /* MSR:POW cannot be set by any form of rfi */
    msr &= ~(1ULL << MSR_POW);

    /* MSR:TGPR
cannot be set by any form of rfi */
    if (env->flags & POWERPC_FLAG_TGPR) {
        msr &= ~(1ULL << MSR_TGPR);
    }

#ifdef TARGET_PPC64
    /* Switching to 32-bit ? Crop the nip */
    if (!msr_is_64bit(env, msr)) {
        nip = (uint32_t)nip;
    }
#else
    nip = (uint32_t)nip;
#endif
    /* XXX: beware: this is false if VLE is supported */
    env->nip = nip & ~((target_ulong)0x00000003);
    hreg_store_msr(env, msr, 1);
    trace_ppc_excp_rfi(env->nip, env->msr);
    /*
     * No need to raise an exception here, as rfi is always the last
     * insn of a TB
     */
    cpu_interrupt_exittb(env_cpu(env));
    /* Reset the reservation */
    env->reserve_addr = -1;

    /* Context synchronizing: check if TCG TLB needs flush */
    check_tlb_flush(env, false);
}

/* rfi: return from interrupt via SRR0/SRR1 (upper SRR1 bits masked). */
void helper_rfi(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1] & 0xfffffffful);
}

#ifdef TARGET_PPC64
void helper_rfid(CPUPPCState *env)
{
    /*
     * The architecture defines a number of rules for which bits can
     * change but in practice, we handle this in hreg_store_msr()
     * which will be called by do_rfi(), so there is no need to filter
     * here
     */
    do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1]);
}

/* rfscv: return from system call vectored via LR/CTR. */
void helper_rfscv(CPUPPCState *env)
{
    do_rfi(env, env->lr, env->ctr);
}

/* hrfid: return from hypervisor interrupt via HSRR0/HSRR1. */
void helper_hrfid(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_HSRR0], env->spr[SPR_HSRR1]);
}

/* rfebb: return from event-based branch; 's' sets/clears BESCR[GE]. */
void helper_rfebb(CPUPPCState *env, target_ulong s)
{
    target_ulong msr = env->msr;

    /*
     * Handling of BESCR bits 32:33 according to PowerISA v3.1:
     *
     * "If BESCR 32:33 != 0b00 the instruction is treated as if
     * the instruction form were invalid."
     */
    if (env->spr[SPR_BESCR] & BESCR_INVALID) {
        raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                            POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
    }

    env->nip = env->spr[SPR_EBBRR];

    /* Switching to 32-bit ? Crop the nip */
    if (!msr_is_64bit(env, msr)) {
        env->nip = (uint32_t)env->spr[SPR_EBBRR];
    }

    if (s) {
        env->spr[SPR_BESCR] |= BESCR_GE;
    } else {
        env->spr[SPR_BESCR] &= ~BESCR_GE;
    }
}

/*
 * Triggers or queues an 'ebb_excp' EBB exception. All checks
 * but FSCR, HFSCR and msr_pr must be done beforehand.
 *
 * PowerISA v3.1 isn't clear about whether an EBB should be
 * postponed or cancelled if the EBB facility is unavailable.
 * Our assumption here is that the EBB is cancelled if both
 * FSCR and HFSCR EBB facilities aren't available.
 */
static void do_ebb(CPUPPCState *env, int ebb_excp)
{
    PowerPCCPU *cpu = env_archcpu(env);

    /*
     * FSCR_EBB and FSCR_IC_EBB are the same bits used with
     * HFSCR.
     */
    helper_fscr_facility_check(env, FSCR_EBB, 0, FSCR_IC_EBB);
    helper_hfscr_facility_check(env, FSCR_EBB, "EBB", FSCR_IC_EBB);

    /* Record which event caused the EBB in BESCR. */
    if (ebb_excp == POWERPC_EXCP_PERFM_EBB) {
        env->spr[SPR_BESCR] |= BESCR_PMEO;
    } else if (ebb_excp == POWERPC_EXCP_EXTERNAL_EBB) {
        env->spr[SPR_BESCR] |= BESCR_EEO;
    }

    /* EBBs are delivered immediately in problem state, queued otherwise. */
    if (FIELD_EX64(env->msr, MSR, PR)) {
        powerpc_excp(cpu, ebb_excp);
    } else {
        ppc_set_irq(cpu, PPC_INTERRUPT_EBB, 1);
    }
}

/* Raise a performance-monitor EBB if MMCR0/BESCR enable it. */
void raise_ebb_perfm_exception(CPUPPCState *env)
{
    bool perfm_ebb_enabled = env->spr[SPR_POWER_MMCR0] & MMCR0_EBE &&
                             env->spr[SPR_BESCR] & BESCR_PME &&
                             env->spr[SPR_BESCR] & BESCR_GE;

    if (!perfm_ebb_enabled) {
        return;
    }

    do_ebb(env, POWERPC_EXCP_PERFM_EBB);
}
#endif /* TARGET_PPC64 */

/*****************************************************************************/
/* Embedded PowerPC specific helpers */
void helper_40x_rfci(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3]);
}

void helper_rfci(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1]);
}

void helper_rfdi(CPUPPCState *env)
{
    /* FIXME: choose CSRR1 or DSRR1 based on cpu type */
    do_rfi(env, env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1]);
}

void helper_rfmci(CPUPPCState *env)
{
    /* FIXME: choose CSRR1 or MCSRR1 based on cpu type */
    do_rfi(env, env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1]);
}

/* Embedded.Processor Control */
/* Map a doorbell message type to its interrupt line; -1 = no interrupt. */
static int dbell2irq(target_ulong rb)
{
    int msg = rb & DBELL_TYPE_MASK;
    int irq = -1;

    switch (msg) {
    case DBELL_TYPE_DBELL:
        irq = PPC_INTERRUPT_DOORBELL;
        break;
    case DBELL_TYPE_DBELL_CRIT:
        irq = PPC_INTERRUPT_CDOORBELL;
        break;
    case DBELL_TYPE_G_DBELL:
    case DBELL_TYPE_G_DBELL_CRIT:
    case DBELL_TYPE_G_DBELL_MC:
        /* XXX implement */
    default:
        break;
    }

    return irq;
}

/* msgclr: clear a pending doorbell interrupt of the given type. */
void helper_msgclr(CPUPPCState *env, target_ulong rb)
{
    int irq = dbell2irq(rb);

    if (irq < 0) {
        return;
    }

    ppc_set_irq(env_archcpu(env), irq, 0);
}

/* msgsnd: raise a doorbell on the matching PIR, or on all CPUs if broadcast. */
void helper_msgsnd(target_ulong rb)
{
    int irq = dbell2irq(rb);
    int pir = rb & DBELL_PIRTAG_MASK;
    CPUState *cs;

    if (irq < 0) {
        return;
    }

    bql_lock();
    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);
        CPUPPCState *cenv = &cpu->env;

        if ((rb & DBELL_BRDCAST_MASK) || (cenv->spr[SPR_BOOKE_PIR] == pir)) {
            ppc_set_irq(cpu, irq, 1);
        }
    }
    bql_unlock();
}

/* Server Processor Control */

static bool dbell_type_server(target_ulong rb)
{
    /*
     * A Directed Hypervisor Doorbell message is sent only if the
     * message type is 5. All other types are reserved and the
     * instruction is a no-op
     */
    return (rb & DBELL_TYPE_MASK) == DBELL_TYPE_DBELL_SERVER;
}

static inline bool dbell_bcast_core(target_ulong rb)
{
    return (rb & DBELL_BRDCAST_MASK) == DBELL_BRDCAST_CORE;
}

static inline bool dbell_bcast_subproc(target_ulong rb)
{
    return (rb & DBELL_BRDCAST_MASK) == DBELL_BRDCAST_SUBPROC;
}

/*
 * Send an interrupt to a thread in the same core as env).
 */
static void msgsnd_core_tir(CPUPPCState *env, uint32_t target_tir, int irq)
{
    PowerPCCPU *cpu = env_archcpu(env);
    CPUState *cs = env_cpu(env);

    if (ppc_cpu_lpar_single_threaded(cs)) {
        /* Single-threaded: only TIR 0 exists. */
        if (target_tir == 0) {
            ppc_set_irq(cpu, irq, 1);
        }
    } else {
        CPUState *ccs;

        /* Does iothread need to be locked for walking CPU list?
         */
        bql_lock();
        THREAD_SIBLING_FOREACH(cs, ccs) {
            PowerPCCPU *ccpu = POWERPC_CPU(ccs);
            if (target_tir == ppc_cpu_tir(ccpu)) {
                ppc_set_irq(ccpu, irq, 1);
                break;
            }
        }
        bql_unlock();
    }
}

/* msgclr (book3s): clear a pending hypervisor doorbell. */
void helper_book3s_msgclr(CPUPPCState *env, target_ulong rb)
{
    if (!dbell_type_server(rb)) {
        return;
    }

    ppc_set_irq(env_archcpu(env), PPC_INTERRUPT_HDOORBELL, 0);
}

/* msgsnd (book3s): send a directed hypervisor doorbell. */
void helper_book3s_msgsnd(CPUPPCState *env, target_ulong rb)
{
    int pir = rb & DBELL_PROCIDTAG_MASK;
    bool brdcast = false;
    CPUState *cs, *ccs;
    PowerPCCPU *cpu;

    if (!dbell_type_server(rb)) {
        return;
    }

    /* POWER8 msgsnd is like msgsndp (targets a thread within core) */
    if (!(env->insns_flags2 & PPC2_ISA300)) {
        msgsnd_core_tir(env, rb & PPC_BITMASK(57, 63), PPC_INTERRUPT_HDOORBELL);
        return;
    }

    /* POWER9 and later msgsnd is a global (targets any thread) */
    cpu = ppc_get_vcpu_by_pir(pir);
    if (!cpu) {
        return;
    }
    cs = CPU(cpu);

    if (dbell_bcast_core(rb) || (dbell_bcast_subproc(rb) &&
                                 (env->flags & POWERPC_FLAG_SMT_1LPAR))) {
        brdcast = true;
    }

    if (ppc_cpu_core_single_threaded(cs) || !brdcast) {
        ppc_set_irq(cpu, PPC_INTERRUPT_HDOORBELL, 1);
        return;
    }

    /*
     * Why is bql needed for walking CPU list? Answer seems to be because ppc
     * irq handling needs it, but ppc_set_irq takes the lock itself if needed,
     * so could this be removed?
     */
    bql_lock();
    /* Broadcast: raise the doorbell on every sibling thread of the target. */
    THREAD_SIBLING_FOREACH(cs, ccs) {
        ppc_set_irq(POWERPC_CPU(ccs), PPC_INTERRUPT_HDOORBELL, 1);
    }
    bql_unlock();
}

#ifdef TARGET_PPC64
/* msgclrp: clear a pending (privileged) directed doorbell. */
void helper_book3s_msgclrp(CPUPPCState *env, target_ulong rb)
{
    helper_hfscr_facility_check(env, HFSCR_MSGP, "msgclrp", HFSCR_IC_MSGP);

    if (!dbell_type_server(rb)) {
        return;
    }

    ppc_set_irq(env_archcpu(env), PPC_INTERRUPT_DOORBELL, 0);
}

/*
 * sends a message to another thread on the same
 * multi-threaded processor
 */
void helper_book3s_msgsndp(CPUPPCState *env, target_ulong rb)
{
    helper_hfscr_facility_check(env, HFSCR_MSGP, "msgsndp", HFSCR_IC_MSGP);

    if (!dbell_type_server(rb)) {
        return;
    }

    msgsnd_core_tir(env, rb & PPC_BITMASK(57, 63), PPC_INTERRUPT_DOORBELL);
}
#endif /* TARGET_PPC64 */

/* Single-step tracing */
void helper_book3s_trace(CPUPPCState *env, target_ulong prev_ip)
{
    uint32_t error_code = 0;
    if (env->insns_flags2 & PPC2_ISA207S) {
        /* Load/store reporting, SRR1[35, 36] and SDAR, are not implemented. */
        env->spr[SPR_POWER_SIAR] = prev_ip;
        error_code = PPC_BIT(33);
    }
    raise_exception_err(env, POWERPC_EXCP_TRACE, error_code);
}
#endif /* !CONFIG_USER_ONLY */