/*
 * PowerPC exception emulation helpers for QEMU.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "internal.h"
#include "helper_regs.h"

#include "trace.h"

#ifdef CONFIG_TCG
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#endif

/* #define DEBUG_SOFTWARE_TLB */

/*****************************************************************************/
/* Exception processing */
#if !defined(CONFIG_USER_ONLY)

/* Log r0, r3-r8 and NIP of a guest system call when CPU_LOG_INT is enabled. */
static inline void dump_syscall(CPUPPCState *env)
{
    qemu_log_mask(CPU_LOG_INT, "syscall r0=%016" PRIx64
                  " r3=%016" PRIx64 " r4=%016" PRIx64 " r5=%016" PRIx64
                  " r6=%016" PRIx64 " r7=%016" PRIx64 " r8=%016" PRIx64
                  " nip=" TARGET_FMT_lx "\n",
                  ppc_dump_gpr(env, 0), ppc_dump_gpr(env, 3),
                  ppc_dump_gpr(env, 4), ppc_dump_gpr(env, 5),
                  ppc_dump_gpr(env, 6), ppc_dump_gpr(env, 7),
                  ppc_dump_gpr(env, 8), env->nip);
}

/* Log r3-r12 and NIP of a hypercall when CPU_LOG_INT is enabled. */
static inline void dump_hcall(CPUPPCState *env)
{
    qemu_log_mask(CPU_LOG_INT, "hypercall r3=%016" PRIx64
                  " r4=%016" PRIx64 " r5=%016" PRIx64 " r6=%016" PRIx64
                  " r7=%016" PRIx64 " r8=%016" PRIx64 " r9=%016" PRIx64
                  " r10=%016" PRIx64 " r11=%016" PRIx64 " r12=%016" PRIx64
                  " nip=" TARGET_FMT_lx "\n",
                  ppc_dump_gpr(env, 3), ppc_dump_gpr(env, 4),
                  ppc_dump_gpr(env, 5), ppc_dump_gpr(env, 6),
                  ppc_dump_gpr(env, 7), ppc_dump_gpr(env, 8),
                  ppc_dump_gpr(env, 9), ppc_dump_gpr(env, 10),
                  ppc_dump_gpr(env, 11), ppc_dump_gpr(env, 12),
                  env->nip);
}

/*
 * Convert an exception that arrives while the CPU is in a power-saving
 * state into the wakeup that the 0x100 (system reset) handler expects.
 *
 * The SRR1 "wake reason" bit for @excp is OR-ed into @msr.  Returns the
 * exception to actually deliver: @excp unchanged for machine checks
 * (those are sent normally), POWERPC_EXCP_RESET for every other
 * supported wakeup source; aborts on an unsupported wakeup exception.
 */
static int powerpc_reset_wakeup(CPUState *cs, CPUPPCState *env, int excp,
                                target_ulong *msr)
{
    /* We no longer are in a PM state */
    env->resume_as_sreset = false;

    /* Pretend to be returning from doze always as we don't lose state */
    *msr |= SRR1_WS_NOLOSS;

    /* Machine checks are sent normally */
    if (excp == POWERPC_EXCP_MCHECK) {
        return excp;
    }
    switch (excp) {
    case POWERPC_EXCP_RESET:
        *msr |= SRR1_WAKERESET;
        break;
    case POWERPC_EXCP_EXTERNAL:
        *msr |= SRR1_WAKEEE;
        break;
    case POWERPC_EXCP_DECR:
        *msr |= SRR1_WAKEDEC;
        break;
    case POWERPC_EXCP_SDOOR:
        *msr |= SRR1_WAKEDBELL;
        break;
    case POWERPC_EXCP_SDOOR_HV:
        *msr |= SRR1_WAKEHDBELL;
        break;
    case POWERPC_EXCP_HV_MAINT:
        *msr |= SRR1_WAKEHMI;
        break;
    case POWERPC_EXCP_HVIRT:
        *msr |= SRR1_WAKEHVI;
        break;
    default:
        cpu_abort(cs, "Unsupported exception %d in Power Save mode\n",
                  excp);
    }
    return POWERPC_EXCP_RESET;
}

/*
 * AIL - Alternate Interrupt Location, a mode that allows interrupts to be
 * taken with the MMU on, and which uses an alternate location (e.g., so the
 * kernel/hv can map the vectors there with an effective address).
 *
 * An interrupt is considered to be taken "with AIL" or "AIL applies" if they
 * are delivered in this way. AIL requires the LPCR to be set to enable this
 * mode, and then a number of conditions have to be true for AIL to apply.
 *
 * First of all, SRESET, MCE, and HMI are always delivered without AIL, because
 * they specifically want to be in real mode (e.g., the MCE might be signaling
 * a SLB multi-hit which requires SLB flush before the MMU can be enabled).
 *
 * After that, behaviour depends on the current MSR[IR], MSR[DR], MSR[HV],
 * whether or not the interrupt changes MSR[HV] from 0 to 1, and the current
 * radix mode (LPCR[HR]).
 *
 * POWER8, POWER9 with LPCR[HR]=0
 * | LPCR[AIL] | MSR[IR||DR] | MSR[HV] | new MSR[HV] | AIL |
 * +-----------+-------------+---------+-------------+-----+
 * | a         | 00/01/10    | x       | x           | 0   |
 * | a         | 11          | 0       | 1           | 0   |
 * | a         | 11          | 1       | 1           | a   |
 * | a         | 11          | 0       | 0           | a   |
 * +-------------------------------------------------------+
 *
 * POWER9 with LPCR[HR]=1
 * | LPCR[AIL] | MSR[IR||DR] | MSR[HV] | new MSR[HV] | AIL |
 * +-----------+-------------+---------+-------------+-----+
 * | a         | 00/01/10    | x       | x           | 0   |
 * | a         | 11          | x       | x           | a   |
 * +-------------------------------------------------------+
 *
 * The difference with POWER9 being that MSR[HV] 0->1 interrupts can be sent to
 * the hypervisor in AIL mode if the guest is radix. This is good for
 * performance but allows the guest to influence the AIL of hypervisor
 * interrupts using its MSR, and also the hypervisor must disallow guest
 * interrupts (MSR[HV] 0->0) from using AIL if the hypervisor does not want to
 * use AIL for its MSR[HV] 0->1 interrupts.
 *
 * POWER10 addresses those issues with a new LPCR[HAIL] bit that is applied to
 * interrupts that begin execution with MSR[HV]=1 (so both MSR[HV] 0->1 and
 * MSR[HV] 1->1).
 *
 * HAIL=1 is equivalent to AIL=3, for interrupts delivered with MSR[HV]=1.
 *
 * POWER10 behaviour is
 * | LPCR[AIL] | LPCR[HAIL] | MSR[IR||DR] | MSR[HV] | new MSR[HV] | AIL |
 * +-----------+------------+-------------+---------+-------------+-----+
 * | a         | h          | 00/01/10    | 0       | 0           | 0   |
 * | a         | h          | 11          | 0       | 0           | a   |
 * | a         | h          | x           | 0       | 1           | h   |
 * | a         | h          | 00/01/10    | 1       | 1           | 0   |
 * | a         | h          | 11          | 1       | 1           | h   |
 * +--------------------------------------------------------------------+
 *
 * If AIL applies (per the tables above), MSR[IR]/MSR[DR] are set in
 * *new_msr and the AIL offset is OR-ed into *vector; otherwise both are
 * left untouched.  No-op on 32-bit targets.
 */
static inline void ppc_excp_apply_ail(PowerPCCPU *cpu, int excp_model, int excp,
                                      target_ulong msr,
                                      target_ulong *new_msr,
                                      target_ulong *vector)
{
#if defined(TARGET_PPC64)
    CPUPPCState *env = &cpu->env;
    bool mmu_all_on = ((msr >> MSR_IR) & 1) && ((msr >> MSR_DR) & 1);
    bool hv_escalation = !(msr & MSR_HVB) && (*new_msr & MSR_HVB);
    int ail = 0;

    if (excp == POWERPC_EXCP_MCHECK ||
        excp == POWERPC_EXCP_RESET ||
        excp == POWERPC_EXCP_HV_MAINT) {
        /* SRESET, MCE, HMI never apply AIL */
        return;
    }

    if (excp_model == POWERPC_EXCP_POWER8 ||
        excp_model == POWERPC_EXCP_POWER9) {
        if (!mmu_all_on) {
            /* AIL only works if MSR[IR] and MSR[DR] are both enabled. */
            return;
        }
        if (hv_escalation && !(env->spr[SPR_LPCR] & LPCR_HR)) {
            /*
             * AIL does not work if there is a MSR[HV] 0->1 transition and the
             * partition is in HPT mode. For radix guests, such interrupts are
             * allowed to be delivered to the hypervisor in ail mode.
             */
            return;
        }

        ail = (env->spr[SPR_LPCR] & LPCR_AIL) >> LPCR_AIL_SHIFT;
        if (ail == 0) {
            return;
        }
        if (ail == 1) {
            /* AIL=1 is reserved, treat it like AIL=0 */
            return;
        }

    } else if (excp_model == POWERPC_EXCP_POWER10) {
        if (!mmu_all_on && !hv_escalation) {
            /*
             * AIL works for HV interrupts even with guest MSR[IR/DR] disabled.
             * Guest->guest and HV->HV interrupts do require MMU on.
             */
            return;
        }

        if (*new_msr & MSR_HVB) {
            if (!(env->spr[SPR_LPCR] & LPCR_HAIL)) {
                /* HV interrupts depend on LPCR[HAIL] */
                return;
            }
            ail = 3; /* HAIL=1 gives AIL=3 behaviour for HV interrupts */
        } else {
            ail = (env->spr[SPR_LPCR] & LPCR_AIL) >> LPCR_AIL_SHIFT;
        }
        if (ail == 0) {
            return;
        }
        if (ail == 1 || ail == 2) {
            /* AIL=1 and AIL=2 are reserved, treat them like AIL=0 */
            return;
        }
    } else {
        /* Other processors do not support AIL */
        return;
    }

    /*
     * AIL applies, so the new MSR gets IR and DR set, and an offset applied
     * to the new IP.
     */
    *new_msr |= (1 << MSR_IR) | (1 << MSR_DR);

    if (excp != POWERPC_EXCP_SYSCALL_VECTORED) {
        if (ail == 2) {
            *vector |= 0x0000000000018000ull;
        } else if (ail == 3) {
            *vector |= 0xc000000000004000ull;
        }
    } else {
        /*
         * scv AIL is a little different. AIL=2 does not change the address,
         * only the MSR. AIL=3 replaces the 0x17000 base with 0xc...3000.
         */
        if (ail == 3) {
            *vector &= ~0x0000000000017000ull; /* Un-apply the base offset */
            *vector |= 0xc000000000003000ull; /* Apply scv's AIL=3 offset */
        }
    }
#endif
}

/*
 * Commit the computed exception state: install @msr (masked by
 * msr_mask) and jump to @vector, clearing any pending exception
 * bookkeeping and the lwarx/ldarx reservation.
 */
static inline void powerpc_set_excp_state(PowerPCCPU *cpu,
                                          target_ulong vector, target_ulong msr)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;

    /*
     * We don't use hreg_store_msr here as already have treated any
     * special case that could occur. Just store MSR and update hflags
     *
     * Note: We *MUST* not use hreg_store_msr() as-is anyway because it
     * will prevent setting of the HV bit which some exceptions might need
     * to do.
 */
    env->msr = msr & env->msr_mask;
    hreg_compute_hflags(env);
    env->nip = vector;
    /* Reset exception state */
    cs->exception_index = POWERPC_EXCP_NONE;
    env->error_code = 0;

    /* Reset the reservation */
    env->reserve_addr = -1;

    /*
     * Any interrupt is context synchronizing, check if TCG TLB needs
     * a delayed flush on ppc64
     */
    check_tlb_flush(env, false);
}

/*
 * Deliver exception @excp on @cpu for exception model @excp_model:
 * compute the handler MSR and vector, perform the per-exception side
 * effects (SPR updates, NIP correction, hypercall dispatch, ...), save
 * the old context into the model-specific save/restore registers and
 * switch execution to the handler via powerpc_set_excp_state().
 *
 * Note that this function should be greatly optimized when called
 * with a constant excp, from ppc_hw_interrupt
 */
static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    target_ulong msr, new_msr, vector;
    int srr0, srr1, asrr0, asrr1, lev = -1;

    qemu_log_mask(CPU_LOG_INT, "Raise exception at " TARGET_FMT_lx
                  " => %08x (%02x)\n", env->nip, excp, env->error_code);

    /* new srr1 value excluding must-be-zero bits */
    if (excp_model == POWERPC_EXCP_BOOKE) {
        msr = env->msr;
    } else {
        msr = env->msr & ~0x783f0000ULL;
    }

    /*
     * new interrupt handler msr preserves existing HV and ME unless
     * explicitly overriden
     */
    new_msr = env->msr & (((target_ulong)1 << MSR_ME) | MSR_HVB);

    /* target registers */
    srr0 = SPR_SRR0;
    srr1 = SPR_SRR1;
    asrr0 = -1;
    asrr1 = -1;

    /*
     * check for special resume at 0x100 from doze/nap/sleep/winkle on
     * P7/P8/P9
     */
    if (env->resume_as_sreset) {
        excp = powerpc_reset_wakeup(cs, env, excp, &msr);
    }

    /*
     * Hypervisor emulation assistance interrupt only exists on server
     * arch 2.05 server or later. We also don't want to generate it if
     * we don't have HVB in msr_mask (PAPR mode).
     */
    if (excp == POWERPC_EXCP_HV_EMU
#if defined(TARGET_PPC64)
        && !(mmu_is_64bit(env->mmu_model) && (env->msr_mask & MSR_HVB))
#endif /* defined(TARGET_PPC64) */

        ) {
        excp = POWERPC_EXCP_PROGRAM;
    }

    switch (excp) {
    case POWERPC_EXCP_NONE:
        /* Should never happen */
        return;
    case POWERPC_EXCP_CRITICAL:    /* Critical input                        */
        switch (excp_model) {
        case POWERPC_EXCP_40x:
            srr0 = SPR_40x_SRR2;
            srr1 = SPR_40x_SRR3;
            break;
        case POWERPC_EXCP_BOOKE:
            srr0 = SPR_BOOKE_CSRR0;
            srr1 = SPR_BOOKE_CSRR1;
            break;
        case POWERPC_EXCP_G2:
            break;
        default:
            goto excp_invalid;
        }
        break;
    case POWERPC_EXCP_MCHECK:    /* Machine check exception                 */
        if (msr_me == 0) {
            /*
             * Machine check exception is not enabled. Enter
             * checkstop state.
             */
            fprintf(stderr, "Machine check while not allowed. "
                    "Entering checkstop state\n");
            if (qemu_log_separate()) {
                qemu_log("Machine check while not allowed. "
                         "Entering checkstop state\n");
            }
            cs->halted = 1;
            cpu_interrupt_exittb(cs);
        }
        if (env->msr_mask & MSR_HVB) {
            /*
             * ISA specifies HV, but can be delivered to guest with HV
             * clear (e.g., see FWNMI in PAPR).
             */
            new_msr |= (target_ulong)MSR_HVB;
        }

        /* machine check exceptions don't have ME set */
        new_msr &= ~((target_ulong)1 << MSR_ME);

        /* XXX: should also have something loaded in DAR / DSISR */
        switch (excp_model) {
        case POWERPC_EXCP_40x:
            srr0 = SPR_40x_SRR2;
            srr1 = SPR_40x_SRR3;
            break;
        case POWERPC_EXCP_BOOKE:
            /* FIXME: choose one or the other based on CPU type */
            srr0 = SPR_BOOKE_MCSRR0;
            srr1 = SPR_BOOKE_MCSRR1;
            asrr0 = SPR_BOOKE_CSRR0;
            asrr1 = SPR_BOOKE_CSRR1;
            break;
        default:
            break;
        }
        break;
    case POWERPC_EXCP_DSI:       /* Data storage exception                  */
        trace_ppc_excp_dsi(env->spr[SPR_DSISR], env->spr[SPR_DAR]);
        break;
    case POWERPC_EXCP_ISI:       /* Instruction storage exception           */
        trace_ppc_excp_isi(msr, env->nip);
        msr |= env->error_code;
        break;
    case POWERPC_EXCP_EXTERNAL:  /* External input                          */
    {
        bool lpes0;

        cs = CPU(cpu);

        /*
         * Exception targeting modifiers
         *
         * LPES0 is supported on POWER7/8/9
         * LPES1 is not supported (old iSeries mode)
         *
         * On anything else, we behave as if LPES0 is 1
         * (externals don't alter MSR:HV)
         */
#if defined(TARGET_PPC64)
        if (excp_model == POWERPC_EXCP_POWER7 ||
            excp_model == POWERPC_EXCP_POWER8 ||
            excp_model == POWERPC_EXCP_POWER9 ||
            excp_model == POWERPC_EXCP_POWER10) {
            lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
        } else
#endif /* defined(TARGET_PPC64) */
        {
            lpes0 = true;
        }

        if (!lpes0) {
            new_msr |= (target_ulong)MSR_HVB;
            new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
            srr0 = SPR_HSRR0;
            srr1 = SPR_HSRR1;
        }
        if (env->mpic_proxy) {
            /* IACK the IRQ on delivery */
            env->spr[SPR_BOOKE_EPR] = ldl_phys(cs->as, env->mpic_iack);
        }
        break;
    }
    case POWERPC_EXCP_ALIGN:     /* Alignment exception                     */
        /*
         * Get rS/rD and rA from faulting opcode.
         * Note: We will only invoke ALIGN for atomic operations,
         * so all instructions are X-form.
         */
        {
            uint32_t insn = cpu_ldl_code(env, env->nip);
            env->spr[SPR_DSISR] |= (insn & 0x03FF0000) >> 16;
        }
        break;
    case POWERPC_EXCP_PROGRAM:   /* Program exception                       */
        switch (env->error_code & ~0xF) {
        case POWERPC_EXCP_FP:
            if ((msr_fe0 == 0 && msr_fe1 == 0) || msr_fp == 0) {
                trace_ppc_excp_fp_ignore();
                cs->exception_index = POWERPC_EXCP_NONE;
                env->error_code = 0;
                return;
            }

            /*
             * FP exceptions always have NIP pointing to the faulting
             * instruction, so always use store_next and claim we are
             * precise in the MSR.
             */
            msr |= 0x00100000;
            env->spr[SPR_BOOKE_ESR] = ESR_FP;
            break;
        case POWERPC_EXCP_INVAL:
            trace_ppc_excp_inval(env->nip);
            msr |= 0x00080000;
            env->spr[SPR_BOOKE_ESR] = ESR_PIL;
            break;
        case POWERPC_EXCP_PRIV:
            msr |= 0x00040000;
            env->spr[SPR_BOOKE_ESR] = ESR_PPR;
            break;
        case POWERPC_EXCP_TRAP:
            msr |= 0x00020000;
            env->spr[SPR_BOOKE_ESR] = ESR_PTR;
            break;
        default:
            /* Should never occur */
            cpu_abort(cs, "Invalid program exception %d. Aborting\n",
                      env->error_code);
            break;
        }
        break;
    case POWERPC_EXCP_SYSCALL:   /* System call exception                   */
        lev = env->error_code;

        if ((lev == 1) && cpu->vhyp) {
            dump_hcall(env);
        } else {
            dump_syscall(env);
        }

        /*
         * We need to correct the NIP which in this case is supposed
         * to point to the next instruction
         */
        env->nip += 4;

        /* "PAPR mode" built-in hypercall emulation */
        if ((lev == 1) && cpu->vhyp) {
            PPCVirtualHypervisorClass *vhc =
                PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
            vhc->hypercall(cpu->vhyp, cpu);
            return;
        }
        if (lev == 1) {
            new_msr |= (target_ulong)MSR_HVB;
        }
        break;
    case POWERPC_EXCP_SYSCALL_VECTORED: /* scv exception                    */
        lev = env->error_code;
        dump_syscall(env);
        env->nip += 4;
        new_msr |= env->msr & ((target_ulong)1 << MSR_EE);
        new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
        break;
    case POWERPC_EXCP_FPU:       /* Floating-point unavailable exception    */
    case POWERPC_EXCP_APU:       /* Auxiliary processor unavailable         */
    case POWERPC_EXCP_DECR:      /* Decrementer exception                   */
        break;
    case POWERPC_EXCP_FIT:       /* Fixed-interval timer interrupt          */
        /* FIT on 4xx */
        trace_ppc_excp_print("FIT");
        break;
    case POWERPC_EXCP_WDT:       /* Watchdog timer interrupt                */
        trace_ppc_excp_print("WDT");
        switch (excp_model) {
        case POWERPC_EXCP_BOOKE:
            srr0 = SPR_BOOKE_CSRR0;
            srr1 = SPR_BOOKE_CSRR1;
            break;
        default:
            break;
        }
        break;
    case POWERPC_EXCP_DTLB:      /* Data TLB error                          */
    case POWERPC_EXCP_ITLB:      /* Instruction TLB error                   */
        break;
    case POWERPC_EXCP_DEBUG:     /* Debug interrupt                         */
        if (env->flags & POWERPC_FLAG_DE) {
            /* FIXME: choose one or the other based on CPU type */
            srr0 = SPR_BOOKE_DSRR0;
            srr1 = SPR_BOOKE_DSRR1;
            asrr0 = SPR_BOOKE_CSRR0;
            asrr1 = SPR_BOOKE_CSRR1;
            /* DBSR already modified by caller */
        } else {
            cpu_abort(cs, "Debug exception triggered on unsupported model\n");
        }
        break;
    case POWERPC_EXCP_SPEU:      /* SPE/embedded floating-point unavailable */
        env->spr[SPR_BOOKE_ESR] = ESR_SPV;
        break;
    case POWERPC_EXCP_EFPDI:     /* Embedded floating-point data interrupt  */
        /* XXX: TODO */
        cpu_abort(cs, "Embedded floating point data exception "
                  "is not implemented yet !\n");
        env->spr[SPR_BOOKE_ESR] = ESR_SPV;
        break;
    case POWERPC_EXCP_EFPRI:     /* Embedded floating-point round interrupt */
        /* XXX: TODO */
        cpu_abort(cs, "Embedded floating point round exception "
                  "is not implemented yet !\n");
        env->spr[SPR_BOOKE_ESR] = ESR_SPV;
        break;
    case POWERPC_EXCP_EPERFM:    /* Embedded performance monitor interrupt  */
        /* XXX: TODO */
        cpu_abort(cs,
                  "Performance counter exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_DOORI:     /* Embedded doorbell interrupt             */
        break;
    case POWERPC_EXCP_DOORCI:    /* Embedded doorbell critical interrupt    */
        srr0 = SPR_BOOKE_CSRR0;
        srr1 = SPR_BOOKE_CSRR1;
        break;
    case POWERPC_EXCP_RESET:     /* System reset exception                  */
        /* A power-saving exception sets ME, otherwise it is unchanged */
        if (msr_pow) {
            /* indicate that we resumed from power save mode */
            msr |= 0x10000;
            new_msr |= ((target_ulong)1 << MSR_ME);
        }
        if (env->msr_mask & MSR_HVB) {
            /*
             * ISA specifies HV, but can be delivered to guest with HV
             * clear (e.g., see FWNMI in PAPR, NMI injection in QEMU).
             */
            new_msr |= (target_ulong)MSR_HVB;
        } else {
            if (msr_pow) {
                cpu_abort(cs, "Trying to deliver power-saving system reset "
                          "exception %d with no HV support\n", excp);
            }
        }
        break;
    case POWERPC_EXCP_DSEG:      /* Data segment exception                  */
    case POWERPC_EXCP_ISEG:      /* Instruction segment exception           */
    case POWERPC_EXCP_TRACE:     /* Trace exception                         */
        break;
    case POWERPC_EXCP_HISI:      /* Hypervisor instruction storage exception */
        msr |= env->error_code;
        /* fall through */
    case POWERPC_EXCP_HDECR:     /* Hypervisor decrementer exception        */
    case POWERPC_EXCP_HDSI:      /* Hypervisor data storage exception       */
    case POWERPC_EXCP_HDSEG:     /* Hypervisor data segment exception       */
    case POWERPC_EXCP_HISEG:     /* Hypervisor instruction segment exception */
    case POWERPC_EXCP_SDOOR_HV:  /* Hypervisor Doorbell interrupt           */
    case POWERPC_EXCP_HV_EMU:
    case POWERPC_EXCP_HVIRT:     /* Hypervisor virtualization               */
        srr0 = SPR_HSRR0;
        srr1 = SPR_HSRR1;
        new_msr |= (target_ulong)MSR_HVB;
        new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
        break;
    case POWERPC_EXCP_VPU:       /* Vector unavailable exception            */
    case POWERPC_EXCP_VSXU:       /* VSX unavailable exception              */
    case POWERPC_EXCP_FU:         /* Facility unavailable exception         */
#ifdef TARGET_PPC64
        env->spr[SPR_FSCR] |= ((target_ulong)env->error_code << 56);
#endif
        break;
    case POWERPC_EXCP_HV_FU:     /* Hypervisor Facility Unavailable Exception */
#ifdef TARGET_PPC64
        env->spr[SPR_HFSCR] |= ((target_ulong)env->error_code << FSCR_IC_POS);
        srr0 = SPR_HSRR0;
        srr1 = SPR_HSRR1;
        new_msr |= (target_ulong)MSR_HVB;
        new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
#endif
        break;
    case POWERPC_EXCP_PIT:       /* Programmable interval timer interrupt   */
        trace_ppc_excp_print("PIT");
        break;
    case POWERPC_EXCP_IO:        /* IO error exception                      */
        /* XXX: TODO */
        cpu_abort(cs, "601 IO error exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_RUNM:      /* Run mode exception                      */
        /* XXX: TODO */
        cpu_abort(cs, "601 run mode exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_EMUL:      /* Emulation trap exception                */
        /* XXX: TODO */
        cpu_abort(cs, "602 emulation trap exception "
                  "is not implemented yet !\n");
        break;
    case POWERPC_EXCP_IFTLB:     /* Instruction fetch TLB error             */
    case POWERPC_EXCP_DLTLB:     /* Data load TLB miss                      */
    case POWERPC_EXCP_DSTLB:     /* Data store TLB miss                     */
        switch (excp_model) {
        case POWERPC_EXCP_602:
        case POWERPC_EXCP_603:
        case POWERPC_EXCP_603E:
        case POWERPC_EXCP_G2:
            /* Swap temporary saved registers with GPRs */
            if (!(new_msr & ((target_ulong)1 << MSR_TGPR))) {
                new_msr |= (target_ulong)1 << MSR_TGPR;
                hreg_swap_gpr_tgpr(env);
            }
            /* fall through */
        case POWERPC_EXCP_7x5:
#if defined(DEBUG_SOFTWARE_TLB)
            if (qemu_log_enabled()) {
                const char *es;
                target_ulong *miss, *cmp;
                int en;

                if (excp == POWERPC_EXCP_IFTLB) {
                    es = "I";
                    en = 'I';
                    miss = &env->spr[SPR_IMISS];
                    cmp = &env->spr[SPR_ICMP];
                } else {
                    if (excp == POWERPC_EXCP_DLTLB) {
                        es = "DL";
                    } else {
                        es = "DS";
                    }
                    en = 'D';
                    miss = &env->spr[SPR_DMISS];
                    cmp = &env->spr[SPR_DCMP];
                }
                qemu_log("6xx %sTLB miss: %cM " TARGET_FMT_lx " %cC "
                         TARGET_FMT_lx " H1 " TARGET_FMT_lx " H2 "
                         TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp,
                         env->spr[SPR_HASH1], env->spr[SPR_HASH2],
                         env->error_code);
            }
#endif
            msr |= env->crf[0] << 28;
            msr |= env->error_code; /* key, D/I, S/L bits */
            /* Set way using a LRU mechanism */
            msr |= ((env->last_way + 1) & (env->nb_ways - 1)) << 17;
            break;
        case POWERPC_EXCP_74xx:
#if defined(DEBUG_SOFTWARE_TLB)
            if (qemu_log_enabled()) {
                const char *es;
                target_ulong *miss, *cmp;
                int en;

                if (excp == POWERPC_EXCP_IFTLB) {
                    es = "I";
                    en = 'I';
                    miss = &env->spr[SPR_TLBMISS];
                    cmp = &env->spr[SPR_PTEHI];
                } else {
                    if (excp == POWERPC_EXCP_DLTLB) {
                        es = "DL";
                    } else {
                        es = "DS";
                    }
                    en = 'D';
                    miss = &env->spr[SPR_TLBMISS];
                    cmp = &env->spr[SPR_PTEHI];
                }
                qemu_log("74xx %sTLB miss: %cM " TARGET_FMT_lx " %cC "
                         TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp,
                         env->error_code);
            }
#endif
            msr |= env->error_code; /* key bit */
            break;
        default:
            cpu_abort(cs, "Invalid TLB miss exception\n");
            break;
        }
        break;
    case POWERPC_EXCP_FPA:       /* Floating-point assist exception         */
        /* XXX: TODO */
        cpu_abort(cs, "Floating point assist exception "
                  "is not implemented yet !\n");
        break;
    case POWERPC_EXCP_DABR:      /* Data address breakpoint                 */
        /* XXX: TODO */
        cpu_abort(cs, "DABR exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_IABR:      /* Instruction address breakpoint          */
        /* XXX: TODO */
        cpu_abort(cs, "IABR exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_SMI:       /* System management interrupt             */
        /* XXX: TODO */
        cpu_abort(cs, "SMI exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_THERM:     /* Thermal interrupt                       */
        /* XXX: TODO */
        cpu_abort(cs, "Thermal management exception "
                  "is not implemented yet !\n");
        break;
    case POWERPC_EXCP_PERFM:     /* Embedded performance monitor interrupt  */
        /* XXX: TODO */
        cpu_abort(cs,
                  "Performance counter exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_VPUA:      /* Vector assist exception                 */
        /* XXX: TODO */
        cpu_abort(cs, "VPU assist exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_SOFTP:     /* Soft patch exception                    */
        /* XXX: TODO */
        cpu_abort(cs,
                  "970 soft-patch exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_MAINT:     /* Maintenance exception                   */
        /* XXX: TODO */
        cpu_abort(cs,
                  "970 maintenance exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_MEXTBR:    /* Maskable external breakpoint            */
        /* XXX: TODO */
        cpu_abort(cs, "Maskable external exception "
                  "is not implemented yet !\n");
        break;
    case POWERPC_EXCP_NMEXTBR:   /* Non maskable external breakpoint        */
        /* XXX: TODO */
        cpu_abort(cs, "Non maskable external exception "
                  "is not implemented yet !\n");
        break;
    default:
    excp_invalid:
        cpu_abort(cs, "Invalid PowerPC exception %d. Aborting\n", excp);
        break;
    }

    /* Sanity check */
    if (!(env->msr_mask & MSR_HVB)) {
        if (new_msr & MSR_HVB) {
            cpu_abort(cs, "Trying to deliver HV exception (MSR) %d with "
                      "no HV support\n", excp);
        }
        if (srr0 == SPR_HSRR0) {
            cpu_abort(cs, "Trying to deliver HV exception (HSRR) %d with "
                      "no HV support\n", excp);
        }
    }

    /*
     * Sort out endianness of interrupt, this differs depending on the
     * CPU, the HV mode, etc...
     */
#ifdef TARGET_PPC64
    if (excp_model == POWERPC_EXCP_POWER7) {
        if (!(new_msr & MSR_HVB) && (env->spr[SPR_LPCR] & LPCR_ILE)) {
            new_msr |= (target_ulong)1 << MSR_LE;
        }
    } else if (excp_model == POWERPC_EXCP_POWER8) {
        if (new_msr & MSR_HVB) {
            if (env->spr[SPR_HID0] & HID0_HILE) {
                new_msr |= (target_ulong)1 << MSR_LE;
            }
        } else if (env->spr[SPR_LPCR] & LPCR_ILE) {
            new_msr |= (target_ulong)1 << MSR_LE;
        }
    } else if (excp_model == POWERPC_EXCP_POWER9 ||
               excp_model == POWERPC_EXCP_POWER10) {
        if (new_msr & MSR_HVB) {
            if (env->spr[SPR_HID0] & HID0_POWER9_HILE) {
                new_msr |= (target_ulong)1 << MSR_LE;
            }
        } else if (env->spr[SPR_LPCR] & LPCR_ILE) {
            new_msr |= (target_ulong)1 << MSR_LE;
        }
    } else if (msr_ile) {
        new_msr |= (target_ulong)1 << MSR_LE;
    }
#else
    if (msr_ile) {
        new_msr |= (target_ulong)1 << MSR_LE;
    }
#endif

    vector = env->excp_vectors[excp];
    if (vector == (target_ulong)-1ULL) {
        cpu_abort(cs, "Raised an exception without defined vector %d\n",
                  excp);
    }

    vector |= env->excp_prefix;

    /* If any alternate SRR register are defined, duplicate saved values */
    if (asrr0 != -1) {
        env->spr[asrr0] = env->nip;
    }
    if (asrr1 != -1) {
        env->spr[asrr1] = msr;
    }

#if defined(TARGET_PPC64)
    if (excp_model == POWERPC_EXCP_BOOKE) {
        if (env->spr[SPR_BOOKE_EPCR] & EPCR_ICM) {
            /* Cat.64-bit: EPCR.ICM is copied to MSR.CM */
            new_msr |= (target_ulong)1 << MSR_CM;
        } else {
            vector = (uint32_t)vector;
        }
    } else {
        if (!msr_isf && !mmu_is_64bit(env->mmu_model)) {
            vector = (uint32_t)vector;
        } else {
            new_msr |= (target_ulong)1 << MSR_SF;
        }
    }
#endif

    if (excp != POWERPC_EXCP_SYSCALL_VECTORED) {
        /* Save PC */
        env->spr[srr0] = env->nip;

        /* Save MSR */
        env->spr[srr1] = msr;

#if defined(TARGET_PPC64)
    } else {
        /* scv saves context in LR/CTR instead of SRR0/SRR1 */
        vector += lev * 0x20;

        env->lr = env->nip;
        env->ctr = msr;
#endif
    }

    /* This can update new_msr and vector if AIL applies */
    ppc_excp_apply_ail(cpu, excp_model, excp, msr, &new_msr, &vector);

    powerpc_set_excp_state(cpu, vector, new_msr);
}

/* CPUClass hook: deliver the exception recorded in cs->exception_index. */
void ppc_cpu_do_interrupt(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;

    powerpc_excp(cpu, env->excp_model, cs->exception_index);
}

/*
 * Scan env->pending_interrupts in priority order and deliver the first
 * interrupt whose gating conditions (MSR[EE]/MSR[CE], LPCR bits,
 * power-management resume) are satisfied.  At most one interrupt is
 * delivered per call.
 */
static void ppc_hw_interrupt(CPUPPCState *env)
{
    PowerPCCPU *cpu = env_archcpu(env);
    bool async_deliver;

    /* External reset */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_RESET)) {
        env->pending_interrupts &= ~(1 << PPC_INTERRUPT_RESET);
        powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_RESET);
        return;
    }
    /* Machine check exception */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_MCK)) {
        env->pending_interrupts &= ~(1 << PPC_INTERRUPT_MCK);
        powerpc_excp(cpu, env->excp_model,
POWERPC_EXCP_MCHECK); 933 return; 934 } 935 #if 0 /* TODO */ 936 /* External debug exception */ 937 if (env->pending_interrupts & (1 << PPC_INTERRUPT_DEBUG)) { 938 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DEBUG); 939 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DEBUG); 940 return; 941 } 942 #endif 943 944 /* 945 * For interrupts that gate on MSR:EE, we need to do something a 946 * bit more subtle, as we need to let them through even when EE is 947 * clear when coming out of some power management states (in order 948 * for them to become a 0x100). 949 */ 950 async_deliver = (msr_ee != 0) || env->resume_as_sreset; 951 952 /* Hypervisor decrementer exception */ 953 if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDECR)) { 954 /* LPCR will be clear when not supported so this will work */ 955 bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE); 956 if ((async_deliver || msr_hv == 0) && hdice) { 957 /* HDEC clears on delivery */ 958 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDECR); 959 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_HDECR); 960 return; 961 } 962 } 963 964 /* Hypervisor virtualization interrupt */ 965 if (env->pending_interrupts & (1 << PPC_INTERRUPT_HVIRT)) { 966 /* LPCR will be clear when not supported so this will work */ 967 bool hvice = !!(env->spr[SPR_LPCR] & LPCR_HVICE); 968 if ((async_deliver || msr_hv == 0) && hvice) { 969 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_HVIRT); 970 return; 971 } 972 } 973 974 /* External interrupt can ignore MSR:EE under some circumstances */ 975 if (env->pending_interrupts & (1 << PPC_INTERRUPT_EXT)) { 976 bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0); 977 bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC); 978 /* HEIC blocks delivery to the hypervisor */ 979 if ((async_deliver && !(heic && msr_hv && !msr_pr)) || 980 (env->has_hv_mode && msr_hv == 0 && !lpes0)) { 981 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_EXTERNAL); 982 return; 983 } 984 } 985 if (msr_ce != 0) { 986 /* External critical 
interrupt */ 987 if (env->pending_interrupts & (1 << PPC_INTERRUPT_CEXT)) { 988 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_CRITICAL); 989 return; 990 } 991 } 992 if (async_deliver != 0) { 993 /* Watchdog timer on embedded PowerPC */ 994 if (env->pending_interrupts & (1 << PPC_INTERRUPT_WDT)) { 995 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_WDT); 996 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_WDT); 997 return; 998 } 999 if (env->pending_interrupts & (1 << PPC_INTERRUPT_CDOORBELL)) { 1000 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_CDOORBELL); 1001 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DOORCI); 1002 return; 1003 } 1004 /* Fixed interval timer on embedded PowerPC */ 1005 if (env->pending_interrupts & (1 << PPC_INTERRUPT_FIT)) { 1006 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_FIT); 1007 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_FIT); 1008 return; 1009 } 1010 /* Programmable interval timer on embedded PowerPC */ 1011 if (env->pending_interrupts & (1 << PPC_INTERRUPT_PIT)) { 1012 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PIT); 1013 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_PIT); 1014 return; 1015 } 1016 /* Decrementer exception */ 1017 if (env->pending_interrupts & (1 << PPC_INTERRUPT_DECR)) { 1018 if (ppc_decr_clear_on_delivery(env)) { 1019 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DECR); 1020 } 1021 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DECR); 1022 return; 1023 } 1024 if (env->pending_interrupts & (1 << PPC_INTERRUPT_DOORBELL)) { 1025 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DOORBELL); 1026 if (is_book3s_arch2x(env)) { 1027 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_SDOOR); 1028 } else { 1029 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DOORI); 1030 } 1031 return; 1032 } 1033 if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDOORBELL)) { 1034 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDOORBELL); 1035 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_SDOOR_HV); 1036 return; 1037 } 
        /* Performance monitor interrupt (pending bit cleared on delivery) */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_PERFM)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PERFM);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_PERFM);
            return;
        }
        /* Thermal interrupt */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_THERM)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_THERM);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_THERM);
            return;
        }
    }

    if (env->resume_as_sreset) {
        /*
         * This is a bug ! It means that has_work took us out of halt without
         * anything to deliver while in a PM state that requires getting
         * out via a 0x100
         *
         * This means we will incorrectly execute past the power management
         * instruction instead of triggering a reset.
         *
         * It generally means a discrepancy between the wakeup conditions in the
         * processor has_work implementation and the logic in this function.
         */
        cpu_abort(env_cpu(env),
                  "Wakeup from PM state but interrupt Undelivered");
    }
}

/* Deliver a system reset exception (the 0x100 vector) to the given CPU. */
void ppc_cpu_do_system_reset(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;

    powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_RESET);
}

/*
 * Enter a FWNMI machine check handler at @vector with a minimal MSR.
 * Only ME, the current SF bit, and (when interrupts are little-endian)
 * LE are set; everything else in the MSR is cleared.
 */
void ppc_cpu_do_fwnmi_machine_check(CPUState *cs, target_ulong vector)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    target_ulong msr = 0;

    /*
     * Set MSR and NIP for the handler, SRR0/1, DAR and DSISR have already
     * been set by KVM.
     */
    msr = (1ULL << MSR_ME);             /* keep machine checks enabled */
    msr |= env->msr & (1ULL << MSR_SF); /* preserve the current SF bit */
    if (ppc_interrupts_little_endian(cpu)) {
        msr |= (1ULL << MSR_LE);
    }

    powerpc_set_excp_state(cpu, vector, msr);
}

/*
 * cpu_exec() interrupt hook: attempt delivery of one pending hardware
 * interrupt.  Returns true when CPU_INTERRUPT_HARD was handled (whether
 * or not anything was actually delivered), false otherwise.
 */
bool ppc_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;

    if (interrupt_request & CPU_INTERRUPT_HARD) {
        ppc_hw_interrupt(env);
        if (env->pending_interrupts == 0) {
            /* Nothing left pending: drop the hard-interrupt request bit */
            cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
        }
        return true;
    }
    return false;
}

#endif /* !CONFIG_USER_ONLY */

/*****************************************************************************/
/* Exceptions processing helpers */

/*
 * Raise @exception with @error_code, unwinding TCG state back to the
 * guest instruction identified by host return address @raddr
 * (raddr == 0 means no unwinding).  Does not return.
 */
void raise_exception_err_ra(CPUPPCState *env, uint32_t exception,
                            uint32_t error_code, uintptr_t raddr)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = exception;
    env->error_code = error_code;
    cpu_loop_exit_restore(cs, raddr);
}

/* As raise_exception_err_ra() with no unwinding */
void raise_exception_err(CPUPPCState *env, uint32_t exception,
                         uint32_t error_code)
{
    raise_exception_err_ra(env, exception, error_code, 0);
}

/* As raise_exception_err_ra() with no error code and no unwinding */
void raise_exception(CPUPPCState *env, uint32_t exception)
{
    raise_exception_err_ra(env, exception, 0, 0);
}

/* As raise_exception_err_ra() with no error code */
void raise_exception_ra(CPUPPCState *env, uint32_t exception,
                        uintptr_t raddr)
{
    raise_exception_err_ra(env, exception, 0, raddr);
}

#ifdef CONFIG_TCG
/* TCG helper wrapper around raise_exception_err_ra() (no unwinding) */
void helper_raise_exception_err(CPUPPCState *env, uint32_t exception,
                                uint32_t error_code)
{
    raise_exception_err_ra(env, exception, error_code, 0);
}

/* TCG helper wrapper: raise an exception with no error code */
void helper_raise_exception(CPUPPCState *env, uint32_t exception)
{
    raise_exception_err_ra(env, exception, 0, 0);
}
#endif

#if !defined(CONFIG_USER_ONLY)
#ifdef CONFIG_TCG
/* TCG helper: mtmsr — write @val to the MSR (signature continues below) */
void helper_store_msr(CPUPPCState
*env, target_ulong val)
{
    /*
     * hreg_store_msr() returns a non-zero exception number when the MSR
     * write itself must trigger an exception.
     */
    uint32_t excp = hreg_store_msr(env, val, 0);

    if (excp != 0) {
        CPUState *cs = env_cpu(env);
        /* MSR changed: force an exit from the current translation block */
        cpu_interrupt_exittb(cs);
        raise_exception(env, excp);
    }
}

#if defined(TARGET_PPC64)
/*
 * TCG helper: scv (system call vectored) at level @lev.  Raises a
 * facility-unavailable exception instead when FSCR[SCV] is clear.
 */
void helper_scv(CPUPPCState *env, uint32_t lev)
{
    if (env->spr[SPR_FSCR] & (1ull << FSCR_SCV)) {
        raise_exception_err(env, POWERPC_EXCP_SYSCALL_VECTORED, lev);
    } else {
        raise_exception_err(env, POWERPC_EXCP_FU, FSCR_IC_SCV);
    }
}

/* TCG helper: power-management instruction — halt the CPU */
void helper_pminsn(CPUPPCState *env, powerpc_pm_insn_t insn)
{
    CPUState *cs;

    cs = env_cpu(env);
    cs->halted = 1;

    /* Condition for waking up at 0x100 */
    env->resume_as_sreset = (insn != PPC_PM_STOP) ||
        (env->spr[SPR_PSSCR] & PSSCR_EC);
}
#endif /* defined(TARGET_PPC64) */
#endif /* CONFIG_TCG */

/*
 * Common return-from-interrupt tail: install @nip/@msr and resume.
 * All the rfi-style helpers below differ only in which register pair
 * (SRRs, HSRRs, CSRRs, ... or LR/CTR) they restore from.
 */
static inline void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr)
{
    CPUState *cs = env_cpu(env);

    /* MSR:POW cannot be set by any form of rfi */
    msr &= ~(1ULL << MSR_POW);

#if defined(TARGET_PPC64)
    /* Switching to 32-bit ? Crop the nip */
    if (!msr_is_64bit(env, msr)) {
        nip = (uint32_t)nip;
    }
#else
    nip = (uint32_t)nip;
#endif
    /* XXX: beware: this is false if VLE is supported */
    env->nip = nip & ~((target_ulong)0x00000003);
    hreg_store_msr(env, msr, 1);
    trace_ppc_excp_rfi(env->nip, env->msr);
    /*
     * No need to raise an exception here, as rfi is always the last
     * insn of a TB
     */
    cpu_interrupt_exittb(cs);
    /* Reset the reservation */
    env->reserve_addr = -1;

    /* Context synchronizing: check if TCG TLB needs flush */
    check_tlb_flush(env, false);
}

#ifdef CONFIG_TCG
/* rfi: return from interrupt; only the low 32 bits of SRR1 are used */
void helper_rfi(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1] & 0xfffffffful);
}

/* NOTE(review): defined empty and never used in this chunk — confirm intent */
#define MSR_BOOK3S_MASK
#if defined(TARGET_PPC64)
/* rfid: 64-bit return from interrupt via SRR0/SRR1 */
void helper_rfid(CPUPPCState *env)
{
    /*
     * The architecture defines a number of rules for which bits can
     * change but in practice, we handle this in hreg_store_msr()
     * which will be called by do_rfi(), so there is no need to filter
     * here
     */
    do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1]);
}

/* rfscv: return from system call vectored — return state lives in LR/CTR */
void helper_rfscv(CPUPPCState *env)
{
    do_rfi(env, env->lr, env->ctr);
}

/* hrfid: return from hypervisor interrupt via HSRR0/HSRR1 */
void helper_hrfid(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_HSRR0], env->spr[SPR_HSRR1]);
}
#endif

/*****************************************************************************/
/* Embedded PowerPC specific helpers */

/* 40x rfci: return from critical interrupt via SRR2/SRR3 */
void helper_40x_rfci(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3]);
}

/* BookE rfci: return from critical interrupt via CSRR0/CSRR1 */
void helper_rfci(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1]);
}

/* BookE rfdi: return from debug interrupt via DSRR0/DSRR1 */
void helper_rfdi(CPUPPCState *env)
{
    /* FIXME: choose CSRR1 or DSRR1 based on cpu type */
    do_rfi(env, env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1]);
}

/* BookE rfmci: return from machine check interrupt via MCSRR0/MCSRR1 */
void helper_rfmci(CPUPPCState *env)
{
    /* FIXME: choose CSRR1 or MCSRR1 based on cpu type */
    do_rfi(env, env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1]);
}
#endif /* CONFIG_TCG */
#endif /* !defined(CONFIG_USER_ONLY) */

#ifdef CONFIG_TCG
/*
 * tw: trap word.  @flags carries the TO condition bits:
 *   0x10 signed <, 0x08 signed >, 0x04 equal,
 *   0x02 unsigned <, 0x01 unsigned >.
 * Raises a TRAP program exception when any selected 32-bit comparison
 * between arg1 and arg2 holds.
 */
void helper_tw(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
               uint32_t flags)
{
    if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
                  ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
                  ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
                  ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
                  ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_TRAP, GETPC());
    }
}

#if defined(TARGET_PPC64)
/* td: trap doubleword — same TO bit encoding as helper_tw(), 64-bit compares */
void helper_td(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
               uint32_t flags)
{
    if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
                  ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
                  ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
                  ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
                  ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01))))) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_TRAP, GETPC());
    }
}
#endif
#endif

#if !defined(CONFIG_USER_ONLY)
/*****************************************************************************/
/* PowerPC 601 specific instructions (POWER bridge) */

#ifdef CONFIG_TCG
/* 601 rfsvc: return from supervisor call; MSR is the low 16 bits of CTR */
void helper_rfsvc(CPUPPCState *env)
{
    do_rfi(env, env->lr, env->ctr & 0x0000FFFF);
}

/* Embedded.Processor Control */

/*
 * Map the doorbell message type encoded in @rb to a pending-interrupt
 * index; returns -1 for reserved/unimplemented (guest doorbell) types.
 */
static int dbell2irq(target_ulong rb)
{
    int msg = rb & DBELL_TYPE_MASK;
    int irq = -1;

    switch (msg) {
    case DBELL_TYPE_DBELL:
        irq = PPC_INTERRUPT_DOORBELL;
        break;
    case DBELL_TYPE_DBELL_CRIT:
        irq = PPC_INTERRUPT_CDOORBELL;
        break;
    case DBELL_TYPE_G_DBELL:
    case DBELL_TYPE_G_DBELL_CRIT:
    case DBELL_TYPE_G_DBELL_MC:
        /* XXX implement */
    default:
        break;
    }

    return irq;
}

/* msgclr: clear the pending doorbell interrupt selected by @rb on this CPU */
void helper_msgclr(CPUPPCState *env, target_ulong rb)
{
    int irq = dbell2irq(rb);

    if (irq < 0) {
        /* Reserved/unsupported message type: no-op */
        return;
    }

    env->pending_interrupts &= ~(1 << irq);
}

/*
 * msgsnd: post a doorbell interrupt to every CPU whose BookE PIR matches
 * the tag in @rb, or to all CPUs when DBELL_BRDCAST is set.
 */
void helper_msgsnd(target_ulong rb)
{
    int irq = dbell2irq(rb);
    int pir = rb & DBELL_PIRTAG_MASK;
    CPUState *cs;

    if (irq < 0) {
        return;
    }

    /* We modify other CPUs' pending state: take the iothread lock */
    qemu_mutex_lock_iothread();
    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);
        CPUPPCState *cenv = &cpu->env;

        if ((rb & DBELL_BRDCAST) || (cenv->spr[SPR_BOOKE_PIR] == pir)) {
            cenv->pending_interrupts |= 1 << irq;
            cpu_interrupt(cs, CPU_INTERRUPT_HARD);
        }
    }
    qemu_mutex_unlock_iothread();
}

/* Server Processor Control */

static bool dbell_type_server(target_ulong rb)
{
    /*
     * A Directed Hypervisor Doorbell message is sent only if the
     * message type is 5.
 All other types are reserved and the
     * instruction is a no-op
     */
    return (rb & DBELL_TYPE_MASK) == DBELL_TYPE_DBELL_SERVER;
}

/* book3s msgclr: clear a pending hypervisor doorbell on this CPU */
void helper_book3s_msgclr(CPUPPCState *env, target_ulong rb)
{
    if (!dbell_type_server(rb)) {
        return;
    }

    env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDOORBELL);
}

/* Post interrupt @irq to every CPU whose PIR default value equals @pir */
static void book3s_msgsnd_common(int pir, int irq)
{
    CPUState *cs;

    /* Other CPUs' pending state is touched: iothread lock required */
    qemu_mutex_lock_iothread();
    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);
        CPUPPCState *cenv = &cpu->env;

        /* TODO: broadcast message to all threads of the same processor */
        if (cenv->spr_cb[SPR_PIR].default_value == pir) {
            cenv->pending_interrupts |= 1 << irq;
            cpu_interrupt(cs, CPU_INTERRUPT_HARD);
        }
    }
    qemu_mutex_unlock_iothread();
}

/* book3s msgsnd: send a hypervisor doorbell to the CPU tagged in @rb */
void helper_book3s_msgsnd(target_ulong rb)
{
    int pir = rb & DBELL_PROCIDTAG_MASK;

    if (!dbell_type_server(rb)) {
        /* Reserved message type: architected no-op */
        return;
    }

    book3s_msgsnd_common(pir, PPC_INTERRUPT_HDOORBELL);
}

#if defined(TARGET_PPC64)
/* msgclrp: privileged doorbell clear; gated on the HFSCR MSGP facility */
void helper_book3s_msgclrp(CPUPPCState *env, target_ulong rb)
{
    helper_hfscr_facility_check(env, HFSCR_MSGP, "msgclrp", HFSCR_IC_MSGP);

    if (!dbell_type_server(rb)) {
        return;
    }

    env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DOORBELL);
}

/*
 * sends a message to other threads that are on the same
 * multi-threaded processor
 */
void helper_book3s_msgsndp(CPUPPCState *env, target_ulong rb)
{
    int pir = env->spr_cb[SPR_PIR].default_value;

    helper_hfscr_facility_check(env, HFSCR_MSGP, "msgsndp", HFSCR_IC_MSGP);

    if (!dbell_type_server(rb)) {
        return;
    }

    /* TODO: TCG supports only one thread */

    book3s_msgsnd_common(pir, PPC_INTERRUPT_DOORBELL);
}
#endif /* TARGET_PPC64 */

/*
 * Unaligned access fault hook: record the faulting address in the
 * MMU-model-specific "data exception address" SPR, then raise an
 * alignment exception unwinding to @retaddr.
 */
void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr)
{
    CPUPPCState *env = cs->env_ptr;

    switch (env->mmu_model) {
    case POWERPC_MMU_SOFT_4xx:
    case POWERPC_MMU_SOFT_4xx_Z:
        env->spr[SPR_40x_DEAR] = vaddr;
        break;
    case POWERPC_MMU_BOOKE:
    case POWERPC_MMU_BOOKE206:
        env->spr[SPR_BOOKE_DEAR] = vaddr;
        break;
    default:
        /* Classic/server MMUs use DAR */
        env->spr[SPR_DAR] = vaddr;
        break;
    }

    cs->exception_index = POWERPC_EXCP_ALIGN;
    env->error_code = 0;
    cpu_loop_exit_restore(cs, retaddr);
}
#endif /* CONFIG_TCG */
#endif /* !CONFIG_USER_ONLY */