/*
 * PowerPC exception emulation helpers for QEMU.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "internal.h"
#include "helper_regs.h"

#include "trace.h"

#ifdef CONFIG_TCG
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#endif

/* #define DEBUG_SOFTWARE_TLB */

/*****************************************************************************/
/* Exception processing */
#if !defined(CONFIG_USER_ONLY)

static const char *powerpc_excp_name(int excp)
{
    switch (excp) {
    case POWERPC_EXCP_CRITICAL: return "CRITICAL";
    case POWERPC_EXCP_MCHECK: return "MCHECK";
    case POWERPC_EXCP_DSI: return "DSI";
    case POWERPC_EXCP_ISI: return "ISI";
    case POWERPC_EXCP_EXTERNAL: return "EXTERNAL";
    case POWERPC_EXCP_ALIGN: return "ALIGN";
    case POWERPC_EXCP_PROGRAM: return "PROGRAM";
    case POWERPC_EXCP_FPU: return "FPU";
    case POWERPC_EXCP_SYSCALL: return "SYSCALL";
    case POWERPC_EXCP_APU: return "APU";
    case POWERPC_EXCP_DECR: return "DECR";
    case POWERPC_EXCP_FIT: return "FIT";
    case POWERPC_EXCP_WDT: return "WDT";
    case POWERPC_EXCP_DTLB: return "DTLB";
    case POWERPC_EXCP_ITLB: return "ITLB";
    case POWERPC_EXCP_DEBUG: return "DEBUG";
    case POWERPC_EXCP_SPEU: return "SPEU";
    case POWERPC_EXCP_EFPDI: return "EFPDI";
    case POWERPC_EXCP_EFPRI: return "EFPRI";
    case POWERPC_EXCP_EPERFM: return "EPERFM";
    case POWERPC_EXCP_DOORI: return "DOORI";
    case POWERPC_EXCP_DOORCI: return "DOORCI";
    case POWERPC_EXCP_GDOORI: return "GDOORI";
    case POWERPC_EXCP_GDOORCI: return "GDOORCI";
    case POWERPC_EXCP_HYPPRIV: return "HYPPRIV";
    case POWERPC_EXCP_RESET: return "RESET";
    case POWERPC_EXCP_DSEG: return "DSEG";
    case POWERPC_EXCP_ISEG: return "ISEG";
    case POWERPC_EXCP_HDECR: return "HDECR";
    case POWERPC_EXCP_TRACE: return "TRACE";
    case POWERPC_EXCP_HDSI: return "HDSI";
    case POWERPC_EXCP_HISI: return "HISI";
    case POWERPC_EXCP_HDSEG: return "HDSEG";
    case POWERPC_EXCP_HISEG: return "HISEG";
    case POWERPC_EXCP_VPU: return "VPU";
    case POWERPC_EXCP_PIT: return "PIT";
    case POWERPC_EXCP_IO: return "IO";
    case POWERPC_EXCP_RUNM: return "RUNM";
    case POWERPC_EXCP_EMUL: return "EMUL";
    case POWERPC_EXCP_IFTLB: return "IFTLB";
    case POWERPC_EXCP_DLTLB: return "DLTLB";
    case POWERPC_EXCP_DSTLB: return "DSTLB";
    case POWERPC_EXCP_FPA: return "FPA";
    case POWERPC_EXCP_DABR: return "DABR";
    case POWERPC_EXCP_IABR: return "IABR";
    case POWERPC_EXCP_SMI: return "SMI";
    case POWERPC_EXCP_PERFM: return "PERFM";
    case POWERPC_EXCP_THERM: return "THERM";
    case POWERPC_EXCP_VPUA: return "VPUA";
    case POWERPC_EXCP_SOFTP: return "SOFTP";
    case POWERPC_EXCP_MAINT: return "MAINT";
    case POWERPC_EXCP_MEXTBR: return "MEXTBR";
    case POWERPC_EXCP_NMEXTBR: return "NMEXTBR";
    case POWERPC_EXCP_ITLBE: return "ITLBE";
    case POWERPC_EXCP_DTLBE: return "DTLBE";
    case POWERPC_EXCP_VSXU: return "VSXU";
    case POWERPC_EXCP_FU: return "FU";
    case POWERPC_EXCP_HV_EMU: return "HV_EMU";
    case POWERPC_EXCP_HV_MAINT: return "HV_MAINT";
    case POWERPC_EXCP_HV_FU: return "HV_FU";
    case POWERPC_EXCP_SDOOR: return "SDOOR";
    case POWERPC_EXCP_SDOOR_HV: return "SDOOR_HV";
    case POWERPC_EXCP_HVIRT: return "HVIRT";
    case POWERPC_EXCP_SYSCALL_VECTORED: return "SYSCALL_VECTORED";
    default:
        g_assert_not_reached();
    }
}

static void dump_syscall(CPUPPCState *env)
{
    qemu_log_mask(CPU_LOG_INT, "syscall r0=%016" PRIx64
                  " r3=%016" PRIx64 " r4=%016" PRIx64 " r5=%016" PRIx64
                  " r6=%016" PRIx64 " r7=%016" PRIx64 " r8=%016" PRIx64
                  " nip=" TARGET_FMT_lx "\n",
                  ppc_dump_gpr(env, 0), ppc_dump_gpr(env, 3),
                  ppc_dump_gpr(env, 4), ppc_dump_gpr(env, 5),
                  ppc_dump_gpr(env, 6), ppc_dump_gpr(env, 7),
                  ppc_dump_gpr(env, 8), env->nip);
}

static void dump_hcall(CPUPPCState *env)
{
    qemu_log_mask(CPU_LOG_INT, "hypercall r3=%016" PRIx64
                  " r4=%016" PRIx64 " r5=%016" PRIx64 " r6=%016" PRIx64
                  " r7=%016" PRIx64 " r8=%016" PRIx64 " r9=%016" PRIx64
                  " r10=%016" PRIx64 " r11=%016" PRIx64 " r12=%016" PRIx64
                  " nip=" TARGET_FMT_lx "\n",
                  ppc_dump_gpr(env, 3), ppc_dump_gpr(env, 4),
                  ppc_dump_gpr(env, 5), ppc_dump_gpr(env, 6),
                  ppc_dump_gpr(env, 7), ppc_dump_gpr(env, 8),
                  ppc_dump_gpr(env, 9), ppc_dump_gpr(env, 10),
                  ppc_dump_gpr(env, 11), ppc_dump_gpr(env, 12),
                  env->nip);
}

/*
 * Turn an exception taken while the CPU was in a power-saving state into
 * the 0x100 system reset wakeup, recording the wake reason in the SRR1
 * value presented to the handler.
 */
static int powerpc_reset_wakeup(CPUState *cs, CPUPPCState *env, int excp,
                                target_ulong *msr)
{
    /* We no longer are in a PM state */
    env->resume_as_sreset = false;

    /* Pretend to be returning from doze always as we don't lose state */
    *msr |= SRR1_WS_NOLOSS;

    /* Machine checks are sent normally */
    if (excp == POWERPC_EXCP_MCHECK) {
        return excp;
    }
    switch (excp) {
    case POWERPC_EXCP_RESET:
        *msr |= SRR1_WAKERESET;
        break;
    case POWERPC_EXCP_EXTERNAL:
        *msr |= SRR1_WAKEEE;
        break;
    case POWERPC_EXCP_DECR:
        *msr |= SRR1_WAKEDEC;
        break;
    case POWERPC_EXCP_SDOOR:
        *msr |= SRR1_WAKEDBELL;
        break;
    case POWERPC_EXCP_SDOOR_HV:
        *msr |= SRR1_WAKEHDBELL;
        break;
    case POWERPC_EXCP_HV_MAINT:
        *msr |= SRR1_WAKEHMI;
        break;
    case POWERPC_EXCP_HVIRT:
        *msr |= SRR1_WAKEHVI;
        break;
    default:
        cpu_abort(cs, "Unsupported exception %d in Power Save mode\n",
                  excp);
    }
    return POWERPC_EXCP_RESET;
}

/*
 * AIL - Alternate Interrupt Location, a mode that allows interrupts to be
 * taken with the MMU on, and which uses an alternate location (e.g., so the
 * kernel/hv can map the vectors there with an effective address).
 *
 * An interrupt is considered to be taken "with AIL" or "AIL applies" if it
 * is delivered in this way. AIL requires the LPCR to be set to enable this
 * mode, and then a number of conditions have to be true for AIL to apply.
 *
 * First of all, SRESET, MCE, and HMI are always delivered without AIL, because
 * they specifically want to be in real mode (e.g., the MCE might be signaling
 * a SLB multi-hit which requires SLB flush before the MMU can be enabled).
 *
 * After that, behaviour depends on the current MSR[IR], MSR[DR], MSR[HV],
 * whether or not the interrupt changes MSR[HV] from 0 to 1, and the current
 * radix mode (LPCR[HR]).
 *
 * POWER8, POWER9 with LPCR[HR]=0
 * | LPCR[AIL] | MSR[IR||DR] | MSR[HV] | new MSR[HV] | AIL |
 * +-----------+-------------+---------+-------------+-----+
 * | a         | 00/01/10    | x       | x           | 0   |
 * | a         | 11          | 0       | 1           | 0   |
 * | a         | 11          | 1       | 1           | a   |
 * | a         | 11          | 0       | 0           | a   |
 * +-------------------------------------------------------+
 *
 * POWER9 with LPCR[HR]=1
 * | LPCR[AIL] | MSR[IR||DR] | MSR[HV] | new MSR[HV] | AIL |
 * +-----------+-------------+---------+-------------+-----+
 * | a         | 00/01/10    | x       | x           | 0   |
 * | a         | 11          | x       | x           | a   |
 * +-------------------------------------------------------+
 *
 * The difference with POWER9 being that MSR[HV] 0->1 interrupts can be sent to
 * the hypervisor in AIL mode if the guest is radix. This is good for
 * performance but allows the guest to influence the AIL of hypervisor
 * interrupts using its MSR, and also the hypervisor must disallow guest
 * interrupts (MSR[HV] 0->0) from using AIL if the hypervisor does not want to
 * use AIL for its MSR[HV] 0->1 interrupts.
 *
 * POWER10 addresses those issues with a new LPCR[HAIL] bit that is applied to
 * interrupts that begin execution with MSR[HV]=1 (so both MSR[HV] 0->1 and
 * MSR[HV] 1->1).
 *
 * HAIL=1 is equivalent to AIL=3, for interrupts delivered with MSR[HV]=1.
 *
 * POWER10 behaviour is
 * | LPCR[AIL] | LPCR[HAIL] | MSR[IR||DR] | MSR[HV] | new MSR[HV] | AIL |
 * +-----------+------------+-------------+---------+-------------+-----+
 * | a         | h          | 00/01/10    | 0       | 0           | 0   |
 * | a         | h          | 11          | 0       | 0           | a   |
 * | a         | h          | x           | 0       | 1           | h   |
 * | a         | h          | 00/01/10    | 1       | 1           | 0   |
 * | a         | h          | 11          | 1       | 1           | h   |
 * +--------------------------------------------------------------------+
 */
static void ppc_excp_apply_ail(PowerPCCPU *cpu, int excp_model, int excp,
                               target_ulong msr,
                               target_ulong *new_msr,
                               target_ulong *vector)
{
#if defined(TARGET_PPC64)
    CPUPPCState *env = &cpu->env;
    bool mmu_all_on = ((msr >> MSR_IR) & 1) && ((msr >> MSR_DR) & 1);
    bool hv_escalation = !(msr & MSR_HVB) && (*new_msr & MSR_HVB);
    int ail = 0;

    if (excp == POWERPC_EXCP_MCHECK ||
        excp == POWERPC_EXCP_RESET ||
        excp == POWERPC_EXCP_HV_MAINT) {
        /* SRESET, MCE, HMI never apply AIL */
        return;
    }

    if (excp_model == POWERPC_EXCP_POWER8 ||
        excp_model == POWERPC_EXCP_POWER9) {
        if (!mmu_all_on) {
            /* AIL only works if MSR[IR] and MSR[DR] are both enabled. */
            return;
        }
        if (hv_escalation && !(env->spr[SPR_LPCR] & LPCR_HR)) {
            /*
             * AIL does not work if there is a MSR[HV] 0->1 transition and the
             * partition is in HPT mode. For radix guests, such interrupts are
             * allowed to be delivered to the hypervisor in ail mode.
             */
            return;
        }

        ail = (env->spr[SPR_LPCR] & LPCR_AIL) >> LPCR_AIL_SHIFT;
        if (ail == 0) {
            return;
        }
        if (ail == 1) {
            /* AIL=1 is reserved, treat it like AIL=0 */
            return;
        }

    } else if (excp_model == POWERPC_EXCP_POWER10) {
        if (!mmu_all_on && !hv_escalation) {
            /*
             * AIL works for HV interrupts even with guest MSR[IR/DR] disabled.
             * Guest->guest and HV->HV interrupts do require MMU on.
             */
            return;
        }

        if (*new_msr & MSR_HVB) {
            if (!(env->spr[SPR_LPCR] & LPCR_HAIL)) {
                /* HV interrupts depend on LPCR[HAIL] */
                return;
            }
            ail = 3; /* HAIL=1 gives AIL=3 behaviour for HV interrupts */
        } else {
            ail = (env->spr[SPR_LPCR] & LPCR_AIL) >> LPCR_AIL_SHIFT;
        }
        if (ail == 0) {
            return;
        }
        if (ail == 1 || ail == 2) {
            /* AIL=1 and AIL=2 are reserved, treat them like AIL=0 */
            return;
        }
    } else {
        /* Other processors do not support AIL */
        return;
    }

    /*
     * AIL applies, so the new MSR gets IR and DR set, and an offset applied
     * to the new IP.
     */
    *new_msr |= (1 << MSR_IR) | (1 << MSR_DR);

    if (excp != POWERPC_EXCP_SYSCALL_VECTORED) {
        if (ail == 2) {
            *vector |= 0x0000000000018000ull;
        } else if (ail == 3) {
            *vector |= 0xc000000000004000ull;
        }
    } else {
        /*
         * scv AIL is a little different. AIL=2 does not change the address,
         * only the MSR. AIL=3 replaces the 0x17000 base with 0xc...3000.
         */
        if (ail == 3) {
            *vector &= ~0x0000000000017000ull; /* Un-apply the base offset */
            *vector |= 0xc000000000003000ull;  /* Apply scv's AIL=3 offset */
        }
    }
#endif
}

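/*
 * Commit the new machine state for the handler: install MSR and NIP and
 * clear any in-flight exception state.
 */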
static void powerpc_set_excp_state(PowerPCCPU *cpu,
                                   target_ulong vector, target_ulong msr)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;

    /*
     * We don't use hreg_store_msr here as we have already treated any
     * special case that could occur. Just store MSR and update hflags
     *
     * Note: We *MUST* not use hreg_store_msr() as-is anyway because it
     * will prevent setting of the HV bit which some exceptions might need
     * to do.
     */
    env->msr = msr & env->msr_mask;
    hreg_compute_hflags(env);
    env->nip = vector;
    /* Reset exception state */
    cs->exception_index = POWERPC_EXCP_NONE;
    env->error_code = 0;

    /* Reset the reservation */
    env->reserve_addr = -1;

    /*
     * Any interrupt is context synchronizing, check if TCG TLB needs
     * a delayed flush on ppc64
     */
    check_tlb_flush(env, false);
}

/*
 * Note that this function should be greatly optimized when called
 * with a constant excp, from ppc_hw_interrupt
 */
static void powerpc_excp(PowerPCCPU *cpu, int excp)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    int excp_model = env->excp_model;
    target_ulong msr, new_msr, vector;
    int srr0, srr1, lev = -1;

    if (excp <= POWERPC_EXCP_NONE || excp >= POWERPC_EXCP_NB) {
        cpu_abort(cs, "Invalid PowerPC exception %d. Aborting\n", excp);
    }

    qemu_log_mask(CPU_LOG_INT, "Raise exception at " TARGET_FMT_lx
                  " => %s (%d) error=%02x\n", env->nip, powerpc_excp_name(excp),
                  excp, env->error_code);

    /* new srr1 value excluding must-be-zero bits */
    if (excp_model == POWERPC_EXCP_BOOKE) {
        msr = env->msr;
    } else {
        msr = env->msr & ~0x783f0000ULL;
    }

    /*
     * new interrupt handler msr preserves existing HV and ME unless
     * explicitly overridden
     */
    new_msr = env->msr & (((target_ulong)1 << MSR_ME) | MSR_HVB);

    /* target registers */
    srr0 = SPR_SRR0;
    srr1 = SPR_SRR1;

    /*
     * check for special resume at 0x100 from doze/nap/sleep/winkle on
     * P7/P8/P9
     */
    if (env->resume_as_sreset) {
        excp = powerpc_reset_wakeup(cs, env, excp, &msr);
    }

    /*
     * Hypervisor emulation assistance interrupt only exists on server
     * arch 2.05 or later. We also don't want to generate it if we
     * don't have HVB in msr_mask (PAPR mode).
     */
    if (excp == POWERPC_EXCP_HV_EMU
#if defined(TARGET_PPC64)
        && !(mmu_is_64bit(env->mmu_model) && (env->msr_mask & MSR_HVB))
#endif /* defined(TARGET_PPC64) */

        ) {
        excp = POWERPC_EXCP_PROGRAM;
    }

#ifdef TARGET_PPC64
    /*
     * SPEU and VPU share the same IVOR but they exist in different
     * processors. SPEU is e500v1/2 only and VPU is e6500 only.
     */
    if (excp_model == POWERPC_EXCP_BOOKE && excp == POWERPC_EXCP_VPU) {
        excp = POWERPC_EXCP_SPEU;
    }
#endif

    vector = env->excp_vectors[excp];
    if (vector == (target_ulong)-1ULL) {
        cpu_abort(cs, "Raised an exception without defined vector %d\n",
                  excp);
    }

    vector |= env->excp_prefix;

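    /*
     * Per-exception fixups: pick the save/restore register pair, adjust
     * the saved MSR image and the new MSR, and fill in any side registers
     * such as DSISR or the BookE ESR.
     */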
    switch (excp) {
    case POWERPC_EXCP_CRITICAL:    /* Critical input */
        switch (excp_model) {
        case POWERPC_EXCP_40x:
            srr0 = SPR_40x_SRR2;
            srr1 = SPR_40x_SRR3;
            break;
        case POWERPC_EXCP_BOOKE:
            srr0 = SPR_BOOKE_CSRR0;
            srr1 = SPR_BOOKE_CSRR1;
            break;
        case POWERPC_EXCP_G2:
            break;
        default:
            goto excp_invalid;
        }
        break;
    case POWERPC_EXCP_MCHECK:    /* Machine check exception */
        if (msr_me == 0) {
            /*
             * Machine check exception is not enabled. Enter
             * checkstop state.
             */
            fprintf(stderr, "Machine check while not allowed. "
                    "Entering checkstop state\n");
            if (qemu_log_separate()) {
                qemu_log("Machine check while not allowed. "
                         "Entering checkstop state\n");
            }
            cs->halted = 1;
            cpu_interrupt_exittb(cs);
        }
        if (env->msr_mask & MSR_HVB) {
            /*
             * ISA specifies HV, but can be delivered to guest with HV
             * clear (e.g., see FWNMI in PAPR).
             */
            new_msr |= (target_ulong)MSR_HVB;
        }

        /* machine check exceptions don't have ME set */
        new_msr &= ~((target_ulong)1 << MSR_ME);

        /* XXX: should also have something loaded in DAR / DSISR */
        switch (excp_model) {
        case POWERPC_EXCP_40x:
            srr0 = SPR_40x_SRR2;
            srr1 = SPR_40x_SRR3;
            break;
        case POWERPC_EXCP_BOOKE:
            /* FIXME: choose one or the other based on CPU type */
            srr0 = SPR_BOOKE_MCSRR0;
            srr1 = SPR_BOOKE_MCSRR1;

            env->spr[SPR_BOOKE_CSRR0] = env->nip;
            env->spr[SPR_BOOKE_CSRR1] = msr;
            break;
        default:
            break;
        }
        break;
    case POWERPC_EXCP_DSI:       /* Data storage exception */
        trace_ppc_excp_dsi(env->spr[SPR_DSISR], env->spr[SPR_DAR]);
        break;
    case POWERPC_EXCP_ISI:       /* Instruction storage exception */
        trace_ppc_excp_isi(msr, env->nip);
        msr |= env->error_code;
        break;
    case POWERPC_EXCP_EXTERNAL:  /* External input */
    {
        bool lpes0;

        cs = CPU(cpu);

        /*
         * Exception targeting modifiers
         *
         * LPES0 is supported on POWER7/8/9
         * LPES1 is not supported (old iSeries mode)
         *
         * On anything else, we behave as if LPES0 is 1
         * (externals don't alter MSR:HV)
         */
#if defined(TARGET_PPC64)
        if (excp_model == POWERPC_EXCP_POWER7 ||
            excp_model == POWERPC_EXCP_POWER8 ||
            excp_model == POWERPC_EXCP_POWER9 ||
            excp_model == POWERPC_EXCP_POWER10) {
            lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
        } else
#endif /* defined(TARGET_PPC64) */
        {
            lpes0 = true;
        }

        if (!lpes0) {
            new_msr |= (target_ulong)MSR_HVB;
            new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
            srr0 = SPR_HSRR0;
            srr1 = SPR_HSRR1;
        }
        if (env->mpic_proxy) {
            /* IACK the IRQ on delivery */
            env->spr[SPR_BOOKE_EPR] = ldl_phys(cs->as, env->mpic_iack);
        }
        break;
    }
    case POWERPC_EXCP_ALIGN:     /* Alignment exception */
        /* Get rS/rD and rA from faulting opcode */
        /*
         * Note: the opcode fields will not be set properly for a
         * direct store load/store, but nobody cares as nobody
         * actually uses direct store segments.
         */
        env->spr[SPR_DSISR] |= (env->error_code & 0x03FF0000) >> 16;
        break;
    case POWERPC_EXCP_PROGRAM:   /* Program exception */
        switch (env->error_code & ~0xF) {
        case POWERPC_EXCP_FP:
            if ((msr_fe0 == 0 && msr_fe1 == 0) || msr_fp == 0) {
                trace_ppc_excp_fp_ignore();
                cs->exception_index = POWERPC_EXCP_NONE;
                env->error_code = 0;
                return;
            }

            /*
             * FP exceptions always have NIP pointing to the faulting
             * instruction, so always use store_next and claim we are
             * precise in the MSR.
             */
            msr |= 0x00100000;
            env->spr[SPR_BOOKE_ESR] = ESR_FP;
            break;
        case POWERPC_EXCP_INVAL:
            trace_ppc_excp_inval(env->nip);
            msr |= 0x00080000;
            env->spr[SPR_BOOKE_ESR] = ESR_PIL;
            break;
        case POWERPC_EXCP_PRIV:
            msr |= 0x00040000;
            env->spr[SPR_BOOKE_ESR] = ESR_PPR;
            break;
        case POWERPC_EXCP_TRAP:
            msr |= 0x00020000;
            env->spr[SPR_BOOKE_ESR] = ESR_PTR;
            break;
        default:
            /* Should never occur */
            cpu_abort(cs, "Invalid program exception %d. Aborting\n",
                      env->error_code);
            break;
        }
        break;
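    /*
     * sc uses the level field to pick the target: lev=1 ("sc 1") requests
     * a hypercall, anything else is a regular system call.
     */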
    case POWERPC_EXCP_SYSCALL:   /* System call exception */
        lev = env->error_code;

        if ((lev == 1) && cpu->vhyp) {
            dump_hcall(env);
        } else {
            dump_syscall(env);
        }

        /*
         * We need to correct the NIP which in this case is supposed
         * to point to the next instruction
         */
        env->nip += 4;

        /* "PAPR mode" built-in hypercall emulation */
        if ((lev == 1) && cpu->vhyp) {
            PPCVirtualHypervisorClass *vhc =
                PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
            vhc->hypercall(cpu->vhyp, cpu);
            return;
        }
        if (lev == 1) {
            new_msr |= (target_ulong)MSR_HVB;
        }
        break;
    case POWERPC_EXCP_SYSCALL_VECTORED: /* scv exception */
        lev = env->error_code;
        dump_syscall(env);
        env->nip += 4;
        new_msr |= env->msr & ((target_ulong)1 << MSR_EE);
        new_msr |= env->msr & ((target_ulong)1 << MSR_RI);

        vector += lev * 0x20;

        env->lr = env->nip;
        env->ctr = msr;
        break;
    case POWERPC_EXCP_FPU:       /* Floating-point unavailable exception */
    case POWERPC_EXCP_APU:       /* Auxiliary processor unavailable */
    case POWERPC_EXCP_DECR:      /* Decrementer exception */
        break;
    case POWERPC_EXCP_FIT:       /* Fixed-interval timer interrupt */
        /* FIT on 4xx */
        trace_ppc_excp_print("FIT");
        break;
    case POWERPC_EXCP_WDT:       /* Watchdog timer interrupt */
        trace_ppc_excp_print("WDT");
        switch (excp_model) {
        case POWERPC_EXCP_BOOKE:
            srr0 = SPR_BOOKE_CSRR0;
            srr1 = SPR_BOOKE_CSRR1;
            break;
        default:
            break;
        }
        break;
    case POWERPC_EXCP_DTLB:      /* Data TLB error */
    case POWERPC_EXCP_ITLB:      /* Instruction TLB error */
        break;
    case POWERPC_EXCP_DEBUG:     /* Debug interrupt */
        if (env->flags & POWERPC_FLAG_DE) {
            /* FIXME: choose one or the other based on CPU type */
            srr0 = SPR_BOOKE_DSRR0;
            srr1 = SPR_BOOKE_DSRR1;

            env->spr[SPR_BOOKE_CSRR0] = env->nip;
            env->spr[SPR_BOOKE_CSRR1] = msr;

            /* DBSR already modified by caller */
        } else {
            cpu_abort(cs, "Debug exception triggered on unsupported model\n");
        }
        break;
    case POWERPC_EXCP_SPEU:   /* SPE/embedded floating-point unavailable/VPU */
        env->spr[SPR_BOOKE_ESR] = ESR_SPV;
        break;
    case POWERPC_EXCP_EFPDI:     /* Embedded floating-point data interrupt */
        /* XXX: TODO */
        cpu_abort(cs, "Embedded floating point data exception "
                  "is not implemented yet !\n");
        env->spr[SPR_BOOKE_ESR] = ESR_SPV;
        break;
    case POWERPC_EXCP_EFPRI:     /* Embedded floating-point round interrupt */
        /* XXX: TODO */
        cpu_abort(cs, "Embedded floating point round exception "
                  "is not implemented yet !\n");
        env->spr[SPR_BOOKE_ESR] = ESR_SPV;
        break;
    case POWERPC_EXCP_EPERFM:    /* Embedded performance monitor interrupt */
        /* XXX: TODO */
        cpu_abort(cs,
                  "Performance counter exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_DOORI:     /* Embedded doorbell interrupt */
        break;
    case POWERPC_EXCP_DOORCI:    /* Embedded doorbell critical interrupt */
        srr0 = SPR_BOOKE_CSRR0;
        srr1 = SPR_BOOKE_CSRR1;
        break;
    case POWERPC_EXCP_RESET:     /* System reset exception */
        /* A power-saving exception sets ME, otherwise it is unchanged */
        if (msr_pow) {
            /* indicate that we resumed from power save mode */
            msr |= 0x10000;
            new_msr |= ((target_ulong)1 << MSR_ME);
        }
        if (env->msr_mask & MSR_HVB) {
            /*
             * ISA specifies HV, but can be delivered to guest with HV
             * clear (e.g., see FWNMI in PAPR, NMI injection in QEMU).
             */
            new_msr |= (target_ulong)MSR_HVB;
        } else {
            if (msr_pow) {
                cpu_abort(cs, "Trying to deliver power-saving system reset "
                          "exception %d with no HV support\n", excp);
            }
        }
        break;
    case POWERPC_EXCP_DSEG:      /* Data segment exception */
    case POWERPC_EXCP_ISEG:      /* Instruction segment exception */
    case POWERPC_EXCP_TRACE:     /* Trace exception */
        break;
    case POWERPC_EXCP_HISI:      /* Hypervisor instruction storage exception */
        msr |= env->error_code;
        /* fall through */
    case POWERPC_EXCP_HDECR:     /* Hypervisor decrementer exception */
    case POWERPC_EXCP_HDSI:      /* Hypervisor data storage exception */
    case POWERPC_EXCP_HDSEG:     /* Hypervisor data segment exception */
    case POWERPC_EXCP_HISEG:     /* Hypervisor instruction segment exception */
    case POWERPC_EXCP_SDOOR_HV:  /* Hypervisor Doorbell interrupt */
    case POWERPC_EXCP_HV_EMU:
    case POWERPC_EXCP_HVIRT:     /* Hypervisor virtualization */
        srr0 = SPR_HSRR0;
        srr1 = SPR_HSRR1;
        new_msr |= (target_ulong)MSR_HVB;
        new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
        break;
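    /*
     * For the facility unavailable interrupts, error_code carries the
     * Interruption Cause value that is written into the top byte of
     * (H)FSCR so the OS can tell which facility trapped.
     */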
    case POWERPC_EXCP_VPU:       /* Vector unavailable exception */
    case POWERPC_EXCP_VSXU:      /* VSX unavailable exception */
    case POWERPC_EXCP_FU:        /* Facility unavailable exception */
#ifdef TARGET_PPC64
        env->spr[SPR_FSCR] |= ((target_ulong)env->error_code << 56);
#endif
        break;
    case POWERPC_EXCP_HV_FU:     /* Hypervisor Facility Unavailable Exception */
#ifdef TARGET_PPC64
        env->spr[SPR_HFSCR] |= ((target_ulong)env->error_code << FSCR_IC_POS);
        srr0 = SPR_HSRR0;
        srr1 = SPR_HSRR1;
        new_msr |= (target_ulong)MSR_HVB;
        new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
#endif
        break;
    case POWERPC_EXCP_PIT:       /* Programmable interval timer interrupt */
        trace_ppc_excp_print("PIT");
        break;
    case POWERPC_EXCP_IO:        /* IO error exception */
        /* XXX: TODO */
        cpu_abort(cs, "601 IO error exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_RUNM:      /* Run mode exception */
        /* XXX: TODO */
        cpu_abort(cs, "601 run mode exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_EMUL:      /* Emulation trap exception */
        /* XXX: TODO */
        cpu_abort(cs, "602 emulation trap exception "
                  "is not implemented yet !\n");
        break;
    case POWERPC_EXCP_IFTLB:     /* Instruction fetch TLB error */
    case POWERPC_EXCP_DLTLB:     /* Data load TLB miss */
    case POWERPC_EXCP_DSTLB:     /* Data store TLB miss */
        switch (excp_model) {
        case POWERPC_EXCP_602:
        case POWERPC_EXCP_603:
        case POWERPC_EXCP_G2:
            /* Swap temporary saved registers with GPRs */
            if (!(new_msr & ((target_ulong)1 << MSR_TGPR))) {
                new_msr |= (target_ulong)1 << MSR_TGPR;
                hreg_swap_gpr_tgpr(env);
            }
            /* fall through */
        case POWERPC_EXCP_7x5:
#if defined(DEBUG_SOFTWARE_TLB)
            if (qemu_log_enabled()) {
                const char *es;
                target_ulong *miss, *cmp;
                int en;

                if (excp == POWERPC_EXCP_IFTLB) {
                    es = "I";
                    en = 'I';
                    miss = &env->spr[SPR_IMISS];
                    cmp = &env->spr[SPR_ICMP];
                } else {
                    if (excp == POWERPC_EXCP_DLTLB) {
                        es = "DL";
                    } else {
                        es = "DS";
                    }
                    en = 'D';
                    miss = &env->spr[SPR_DMISS];
                    cmp = &env->spr[SPR_DCMP];
                }
                qemu_log("6xx %sTLB miss: %cM " TARGET_FMT_lx " %cC "
                         TARGET_FMT_lx " H1 " TARGET_FMT_lx " H2 "
                         TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp,
                         env->spr[SPR_HASH1], env->spr[SPR_HASH2],
                         env->error_code);
            }
#endif
            msr |= env->crf[0] << 28;
            msr |= env->error_code; /* key, D/I, S/L bits */
            /* Set way using a LRU mechanism */
            msr |= ((env->last_way + 1) & (env->nb_ways - 1)) << 17;
            break;
        default:
            cpu_abort(cs, "Invalid TLB miss exception\n");
            break;
        }
        break;
    case POWERPC_EXCP_FPA:       /* Floating-point assist exception */
        /* XXX: TODO */
        cpu_abort(cs, "Floating point assist exception "
                  "is not implemented yet !\n");
        break;
    case POWERPC_EXCP_DABR:      /* Data address breakpoint */
        /* XXX: TODO */
        cpu_abort(cs, "DABR exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_IABR:      /* Instruction address breakpoint */
        /* XXX: TODO */
        cpu_abort(cs, "IABR exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_SMI:       /* System management interrupt */
        /* XXX: TODO */
        cpu_abort(cs, "SMI exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_THERM:     /* Thermal interrupt */
        /* XXX: TODO */
        cpu_abort(cs, "Thermal management exception "
                  "is not implemented yet !\n");
        break;
    case POWERPC_EXCP_PERFM:     /* Embedded performance monitor interrupt */
        /* XXX: TODO */
        cpu_abort(cs,
                  "Performance counter exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_VPUA:      /* Vector assist exception */
        /* XXX: TODO */
        cpu_abort(cs, "VPU assist exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_SOFTP:     /* Soft patch exception */
        /* XXX: TODO */
        cpu_abort(cs,
                  "970 soft-patch exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_MAINT:     /* Maintenance exception */
        /* XXX: TODO */
        cpu_abort(cs,
                  "970 maintenance exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_MEXTBR:    /* Maskable external breakpoint */
        /* XXX: TODO */
        cpu_abort(cs, "Maskable external exception "
                  "is not implemented yet !\n");
        break;
    case POWERPC_EXCP_NMEXTBR:   /* Non maskable external breakpoint */
        /* XXX: TODO */
        cpu_abort(cs, "Non maskable external exception "
                  "is not implemented yet !\n");
        break;
    default:
    excp_invalid:
        cpu_abort(cs, "Invalid PowerPC exception %d. Aborting\n", excp);
        break;
    }

    /* Sanity check */
    if (!(env->msr_mask & MSR_HVB)) {
        if (new_msr & MSR_HVB) {
            cpu_abort(cs, "Trying to deliver HV exception (MSR) %d with "
                      "no HV support\n", excp);
        }
        if (srr0 == SPR_HSRR0) {
            cpu_abort(cs, "Trying to deliver HV exception (HSRR) %d with "
                      "no HV support\n", excp);
        }
    }

    /*
     * Sort out the endianness of the interrupt; this differs depending on
     * the CPU, the HV mode, etc...
     */
#ifdef TARGET_PPC64
    if (excp_model == POWERPC_EXCP_POWER7) {
        if (!(new_msr & MSR_HVB) && (env->spr[SPR_LPCR] & LPCR_ILE)) {
            new_msr |= (target_ulong)1 << MSR_LE;
        }
    } else if (excp_model == POWERPC_EXCP_POWER8) {
        if (new_msr & MSR_HVB) {
            if (env->spr[SPR_HID0] & HID0_HILE) {
                new_msr |= (target_ulong)1 << MSR_LE;
            }
        } else if (env->spr[SPR_LPCR] & LPCR_ILE) {
            new_msr |= (target_ulong)1 << MSR_LE;
        }
    } else if (excp_model == POWERPC_EXCP_POWER9 ||
               excp_model == POWERPC_EXCP_POWER10) {
        if (new_msr & MSR_HVB) {
            if (env->spr[SPR_HID0] & HID0_POWER9_HILE) {
                new_msr |= (target_ulong)1 << MSR_LE;
            }
        } else if (env->spr[SPR_LPCR] & LPCR_ILE) {
            new_msr |= (target_ulong)1 << MSR_LE;
        }
    } else if (msr_ile) {
        new_msr |= (target_ulong)1 << MSR_LE;
    }
#else
    if (msr_ile) {
        new_msr |= (target_ulong)1 << MSR_LE;
    }
#endif

#if defined(TARGET_PPC64)
    if (excp_model == POWERPC_EXCP_BOOKE) {
        if (env->spr[SPR_BOOKE_EPCR] & EPCR_ICM) {
            /* Cat.64-bit: EPCR.ICM is copied to MSR.CM */
            new_msr |= (target_ulong)1 << MSR_CM;
        } else {
            vector = (uint32_t)vector;
        }
    } else {
        if (!msr_isf && !mmu_is_64bit(env->mmu_model)) {
            vector = (uint32_t)vector;
        } else {
            new_msr |= (target_ulong)1 << MSR_SF;
        }
    }
#endif

    if (excp != POWERPC_EXCP_SYSCALL_VECTORED) {
        /* Save PC */
        env->spr[srr0] = env->nip;

        /* Save MSR */
        env->spr[srr1] = msr;
    }

    /* This can update new_msr and vector if AIL applies */
    ppc_excp_apply_ail(cpu, excp_model, excp, msr, &new_msr, &vector);

    powerpc_set_excp_state(cpu, vector, new_msr);
}

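/*
 * The do_interrupt hook: deliver the exception recorded in
 * cs->exception_index.
 */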
void ppc_cpu_do_interrupt(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);

    powerpc_excp(cpu, cs->exception_index);
}

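/*
 * Scan the pending hardware interrupts in priority order and deliver the
 * highest-priority one that is not masked by the current machine state.
 */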
static void ppc_hw_interrupt(CPUPPCState *env)
{
    PowerPCCPU *cpu = env_archcpu(env);
    bool async_deliver;

    /* External reset */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_RESET)) {
        env->pending_interrupts &= ~(1 << PPC_INTERRUPT_RESET);
        powerpc_excp(cpu, POWERPC_EXCP_RESET);
        return;
    }
    /* Machine check exception */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_MCK)) {
        env->pending_interrupts &= ~(1 << PPC_INTERRUPT_MCK);
        powerpc_excp(cpu, POWERPC_EXCP_MCHECK);
        return;
    }
#if 0 /* TODO */
    /* External debug exception */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_DEBUG)) {
        env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DEBUG);
        powerpc_excp(cpu, POWERPC_EXCP_DEBUG);
        return;
    }
#endif

    /*
     * For interrupts that gate on MSR:EE, we need to do something a
     * bit more subtle, as we need to let them through even when EE is
     * clear when coming out of some power management states (in order
     * for them to become a 0x100).
     */
    async_deliver = (msr_ee != 0) || env->resume_as_sreset;

    /* Hypervisor decrementer exception */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDECR)) {
        /* LPCR will be clear when not supported so this will work */
        bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE);
        if ((async_deliver || msr_hv == 0) && hdice) {
            /* HDEC clears on delivery */
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDECR);
            powerpc_excp(cpu, POWERPC_EXCP_HDECR);
            return;
        }
    }

    /* Hypervisor virtualization interrupt */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_HVIRT)) {
        /* LPCR will be clear when not supported so this will work */
        bool hvice = !!(env->spr[SPR_LPCR] & LPCR_HVICE);
        if ((async_deliver || msr_hv == 0) && hvice) {
            powerpc_excp(cpu, POWERPC_EXCP_HVIRT);
            return;
        }
    }

    /* External interrupt can ignore MSR:EE under some circumstances */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_EXT)) {
        bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
        bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC);
        /* HEIC blocks delivery to the hypervisor */
        if ((async_deliver && !(heic && msr_hv && !msr_pr)) ||
            (env->has_hv_mode && msr_hv == 0 && !lpes0)) {
            powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL);
            return;
        }
    }
    if (msr_ce != 0) {
        /* External critical interrupt */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_CEXT)) {
            powerpc_excp(cpu, POWERPC_EXCP_CRITICAL);
            return;
        }
    }
    if (async_deliver != 0) {
        /* Watchdog timer on embedded PowerPC */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_WDT)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_WDT);
            powerpc_excp(cpu, POWERPC_EXCP_WDT);
            return;
        }
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_CDOORBELL)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_CDOORBELL);
            powerpc_excp(cpu, POWERPC_EXCP_DOORCI);
            return;
        }
        /* Fixed interval timer on embedded PowerPC */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_FIT)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_FIT);
            powerpc_excp(cpu, POWERPC_EXCP_FIT);
            return;
        }
        /* Programmable interval timer on embedded PowerPC */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_PIT)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PIT);
            powerpc_excp(cpu, POWERPC_EXCP_PIT);
            return;
        }
        /* Decrementer exception */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_DECR)) {
            if (ppc_decr_clear_on_delivery(env)) {
                env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DECR);
            }
            powerpc_excp(cpu, POWERPC_EXCP_DECR);
            return;
        }
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_DOORBELL)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DOORBELL);
            if (is_book3s_arch2x(env)) {
                powerpc_excp(cpu, POWERPC_EXCP_SDOOR);
            } else {
                powerpc_excp(cpu, POWERPC_EXCP_DOORI);
            }
            return;
        }
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDOORBELL)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDOORBELL);
            powerpc_excp(cpu, POWERPC_EXCP_SDOOR_HV);
            return;
        }
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_PERFM)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PERFM);
            powerpc_excp(cpu, POWERPC_EXCP_PERFM);
            return;
        }
        /* Thermal interrupt */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_THERM)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_THERM);
            powerpc_excp(cpu, POWERPC_EXCP_THERM);
            return;
        }
    }

    if (env->resume_as_sreset) {
        /*
         * This is a bug! It means that has_work took us out of halt without
         * anything to deliver while in a PM state that requires getting
         * out via a 0x100.
         *
         * This means we will incorrectly execute past the power management
         * instruction instead of triggering a reset.
         *
         * It generally means a discrepancy between the wakeup conditions in
         * the processor has_work implementation and the logic in this
         * function.
         */
        cpu_abort(env_cpu(env),
                  "Wakeup from PM state but interrupt Undelivered");
    }
}

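/*
 * Deliver a system reset interrupt on behalf of callers outside the
 * normal interrupt delivery path (e.g. NMI injection).
 */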
void ppc_cpu_do_system_reset(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);

    powerpc_excp(cpu, POWERPC_EXCP_RESET);
}

void ppc_cpu_do_fwnmi_machine_check(CPUState *cs, target_ulong vector)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    target_ulong msr = 0;

    /*
     * Set MSR and NIP for the handler, SRR0/1, DAR and DSISR have already
     * been set by KVM.
     */
    msr = (1ULL << MSR_ME);
    msr |= env->msr & (1ULL << MSR_SF);
    if (ppc_interrupts_little_endian(cpu)) {
        msr |= (1ULL << MSR_LE);
    }

    powerpc_set_excp_state(cpu, vector, msr);
}

bool ppc_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;

    if (interrupt_request & CPU_INTERRUPT_HARD) {
        ppc_hw_interrupt(env);
        if (env->pending_interrupts == 0) {
            cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
        }
        return true;
    }
    return false;
}

#endif /* !CONFIG_USER_ONLY */

/*****************************************************************************/
/* Exceptions processing helpers */

void raise_exception_err_ra(CPUPPCState *env, uint32_t exception,
                            uint32_t error_code, uintptr_t raddr)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = exception;
    env->error_code = error_code;
    cpu_loop_exit_restore(cs, raddr);
}

void raise_exception_err(CPUPPCState *env, uint32_t exception,
                         uint32_t error_code)
{
    raise_exception_err_ra(env, exception, error_code, 0);
}

void raise_exception(CPUPPCState *env, uint32_t exception)
{
    raise_exception_err_ra(env, exception, 0, 0);
}

void raise_exception_ra(CPUPPCState *env, uint32_t exception,
                        uintptr_t raddr)
{
    raise_exception_err_ra(env, exception, 0, raddr);
}

#ifdef CONFIG_TCG
void helper_raise_exception_err(CPUPPCState *env, uint32_t exception,
                                uint32_t error_code)
{
    raise_exception_err_ra(env, exception, error_code, 0);
}

void helper_raise_exception(CPUPPCState *env, uint32_t exception)
{
    raise_exception_err_ra(env, exception, 0, 0);
}
#endif

#if !defined(CONFIG_USER_ONLY)
#ifdef CONFIG_TCG
void helper_store_msr(CPUPPCState *env, target_ulong val)
{
    uint32_t excp = hreg_store_msr(env, val, 0);

    if (excp != 0) {
        CPUState *cs = env_cpu(env);
        cpu_interrupt_exittb(cs);
        raise_exception(env, excp);
    }
}

#if defined(TARGET_PPC64)
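/*
 * scv is only usable when FSCR[SCV] is set; otherwise it raises a
 * facility unavailable interrupt with the SCV interruption cause.
 */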
void helper_scv(CPUPPCState *env, uint32_t lev)
{
    if (env->spr[SPR_FSCR] & (1ull << FSCR_SCV)) {
        raise_exception_err(env, POWERPC_EXCP_SYSCALL_VECTORED, lev);
    } else {
        raise_exception_err(env, POWERPC_EXCP_FU, FSCR_IC_SCV);
    }
}

void helper_pminsn(CPUPPCState *env, powerpc_pm_insn_t insn)
{
    CPUState *cs;

    cs = env_cpu(env);
    cs->halted = 1;

    /* Condition for waking up at 0x100 */
    env->resume_as_sreset = (insn != PPC_PM_STOP) ||
        (env->spr[SPR_PSSCR] & PSSCR_EC);
}
#endif /* defined(TARGET_PPC64) */
#endif /* CONFIG_TCG */

static void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr)
{
    CPUState *cs = env_cpu(env);

    /* MSR:POW cannot be set by any form of rfi */
    msr &= ~(1ULL << MSR_POW);

#if defined(TARGET_PPC64)
    /* Switching to 32-bit ? Crop the nip */
    if (!msr_is_64bit(env, msr)) {
        nip = (uint32_t)nip;
    }
#else
    nip = (uint32_t)nip;
#endif
    /* XXX: beware: this is false if VLE is supported */
    env->nip = nip & ~((target_ulong)0x00000003);
    hreg_store_msr(env, msr, 1);
    trace_ppc_excp_rfi(env->nip, env->msr);
    /*
     * No need to raise an exception here, as rfi is always the last
     * insn of a TB
     */
    cpu_interrupt_exittb(cs);
    /* Reset the reservation */
    env->reserve_addr = -1;

    /* Context synchronizing: check if TCG TLB needs flush */
    check_tlb_flush(env, false);
}

#ifdef CONFIG_TCG
void helper_rfi(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1] & 0xfffffffful);
}

#define MSR_BOOK3S_MASK
#if defined(TARGET_PPC64)
void helper_rfid(CPUPPCState *env)
{
    /*
     * The architecture defines a number of rules for which bits can
     * change but in practice, we handle this in hreg_store_msr()
     * which will be called by do_rfi(), so there is no need to filter
     * here
     */
    do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1]);
}

void helper_rfscv(CPUPPCState *env)
{
    do_rfi(env, env->lr, env->ctr);
}

void helper_hrfid(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_HSRR0], env->spr[SPR_HSRR1]);
}
#endif

#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
void helper_rfebb(CPUPPCState *env, target_ulong s)
{
    target_ulong msr = env->msr;

    /*
     * Handling of BESCR bits 32:33 according to PowerISA v3.1:
     *
     * "If BESCR 32:33 != 0b00 the instruction is treated as if
     *  the instruction form were invalid."
     */
    if (env->spr[SPR_BESCR] & BESCR_INVALID) {
        raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                            POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
    }

    env->nip = env->spr[SPR_EBBRR];

    /* Switching to 32-bit ? Crop the nip */
    if (!msr_is_64bit(env, msr)) {
        env->nip = (uint32_t)env->spr[SPR_EBBRR];
    }

    if (s) {
        env->spr[SPR_BESCR] |= BESCR_GE;
    } else {
        env->spr[SPR_BESCR] &= ~BESCR_GE;
    }
}
#endif

/*****************************************************************************/
/* Embedded PowerPC specific helpers */
void helper_40x_rfci(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3]);
}

void helper_rfci(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1]);
}

void helper_rfdi(CPUPPCState *env)
{
    /* FIXME: choose CSRR1 or DSRR1 based on cpu type */
    do_rfi(env, env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1]);
}

void helper_rfmci(CPUPPCState *env)
{
    /* FIXME: choose CSRR1 or MCSRR1 based on cpu type */
    do_rfi(env, env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1]);
}
#endif /* CONFIG_TCG */
#endif /* !defined(CONFIG_USER_ONLY) */

#ifdef CONFIG_TCG
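/*
 * Conditional trap helpers: the low five bits of "flags" mirror the TO
 * field of tw/td (lt, gt, eq, ltu, gtu); raise a TRAP program interrupt
 * if any selected comparison holds.
 */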
void helper_tw(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
               uint32_t flags)
{
    if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
                  ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
                  ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
                  ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
                  ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_TRAP, GETPC());
    }
}

#if defined(TARGET_PPC64)
void helper_td(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
               uint32_t flags)
{
    if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
                  ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
                  ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
                  ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
                  ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01))))) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_TRAP, GETPC());
    }
}
#endif
#endif

#if !defined(CONFIG_USER_ONLY)
/*****************************************************************************/
/* PowerPC 601 specific instructions (POWER bridge) */

#ifdef CONFIG_TCG
void helper_rfsvc(CPUPPCState *env)
{
    do_rfi(env, env->lr, env->ctr & 0x0000FFFF);
}

/* Embedded.Processor Control */
static int dbell2irq(target_ulong rb)
{
    int msg = rb & DBELL_TYPE_MASK;
    int irq = -1;

    switch (msg) {
    case DBELL_TYPE_DBELL:
        irq = PPC_INTERRUPT_DOORBELL;
        break;
    case DBELL_TYPE_DBELL_CRIT:
        irq = PPC_INTERRUPT_CDOORBELL;
        break;
    case DBELL_TYPE_G_DBELL:
    case DBELL_TYPE_G_DBELL_CRIT:
    case DBELL_TYPE_G_DBELL_MC:
        /* XXX implement */
    default:
        break;
    }

    return irq;
}

void helper_msgclr(CPUPPCState *env, target_ulong rb)
{
    int irq = dbell2irq(rb);

    if (irq < 0) {
        return;
    }

    env->pending_interrupts &= ~(1 << irq);
}

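/*
 * Embedded msgsnd: raise the doorbell interrupt selected by rB on every
 * CPU whose PIR matches the tag, or on all CPUs if the broadcast bit is
 * set.
 */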
void helper_msgsnd(target_ulong rb)
{
    int irq = dbell2irq(rb);
    int pir = rb & DBELL_PIRTAG_MASK;
    CPUState *cs;

    if (irq < 0) {
        return;
    }

    qemu_mutex_lock_iothread();
    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);
        CPUPPCState *cenv = &cpu->env;

        if ((rb & DBELL_BRDCAST) || (cenv->spr[SPR_BOOKE_PIR] == pir)) {
            cenv->pending_interrupts |= 1 << irq;
            cpu_interrupt(cs, CPU_INTERRUPT_HARD);
        }
    }
    qemu_mutex_unlock_iothread();
}

/* Server Processor Control */

static bool dbell_type_server(target_ulong rb)
{
    /*
     * A Directed Hypervisor Doorbell message is sent only if the
     * message type is 5. All other types are reserved and the
     * instruction is a no-op
     */
    return (rb & DBELL_TYPE_MASK) == DBELL_TYPE_DBELL_SERVER;
}

void helper_book3s_msgclr(CPUPPCState *env, target_ulong rb)
{
    if (!dbell_type_server(rb)) {
        return;
    }

    env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDOORBELL);
}

static void book3s_msgsnd_common(int pir, int irq)
{
    CPUState *cs;

    qemu_mutex_lock_iothread();
    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);
        CPUPPCState *cenv = &cpu->env;

        /* TODO: broadcast message to all threads of the same processor */
        if (cenv->spr_cb[SPR_PIR].default_value == pir) {
            cenv->pending_interrupts |= 1 << irq;
            cpu_interrupt(cs, CPU_INTERRUPT_HARD);
        }
    }
    qemu_mutex_unlock_iothread();
}

void helper_book3s_msgsnd(target_ulong rb)
{
    int pir = rb & DBELL_PROCIDTAG_MASK;

    if (!dbell_type_server(rb)) {
        return;
    }

    book3s_msgsnd_common(pir, PPC_INTERRUPT_HDOORBELL);
}

#if defined(TARGET_PPC64)
void helper_book3s_msgclrp(CPUPPCState *env, target_ulong rb)
{
    helper_hfscr_facility_check(env, HFSCR_MSGP, "msgclrp", HFSCR_IC_MSGP);

    if (!dbell_type_server(rb)) {
        return;
    }

    env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DOORBELL);
}

/*
 * sends a message to other threads that are on the same
 * multi-threaded processor
 */
void helper_book3s_msgsndp(CPUPPCState *env, target_ulong rb)
{
    int pir = env->spr_cb[SPR_PIR].default_value;

    helper_hfscr_facility_check(env, HFSCR_MSGP, "msgsndp", HFSCR_IC_MSGP);

    if (!dbell_type_server(rb)) {
        return;
    }

    /* TODO: TCG supports only one thread */

    book3s_msgsnd_common(pir, PPC_INTERRUPT_DOORBELL);
}
#endif /* TARGET_PPC64 */

void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr)
{
    CPUPPCState *env = cs->env_ptr;
    uint32_t insn;

    /* Restore state and reload the insn we executed, for filling in DSISR. */
    cpu_restore_state(cs, retaddr, true);
    insn = cpu_ldl_code(env, env->nip);

    switch (env->mmu_model) {
    case POWERPC_MMU_SOFT_4xx:
        env->spr[SPR_40x_DEAR] = vaddr;
        break;
    case POWERPC_MMU_BOOKE:
    case POWERPC_MMU_BOOKE206:
        env->spr[SPR_BOOKE_DEAR] = vaddr;
        break;
    default:
        env->spr[SPR_DAR] = vaddr;
        break;
    }

    cs->exception_index = POWERPC_EXCP_ALIGN;
    env->error_code = insn & 0x03FF0000;
    cpu_loop_exit(cs);
}
#endif /* CONFIG_TCG */
#endif /* !CONFIG_USER_ONLY */