1 /* 2 * PowerPC exception emulation helpers for QEMU. 3 * 4 * Copyright (c) 2003-2007 Jocelyn Mayer 5 * 6 * This library is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU Lesser General Public 8 * License as published by the Free Software Foundation; either 9 * version 2.1 of the License, or (at your option) any later version. 10 * 11 * This library is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * Lesser General Public License for more details. 15 * 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 #include "qemu/osdep.h" 20 #include "qemu/main-loop.h" 21 #include "qemu/log.h" 22 #include "cpu.h" 23 #include "exec/exec-all.h" 24 #include "internal.h" 25 #include "helper_regs.h" 26 #include "hw/ppc/ppc.h" 27 28 #include "trace.h" 29 30 #ifdef CONFIG_TCG 31 #include "sysemu/tcg.h" 32 #include "exec/helper-proto.h" 33 #include "exec/cpu_ldst.h" 34 #endif 35 36 /*****************************************************************************/ 37 /* Exception processing */ 38 #if !defined(CONFIG_USER_ONLY) 39 40 static const char *powerpc_excp_name(int excp) 41 { 42 switch (excp) { 43 case POWERPC_EXCP_CRITICAL: return "CRITICAL"; 44 case POWERPC_EXCP_MCHECK: return "MCHECK"; 45 case POWERPC_EXCP_DSI: return "DSI"; 46 case POWERPC_EXCP_ISI: return "ISI"; 47 case POWERPC_EXCP_EXTERNAL: return "EXTERNAL"; 48 case POWERPC_EXCP_ALIGN: return "ALIGN"; 49 case POWERPC_EXCP_PROGRAM: return "PROGRAM"; 50 case POWERPC_EXCP_FPU: return "FPU"; 51 case POWERPC_EXCP_SYSCALL: return "SYSCALL"; 52 case POWERPC_EXCP_APU: return "APU"; 53 case POWERPC_EXCP_DECR: return "DECR"; 54 case POWERPC_EXCP_FIT: return "FIT"; 55 case POWERPC_EXCP_WDT: return "WDT"; 56 case POWERPC_EXCP_DTLB: return "DTLB"; 57 case POWERPC_EXCP_ITLB: return "ITLB"; 58 case POWERPC_EXCP_DEBUG: return "DEBUG"; 59 case POWERPC_EXCP_SPEU: return "SPEU"; 60 case POWERPC_EXCP_EFPDI: return "EFPDI"; 61 case POWERPC_EXCP_EFPRI: return "EFPRI"; 62 case POWERPC_EXCP_EPERFM: return "EPERFM"; 63 case POWERPC_EXCP_DOORI: return "DOORI"; 64 case POWERPC_EXCP_DOORCI: return "DOORCI"; 65 case POWERPC_EXCP_GDOORI: return "GDOORI"; 66 case POWERPC_EXCP_GDOORCI: return "GDOORCI"; 67 case POWERPC_EXCP_HYPPRIV: return "HYPPRIV"; 68 case POWERPC_EXCP_RESET: return "RESET"; 69 case POWERPC_EXCP_DSEG: return "DSEG"; 70 case POWERPC_EXCP_ISEG: return "ISEG"; 71 case POWERPC_EXCP_HDECR: return "HDECR"; 72 case POWERPC_EXCP_TRACE: return "TRACE"; 73 case POWERPC_EXCP_HDSI: return "HDSI"; 74 case POWERPC_EXCP_HISI: return "HISI"; 75 case POWERPC_EXCP_HDSEG: return "HDSEG"; 76 case POWERPC_EXCP_HISEG: return "HISEG"; 77 case POWERPC_EXCP_VPU: return "VPU"; 78 case POWERPC_EXCP_PIT: return "PIT"; 79 case POWERPC_EXCP_EMUL: return "EMUL"; 80 case POWERPC_EXCP_IFTLB: return "IFTLB"; 81 case POWERPC_EXCP_DLTLB: return "DLTLB"; 82 case POWERPC_EXCP_DSTLB: return "DSTLB"; 83 case POWERPC_EXCP_FPA: return "FPA"; 84 case POWERPC_EXCP_DABR: return "DABR"; 85 case POWERPC_EXCP_IABR: return "IABR"; 86 case POWERPC_EXCP_SMI: return "SMI"; 87 case POWERPC_EXCP_PERFM: return "PERFM"; 88 case POWERPC_EXCP_THERM: return "THERM"; 89 case POWERPC_EXCP_VPUA: return "VPUA"; 90 case POWERPC_EXCP_SOFTP: return "SOFTP"; 91 case POWERPC_EXCP_MAINT: return "MAINT"; 92 case POWERPC_EXCP_MEXTBR: return 
"MEXTBR"; 93 case POWERPC_EXCP_NMEXTBR: return "NMEXTBR"; 94 case POWERPC_EXCP_ITLBE: return "ITLBE"; 95 case POWERPC_EXCP_DTLBE: return "DTLBE"; 96 case POWERPC_EXCP_VSXU: return "VSXU"; 97 case POWERPC_EXCP_FU: return "FU"; 98 case POWERPC_EXCP_HV_EMU: return "HV_EMU"; 99 case POWERPC_EXCP_HV_MAINT: return "HV_MAINT"; 100 case POWERPC_EXCP_HV_FU: return "HV_FU"; 101 case POWERPC_EXCP_SDOOR: return "SDOOR"; 102 case POWERPC_EXCP_SDOOR_HV: return "SDOOR_HV"; 103 case POWERPC_EXCP_HVIRT: return "HVIRT"; 104 case POWERPC_EXCP_SYSCALL_VECTORED: return "SYSCALL_VECTORED"; 105 default: 106 g_assert_not_reached(); 107 } 108 } 109 110 static void dump_syscall(CPUPPCState *env) 111 { 112 qemu_log_mask(CPU_LOG_INT, "syscall r0=%016" PRIx64 113 " r3=%016" PRIx64 " r4=%016" PRIx64 " r5=%016" PRIx64 114 " r6=%016" PRIx64 " r7=%016" PRIx64 " r8=%016" PRIx64 115 " nip=" TARGET_FMT_lx "\n", 116 ppc_dump_gpr(env, 0), ppc_dump_gpr(env, 3), 117 ppc_dump_gpr(env, 4), ppc_dump_gpr(env, 5), 118 ppc_dump_gpr(env, 6), ppc_dump_gpr(env, 7), 119 ppc_dump_gpr(env, 8), env->nip); 120 } 121 122 static void dump_hcall(CPUPPCState *env) 123 { 124 qemu_log_mask(CPU_LOG_INT, "hypercall r3=%016" PRIx64 125 " r4=%016" PRIx64 " r5=%016" PRIx64 " r6=%016" PRIx64 126 " r7=%016" PRIx64 " r8=%016" PRIx64 " r9=%016" PRIx64 127 " r10=%016" PRIx64 " r11=%016" PRIx64 " r12=%016" PRIx64 128 " nip=" TARGET_FMT_lx "\n", 129 ppc_dump_gpr(env, 3), ppc_dump_gpr(env, 4), 130 ppc_dump_gpr(env, 5), ppc_dump_gpr(env, 6), 131 ppc_dump_gpr(env, 7), ppc_dump_gpr(env, 8), 132 ppc_dump_gpr(env, 9), ppc_dump_gpr(env, 10), 133 ppc_dump_gpr(env, 11), ppc_dump_gpr(env, 12), 134 env->nip); 135 } 136 137 #ifdef CONFIG_TCG 138 /* Return true iff byteswap is needed to load instruction */ 139 static inline bool insn_need_byteswap(CPUArchState *env) 140 { 141 /* SYSTEM builds TARGET_BIG_ENDIAN. 
Need to swap when MSR[LE] is set */ 142 return !!(env->msr & ((target_ulong)1 << MSR_LE)); 143 } 144 145 static uint32_t ppc_ldl_code(CPUArchState *env, abi_ptr addr) 146 { 147 uint32_t insn = cpu_ldl_code(env, addr); 148 149 if (insn_need_byteswap(env)) { 150 insn = bswap32(insn); 151 } 152 153 return insn; 154 } 155 #endif 156 157 static void ppc_excp_debug_sw_tlb(CPUPPCState *env, int excp) 158 { 159 const char *es; 160 target_ulong *miss, *cmp; 161 int en; 162 163 if (!qemu_loglevel_mask(CPU_LOG_MMU)) { 164 return; 165 } 166 167 if (excp == POWERPC_EXCP_IFTLB) { 168 es = "I"; 169 en = 'I'; 170 miss = &env->spr[SPR_IMISS]; 171 cmp = &env->spr[SPR_ICMP]; 172 } else { 173 if (excp == POWERPC_EXCP_DLTLB) { 174 es = "DL"; 175 } else { 176 es = "DS"; 177 } 178 en = 'D'; 179 miss = &env->spr[SPR_DMISS]; 180 cmp = &env->spr[SPR_DCMP]; 181 } 182 qemu_log("6xx %sTLB miss: %cM " TARGET_FMT_lx " %cC " 183 TARGET_FMT_lx " H1 " TARGET_FMT_lx " H2 " 184 TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp, 185 env->spr[SPR_HASH1], env->spr[SPR_HASH2], 186 env->error_code); 187 } 188 189 #if defined(TARGET_PPC64) 190 static int powerpc_reset_wakeup(CPUState *cs, CPUPPCState *env, int excp, 191 target_ulong *msr) 192 { 193 /* We no longer are in a PM state */ 194 env->resume_as_sreset = false; 195 196 /* Pretend to be returning from doze always as we don't lose state */ 197 *msr |= SRR1_WS_NOLOSS; 198 199 /* Machine checks are sent normally */ 200 if (excp == POWERPC_EXCP_MCHECK) { 201 return excp; 202 } 203 switch (excp) { 204 case POWERPC_EXCP_RESET: 205 *msr |= SRR1_WAKERESET; 206 break; 207 case POWERPC_EXCP_EXTERNAL: 208 *msr |= SRR1_WAKEEE; 209 break; 210 case POWERPC_EXCP_DECR: 211 *msr |= SRR1_WAKEDEC; 212 break; 213 case POWERPC_EXCP_SDOOR: 214 *msr |= SRR1_WAKEDBELL; 215 break; 216 case POWERPC_EXCP_SDOOR_HV: 217 *msr |= SRR1_WAKEHDBELL; 218 break; 219 case POWERPC_EXCP_HV_MAINT: 220 *msr |= SRR1_WAKEHMI; 221 break; 222 case POWERPC_EXCP_HVIRT: 223 *msr |= SRR1_WAKEHVI; 224 break; 225 default: 226 cpu_abort(cs, "Unsupported exception %d in Power Save mode\n", 227 excp); 228 } 229 return POWERPC_EXCP_RESET; 230 } 231 232 /* 233 * AIL - Alternate Interrupt Location, a mode that allows interrupts to be 234 * taken with the MMU on, and which uses an alternate location (e.g., so the 235 * kernel/hv can map the vectors there with an effective address). 236 * 237 * An interrupt is considered to be taken "with AIL" or "AIL applies" if they 238 * are delivered in this way. AIL requires the LPCR to be set to enable this 239 * mode, and then a number of conditions have to be true for AIL to apply. 240 * 241 * First of all, SRESET, MCE, and HMI are always delivered without AIL, because 242 * they specifically want to be in real mode (e.g., the MCE might be signaling 243 * a SLB multi-hit which requires SLB flush before the MMU can be enabled). 244 * 245 * After that, behaviour depends on the current MSR[IR], MSR[DR], MSR[HV], 246 * whether or not the interrupt changes MSR[HV] from 0 to 1, and the current 247 * radix mode (LPCR[HR]). 
248 * 249 * POWER8, POWER9 with LPCR[HR]=0 250 * | LPCR[AIL] | MSR[IR||DR] | MSR[HV] | new MSR[HV] | AIL | 251 * +-----------+-------------+---------+-------------+-----+ 252 * | a | 00/01/10 | x | x | 0 | 253 * | a | 11 | 0 | 1 | 0 | 254 * | a | 11 | 1 | 1 | a | 255 * | a | 11 | 0 | 0 | a | 256 * +-------------------------------------------------------+ 257 * 258 * POWER9 with LPCR[HR]=1 259 * | LPCR[AIL] | MSR[IR||DR] | MSR[HV] | new MSR[HV] | AIL | 260 * +-----------+-------------+---------+-------------+-----+ 261 * | a | 00/01/10 | x | x | 0 | 262 * | a | 11 | x | x | a | 263 * +-------------------------------------------------------+ 264 * 265 * The difference with POWER9 being that MSR[HV] 0->1 interrupts can be sent to 266 * the hypervisor in AIL mode if the guest is radix. This is good for 267 * performance but allows the guest to influence the AIL of hypervisor 268 * interrupts using its MSR, and also the hypervisor must disallow guest 269 * interrupts (MSR[HV] 0->0) from using AIL if the hypervisor does not want to 270 * use AIL for its MSR[HV] 0->1 interrupts. 271 * 272 * POWER10 addresses those issues with a new LPCR[HAIL] bit that is applied to 273 * interrupts that begin execution with MSR[HV]=1 (so both MSR[HV] 0->1 and 274 * MSR[HV] 1->1). 275 * 276 * HAIL=1 is equivalent to AIL=3, for interrupts delivered with MSR[HV]=1. 277 * 278 * POWER10 behaviour is 279 * | LPCR[AIL] | LPCR[HAIL] | MSR[IR||DR] | MSR[HV] | new MSR[HV] | AIL | 280 * +-----------+------------+-------------+---------+-------------+-----+ 281 * | a | h | 00/01/10 | 0 | 0 | 0 | 282 * | a | h | 11 | 0 | 0 | a | 283 * | a | h | x | 0 | 1 | h | 284 * | a | h | 00/01/10 | 1 | 1 | 0 | 285 * | a | h | 11 | 1 | 1 | h | 286 * +--------------------------------------------------------------------+ 287 */ 288 static void ppc_excp_apply_ail(PowerPCCPU *cpu, int excp, target_ulong msr, 289 target_ulong *new_msr, target_ulong *vector) 290 { 291 PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); 292 CPUPPCState *env = &cpu->env; 293 bool mmu_all_on = ((msr >> MSR_IR) & 1) && ((msr >> MSR_DR) & 1); 294 bool hv_escalation = !(msr & MSR_HVB) && (*new_msr & MSR_HVB); 295 int ail = 0; 296 297 if (excp == POWERPC_EXCP_MCHECK || 298 excp == POWERPC_EXCP_RESET || 299 excp == POWERPC_EXCP_HV_MAINT) { 300 /* SRESET, MCE, HMI never apply AIL */ 301 return; 302 } 303 304 if (!(pcc->lpcr_mask & LPCR_AIL)) { 305 /* This CPU does not have AIL */ 306 return; 307 } 308 309 /* P8 & P9 */ 310 if (!(pcc->lpcr_mask & LPCR_HAIL)) { 311 if (!mmu_all_on) { 312 /* AIL only works if MSR[IR] and MSR[DR] are both enabled. */ 313 return; 314 } 315 if (hv_escalation && !(env->spr[SPR_LPCR] & LPCR_HR)) { 316 /* 317 * AIL does not work if there is a MSR[HV] 0->1 transition and the 318 * partition is in HPT mode. For radix guests, such interrupts are 319 * allowed to be delivered to the hypervisor in ail mode. 320 */ 321 return; 322 } 323 324 ail = (env->spr[SPR_LPCR] & LPCR_AIL) >> LPCR_AIL_SHIFT; 325 if (ail == 0) { 326 return; 327 } 328 if (ail == 1) { 329 /* AIL=1 is reserved, treat it like AIL=0 */ 330 return; 331 } 332 333 /* P10 and up */ 334 } else { 335 if (!mmu_all_on && !hv_escalation) { 336 /* 337 * AIL works for HV interrupts even with guest MSR[IR/DR] disabled. 338 * Guest->guest and HV->HV interrupts do require MMU on. 
339 */ 340 return; 341 } 342 343 if (*new_msr & MSR_HVB) { 344 if (!(env->spr[SPR_LPCR] & LPCR_HAIL)) { 345 /* HV interrupts depend on LPCR[HAIL] */ 346 return; 347 } 348 ail = 3; /* HAIL=1 gives AIL=3 behaviour for HV interrupts */ 349 } else { 350 ail = (env->spr[SPR_LPCR] & LPCR_AIL) >> LPCR_AIL_SHIFT; 351 } 352 if (ail == 0) { 353 return; 354 } 355 if (ail == 1 || ail == 2) { 356 /* AIL=1 and AIL=2 are reserved, treat them like AIL=0 */ 357 return; 358 } 359 } 360 361 /* 362 * AIL applies, so the new MSR gets IR and DR set, and an offset applied 363 * to the new IP. 364 */ 365 *new_msr |= (1 << MSR_IR) | (1 << MSR_DR); 366 367 if (excp != POWERPC_EXCP_SYSCALL_VECTORED) { 368 if (ail == 2) { 369 *vector |= 0x0000000000018000ull; 370 } else if (ail == 3) { 371 *vector |= 0xc000000000004000ull; 372 } 373 } else { 374 /* 375 * scv AIL is a little different. AIL=2 does not change the address, 376 * only the MSR. AIL=3 replaces the 0x17000 base with 0xc...3000. 377 */ 378 if (ail == 3) { 379 *vector &= ~0x0000000000017000ull; /* Un-apply the base offset */ 380 *vector |= 0xc000000000003000ull; /* Apply scv's AIL=3 offset */ 381 } 382 } 383 } 384 #endif 385 386 static void powerpc_reset_excp_state(PowerPCCPU *cpu) 387 { 388 CPUState *cs = CPU(cpu); 389 CPUPPCState *env = &cpu->env; 390 391 /* Reset exception state */ 392 cs->exception_index = POWERPC_EXCP_NONE; 393 env->error_code = 0; 394 } 395 396 static void powerpc_set_excp_state(PowerPCCPU *cpu, target_ulong vector, 397 target_ulong msr) 398 { 399 CPUPPCState *env = &cpu->env; 400 401 assert((msr & env->msr_mask) == msr); 402 403 /* 404 * We don't use hreg_store_msr here as already have treated any 405 * special case that could occur. Just store MSR and update hflags 406 * 407 * Note: We *MUST* not use hreg_store_msr() as-is anyway because it 408 * will prevent setting of the HV bit which some exceptions might need 409 * to do. 410 */ 411 env->nip = vector; 412 env->msr = msr; 413 hreg_compute_hflags(env); 414 ppc_maybe_interrupt(env); 415 416 powerpc_reset_excp_state(cpu); 417 418 /* 419 * Any interrupt is context synchronizing, check if TCG TLB needs 420 * a delayed flush on ppc64 421 */ 422 check_tlb_flush(env, false); 423 424 /* Reset the reservation */ 425 env->reserve_addr = -1; 426 } 427 428 static void powerpc_excp_40x(PowerPCCPU *cpu, int excp) 429 { 430 CPUState *cs = CPU(cpu); 431 CPUPPCState *env = &cpu->env; 432 target_ulong msr, new_msr, vector; 433 int srr0, srr1; 434 435 /* new srr1 value excluding must-be-zero bits */ 436 msr = env->msr & ~0x783f0000ULL; 437 438 /* 439 * new interrupt handler msr preserves existing ME unless 440 * explicitly overriden. 441 */ 442 new_msr = env->msr & (((target_ulong)1 << MSR_ME)); 443 444 /* target registers */ 445 srr0 = SPR_SRR0; 446 srr1 = SPR_SRR1; 447 448 /* 449 * Hypervisor emulation assistance interrupt only exists on server 450 * arch 2.05 server or later. 451 */ 452 if (excp == POWERPC_EXCP_HV_EMU) { 453 excp = POWERPC_EXCP_PROGRAM; 454 } 455 456 vector = env->excp_vectors[excp]; 457 if (vector == (target_ulong)-1ULL) { 458 cpu_abort(cs, "Raised an exception without defined vector %d\n", 459 excp); 460 } 461 462 vector |= env->excp_prefix; 463 464 switch (excp) { 465 case POWERPC_EXCP_CRITICAL: /* Critical input */ 466 srr0 = SPR_40x_SRR2; 467 srr1 = SPR_40x_SRR3; 468 break; 469 case POWERPC_EXCP_MCHECK: /* Machine check exception */ 470 if (!FIELD_EX64(env->msr, MSR, ME)) { 471 /* 472 * Machine check exception is not enabled. Enter 473 * checkstop state. 
474 */ 475 fprintf(stderr, "Machine check while not allowed. " 476 "Entering checkstop state\n"); 477 if (qemu_log_separate()) { 478 qemu_log("Machine check while not allowed. " 479 "Entering checkstop state\n"); 480 } 481 cs->halted = 1; 482 cpu_interrupt_exittb(cs); 483 } 484 485 /* machine check exceptions don't have ME set */ 486 new_msr &= ~((target_ulong)1 << MSR_ME); 487 488 srr0 = SPR_40x_SRR2; 489 srr1 = SPR_40x_SRR3; 490 break; 491 case POWERPC_EXCP_DSI: /* Data storage exception */ 492 trace_ppc_excp_dsi(env->spr[SPR_40x_ESR], env->spr[SPR_40x_DEAR]); 493 break; 494 case POWERPC_EXCP_ISI: /* Instruction storage exception */ 495 trace_ppc_excp_isi(msr, env->nip); 496 break; 497 case POWERPC_EXCP_EXTERNAL: /* External input */ 498 break; 499 case POWERPC_EXCP_ALIGN: /* Alignment exception */ 500 break; 501 case POWERPC_EXCP_PROGRAM: /* Program exception */ 502 switch (env->error_code & ~0xF) { 503 case POWERPC_EXCP_FP: 504 if (!FIELD_EX64_FE(env->msr) || !FIELD_EX64(env->msr, MSR, FP)) { 505 trace_ppc_excp_fp_ignore(); 506 powerpc_reset_excp_state(cpu); 507 return; 508 } 509 env->spr[SPR_40x_ESR] = ESR_FP; 510 break; 511 case POWERPC_EXCP_INVAL: 512 trace_ppc_excp_inval(env->nip); 513 env->spr[SPR_40x_ESR] = ESR_PIL; 514 break; 515 case POWERPC_EXCP_PRIV: 516 env->spr[SPR_40x_ESR] = ESR_PPR; 517 break; 518 case POWERPC_EXCP_TRAP: 519 env->spr[SPR_40x_ESR] = ESR_PTR; 520 break; 521 default: 522 cpu_abort(cs, "Invalid program exception %d. Aborting\n", 523 env->error_code); 524 break; 525 } 526 break; 527 case POWERPC_EXCP_SYSCALL: /* System call exception */ 528 dump_syscall(env); 529 530 /* 531 * We need to correct the NIP which in this case is supposed 532 * to point to the next instruction 533 */ 534 env->nip += 4; 535 break; 536 case POWERPC_EXCP_FIT: /* Fixed-interval timer interrupt */ 537 trace_ppc_excp_print("FIT"); 538 break; 539 case POWERPC_EXCP_WDT: /* Watchdog timer interrupt */ 540 trace_ppc_excp_print("WDT"); 541 break; 542 case POWERPC_EXCP_DTLB: /* Data TLB error */ 543 case POWERPC_EXCP_ITLB: /* Instruction TLB error */ 544 break; 545 case POWERPC_EXCP_PIT: /* Programmable interval timer interrupt */ 546 trace_ppc_excp_print("PIT"); 547 break; 548 case POWERPC_EXCP_DEBUG: /* Debug interrupt */ 549 cpu_abort(cs, "%s exception not implemented\n", 550 powerpc_excp_name(excp)); 551 break; 552 default: 553 cpu_abort(cs, "Invalid PowerPC exception %d. Aborting\n", excp); 554 break; 555 } 556 557 /* Save PC */ 558 env->spr[srr0] = env->nip; 559 560 /* Save MSR */ 561 env->spr[srr1] = msr; 562 563 powerpc_set_excp_state(cpu, vector, new_msr); 564 } 565 566 static void powerpc_excp_6xx(PowerPCCPU *cpu, int excp) 567 { 568 CPUState *cs = CPU(cpu); 569 CPUPPCState *env = &cpu->env; 570 target_ulong msr, new_msr, vector; 571 572 /* new srr1 value excluding must-be-zero bits */ 573 msr = env->msr & ~0x783f0000ULL; 574 575 /* 576 * new interrupt handler msr preserves existing ME unless 577 * explicitly overriden 578 */ 579 new_msr = env->msr & ((target_ulong)1 << MSR_ME); 580 581 /* 582 * Hypervisor emulation assistance interrupt only exists on server 583 * arch 2.05 server or later. 
584 */ 585 if (excp == POWERPC_EXCP_HV_EMU) { 586 excp = POWERPC_EXCP_PROGRAM; 587 } 588 589 vector = env->excp_vectors[excp]; 590 if (vector == (target_ulong)-1ULL) { 591 cpu_abort(cs, "Raised an exception without defined vector %d\n", 592 excp); 593 } 594 595 vector |= env->excp_prefix; 596 597 switch (excp) { 598 case POWERPC_EXCP_CRITICAL: /* Critical input */ 599 break; 600 case POWERPC_EXCP_MCHECK: /* Machine check exception */ 601 if (!FIELD_EX64(env->msr, MSR, ME)) { 602 /* 603 * Machine check exception is not enabled. Enter 604 * checkstop state. 605 */ 606 fprintf(stderr, "Machine check while not allowed. " 607 "Entering checkstop state\n"); 608 if (qemu_log_separate()) { 609 qemu_log("Machine check while not allowed. " 610 "Entering checkstop state\n"); 611 } 612 cs->halted = 1; 613 cpu_interrupt_exittb(cs); 614 } 615 616 /* machine check exceptions don't have ME set */ 617 new_msr &= ~((target_ulong)1 << MSR_ME); 618 619 break; 620 case POWERPC_EXCP_DSI: /* Data storage exception */ 621 trace_ppc_excp_dsi(env->spr[SPR_DSISR], env->spr[SPR_DAR]); 622 break; 623 case POWERPC_EXCP_ISI: /* Instruction storage exception */ 624 trace_ppc_excp_isi(msr, env->nip); 625 msr |= env->error_code; 626 break; 627 case POWERPC_EXCP_EXTERNAL: /* External input */ 628 break; 629 case POWERPC_EXCP_ALIGN: /* Alignment exception */ 630 /* Get rS/rD and rA from faulting opcode */ 631 /* 632 * Note: the opcode fields will not be set properly for a 633 * direct store load/store, but nobody cares as nobody 634 * actually uses direct store segments. 635 */ 636 env->spr[SPR_DSISR] |= (env->error_code & 0x03FF0000) >> 16; 637 break; 638 case POWERPC_EXCP_PROGRAM: /* Program exception */ 639 switch (env->error_code & ~0xF) { 640 case POWERPC_EXCP_FP: 641 if (!FIELD_EX64_FE(env->msr) || !FIELD_EX64(env->msr, MSR, FP)) { 642 trace_ppc_excp_fp_ignore(); 643 powerpc_reset_excp_state(cpu); 644 return; 645 } 646 647 /* 648 * FP exceptions always have NIP pointing to the faulting 649 * instruction, so always use store_next and claim we are 650 * precise in the MSR. 651 */ 652 msr |= 0x00100000; 653 break; 654 case POWERPC_EXCP_INVAL: 655 trace_ppc_excp_inval(env->nip); 656 msr |= 0x00080000; 657 break; 658 case POWERPC_EXCP_PRIV: 659 msr |= 0x00040000; 660 break; 661 case POWERPC_EXCP_TRAP: 662 msr |= 0x00020000; 663 break; 664 default: 665 /* Should never occur */ 666 cpu_abort(cs, "Invalid program exception %d. 
Aborting\n", 667 env->error_code); 668 break; 669 } 670 break; 671 case POWERPC_EXCP_SYSCALL: /* System call exception */ 672 dump_syscall(env); 673 674 /* 675 * We need to correct the NIP which in this case is supposed 676 * to point to the next instruction 677 */ 678 env->nip += 4; 679 break; 680 case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */ 681 case POWERPC_EXCP_DECR: /* Decrementer exception */ 682 break; 683 case POWERPC_EXCP_DTLB: /* Data TLB error */ 684 case POWERPC_EXCP_ITLB: /* Instruction TLB error */ 685 break; 686 case POWERPC_EXCP_RESET: /* System reset exception */ 687 if (FIELD_EX64(env->msr, MSR, POW)) { 688 cpu_abort(cs, "Trying to deliver power-saving system reset " 689 "exception %d with no HV support\n", excp); 690 } 691 break; 692 case POWERPC_EXCP_TRACE: /* Trace exception */ 693 break; 694 case POWERPC_EXCP_IFTLB: /* Instruction fetch TLB error */ 695 case POWERPC_EXCP_DLTLB: /* Data load TLB miss */ 696 case POWERPC_EXCP_DSTLB: /* Data store TLB miss */ 697 /* Swap temporary saved registers with GPRs */ 698 if (!(new_msr & ((target_ulong)1 << MSR_TGPR))) { 699 new_msr |= (target_ulong)1 << MSR_TGPR; 700 hreg_swap_gpr_tgpr(env); 701 } 702 703 ppc_excp_debug_sw_tlb(env, excp); 704 705 msr |= env->crf[0] << 28; 706 msr |= env->error_code; /* key, D/I, S/L bits */ 707 /* Set way using a LRU mechanism */ 708 msr |= ((env->last_way + 1) & (env->nb_ways - 1)) << 17; 709 break; 710 case POWERPC_EXCP_FPA: /* Floating-point assist exception */ 711 case POWERPC_EXCP_DABR: /* Data address breakpoint */ 712 case POWERPC_EXCP_IABR: /* Instruction address breakpoint */ 713 case POWERPC_EXCP_SMI: /* System management interrupt */ 714 case POWERPC_EXCP_MEXTBR: /* Maskable external breakpoint */ 715 case POWERPC_EXCP_NMEXTBR: /* Non maskable external breakpoint */ 716 cpu_abort(cs, "%s exception not implemented\n", 717 powerpc_excp_name(excp)); 718 break; 719 default: 720 cpu_abort(cs, "Invalid PowerPC exception %d. Aborting\n", excp); 721 break; 722 } 723 724 /* 725 * Sort out endianness of interrupt, this differs depending on the 726 * CPU, the HV mode, etc... 727 */ 728 if (ppc_interrupts_little_endian(cpu, !!(new_msr & MSR_HVB))) { 729 new_msr |= (target_ulong)1 << MSR_LE; 730 } 731 732 /* Save PC */ 733 env->spr[SPR_SRR0] = env->nip; 734 735 /* Save MSR */ 736 env->spr[SPR_SRR1] = msr; 737 738 powerpc_set_excp_state(cpu, vector, new_msr); 739 } 740 741 static void powerpc_excp_7xx(PowerPCCPU *cpu, int excp) 742 { 743 CPUState *cs = CPU(cpu); 744 CPUPPCState *env = &cpu->env; 745 target_ulong msr, new_msr, vector; 746 747 /* new srr1 value excluding must-be-zero bits */ 748 msr = env->msr & ~0x783f0000ULL; 749 750 /* 751 * new interrupt handler msr preserves existing ME unless 752 * explicitly overriden 753 */ 754 new_msr = env->msr & ((target_ulong)1 << MSR_ME); 755 756 /* 757 * Hypervisor emulation assistance interrupt only exists on server 758 * arch 2.05 server or later. 759 */ 760 if (excp == POWERPC_EXCP_HV_EMU) { 761 excp = POWERPC_EXCP_PROGRAM; 762 } 763 764 vector = env->excp_vectors[excp]; 765 if (vector == (target_ulong)-1ULL) { 766 cpu_abort(cs, "Raised an exception without defined vector %d\n", 767 excp); 768 } 769 770 vector |= env->excp_prefix; 771 772 switch (excp) { 773 case POWERPC_EXCP_MCHECK: /* Machine check exception */ 774 if (!FIELD_EX64(env->msr, MSR, ME)) { 775 /* 776 * Machine check exception is not enabled. Enter 777 * checkstop state. 778 */ 779 fprintf(stderr, "Machine check while not allowed. 
" 780 "Entering checkstop state\n"); 781 if (qemu_log_separate()) { 782 qemu_log("Machine check while not allowed. " 783 "Entering checkstop state\n"); 784 } 785 cs->halted = 1; 786 cpu_interrupt_exittb(cs); 787 } 788 789 /* machine check exceptions don't have ME set */ 790 new_msr &= ~((target_ulong)1 << MSR_ME); 791 792 break; 793 case POWERPC_EXCP_DSI: /* Data storage exception */ 794 trace_ppc_excp_dsi(env->spr[SPR_DSISR], env->spr[SPR_DAR]); 795 break; 796 case POWERPC_EXCP_ISI: /* Instruction storage exception */ 797 trace_ppc_excp_isi(msr, env->nip); 798 msr |= env->error_code; 799 break; 800 case POWERPC_EXCP_EXTERNAL: /* External input */ 801 break; 802 case POWERPC_EXCP_ALIGN: /* Alignment exception */ 803 /* Get rS/rD and rA from faulting opcode */ 804 /* 805 * Note: the opcode fields will not be set properly for a 806 * direct store load/store, but nobody cares as nobody 807 * actually uses direct store segments. 808 */ 809 env->spr[SPR_DSISR] |= (env->error_code & 0x03FF0000) >> 16; 810 break; 811 case POWERPC_EXCP_PROGRAM: /* Program exception */ 812 switch (env->error_code & ~0xF) { 813 case POWERPC_EXCP_FP: 814 if (!FIELD_EX64_FE(env->msr) || !FIELD_EX64(env->msr, MSR, FP)) { 815 trace_ppc_excp_fp_ignore(); 816 powerpc_reset_excp_state(cpu); 817 return; 818 } 819 820 /* 821 * FP exceptions always have NIP pointing to the faulting 822 * instruction, so always use store_next and claim we are 823 * precise in the MSR. 824 */ 825 msr |= 0x00100000; 826 break; 827 case POWERPC_EXCP_INVAL: 828 trace_ppc_excp_inval(env->nip); 829 msr |= 0x00080000; 830 break; 831 case POWERPC_EXCP_PRIV: 832 msr |= 0x00040000; 833 break; 834 case POWERPC_EXCP_TRAP: 835 msr |= 0x00020000; 836 break; 837 default: 838 /* Should never occur */ 839 cpu_abort(cs, "Invalid program exception %d. Aborting\n", 840 env->error_code); 841 break; 842 } 843 break; 844 case POWERPC_EXCP_SYSCALL: /* System call exception */ 845 { 846 int lev = env->error_code; 847 848 if (lev == 1 && cpu->vhyp) { 849 dump_hcall(env); 850 } else { 851 dump_syscall(env); 852 } 853 854 /* 855 * We need to correct the NIP which in this case is supposed 856 * to point to the next instruction 857 */ 858 env->nip += 4; 859 860 /* 861 * The Virtual Open Firmware (VOF) relies on the 'sc 1' 862 * instruction to communicate with QEMU. The pegasos2 machine 863 * uses VOF and the 7xx CPUs, so although the 7xx don't have 864 * HV mode, we need to keep hypercall support. 
865 */ 866 if (lev == 1 && cpu->vhyp) { 867 PPCVirtualHypervisorClass *vhc = 868 PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp); 869 vhc->hypercall(cpu->vhyp, cpu); 870 return; 871 } 872 873 break; 874 } 875 case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */ 876 case POWERPC_EXCP_DECR: /* Decrementer exception */ 877 break; 878 case POWERPC_EXCP_RESET: /* System reset exception */ 879 if (FIELD_EX64(env->msr, MSR, POW)) { 880 cpu_abort(cs, "Trying to deliver power-saving system reset " 881 "exception %d with no HV support\n", excp); 882 } 883 break; 884 case POWERPC_EXCP_TRACE: /* Trace exception */ 885 break; 886 case POWERPC_EXCP_IFTLB: /* Instruction fetch TLB error */ 887 case POWERPC_EXCP_DLTLB: /* Data load TLB miss */ 888 case POWERPC_EXCP_DSTLB: /* Data store TLB miss */ 889 ppc_excp_debug_sw_tlb(env, excp); 890 891 msr |= env->crf[0] << 28; 892 msr |= env->error_code; /* key, D/I, S/L bits */ 893 /* Set way using a LRU mechanism */ 894 msr |= ((env->last_way + 1) & (env->nb_ways - 1)) << 17; 895 896 break; 897 case POWERPC_EXCP_IABR: /* Instruction address breakpoint */ 898 case POWERPC_EXCP_SMI: /* System management interrupt */ 899 case POWERPC_EXCP_THERM: /* Thermal interrupt */ 900 case POWERPC_EXCP_PERFM: /* Embedded performance monitor interrupt */ 901 cpu_abort(cs, "%s exception not implemented\n", 902 powerpc_excp_name(excp)); 903 break; 904 default: 905 cpu_abort(cs, "Invalid PowerPC exception %d. Aborting\n", excp); 906 break; 907 } 908 909 /* 910 * Sort out endianness of interrupt, this differs depending on the 911 * CPU, the HV mode, etc... 912 */ 913 if (ppc_interrupts_little_endian(cpu, !!(new_msr & MSR_HVB))) { 914 new_msr |= (target_ulong)1 << MSR_LE; 915 } 916 917 /* Save PC */ 918 env->spr[SPR_SRR0] = env->nip; 919 920 /* Save MSR */ 921 env->spr[SPR_SRR1] = msr; 922 923 powerpc_set_excp_state(cpu, vector, new_msr); 924 } 925 926 static void powerpc_excp_74xx(PowerPCCPU *cpu, int excp) 927 { 928 CPUState *cs = CPU(cpu); 929 CPUPPCState *env = &cpu->env; 930 target_ulong msr, new_msr, vector; 931 932 /* new srr1 value excluding must-be-zero bits */ 933 msr = env->msr & ~0x783f0000ULL; 934 935 /* 936 * new interrupt handler msr preserves existing ME unless 937 * explicitly overriden 938 */ 939 new_msr = env->msr & ((target_ulong)1 << MSR_ME); 940 941 /* 942 * Hypervisor emulation assistance interrupt only exists on server 943 * arch 2.05 server or later. 944 */ 945 if (excp == POWERPC_EXCP_HV_EMU) { 946 excp = POWERPC_EXCP_PROGRAM; 947 } 948 949 vector = env->excp_vectors[excp]; 950 if (vector == (target_ulong)-1ULL) { 951 cpu_abort(cs, "Raised an exception without defined vector %d\n", 952 excp); 953 } 954 955 vector |= env->excp_prefix; 956 957 switch (excp) { 958 case POWERPC_EXCP_MCHECK: /* Machine check exception */ 959 if (!FIELD_EX64(env->msr, MSR, ME)) { 960 /* 961 * Machine check exception is not enabled. Enter 962 * checkstop state. 963 */ 964 fprintf(stderr, "Machine check while not allowed. " 965 "Entering checkstop state\n"); 966 if (qemu_log_separate()) { 967 qemu_log("Machine check while not allowed. 
" 968 "Entering checkstop state\n"); 969 } 970 cs->halted = 1; 971 cpu_interrupt_exittb(cs); 972 } 973 974 /* machine check exceptions don't have ME set */ 975 new_msr &= ~((target_ulong)1 << MSR_ME); 976 977 break; 978 case POWERPC_EXCP_DSI: /* Data storage exception */ 979 trace_ppc_excp_dsi(env->spr[SPR_DSISR], env->spr[SPR_DAR]); 980 break; 981 case POWERPC_EXCP_ISI: /* Instruction storage exception */ 982 trace_ppc_excp_isi(msr, env->nip); 983 msr |= env->error_code; 984 break; 985 case POWERPC_EXCP_EXTERNAL: /* External input */ 986 break; 987 case POWERPC_EXCP_ALIGN: /* Alignment exception */ 988 /* Get rS/rD and rA from faulting opcode */ 989 /* 990 * Note: the opcode fields will not be set properly for a 991 * direct store load/store, but nobody cares as nobody 992 * actually uses direct store segments. 993 */ 994 env->spr[SPR_DSISR] |= (env->error_code & 0x03FF0000) >> 16; 995 break; 996 case POWERPC_EXCP_PROGRAM: /* Program exception */ 997 switch (env->error_code & ~0xF) { 998 case POWERPC_EXCP_FP: 999 if (!FIELD_EX64_FE(env->msr) || !FIELD_EX64(env->msr, MSR, FP)) { 1000 trace_ppc_excp_fp_ignore(); 1001 powerpc_reset_excp_state(cpu); 1002 return; 1003 } 1004 1005 /* 1006 * FP exceptions always have NIP pointing to the faulting 1007 * instruction, so always use store_next and claim we are 1008 * precise in the MSR. 1009 */ 1010 msr |= 0x00100000; 1011 break; 1012 case POWERPC_EXCP_INVAL: 1013 trace_ppc_excp_inval(env->nip); 1014 msr |= 0x00080000; 1015 break; 1016 case POWERPC_EXCP_PRIV: 1017 msr |= 0x00040000; 1018 break; 1019 case POWERPC_EXCP_TRAP: 1020 msr |= 0x00020000; 1021 break; 1022 default: 1023 /* Should never occur */ 1024 cpu_abort(cs, "Invalid program exception %d. Aborting\n", 1025 env->error_code); 1026 break; 1027 } 1028 break; 1029 case POWERPC_EXCP_SYSCALL: /* System call exception */ 1030 { 1031 int lev = env->error_code; 1032 1033 if ((lev == 1) && cpu->vhyp) { 1034 dump_hcall(env); 1035 } else { 1036 dump_syscall(env); 1037 } 1038 1039 /* 1040 * We need to correct the NIP which in this case is supposed 1041 * to point to the next instruction 1042 */ 1043 env->nip += 4; 1044 1045 /* 1046 * The Virtual Open Firmware (VOF) relies on the 'sc 1' 1047 * instruction to communicate with QEMU. The pegasos2 machine 1048 * uses VOF and the 74xx CPUs, so although the 74xx don't have 1049 * HV mode, we need to keep hypercall support. 
1050 */ 1051 if ((lev == 1) && cpu->vhyp) { 1052 PPCVirtualHypervisorClass *vhc = 1053 PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp); 1054 vhc->hypercall(cpu->vhyp, cpu); 1055 return; 1056 } 1057 1058 break; 1059 } 1060 case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */ 1061 case POWERPC_EXCP_DECR: /* Decrementer exception */ 1062 break; 1063 case POWERPC_EXCP_RESET: /* System reset exception */ 1064 if (FIELD_EX64(env->msr, MSR, POW)) { 1065 cpu_abort(cs, "Trying to deliver power-saving system reset " 1066 "exception %d with no HV support\n", excp); 1067 } 1068 break; 1069 case POWERPC_EXCP_TRACE: /* Trace exception */ 1070 break; 1071 case POWERPC_EXCP_VPU: /* Vector unavailable exception */ 1072 break; 1073 case POWERPC_EXCP_IABR: /* Instruction address breakpoint */ 1074 case POWERPC_EXCP_SMI: /* System management interrupt */ 1075 case POWERPC_EXCP_THERM: /* Thermal interrupt */ 1076 case POWERPC_EXCP_PERFM: /* Embedded performance monitor interrupt */ 1077 case POWERPC_EXCP_VPUA: /* Vector assist exception */ 1078 cpu_abort(cs, "%s exception not implemented\n", 1079 powerpc_excp_name(excp)); 1080 break; 1081 default: 1082 cpu_abort(cs, "Invalid PowerPC exception %d. Aborting\n", excp); 1083 break; 1084 } 1085 1086 /* 1087 * Sort out endianness of interrupt, this differs depending on the 1088 * CPU, the HV mode, etc... 1089 */ 1090 if (ppc_interrupts_little_endian(cpu, !!(new_msr & MSR_HVB))) { 1091 new_msr |= (target_ulong)1 << MSR_LE; 1092 } 1093 1094 /* Save PC */ 1095 env->spr[SPR_SRR0] = env->nip; 1096 1097 /* Save MSR */ 1098 env->spr[SPR_SRR1] = msr; 1099 1100 powerpc_set_excp_state(cpu, vector, new_msr); 1101 } 1102 1103 static void powerpc_excp_booke(PowerPCCPU *cpu, int excp) 1104 { 1105 CPUState *cs = CPU(cpu); 1106 CPUPPCState *env = &cpu->env; 1107 target_ulong msr, new_msr, vector; 1108 int srr0, srr1; 1109 1110 msr = env->msr; 1111 1112 /* 1113 * new interrupt handler msr preserves existing ME unless 1114 * explicitly overriden 1115 */ 1116 new_msr = env->msr & ((target_ulong)1 << MSR_ME); 1117 1118 /* target registers */ 1119 srr0 = SPR_SRR0; 1120 srr1 = SPR_SRR1; 1121 1122 /* 1123 * Hypervisor emulation assistance interrupt only exists on server 1124 * arch 2.05 server or later. 1125 */ 1126 if (excp == POWERPC_EXCP_HV_EMU) { 1127 excp = POWERPC_EXCP_PROGRAM; 1128 } 1129 1130 #ifdef TARGET_PPC64 1131 /* 1132 * SPEU and VPU share the same IVOR but they exist in different 1133 * processors. SPEU is e500v1/2 only and VPU is e6500 only. 1134 */ 1135 if (excp == POWERPC_EXCP_VPU) { 1136 excp = POWERPC_EXCP_SPEU; 1137 } 1138 #endif 1139 1140 vector = env->excp_vectors[excp]; 1141 if (vector == (target_ulong)-1ULL) { 1142 cpu_abort(cs, "Raised an exception without defined vector %d\n", 1143 excp); 1144 } 1145 1146 vector |= env->excp_prefix; 1147 1148 switch (excp) { 1149 case POWERPC_EXCP_CRITICAL: /* Critical input */ 1150 srr0 = SPR_BOOKE_CSRR0; 1151 srr1 = SPR_BOOKE_CSRR1; 1152 break; 1153 case POWERPC_EXCP_MCHECK: /* Machine check exception */ 1154 if (!FIELD_EX64(env->msr, MSR, ME)) { 1155 /* 1156 * Machine check exception is not enabled. Enter 1157 * checkstop state. 1158 */ 1159 fprintf(stderr, "Machine check while not allowed. " 1160 "Entering checkstop state\n"); 1161 if (qemu_log_separate()) { 1162 qemu_log("Machine check while not allowed. 
" 1163 "Entering checkstop state\n"); 1164 } 1165 cs->halted = 1; 1166 cpu_interrupt_exittb(cs); 1167 } 1168 1169 /* machine check exceptions don't have ME set */ 1170 new_msr &= ~((target_ulong)1 << MSR_ME); 1171 1172 /* FIXME: choose one or the other based on CPU type */ 1173 srr0 = SPR_BOOKE_MCSRR0; 1174 srr1 = SPR_BOOKE_MCSRR1; 1175 1176 env->spr[SPR_BOOKE_CSRR0] = env->nip; 1177 env->spr[SPR_BOOKE_CSRR1] = msr; 1178 1179 break; 1180 case POWERPC_EXCP_DSI: /* Data storage exception */ 1181 trace_ppc_excp_dsi(env->spr[SPR_BOOKE_ESR], env->spr[SPR_BOOKE_DEAR]); 1182 break; 1183 case POWERPC_EXCP_ISI: /* Instruction storage exception */ 1184 trace_ppc_excp_isi(msr, env->nip); 1185 break; 1186 case POWERPC_EXCP_EXTERNAL: /* External input */ 1187 if (env->mpic_proxy) { 1188 /* IACK the IRQ on delivery */ 1189 env->spr[SPR_BOOKE_EPR] = ldl_phys(cs->as, env->mpic_iack); 1190 } 1191 break; 1192 case POWERPC_EXCP_ALIGN: /* Alignment exception */ 1193 break; 1194 case POWERPC_EXCP_PROGRAM: /* Program exception */ 1195 switch (env->error_code & ~0xF) { 1196 case POWERPC_EXCP_FP: 1197 if (!FIELD_EX64_FE(env->msr) || !FIELD_EX64(env->msr, MSR, FP)) { 1198 trace_ppc_excp_fp_ignore(); 1199 powerpc_reset_excp_state(cpu); 1200 return; 1201 } 1202 1203 /* 1204 * FP exceptions always have NIP pointing to the faulting 1205 * instruction, so always use store_next and claim we are 1206 * precise in the MSR. 1207 */ 1208 msr |= 0x00100000; 1209 env->spr[SPR_BOOKE_ESR] = ESR_FP; 1210 break; 1211 case POWERPC_EXCP_INVAL: 1212 trace_ppc_excp_inval(env->nip); 1213 msr |= 0x00080000; 1214 env->spr[SPR_BOOKE_ESR] = ESR_PIL; 1215 break; 1216 case POWERPC_EXCP_PRIV: 1217 msr |= 0x00040000; 1218 env->spr[SPR_BOOKE_ESR] = ESR_PPR; 1219 break; 1220 case POWERPC_EXCP_TRAP: 1221 msr |= 0x00020000; 1222 env->spr[SPR_BOOKE_ESR] = ESR_PTR; 1223 break; 1224 default: 1225 /* Should never occur */ 1226 cpu_abort(cs, "Invalid program exception %d. 
Aborting\n", 1227 env->error_code); 1228 break; 1229 } 1230 break; 1231 case POWERPC_EXCP_SYSCALL: /* System call exception */ 1232 dump_syscall(env); 1233 1234 /* 1235 * We need to correct the NIP which in this case is supposed 1236 * to point to the next instruction 1237 */ 1238 env->nip += 4; 1239 break; 1240 case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */ 1241 case POWERPC_EXCP_APU: /* Auxiliary processor unavailable */ 1242 case POWERPC_EXCP_DECR: /* Decrementer exception */ 1243 break; 1244 case POWERPC_EXCP_FIT: /* Fixed-interval timer interrupt */ 1245 /* FIT on 4xx */ 1246 trace_ppc_excp_print("FIT"); 1247 break; 1248 case POWERPC_EXCP_WDT: /* Watchdog timer interrupt */ 1249 trace_ppc_excp_print("WDT"); 1250 srr0 = SPR_BOOKE_CSRR0; 1251 srr1 = SPR_BOOKE_CSRR1; 1252 break; 1253 case POWERPC_EXCP_DTLB: /* Data TLB error */ 1254 case POWERPC_EXCP_ITLB: /* Instruction TLB error */ 1255 break; 1256 case POWERPC_EXCP_DEBUG: /* Debug interrupt */ 1257 if (env->flags & POWERPC_FLAG_DE) { 1258 /* FIXME: choose one or the other based on CPU type */ 1259 srr0 = SPR_BOOKE_DSRR0; 1260 srr1 = SPR_BOOKE_DSRR1; 1261 1262 env->spr[SPR_BOOKE_CSRR0] = env->nip; 1263 env->spr[SPR_BOOKE_CSRR1] = msr; 1264 1265 /* DBSR already modified by caller */ 1266 } else { 1267 cpu_abort(cs, "Debug exception triggered on unsupported model\n"); 1268 } 1269 break; 1270 case POWERPC_EXCP_SPEU: /* SPE/embedded floating-point unavailable/VPU */ 1271 env->spr[SPR_BOOKE_ESR] = ESR_SPV; 1272 break; 1273 case POWERPC_EXCP_DOORI: /* Embedded doorbell interrupt */ 1274 break; 1275 case POWERPC_EXCP_DOORCI: /* Embedded doorbell critical interrupt */ 1276 srr0 = SPR_BOOKE_CSRR0; 1277 srr1 = SPR_BOOKE_CSRR1; 1278 break; 1279 case POWERPC_EXCP_RESET: /* System reset exception */ 1280 if (FIELD_EX64(env->msr, MSR, POW)) { 1281 cpu_abort(cs, "Trying to deliver power-saving system reset " 1282 "exception %d with no HV support\n", excp); 1283 } 1284 break; 1285 case POWERPC_EXCP_EFPDI: /* Embedded floating-point data interrupt */ 1286 case POWERPC_EXCP_EFPRI: /* Embedded floating-point round interrupt */ 1287 cpu_abort(cs, "%s exception not implemented\n", 1288 powerpc_excp_name(excp)); 1289 break; 1290 default: 1291 cpu_abort(cs, "Invalid PowerPC exception %d. Aborting\n", excp); 1292 break; 1293 } 1294 1295 #if defined(TARGET_PPC64) 1296 if (env->spr[SPR_BOOKE_EPCR] & EPCR_ICM) { 1297 /* Cat.64-bit: EPCR.ICM is copied to MSR.CM */ 1298 new_msr |= (target_ulong)1 << MSR_CM; 1299 } else { 1300 vector = (uint32_t)vector; 1301 } 1302 #endif 1303 1304 /* Save PC */ 1305 env->spr[srr0] = env->nip; 1306 1307 /* Save MSR */ 1308 env->spr[srr1] = msr; 1309 1310 powerpc_set_excp_state(cpu, vector, new_msr); 1311 } 1312 1313 /* 1314 * When running a nested HV guest under vhyp, external interrupts are 1315 * delivered as HVIRT. 1316 */ 1317 static bool books_vhyp_promotes_external_to_hvirt(PowerPCCPU *cpu) 1318 { 1319 if (cpu->vhyp) { 1320 return vhyp_cpu_in_nested(cpu); 1321 } 1322 return false; 1323 } 1324 1325 #ifdef TARGET_PPC64 1326 /* 1327 * When running under vhyp, hcalls are always intercepted and sent to the 1328 * vhc->hypercall handler. 
1329 */ 1330 static bool books_vhyp_handles_hcall(PowerPCCPU *cpu) 1331 { 1332 if (cpu->vhyp) { 1333 return !vhyp_cpu_in_nested(cpu); 1334 } 1335 return false; 1336 } 1337 1338 /* 1339 * When running a nested KVM HV guest under vhyp, HV exceptions are not 1340 * delivered to the guest (because there is no concept of HV support), but 1341 * rather they are sent tothe vhyp to exit from the L2 back to the L1 and 1342 * return from the H_ENTER_NESTED hypercall. 1343 */ 1344 static bool books_vhyp_handles_hv_excp(PowerPCCPU *cpu) 1345 { 1346 if (cpu->vhyp) { 1347 return vhyp_cpu_in_nested(cpu); 1348 } 1349 return false; 1350 } 1351 1352 #ifdef CONFIG_TCG 1353 static bool is_prefix_insn(CPUPPCState *env, uint32_t insn) 1354 { 1355 if (!(env->insns_flags2 & PPC2_ISA310)) { 1356 return false; 1357 } 1358 return ((insn & 0xfc000000) == 0x04000000); 1359 } 1360 1361 static bool is_prefix_insn_excp(PowerPCCPU *cpu, int excp) 1362 { 1363 CPUPPCState *env = &cpu->env; 1364 1365 if (!tcg_enabled()) { 1366 /* 1367 * This does not load instructions and set the prefix bit correctly 1368 * for injected interrupts with KVM. That may have to be discovered 1369 * and set by the KVM layer before injecting. 1370 */ 1371 return false; 1372 } 1373 1374 switch (excp) { 1375 case POWERPC_EXCP_HDSI: 1376 /* HDSI PRTABLE_FAULT has the originating access type in error_code */ 1377 if ((env->spr[SPR_HDSISR] & DSISR_PRTABLE_FAULT) && 1378 (env->error_code == MMU_INST_FETCH)) { 1379 /* 1380 * Fetch failed due to partition scope translation, so prefix 1381 * indication is not relevant (and attempting to load the 1382 * instruction at NIP would cause recursive faults with the same 1383 * translation). 1384 */ 1385 break; 1386 } 1387 /* fall through */ 1388 case POWERPC_EXCP_MCHECK: 1389 case POWERPC_EXCP_DSI: 1390 case POWERPC_EXCP_DSEG: 1391 case POWERPC_EXCP_ALIGN: 1392 case POWERPC_EXCP_PROGRAM: 1393 case POWERPC_EXCP_FPU: 1394 case POWERPC_EXCP_TRACE: 1395 case POWERPC_EXCP_HV_EMU: 1396 case POWERPC_EXCP_VPU: 1397 case POWERPC_EXCP_VSXU: 1398 case POWERPC_EXCP_FU: 1399 case POWERPC_EXCP_HV_FU: { 1400 uint32_t insn = ppc_ldl_code(env, env->nip); 1401 if (is_prefix_insn(env, insn)) { 1402 return true; 1403 } 1404 break; 1405 } 1406 default: 1407 break; 1408 } 1409 return false; 1410 } 1411 #else 1412 static bool is_prefix_insn_excp(PowerPCCPU *cpu, int excp) 1413 { 1414 return false; 1415 } 1416 #endif 1417 1418 static void powerpc_excp_books(PowerPCCPU *cpu, int excp) 1419 { 1420 CPUState *cs = CPU(cpu); 1421 CPUPPCState *env = &cpu->env; 1422 target_ulong msr, new_msr, vector; 1423 int srr0, srr1, lev = -1; 1424 1425 /* new srr1 value excluding must-be-zero bits */ 1426 msr = env->msr & ~0x783f0000ULL; 1427 1428 /* 1429 * new interrupt handler msr preserves existing HV and ME unless 1430 * explicitly overriden 1431 */ 1432 new_msr = env->msr & (((target_ulong)1 << MSR_ME) | MSR_HVB); 1433 1434 /* target registers */ 1435 srr0 = SPR_SRR0; 1436 srr1 = SPR_SRR1; 1437 1438 /* 1439 * check for special resume at 0x100 from doze/nap/sleep/winkle on 1440 * P7/P8/P9 1441 */ 1442 if (env->resume_as_sreset) { 1443 excp = powerpc_reset_wakeup(cs, env, excp, &msr); 1444 } 1445 1446 /* 1447 * We don't want to generate a Hypervisor Emulation Assistance 1448 * Interrupt if we don't have HVB in msr_mask (PAPR mode), 1449 * unless running a nested-hv guest, in which case the L1 1450 * kernel wants the interrupt. 
1451 */ 1452 if (excp == POWERPC_EXCP_HV_EMU && !(env->msr_mask & MSR_HVB) && 1453 !books_vhyp_handles_hv_excp(cpu)) { 1454 excp = POWERPC_EXCP_PROGRAM; 1455 } 1456 1457 vector = env->excp_vectors[excp]; 1458 if (vector == (target_ulong)-1ULL) { 1459 cpu_abort(cs, "Raised an exception without defined vector %d\n", 1460 excp); 1461 } 1462 1463 vector |= env->excp_prefix; 1464 1465 if (is_prefix_insn_excp(cpu, excp)) { 1466 msr |= PPC_BIT(34); 1467 } 1468 1469 switch (excp) { 1470 case POWERPC_EXCP_MCHECK: /* Machine check exception */ 1471 if (!FIELD_EX64(env->msr, MSR, ME)) { 1472 /* 1473 * Machine check exception is not enabled. Enter 1474 * checkstop state. 1475 */ 1476 fprintf(stderr, "Machine check while not allowed. " 1477 "Entering checkstop state\n"); 1478 if (qemu_log_separate()) { 1479 qemu_log("Machine check while not allowed. " 1480 "Entering checkstop state\n"); 1481 } 1482 cs->halted = 1; 1483 cpu_interrupt_exittb(cs); 1484 } 1485 if (env->msr_mask & MSR_HVB) { 1486 /* 1487 * ISA specifies HV, but can be delivered to guest with HV 1488 * clear (e.g., see FWNMI in PAPR). 1489 */ 1490 new_msr |= (target_ulong)MSR_HVB; 1491 } 1492 1493 /* machine check exceptions don't have ME set */ 1494 new_msr &= ~((target_ulong)1 << MSR_ME); 1495 1496 break; 1497 case POWERPC_EXCP_DSI: /* Data storage exception */ 1498 trace_ppc_excp_dsi(env->spr[SPR_DSISR], env->spr[SPR_DAR]); 1499 break; 1500 case POWERPC_EXCP_ISI: /* Instruction storage exception */ 1501 trace_ppc_excp_isi(msr, env->nip); 1502 msr |= env->error_code; 1503 break; 1504 case POWERPC_EXCP_EXTERNAL: /* External input */ 1505 { 1506 bool lpes0; 1507 1508 /* 1509 * LPES0 is only taken into consideration if we support HV 1510 * mode for this CPU. 1511 */ 1512 if (!env->has_hv_mode) { 1513 break; 1514 } 1515 1516 lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0); 1517 1518 if (!lpes0) { 1519 new_msr |= (target_ulong)MSR_HVB; 1520 new_msr |= env->msr & ((target_ulong)1 << MSR_RI); 1521 srr0 = SPR_HSRR0; 1522 srr1 = SPR_HSRR1; 1523 } 1524 1525 break; 1526 } 1527 case POWERPC_EXCP_ALIGN: /* Alignment exception */ 1528 /* Optional DSISR update was removed from ISA v3.0 */ 1529 if (!(env->insns_flags2 & PPC2_ISA300)) { 1530 /* Get rS/rD and rA from faulting opcode */ 1531 /* 1532 * Note: the opcode fields will not be set properly for a 1533 * direct store load/store, but nobody cares as nobody 1534 * actually uses direct store segments. 1535 */ 1536 env->spr[SPR_DSISR] |= (env->error_code & 0x03FF0000) >> 16; 1537 } 1538 break; 1539 case POWERPC_EXCP_PROGRAM: /* Program exception */ 1540 switch (env->error_code & ~0xF) { 1541 case POWERPC_EXCP_FP: 1542 if (!FIELD_EX64_FE(env->msr) || !FIELD_EX64(env->msr, MSR, FP)) { 1543 trace_ppc_excp_fp_ignore(); 1544 powerpc_reset_excp_state(cpu); 1545 return; 1546 } 1547 1548 /* 1549 * FP exceptions always have NIP pointing to the faulting 1550 * instruction, so always use store_next and claim we are 1551 * precise in the MSR. 1552 */ 1553 msr |= 0x00100000; 1554 break; 1555 case POWERPC_EXCP_INVAL: 1556 trace_ppc_excp_inval(env->nip); 1557 msr |= 0x00080000; 1558 break; 1559 case POWERPC_EXCP_PRIV: 1560 msr |= 0x00040000; 1561 break; 1562 case POWERPC_EXCP_TRAP: 1563 msr |= 0x00020000; 1564 break; 1565 default: 1566 /* Should never occur */ 1567 cpu_abort(cs, "Invalid program exception %d. 
Aborting\n", 1568 env->error_code); 1569 break; 1570 } 1571 break; 1572 case POWERPC_EXCP_SYSCALL: /* System call exception */ 1573 lev = env->error_code; 1574 1575 if ((lev == 1) && cpu->vhyp) { 1576 dump_hcall(env); 1577 } else { 1578 dump_syscall(env); 1579 } 1580 1581 /* 1582 * We need to correct the NIP which in this case is supposed 1583 * to point to the next instruction 1584 */ 1585 env->nip += 4; 1586 1587 /* "PAPR mode" built-in hypercall emulation */ 1588 if ((lev == 1) && books_vhyp_handles_hcall(cpu)) { 1589 PPCVirtualHypervisorClass *vhc = 1590 PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp); 1591 vhc->hypercall(cpu->vhyp, cpu); 1592 return; 1593 } 1594 if (env->insns_flags2 & PPC2_ISA310) { 1595 /* ISAv3.1 puts LEV into SRR1 */ 1596 msr |= lev << 20; 1597 } 1598 if (lev == 1) { 1599 new_msr |= (target_ulong)MSR_HVB; 1600 } 1601 break; 1602 case POWERPC_EXCP_SYSCALL_VECTORED: /* scv exception */ 1603 lev = env->error_code; 1604 dump_syscall(env); 1605 env->nip += 4; 1606 new_msr |= env->msr & ((target_ulong)1 << MSR_EE); 1607 new_msr |= env->msr & ((target_ulong)1 << MSR_RI); 1608 1609 vector += lev * 0x20; 1610 1611 env->lr = env->nip; 1612 env->ctr = msr; 1613 break; 1614 case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */ 1615 case POWERPC_EXCP_DECR: /* Decrementer exception */ 1616 break; 1617 case POWERPC_EXCP_RESET: /* System reset exception */ 1618 /* A power-saving exception sets ME, otherwise it is unchanged */ 1619 if (FIELD_EX64(env->msr, MSR, POW)) { 1620 /* indicate that we resumed from power save mode */ 1621 msr |= 0x10000; 1622 new_msr |= ((target_ulong)1 << MSR_ME); 1623 } 1624 if (env->msr_mask & MSR_HVB) { 1625 /* 1626 * ISA specifies HV, but can be delivered to guest with HV 1627 * clear (e.g., see FWNMI in PAPR, NMI injection in QEMU). 
1628 */ 1629 new_msr |= (target_ulong)MSR_HVB; 1630 } else { 1631 if (FIELD_EX64(env->msr, MSR, POW)) { 1632 cpu_abort(cs, "Trying to deliver power-saving system reset " 1633 "exception %d with no HV support\n", excp); 1634 } 1635 } 1636 break; 1637 case POWERPC_EXCP_DSEG: /* Data segment exception */ 1638 case POWERPC_EXCP_ISEG: /* Instruction segment exception */ 1639 case POWERPC_EXCP_TRACE: /* Trace exception */ 1640 case POWERPC_EXCP_SDOOR: /* Doorbell interrupt */ 1641 case POWERPC_EXCP_PERFM: /* Performance monitor interrupt */ 1642 break; 1643 case POWERPC_EXCP_HISI: /* Hypervisor instruction storage exception */ 1644 msr |= env->error_code; 1645 /* fall through */ 1646 case POWERPC_EXCP_HDECR: /* Hypervisor decrementer exception */ 1647 case POWERPC_EXCP_HDSI: /* Hypervisor data storage exception */ 1648 case POWERPC_EXCP_SDOOR_HV: /* Hypervisor Doorbell interrupt */ 1649 case POWERPC_EXCP_HVIRT: /* Hypervisor virtualization */ 1650 srr0 = SPR_HSRR0; 1651 srr1 = SPR_HSRR1; 1652 new_msr |= (target_ulong)MSR_HVB; 1653 new_msr |= env->msr & ((target_ulong)1 << MSR_RI); 1654 break; 1655 #ifdef CONFIG_TCG 1656 case POWERPC_EXCP_HV_EMU: { 1657 uint32_t insn = ppc_ldl_code(env, env->nip); 1658 env->spr[SPR_HEIR] = insn; 1659 if (is_prefix_insn(env, insn)) { 1660 uint32_t insn2 = ppc_ldl_code(env, env->nip + 4); 1661 env->spr[SPR_HEIR] <<= 32; 1662 env->spr[SPR_HEIR] |= insn2; 1663 } 1664 srr0 = SPR_HSRR0; 1665 srr1 = SPR_HSRR1; 1666 new_msr |= (target_ulong)MSR_HVB; 1667 new_msr |= env->msr & ((target_ulong)1 << MSR_RI); 1668 break; 1669 } 1670 #endif 1671 case POWERPC_EXCP_VPU: /* Vector unavailable exception */ 1672 case POWERPC_EXCP_VSXU: /* VSX unavailable exception */ 1673 case POWERPC_EXCP_FU: /* Facility unavailable exception */ 1674 env->spr[SPR_FSCR] |= ((target_ulong)env->error_code << 56); 1675 break; 1676 case POWERPC_EXCP_HV_FU: /* Hypervisor Facility Unavailable Exception */ 1677 env->spr[SPR_HFSCR] |= ((target_ulong)env->error_code << FSCR_IC_POS); 1678 srr0 = SPR_HSRR0; 1679 srr1 = SPR_HSRR1; 1680 new_msr |= (target_ulong)MSR_HVB; 1681 new_msr |= env->msr & ((target_ulong)1 << MSR_RI); 1682 break; 1683 case POWERPC_EXCP_PERFM_EBB: /* Performance Monitor EBB Exception */ 1684 case POWERPC_EXCP_EXTERNAL_EBB: /* External EBB Exception */ 1685 env->spr[SPR_BESCR] &= ~BESCR_GE; 1686 1687 /* 1688 * Save NIP for rfebb insn in SPR_EBBRR. Next nip is 1689 * stored in the EBB Handler SPR_EBBHR. 1690 */ 1691 env->spr[SPR_EBBRR] = env->nip; 1692 powerpc_set_excp_state(cpu, env->spr[SPR_EBBHR], env->msr); 1693 1694 /* 1695 * This exception is handled in userspace. No need to proceed. 1696 */ 1697 return; 1698 case POWERPC_EXCP_THERM: /* Thermal interrupt */ 1699 case POWERPC_EXCP_VPUA: /* Vector assist exception */ 1700 case POWERPC_EXCP_MAINT: /* Maintenance exception */ 1701 case POWERPC_EXCP_HV_MAINT: /* Hypervisor Maintenance exception */ 1702 cpu_abort(cs, "%s exception not implemented\n", 1703 powerpc_excp_name(excp)); 1704 break; 1705 default: 1706 cpu_abort(cs, "Invalid PowerPC exception %d. Aborting\n", excp); 1707 break; 1708 } 1709 1710 /* 1711 * Sort out endianness of interrupt, this differs depending on the 1712 * CPU, the HV mode, etc... 
1713 */ 1714 if (ppc_interrupts_little_endian(cpu, !!(new_msr & MSR_HVB))) { 1715 new_msr |= (target_ulong)1 << MSR_LE; 1716 } 1717 1718 new_msr |= (target_ulong)1 << MSR_SF; 1719 1720 if (excp != POWERPC_EXCP_SYSCALL_VECTORED) { 1721 /* Save PC */ 1722 env->spr[srr0] = env->nip; 1723 1724 /* Save MSR */ 1725 env->spr[srr1] = msr; 1726 } 1727 1728 if ((new_msr & MSR_HVB) && books_vhyp_handles_hv_excp(cpu)) { 1729 PPCVirtualHypervisorClass *vhc = 1730 PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp); 1731 /* Deliver interrupt to L1 by returning from the H_ENTER_NESTED call */ 1732 vhc->deliver_hv_excp(cpu, excp); 1733 1734 powerpc_reset_excp_state(cpu); 1735 1736 } else { 1737 /* Sanity check */ 1738 if (!(env->msr_mask & MSR_HVB) && srr0 == SPR_HSRR0) { 1739 cpu_abort(cs, "Trying to deliver HV exception (HSRR) %d with " 1740 "no HV support\n", excp); 1741 } 1742 1743 /* This can update new_msr and vector if AIL applies */ 1744 ppc_excp_apply_ail(cpu, excp, msr, &new_msr, &vector); 1745 1746 powerpc_set_excp_state(cpu, vector, new_msr); 1747 } 1748 } 1749 #else 1750 static inline void powerpc_excp_books(PowerPCCPU *cpu, int excp) 1751 { 1752 g_assert_not_reached(); 1753 } 1754 #endif 1755 1756 static void powerpc_excp(PowerPCCPU *cpu, int excp) 1757 { 1758 CPUState *cs = CPU(cpu); 1759 CPUPPCState *env = &cpu->env; 1760 1761 if (excp <= POWERPC_EXCP_NONE || excp >= POWERPC_EXCP_NB) { 1762 cpu_abort(cs, "Invalid PowerPC exception %d. Aborting\n", excp); 1763 } 1764 1765 qemu_log_mask(CPU_LOG_INT, "Raise exception at " TARGET_FMT_lx 1766 " => %s (%d) error=%02x\n", env->nip, powerpc_excp_name(excp), 1767 excp, env->error_code); 1768 env->excp_stats[excp]++; 1769 1770 switch (env->excp_model) { 1771 case POWERPC_EXCP_40x: 1772 powerpc_excp_40x(cpu, excp); 1773 break; 1774 case POWERPC_EXCP_6xx: 1775 powerpc_excp_6xx(cpu, excp); 1776 break; 1777 case POWERPC_EXCP_7xx: 1778 powerpc_excp_7xx(cpu, excp); 1779 break; 1780 case POWERPC_EXCP_74xx: 1781 powerpc_excp_74xx(cpu, excp); 1782 break; 1783 case POWERPC_EXCP_BOOKE: 1784 powerpc_excp_booke(cpu, excp); 1785 break; 1786 case POWERPC_EXCP_970: 1787 case POWERPC_EXCP_POWER7: 1788 case POWERPC_EXCP_POWER8: 1789 case POWERPC_EXCP_POWER9: 1790 case POWERPC_EXCP_POWER10: 1791 powerpc_excp_books(cpu, excp); 1792 break; 1793 default: 1794 g_assert_not_reached(); 1795 } 1796 } 1797 1798 void ppc_cpu_do_interrupt(CPUState *cs) 1799 { 1800 PowerPCCPU *cpu = POWERPC_CPU(cs); 1801 1802 powerpc_excp(cpu, cs->exception_index); 1803 } 1804 1805 #if defined(TARGET_PPC64) 1806 #define P7_UNUSED_INTERRUPTS \ 1807 (PPC_INTERRUPT_RESET | PPC_INTERRUPT_HVIRT | PPC_INTERRUPT_CEXT | \ 1808 PPC_INTERRUPT_WDT | PPC_INTERRUPT_CDOORBELL | PPC_INTERRUPT_FIT | \ 1809 PPC_INTERRUPT_PIT | PPC_INTERRUPT_DOORBELL | PPC_INTERRUPT_HDOORBELL | \ 1810 PPC_INTERRUPT_THERM | PPC_INTERRUPT_EBB) 1811 1812 static int p7_interrupt_powersave(CPUPPCState *env) 1813 { 1814 if ((env->pending_interrupts & PPC_INTERRUPT_EXT) && 1815 (env->spr[SPR_LPCR] & LPCR_P7_PECE0)) { 1816 return PPC_INTERRUPT_EXT; 1817 } 1818 if ((env->pending_interrupts & PPC_INTERRUPT_DECR) && 1819 (env->spr[SPR_LPCR] & LPCR_P7_PECE1)) { 1820 return PPC_INTERRUPT_DECR; 1821 } 1822 if ((env->pending_interrupts & PPC_INTERRUPT_MCK) && 1823 (env->spr[SPR_LPCR] & LPCR_P7_PECE2)) { 1824 return PPC_INTERRUPT_MCK; 1825 } 1826 if ((env->pending_interrupts & PPC_INTERRUPT_HMI) && 1827 (env->spr[SPR_LPCR] & LPCR_P7_PECE2)) { 1828 return PPC_INTERRUPT_HMI; 1829 } 1830 if (env->pending_interrupts & PPC_INTERRUPT_RESET) { 1831 return 
PPC_INTERRUPT_RESET; 1832 } 1833 return 0; 1834 } 1835 1836 static int p7_next_unmasked_interrupt(CPUPPCState *env) 1837 { 1838 PowerPCCPU *cpu = env_archcpu(env); 1839 CPUState *cs = CPU(cpu); 1840 /* Ignore MSR[EE] when coming out of some power management states */ 1841 bool msr_ee = FIELD_EX64(env->msr, MSR, EE) || env->resume_as_sreset; 1842 1843 assert((env->pending_interrupts & P7_UNUSED_INTERRUPTS) == 0); 1844 1845 if (cs->halted) { 1846 /* LPCR[PECE] controls which interrupts can exit power-saving mode */ 1847 return p7_interrupt_powersave(env); 1848 } 1849 1850 /* Machine check exception */ 1851 if (env->pending_interrupts & PPC_INTERRUPT_MCK) { 1852 return PPC_INTERRUPT_MCK; 1853 } 1854 1855 /* Hypervisor decrementer exception */ 1856 if (env->pending_interrupts & PPC_INTERRUPT_HDECR) { 1857 /* LPCR will be clear when not supported so this will work */ 1858 bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE); 1859 if ((msr_ee || !FIELD_EX64_HV(env->msr)) && hdice) { 1860 /* HDEC clears on delivery */ 1861 return PPC_INTERRUPT_HDECR; 1862 } 1863 } 1864 1865 /* External interrupt can ignore MSR:EE under some circumstances */ 1866 if (env->pending_interrupts & PPC_INTERRUPT_EXT) { 1867 bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0); 1868 bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC); 1869 /* HEIC blocks delivery to the hypervisor */ 1870 if ((msr_ee && !(heic && FIELD_EX64_HV(env->msr) && 1871 !FIELD_EX64(env->msr, MSR, PR))) || 1872 (env->has_hv_mode && !FIELD_EX64_HV(env->msr) && !lpes0)) { 1873 return PPC_INTERRUPT_EXT; 1874 } 1875 } 1876 if (msr_ee != 0) { 1877 /* Decrementer exception */ 1878 if (env->pending_interrupts & PPC_INTERRUPT_DECR) { 1879 return PPC_INTERRUPT_DECR; 1880 } 1881 if (env->pending_interrupts & PPC_INTERRUPT_PERFM) { 1882 return PPC_INTERRUPT_PERFM; 1883 } 1884 } 1885 1886 return 0; 1887 } 1888 1889 #define P8_UNUSED_INTERRUPTS \ 1890 (PPC_INTERRUPT_RESET | PPC_INTERRUPT_DEBUG | PPC_INTERRUPT_HVIRT | \ 1891 PPC_INTERRUPT_CEXT | PPC_INTERRUPT_WDT | PPC_INTERRUPT_CDOORBELL | \ 1892 PPC_INTERRUPT_FIT | PPC_INTERRUPT_PIT | PPC_INTERRUPT_THERM) 1893 1894 static int p8_interrupt_powersave(CPUPPCState *env) 1895 { 1896 if ((env->pending_interrupts & PPC_INTERRUPT_EXT) && 1897 (env->spr[SPR_LPCR] & LPCR_P8_PECE2)) { 1898 return PPC_INTERRUPT_EXT; 1899 } 1900 if ((env->pending_interrupts & PPC_INTERRUPT_DECR) && 1901 (env->spr[SPR_LPCR] & LPCR_P8_PECE3)) { 1902 return PPC_INTERRUPT_DECR; 1903 } 1904 if ((env->pending_interrupts & PPC_INTERRUPT_MCK) && 1905 (env->spr[SPR_LPCR] & LPCR_P8_PECE4)) { 1906 return PPC_INTERRUPT_MCK; 1907 } 1908 if ((env->pending_interrupts & PPC_INTERRUPT_HMI) && 1909 (env->spr[SPR_LPCR] & LPCR_P8_PECE4)) { 1910 return PPC_INTERRUPT_HMI; 1911 } 1912 if ((env->pending_interrupts & PPC_INTERRUPT_DOORBELL) && 1913 (env->spr[SPR_LPCR] & LPCR_P8_PECE0)) { 1914 return PPC_INTERRUPT_DOORBELL; 1915 } 1916 if ((env->pending_interrupts & PPC_INTERRUPT_HDOORBELL) && 1917 (env->spr[SPR_LPCR] & LPCR_P8_PECE1)) { 1918 return PPC_INTERRUPT_HDOORBELL; 1919 } 1920 if (env->pending_interrupts & PPC_INTERRUPT_RESET) { 1921 return PPC_INTERRUPT_RESET; 1922 } 1923 return 0; 1924 } 1925 1926 static int p8_next_unmasked_interrupt(CPUPPCState *env) 1927 { 1928 PowerPCCPU *cpu = env_archcpu(env); 1929 CPUState *cs = CPU(cpu); 1930 /* Ignore MSR[EE] when coming out of some power management states */ 1931 bool msr_ee = FIELD_EX64(env->msr, MSR, EE) || env->resume_as_sreset; 1932 1933 assert((env->pending_interrupts & P8_UNUSED_INTERRUPTS) == 0); 1934 1935 if 
(cs->halted) { 1936 /* LPCR[PECE] controls which interrupts can exit power-saving mode */ 1937 return p8_interrupt_powersave(env); 1938 } 1939 1940 /* Machine check exception */ 1941 if (env->pending_interrupts & PPC_INTERRUPT_MCK) { 1942 return PPC_INTERRUPT_MCK; 1943 } 1944 1945 /* Hypervisor decrementer exception */ 1946 if (env->pending_interrupts & PPC_INTERRUPT_HDECR) { 1947 /* LPCR will be clear when not supported so this will work */ 1948 bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE); 1949 if ((msr_ee || !FIELD_EX64_HV(env->msr)) && hdice) { 1950 /* HDEC clears on delivery */ 1951 return PPC_INTERRUPT_HDECR; 1952 } 1953 } 1954 1955 /* External interrupt can ignore MSR:EE under some circumstances */ 1956 if (env->pending_interrupts & PPC_INTERRUPT_EXT) { 1957 bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0); 1958 bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC); 1959 /* HEIC blocks delivery to the hypervisor */ 1960 if ((msr_ee && !(heic && FIELD_EX64_HV(env->msr) && 1961 !FIELD_EX64(env->msr, MSR, PR))) || 1962 (env->has_hv_mode && !FIELD_EX64_HV(env->msr) && !lpes0)) { 1963 return PPC_INTERRUPT_EXT; 1964 } 1965 } 1966 if (msr_ee != 0) { 1967 /* Decrementer exception */ 1968 if (env->pending_interrupts & PPC_INTERRUPT_DECR) { 1969 return PPC_INTERRUPT_DECR; 1970 } 1971 if (env->pending_interrupts & PPC_INTERRUPT_DOORBELL) { 1972 return PPC_INTERRUPT_DOORBELL; 1973 } 1974 if (env->pending_interrupts & PPC_INTERRUPT_HDOORBELL) { 1975 return PPC_INTERRUPT_HDOORBELL; 1976 } 1977 if (env->pending_interrupts & PPC_INTERRUPT_PERFM) { 1978 return PPC_INTERRUPT_PERFM; 1979 } 1980 /* EBB exception */ 1981 if (env->pending_interrupts & PPC_INTERRUPT_EBB) { 1982 /* 1983 * EBB exception must be taken in problem state and 1984 * with BESCR_GE set. 1985 */ 1986 if (FIELD_EX64(env->msr, MSR, PR) && 1987 (env->spr[SPR_BESCR] & BESCR_GE)) { 1988 return PPC_INTERRUPT_EBB; 1989 } 1990 } 1991 } 1992 1993 return 0; 1994 } 1995 1996 #define P9_UNUSED_INTERRUPTS \ 1997 (PPC_INTERRUPT_RESET | PPC_INTERRUPT_DEBUG | PPC_INTERRUPT_CEXT | \ 1998 PPC_INTERRUPT_WDT | PPC_INTERRUPT_CDOORBELL | PPC_INTERRUPT_FIT | \ 1999 PPC_INTERRUPT_PIT | PPC_INTERRUPT_THERM) 2000 2001 static int p9_interrupt_powersave(CPUPPCState *env) 2002 { 2003 /* External Exception */ 2004 if ((env->pending_interrupts & PPC_INTERRUPT_EXT) && 2005 (env->spr[SPR_LPCR] & LPCR_EEE)) { 2006 bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC); 2007 if (!heic || !FIELD_EX64_HV(env->msr) || 2008 FIELD_EX64(env->msr, MSR, PR)) { 2009 return PPC_INTERRUPT_EXT; 2010 } 2011 } 2012 /* Decrementer Exception */ 2013 if ((env->pending_interrupts & PPC_INTERRUPT_DECR) && 2014 (env->spr[SPR_LPCR] & LPCR_DEE)) { 2015 return PPC_INTERRUPT_DECR; 2016 } 2017 /* Machine Check or Hypervisor Maintenance Exception */ 2018 if (env->spr[SPR_LPCR] & LPCR_OEE) { 2019 if (env->pending_interrupts & PPC_INTERRUPT_MCK) { 2020 return PPC_INTERRUPT_MCK; 2021 } 2022 if (env->pending_interrupts & PPC_INTERRUPT_HMI) { 2023 return PPC_INTERRUPT_HMI; 2024 } 2025 } 2026 /* Privileged Doorbell Exception */ 2027 if ((env->pending_interrupts & PPC_INTERRUPT_DOORBELL) && 2028 (env->spr[SPR_LPCR] & LPCR_PDEE)) { 2029 return PPC_INTERRUPT_DOORBELL; 2030 } 2031 /* Hypervisor Doorbell Exception */ 2032 if ((env->pending_interrupts & PPC_INTERRUPT_HDOORBELL) && 2033 (env->spr[SPR_LPCR] & LPCR_HDEE)) { 2034 return PPC_INTERRUPT_HDOORBELL; 2035 } 2036 /* Hypervisor virtualization exception */ 2037 if ((env->pending_interrupts & PPC_INTERRUPT_HVIRT) && 2038 (env->spr[SPR_LPCR] & LPCR_HVEE)) { 2039 
return PPC_INTERRUPT_HVIRT; 2040 } 2041 if (env->pending_interrupts & PPC_INTERRUPT_RESET) { 2042 return PPC_INTERRUPT_RESET; 2043 } 2044 return 0; 2045 } 2046 2047 static int p9_next_unmasked_interrupt(CPUPPCState *env) 2048 { 2049 PowerPCCPU *cpu = env_archcpu(env); 2050 CPUState *cs = CPU(cpu); 2051 /* Ignore MSR[EE] when coming out of some power management states */ 2052 bool msr_ee = FIELD_EX64(env->msr, MSR, EE) || env->resume_as_sreset; 2053 2054 assert((env->pending_interrupts & P9_UNUSED_INTERRUPTS) == 0); 2055 2056 if (cs->halted) { 2057 if (env->spr[SPR_PSSCR] & PSSCR_EC) { 2058 /* 2059 * When PSSCR[EC] is set, LPCR[PECE] controls which interrupts can 2060 * wakeup the processor 2061 */ 2062 return p9_interrupt_powersave(env); 2063 } else { 2064 /* 2065 * When it's clear, any system-caused exception exits power-saving 2066 * mode, even the ones that gate on MSR[EE]. 2067 */ 2068 msr_ee = true; 2069 } 2070 } 2071 2072 /* Machine check exception */ 2073 if (env->pending_interrupts & PPC_INTERRUPT_MCK) { 2074 return PPC_INTERRUPT_MCK; 2075 } 2076 2077 /* Hypervisor decrementer exception */ 2078 if (env->pending_interrupts & PPC_INTERRUPT_HDECR) { 2079 /* LPCR will be clear when not supported so this will work */ 2080 bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE); 2081 if ((msr_ee || !FIELD_EX64_HV(env->msr)) && hdice) { 2082 /* HDEC clears on delivery */ 2083 return PPC_INTERRUPT_HDECR; 2084 } 2085 } 2086 2087 /* Hypervisor virtualization interrupt */ 2088 if (env->pending_interrupts & PPC_INTERRUPT_HVIRT) { 2089 /* LPCR will be clear when not supported so this will work */ 2090 bool hvice = !!(env->spr[SPR_LPCR] & LPCR_HVICE); 2091 if ((msr_ee || !FIELD_EX64_HV(env->msr)) && hvice) { 2092 return PPC_INTERRUPT_HVIRT; 2093 } 2094 } 2095 2096 /* External interrupt can ignore MSR:EE under some circumstances */ 2097 if (env->pending_interrupts & PPC_INTERRUPT_EXT) { 2098 bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0); 2099 bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC); 2100 /* HEIC blocks delivery to the hypervisor */ 2101 if ((msr_ee && !(heic && FIELD_EX64_HV(env->msr) && 2102 !FIELD_EX64(env->msr, MSR, PR))) || 2103 (env->has_hv_mode && !FIELD_EX64_HV(env->msr) && !lpes0)) { 2104 return PPC_INTERRUPT_EXT; 2105 } 2106 } 2107 if (msr_ee != 0) { 2108 /* Decrementer exception */ 2109 if (env->pending_interrupts & PPC_INTERRUPT_DECR) { 2110 return PPC_INTERRUPT_DECR; 2111 } 2112 if (env->pending_interrupts & PPC_INTERRUPT_DOORBELL) { 2113 return PPC_INTERRUPT_DOORBELL; 2114 } 2115 if (env->pending_interrupts & PPC_INTERRUPT_HDOORBELL) { 2116 return PPC_INTERRUPT_HDOORBELL; 2117 } 2118 if (env->pending_interrupts & PPC_INTERRUPT_PERFM) { 2119 return PPC_INTERRUPT_PERFM; 2120 } 2121 /* EBB exception */ 2122 if (env->pending_interrupts & PPC_INTERRUPT_EBB) { 2123 /* 2124 * EBB exception must be taken in problem state and 2125 * with BESCR_GE set. 
2126 */ 2127 if (FIELD_EX64(env->msr, MSR, PR) && 2128 (env->spr[SPR_BESCR] & BESCR_GE)) { 2129 return PPC_INTERRUPT_EBB; 2130 } 2131 } 2132 } 2133 2134 return 0; 2135 } 2136 #endif 2137 2138 static int ppc_next_unmasked_interrupt_generic(CPUPPCState *env) 2139 { 2140 bool async_deliver; 2141 2142 /* External reset */ 2143 if (env->pending_interrupts & PPC_INTERRUPT_RESET) { 2144 return PPC_INTERRUPT_RESET; 2145 } 2146 /* Machine check exception */ 2147 if (env->pending_interrupts & PPC_INTERRUPT_MCK) { 2148 return PPC_INTERRUPT_MCK; 2149 } 2150 #if 0 /* TODO */ 2151 /* External debug exception */ 2152 if (env->pending_interrupts & PPC_INTERRUPT_DEBUG) { 2153 return PPC_INTERRUPT_DEBUG; 2154 } 2155 #endif 2156 2157 /* 2158 * For interrupts that gate on MSR:EE, we need to do something a 2159 * bit more subtle, as we need to let them through even when EE is 2160 * clear when coming out of some power management states (in order 2161 * for them to become a 0x100). 2162 */ 2163 async_deliver = FIELD_EX64(env->msr, MSR, EE) || env->resume_as_sreset; 2164 2165 /* Hypervisor decrementer exception */ 2166 if (env->pending_interrupts & PPC_INTERRUPT_HDECR) { 2167 /* LPCR will be clear when not supported so this will work */ 2168 bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE); 2169 if ((async_deliver || !FIELD_EX64_HV(env->msr)) && hdice) { 2170 /* HDEC clears on delivery */ 2171 return PPC_INTERRUPT_HDECR; 2172 } 2173 } 2174 2175 /* Hypervisor virtualization interrupt */ 2176 if (env->pending_interrupts & PPC_INTERRUPT_HVIRT) { 2177 /* LPCR will be clear when not supported so this will work */ 2178 bool hvice = !!(env->spr[SPR_LPCR] & LPCR_HVICE); 2179 if ((async_deliver || !FIELD_EX64_HV(env->msr)) && hvice) { 2180 return PPC_INTERRUPT_HVIRT; 2181 } 2182 } 2183 2184 /* External interrupt can ignore MSR:EE under some circumstances */ 2185 if (env->pending_interrupts & PPC_INTERRUPT_EXT) { 2186 bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0); 2187 bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC); 2188 /* HEIC blocks delivery to the hypervisor */ 2189 if ((async_deliver && !(heic && FIELD_EX64_HV(env->msr) && 2190 !FIELD_EX64(env->msr, MSR, PR))) || 2191 (env->has_hv_mode && !FIELD_EX64_HV(env->msr) && !lpes0)) { 2192 return PPC_INTERRUPT_EXT; 2193 } 2194 } 2195 if (FIELD_EX64(env->msr, MSR, CE)) { 2196 /* External critical interrupt */ 2197 if (env->pending_interrupts & PPC_INTERRUPT_CEXT) { 2198 return PPC_INTERRUPT_CEXT; 2199 } 2200 } 2201 if (async_deliver != 0) { 2202 /* Watchdog timer on embedded PowerPC */ 2203 if (env->pending_interrupts & PPC_INTERRUPT_WDT) { 2204 return PPC_INTERRUPT_WDT; 2205 } 2206 if (env->pending_interrupts & PPC_INTERRUPT_CDOORBELL) { 2207 return PPC_INTERRUPT_CDOORBELL; 2208 } 2209 /* Fixed interval timer on embedded PowerPC */ 2210 if (env->pending_interrupts & PPC_INTERRUPT_FIT) { 2211 return PPC_INTERRUPT_FIT; 2212 } 2213 /* Programmable interval timer on embedded PowerPC */ 2214 if (env->pending_interrupts & PPC_INTERRUPT_PIT) { 2215 return PPC_INTERRUPT_PIT; 2216 } 2217 /* Decrementer exception */ 2218 if (env->pending_interrupts & PPC_INTERRUPT_DECR) { 2219 return PPC_INTERRUPT_DECR; 2220 } 2221 if (env->pending_interrupts & PPC_INTERRUPT_DOORBELL) { 2222 return PPC_INTERRUPT_DOORBELL; 2223 } 2224 if (env->pending_interrupts & PPC_INTERRUPT_HDOORBELL) { 2225 return PPC_INTERRUPT_HDOORBELL; 2226 } 2227 if (env->pending_interrupts & PPC_INTERRUPT_PERFM) { 2228 return PPC_INTERRUPT_PERFM; 2229 } 2230 /* Thermal interrupt */ 2231 if (env->pending_interrupts & 
PPC_INTERRUPT_THERM) { 2232 return PPC_INTERRUPT_THERM; 2233 } 2234 /* EBB exception */ 2235 if (env->pending_interrupts & PPC_INTERRUPT_EBB) { 2236 /* 2237 * EBB exception must be taken in problem state and 2238 * with BESCR_GE set. 2239 */ 2240 if (FIELD_EX64(env->msr, MSR, PR) && 2241 (env->spr[SPR_BESCR] & BESCR_GE)) { 2242 return PPC_INTERRUPT_EBB; 2243 } 2244 } 2245 } 2246 2247 return 0; 2248 } 2249 2250 static int ppc_next_unmasked_interrupt(CPUPPCState *env) 2251 { 2252 switch (env->excp_model) { 2253 #if defined(TARGET_PPC64) 2254 case POWERPC_EXCP_POWER7: 2255 return p7_next_unmasked_interrupt(env); 2256 case POWERPC_EXCP_POWER8: 2257 return p8_next_unmasked_interrupt(env); 2258 case POWERPC_EXCP_POWER9: 2259 case POWERPC_EXCP_POWER10: 2260 return p9_next_unmasked_interrupt(env); 2261 #endif 2262 default: 2263 return ppc_next_unmasked_interrupt_generic(env); 2264 } 2265 } 2266 2267 /* 2268 * Sets CPU_INTERRUPT_HARD if there is at least one unmasked interrupt to be 2269 * delivered and clears CPU_INTERRUPT_HARD otherwise. 2270 * 2271 * This method is called by ppc_set_interrupt when an interrupt is raised or 2272 * lowered, and should also be called whenever an interrupt masking condition 2273 * is changed, e.g.: 2274 * - When relevant bits of MSR are altered, like EE, HV, PR, etc.; 2275 * - When relevant bits of LPCR are altered, like PECE, HDICE, HVICE, etc.; 2276 * - When PSSCR[EC] or env->resume_as_sreset are changed; 2277 * - When cs->halted is changed and the CPU has a different interrupt masking 2278 * logic in power-saving mode (e.g., POWER7/8/9/10); 2279 */ 2280 void ppc_maybe_interrupt(CPUPPCState *env) 2281 { 2282 CPUState *cs = env_cpu(env); 2283 QEMU_IOTHREAD_LOCK_GUARD(); 2284 2285 if (ppc_next_unmasked_interrupt(env)) { 2286 cpu_interrupt(cs, CPU_INTERRUPT_HARD); 2287 } else { 2288 cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD); 2289 } 2290 } 2291 2292 #if defined(TARGET_PPC64) 2293 static void p7_deliver_interrupt(CPUPPCState *env, int interrupt) 2294 { 2295 PowerPCCPU *cpu = env_archcpu(env); 2296 CPUState *cs = env_cpu(env); 2297 2298 switch (interrupt) { 2299 case PPC_INTERRUPT_MCK: /* Machine check exception */ 2300 env->pending_interrupts &= ~PPC_INTERRUPT_MCK; 2301 powerpc_excp(cpu, POWERPC_EXCP_MCHECK); 2302 break; 2303 2304 case PPC_INTERRUPT_HDECR: /* Hypervisor decrementer exception */ 2305 /* HDEC clears on delivery */ 2306 env->pending_interrupts &= ~PPC_INTERRUPT_HDECR; 2307 powerpc_excp(cpu, POWERPC_EXCP_HDECR); 2308 break; 2309 2310 case PPC_INTERRUPT_EXT: 2311 if (books_vhyp_promotes_external_to_hvirt(cpu)) { 2312 powerpc_excp(cpu, POWERPC_EXCP_HVIRT); 2313 } else { 2314 powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL); 2315 } 2316 break; 2317 2318 case PPC_INTERRUPT_DECR: /* Decrementer exception */ 2319 powerpc_excp(cpu, POWERPC_EXCP_DECR); 2320 break; 2321 case PPC_INTERRUPT_PERFM: 2322 env->pending_interrupts &= ~PPC_INTERRUPT_PERFM; 2323 powerpc_excp(cpu, POWERPC_EXCP_PERFM); 2324 break; 2325 case 0: 2326 /* 2327 * This is a bug ! It means that has_work took us out of halt without 2328 * anything to deliver while in a PM state that requires getting 2329 * out via a 0x100 2330 * 2331 * This means we will incorrectly execute past the power management 2332 * instruction instead of triggering a reset. 2333 * 2334 * It generally means a discrepancy between the wakeup conditions in the 2335 * processor has_work implementation and the logic in this function. 
2336 */ 2337 assert(!env->resume_as_sreset); 2338 break; 2339 default: 2340 cpu_abort(cs, "Invalid PowerPC interrupt %d. Aborting\n", interrupt); 2341 } 2342 } 2343 2344 static void p8_deliver_interrupt(CPUPPCState *env, int interrupt) 2345 { 2346 PowerPCCPU *cpu = env_archcpu(env); 2347 CPUState *cs = env_cpu(env); 2348 2349 switch (interrupt) { 2350 case PPC_INTERRUPT_MCK: /* Machine check exception */ 2351 env->pending_interrupts &= ~PPC_INTERRUPT_MCK; 2352 powerpc_excp(cpu, POWERPC_EXCP_MCHECK); 2353 break; 2354 2355 case PPC_INTERRUPT_HDECR: /* Hypervisor decrementer exception */ 2356 /* HDEC clears on delivery */ 2357 env->pending_interrupts &= ~PPC_INTERRUPT_HDECR; 2358 powerpc_excp(cpu, POWERPC_EXCP_HDECR); 2359 break; 2360 2361 case PPC_INTERRUPT_EXT: 2362 if (books_vhyp_promotes_external_to_hvirt(cpu)) { 2363 powerpc_excp(cpu, POWERPC_EXCP_HVIRT); 2364 } else { 2365 powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL); 2366 } 2367 break; 2368 2369 case PPC_INTERRUPT_DECR: /* Decrementer exception */ 2370 powerpc_excp(cpu, POWERPC_EXCP_DECR); 2371 break; 2372 case PPC_INTERRUPT_DOORBELL: 2373 env->pending_interrupts &= ~PPC_INTERRUPT_DOORBELL; 2374 if (is_book3s_arch2x(env)) { 2375 powerpc_excp(cpu, POWERPC_EXCP_SDOOR); 2376 } else { 2377 powerpc_excp(cpu, POWERPC_EXCP_DOORI); 2378 } 2379 break; 2380 case PPC_INTERRUPT_HDOORBELL: 2381 env->pending_interrupts &= ~PPC_INTERRUPT_HDOORBELL; 2382 powerpc_excp(cpu, POWERPC_EXCP_SDOOR_HV); 2383 break; 2384 case PPC_INTERRUPT_PERFM: 2385 env->pending_interrupts &= ~PPC_INTERRUPT_PERFM; 2386 powerpc_excp(cpu, POWERPC_EXCP_PERFM); 2387 break; 2388 case PPC_INTERRUPT_EBB: /* EBB exception */ 2389 env->pending_interrupts &= ~PPC_INTERRUPT_EBB; 2390 if (env->spr[SPR_BESCR] & BESCR_PMEO) { 2391 powerpc_excp(cpu, POWERPC_EXCP_PERFM_EBB); 2392 } else if (env->spr[SPR_BESCR] & BESCR_EEO) { 2393 powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL_EBB); 2394 } 2395 break; 2396 case 0: 2397 /* 2398 * This is a bug ! It means that has_work took us out of halt without 2399 * anything to deliver while in a PM state that requires getting 2400 * out via a 0x100 2401 * 2402 * This means we will incorrectly execute past the power management 2403 * instruction instead of triggering a reset. 2404 * 2405 * It generally means a discrepancy between the wakeup conditions in the 2406 * processor has_work implementation and the logic in this function. 2407 */ 2408 assert(!env->resume_as_sreset); 2409 break; 2410 default: 2411 cpu_abort(cs, "Invalid PowerPC interrupt %d. Aborting\n", interrupt); 2412 } 2413 } 2414 2415 static void p9_deliver_interrupt(CPUPPCState *env, int interrupt) 2416 { 2417 PowerPCCPU *cpu = env_archcpu(env); 2418 CPUState *cs = env_cpu(env); 2419 2420 if (cs->halted && !(env->spr[SPR_PSSCR] & PSSCR_EC) && 2421 !FIELD_EX64(env->msr, MSR, EE)) { 2422 /* 2423 * A pending interrupt took us out of power-saving, but MSR[EE] says 2424 * that we should return to NIP+4 instead of delivering it. 
2425 */ 2426 return; 2427 } 2428 2429 switch (interrupt) { 2430 case PPC_INTERRUPT_MCK: /* Machine check exception */ 2431 env->pending_interrupts &= ~PPC_INTERRUPT_MCK; 2432 powerpc_excp(cpu, POWERPC_EXCP_MCHECK); 2433 break; 2434 2435 case PPC_INTERRUPT_HDECR: /* Hypervisor decrementer exception */ 2436 /* HDEC clears on delivery */ 2437 env->pending_interrupts &= ~PPC_INTERRUPT_HDECR; 2438 powerpc_excp(cpu, POWERPC_EXCP_HDECR); 2439 break; 2440 case PPC_INTERRUPT_HVIRT: /* Hypervisor virtualization interrupt */ 2441 powerpc_excp(cpu, POWERPC_EXCP_HVIRT); 2442 break; 2443 2444 case PPC_INTERRUPT_EXT: 2445 if (books_vhyp_promotes_external_to_hvirt(cpu)) { 2446 powerpc_excp(cpu, POWERPC_EXCP_HVIRT); 2447 } else { 2448 powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL); 2449 } 2450 break; 2451 2452 case PPC_INTERRUPT_DECR: /* Decrementer exception */ 2453 powerpc_excp(cpu, POWERPC_EXCP_DECR); 2454 break; 2455 case PPC_INTERRUPT_DOORBELL: 2456 env->pending_interrupts &= ~PPC_INTERRUPT_DOORBELL; 2457 powerpc_excp(cpu, POWERPC_EXCP_SDOOR); 2458 break; 2459 case PPC_INTERRUPT_HDOORBELL: 2460 env->pending_interrupts &= ~PPC_INTERRUPT_HDOORBELL; 2461 powerpc_excp(cpu, POWERPC_EXCP_SDOOR_HV); 2462 break; 2463 case PPC_INTERRUPT_PERFM: 2464 env->pending_interrupts &= ~PPC_INTERRUPT_PERFM; 2465 powerpc_excp(cpu, POWERPC_EXCP_PERFM); 2466 break; 2467 case PPC_INTERRUPT_EBB: /* EBB exception */ 2468 env->pending_interrupts &= ~PPC_INTERRUPT_EBB; 2469 if (env->spr[SPR_BESCR] & BESCR_PMEO) { 2470 powerpc_excp(cpu, POWERPC_EXCP_PERFM_EBB); 2471 } else if (env->spr[SPR_BESCR] & BESCR_EEO) { 2472 powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL_EBB); 2473 } 2474 break; 2475 case 0: 2476 /* 2477 * This is a bug ! It means that has_work took us out of halt without 2478 * anything to deliver while in a PM state that requires getting 2479 * out via a 0x100 2480 * 2481 * This means we will incorrectly execute past the power management 2482 * instruction instead of triggering a reset. 2483 * 2484 * It generally means a discrepancy between the wakeup conditions in the 2485 * processor has_work implementation and the logic in this function. 2486 */ 2487 assert(!env->resume_as_sreset); 2488 break; 2489 default: 2490 cpu_abort(cs, "Invalid PowerPC interrupt %d. 
Aborting\n", interrupt); 2491 } 2492 } 2493 #endif 2494 2495 static void ppc_deliver_interrupt_generic(CPUPPCState *env, int interrupt) 2496 { 2497 PowerPCCPU *cpu = env_archcpu(env); 2498 CPUState *cs = env_cpu(env); 2499 2500 switch (interrupt) { 2501 case PPC_INTERRUPT_RESET: /* External reset */ 2502 env->pending_interrupts &= ~PPC_INTERRUPT_RESET; 2503 powerpc_excp(cpu, POWERPC_EXCP_RESET); 2504 break; 2505 case PPC_INTERRUPT_MCK: /* Machine check exception */ 2506 env->pending_interrupts &= ~PPC_INTERRUPT_MCK; 2507 powerpc_excp(cpu, POWERPC_EXCP_MCHECK); 2508 break; 2509 2510 case PPC_INTERRUPT_HDECR: /* Hypervisor decrementer exception */ 2511 /* HDEC clears on delivery */ 2512 env->pending_interrupts &= ~PPC_INTERRUPT_HDECR; 2513 powerpc_excp(cpu, POWERPC_EXCP_HDECR); 2514 break; 2515 case PPC_INTERRUPT_HVIRT: /* Hypervisor virtualization interrupt */ 2516 powerpc_excp(cpu, POWERPC_EXCP_HVIRT); 2517 break; 2518 2519 case PPC_INTERRUPT_EXT: 2520 if (books_vhyp_promotes_external_to_hvirt(cpu)) { 2521 powerpc_excp(cpu, POWERPC_EXCP_HVIRT); 2522 } else { 2523 powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL); 2524 } 2525 break; 2526 case PPC_INTERRUPT_CEXT: /* External critical interrupt */ 2527 powerpc_excp(cpu, POWERPC_EXCP_CRITICAL); 2528 break; 2529 2530 case PPC_INTERRUPT_WDT: /* Watchdog timer on embedded PowerPC */ 2531 env->pending_interrupts &= ~PPC_INTERRUPT_WDT; 2532 powerpc_excp(cpu, POWERPC_EXCP_WDT); 2533 break; 2534 case PPC_INTERRUPT_CDOORBELL: 2535 env->pending_interrupts &= ~PPC_INTERRUPT_CDOORBELL; 2536 powerpc_excp(cpu, POWERPC_EXCP_DOORCI); 2537 break; 2538 case PPC_INTERRUPT_FIT: /* Fixed interval timer on embedded PowerPC */ 2539 env->pending_interrupts &= ~PPC_INTERRUPT_FIT; 2540 powerpc_excp(cpu, POWERPC_EXCP_FIT); 2541 break; 2542 case PPC_INTERRUPT_PIT: /* Programmable interval timer on embedded ppc */ 2543 env->pending_interrupts &= ~PPC_INTERRUPT_PIT; 2544 powerpc_excp(cpu, POWERPC_EXCP_PIT); 2545 break; 2546 case PPC_INTERRUPT_DECR: /* Decrementer exception */ 2547 if (ppc_decr_clear_on_delivery(env)) { 2548 env->pending_interrupts &= ~PPC_INTERRUPT_DECR; 2549 } 2550 powerpc_excp(cpu, POWERPC_EXCP_DECR); 2551 break; 2552 case PPC_INTERRUPT_DOORBELL: 2553 env->pending_interrupts &= ~PPC_INTERRUPT_DOORBELL; 2554 if (is_book3s_arch2x(env)) { 2555 powerpc_excp(cpu, POWERPC_EXCP_SDOOR); 2556 } else { 2557 powerpc_excp(cpu, POWERPC_EXCP_DOORI); 2558 } 2559 break; 2560 case PPC_INTERRUPT_HDOORBELL: 2561 env->pending_interrupts &= ~PPC_INTERRUPT_HDOORBELL; 2562 powerpc_excp(cpu, POWERPC_EXCP_SDOOR_HV); 2563 break; 2564 case PPC_INTERRUPT_PERFM: 2565 env->pending_interrupts &= ~PPC_INTERRUPT_PERFM; 2566 powerpc_excp(cpu, POWERPC_EXCP_PERFM); 2567 break; 2568 case PPC_INTERRUPT_THERM: /* Thermal interrupt */ 2569 env->pending_interrupts &= ~PPC_INTERRUPT_THERM; 2570 powerpc_excp(cpu, POWERPC_EXCP_THERM); 2571 break; 2572 case PPC_INTERRUPT_EBB: /* EBB exception */ 2573 env->pending_interrupts &= ~PPC_INTERRUPT_EBB; 2574 if (env->spr[SPR_BESCR] & BESCR_PMEO) { 2575 powerpc_excp(cpu, POWERPC_EXCP_PERFM_EBB); 2576 } else if (env->spr[SPR_BESCR] & BESCR_EEO) { 2577 powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL_EBB); 2578 } 2579 break; 2580 case 0: 2581 /* 2582 * This is a bug ! It means that has_work took us out of halt without 2583 * anything to deliver while in a PM state that requires getting 2584 * out via a 0x100 2585 * 2586 * This means we will incorrectly execute past the power management 2587 * instruction instead of triggering a reset. 
2588 * 2589 * It generally means a discrepancy between the wakeup conditions in the 2590 * processor has_work implementation and the logic in this function. 2591 */ 2592 assert(!env->resume_as_sreset); 2593 break; 2594 default: 2595 cpu_abort(cs, "Invalid PowerPC interrupt %d. Aborting\n", interrupt); 2596 } 2597 } 2598 2599 static void ppc_deliver_interrupt(CPUPPCState *env, int interrupt) 2600 { 2601 switch (env->excp_model) { 2602 #if defined(TARGET_PPC64) 2603 case POWERPC_EXCP_POWER7: 2604 p7_deliver_interrupt(env, interrupt); 2605 break; 2606 case POWERPC_EXCP_POWER8: 2607 p8_deliver_interrupt(env, interrupt); 2608 break; 2609 case POWERPC_EXCP_POWER9: 2610 case POWERPC_EXCP_POWER10: 2611 p9_deliver_interrupt(env, interrupt); 2612 break; 2613 #endif 2614 default: 2615 ppc_deliver_interrupt_generic(env, interrupt); 2616 } 2617 } 2618 2619 void ppc_cpu_do_system_reset(CPUState *cs) 2620 { 2621 PowerPCCPU *cpu = POWERPC_CPU(cs); 2622 2623 powerpc_excp(cpu, POWERPC_EXCP_RESET); 2624 } 2625 2626 void ppc_cpu_do_fwnmi_machine_check(CPUState *cs, target_ulong vector) 2627 { 2628 PowerPCCPU *cpu = POWERPC_CPU(cs); 2629 CPUPPCState *env = &cpu->env; 2630 target_ulong msr = 0; 2631 2632 /* 2633 * Set MSR and NIP for the handler, SRR0/1, DAR and DSISR have already 2634 * been set by KVM. 2635 */ 2636 msr = (1ULL << MSR_ME); 2637 msr |= env->msr & (1ULL << MSR_SF); 2638 if (ppc_interrupts_little_endian(cpu, false)) { 2639 msr |= (1ULL << MSR_LE); 2640 } 2641 2642 /* Anything for nested required here? MSR[HV] bit? */ 2643 2644 powerpc_set_excp_state(cpu, vector, msr); 2645 } 2646 2647 bool ppc_cpu_exec_interrupt(CPUState *cs, int interrupt_request) 2648 { 2649 PowerPCCPU *cpu = POWERPC_CPU(cs); 2650 CPUPPCState *env = &cpu->env; 2651 int interrupt; 2652 2653 if ((interrupt_request & CPU_INTERRUPT_HARD) == 0) { 2654 return false; 2655 } 2656 2657 interrupt = ppc_next_unmasked_interrupt(env); 2658 if (interrupt == 0) { 2659 return false; 2660 } 2661 2662 ppc_deliver_interrupt(env, interrupt); 2663 if (env->pending_interrupts == 0) { 2664 cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD); 2665 } 2666 return true; 2667 } 2668 2669 #endif /* !CONFIG_USER_ONLY */ 2670 2671 /*****************************************************************************/ 2672 /* Exceptions processing helpers */ 2673 2674 void raise_exception_err_ra(CPUPPCState *env, uint32_t exception, 2675 uint32_t error_code, uintptr_t raddr) 2676 { 2677 CPUState *cs = env_cpu(env); 2678 2679 cs->exception_index = exception; 2680 env->error_code = error_code; 2681 cpu_loop_exit_restore(cs, raddr); 2682 } 2683 2684 void raise_exception_err(CPUPPCState *env, uint32_t exception, 2685 uint32_t error_code) 2686 { 2687 raise_exception_err_ra(env, exception, error_code, 0); 2688 } 2689 2690 void raise_exception(CPUPPCState *env, uint32_t exception) 2691 { 2692 raise_exception_err_ra(env, exception, 0, 0); 2693 } 2694 2695 void raise_exception_ra(CPUPPCState *env, uint32_t exception, 2696 uintptr_t raddr) 2697 { 2698 raise_exception_err_ra(env, exception, 0, raddr); 2699 } 2700 2701 #ifdef CONFIG_TCG 2702 void helper_raise_exception_err(CPUPPCState *env, uint32_t exception, 2703 uint32_t error_code) 2704 { 2705 raise_exception_err_ra(env, exception, error_code, 0); 2706 } 2707 2708 void helper_raise_exception(CPUPPCState *env, uint32_t exception) 2709 { 2710 raise_exception_err_ra(env, exception, 0, 0); 2711 } 2712 #endif 2713 2714 #if !defined(CONFIG_USER_ONLY) 2715 #ifdef CONFIG_TCG 2716 void helper_store_msr(CPUPPCState *env, target_ulong val) 
2717 { 2718 uint32_t excp = hreg_store_msr(env, val, 0); 2719 2720 if (excp != 0) { 2721 CPUState *cs = env_cpu(env); 2722 cpu_interrupt_exittb(cs); 2723 raise_exception(env, excp); 2724 } 2725 } 2726 2727 void helper_ppc_maybe_interrupt(CPUPPCState *env) 2728 { 2729 ppc_maybe_interrupt(env); 2730 } 2731 2732 #if defined(TARGET_PPC64) 2733 void helper_scv(CPUPPCState *env, uint32_t lev) 2734 { 2735 if (env->spr[SPR_FSCR] & (1ull << FSCR_SCV)) { 2736 raise_exception_err(env, POWERPC_EXCP_SYSCALL_VECTORED, lev); 2737 } else { 2738 raise_exception_err(env, POWERPC_EXCP_FU, FSCR_IC_SCV); 2739 } 2740 } 2741 2742 void helper_pminsn(CPUPPCState *env, uint32_t insn) 2743 { 2744 CPUState *cs; 2745 2746 cs = env_cpu(env); 2747 cs->halted = 1; 2748 2749 /* Condition for waking up at 0x100 */ 2750 env->resume_as_sreset = (insn != PPC_PM_STOP) || 2751 (env->spr[SPR_PSSCR] & PSSCR_EC); 2752 2753 ppc_maybe_interrupt(env); 2754 } 2755 #endif /* defined(TARGET_PPC64) */ 2756 2757 static void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr) 2758 { 2759 CPUState *cs = env_cpu(env); 2760 2761 /* MSR:POW cannot be set by any form of rfi */ 2762 msr &= ~(1ULL << MSR_POW); 2763 2764 /* MSR:TGPR cannot be set by any form of rfi */ 2765 if (env->flags & POWERPC_FLAG_TGPR) 2766 msr &= ~(1ULL << MSR_TGPR); 2767 2768 #if defined(TARGET_PPC64) 2769 /* Switching to 32-bit ? Crop the nip */ 2770 if (!msr_is_64bit(env, msr)) { 2771 nip = (uint32_t)nip; 2772 } 2773 #else 2774 nip = (uint32_t)nip; 2775 #endif 2776 /* XXX: beware: this is false if VLE is supported */ 2777 env->nip = nip & ~((target_ulong)0x00000003); 2778 hreg_store_msr(env, msr, 1); 2779 trace_ppc_excp_rfi(env->nip, env->msr); 2780 /* 2781 * No need to raise an exception here, as rfi is always the last 2782 * insn of a TB 2783 */ 2784 cpu_interrupt_exittb(cs); 2785 /* Reset the reservation */ 2786 env->reserve_addr = -1; 2787 2788 /* Context synchronizing: check if TCG TLB needs flush */ 2789 check_tlb_flush(env, false); 2790 } 2791 2792 void helper_rfi(CPUPPCState *env) 2793 { 2794 do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1] & 0xfffffffful); 2795 } 2796 2797 #if defined(TARGET_PPC64) 2798 void helper_rfid(CPUPPCState *env) 2799 { 2800 /* 2801 * The architecture defines a number of rules for which bits can 2802 * change but in practice, we handle this in hreg_store_msr() 2803 * which will be called by do_rfi(), so there is no need to filter 2804 * here 2805 */ 2806 do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1]); 2807 } 2808 2809 void helper_rfscv(CPUPPCState *env) 2810 { 2811 do_rfi(env, env->lr, env->ctr); 2812 } 2813 2814 void helper_hrfid(CPUPPCState *env) 2815 { 2816 do_rfi(env, env->spr[SPR_HSRR0], env->spr[SPR_HSRR1]); 2817 } 2818 #endif 2819 2820 #if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) 2821 void helper_rfebb(CPUPPCState *env, target_ulong s) 2822 { 2823 target_ulong msr = env->msr; 2824 2825 /* 2826 * Handling of BESCR bits 32:33 according to PowerISA v3.1: 2827 * 2828 * "If BESCR 32:33 != 0b00 the instruction is treated as if 2829 * the instruction form were invalid." 2830 */ 2831 if (env->spr[SPR_BESCR] & BESCR_INVALID) { 2832 raise_exception_err(env, POWERPC_EXCP_PROGRAM, 2833 POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL); 2834 } 2835 2836 env->nip = env->spr[SPR_EBBRR]; 2837 2838 /* Switching to 32-bit ? 
Crop the nip */ 2839 if (!msr_is_64bit(env, msr)) { 2840 env->nip = (uint32_t)env->spr[SPR_EBBRR]; 2841 } 2842 2843 if (s) { 2844 env->spr[SPR_BESCR] |= BESCR_GE; 2845 } else { 2846 env->spr[SPR_BESCR] &= ~BESCR_GE; 2847 } 2848 } 2849 2850 /* 2851 * Triggers or queues an 'ebb_excp' EBB exception. All checks 2852 * but FSCR, HFSCR and msr_pr must be done beforehand. 2853 * 2854 * PowerISA v3.1 isn't clear about whether an EBB should be 2855 * postponed or cancelled if the EBB facility is unavailable. 2856 * Our assumption here is that the EBB is cancelled if both 2857 * FSCR and HFSCR EBB facilities aren't available. 2858 */ 2859 static void do_ebb(CPUPPCState *env, int ebb_excp) 2860 { 2861 PowerPCCPU *cpu = env_archcpu(env); 2862 2863 /* 2864 * FSCR_EBB and FSCR_IC_EBB are the same bits used with 2865 * HFSCR. 2866 */ 2867 helper_fscr_facility_check(env, FSCR_EBB, 0, FSCR_IC_EBB); 2868 helper_hfscr_facility_check(env, FSCR_EBB, "EBB", FSCR_IC_EBB); 2869 2870 if (ebb_excp == POWERPC_EXCP_PERFM_EBB) { 2871 env->spr[SPR_BESCR] |= BESCR_PMEO; 2872 } else if (ebb_excp == POWERPC_EXCP_EXTERNAL_EBB) { 2873 env->spr[SPR_BESCR] |= BESCR_EEO; 2874 } 2875 2876 if (FIELD_EX64(env->msr, MSR, PR)) { 2877 powerpc_excp(cpu, ebb_excp); 2878 } else { 2879 ppc_set_irq(cpu, PPC_INTERRUPT_EBB, 1); 2880 } 2881 } 2882 2883 void raise_ebb_perfm_exception(CPUPPCState *env) 2884 { 2885 bool perfm_ebb_enabled = env->spr[SPR_POWER_MMCR0] & MMCR0_EBE && 2886 env->spr[SPR_BESCR] & BESCR_PME && 2887 env->spr[SPR_BESCR] & BESCR_GE; 2888 2889 if (!perfm_ebb_enabled) { 2890 return; 2891 } 2892 2893 do_ebb(env, POWERPC_EXCP_PERFM_EBB); 2894 } 2895 #endif 2896 2897 /*****************************************************************************/ 2898 /* Embedded PowerPC specific helpers */ 2899 void helper_40x_rfci(CPUPPCState *env) 2900 { 2901 do_rfi(env, env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3]); 2902 } 2903 2904 void helper_rfci(CPUPPCState *env) 2905 { 2906 do_rfi(env, env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1]); 2907 } 2908 2909 void helper_rfdi(CPUPPCState *env) 2910 { 2911 /* FIXME: choose CSRR1 or DSRR1 based on cpu type */ 2912 do_rfi(env, env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1]); 2913 } 2914 2915 void helper_rfmci(CPUPPCState *env) 2916 { 2917 /* FIXME: choose CSRR1 or MCSRR1 based on cpu type */ 2918 do_rfi(env, env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1]); 2919 } 2920 #endif /* CONFIG_TCG */ 2921 #endif /* !defined(CONFIG_USER_ONLY) */ 2922 2923 #ifdef CONFIG_TCG 2924 void helper_tw(CPUPPCState *env, target_ulong arg1, target_ulong arg2, 2925 uint32_t flags) 2926 { 2927 if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) || 2928 ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) || 2929 ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) || 2930 ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) || 2931 ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) { 2932 raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, 2933 POWERPC_EXCP_TRAP, GETPC()); 2934 } 2935 } 2936 2937 #if defined(TARGET_PPC64) 2938 void helper_td(CPUPPCState *env, target_ulong arg1, target_ulong arg2, 2939 uint32_t flags) 2940 { 2941 if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) || 2942 ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) || 2943 ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) || 2944 ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) || 2945 ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01))))) { 2946 raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, 2947 
POWERPC_EXCP_TRAP, GETPC()); 2948 } 2949 } 2950 #endif 2951 #endif 2952 2953 #ifdef CONFIG_TCG 2954 static uint32_t helper_SIMON_LIKE_32_64(uint32_t x, uint64_t key, uint32_t lane) 2955 { 2956 const uint16_t c = 0xfffc; 2957 const uint64_t z0 = 0xfa2561cdf44ac398ULL; 2958 uint16_t z = 0, temp; 2959 uint16_t k[32], eff_k[32], xleft[33], xright[33], fxleft[32]; 2960 2961 for (int i = 3; i >= 0; i--) { 2962 k[i] = key & 0xffff; 2963 key >>= 16; 2964 } 2965 xleft[0] = x & 0xffff; 2966 xright[0] = (x >> 16) & 0xffff; 2967 2968 for (int i = 0; i < 28; i++) { 2969 z = (z0 >> (63 - i)) & 1; 2970 temp = ror16(k[i + 3], 3) ^ k[i + 1]; 2971 k[i + 4] = c ^ z ^ k[i] ^ temp ^ ror16(temp, 1); 2972 } 2973 2974 for (int i = 0; i < 8; i++) { 2975 eff_k[4 * i + 0] = k[4 * i + ((0 + lane) % 4)]; 2976 eff_k[4 * i + 1] = k[4 * i + ((1 + lane) % 4)]; 2977 eff_k[4 * i + 2] = k[4 * i + ((2 + lane) % 4)]; 2978 eff_k[4 * i + 3] = k[4 * i + ((3 + lane) % 4)]; 2979 } 2980 2981 for (int i = 0; i < 32; i++) { 2982 fxleft[i] = (rol16(xleft[i], 1) & 2983 rol16(xleft[i], 8)) ^ rol16(xleft[i], 2); 2984 xleft[i + 1] = xright[i] ^ fxleft[i] ^ eff_k[i]; 2985 xright[i + 1] = xleft[i]; 2986 } 2987 2988 return (((uint32_t)xright[32]) << 16) | xleft[32]; 2989 } 2990 2991 static uint64_t hash_digest(uint64_t ra, uint64_t rb, uint64_t key) 2992 { 2993 uint64_t stage0_h = 0ULL, stage0_l = 0ULL; 2994 uint64_t stage1_h, stage1_l; 2995 2996 for (int i = 0; i < 4; i++) { 2997 stage0_h |= ror64(rb & 0xff, 8 * (2 * i + 1)); 2998 stage0_h |= ((ra >> 32) & 0xff) << (8 * 2 * i); 2999 stage0_l |= ror64((rb >> 32) & 0xff, 8 * (2 * i + 1)); 3000 stage0_l |= (ra & 0xff) << (8 * 2 * i); 3001 rb >>= 8; 3002 ra >>= 8; 3003 } 3004 3005 stage1_h = (uint64_t)helper_SIMON_LIKE_32_64(stage0_h >> 32, key, 0) << 32; 3006 stage1_h |= helper_SIMON_LIKE_32_64(stage0_h, key, 1); 3007 stage1_l = (uint64_t)helper_SIMON_LIKE_32_64(stage0_l >> 32, key, 2) << 32; 3008 stage1_l |= helper_SIMON_LIKE_32_64(stage0_l, key, 3); 3009 3010 return stage1_h ^ stage1_l; 3011 } 3012 3013 static void do_hash(CPUPPCState *env, target_ulong ea, target_ulong ra, 3014 target_ulong rb, uint64_t key, bool store) 3015 { 3016 uint64_t calculated_hash = hash_digest(ra, rb, key), loaded_hash; 3017 3018 if (store) { 3019 cpu_stq_data_ra(env, ea, calculated_hash, GETPC()); 3020 } else { 3021 loaded_hash = cpu_ldq_data_ra(env, ea, GETPC()); 3022 if (loaded_hash != calculated_hash) { 3023 raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, 3024 POWERPC_EXCP_TRAP, GETPC()); 3025 } 3026 } 3027 } 3028 3029 #include "qemu/guest-random.h" 3030 3031 #ifdef TARGET_PPC64 3032 #define HELPER_HASH(op, key, store, dexcr_aspect) \ 3033 void helper_##op(CPUPPCState *env, target_ulong ea, target_ulong ra, \ 3034 target_ulong rb) \ 3035 { \ 3036 if (env->msr & R_MSR_PR_MASK) { \ 3037 if (!(env->spr[SPR_DEXCR] & R_DEXCR_PRO_##dexcr_aspect##_MASK || \ 3038 env->spr[SPR_HDEXCR] & R_HDEXCR_ENF_##dexcr_aspect##_MASK)) \ 3039 return; \ 3040 } else if (!(env->msr & R_MSR_HV_MASK)) { \ 3041 if (!(env->spr[SPR_DEXCR] & R_DEXCR_PNH_##dexcr_aspect##_MASK || \ 3042 env->spr[SPR_HDEXCR] & R_HDEXCR_ENF_##dexcr_aspect##_MASK)) \ 3043 return; \ 3044 } else if (!(env->msr & R_MSR_S_MASK)) { \ 3045 if (!(env->spr[SPR_HDEXCR] & R_HDEXCR_HNU_##dexcr_aspect##_MASK)) \ 3046 return; \ 3047 } \ 3048 \ 3049 do_hash(env, ea, ra, rb, key, store); \ 3050 } 3051 #else 3052 #define HELPER_HASH(op, key, store, dexcr_aspect) \ 3053 void helper_##op(CPUPPCState *env, target_ulong ea, target_ulong ra, \ 3054 target_ulong rb) \ 3055 { \ 3056 
do_hash(env, ea, ra, rb, key, store); \ 3057 } 3058 #endif /* TARGET_PPC64 */ 3059 3060 HELPER_HASH(HASHST, env->spr[SPR_HASHKEYR], true, NPHIE) 3061 HELPER_HASH(HASHCHK, env->spr[SPR_HASHKEYR], false, NPHIE) 3062 HELPER_HASH(HASHSTP, env->spr[SPR_HASHPKEYR], true, PHIE) 3063 HELPER_HASH(HASHCHKP, env->spr[SPR_HASHPKEYR], false, PHIE) 3064 #endif /* CONFIG_TCG */ 3065 3066 #if !defined(CONFIG_USER_ONLY) 3067 3068 #ifdef CONFIG_TCG 3069 3070 /* Embedded.Processor Control */ 3071 static int dbell2irq(target_ulong rb) 3072 { 3073 int msg = rb & DBELL_TYPE_MASK; 3074 int irq = -1; 3075 3076 switch (msg) { 3077 case DBELL_TYPE_DBELL: 3078 irq = PPC_INTERRUPT_DOORBELL; 3079 break; 3080 case DBELL_TYPE_DBELL_CRIT: 3081 irq = PPC_INTERRUPT_CDOORBELL; 3082 break; 3083 case DBELL_TYPE_G_DBELL: 3084 case DBELL_TYPE_G_DBELL_CRIT: 3085 case DBELL_TYPE_G_DBELL_MC: 3086 /* XXX implement */ 3087 default: 3088 break; 3089 } 3090 3091 return irq; 3092 } 3093 3094 void helper_msgclr(CPUPPCState *env, target_ulong rb) 3095 { 3096 int irq = dbell2irq(rb); 3097 3098 if (irq < 0) { 3099 return; 3100 } 3101 3102 ppc_set_irq(env_archcpu(env), irq, 0); 3103 } 3104 3105 void helper_msgsnd(target_ulong rb) 3106 { 3107 int irq = dbell2irq(rb); 3108 int pir = rb & DBELL_PIRTAG_MASK; 3109 CPUState *cs; 3110 3111 if (irq < 0) { 3112 return; 3113 } 3114 3115 qemu_mutex_lock_iothread(); 3116 CPU_FOREACH(cs) { 3117 PowerPCCPU *cpu = POWERPC_CPU(cs); 3118 CPUPPCState *cenv = &cpu->env; 3119 3120 if ((rb & DBELL_BRDCAST) || (cenv->spr[SPR_BOOKE_PIR] == pir)) { 3121 ppc_set_irq(cpu, irq, 1); 3122 } 3123 } 3124 qemu_mutex_unlock_iothread(); 3125 } 3126 3127 /* Server Processor Control */ 3128 3129 static bool dbell_type_server(target_ulong rb) 3130 { 3131 /* 3132 * A Directed Hypervisor Doorbell message is sent only if the 3133 * message type is 5. 
All other types are reserved and the 3134 * instruction is a no-op 3135 */ 3136 return (rb & DBELL_TYPE_MASK) == DBELL_TYPE_DBELL_SERVER; 3137 } 3138 3139 void helper_book3s_msgclr(CPUPPCState *env, target_ulong rb) 3140 { 3141 if (!dbell_type_server(rb)) { 3142 return; 3143 } 3144 3145 ppc_set_irq(env_archcpu(env), PPC_INTERRUPT_HDOORBELL, 0); 3146 } 3147 3148 static void book3s_msgsnd_common(int pir, int irq) 3149 { 3150 CPUState *cs; 3151 3152 qemu_mutex_lock_iothread(); 3153 CPU_FOREACH(cs) { 3154 PowerPCCPU *cpu = POWERPC_CPU(cs); 3155 CPUPPCState *cenv = &cpu->env; 3156 3157 /* TODO: broadcast message to all threads of the same processor */ 3158 if (cenv->spr_cb[SPR_PIR].default_value == pir) { 3159 ppc_set_irq(cpu, irq, 1); 3160 } 3161 } 3162 qemu_mutex_unlock_iothread(); 3163 } 3164 3165 void helper_book3s_msgsnd(target_ulong rb) 3166 { 3167 int pir = rb & DBELL_PROCIDTAG_MASK; 3168 3169 if (!dbell_type_server(rb)) { 3170 return; 3171 } 3172 3173 book3s_msgsnd_common(pir, PPC_INTERRUPT_HDOORBELL); 3174 } 3175 3176 #if defined(TARGET_PPC64) 3177 void helper_book3s_msgclrp(CPUPPCState *env, target_ulong rb) 3178 { 3179 helper_hfscr_facility_check(env, HFSCR_MSGP, "msgclrp", HFSCR_IC_MSGP); 3180 3181 if (!dbell_type_server(rb)) { 3182 return; 3183 } 3184 3185 ppc_set_irq(env_archcpu(env), PPC_INTERRUPT_DOORBELL, 0); 3186 } 3187 3188 /* 3189 * sends a message to another thread on the same 3190 * multi-threaded processor 3191 */ 3192 void helper_book3s_msgsndp(CPUPPCState *env, target_ulong rb) 3193 { 3194 CPUState *cs = env_cpu(env); 3195 PowerPCCPU *cpu = POWERPC_CPU(cs); 3196 CPUState *ccs; 3197 uint32_t nr_threads = cs->nr_threads; 3198 int ttir = rb & PPC_BITMASK(57, 63); 3199 3200 helper_hfscr_facility_check(env, HFSCR_MSGP, "msgsndp", HFSCR_IC_MSGP); 3201 3202 if (!dbell_type_server(rb) || ttir >= nr_threads) { 3203 return; 3204 } 3205 3206 if (nr_threads == 1) { 3207 ppc_set_irq(cpu, PPC_INTERRUPT_DOORBELL, 1); 3208 return; 3209 } 3210 3211 /* Does iothread need to be locked for walking CPU list? */ 3212 qemu_mutex_lock_iothread(); 3213 THREAD_SIBLING_FOREACH(cs, ccs) { 3214 PowerPCCPU *ccpu = POWERPC_CPU(ccs); 3215 uint32_t thread_id = ppc_cpu_tir(ccpu); 3216 3217 if (ttir == thread_id) { 3218 ppc_set_irq(ccpu, PPC_INTERRUPT_DOORBELL, 1); 3219 qemu_mutex_unlock_iothread(); 3220 return; 3221 } 3222 } 3223 3224 g_assert_not_reached(); 3225 } 3226 #endif /* TARGET_PPC64 */ 3227 3228 void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr, 3229 MMUAccessType access_type, 3230 int mmu_idx, uintptr_t retaddr) 3231 { 3232 CPUPPCState *env = cs->env_ptr; 3233 uint32_t insn; 3234 3235 /* Restore state and reload the insn we executed, for filling in DSISR. */ 3236 cpu_restore_state(cs, retaddr); 3237 insn = ppc_ldl_code(env, env->nip); 3238 3239 switch (env->mmu_model) { 3240 case POWERPC_MMU_SOFT_4xx: 3241 env->spr[SPR_40x_DEAR] = vaddr; 3242 break; 3243 case POWERPC_MMU_BOOKE: 3244 case POWERPC_MMU_BOOKE206: 3245 env->spr[SPR_BOOKE_DEAR] = vaddr; 3246 break; 3247 default: 3248 env->spr[SPR_DAR] = vaddr; 3249 break; 3250 } 3251 3252 cs->exception_index = POWERPC_EXCP_ALIGN; 3253 env->error_code = insn & 0x03FF0000; 3254 cpu_loop_exit(cs); 3255 } 3256 #endif /* CONFIG_TCG */ 3257 #endif /* !CONFIG_USER_ONLY */ 3258
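/*
 * ---------------------------------------------------------------------------
 * Illustrative sketch, not part of the original file.
 *
 * The p7/p8/p9_next_unmasked_interrupt() functions above all share one
 * shape: walk the pending-interrupt bitmask in architectural priority
 * order and return the first source whose masking conditions allow
 * delivery; ppc_maybe_interrupt() then folds that answer into a single
 * "hard interrupt pending" flag that is re-evaluated whenever a masking
 * condition changes.  The standalone toy below shows only that shape.
 * All names (toy_*, TOY_*) are hypothetical and the masking rules are
 * deliberately simplified.
 */
#include <stdbool.h>
#include <stdint.h>

#define TOY_IRQ_MCK  (1u << 0)  /* machine check: delivered regardless of EE */
#define TOY_IRQ_HDEC (1u << 1)  /* gated on a hypervisor-enable bit          */
#define TOY_IRQ_EXT  (1u << 2)  /* gated on EE                               */
#define TOY_IRQ_DECR (1u << 3)  /* gated on EE                               */

struct toy_cpu {
    uint32_t pending;        /* one bit per pending interrupt source */
    bool ee;                 /* stand-in for MSR[EE]                 */
    bool hdice;              /* stand-in for LPCR[HDICE]             */
    bool hard_irq;           /* stand-in for CPU_INTERRUPT_HARD      */
};

/* Return the highest-priority deliverable interrupt, or 0 for none. */
uint32_t toy_next_unmasked(const struct toy_cpu *cpu)
{
    if (cpu->pending & TOY_IRQ_MCK) {
        return TOY_IRQ_MCK;
    }
    if ((cpu->pending & TOY_IRQ_HDEC) && cpu->hdice) {
        return TOY_IRQ_HDEC;
    }
    if (cpu->ee) {
        if (cpu->pending & TOY_IRQ_EXT) {
            return TOY_IRQ_EXT;
        }
        if (cpu->pending & TOY_IRQ_DECR) {
            return TOY_IRQ_DECR;
        }
    }
    return 0;
}

/* Called whenever pending bits or a masking condition change. */
void toy_maybe_interrupt(struct toy_cpu *cpu)
{
    cpu->hard_irq = toy_next_unmasked(cpu) != 0;
}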
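/*
 * ---------------------------------------------------------------------------
 * Illustrative sketch, not part of the original file.
 *
 * EBB handling above has two halves: the interrupt is only taken in
 * problem state with BESCR[GE] set, and on delivery the BESCR status
 * bits (PMEO vs. EEO) select whether the performance-monitor or the
 * external flavour is raised.  The toy below mirrors that two-step
 * decision with hypothetical names; the bit positions are invented for
 * the example and do not correspond to the real BESCR layout.
 */
#include <stdbool.h>
#include <stdint.h>

#define TOY_BESCR_GE   (1u << 0)  /* global enable                       */
#define TOY_BESCR_PMEO (1u << 1)  /* performance-monitor event occurred  */
#define TOY_BESCR_EEO  (1u << 2)  /* external event occurred             */

enum toy_ebb { TOY_EBB_NONE, TOY_EBB_PERFM, TOY_EBB_EXTERNAL };

enum toy_ebb toy_deliver_ebb(uint32_t bescr, bool problem_state)
{
    /* Not takeable yet: stays pending until PR and GE allow it. */
    if (!problem_state || !(bescr & TOY_BESCR_GE)) {
        return TOY_EBB_NONE;
    }
    if (bescr & TOY_BESCR_PMEO) {
        return TOY_EBB_PERFM;
    }
    if (bescr & TOY_BESCR_EEO) {
        return TOY_EBB_EXTERNAL;
    }
    return TOY_EBB_NONE;
}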
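/*
 * ---------------------------------------------------------------------------
 * Illustrative sketch, not part of the original file.
 *
 * do_rfi() above restores NIP and MSR from a pair of save/restore
 * registers: the return address has its two low bits cleared, and it is
 * cropped to 32 bits when the restored MSR selects 32-bit mode.  The toy
 * below shows just that address fix-up; the toy_* name is hypothetical
 * and MSR handling is reduced to a single "sixty_four" flag.
 */
#include <stdint.h>

uint64_t toy_rfi_nip(uint64_t srr0, int sixty_four)
{
    uint64_t nip = srr0;

    if (!sixty_four) {
        nip = (uint32_t)nip;        /* crop when returning to 32-bit mode */
    }
    return nip & ~(uint64_t)0x3;    /* instructions are word aligned */
}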
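/*
 * ---------------------------------------------------------------------------
 * Illustrative sketch, not part of the original file.
 *
 * do_hash() above implements both hashst (compute a keyed digest of
 * ra/rb and store it) and hashchk (recompute it, load the stored value
 * and trap on mismatch).  The toy below shows the same store/check split
 * over a plain memory slot, using a trivial mixing function in place of
 * the SIMON-like digest; names and the mix are hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>

static uint64_t toy_digest(uint64_t ra, uint64_t rb, uint64_t key)
{
    /* Placeholder mix, NOT the SIMON-like function used above. */
    uint64_t h = ra ^ (rb << 1) ^ key;

    h ^= h >> 33;
    h *= 0xff51afd7ed558ccdULL;
    h ^= h >> 33;
    return h;
}

/*
 * store = true behaves like hashst, store = false like hashchk.
 * Returns false when a hashchk mismatch would raise a trap.
 */
bool toy_do_hash(uint64_t *slot, uint64_t ra, uint64_t rb,
                 uint64_t key, bool store)
{
    uint64_t calc = toy_digest(ra, rb, key);

    if (store) {
        *slot = calc;
        return true;
    }
    return *slot == calc;
}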
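/*
 * ---------------------------------------------------------------------------
 * Illustrative sketch, not part of the original file.
 *
 * dbell2irq() and dbell_type_server() above both start by decoding the
 * message-type field of the msgsnd/msgclr operand: known types map to a
 * specific doorbell interrupt, reserved types make the instruction a
 * no-op.  The toy below shows that decode with a hypothetical field
 * layout and names; it does not reproduce the real DBELL encoding.
 */
#include <stdint.h>

#define TOY_DBELL_TYPE_SHIFT    27
#define TOY_DBELL_TYPE_MASK     (0x1fu << TOY_DBELL_TYPE_SHIFT)
#define TOY_DBELL_TYPE_NORMAL   0
#define TOY_DBELL_TYPE_CRITICAL 1

/* Returns the interrupt to raise, or -1 when the type is reserved. */
int toy_dbell_to_irq(uint32_t rb)
{
    switch ((rb & TOY_DBELL_TYPE_MASK) >> TOY_DBELL_TYPE_SHIFT) {
    case TOY_DBELL_TYPE_NORMAL:
        return 1;   /* stand-in for a normal doorbell interrupt   */
    case TOY_DBELL_TYPE_CRITICAL:
        return 2;   /* stand-in for a critical doorbell interrupt */
    default:
        return -1;  /* reserved type: treat msgsnd/msgclr as a no-op */
    }
}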