/*
 * PowerPC exception emulation helpers for QEMU.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "internal.h"
#include "helper_regs.h"
#include "hw/ppc/ppc.h"

#include "trace.h"

#ifdef CONFIG_TCG
#include "sysemu/tcg.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#endif

/*****************************************************************************/
/* Exception processing */
#ifndef CONFIG_USER_ONLY

static const char *powerpc_excp_name(int excp)
{
    switch (excp) {
    case POWERPC_EXCP_CRITICAL: return "CRITICAL";
    case POWERPC_EXCP_MCHECK: return "MCHECK";
    case POWERPC_EXCP_DSI: return "DSI";
    case POWERPC_EXCP_ISI: return "ISI";
    case POWERPC_EXCP_EXTERNAL: return "EXTERNAL";
    case POWERPC_EXCP_ALIGN: return "ALIGN";
    case POWERPC_EXCP_PROGRAM: return "PROGRAM";
    case POWERPC_EXCP_FPU: return "FPU";
    case POWERPC_EXCP_SYSCALL: return "SYSCALL";
    case POWERPC_EXCP_APU: return "APU";
    case POWERPC_EXCP_DECR: return "DECR";
    case POWERPC_EXCP_FIT: return "FIT";
    case POWERPC_EXCP_WDT: return "WDT";
    case POWERPC_EXCP_DTLB: return "DTLB";
    case POWERPC_EXCP_ITLB: return "ITLB";
    case POWERPC_EXCP_DEBUG: return "DEBUG";
    case POWERPC_EXCP_SPEU: return "SPEU";
    case POWERPC_EXCP_EFPDI: return "EFPDI";
    case POWERPC_EXCP_EFPRI: return "EFPRI";
    case POWERPC_EXCP_EPERFM: return "EPERFM";
    case POWERPC_EXCP_DOORI: return "DOORI";
    case POWERPC_EXCP_DOORCI: return "DOORCI";
    case POWERPC_EXCP_GDOORI: return "GDOORI";
    case POWERPC_EXCP_GDOORCI: return "GDOORCI";
    case POWERPC_EXCP_HYPPRIV: return "HYPPRIV";
    case POWERPC_EXCP_RESET: return "RESET";
    case POWERPC_EXCP_DSEG: return "DSEG";
    case POWERPC_EXCP_ISEG: return "ISEG";
    case POWERPC_EXCP_HDECR: return "HDECR";
    case POWERPC_EXCP_TRACE: return "TRACE";
    case POWERPC_EXCP_HDSI: return "HDSI";
    case POWERPC_EXCP_HISI: return "HISI";
    case POWERPC_EXCP_HDSEG: return "HDSEG";
    case POWERPC_EXCP_HISEG: return "HISEG";
    case POWERPC_EXCP_VPU: return "VPU";
    case POWERPC_EXCP_PIT: return "PIT";
    case POWERPC_EXCP_EMUL: return "EMUL";
    case POWERPC_EXCP_IFTLB: return "IFTLB";
    case POWERPC_EXCP_DLTLB: return "DLTLB";
    case POWERPC_EXCP_DSTLB: return "DSTLB";
    case POWERPC_EXCP_FPA: return "FPA";
    case POWERPC_EXCP_DABR: return "DABR";
    case POWERPC_EXCP_IABR: return "IABR";
    case POWERPC_EXCP_SMI: return "SMI";
    case POWERPC_EXCP_PERFM: return "PERFM";
    case POWERPC_EXCP_THERM: return "THERM";
    case POWERPC_EXCP_VPUA: return "VPUA";
    case POWERPC_EXCP_SOFTP: return "SOFTP";
    case POWERPC_EXCP_MAINT: return "MAINT";
    case POWERPC_EXCP_MEXTBR: return "MEXTBR";
    case POWERPC_EXCP_NMEXTBR: return "NMEXTBR";
    case POWERPC_EXCP_ITLBE: return "ITLBE";
    case POWERPC_EXCP_DTLBE: return "DTLBE";
    case POWERPC_EXCP_VSXU: return "VSXU";
    case POWERPC_EXCP_FU: return "FU";
    case POWERPC_EXCP_HV_EMU: return "HV_EMU";
    case POWERPC_EXCP_HV_MAINT: return "HV_MAINT";
    case POWERPC_EXCP_HV_FU: return "HV_FU";
    case POWERPC_EXCP_SDOOR: return "SDOOR";
    case POWERPC_EXCP_SDOOR_HV: return "SDOOR_HV";
    case POWERPC_EXCP_HVIRT: return "HVIRT";
    case POWERPC_EXCP_SYSCALL_VECTORED: return "SYSCALL_VECTORED";
    default:
        g_assert_not_reached();
    }
}

static void dump_syscall(CPUPPCState *env)
{
    qemu_log_mask(CPU_LOG_INT, "syscall r0=%016" PRIx64
                  " r3=%016" PRIx64 " r4=%016" PRIx64 " r5=%016" PRIx64
                  " r6=%016" PRIx64 " r7=%016" PRIx64 " r8=%016" PRIx64
                  " nip=" TARGET_FMT_lx "\n",
                  ppc_dump_gpr(env, 0), ppc_dump_gpr(env, 3),
                  ppc_dump_gpr(env, 4), ppc_dump_gpr(env, 5),
                  ppc_dump_gpr(env, 6), ppc_dump_gpr(env, 7),
                  ppc_dump_gpr(env, 8), env->nip);
}

static void dump_hcall(CPUPPCState *env)
{
    qemu_log_mask(CPU_LOG_INT, "hypercall r3=%016" PRIx64
                  " r4=%016" PRIx64 " r5=%016" PRIx64 " r6=%016" PRIx64
                  " r7=%016" PRIx64 " r8=%016" PRIx64 " r9=%016" PRIx64
                  " r10=%016" PRIx64 " r11=%016" PRIx64 " r12=%016" PRIx64
                  " nip=" TARGET_FMT_lx "\n",
                  ppc_dump_gpr(env, 3), ppc_dump_gpr(env, 4),
                  ppc_dump_gpr(env, 5), ppc_dump_gpr(env, 6),
                  ppc_dump_gpr(env, 7), ppc_dump_gpr(env, 8),
                  ppc_dump_gpr(env, 9), ppc_dump_gpr(env, 10),
                  ppc_dump_gpr(env, 11), ppc_dump_gpr(env, 12),
                  env->nip);
}

#ifdef CONFIG_TCG
/* Return true iff byteswap is needed to load instruction */
static inline bool insn_need_byteswap(CPUArchState *env)
{
    /* SYSTEM builds TARGET_BIG_ENDIAN. Need to swap when MSR[LE] is set */
    return !!(env->msr & ((target_ulong)1 << MSR_LE));
}

static uint32_t ppc_ldl_code(CPUArchState *env, abi_ptr addr)
{
    uint32_t insn = cpu_ldl_code(env, addr);

    if (insn_need_byteswap(env)) {
        insn = bswap32(insn);
    }

    return insn;
}
#endif

static void ppc_excp_debug_sw_tlb(CPUPPCState *env, int excp)
{
    const char *es;
    target_ulong *miss, *cmp;
    int en;

    if (!qemu_loglevel_mask(CPU_LOG_MMU)) {
        return;
    }

    if (excp == POWERPC_EXCP_IFTLB) {
        es = "I";
        en = 'I';
        miss = &env->spr[SPR_IMISS];
        cmp = &env->spr[SPR_ICMP];
    } else {
        if (excp == POWERPC_EXCP_DLTLB) {
            es = "DL";
        } else {
            es = "DS";
        }
        en = 'D';
        miss = &env->spr[SPR_DMISS];
        cmp = &env->spr[SPR_DCMP];
    }
    qemu_log("6xx %sTLB miss: %cM " TARGET_FMT_lx " %cC "
             TARGET_FMT_lx " H1 " TARGET_FMT_lx " H2 "
             TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp,
             env->spr[SPR_HASH1], env->spr[SPR_HASH2],
             env->error_code);
}

#ifdef TARGET_PPC64
static int powerpc_reset_wakeup(CPUPPCState *env, int excp, target_ulong *msr)
{
    /* We are no longer in a PM state */
    env->resume_as_sreset = false;

    /* Pretend to be returning from doze always as we don't lose state */
    *msr |= SRR1_WS_NOLOSS;

    /* Machine checks are sent normally */
    if (excp == POWERPC_EXCP_MCHECK) {
        return excp;
    }
    switch (excp) {
    case POWERPC_EXCP_RESET:
        *msr |= SRR1_WAKERESET;
        break;
    case POWERPC_EXCP_EXTERNAL:
        *msr |= SRR1_WAKEEE;
        break;
    case POWERPC_EXCP_DECR:
        *msr |= SRR1_WAKEDEC;
        break;
    case POWERPC_EXCP_SDOOR:
        *msr |= SRR1_WAKEDBELL;
        break;
    case POWERPC_EXCP_SDOOR_HV:
        *msr |= SRR1_WAKEHDBELL;
        break;
    case POWERPC_EXCP_HV_MAINT:
        *msr |= SRR1_WAKEHMI;
        break;
    case POWERPC_EXCP_HVIRT:
        *msr |= SRR1_WAKEHVI;
        break;
    default:
        cpu_abort(env_cpu(env),
                  "Unsupported exception %d in Power Save mode\n", excp);
    }
    return POWERPC_EXCP_RESET;
}
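
/*
 * Illustrative example (added commentary, not from the original source): a
 * decrementer interrupt that wakes a thread out of a power-saving state is
 * converted into a system reset here (POWERPC_EXCP_RESET is returned), with
 * SRR1_WAKEDEC and SRR1_WS_NOLOSS folded into the SRR1 value so the guest OS
 * can tell why it woke up and that no state was lost.
 */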

/*
 * AIL - Alternate Interrupt Location, a mode that allows interrupts to be
 * taken with the MMU on, and which uses an alternate location (e.g., so the
 * kernel/hv can map the vectors there with an effective address).
 *
 * An interrupt is considered to be taken "with AIL" or "AIL applies" if it
 * is delivered in this way. AIL requires the LPCR to be set to enable this
 * mode, and then a number of conditions have to be true for AIL to apply.
 *
 * First of all, SRESET, MCE, and HMI are always delivered without AIL, because
 * they specifically want to be in real mode (e.g., the MCE might be signaling
 * a SLB multi-hit which requires SLB flush before the MMU can be enabled).
 *
 * After that, behaviour depends on the current MSR[IR], MSR[DR], MSR[HV],
 * whether or not the interrupt changes MSR[HV] from 0 to 1, and the current
 * radix mode (LPCR[HR]).
 *
 * POWER8, POWER9 with LPCR[HR]=0
 * | LPCR[AIL] | MSR[IR||DR] | MSR[HV] | new MSR[HV] | AIL |
 * +-----------+-------------+---------+-------------+-----+
 * | a         | 00/01/10    | x       | x           | 0   |
 * | a         | 11          | 0       | 1           | 0   |
 * | a         | 11          | 1       | 1           | a   |
 * | a         | 11          | 0       | 0           | a   |
 * +-------------------------------------------------------+
 *
 * POWER9 with LPCR[HR]=1
 * | LPCR[AIL] | MSR[IR||DR] | MSR[HV] | new MSR[HV] | AIL |
 * +-----------+-------------+---------+-------------+-----+
 * | a         | 00/01/10    | x       | x           | 0   |
 * | a         | 11          | x       | x           | a   |
 * +-------------------------------------------------------+
 *
 * The difference on POWER9 is that MSR[HV] 0->1 interrupts can be sent to
 * the hypervisor in AIL mode if the guest is radix. This is good for
 * performance but allows the guest to influence the AIL of hypervisor
 * interrupts using its MSR, and also the hypervisor must disallow guest
 * interrupts (MSR[HV] 0->0) from using AIL if the hypervisor does not want to
 * use AIL for its MSR[HV] 0->1 interrupts.
 *
 * POWER10 addresses those issues with a new LPCR[HAIL] bit that is applied to
 * interrupts that begin execution with MSR[HV]=1 (so both MSR[HV] 0->1 and
 * MSR[HV] 1->1).
 *
 * HAIL=1 is equivalent to AIL=3, for interrupts delivered with MSR[HV]=1.
 *
 * The POWER10 behaviour is
 * | LPCR[AIL] | LPCR[HAIL] | MSR[IR||DR] | MSR[HV] | new MSR[HV] | AIL |
 * +-----------+------------+-------------+---------+-------------+-----+
 * | a         | h          | 00/01/10    | 0       | 0           | 0   |
 * | a         | h          | 11          | 0       | 0           | a   |
 * | a         | h          | x           | 0       | 1           | h   |
 * | a         | h          | 00/01/10    | 1       | 1           | 0   |
 * | a         | h          | 11          | 1       | 1           | h   |
 * +--------------------------------------------------------------------+
 */
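/*
 * Worked example (added commentary, not from the original comment): with
 * LPCR[AIL]=3 and MSR[IR]=MSR[DR]=1 at the time the interrupt is taken, an
 * external interrupt that would normally vector to 0x500 is instead taken
 * at 0xc000000000004500, and the new MSR has IR and DR set, so the handler
 * starts executing with translation on.
 */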
static void ppc_excp_apply_ail(PowerPCCPU *cpu, int excp, target_ulong msr,
                               target_ulong *new_msr, target_ulong *vector)
{
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
    CPUPPCState *env = &cpu->env;
    bool mmu_all_on = ((msr >> MSR_IR) & 1) && ((msr >> MSR_DR) & 1);
    bool hv_escalation = !(msr & MSR_HVB) && (*new_msr & MSR_HVB);
    int ail = 0;

    if (excp == POWERPC_EXCP_MCHECK ||
        excp == POWERPC_EXCP_RESET ||
        excp == POWERPC_EXCP_HV_MAINT) {
        /* SRESET, MCE, HMI never apply AIL */
        return;
    }

    if (!(pcc->lpcr_mask & LPCR_AIL)) {
        /* This CPU does not have AIL */
        return;
    }

    /* P8 & P9 */
    if (!(pcc->lpcr_mask & LPCR_HAIL)) {
        if (!mmu_all_on) {
            /* AIL only works if MSR[IR] and MSR[DR] are both enabled. */
            return;
        }
        if (hv_escalation && !(env->spr[SPR_LPCR] & LPCR_HR)) {
            /*
             * AIL does not work if there is a MSR[HV] 0->1 transition and the
             * partition is in HPT mode. For radix guests, such interrupts are
             * allowed to be delivered to the hypervisor in ail mode.
             */
            return;
        }

        ail = (env->spr[SPR_LPCR] & LPCR_AIL) >> LPCR_AIL_SHIFT;
        if (ail == 0) {
            return;
        }
        if (ail == 1) {
            /* AIL=1 is reserved, treat it like AIL=0 */
            return;
        }

    /* P10 and up */
    } else {
        if (!mmu_all_on && !hv_escalation) {
            /*
             * AIL works for HV interrupts even with guest MSR[IR/DR] disabled.
             * Guest->guest and HV->HV interrupts do require MMU on.
             */
            return;
        }

        if (*new_msr & MSR_HVB) {
            if (!(env->spr[SPR_LPCR] & LPCR_HAIL)) {
                /* HV interrupts depend on LPCR[HAIL] */
                return;
            }
            ail = 3; /* HAIL=1 gives AIL=3 behaviour for HV interrupts */
        } else {
            ail = (env->spr[SPR_LPCR] & LPCR_AIL) >> LPCR_AIL_SHIFT;
        }
        if (ail == 0) {
            return;
        }
        if (ail == 1 || ail == 2) {
            /* AIL=1 and AIL=2 are reserved, treat them like AIL=0 */
            return;
        }
    }

    /*
     * AIL applies, so the new MSR gets IR and DR set, and an offset applied
     * to the new IP.
     */
    *new_msr |= (1 << MSR_IR) | (1 << MSR_DR);

    if (excp != POWERPC_EXCP_SYSCALL_VECTORED) {
        if (ail == 2) {
            *vector |= 0x0000000000018000ull;
        } else if (ail == 3) {
            *vector |= 0xc000000000004000ull;
        }
    } else {
        /*
         * scv AIL is a little different. AIL=2 does not change the address,
         * only the MSR. AIL=3 replaces the 0x17000 base with 0xc...3000.
         */
        if (ail == 3) {
            *vector &= ~0x0000000000017000ull; /* Un-apply the base offset */
            *vector |= 0xc000000000003000ull; /* Apply scv's AIL=3 offset */
        }
    }
}
#endif /* TARGET_PPC64 */

static void powerpc_reset_excp_state(PowerPCCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;

    /* Reset exception state */
    cs->exception_index = POWERPC_EXCP_NONE;
    env->error_code = 0;
}

static void powerpc_set_excp_state(PowerPCCPU *cpu, target_ulong vector,
                                   target_ulong msr)
{
    CPUPPCState *env = &cpu->env;

    assert((msr & env->msr_mask) == msr);

    /*
     * We don't use hreg_store_msr here as we have already handled any
     * special case that could occur. Just store MSR and update hflags
     *
     * Note: We *MUST* not use hreg_store_msr() as-is anyway because it will
     * prevent setting of the HV bit which some exceptions might need to do.
     */
    env->nip = vector;
    env->msr = msr;
    hreg_compute_hflags(env);
    ppc_maybe_interrupt(env);

    powerpc_reset_excp_state(cpu);

    /*
     * Any interrupt is context synchronizing, check if TCG TLB needs
     * a delayed flush on ppc64
     */
    check_tlb_flush(env, false);

    /* Reset the reservation */
    env->reserve_addr = -1;
}

static void powerpc_mcheck_checkstop(CPUPPCState *env)
{
    CPUState *cs = env_cpu(env);

    if (FIELD_EX64(env->msr, MSR, ME)) {
        return;
    }

    /* Machine check exception is not enabled. Enter checkstop state. */
    fprintf(stderr, "Machine check while not allowed. "
            "Entering checkstop state\n");
    if (qemu_log_separate()) {
        qemu_log("Machine check while not allowed. "
                 "Entering checkstop state\n");
    }
    cs->halted = 1;
    cpu_interrupt_exittb(cs);
}

static void powerpc_excp_40x(PowerPCCPU *cpu, int excp)
{
    CPUPPCState *env = &cpu->env;
    target_ulong msr, new_msr, vector;
    int srr0 = SPR_SRR0, srr1 = SPR_SRR1;

    /* new srr1 value excluding must-be-zero bits */
    msr = env->msr & ~0x783f0000ULL;

    /* new interrupt handler msr preserves ME unless explicitly overridden */
    new_msr = env->msr & ((target_ulong)1 << MSR_ME);

    /* HV emu assistance interrupt only exists on server arch 2.05 or later */
    if (excp == POWERPC_EXCP_HV_EMU) {
        excp = POWERPC_EXCP_PROGRAM;
    }

    vector = env->excp_vectors[excp];
    if (vector == (target_ulong)-1ULL) {
        cpu_abort(env_cpu(env),
                  "Raised an exception without defined vector %d\n", excp);
    }
    vector |= env->excp_prefix;

    switch (excp) {
    case POWERPC_EXCP_CRITICAL:    /* Critical input */
        srr0 = SPR_40x_SRR2;
        srr1 = SPR_40x_SRR3;
        break;
    case POWERPC_EXCP_MCHECK:      /* Machine check exception */
        powerpc_mcheck_checkstop(env);
        /* machine check exceptions don't have ME set */
        new_msr &= ~((target_ulong)1 << MSR_ME);
        srr0 = SPR_40x_SRR2;
        srr1 = SPR_40x_SRR3;
        break;
    case POWERPC_EXCP_DSI:         /* Data storage exception */
        trace_ppc_excp_dsi(env->spr[SPR_40x_ESR], env->spr[SPR_40x_DEAR]);
        break;
    case POWERPC_EXCP_ISI:         /* Instruction storage exception */
        trace_ppc_excp_isi(msr, env->nip);
        break;
    case POWERPC_EXCP_EXTERNAL:    /* External input */
        break;
    case POWERPC_EXCP_ALIGN:       /* Alignment exception */
        break;
    case POWERPC_EXCP_PROGRAM:     /* Program exception */
        switch (env->error_code & ~0xF) {
        case POWERPC_EXCP_FP:
            if (!FIELD_EX64_FE(env->msr) || !FIELD_EX64(env->msr, MSR, FP)) {
                trace_ppc_excp_fp_ignore();
                powerpc_reset_excp_state(cpu);
                return;
            }
            env->spr[SPR_40x_ESR] = ESR_FP;
            break;
        case POWERPC_EXCP_INVAL:
            trace_ppc_excp_inval(env->nip);
            env->spr[SPR_40x_ESR] = ESR_PIL;
            break;
        case POWERPC_EXCP_PRIV:
            env->spr[SPR_40x_ESR] = ESR_PPR;
            break;
        case POWERPC_EXCP_TRAP:
            env->spr[SPR_40x_ESR] = ESR_PTR;
            break;
        default:
            cpu_abort(env_cpu(env), "Invalid program exception %d. Aborting\n",
                      env->error_code);
            break;
        }
        break;
    case POWERPC_EXCP_SYSCALL:     /* System call exception */
        dump_syscall(env);

        /*
         * We need to correct the NIP which in this case is supposed
         * to point to the next instruction
         */
        env->nip += 4;
        break;
    case POWERPC_EXCP_FIT:         /* Fixed-interval timer interrupt */
        trace_ppc_excp_print("FIT");
        break;
    case POWERPC_EXCP_WDT:         /* Watchdog timer interrupt */
        trace_ppc_excp_print("WDT");
        break;
    case POWERPC_EXCP_DTLB:        /* Data TLB error */
    case POWERPC_EXCP_ITLB:        /* Instruction TLB error */
        break;
    case POWERPC_EXCP_PIT:         /* Programmable interval timer interrupt */
        trace_ppc_excp_print("PIT");
        break;
    case POWERPC_EXCP_DEBUG:       /* Debug interrupt */
        cpu_abort(env_cpu(env), "%s exception not implemented\n",
                  powerpc_excp_name(excp));
        break;
    default:
        cpu_abort(env_cpu(env), "Invalid PowerPC exception %d. Aborting\n",
                  excp);
        break;
    }

    env->spr[srr0] = env->nip;
    env->spr[srr1] = msr;
    powerpc_set_excp_state(cpu, vector, new_msr);
}

static void powerpc_excp_6xx(PowerPCCPU *cpu, int excp)
{
    CPUPPCState *env = &cpu->env;
    target_ulong msr, new_msr, vector;

    /* new srr1 value excluding must-be-zero bits */
    msr = env->msr & ~0x783f0000ULL;

    /* new interrupt handler msr preserves ME unless explicitly overridden */
    new_msr = env->msr & ((target_ulong)1 << MSR_ME);

    /* HV emu assistance interrupt only exists on server arch 2.05 or later */
    if (excp == POWERPC_EXCP_HV_EMU) {
        excp = POWERPC_EXCP_PROGRAM;
    }

    vector = env->excp_vectors[excp];
    if (vector == (target_ulong)-1ULL) {
        cpu_abort(env_cpu(env),
                  "Raised an exception without defined vector %d\n", excp);
    }
    vector |= env->excp_prefix;

    switch (excp) {
    case POWERPC_EXCP_CRITICAL:    /* Critical input */
        break;
    case POWERPC_EXCP_MCHECK:      /* Machine check exception */
        powerpc_mcheck_checkstop(env);
        /* machine check exceptions don't have ME set */
        new_msr &= ~((target_ulong)1 << MSR_ME);
        break;
    case POWERPC_EXCP_DSI:         /* Data storage exception */
        trace_ppc_excp_dsi(env->spr[SPR_DSISR], env->spr[SPR_DAR]);
        break;
    case POWERPC_EXCP_ISI:         /* Instruction storage exception */
        trace_ppc_excp_isi(msr, env->nip);
        msr |= env->error_code;
        break;
    case POWERPC_EXCP_EXTERNAL:    /* External input */
        break;
    case POWERPC_EXCP_ALIGN:       /* Alignment exception */
        /* Get rS/rD and rA from faulting opcode */
        /*
         * Note: the opcode fields will not be set properly for a
         * direct store load/store, but nobody cares as nobody
         * actually uses direct store segments.
         */
        env->spr[SPR_DSISR] |= (env->error_code & 0x03FF0000) >> 16;
        break;
    case POWERPC_EXCP_PROGRAM:     /* Program exception */
        switch (env->error_code & ~0xF) {
        case POWERPC_EXCP_FP:
            if (!FIELD_EX64_FE(env->msr) || !FIELD_EX64(env->msr, MSR, FP)) {
                trace_ppc_excp_fp_ignore();
                powerpc_reset_excp_state(cpu);
                return;
            }
            /*
             * NIP always points to the faulting instruction for FP exceptions,
             * so always use store_next and claim we are precise in the MSR.
             */
            msr |= 0x00100000;
            break;
        case POWERPC_EXCP_INVAL:
            trace_ppc_excp_inval(env->nip);
            msr |= 0x00080000;
            break;
        case POWERPC_EXCP_PRIV:
            msr |= 0x00040000;
            break;
        case POWERPC_EXCP_TRAP:
            msr |= 0x00020000;
            break;
        default:
            /* Should never occur */
            cpu_abort(env_cpu(env), "Invalid program exception %d. Aborting\n",
                      env->error_code);
            break;
        }
        break;
    case POWERPC_EXCP_SYSCALL:     /* System call exception */
        dump_syscall(env);

        /*
         * We need to correct the NIP which in this case is supposed
         * to point to the next instruction
         */
        env->nip += 4;
        break;
    case POWERPC_EXCP_FPU:         /* Floating-point unavailable exception */
    case POWERPC_EXCP_DECR:        /* Decrementer exception */
        break;
    case POWERPC_EXCP_DTLB:        /* Data TLB error */
    case POWERPC_EXCP_ITLB:        /* Instruction TLB error */
        break;
    case POWERPC_EXCP_RESET:       /* System reset exception */
        if (FIELD_EX64(env->msr, MSR, POW)) {
            cpu_abort(env_cpu(env),
                      "Trying to deliver power-saving system reset exception "
                      "%d with no HV support\n", excp);
        }
        break;
    case POWERPC_EXCP_TRACE:       /* Trace exception */
        break;
    case POWERPC_EXCP_IFTLB:       /* Instruction fetch TLB error */
    case POWERPC_EXCP_DLTLB:       /* Data load TLB miss */
    case POWERPC_EXCP_DSTLB:       /* Data store TLB miss */
        /* Swap temporary saved registers with GPRs */
        if (!(new_msr & ((target_ulong)1 << MSR_TGPR))) {
            new_msr |= (target_ulong)1 << MSR_TGPR;
            hreg_swap_gpr_tgpr(env);
        }

        ppc_excp_debug_sw_tlb(env, excp);

        msr |= env->crf[0] << 28;
        msr |= env->error_code; /* key, D/I, S/L bits */
        /* Set way using a LRU mechanism */
        msr |= ((env->last_way + 1) & (env->nb_ways - 1)) << 17;
        break;
    case POWERPC_EXCP_FPA:         /* Floating-point assist exception */
    case POWERPC_EXCP_DABR:        /* Data address breakpoint */
    case POWERPC_EXCP_IABR:        /* Instruction address breakpoint */
    case POWERPC_EXCP_SMI:         /* System management interrupt */
    case POWERPC_EXCP_MEXTBR:      /* Maskable external breakpoint */
    case POWERPC_EXCP_NMEXTBR:     /* Non-maskable external breakpoint */
        cpu_abort(env_cpu(env), "%s exception not implemented\n",
                  powerpc_excp_name(excp));
        break;
    default:
        cpu_abort(env_cpu(env), "Invalid PowerPC exception %d. Aborting\n",
                  excp);
        break;
    }

    if (ppc_interrupts_little_endian(cpu, !!(new_msr & MSR_HVB))) {
        new_msr |= (target_ulong)1 << MSR_LE;
    }
    env->spr[SPR_SRR0] = env->nip;
    env->spr[SPR_SRR1] = msr;
    powerpc_set_excp_state(cpu, vector, new_msr);
}

static void powerpc_excp_7xx(PowerPCCPU *cpu, int excp)
{
    CPUPPCState *env = &cpu->env;
    target_ulong msr, new_msr, vector;

    /* new srr1 value excluding must-be-zero bits */
    msr = env->msr & ~0x783f0000ULL;

    /* new interrupt handler msr preserves ME unless explicitly overridden */
    new_msr = env->msr & ((target_ulong)1 << MSR_ME);

    /* HV emu assistance interrupt only exists on server arch 2.05 or later */
    if (excp == POWERPC_EXCP_HV_EMU) {
        excp = POWERPC_EXCP_PROGRAM;
    }

    vector = env->excp_vectors[excp];
    if (vector == (target_ulong)-1ULL) {
        cpu_abort(env_cpu(env),
                  "Raised an exception without defined vector %d\n", excp);
    }
    vector |= env->excp_prefix;

    switch (excp) {
    case POWERPC_EXCP_MCHECK:      /* Machine check exception */
        powerpc_mcheck_checkstop(env);
        /* machine check exceptions don't have ME set */
        new_msr &= ~((target_ulong)1 << MSR_ME);
        break;
    case POWERPC_EXCP_DSI:         /* Data storage exception */
        trace_ppc_excp_dsi(env->spr[SPR_DSISR], env->spr[SPR_DAR]);
        break;
    case POWERPC_EXCP_ISI:         /* Instruction storage exception */
        trace_ppc_excp_isi(msr, env->nip);
        msr |= env->error_code;
        break;
    case POWERPC_EXCP_EXTERNAL:    /* External input */
        break;
    case POWERPC_EXCP_ALIGN:       /* Alignment exception */
        /* Get rS/rD and rA from faulting opcode */
        /*
         * Note: the opcode fields will not be set properly for a
         * direct store load/store, but nobody cares as nobody
         * actually uses direct store segments.
         */
        env->spr[SPR_DSISR] |= (env->error_code & 0x03FF0000) >> 16;
        break;
    case POWERPC_EXCP_PROGRAM:     /* Program exception */
        switch (env->error_code & ~0xF) {
        case POWERPC_EXCP_FP:
            if (!FIELD_EX64_FE(env->msr) || !FIELD_EX64(env->msr, MSR, FP)) {
                trace_ppc_excp_fp_ignore();
                powerpc_reset_excp_state(cpu);
                return;
            }
            /*
             * NIP always points to the faulting instruction for FP exceptions,
             * so always use store_next and claim we are precise in the MSR.
             */
            msr |= 0x00100000;
            break;
        case POWERPC_EXCP_INVAL:
            trace_ppc_excp_inval(env->nip);
            msr |= 0x00080000;
            break;
        case POWERPC_EXCP_PRIV:
            msr |= 0x00040000;
            break;
        case POWERPC_EXCP_TRAP:
            msr |= 0x00020000;
            break;
        default:
            /* Should never occur */
            cpu_abort(env_cpu(env), "Invalid program exception %d. Aborting\n",
                      env->error_code);
            break;
        }
        break;
    case POWERPC_EXCP_SYSCALL:     /* System call exception */
    {
        int lev = env->error_code;

        if (lev == 1 && cpu->vhyp) {
            dump_hcall(env);
        } else {
            dump_syscall(env);
        }

        /*
         * We need to correct the NIP which in this case is supposed
         * to point to the next instruction
         */
        env->nip += 4;

        /*
         * The Virtual Open Firmware (VOF) relies on the 'sc 1'
         * instruction to communicate with QEMU. The pegasos2 machine
         * uses VOF and the 7xx CPUs, so although the 7xx don't have
         * HV mode, we need to keep hypercall support.
         */
        if (lev == 1 && cpu->vhyp) {
            PPCVirtualHypervisorClass *vhc =
                PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
            vhc->hypercall(cpu->vhyp, cpu);
            powerpc_reset_excp_state(cpu);
            return;
        }

        break;
    }
    case POWERPC_EXCP_FPU:         /* Floating-point unavailable exception */
    case POWERPC_EXCP_DECR:        /* Decrementer exception */
        break;
    case POWERPC_EXCP_RESET:       /* System reset exception */
        if (FIELD_EX64(env->msr, MSR, POW)) {
            cpu_abort(env_cpu(env),
                      "Trying to deliver power-saving system reset exception "
                      "%d with no HV support\n", excp);
        }
        break;
    case POWERPC_EXCP_TRACE:       /* Trace exception */
        break;
    case POWERPC_EXCP_IFTLB:       /* Instruction fetch TLB error */
    case POWERPC_EXCP_DLTLB:       /* Data load TLB miss */
    case POWERPC_EXCP_DSTLB:       /* Data store TLB miss */
        ppc_excp_debug_sw_tlb(env, excp);
        msr |= env->crf[0] << 28;
        msr |= env->error_code; /* key, D/I, S/L bits */
        /* Set way using a LRU mechanism */
        msr |= ((env->last_way + 1) & (env->nb_ways - 1)) << 17;
        break;
    case POWERPC_EXCP_IABR:        /* Instruction address breakpoint */
    case POWERPC_EXCP_SMI:         /* System management interrupt */
    case POWERPC_EXCP_THERM:       /* Thermal interrupt */
    case POWERPC_EXCP_PERFM:       /* Embedded performance monitor interrupt */
        cpu_abort(env_cpu(env), "%s exception not implemented\n",
                  powerpc_excp_name(excp));
        break;
    default:
        cpu_abort(env_cpu(env), "Invalid PowerPC exception %d. Aborting\n",
                  excp);
        break;
    }

    if (ppc_interrupts_little_endian(cpu, !!(new_msr & MSR_HVB))) {
        new_msr |= (target_ulong)1 << MSR_LE;
    }
    env->spr[SPR_SRR0] = env->nip;
    env->spr[SPR_SRR1] = msr;
    powerpc_set_excp_state(cpu, vector, new_msr);
}

static void powerpc_excp_74xx(PowerPCCPU *cpu, int excp)
{
    CPUPPCState *env = &cpu->env;
    target_ulong msr, new_msr, vector;

    /* new srr1 value excluding must-be-zero bits */
    msr = env->msr & ~0x783f0000ULL;

    /* new interrupt handler msr preserves ME unless explicitly overridden */
    new_msr = env->msr & ((target_ulong)1 << MSR_ME);

    /* HV emu assistance interrupt only exists on server arch 2.05 or later */
    if (excp == POWERPC_EXCP_HV_EMU) {
        excp = POWERPC_EXCP_PROGRAM;
    }

    vector = env->excp_vectors[excp];
    if (vector == (target_ulong)-1ULL) {
        cpu_abort(env_cpu(env),
                  "Raised an exception without defined vector %d\n", excp);
    }
    vector |= env->excp_prefix;

    switch (excp) {
    case POWERPC_EXCP_MCHECK:      /* Machine check exception */
        powerpc_mcheck_checkstop(env);
        /* machine check exceptions don't have ME set */
        new_msr &= ~((target_ulong)1 << MSR_ME);
        break;
    case POWERPC_EXCP_DSI:         /* Data storage exception */
        trace_ppc_excp_dsi(env->spr[SPR_DSISR], env->spr[SPR_DAR]);
        break;
    case POWERPC_EXCP_ISI:         /* Instruction storage exception */
        trace_ppc_excp_isi(msr, env->nip);
        msr |= env->error_code;
        break;
    case POWERPC_EXCP_EXTERNAL:    /* External input */
        break;
    case POWERPC_EXCP_ALIGN:       /* Alignment exception */
        /* Get rS/rD and rA from faulting opcode */
        /*
         * Note: the opcode fields will not be set properly for a
         * direct store load/store, but nobody cares as nobody
         * actually uses direct store segments.
         */
        env->spr[SPR_DSISR] |= (env->error_code & 0x03FF0000) >> 16;
        break;
    case POWERPC_EXCP_PROGRAM:     /* Program exception */
        switch (env->error_code & ~0xF) {
        case POWERPC_EXCP_FP:
            if (!FIELD_EX64_FE(env->msr) || !FIELD_EX64(env->msr, MSR, FP)) {
                trace_ppc_excp_fp_ignore();
                powerpc_reset_excp_state(cpu);
                return;
            }
            /*
             * NIP always points to the faulting instruction for FP exceptions,
             * so always use store_next and claim we are precise in the MSR.
             */
            msr |= 0x00100000;
            break;
        case POWERPC_EXCP_INVAL:
            trace_ppc_excp_inval(env->nip);
            msr |= 0x00080000;
            break;
        case POWERPC_EXCP_PRIV:
            msr |= 0x00040000;
            break;
        case POWERPC_EXCP_TRAP:
            msr |= 0x00020000;
            break;
        default:
            /* Should never occur */
            cpu_abort(env_cpu(env), "Invalid program exception %d. Aborting\n",
                      env->error_code);
            break;
        }
        break;
    case POWERPC_EXCP_SYSCALL:     /* System call exception */
    {
        int lev = env->error_code;

        if (lev == 1 && cpu->vhyp) {
            dump_hcall(env);
        } else {
            dump_syscall(env);
        }

        /*
         * We need to correct the NIP which in this case is supposed
         * to point to the next instruction
         */
        env->nip += 4;

        /*
         * The Virtual Open Firmware (VOF) relies on the 'sc 1'
         * instruction to communicate with QEMU. The pegasos2 machine
         * uses VOF and the 74xx CPUs, so although the 74xx don't have
         * HV mode, we need to keep hypercall support.
         */
        if (lev == 1 && cpu->vhyp) {
            PPCVirtualHypervisorClass *vhc =
                PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
            vhc->hypercall(cpu->vhyp, cpu);
            powerpc_reset_excp_state(cpu);
            return;
        }

        break;
    }
    case POWERPC_EXCP_FPU:         /* Floating-point unavailable exception */
    case POWERPC_EXCP_DECR:        /* Decrementer exception */
        break;
    case POWERPC_EXCP_RESET:       /* System reset exception */
        if (FIELD_EX64(env->msr, MSR, POW)) {
            cpu_abort(env_cpu(env),
                      "Trying to deliver power-saving system reset "
                      "exception %d with no HV support\n", excp);
        }
        break;
    case POWERPC_EXCP_TRACE:       /* Trace exception */
        break;
    case POWERPC_EXCP_VPU:         /* Vector unavailable exception */
        break;
    case POWERPC_EXCP_IABR:        /* Instruction address breakpoint */
    case POWERPC_EXCP_SMI:         /* System management interrupt */
    case POWERPC_EXCP_THERM:       /* Thermal interrupt */
    case POWERPC_EXCP_PERFM:       /* Embedded performance monitor interrupt */
    case POWERPC_EXCP_VPUA:        /* Vector assist exception */
        cpu_abort(env_cpu(env), "%s exception not implemented\n",
                  powerpc_excp_name(excp));
        break;
    default:
        cpu_abort(env_cpu(env), "Invalid PowerPC exception %d. Aborting\n",
                  excp);
        break;
    }

    if (ppc_interrupts_little_endian(cpu, !!(new_msr & MSR_HVB))) {
        new_msr |= (target_ulong)1 << MSR_LE;
    }
    env->spr[SPR_SRR0] = env->nip;
    env->spr[SPR_SRR1] = msr;
    powerpc_set_excp_state(cpu, vector, new_msr);
}
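
/*
 * Illustrative note (added commentary, not from the original source): on the
 * pegasos2 machine, VOF communicates with QEMU through 'sc 1' (lev == 1); in
 * the 7xx/74xx handlers above such a call is routed to cpu->vhyp's
 * hypercall() callback and the exception state is reset, so the guest's own
 * system call vector is never entered for these firmware calls.
 */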
static void powerpc_excp_booke(PowerPCCPU *cpu, int excp)
{
    CPUPPCState *env = &cpu->env;
    target_ulong msr, new_msr, vector;
    int srr0 = SPR_SRR0, srr1 = SPR_SRR1;

    /*
     * Book E does not play games with certain bits of xSRR1 being MSR save
     * bits and others being error status. xSRR1 is the old MSR, period.
     */
    msr = env->msr;

    /* new interrupt handler msr preserves ME unless explicitly overridden */
    new_msr = env->msr & ((target_ulong)1 << MSR_ME);

    /* HV emu assistance interrupt only exists on server arch 2.05 or later */
    if (excp == POWERPC_EXCP_HV_EMU) {
        excp = POWERPC_EXCP_PROGRAM;
    }

#ifdef TARGET_PPC64
    /*
     * SPEU and VPU share the same IVOR but they exist in different
     * processors. SPEU is e500v1/2 only and VPU is e6500 only.
     */
    if (excp == POWERPC_EXCP_VPU) {
        excp = POWERPC_EXCP_SPEU;
    }
#endif

    vector = env->excp_vectors[excp];
    if (vector == (target_ulong)-1ULL) {
        cpu_abort(env_cpu(env),
                  "Raised an exception without defined vector %d\n", excp);
    }
    vector |= env->excp_prefix;

    switch (excp) {
    case POWERPC_EXCP_CRITICAL:    /* Critical input */
        srr0 = SPR_BOOKE_CSRR0;
        srr1 = SPR_BOOKE_CSRR1;
        break;
    case POWERPC_EXCP_MCHECK:      /* Machine check exception */
        powerpc_mcheck_checkstop(env);
        /* machine check exceptions don't have ME set */
        new_msr &= ~((target_ulong)1 << MSR_ME);

        /* FIXME: choose one or the other based on CPU type */
        srr0 = SPR_BOOKE_MCSRR0;
        srr1 = SPR_BOOKE_MCSRR1;

        env->spr[SPR_BOOKE_CSRR0] = env->nip;
        env->spr[SPR_BOOKE_CSRR1] = msr;

        break;
    case POWERPC_EXCP_DSI:         /* Data storage exception */
        trace_ppc_excp_dsi(env->spr[SPR_BOOKE_ESR], env->spr[SPR_BOOKE_DEAR]);
        break;
    case POWERPC_EXCP_ISI:         /* Instruction storage exception */
        trace_ppc_excp_isi(msr, env->nip);
        break;
    case POWERPC_EXCP_EXTERNAL:    /* External input */
        if (env->mpic_proxy) {
            CPUState *cs = env_cpu(env);
            /* IACK the IRQ on delivery */
            env->spr[SPR_BOOKE_EPR] = ldl_phys(cs->as, env->mpic_iack);
        }
        break;
    case POWERPC_EXCP_ALIGN:       /* Alignment exception */
        break;
    case POWERPC_EXCP_PROGRAM:     /* Program exception */
        switch (env->error_code & ~0xF) {
        case POWERPC_EXCP_FP:
            if (!FIELD_EX64_FE(env->msr) || !FIELD_EX64(env->msr, MSR, FP)) {
                trace_ppc_excp_fp_ignore();
                powerpc_reset_excp_state(cpu);
                return;
            }
            /*
             * NIP always points to the faulting instruction for FP exceptions,
             * so always use store_next and claim we are precise in the MSR.
             */
            msr |= 0x00100000;
            env->spr[SPR_BOOKE_ESR] = ESR_FP;
            break;
        case POWERPC_EXCP_INVAL:
            trace_ppc_excp_inval(env->nip);
            msr |= 0x00080000;
            env->spr[SPR_BOOKE_ESR] = ESR_PIL;
            break;
        case POWERPC_EXCP_PRIV:
            msr |= 0x00040000;
            env->spr[SPR_BOOKE_ESR] = ESR_PPR;
            break;
        case POWERPC_EXCP_TRAP:
            msr |= 0x00020000;
            env->spr[SPR_BOOKE_ESR] = ESR_PTR;
            break;
        default:
            /* Should never occur */
            cpu_abort(env_cpu(env), "Invalid program exception %d. Aborting\n",
                      env->error_code);
            break;
        }
        break;
    case POWERPC_EXCP_SYSCALL:     /* System call exception */
        dump_syscall(env);

        /*
         * We need to correct the NIP which in this case is supposed
         * to point to the next instruction
         */
        env->nip += 4;
        break;
    case POWERPC_EXCP_FPU:         /* Floating-point unavailable exception */
    case POWERPC_EXCP_APU:         /* Auxiliary processor unavailable */
    case POWERPC_EXCP_DECR:        /* Decrementer exception */
        break;
    case POWERPC_EXCP_FIT:         /* Fixed-interval timer interrupt */
        /* FIT on 4xx */
        trace_ppc_excp_print("FIT");
        break;
    case POWERPC_EXCP_WDT:         /* Watchdog timer interrupt */
        trace_ppc_excp_print("WDT");
        srr0 = SPR_BOOKE_CSRR0;
        srr1 = SPR_BOOKE_CSRR1;
        break;
    case POWERPC_EXCP_DTLB:        /* Data TLB error */
    case POWERPC_EXCP_ITLB:        /* Instruction TLB error */
        break;
    case POWERPC_EXCP_DEBUG:       /* Debug interrupt */
        if (env->flags & POWERPC_FLAG_DE) {
            /* FIXME: choose one or the other based on CPU type */
            srr0 = SPR_BOOKE_DSRR0;
            srr1 = SPR_BOOKE_DSRR1;

            env->spr[SPR_BOOKE_CSRR0] = env->nip;
            env->spr[SPR_BOOKE_CSRR1] = msr;

            /* DBSR already modified by caller */
        } else {
            cpu_abort(env_cpu(env),
                      "Debug exception triggered on unsupported model\n");
        }
        break;
    case POWERPC_EXCP_SPEU:    /* SPE/embedded floating-point unavailable/VPU */
        env->spr[SPR_BOOKE_ESR] = ESR_SPV;
        break;
    case POWERPC_EXCP_DOORI:       /* Embedded doorbell interrupt */
        break;
    case POWERPC_EXCP_DOORCI:      /* Embedded doorbell critical interrupt */
        srr0 = SPR_BOOKE_CSRR0;
        srr1 = SPR_BOOKE_CSRR1;
        break;
    case POWERPC_EXCP_RESET:       /* System reset exception */
        if (FIELD_EX64(env->msr, MSR, POW)) {
            cpu_abort(env_cpu(env),
                      "Trying to deliver power-saving system reset "
                      "exception %d with no HV support\n", excp);
        }
        break;
    case POWERPC_EXCP_EFPDI:       /* Embedded floating-point data interrupt */
    case POWERPC_EXCP_EFPRI:       /* Embedded floating-point round interrupt */
        cpu_abort(env_cpu(env), "%s exception not implemented\n",
                  powerpc_excp_name(excp));
        break;
    default:
        cpu_abort(env_cpu(env), "Invalid PowerPC exception %d. Aborting\n",
                  excp);
        break;
    }

#ifdef TARGET_PPC64
    if (env->spr[SPR_BOOKE_EPCR] & EPCR_ICM) {
        /* Cat.64-bit: EPCR.ICM is copied to MSR.CM */
        new_msr |= (target_ulong)1 << MSR_CM;
    } else {
        vector = (uint32_t)vector;
    }
#endif

    env->spr[srr0] = env->nip;
    env->spr[srr1] = msr;
    powerpc_set_excp_state(cpu, vector, new_msr);
}

/*
 * When running a nested HV guest under vhyp, external interrupts are
 * delivered as HVIRT.
 */
static bool books_vhyp_promotes_external_to_hvirt(PowerPCCPU *cpu)
{
    if (cpu->vhyp) {
        return vhyp_cpu_in_nested(cpu);
    }
    return false;
}

#ifdef TARGET_PPC64
/*
 * When running under vhyp, hcalls are always intercepted and sent to the
 * vhc->hypercall handler.
 */
static bool books_vhyp_handles_hcall(PowerPCCPU *cpu)
{
    if (cpu->vhyp) {
        return !vhyp_cpu_in_nested(cpu);
    }
    return false;
}

/*
 * When running a nested KVM HV guest under vhyp, HV exceptions are not
 * delivered to the guest (because there is no concept of HV support), but
 * rather they are sent to the vhyp to exit from the L2 back to the L1 and
 * return from the H_ENTER_NESTED hypercall.
 */
static bool books_vhyp_handles_hv_excp(PowerPCCPU *cpu)
{
    if (cpu->vhyp) {
        return vhyp_cpu_in_nested(cpu);
    }
    return false;
}

#ifdef CONFIG_TCG
static bool is_prefix_insn(CPUPPCState *env, uint32_t insn)
{
    if (!(env->insns_flags2 & PPC2_ISA310)) {
        return false;
    }
    return ((insn & 0xfc000000) == 0x04000000);
}
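
/*
 * Illustrative note (added commentary, not from the original source): ISA
 * v3.1 prefixed instructions such as paddi start with a prefix word whose
 * primary opcode is 1, which is exactly what the 0xfc000000/0x04000000 mask
 * test above detects.
 */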

static bool is_prefix_insn_excp(PowerPCCPU *cpu, int excp)
{
    CPUPPCState *env = &cpu->env;

    if (!(env->insns_flags2 & PPC2_ISA310)) {
        return false;
    }

    if (!tcg_enabled()) {
        /*
         * This does not load instructions and set the prefix bit correctly
         * for injected interrupts with KVM. That may have to be discovered
         * and set by the KVM layer before injecting.
         */
        return false;
    }

    switch (excp) {
    case POWERPC_EXCP_MCHECK:
        if (!(env->error_code & PPC_BIT(42))) {
            /*
             * Fetch attempt caused a machine check, so attempting to fetch
             * again would cause a recursive machine check.
             */
            return false;
        }
        break;
    case POWERPC_EXCP_HDSI:
        /* HDSI PRTABLE_FAULT has the originating access type in error_code */
        if ((env->spr[SPR_HDSISR] & DSISR_PRTABLE_FAULT) &&
            (env->error_code == MMU_INST_FETCH)) {
            /*
             * Fetch failed due to partition scope translation, so prefix
             * indication is not relevant (and attempting to load the
             * instruction at NIP would cause recursive faults with the same
             * translation).
             */
            return false;
        }
        break;

    case POWERPC_EXCP_DSI:
    case POWERPC_EXCP_DSEG:
    case POWERPC_EXCP_ALIGN:
    case POWERPC_EXCP_PROGRAM:
    case POWERPC_EXCP_FPU:
    case POWERPC_EXCP_TRACE:
    case POWERPC_EXCP_HV_EMU:
    case POWERPC_EXCP_VPU:
    case POWERPC_EXCP_VSXU:
    case POWERPC_EXCP_FU:
    case POWERPC_EXCP_HV_FU:
        break;
    default:
        return false;
    }

    return is_prefix_insn(env, ppc_ldl_code(env, env->nip));
}
#else
static bool is_prefix_insn_excp(PowerPCCPU *cpu, int excp)
{
    return false;
}
#endif

static void powerpc_excp_books(PowerPCCPU *cpu, int excp)
{
    CPUPPCState *env = &cpu->env;
    target_ulong msr, new_msr, vector;
    int srr0 = SPR_SRR0, srr1 = SPR_SRR1, lev = -1;

    /* new srr1 value excluding must-be-zero bits */
    msr = env->msr & ~0x783f0000ULL;

    /*
     * new interrupt handler msr preserves HV and ME unless explicitly
     * overridden
     */
    new_msr = env->msr & (((target_ulong)1 << MSR_ME) | MSR_HVB);

    /*
     * check for special resume at 0x100 from doze/nap/sleep/winkle on
     * P7/P8/P9
     */
    if (env->resume_as_sreset) {
        excp = powerpc_reset_wakeup(env, excp, &msr);
    }

    /*
     * We don't want to generate a Hypervisor Emulation Assistance
     * Interrupt if we don't have HVB in msr_mask (PAPR mode),
     * unless running a nested-hv guest, in which case the L1
     * kernel wants the interrupt.
     */
    if (excp == POWERPC_EXCP_HV_EMU && !(env->msr_mask & MSR_HVB) &&
        !books_vhyp_handles_hv_excp(cpu)) {
        excp = POWERPC_EXCP_PROGRAM;
    }

    vector = env->excp_vectors[excp];
    if (vector == (target_ulong)-1ULL) {
        cpu_abort(env_cpu(env),
                  "Raised an exception without defined vector %d\n", excp);
    }
    vector |= env->excp_prefix;

    if (is_prefix_insn_excp(cpu, excp)) {
        msr |= PPC_BIT(34);
    }

    switch (excp) {
    case POWERPC_EXCP_MCHECK:      /* Machine check exception */
        powerpc_mcheck_checkstop(env);
        if (env->msr_mask & MSR_HVB) {
            /*
             * ISA specifies HV, but can be delivered to guest with HV
             * clear (e.g., see FWNMI in PAPR).
             */
            new_msr |= (target_ulong)MSR_HVB;
        }
        /* machine check exceptions don't have ME set */
        new_msr &= ~((target_ulong)1 << MSR_ME);

        msr |= env->error_code;
        break;

    case POWERPC_EXCP_DSI:         /* Data storage exception */
        trace_ppc_excp_dsi(env->spr[SPR_DSISR], env->spr[SPR_DAR]);
        break;
    case POWERPC_EXCP_ISI:         /* Instruction storage exception */
        trace_ppc_excp_isi(msr, env->nip);
        msr |= env->error_code;
        break;
    case POWERPC_EXCP_EXTERNAL:    /* External input */
    {
        bool lpes0;

        /* LPES0 is only taken into consideration if we support HV mode */
        if (!env->has_hv_mode) {
            break;
        }
        lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
        if (!lpes0) {
            new_msr |= (target_ulong)MSR_HVB;
            new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
            srr0 = SPR_HSRR0;
            srr1 = SPR_HSRR1;
        }
        break;
    }
    case POWERPC_EXCP_ALIGN:       /* Alignment exception */
        /* Optional DSISR update was removed from ISA v3.0 */
        if (!(env->insns_flags2 & PPC2_ISA300)) {
            /* Get rS/rD and rA from faulting opcode */
            /*
             * Note: the opcode fields will not be set properly for a
             * direct store load/store, but nobody cares as nobody
             * actually uses direct store segments.
             */
            env->spr[SPR_DSISR] |= (env->error_code & 0x03FF0000) >> 16;
        }
        break;
    case POWERPC_EXCP_PROGRAM:     /* Program exception */
        switch (env->error_code & ~0xF) {
        case POWERPC_EXCP_FP:
            if (!FIELD_EX64_FE(env->msr) || !FIELD_EX64(env->msr, MSR, FP)) {
                trace_ppc_excp_fp_ignore();
                powerpc_reset_excp_state(cpu);
                return;
            }
            /*
             * NIP always points to the faulting instruction for FP exceptions,
             * so always use store_next and claim we are precise in the MSR.
             */
            msr |= 0x00100000;
            break;
        case POWERPC_EXCP_INVAL:
            trace_ppc_excp_inval(env->nip);
            msr |= 0x00080000;
            break;
        case POWERPC_EXCP_PRIV:
            msr |= 0x00040000;
            break;
        case POWERPC_EXCP_TRAP:
            msr |= 0x00020000;
            break;
        default:
            /* Should never occur */
            cpu_abort(env_cpu(env), "Invalid program exception %d. Aborting\n",
                      env->error_code);
            break;
        }
        break;
    case POWERPC_EXCP_SYSCALL:     /* System call exception */
        lev = env->error_code;

        if (lev == 1 && cpu->vhyp) {
            dump_hcall(env);
        } else {
            dump_syscall(env);
        }

        /*
         * We need to correct the NIP which in this case is supposed
         * to point to the next instruction
         */
        env->nip += 4;

        /* "PAPR mode" built-in hypercall emulation */
        if (lev == 1 && books_vhyp_handles_hcall(cpu)) {
            PPCVirtualHypervisorClass *vhc =
                PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
            vhc->hypercall(cpu->vhyp, cpu);
            powerpc_reset_excp_state(cpu);
            return;
        }
        if (env->insns_flags2 & PPC2_ISA310) {
            /* ISAv3.1 puts LEV into SRR1 */
            msr |= lev << 20;
        }
        if (lev == 1) {
            new_msr |= (target_ulong)MSR_HVB;
        }
        break;
    case POWERPC_EXCP_SYSCALL_VECTORED:    /* scv exception */
        lev = env->error_code;
        dump_syscall(env);
        env->nip += 4;
        new_msr |= env->msr & ((target_ulong)1 << MSR_EE);
        new_msr |= env->msr & ((target_ulong)1 << MSR_RI);

        vector += lev * 0x20;

        env->lr = env->nip;
        env->ctr = msr;
        break;
    case POWERPC_EXCP_FPU:         /* Floating-point unavailable exception */
    case POWERPC_EXCP_DECR:        /* Decrementer exception */
        break;
    case POWERPC_EXCP_RESET:       /* System reset exception */
        /* A power-saving exception sets ME, otherwise it is unchanged */
        if (FIELD_EX64(env->msr, MSR, POW)) {
            /* indicate that we resumed from power save mode */
            msr |= 0x10000;
            new_msr |= ((target_ulong)1 << MSR_ME);
        }
        if (env->msr_mask & MSR_HVB) {
            /*
             * ISA specifies HV, but can be delivered to guest with HV
             * clear (e.g., see FWNMI in PAPR, NMI injection in QEMU).
             */
            new_msr |= (target_ulong)MSR_HVB;
        } else {
            if (FIELD_EX64(env->msr, MSR, POW)) {
                cpu_abort(env_cpu(env),
                          "Trying to deliver power-saving system reset "
                          "exception %d with no HV support\n", excp);
            }
        }
        break;
    case POWERPC_EXCP_TRACE:       /* Trace exception */
        msr |= env->error_code;
        /* fall through */
    case POWERPC_EXCP_DSEG:        /* Data segment exception */
    case POWERPC_EXCP_ISEG:        /* Instruction segment exception */
    case POWERPC_EXCP_SDOOR:       /* Doorbell interrupt */
    case POWERPC_EXCP_PERFM:       /* Performance monitor interrupt */
        break;
    case POWERPC_EXCP_HISI:        /* Hypervisor instruction storage exception */
        msr |= env->error_code;
        /* fall through */
    case POWERPC_EXCP_HDECR:       /* Hypervisor decrementer exception */
    case POWERPC_EXCP_HDSI:        /* Hypervisor data storage exception */
    case POWERPC_EXCP_SDOOR_HV:    /* Hypervisor Doorbell interrupt */
    case POWERPC_EXCP_HVIRT:       /* Hypervisor virtualization */
        srr0 = SPR_HSRR0;
        srr1 = SPR_HSRR1;
        new_msr |= (target_ulong)MSR_HVB;
        new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
        break;
#ifdef CONFIG_TCG
    case POWERPC_EXCP_HV_EMU: {
        uint32_t insn = ppc_ldl_code(env, env->nip);
        env->spr[SPR_HEIR] = insn;
        if (is_prefix_insn(env, insn)) {
            uint32_t insn2 = ppc_ldl_code(env, env->nip + 4);
            env->spr[SPR_HEIR] <<= 32;
            env->spr[SPR_HEIR] |= insn2;
        }
        srr0 = SPR_HSRR0;
        srr1 = SPR_HSRR1;
        new_msr |= (target_ulong)MSR_HVB;
        new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
        break;
    }
#endif
    case POWERPC_EXCP_VPU:         /* Vector unavailable exception */
    case POWERPC_EXCP_VSXU:        /* VSX unavailable exception */
    case POWERPC_EXCP_FU:          /* Facility unavailable exception */
        env->spr[SPR_FSCR] |= ((target_ulong)env->error_code << 56);
        break;
    case POWERPC_EXCP_HV_FU:       /* Hypervisor Facility Unavailable Exception */
        env->spr[SPR_HFSCR] |= ((target_ulong)env->error_code << FSCR_IC_POS);
        srr0 = SPR_HSRR0;
        srr1 = SPR_HSRR1;
        new_msr |= (target_ulong)MSR_HVB;
        new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
        break;
    case POWERPC_EXCP_PERFM_EBB:        /* Performance Monitor EBB Exception */
    case POWERPC_EXCP_EXTERNAL_EBB:     /* External EBB Exception */
        env->spr[SPR_BESCR] &= ~BESCR_GE;

        /*
         * Save NIP for rfebb insn in SPR_EBBRR. Next nip is
         * stored in the EBB Handler SPR_EBBHR.
         */
        env->spr[SPR_EBBRR] = env->nip;
        powerpc_set_excp_state(cpu, env->spr[SPR_EBBHR], env->msr);

        /*
         * This exception is handled in userspace. No need to proceed.
         */
        return;
    case POWERPC_EXCP_THERM:       /* Thermal interrupt */
    case POWERPC_EXCP_VPUA:        /* Vector assist exception */
    case POWERPC_EXCP_MAINT:       /* Maintenance exception */
    case POWERPC_EXCP_HV_MAINT:    /* Hypervisor Maintenance exception */
        cpu_abort(env_cpu(env), "%s exception not implemented\n",
                  powerpc_excp_name(excp));
        break;
    default:
        cpu_abort(env_cpu(env), "Invalid PowerPC exception %d. Aborting\n",
                  excp);
        break;
    }

    if (ppc_interrupts_little_endian(cpu, !!(new_msr & MSR_HVB))) {
        new_msr |= (target_ulong)1 << MSR_LE;
    }
    new_msr |= (target_ulong)1 << MSR_SF;

    if (excp != POWERPC_EXCP_SYSCALL_VECTORED) {
        env->spr[srr0] = env->nip;
        env->spr[srr1] = msr;
    }

    if ((new_msr & MSR_HVB) && books_vhyp_handles_hv_excp(cpu)) {
        PPCVirtualHypervisorClass *vhc =
            PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
        /* Deliver interrupt to L1 by returning from the H_ENTER_NESTED call */
        vhc->deliver_hv_excp(cpu, excp);
        powerpc_reset_excp_state(cpu);
    } else {
        /* Sanity check */
        if (!(env->msr_mask & MSR_HVB) && srr0 == SPR_HSRR0) {
            cpu_abort(env_cpu(env), "Trying to deliver HV exception (HSRR) %d "
                      "with no HV support\n", excp);
        }
        /* This can update new_msr and vector if AIL applies */
        ppc_excp_apply_ail(cpu, excp, msr, &new_msr, &vector);
        powerpc_set_excp_state(cpu, vector, new_msr);
    }
}
#else
static inline void powerpc_excp_books(PowerPCCPU *cpu, int excp)
{
    g_assert_not_reached();
}
#endif /* TARGET_PPC64 */

static void powerpc_excp(PowerPCCPU *cpu, int excp)
{
    CPUPPCState *env = &cpu->env;

    if (excp <= POWERPC_EXCP_NONE || excp >= POWERPC_EXCP_NB) {
        cpu_abort(env_cpu(env), "Invalid PowerPC exception %d. Aborting\n",
                  excp);
    }

    qemu_log_mask(CPU_LOG_INT, "Raise exception at " TARGET_FMT_lx
                  " => %s (%d) error=%02x\n", env->nip, powerpc_excp_name(excp),
                  excp, env->error_code);
    env->excp_stats[excp]++;

    switch (env->excp_model) {
    case POWERPC_EXCP_40x:
        powerpc_excp_40x(cpu, excp);
        break;
    case POWERPC_EXCP_6xx:
        powerpc_excp_6xx(cpu, excp);
        break;
    case POWERPC_EXCP_7xx:
        powerpc_excp_7xx(cpu, excp);
        break;
    case POWERPC_EXCP_74xx:
        powerpc_excp_74xx(cpu, excp);
        break;
    case POWERPC_EXCP_BOOKE:
        powerpc_excp_booke(cpu, excp);
        break;
    case POWERPC_EXCP_970:
    case POWERPC_EXCP_POWER7:
    case POWERPC_EXCP_POWER8:
    case POWERPC_EXCP_POWER9:
    case POWERPC_EXCP_POWER10:
        powerpc_excp_books(cpu, excp);
        break;
    default:
        g_assert_not_reached();
    }
}

void ppc_cpu_do_interrupt(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);

    powerpc_excp(cpu, cs->exception_index);
}

#ifdef TARGET_PPC64
#define P7_UNUSED_INTERRUPTS \
    (PPC_INTERRUPT_RESET | PPC_INTERRUPT_HVIRT | PPC_INTERRUPT_CEXT |       \
     PPC_INTERRUPT_WDT | PPC_INTERRUPT_CDOORBELL | PPC_INTERRUPT_FIT |      \
     PPC_INTERRUPT_PIT | PPC_INTERRUPT_DOORBELL | PPC_INTERRUPT_HDOORBELL | \
     PPC_INTERRUPT_THERM | PPC_INTERRUPT_EBB)

static int p7_interrupt_powersave(CPUPPCState *env)
{
    if ((env->pending_interrupts & PPC_INTERRUPT_EXT) &&
        (env->spr[SPR_LPCR] & LPCR_P7_PECE0)) {
        return PPC_INTERRUPT_EXT;
    }
    if ((env->pending_interrupts & PPC_INTERRUPT_DECR) &&
        (env->spr[SPR_LPCR] & LPCR_P7_PECE1)) {
        return PPC_INTERRUPT_DECR;
    }
    if ((env->pending_interrupts & PPC_INTERRUPT_MCK) &&
        (env->spr[SPR_LPCR] & LPCR_P7_PECE2)) {
        return PPC_INTERRUPT_MCK;
    }
    if ((env->pending_interrupts & PPC_INTERRUPT_HMI) &&
        (env->spr[SPR_LPCR] & LPCR_P7_PECE2)) {
        return PPC_INTERRUPT_HMI;
    }
    if (env->pending_interrupts & PPC_INTERRUPT_RESET) {
        return PPC_INTERRUPT_RESET;
    }
    return 0;
}

static int p7_next_unmasked_interrupt(CPUPPCState *env)
{
    CPUState *cs = env_cpu(env);

    /* Ignore MSR[EE] when coming out of some power management states */
    bool msr_ee = FIELD_EX64(env->msr, MSR, EE) || env->resume_as_sreset;

    assert((env->pending_interrupts & P7_UNUSED_INTERRUPTS) == 0);

    if (cs->halted) {
        /* LPCR[PECE] controls which interrupts can exit power-saving mode */
        return p7_interrupt_powersave(env);
    }

    /* Machine check exception */
    if (env->pending_interrupts & PPC_INTERRUPT_MCK) {
        return PPC_INTERRUPT_MCK;
    }

    /* Hypervisor decrementer exception */
    if (env->pending_interrupts & PPC_INTERRUPT_HDECR) {
        /* LPCR will be clear when not supported so this will work */
        bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE);
        if ((msr_ee || !FIELD_EX64_HV(env->msr)) && hdice) {
            /* HDEC clears on delivery */
            return PPC_INTERRUPT_HDECR;
        }
    }

    /* External interrupt can ignore MSR:EE under some circumstances */
    if (env->pending_interrupts & PPC_INTERRUPT_EXT) {
        bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
        bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC);
        /* HEIC blocks delivery to the hypervisor */
        if ((msr_ee && !(heic && FIELD_EX64_HV(env->msr) &&
                         !FIELD_EX64(env->msr, MSR, PR))) ||
            (env->has_hv_mode && !FIELD_EX64_HV(env->msr) && !lpes0)) {
            return PPC_INTERRUPT_EXT;
        }
    }
    if (msr_ee != 0) {
        /* Decrementer exception */
        if (env->pending_interrupts & PPC_INTERRUPT_DECR) {
            return PPC_INTERRUPT_DECR;
        }
        if (env->pending_interrupts & PPC_INTERRUPT_PERFM) {
            return PPC_INTERRUPT_PERFM;
        }
    }

    return 0;
}

#define P8_UNUSED_INTERRUPTS \
    (PPC_INTERRUPT_RESET | PPC_INTERRUPT_DEBUG | PPC_INTERRUPT_HVIRT |  \
     PPC_INTERRUPT_CEXT | PPC_INTERRUPT_WDT | PPC_INTERRUPT_CDOORBELL | \
     PPC_INTERRUPT_FIT | PPC_INTERRUPT_PIT | PPC_INTERRUPT_THERM)

static int p8_interrupt_powersave(CPUPPCState *env)
{
    if ((env->pending_interrupts & PPC_INTERRUPT_EXT) &&
        (env->spr[SPR_LPCR] & LPCR_P8_PECE2)) {
        return PPC_INTERRUPT_EXT;
    }
    if ((env->pending_interrupts & PPC_INTERRUPT_DECR) &&
        (env->spr[SPR_LPCR] & LPCR_P8_PECE3)) {
        return PPC_INTERRUPT_DECR;
    }
    if ((env->pending_interrupts & PPC_INTERRUPT_MCK) &&
        (env->spr[SPR_LPCR] & LPCR_P8_PECE4)) {
        return PPC_INTERRUPT_MCK;
    }
    if ((env->pending_interrupts & PPC_INTERRUPT_HMI) &&
        (env->spr[SPR_LPCR] & LPCR_P8_PECE4)) {
        return PPC_INTERRUPT_HMI;
    }
    if ((env->pending_interrupts & PPC_INTERRUPT_DOORBELL) &&
        (env->spr[SPR_LPCR] & LPCR_P8_PECE0)) {
        return PPC_INTERRUPT_DOORBELL;
    }
    if ((env->pending_interrupts & PPC_INTERRUPT_HDOORBELL) &&
        (env->spr[SPR_LPCR] & LPCR_P8_PECE1)) {
        return PPC_INTERRUPT_HDOORBELL;
    }
    if (env->pending_interrupts & PPC_INTERRUPT_RESET) {
        return PPC_INTERRUPT_RESET;
    }
    return 0;
}

static int p8_next_unmasked_interrupt(CPUPPCState *env)
{
    CPUState *cs = env_cpu(env);

    /* Ignore MSR[EE] when coming out of some power management states */
    bool msr_ee = FIELD_EX64(env->msr, MSR, EE) || env->resume_as_sreset;

    assert((env->pending_interrupts & P8_UNUSED_INTERRUPTS) == 0);

    if (cs->halted) {
        /* LPCR[PECE] controls which interrupts can exit power-saving mode */
        return p8_interrupt_powersave(env);
    }

    /* Machine check exception */
    if (env->pending_interrupts & PPC_INTERRUPT_MCK) {
        return PPC_INTERRUPT_MCK;
    }

    /* Hypervisor decrementer exception */
    if (env->pending_interrupts & PPC_INTERRUPT_HDECR) {
        /* LPCR will be clear when not supported so this will work */
        bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE);
        if ((msr_ee || !FIELD_EX64_HV(env->msr)) && hdice) {
            /* HDEC clears on delivery */
            return PPC_INTERRUPT_HDECR;
        }
    }

    /* External interrupt can ignore MSR:EE under some circumstances */
    if (env->pending_interrupts & PPC_INTERRUPT_EXT) {
        bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
        bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC);
        /* HEIC blocks delivery to the hypervisor */
        if ((msr_ee && !(heic && FIELD_EX64_HV(env->msr) &&
                         !FIELD_EX64(env->msr, MSR, PR))) ||
            (env->has_hv_mode && !FIELD_EX64_HV(env->msr) && !lpes0)) {
            return PPC_INTERRUPT_EXT;
        }
    }
    if (msr_ee != 0) {
        /* Decrementer exception */
        if (env->pending_interrupts & PPC_INTERRUPT_DECR) {
            return PPC_INTERRUPT_DECR;
        }
        if (env->pending_interrupts & PPC_INTERRUPT_DOORBELL) {
            return PPC_INTERRUPT_DOORBELL;
        }
        if (env->pending_interrupts & PPC_INTERRUPT_HDOORBELL) {
            return PPC_INTERRUPT_HDOORBELL;
        }
        if (env->pending_interrupts & PPC_INTERRUPT_PERFM) {
            return PPC_INTERRUPT_PERFM;
        }
        /* EBB exception */
        if (env->pending_interrupts & PPC_INTERRUPT_EBB) {
            /*
             * EBB exception must be taken in problem state and
             * with BESCR_GE set.
             */
            if (FIELD_EX64(env->msr, MSR, PR) &&
                (env->spr[SPR_BESCR] & BESCR_GE)) {
                return PPC_INTERRUPT_EBB;
            }
        }
    }

    return 0;
}

#define P9_UNUSED_INTERRUPTS \
    (PPC_INTERRUPT_RESET | PPC_INTERRUPT_DEBUG | PPC_INTERRUPT_CEXT |  \
     PPC_INTERRUPT_WDT | PPC_INTERRUPT_CDOORBELL | PPC_INTERRUPT_FIT | \
     PPC_INTERRUPT_PIT | PPC_INTERRUPT_THERM)

static int p9_interrupt_powersave(CPUPPCState *env)
{
    /* External Exception */
    if ((env->pending_interrupts & PPC_INTERRUPT_EXT) &&
        (env->spr[SPR_LPCR] & LPCR_EEE)) {
        bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC);
        if (!heic || !FIELD_EX64_HV(env->msr) ||
            FIELD_EX64(env->msr, MSR, PR)) {
            return PPC_INTERRUPT_EXT;
        }
    }
    /* Decrementer Exception */
    if ((env->pending_interrupts & PPC_INTERRUPT_DECR) &&
        (env->spr[SPR_LPCR] & LPCR_DEE)) {
        return PPC_INTERRUPT_DECR;
    }
    /* Machine Check or Hypervisor Maintenance Exception */
    if (env->spr[SPR_LPCR] & LPCR_OEE) {
        if (env->pending_interrupts & PPC_INTERRUPT_MCK) {
            return PPC_INTERRUPT_MCK;
        }
        if (env->pending_interrupts & PPC_INTERRUPT_HMI) {
            return PPC_INTERRUPT_HMI;
        }
    }
    /* Privileged Doorbell Exception */
    if ((env->pending_interrupts & PPC_INTERRUPT_DOORBELL) &&
        (env->spr[SPR_LPCR] & LPCR_PDEE)) {
        return PPC_INTERRUPT_DOORBELL;
    }
    /* Hypervisor Doorbell Exception */
    if ((env->pending_interrupts & PPC_INTERRUPT_HDOORBELL) &&
        (env->spr[SPR_LPCR] & LPCR_HDEE)) {
        return PPC_INTERRUPT_HDOORBELL;
    }
    /* Hypervisor virtualization exception */
    if ((env->pending_interrupts & PPC_INTERRUPT_HVIRT) &&
        (env->spr[SPR_LPCR] & LPCR_HVEE)) {
        return PPC_INTERRUPT_HVIRT;
    }
    if (env->pending_interrupts & PPC_INTERRUPT_RESET) {
        return PPC_INTERRUPT_RESET;
    }
    return 0;
}
PPC_INTERRUPT_RESET) { 1885 return PPC_INTERRUPT_RESET; 1886 } 1887 return 0; 1888 } 1889 1890 static int p9_next_unmasked_interrupt(CPUPPCState *env) 1891 { 1892 CPUState *cs = env_cpu(env); 1893 1894 /* Ignore MSR[EE] when coming out of some power management states */ 1895 bool msr_ee = FIELD_EX64(env->msr, MSR, EE) || env->resume_as_sreset; 1896 1897 assert((env->pending_interrupts & P9_UNUSED_INTERRUPTS) == 0); 1898 1899 if (cs->halted) { 1900 if (env->spr[SPR_PSSCR] & PSSCR_EC) { 1901 /* 1902 * When PSSCR[EC] is set, LPCR[PECE] controls which interrupts can 1903 * wakeup the processor 1904 */ 1905 return p9_interrupt_powersave(env); 1906 } else { 1907 /* 1908 * When it's clear, any system-caused exception exits power-saving 1909 * mode, even the ones that gate on MSR[EE]. 1910 */ 1911 msr_ee = true; 1912 } 1913 } 1914 1915 /* Machine check exception */ 1916 if (env->pending_interrupts & PPC_INTERRUPT_MCK) { 1917 return PPC_INTERRUPT_MCK; 1918 } 1919 1920 /* Hypervisor decrementer exception */ 1921 if (env->pending_interrupts & PPC_INTERRUPT_HDECR) { 1922 /* LPCR will be clear when not supported so this will work */ 1923 bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE); 1924 if ((msr_ee || !FIELD_EX64_HV(env->msr)) && hdice) { 1925 /* HDEC clears on delivery */ 1926 return PPC_INTERRUPT_HDECR; 1927 } 1928 } 1929 1930 /* Hypervisor virtualization interrupt */ 1931 if (env->pending_interrupts & PPC_INTERRUPT_HVIRT) { 1932 /* LPCR will be clear when not supported so this will work */ 1933 bool hvice = !!(env->spr[SPR_LPCR] & LPCR_HVICE); 1934 if ((msr_ee || !FIELD_EX64_HV(env->msr)) && hvice) { 1935 return PPC_INTERRUPT_HVIRT; 1936 } 1937 } 1938 1939 /* External interrupt can ignore MSR:EE under some circumstances */ 1940 if (env->pending_interrupts & PPC_INTERRUPT_EXT) { 1941 bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0); 1942 bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC); 1943 /* HEIC blocks delivery to the hypervisor */ 1944 if ((msr_ee && !(heic && FIELD_EX64_HV(env->msr) && 1945 !FIELD_EX64(env->msr, MSR, PR))) || 1946 (env->has_hv_mode && !FIELD_EX64_HV(env->msr) && !lpes0)) { 1947 return PPC_INTERRUPT_EXT; 1948 } 1949 } 1950 if (msr_ee != 0) { 1951 /* Decrementer exception */ 1952 if (env->pending_interrupts & PPC_INTERRUPT_DECR) { 1953 return PPC_INTERRUPT_DECR; 1954 } 1955 if (env->pending_interrupts & PPC_INTERRUPT_DOORBELL) { 1956 return PPC_INTERRUPT_DOORBELL; 1957 } 1958 if (env->pending_interrupts & PPC_INTERRUPT_HDOORBELL) { 1959 return PPC_INTERRUPT_HDOORBELL; 1960 } 1961 if (env->pending_interrupts & PPC_INTERRUPT_PERFM) { 1962 return PPC_INTERRUPT_PERFM; 1963 } 1964 /* EBB exception */ 1965 if (env->pending_interrupts & PPC_INTERRUPT_EBB) { 1966 /* 1967 * EBB exception must be taken in problem state and 1968 * with BESCR_GE set. 
1969 */ 1970 if (FIELD_EX64(env->msr, MSR, PR) && 1971 (env->spr[SPR_BESCR] & BESCR_GE)) { 1972 return PPC_INTERRUPT_EBB; 1973 } 1974 } 1975 } 1976 1977 return 0; 1978 } 1979 #endif /* TARGET_PPC64 */ 1980 1981 static int ppc_next_unmasked_interrupt(CPUPPCState *env) 1982 { 1983 #ifdef TARGET_PPC64 1984 switch (env->excp_model) { 1985 case POWERPC_EXCP_POWER7: 1986 return p7_next_unmasked_interrupt(env); 1987 case POWERPC_EXCP_POWER8: 1988 return p8_next_unmasked_interrupt(env); 1989 case POWERPC_EXCP_POWER9: 1990 case POWERPC_EXCP_POWER10: 1991 return p9_next_unmasked_interrupt(env); 1992 default: 1993 break; 1994 } 1995 #endif 1996 bool async_deliver; 1997 1998 /* External reset */ 1999 if (env->pending_interrupts & PPC_INTERRUPT_RESET) { 2000 return PPC_INTERRUPT_RESET; 2001 } 2002 /* Machine check exception */ 2003 if (env->pending_interrupts & PPC_INTERRUPT_MCK) { 2004 return PPC_INTERRUPT_MCK; 2005 } 2006 #if 0 /* TODO */ 2007 /* External debug exception */ 2008 if (env->pending_interrupts & PPC_INTERRUPT_DEBUG) { 2009 return PPC_INTERRUPT_DEBUG; 2010 } 2011 #endif 2012 2013 /* 2014 * For interrupts that gate on MSR:EE, we need to do something a 2015 * bit more subtle, as we need to let them through even when EE is 2016 * clear when coming out of some power management states (in order 2017 * for them to become a 0x100). 2018 */ 2019 async_deliver = FIELD_EX64(env->msr, MSR, EE) || env->resume_as_sreset; 2020 2021 /* Hypervisor decrementer exception */ 2022 if (env->pending_interrupts & PPC_INTERRUPT_HDECR) { 2023 /* LPCR will be clear when not supported so this will work */ 2024 bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE); 2025 if ((async_deliver || !FIELD_EX64_HV(env->msr)) && hdice) { 2026 /* HDEC clears on delivery */ 2027 return PPC_INTERRUPT_HDECR; 2028 } 2029 } 2030 2031 /* Hypervisor virtualization interrupt */ 2032 if (env->pending_interrupts & PPC_INTERRUPT_HVIRT) { 2033 /* LPCR will be clear when not supported so this will work */ 2034 bool hvice = !!(env->spr[SPR_LPCR] & LPCR_HVICE); 2035 if ((async_deliver || !FIELD_EX64_HV(env->msr)) && hvice) { 2036 return PPC_INTERRUPT_HVIRT; 2037 } 2038 } 2039 2040 /* External interrupt can ignore MSR:EE under some circumstances */ 2041 if (env->pending_interrupts & PPC_INTERRUPT_EXT) { 2042 bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0); 2043 bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC); 2044 /* HEIC blocks delivery to the hypervisor */ 2045 if ((async_deliver && !(heic && FIELD_EX64_HV(env->msr) && 2046 !FIELD_EX64(env->msr, MSR, PR))) || 2047 (env->has_hv_mode && !FIELD_EX64_HV(env->msr) && !lpes0)) { 2048 return PPC_INTERRUPT_EXT; 2049 } 2050 } 2051 if (FIELD_EX64(env->msr, MSR, CE)) { 2052 /* External critical interrupt */ 2053 if (env->pending_interrupts & PPC_INTERRUPT_CEXT) { 2054 return PPC_INTERRUPT_CEXT; 2055 } 2056 } 2057 if (async_deliver != 0) { 2058 /* Watchdog timer on embedded PowerPC */ 2059 if (env->pending_interrupts & PPC_INTERRUPT_WDT) { 2060 return PPC_INTERRUPT_WDT; 2061 } 2062 if (env->pending_interrupts & PPC_INTERRUPT_CDOORBELL) { 2063 return PPC_INTERRUPT_CDOORBELL; 2064 } 2065 /* Fixed interval timer on embedded PowerPC */ 2066 if (env->pending_interrupts & PPC_INTERRUPT_FIT) { 2067 return PPC_INTERRUPT_FIT; 2068 } 2069 /* Programmable interval timer on embedded PowerPC */ 2070 if (env->pending_interrupts & PPC_INTERRUPT_PIT) { 2071 return PPC_INTERRUPT_PIT; 2072 } 2073 /* Decrementer exception */ 2074 if (env->pending_interrupts & PPC_INTERRUPT_DECR) { 2075 return PPC_INTERRUPT_DECR; 2076 } 2077 
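        /*
         * Doorbells are gated on async_deliver like the decrementer above;
         * whether the server (SDOOR/SDOOR_HV) or embedded (DOORI) vector is
         * raised is decided later, at delivery time, in
         * ppc_deliver_interrupt().
         */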
if (env->pending_interrupts & PPC_INTERRUPT_DOORBELL) { 2078 return PPC_INTERRUPT_DOORBELL; 2079 } 2080 if (env->pending_interrupts & PPC_INTERRUPT_HDOORBELL) { 2081 return PPC_INTERRUPT_HDOORBELL; 2082 } 2083 if (env->pending_interrupts & PPC_INTERRUPT_PERFM) { 2084 return PPC_INTERRUPT_PERFM; 2085 } 2086 /* Thermal interrupt */ 2087 if (env->pending_interrupts & PPC_INTERRUPT_THERM) { 2088 return PPC_INTERRUPT_THERM; 2089 } 2090 /* EBB exception */ 2091 if (env->pending_interrupts & PPC_INTERRUPT_EBB) { 2092 /* 2093 * EBB exception must be taken in problem state and 2094 * with BESCR_GE set. 2095 */ 2096 if (FIELD_EX64(env->msr, MSR, PR) && 2097 (env->spr[SPR_BESCR] & BESCR_GE)) { 2098 return PPC_INTERRUPT_EBB; 2099 } 2100 } 2101 } 2102 2103 return 0; 2104 } 2105 2106 /* 2107 * Sets CPU_INTERRUPT_HARD if there is at least one unmasked interrupt to be 2108 * delivered and clears CPU_INTERRUPT_HARD otherwise. 2109 * 2110 * This method is called by ppc_set_interrupt when an interrupt is raised or 2111 * lowered, and should also be called whenever an interrupt masking condition 2112 * is changed, e.g.: 2113 * - When relevant bits of MSR are altered, like EE, HV, PR, etc.; 2114 * - When relevant bits of LPCR are altered, like PECE, HDICE, HVICE, etc.; 2115 * - When PSSCR[EC] or env->resume_as_sreset are changed; 2116 * - When cs->halted is changed and the CPU has a different interrupt masking 2117 * logic in power-saving mode (e.g., POWER7/8/9/10); 2118 */ 2119 void ppc_maybe_interrupt(CPUPPCState *env) 2120 { 2121 CPUState *cs = env_cpu(env); 2122 BQL_LOCK_GUARD(); 2123 2124 if (ppc_next_unmasked_interrupt(env)) { 2125 cpu_interrupt(cs, CPU_INTERRUPT_HARD); 2126 } else { 2127 cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD); 2128 } 2129 } 2130 2131 #ifdef TARGET_PPC64 2132 static void p7_deliver_interrupt(CPUPPCState *env, int interrupt) 2133 { 2134 PowerPCCPU *cpu = env_archcpu(env); 2135 2136 switch (interrupt) { 2137 case PPC_INTERRUPT_MCK: /* Machine check exception */ 2138 env->pending_interrupts &= ~PPC_INTERRUPT_MCK; 2139 powerpc_excp(cpu, POWERPC_EXCP_MCHECK); 2140 break; 2141 2142 case PPC_INTERRUPT_HDECR: /* Hypervisor decrementer exception */ 2143 /* HDEC clears on delivery */ 2144 env->pending_interrupts &= ~PPC_INTERRUPT_HDECR; 2145 powerpc_excp(cpu, POWERPC_EXCP_HDECR); 2146 break; 2147 2148 case PPC_INTERRUPT_EXT: 2149 if (books_vhyp_promotes_external_to_hvirt(cpu)) { 2150 powerpc_excp(cpu, POWERPC_EXCP_HVIRT); 2151 } else { 2152 powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL); 2153 } 2154 break; 2155 2156 case PPC_INTERRUPT_DECR: /* Decrementer exception */ 2157 powerpc_excp(cpu, POWERPC_EXCP_DECR); 2158 break; 2159 case PPC_INTERRUPT_PERFM: 2160 env->pending_interrupts &= ~PPC_INTERRUPT_PERFM; 2161 powerpc_excp(cpu, POWERPC_EXCP_PERFM); 2162 break; 2163 case 0: 2164 /* 2165 * This is a bug ! It means that has_work took us out of halt without 2166 * anything to deliver while in a PM state that requires getting 2167 * out via a 0x100 2168 * 2169 * This means we will incorrectly execute past the power management 2170 * instruction instead of triggering a reset. 2171 * 2172 * It generally means a discrepancy between the wakeup conditions in the 2173 * processor has_work implementation and the logic in this function. 2174 */ 2175 assert(!env->resume_as_sreset); 2176 break; 2177 default: 2178 cpu_abort(env_cpu(env), "Invalid PowerPC interrupt %d. 
Aborting\n", 2179 interrupt); 2180 } 2181 } 2182 2183 static void p8_deliver_interrupt(CPUPPCState *env, int interrupt) 2184 { 2185 PowerPCCPU *cpu = env_archcpu(env); 2186 2187 switch (interrupt) { 2188 case PPC_INTERRUPT_MCK: /* Machine check exception */ 2189 env->pending_interrupts &= ~PPC_INTERRUPT_MCK; 2190 powerpc_excp(cpu, POWERPC_EXCP_MCHECK); 2191 break; 2192 2193 case PPC_INTERRUPT_HDECR: /* Hypervisor decrementer exception */ 2194 /* HDEC clears on delivery */ 2195 env->pending_interrupts &= ~PPC_INTERRUPT_HDECR; 2196 powerpc_excp(cpu, POWERPC_EXCP_HDECR); 2197 break; 2198 2199 case PPC_INTERRUPT_EXT: 2200 if (books_vhyp_promotes_external_to_hvirt(cpu)) { 2201 powerpc_excp(cpu, POWERPC_EXCP_HVIRT); 2202 } else { 2203 powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL); 2204 } 2205 break; 2206 2207 case PPC_INTERRUPT_DECR: /* Decrementer exception */ 2208 powerpc_excp(cpu, POWERPC_EXCP_DECR); 2209 break; 2210 case PPC_INTERRUPT_DOORBELL: 2211 env->pending_interrupts &= ~PPC_INTERRUPT_DOORBELL; 2212 if (is_book3s_arch2x(env)) { 2213 powerpc_excp(cpu, POWERPC_EXCP_SDOOR); 2214 } else { 2215 powerpc_excp(cpu, POWERPC_EXCP_DOORI); 2216 } 2217 break; 2218 case PPC_INTERRUPT_HDOORBELL: 2219 env->pending_interrupts &= ~PPC_INTERRUPT_HDOORBELL; 2220 powerpc_excp(cpu, POWERPC_EXCP_SDOOR_HV); 2221 break; 2222 case PPC_INTERRUPT_PERFM: 2223 env->pending_interrupts &= ~PPC_INTERRUPT_PERFM; 2224 powerpc_excp(cpu, POWERPC_EXCP_PERFM); 2225 break; 2226 case PPC_INTERRUPT_EBB: /* EBB exception */ 2227 env->pending_interrupts &= ~PPC_INTERRUPT_EBB; 2228 if (env->spr[SPR_BESCR] & BESCR_PMEO) { 2229 powerpc_excp(cpu, POWERPC_EXCP_PERFM_EBB); 2230 } else if (env->spr[SPR_BESCR] & BESCR_EEO) { 2231 powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL_EBB); 2232 } 2233 break; 2234 case 0: 2235 /* 2236 * This is a bug ! It means that has_work took us out of halt without 2237 * anything to deliver while in a PM state that requires getting 2238 * out via a 0x100 2239 * 2240 * This means we will incorrectly execute past the power management 2241 * instruction instead of triggering a reset. 2242 * 2243 * It generally means a discrepancy between the wakeup conditions in the 2244 * processor has_work implementation and the logic in this function. 2245 */ 2246 assert(!env->resume_as_sreset); 2247 break; 2248 default: 2249 cpu_abort(env_cpu(env), "Invalid PowerPC interrupt %d. Aborting\n", 2250 interrupt); 2251 } 2252 } 2253 2254 static void p9_deliver_interrupt(CPUPPCState *env, int interrupt) 2255 { 2256 PowerPCCPU *cpu = env_archcpu(env); 2257 CPUState *cs = env_cpu(env); 2258 2259 if (cs->halted && !(env->spr[SPR_PSSCR] & PSSCR_EC) && 2260 !FIELD_EX64(env->msr, MSR, EE)) { 2261 /* 2262 * A pending interrupt took us out of power-saving, but MSR[EE] says 2263 * that we should return to NIP+4 instead of delivering it. 
2264 */ 2265 return; 2266 } 2267 2268 switch (interrupt) { 2269 case PPC_INTERRUPT_MCK: /* Machine check exception */ 2270 env->pending_interrupts &= ~PPC_INTERRUPT_MCK; 2271 powerpc_excp(cpu, POWERPC_EXCP_MCHECK); 2272 break; 2273 2274 case PPC_INTERRUPT_HDECR: /* Hypervisor decrementer exception */ 2275 /* HDEC clears on delivery */ 2276 env->pending_interrupts &= ~PPC_INTERRUPT_HDECR; 2277 powerpc_excp(cpu, POWERPC_EXCP_HDECR); 2278 break; 2279 case PPC_INTERRUPT_HVIRT: /* Hypervisor virtualization interrupt */ 2280 powerpc_excp(cpu, POWERPC_EXCP_HVIRT); 2281 break; 2282 2283 case PPC_INTERRUPT_EXT: 2284 if (books_vhyp_promotes_external_to_hvirt(cpu)) { 2285 powerpc_excp(cpu, POWERPC_EXCP_HVIRT); 2286 } else { 2287 powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL); 2288 } 2289 break; 2290 2291 case PPC_INTERRUPT_DECR: /* Decrementer exception */ 2292 powerpc_excp(cpu, POWERPC_EXCP_DECR); 2293 break; 2294 case PPC_INTERRUPT_DOORBELL: 2295 env->pending_interrupts &= ~PPC_INTERRUPT_DOORBELL; 2296 powerpc_excp(cpu, POWERPC_EXCP_SDOOR); 2297 break; 2298 case PPC_INTERRUPT_HDOORBELL: 2299 env->pending_interrupts &= ~PPC_INTERRUPT_HDOORBELL; 2300 powerpc_excp(cpu, POWERPC_EXCP_SDOOR_HV); 2301 break; 2302 case PPC_INTERRUPT_PERFM: 2303 env->pending_interrupts &= ~PPC_INTERRUPT_PERFM; 2304 powerpc_excp(cpu, POWERPC_EXCP_PERFM); 2305 break; 2306 case PPC_INTERRUPT_EBB: /* EBB exception */ 2307 env->pending_interrupts &= ~PPC_INTERRUPT_EBB; 2308 if (env->spr[SPR_BESCR] & BESCR_PMEO) { 2309 powerpc_excp(cpu, POWERPC_EXCP_PERFM_EBB); 2310 } else if (env->spr[SPR_BESCR] & BESCR_EEO) { 2311 powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL_EBB); 2312 } 2313 break; 2314 case 0: 2315 /* 2316 * This is a bug ! It means that has_work took us out of halt without 2317 * anything to deliver while in a PM state that requires getting 2318 * out via a 0x100 2319 * 2320 * This means we will incorrectly execute past the power management 2321 * instruction instead of triggering a reset. 2322 * 2323 * It generally means a discrepancy between the wakeup conditions in the 2324 * processor has_work implementation and the logic in this function. 2325 */ 2326 assert(!env->resume_as_sreset); 2327 break; 2328 default: 2329 cpu_abort(env_cpu(env), "Invalid PowerPC interrupt %d. 
Aborting\n", 2330 interrupt); 2331 } 2332 } 2333 #endif /* TARGET_PPC64 */ 2334 2335 static void ppc_deliver_interrupt(CPUPPCState *env, int interrupt) 2336 { 2337 #ifdef TARGET_PPC64 2338 switch (env->excp_model) { 2339 case POWERPC_EXCP_POWER7: 2340 return p7_deliver_interrupt(env, interrupt); 2341 case POWERPC_EXCP_POWER8: 2342 return p8_deliver_interrupt(env, interrupt); 2343 case POWERPC_EXCP_POWER9: 2344 case POWERPC_EXCP_POWER10: 2345 return p9_deliver_interrupt(env, interrupt); 2346 default: 2347 break; 2348 } 2349 #endif 2350 PowerPCCPU *cpu = env_archcpu(env); 2351 2352 switch (interrupt) { 2353 case PPC_INTERRUPT_RESET: /* External reset */ 2354 env->pending_interrupts &= ~PPC_INTERRUPT_RESET; 2355 powerpc_excp(cpu, POWERPC_EXCP_RESET); 2356 break; 2357 case PPC_INTERRUPT_MCK: /* Machine check exception */ 2358 env->pending_interrupts &= ~PPC_INTERRUPT_MCK; 2359 powerpc_excp(cpu, POWERPC_EXCP_MCHECK); 2360 break; 2361 2362 case PPC_INTERRUPT_HDECR: /* Hypervisor decrementer exception */ 2363 /* HDEC clears on delivery */ 2364 env->pending_interrupts &= ~PPC_INTERRUPT_HDECR; 2365 powerpc_excp(cpu, POWERPC_EXCP_HDECR); 2366 break; 2367 case PPC_INTERRUPT_HVIRT: /* Hypervisor virtualization interrupt */ 2368 powerpc_excp(cpu, POWERPC_EXCP_HVIRT); 2369 break; 2370 2371 case PPC_INTERRUPT_EXT: 2372 if (books_vhyp_promotes_external_to_hvirt(cpu)) { 2373 powerpc_excp(cpu, POWERPC_EXCP_HVIRT); 2374 } else { 2375 powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL); 2376 } 2377 break; 2378 case PPC_INTERRUPT_CEXT: /* External critical interrupt */ 2379 powerpc_excp(cpu, POWERPC_EXCP_CRITICAL); 2380 break; 2381 2382 case PPC_INTERRUPT_WDT: /* Watchdog timer on embedded PowerPC */ 2383 env->pending_interrupts &= ~PPC_INTERRUPT_WDT; 2384 powerpc_excp(cpu, POWERPC_EXCP_WDT); 2385 break; 2386 case PPC_INTERRUPT_CDOORBELL: 2387 env->pending_interrupts &= ~PPC_INTERRUPT_CDOORBELL; 2388 powerpc_excp(cpu, POWERPC_EXCP_DOORCI); 2389 break; 2390 case PPC_INTERRUPT_FIT: /* Fixed interval timer on embedded PowerPC */ 2391 env->pending_interrupts &= ~PPC_INTERRUPT_FIT; 2392 powerpc_excp(cpu, POWERPC_EXCP_FIT); 2393 break; 2394 case PPC_INTERRUPT_PIT: /* Programmable interval timer on embedded ppc */ 2395 env->pending_interrupts &= ~PPC_INTERRUPT_PIT; 2396 powerpc_excp(cpu, POWERPC_EXCP_PIT); 2397 break; 2398 case PPC_INTERRUPT_DECR: /* Decrementer exception */ 2399 if (ppc_decr_clear_on_delivery(env)) { 2400 env->pending_interrupts &= ~PPC_INTERRUPT_DECR; 2401 } 2402 powerpc_excp(cpu, POWERPC_EXCP_DECR); 2403 break; 2404 case PPC_INTERRUPT_DOORBELL: 2405 env->pending_interrupts &= ~PPC_INTERRUPT_DOORBELL; 2406 if (is_book3s_arch2x(env)) { 2407 powerpc_excp(cpu, POWERPC_EXCP_SDOOR); 2408 } else { 2409 powerpc_excp(cpu, POWERPC_EXCP_DOORI); 2410 } 2411 break; 2412 case PPC_INTERRUPT_HDOORBELL: 2413 env->pending_interrupts &= ~PPC_INTERRUPT_HDOORBELL; 2414 powerpc_excp(cpu, POWERPC_EXCP_SDOOR_HV); 2415 break; 2416 case PPC_INTERRUPT_PERFM: 2417 env->pending_interrupts &= ~PPC_INTERRUPT_PERFM; 2418 powerpc_excp(cpu, POWERPC_EXCP_PERFM); 2419 break; 2420 case PPC_INTERRUPT_THERM: /* Thermal interrupt */ 2421 env->pending_interrupts &= ~PPC_INTERRUPT_THERM; 2422 powerpc_excp(cpu, POWERPC_EXCP_THERM); 2423 break; 2424 case PPC_INTERRUPT_EBB: /* EBB exception */ 2425 env->pending_interrupts &= ~PPC_INTERRUPT_EBB; 2426 if (env->spr[SPR_BESCR] & BESCR_PMEO) { 2427 powerpc_excp(cpu, POWERPC_EXCP_PERFM_EBB); 2428 } else if (env->spr[SPR_BESCR] & BESCR_EEO) { 2429 powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL_EBB); 2430 } 2431 
break; 2432 case 0: 2433 /* 2434 * This is a bug ! It means that has_work took us out of halt without 2435 * anything to deliver while in a PM state that requires getting 2436 * out via a 0x100 2437 * 2438 * This means we will incorrectly execute past the power management 2439 * instruction instead of triggering a reset. 2440 * 2441 * It generally means a discrepancy between the wakeup conditions in the 2442 * processor has_work implementation and the logic in this function. 2443 */ 2444 assert(!env->resume_as_sreset); 2445 break; 2446 default: 2447 cpu_abort(env_cpu(env), "Invalid PowerPC interrupt %d. Aborting\n", 2448 interrupt); 2449 } 2450 } 2451 2452 void ppc_cpu_do_system_reset(CPUState *cs) 2453 { 2454 PowerPCCPU *cpu = POWERPC_CPU(cs); 2455 2456 powerpc_excp(cpu, POWERPC_EXCP_RESET); 2457 } 2458 2459 void ppc_cpu_do_fwnmi_machine_check(CPUState *cs, target_ulong vector) 2460 { 2461 PowerPCCPU *cpu = POWERPC_CPU(cs); 2462 CPUPPCState *env = &cpu->env; 2463 target_ulong msr = 0; 2464 2465 /* 2466 * Set MSR and NIP for the handler, SRR0/1, DAR and DSISR have already 2467 * been set by KVM. 2468 */ 2469 msr = (1ULL << MSR_ME); 2470 msr |= env->msr & (1ULL << MSR_SF); 2471 if (ppc_interrupts_little_endian(cpu, false)) { 2472 msr |= (1ULL << MSR_LE); 2473 } 2474 2475 /* Anything for nested required here? MSR[HV] bit? */ 2476 2477 powerpc_set_excp_state(cpu, vector, msr); 2478 } 2479 2480 bool ppc_cpu_exec_interrupt(CPUState *cs, int interrupt_request) 2481 { 2482 CPUPPCState *env = cpu_env(cs); 2483 int interrupt; 2484 2485 if ((interrupt_request & CPU_INTERRUPT_HARD) == 0) { 2486 return false; 2487 } 2488 2489 interrupt = ppc_next_unmasked_interrupt(env); 2490 if (interrupt == 0) { 2491 return false; 2492 } 2493 2494 ppc_deliver_interrupt(env, interrupt); 2495 if (env->pending_interrupts == 0) { 2496 cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD); 2497 } 2498 return true; 2499 } 2500 2501 #endif /* !CONFIG_USER_ONLY */ 2502 2503 /*****************************************************************************/ 2504 /* Exceptions processing helpers */ 2505 2506 void raise_exception_err_ra(CPUPPCState *env, uint32_t exception, 2507 uint32_t error_code, uintptr_t raddr) 2508 { 2509 CPUState *cs = env_cpu(env); 2510 2511 cs->exception_index = exception; 2512 env->error_code = error_code; 2513 cpu_loop_exit_restore(cs, raddr); 2514 } 2515 2516 void raise_exception_err(CPUPPCState *env, uint32_t exception, 2517 uint32_t error_code) 2518 { 2519 raise_exception_err_ra(env, exception, error_code, 0); 2520 } 2521 2522 void raise_exception(CPUPPCState *env, uint32_t exception) 2523 { 2524 raise_exception_err_ra(env, exception, 0, 0); 2525 } 2526 2527 void raise_exception_ra(CPUPPCState *env, uint32_t exception, 2528 uintptr_t raddr) 2529 { 2530 raise_exception_err_ra(env, exception, 0, raddr); 2531 } 2532 2533 #ifdef CONFIG_TCG 2534 void helper_raise_exception_err(CPUPPCState *env, uint32_t exception, 2535 uint32_t error_code) 2536 { 2537 raise_exception_err_ra(env, exception, error_code, 0); 2538 } 2539 2540 void helper_raise_exception(CPUPPCState *env, uint32_t exception) 2541 { 2542 raise_exception_err_ra(env, exception, 0, 0); 2543 } 2544 2545 #ifndef CONFIG_USER_ONLY 2546 void helper_store_msr(CPUPPCState *env, target_ulong val) 2547 { 2548 uint32_t excp = hreg_store_msr(env, val, 0); 2549 2550 if (excp != 0) { 2551 cpu_interrupt_exittb(env_cpu(env)); 2552 raise_exception(env, excp); 2553 } 2554 } 2555 2556 void helper_ppc_maybe_interrupt(CPUPPCState *env) 2557 { 2558 ppc_maybe_interrupt(env); 
2559 } 2560 2561 #ifdef TARGET_PPC64 2562 void helper_scv(CPUPPCState *env, uint32_t lev) 2563 { 2564 if (env->spr[SPR_FSCR] & (1ull << FSCR_SCV)) { 2565 raise_exception_err(env, POWERPC_EXCP_SYSCALL_VECTORED, lev); 2566 } else { 2567 raise_exception_err(env, POWERPC_EXCP_FU, FSCR_IC_SCV); 2568 } 2569 } 2570 2571 void helper_pminsn(CPUPPCState *env, uint32_t insn) 2572 { 2573 CPUState *cs = env_cpu(env); 2574 2575 cs->halted = 1; 2576 2577 /* Condition for waking up at 0x100 */ 2578 env->resume_as_sreset = (insn != PPC_PM_STOP) || 2579 (env->spr[SPR_PSSCR] & PSSCR_EC); 2580 2581 /* HDECR is not to wake from PM state, it may have already fired */ 2582 if (env->resume_as_sreset) { 2583 PowerPCCPU *cpu = env_archcpu(env); 2584 ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 0); 2585 } 2586 2587 ppc_maybe_interrupt(env); 2588 } 2589 #endif /* TARGET_PPC64 */ 2590 2591 static void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr) 2592 { 2593 /* MSR:POW cannot be set by any form of rfi */ 2594 msr &= ~(1ULL << MSR_POW); 2595 2596 /* MSR:TGPR cannot be set by any form of rfi */ 2597 if (env->flags & POWERPC_FLAG_TGPR) 2598 msr &= ~(1ULL << MSR_TGPR); 2599 2600 #ifdef TARGET_PPC64 2601 /* Switching to 32-bit ? Crop the nip */ 2602 if (!msr_is_64bit(env, msr)) { 2603 nip = (uint32_t)nip; 2604 } 2605 #else 2606 nip = (uint32_t)nip; 2607 #endif 2608 /* XXX: beware: this is false if VLE is supported */ 2609 env->nip = nip & ~((target_ulong)0x00000003); 2610 hreg_store_msr(env, msr, 1); 2611 trace_ppc_excp_rfi(env->nip, env->msr); 2612 /* 2613 * No need to raise an exception here, as rfi is always the last 2614 * insn of a TB 2615 */ 2616 cpu_interrupt_exittb(env_cpu(env)); 2617 /* Reset the reservation */ 2618 env->reserve_addr = -1; 2619 2620 /* Context synchronizing: check if TCG TLB needs flush */ 2621 check_tlb_flush(env, false); 2622 } 2623 2624 void helper_rfi(CPUPPCState *env) 2625 { 2626 do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1] & 0xfffffffful); 2627 } 2628 2629 #ifdef TARGET_PPC64 2630 void helper_rfid(CPUPPCState *env) 2631 { 2632 /* 2633 * The architecture defines a number of rules for which bits can 2634 * change but in practice, we handle this in hreg_store_msr() 2635 * which will be called by do_rfi(), so there is no need to filter 2636 * here 2637 */ 2638 do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1]); 2639 } 2640 2641 void helper_rfscv(CPUPPCState *env) 2642 { 2643 do_rfi(env, env->lr, env->ctr); 2644 } 2645 2646 void helper_hrfid(CPUPPCState *env) 2647 { 2648 do_rfi(env, env->spr[SPR_HSRR0], env->spr[SPR_HSRR1]); 2649 } 2650 2651 void helper_rfebb(CPUPPCState *env, target_ulong s) 2652 { 2653 target_ulong msr = env->msr; 2654 2655 /* 2656 * Handling of BESCR bits 32:33 according to PowerISA v3.1: 2657 * 2658 * "If BESCR 32:33 != 0b00 the instruction is treated as if 2659 * the instruction form were invalid." 2660 */ 2661 if (env->spr[SPR_BESCR] & BESCR_INVALID) { 2662 raise_exception_err(env, POWERPC_EXCP_PROGRAM, 2663 POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL); 2664 } 2665 2666 env->nip = env->spr[SPR_EBBRR]; 2667 2668 /* Switching to 32-bit ? Crop the nip */ 2669 if (!msr_is_64bit(env, msr)) { 2670 env->nip = (uint32_t)env->spr[SPR_EBBRR]; 2671 } 2672 2673 if (s) { 2674 env->spr[SPR_BESCR] |= BESCR_GE; 2675 } else { 2676 env->spr[SPR_BESCR] &= ~BESCR_GE; 2677 } 2678 } 2679 2680 /* 2681 * Triggers or queues an 'ebb_excp' EBB exception. All checks 2682 * but FSCR, HFSCR and msr_pr must be done beforehand. 
2683 * 2684 * PowerISA v3.1 isn't clear about whether an EBB should be 2685 * postponed or cancelled if the EBB facility is unavailable. 2686 * Our assumption here is that the EBB is cancelled if both 2687 * FSCR and HFSCR EBB facilities aren't available. 2688 */ 2689 static void do_ebb(CPUPPCState *env, int ebb_excp) 2690 { 2691 PowerPCCPU *cpu = env_archcpu(env); 2692 2693 /* 2694 * FSCR_EBB and FSCR_IC_EBB are the same bits used with 2695 * HFSCR. 2696 */ 2697 helper_fscr_facility_check(env, FSCR_EBB, 0, FSCR_IC_EBB); 2698 helper_hfscr_facility_check(env, FSCR_EBB, "EBB", FSCR_IC_EBB); 2699 2700 if (ebb_excp == POWERPC_EXCP_PERFM_EBB) { 2701 env->spr[SPR_BESCR] |= BESCR_PMEO; 2702 } else if (ebb_excp == POWERPC_EXCP_EXTERNAL_EBB) { 2703 env->spr[SPR_BESCR] |= BESCR_EEO; 2704 } 2705 2706 if (FIELD_EX64(env->msr, MSR, PR)) { 2707 powerpc_excp(cpu, ebb_excp); 2708 } else { 2709 ppc_set_irq(cpu, PPC_INTERRUPT_EBB, 1); 2710 } 2711 } 2712 2713 void raise_ebb_perfm_exception(CPUPPCState *env) 2714 { 2715 bool perfm_ebb_enabled = env->spr[SPR_POWER_MMCR0] & MMCR0_EBE && 2716 env->spr[SPR_BESCR] & BESCR_PME && 2717 env->spr[SPR_BESCR] & BESCR_GE; 2718 2719 if (!perfm_ebb_enabled) { 2720 return; 2721 } 2722 2723 do_ebb(env, POWERPC_EXCP_PERFM_EBB); 2724 } 2725 #endif /* TARGET_PPC64 */ 2726 2727 /*****************************************************************************/ 2728 /* Embedded PowerPC specific helpers */ 2729 void helper_40x_rfci(CPUPPCState *env) 2730 { 2731 do_rfi(env, env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3]); 2732 } 2733 2734 void helper_rfci(CPUPPCState *env) 2735 { 2736 do_rfi(env, env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1]); 2737 } 2738 2739 void helper_rfdi(CPUPPCState *env) 2740 { 2741 /* FIXME: choose CSRR1 or DSRR1 based on cpu type */ 2742 do_rfi(env, env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1]); 2743 } 2744 2745 void helper_rfmci(CPUPPCState *env) 2746 { 2747 /* FIXME: choose CSRR1 or MCSRR1 based on cpu type */ 2748 do_rfi(env, env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1]); 2749 } 2750 #endif /* !CONFIG_USER_ONLY */ 2751 2752 void helper_tw(CPUPPCState *env, target_ulong arg1, target_ulong arg2, 2753 uint32_t flags) 2754 { 2755 if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) || 2756 ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) || 2757 ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) || 2758 ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) || 2759 ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) { 2760 raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, 2761 POWERPC_EXCP_TRAP, GETPC()); 2762 } 2763 } 2764 2765 #ifdef TARGET_PPC64 2766 void helper_td(CPUPPCState *env, target_ulong arg1, target_ulong arg2, 2767 uint32_t flags) 2768 { 2769 if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) || 2770 ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) || 2771 ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) || 2772 ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) || 2773 ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01))))) { 2774 raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, 2775 POWERPC_EXCP_TRAP, GETPC()); 2776 } 2777 } 2778 #endif /* TARGET_PPC64 */ 2779 2780 static uint32_t helper_SIMON_LIKE_32_64(uint32_t x, uint64_t key, uint32_t lane) 2781 { 2782 const uint16_t c = 0xfffc; 2783 const uint64_t z0 = 0xfa2561cdf44ac398ULL; 2784 uint16_t z = 0, temp; 2785 uint16_t k[32], eff_k[32], xleft[33], xright[33], fxleft[32]; 2786 2787 for (int i = 3; i >= 0; i--) { 2788 k[i] = key & 0xffff; 2789 
key >>= 16; 2790 } 2791 xleft[0] = x & 0xffff; 2792 xright[0] = (x >> 16) & 0xffff; 2793 2794 for (int i = 0; i < 28; i++) { 2795 z = (z0 >> (63 - i)) & 1; 2796 temp = ror16(k[i + 3], 3) ^ k[i + 1]; 2797 k[i + 4] = c ^ z ^ k[i] ^ temp ^ ror16(temp, 1); 2798 } 2799 2800 for (int i = 0; i < 8; i++) { 2801 eff_k[4 * i + 0] = k[4 * i + ((0 + lane) % 4)]; 2802 eff_k[4 * i + 1] = k[4 * i + ((1 + lane) % 4)]; 2803 eff_k[4 * i + 2] = k[4 * i + ((2 + lane) % 4)]; 2804 eff_k[4 * i + 3] = k[4 * i + ((3 + lane) % 4)]; 2805 } 2806 2807 for (int i = 0; i < 32; i++) { 2808 fxleft[i] = (rol16(xleft[i], 1) & 2809 rol16(xleft[i], 8)) ^ rol16(xleft[i], 2); 2810 xleft[i + 1] = xright[i] ^ fxleft[i] ^ eff_k[i]; 2811 xright[i + 1] = xleft[i]; 2812 } 2813 2814 return (((uint32_t)xright[32]) << 16) | xleft[32]; 2815 } 2816 2817 static uint64_t hash_digest(uint64_t ra, uint64_t rb, uint64_t key) 2818 { 2819 uint64_t stage0_h = 0ULL, stage0_l = 0ULL; 2820 uint64_t stage1_h, stage1_l; 2821 2822 for (int i = 0; i < 4; i++) { 2823 stage0_h |= ror64(rb & 0xff, 8 * (2 * i + 1)); 2824 stage0_h |= ((ra >> 32) & 0xff) << (8 * 2 * i); 2825 stage0_l |= ror64((rb >> 32) & 0xff, 8 * (2 * i + 1)); 2826 stage0_l |= (ra & 0xff) << (8 * 2 * i); 2827 rb >>= 8; 2828 ra >>= 8; 2829 } 2830 2831 stage1_h = (uint64_t)helper_SIMON_LIKE_32_64(stage0_h >> 32, key, 0) << 32; 2832 stage1_h |= helper_SIMON_LIKE_32_64(stage0_h, key, 1); 2833 stage1_l = (uint64_t)helper_SIMON_LIKE_32_64(stage0_l >> 32, key, 2) << 32; 2834 stage1_l |= helper_SIMON_LIKE_32_64(stage0_l, key, 3); 2835 2836 return stage1_h ^ stage1_l; 2837 } 2838 2839 static void do_hash(CPUPPCState *env, target_ulong ea, target_ulong ra, 2840 target_ulong rb, uint64_t key, bool store) 2841 { 2842 uint64_t calculated_hash = hash_digest(ra, rb, key), loaded_hash; 2843 2844 if (store) { 2845 cpu_stq_data_ra(env, ea, calculated_hash, GETPC()); 2846 } else { 2847 loaded_hash = cpu_ldq_data_ra(env, ea, GETPC()); 2848 if (loaded_hash != calculated_hash) { 2849 raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, 2850 POWERPC_EXCP_TRAP, GETPC()); 2851 } 2852 } 2853 } 2854 2855 #include "qemu/guest-random.h" 2856 2857 #ifdef TARGET_PPC64 2858 #define HELPER_HASH(op, key, store, dexcr_aspect) \ 2859 void helper_##op(CPUPPCState *env, target_ulong ea, target_ulong ra, \ 2860 target_ulong rb) \ 2861 { \ 2862 if (env->msr & R_MSR_PR_MASK) { \ 2863 if (!(env->spr[SPR_DEXCR] & R_DEXCR_PRO_##dexcr_aspect##_MASK || \ 2864 env->spr[SPR_HDEXCR] & R_HDEXCR_ENF_##dexcr_aspect##_MASK)) \ 2865 return; \ 2866 } else if (!(env->msr & R_MSR_HV_MASK)) { \ 2867 if (!(env->spr[SPR_DEXCR] & R_DEXCR_PNH_##dexcr_aspect##_MASK || \ 2868 env->spr[SPR_HDEXCR] & R_HDEXCR_ENF_##dexcr_aspect##_MASK)) \ 2869 return; \ 2870 } else if (!(env->msr & R_MSR_S_MASK)) { \ 2871 if (!(env->spr[SPR_HDEXCR] & R_HDEXCR_HNU_##dexcr_aspect##_MASK)) \ 2872 return; \ 2873 } \ 2874 \ 2875 do_hash(env, ea, ra, rb, key, store); \ 2876 } 2877 #else 2878 #define HELPER_HASH(op, key, store, dexcr_aspect) \ 2879 void helper_##op(CPUPPCState *env, target_ulong ea, target_ulong ra, \ 2880 target_ulong rb) \ 2881 { \ 2882 do_hash(env, ea, ra, rb, key, store); \ 2883 } 2884 #endif /* TARGET_PPC64 */ 2885 2886 HELPER_HASH(HASHST, env->spr[SPR_HASHKEYR], true, NPHIE) 2887 HELPER_HASH(HASHCHK, env->spr[SPR_HASHKEYR], false, NPHIE) 2888 HELPER_HASH(HASHSTP, env->spr[SPR_HASHPKEYR], true, PHIE) 2889 HELPER_HASH(HASHCHKP, env->spr[SPR_HASHPKEYR], false, PHIE) 2890 2891 #ifndef CONFIG_USER_ONLY 2892 /* Embedded.Processor Control */ 2893 static int 
dbell2irq(target_ulong rb) 2894 { 2895 int msg = rb & DBELL_TYPE_MASK; 2896 int irq = -1; 2897 2898 switch (msg) { 2899 case DBELL_TYPE_DBELL: 2900 irq = PPC_INTERRUPT_DOORBELL; 2901 break; 2902 case DBELL_TYPE_DBELL_CRIT: 2903 irq = PPC_INTERRUPT_CDOORBELL; 2904 break; 2905 case DBELL_TYPE_G_DBELL: 2906 case DBELL_TYPE_G_DBELL_CRIT: 2907 case DBELL_TYPE_G_DBELL_MC: 2908 /* XXX implement */ 2909 default: 2910 break; 2911 } 2912 2913 return irq; 2914 } 2915 2916 void helper_msgclr(CPUPPCState *env, target_ulong rb) 2917 { 2918 int irq = dbell2irq(rb); 2919 2920 if (irq < 0) { 2921 return; 2922 } 2923 2924 ppc_set_irq(env_archcpu(env), irq, 0); 2925 } 2926 2927 void helper_msgsnd(target_ulong rb) 2928 { 2929 int irq = dbell2irq(rb); 2930 int pir = rb & DBELL_PIRTAG_MASK; 2931 CPUState *cs; 2932 2933 if (irq < 0) { 2934 return; 2935 } 2936 2937 bql_lock(); 2938 CPU_FOREACH(cs) { 2939 PowerPCCPU *cpu = POWERPC_CPU(cs); 2940 CPUPPCState *cenv = &cpu->env; 2941 2942 if ((rb & DBELL_BRDCAST) || (cenv->spr[SPR_BOOKE_PIR] == pir)) { 2943 ppc_set_irq(cpu, irq, 1); 2944 } 2945 } 2946 bql_unlock(); 2947 } 2948 2949 /* Server Processor Control */ 2950 2951 static bool dbell_type_server(target_ulong rb) 2952 { 2953 /* 2954 * A Directed Hypervisor Doorbell message is sent only if the 2955 * message type is 5. All other types are reserved and the 2956 * instruction is a no-op 2957 */ 2958 return (rb & DBELL_TYPE_MASK) == DBELL_TYPE_DBELL_SERVER; 2959 } 2960 2961 void helper_book3s_msgclr(CPUPPCState *env, target_ulong rb) 2962 { 2963 if (!dbell_type_server(rb)) { 2964 return; 2965 } 2966 2967 ppc_set_irq(env_archcpu(env), PPC_INTERRUPT_HDOORBELL, 0); 2968 } 2969 2970 static void book3s_msgsnd_common(int pir, int irq) 2971 { 2972 CPUState *cs; 2973 2974 bql_lock(); 2975 CPU_FOREACH(cs) { 2976 PowerPCCPU *cpu = POWERPC_CPU(cs); 2977 CPUPPCState *cenv = &cpu->env; 2978 2979 /* TODO: broadcast message to all threads of the same processor */ 2980 if (cenv->spr_cb[SPR_PIR].default_value == pir) { 2981 ppc_set_irq(cpu, irq, 1); 2982 } 2983 } 2984 bql_unlock(); 2985 } 2986 2987 void helper_book3s_msgsnd(target_ulong rb) 2988 { 2989 int pir = rb & DBELL_PROCIDTAG_MASK; 2990 2991 if (!dbell_type_server(rb)) { 2992 return; 2993 } 2994 2995 book3s_msgsnd_common(pir, PPC_INTERRUPT_HDOORBELL); 2996 } 2997 2998 #ifdef TARGET_PPC64 2999 void helper_book3s_msgclrp(CPUPPCState *env, target_ulong rb) 3000 { 3001 helper_hfscr_facility_check(env, HFSCR_MSGP, "msgclrp", HFSCR_IC_MSGP); 3002 3003 if (!dbell_type_server(rb)) { 3004 return; 3005 } 3006 3007 ppc_set_irq(env_archcpu(env), PPC_INTERRUPT_DOORBELL, 0); 3008 } 3009 3010 /* 3011 * sends a message to another thread on the same 3012 * multi-threaded processor 3013 */ 3014 void helper_book3s_msgsndp(CPUPPCState *env, target_ulong rb) 3015 { 3016 CPUState *cs = env_cpu(env); 3017 PowerPCCPU *cpu = env_archcpu(env); 3018 CPUState *ccs; 3019 uint32_t nr_threads = cs->nr_threads; 3020 int ttir = rb & PPC_BITMASK(57, 63); 3021 3022 helper_hfscr_facility_check(env, HFSCR_MSGP, "msgsndp", HFSCR_IC_MSGP); 3023 3024 if (!(env->flags & POWERPC_FLAG_SMT_1LPAR)) { 3025 nr_threads = 1; /* msgsndp behaves as 1-thread in LPAR-per-thread mode*/ 3026 } 3027 3028 if (!dbell_type_server(rb) || ttir >= nr_threads) { 3029 return; 3030 } 3031 3032 if (nr_threads == 1) { 3033 ppc_set_irq(cpu, PPC_INTERRUPT_DOORBELL, 1); 3034 return; 3035 } 3036 3037 /* Does iothread need to be locked for walking CPU list? 
 */
    bql_lock();
    THREAD_SIBLING_FOREACH(cs, ccs) {
        PowerPCCPU *ccpu = POWERPC_CPU(ccs);
        uint32_t thread_id = ppc_cpu_tir(ccpu);

        if (ttir == thread_id) {
            ppc_set_irq(ccpu, PPC_INTERRUPT_DOORBELL, 1);
            bql_unlock();
            return;
        }
    }

    g_assert_not_reached();
}
#endif /* TARGET_PPC64 */

/* Single-step tracing */
void helper_book3s_trace(CPUPPCState *env, target_ulong prev_ip)
{
    uint32_t error_code = 0;
    if (env->insns_flags2 & PPC2_ISA207S) {
        /* Load/store reporting, SRR1[35, 36] and SDAR, are not implemented. */
        env->spr[SPR_POWER_SIAR] = prev_ip;
        error_code = PPC_BIT(33);
    }
    raise_exception_err(env, POWERPC_EXCP_TRACE, error_code);
}

void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr)
{
    CPUPPCState *env = cpu_env(cs);
    uint32_t insn;

    /* Restore state and reload the insn we executed, for filling in DSISR. */
    cpu_restore_state(cs, retaddr);
    insn = ppc_ldl_code(env, env->nip);

    switch (env->mmu_model) {
    case POWERPC_MMU_SOFT_4xx:
        env->spr[SPR_40x_DEAR] = vaddr;
        break;
    case POWERPC_MMU_BOOKE:
    case POWERPC_MMU_BOOKE206:
        env->spr[SPR_BOOKE_DEAR] = vaddr;
        break;
    default:
        env->spr[SPR_DAR] = vaddr;
        break;
    }

    cs->exception_index = POWERPC_EXCP_ALIGN;
    env->error_code = insn & 0x03FF0000;
    cpu_loop_exit(cs);
}

void ppc_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr vaddr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr)
{
    CPUPPCState *env = cpu_env(cs);

    switch (env->excp_model) {
#if defined(TARGET_PPC64)
    case POWERPC_EXCP_POWER8:
    case POWERPC_EXCP_POWER9:
    case POWERPC_EXCP_POWER10:
        /*
         * Machine check codes can be found in processor User Manual or
         * Linux or skiboot source.
         */
        if (access_type == MMU_DATA_LOAD) {
            env->spr[SPR_DAR] = vaddr;
            env->spr[SPR_DSISR] = PPC_BIT(57);
            env->error_code = PPC_BIT(42);

        } else if (access_type == MMU_DATA_STORE) {
            /*
             * MCE for stores in POWER is asynchronous so hardware does
             * not set DAR, but QEMU can do better.
             */
            env->spr[SPR_DAR] = vaddr;
            env->error_code = PPC_BIT(36) | PPC_BIT(43) | PPC_BIT(45);
            env->error_code |= PPC_BIT(42);

        } else { /* Fetch */
            /*
             * is_prefix_insn_excp() tests !PPC_BIT(42) to avoid fetching
             * the instruction, so that must always be clear for fetches.
             */
            env->error_code = PPC_BIT(36) | PPC_BIT(44) | PPC_BIT(45);
        }
        break;
#endif
    default:
        /*
         * TODO: Check behaviour for other CPUs, for now do nothing.
         * Could add a basic MCE even if real hardware ignores.
         */
        return;
    }

    cs->exception_index = POWERPC_EXCP_MCHECK;
    cpu_loop_exit_restore(cs, retaddr);
}

void ppc_cpu_debug_excp_handler(CPUState *cs)
{
#if defined(TARGET_PPC64)
    CPUPPCState *env = cpu_env(cs);

    if (env->insns_flags2 & PPC2_ISA207S) {
        if (cs->watchpoint_hit) {
            if (cs->watchpoint_hit->flags & BP_CPU) {
                env->spr[SPR_DAR] = cs->watchpoint_hit->hitaddr;
                env->spr[SPR_DSISR] = PPC_BIT(41);
                cs->watchpoint_hit = NULL;
                raise_exception(env, POWERPC_EXCP_DSI);
            }
            cs->watchpoint_hit = NULL;
        } else if (cpu_breakpoint_test(cs, env->nip, BP_CPU)) {
            raise_exception_err(env, POWERPC_EXCP_TRACE,
                                PPC_BIT(33) | PPC_BIT(43));
        }
    }
#endif
}

bool ppc_cpu_debug_check_breakpoint(CPUState *cs)
{
#if defined(TARGET_PPC64)
    CPUPPCState *env = cpu_env(cs);

    if (env->insns_flags2 & PPC2_ISA207S) {
        target_ulong priv;

        priv = env->spr[SPR_CIABR] & PPC_BITMASK(62, 63);
        switch (priv) {
        case 0x1: /* problem */
            return env->msr & ((target_ulong)1 << MSR_PR);
        case 0x2: /* supervisor */
            return (!(env->msr & ((target_ulong)1 << MSR_PR)) &&
                    !(env->msr & ((target_ulong)1 << MSR_HV)));
        case 0x3: /* hypervisor */
            return (!(env->msr & ((target_ulong)1 << MSR_PR)) &&
                    (env->msr & ((target_ulong)1 << MSR_HV)));
        default:
            g_assert_not_reached();
        }
    }
#endif

    return false;
}

bool ppc_cpu_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
{
#if defined(TARGET_PPC64)
    CPUPPCState *env = cpu_env(cs);

    if (env->insns_flags2 & PPC2_ISA207S) {
        if (wp == env->dawr0_watchpoint) {
            uint32_t dawrx = env->spr[SPR_DAWRX0];
            /* DAWRX0 control bits: WT(59), WTI(60), HV(61), SV(62), PR(63) */
            bool wt = extract32(dawrx, PPC_BIT_NR(59), 1);
            bool wti = extract32(dawrx, PPC_BIT_NR(60), 1);
            bool hv = extract32(dawrx, PPC_BIT_NR(61), 1);
            bool sv = extract32(dawrx, PPC_BIT_NR(62), 1);
            bool pr = extract32(dawrx, PPC_BIT_NR(63), 1);

            if ((env->msr & ((target_ulong)1 << MSR_PR)) && !pr) {
                return false;
            } else if ((env->msr & ((target_ulong)1 << MSR_HV)) && !hv) {
                return false;
            } else if (!sv) {
                return false;
            }

            if (!wti) {
                if (env->msr & ((target_ulong)1 << MSR_DR)) {
                    if (!wt) {
                        return false;
                    }
                } else {
                    if (wt) {
                        return false;
                    }
                }
            }

            return true;
        }
    }
#endif

    return false;
}

#endif /* !CONFIG_USER_ONLY */
#endif /* CONFIG_TCG */