1 /* 2 * PowerPC exception emulation helpers for QEMU. 3 * 4 * Copyright (c) 2003-2007 Jocelyn Mayer 5 * 6 * This library is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU Lesser General Public 8 * License as published by the Free Software Foundation; either 9 * version 2.1 of the License, or (at your option) any later version. 10 * 11 * This library is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * Lesser General Public License for more details. 15 * 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 #include "qemu/osdep.h" 20 #include "qemu/main-loop.h" 21 #include "qemu/log.h" 22 #include "cpu.h" 23 #include "exec/exec-all.h" 24 #include "internal.h" 25 #include "helper_regs.h" 26 #include "hw/ppc/ppc.h" 27 28 #include "trace.h" 29 30 #ifdef CONFIG_TCG 31 #include "sysemu/tcg.h" 32 #include "exec/helper-proto.h" 33 #include "exec/cpu_ldst.h" 34 #endif 35 36 /*****************************************************************************/ 37 /* Exception processing */ 38 #ifndef CONFIG_USER_ONLY 39 40 static const char *powerpc_excp_name(int excp) 41 { 42 switch (excp) { 43 case POWERPC_EXCP_CRITICAL: return "CRITICAL"; 44 case POWERPC_EXCP_MCHECK: return "MCHECK"; 45 case POWERPC_EXCP_DSI: return "DSI"; 46 case POWERPC_EXCP_ISI: return "ISI"; 47 case POWERPC_EXCP_EXTERNAL: return "EXTERNAL"; 48 case POWERPC_EXCP_ALIGN: return "ALIGN"; 49 case POWERPC_EXCP_PROGRAM: return "PROGRAM"; 50 case POWERPC_EXCP_FPU: return "FPU"; 51 case POWERPC_EXCP_SYSCALL: return "SYSCALL"; 52 case POWERPC_EXCP_APU: return "APU"; 53 case POWERPC_EXCP_DECR: return "DECR"; 54 case POWERPC_EXCP_FIT: return "FIT"; 55 case POWERPC_EXCP_WDT: return "WDT"; 56 case POWERPC_EXCP_DTLB: return "DTLB"; 57 case POWERPC_EXCP_ITLB: return "ITLB"; 58 case POWERPC_EXCP_DEBUG: return "DEBUG"; 59 case POWERPC_EXCP_SPEU: return "SPEU"; 60 case POWERPC_EXCP_EFPDI: return "EFPDI"; 61 case POWERPC_EXCP_EFPRI: return "EFPRI"; 62 case POWERPC_EXCP_EPERFM: return "EPERFM"; 63 case POWERPC_EXCP_DOORI: return "DOORI"; 64 case POWERPC_EXCP_DOORCI: return "DOORCI"; 65 case POWERPC_EXCP_GDOORI: return "GDOORI"; 66 case POWERPC_EXCP_GDOORCI: return "GDOORCI"; 67 case POWERPC_EXCP_HYPPRIV: return "HYPPRIV"; 68 case POWERPC_EXCP_RESET: return "RESET"; 69 case POWERPC_EXCP_DSEG: return "DSEG"; 70 case POWERPC_EXCP_ISEG: return "ISEG"; 71 case POWERPC_EXCP_HDECR: return "HDECR"; 72 case POWERPC_EXCP_TRACE: return "TRACE"; 73 case POWERPC_EXCP_HDSI: return "HDSI"; 74 case POWERPC_EXCP_HISI: return "HISI"; 75 case POWERPC_EXCP_HDSEG: return "HDSEG"; 76 case POWERPC_EXCP_HISEG: return "HISEG"; 77 case POWERPC_EXCP_VPU: return "VPU"; 78 case POWERPC_EXCP_PIT: return "PIT"; 79 case POWERPC_EXCP_EMUL: return "EMUL"; 80 case POWERPC_EXCP_IFTLB: return "IFTLB"; 81 case POWERPC_EXCP_DLTLB: return "DLTLB"; 82 case POWERPC_EXCP_DSTLB: return "DSTLB"; 83 case POWERPC_EXCP_FPA: return "FPA"; 84 case POWERPC_EXCP_DABR: return "DABR"; 85 case POWERPC_EXCP_IABR: return "IABR"; 86 case POWERPC_EXCP_SMI: return "SMI"; 87 case POWERPC_EXCP_PERFM: return "PERFM"; 88 case POWERPC_EXCP_THERM: return "THERM"; 89 case POWERPC_EXCP_VPUA: return "VPUA"; 90 case POWERPC_EXCP_SOFTP: return "SOFTP"; 91 case POWERPC_EXCP_MAINT: return "MAINT"; 92 case POWERPC_EXCP_MEXTBR: return "MEXTBR"; 93 case POWERPC_EXCP_NMEXTBR: return "NMEXTBR"; 94 case POWERPC_EXCP_ITLBE: return "ITLBE"; 95 case POWERPC_EXCP_DTLBE: return "DTLBE"; 96 case POWERPC_EXCP_VSXU: return "VSXU"; 97 case POWERPC_EXCP_FU: return "FU"; 98 case POWERPC_EXCP_HV_EMU: return "HV_EMU"; 99 case POWERPC_EXCP_HV_MAINT: return "HV_MAINT"; 100 case POWERPC_EXCP_HV_FU: return "HV_FU"; 101 case POWERPC_EXCP_SDOOR: return "SDOOR"; 102 case POWERPC_EXCP_SDOOR_HV: return "SDOOR_HV"; 103 case POWERPC_EXCP_HVIRT: return "HVIRT"; 104 case POWERPC_EXCP_SYSCALL_VECTORED: return "SYSCALL_VECTORED"; 105 default: 106 g_assert_not_reached(); 107 } 108 } 109 110 static void dump_syscall(CPUPPCState *env) 111 { 112 qemu_log_mask(CPU_LOG_INT, "syscall r0=%016" PRIx64 113 " r3=%016" PRIx64 " r4=%016" PRIx64 " r5=%016" PRIx64 114 " r6=%016" PRIx64 " r7=%016" PRIx64 " r8=%016" PRIx64 115 " nip=" TARGET_FMT_lx "\n", 116 ppc_dump_gpr(env, 0), ppc_dump_gpr(env, 3), 117 ppc_dump_gpr(env, 4), ppc_dump_gpr(env, 5), 118 ppc_dump_gpr(env, 6), ppc_dump_gpr(env, 7), 119 ppc_dump_gpr(env, 8), env->nip); 120 } 121 122 static void dump_hcall(CPUPPCState *env) 123 { 124 qemu_log_mask(CPU_LOG_INT, "hypercall r3=%016" PRIx64 125 " r4=%016" PRIx64 " r5=%016" PRIx64 " r6=%016" PRIx64 126 " r7=%016" PRIx64 " r8=%016" PRIx64 " r9=%016" PRIx64 127 " r10=%016" PRIx64 " r11=%016" PRIx64 " r12=%016" PRIx64 128 " nip=" TARGET_FMT_lx "\n", 129 ppc_dump_gpr(env, 3), ppc_dump_gpr(env, 4), 130 ppc_dump_gpr(env, 5), ppc_dump_gpr(env, 6), 131 ppc_dump_gpr(env, 7), ppc_dump_gpr(env, 8), 132 ppc_dump_gpr(env, 9), ppc_dump_gpr(env, 10), 133 ppc_dump_gpr(env, 11), ppc_dump_gpr(env, 12), 134 env->nip); 135 } 136 137 #ifdef CONFIG_TCG 138 /* Return true iff byteswap is needed to load instruction */ 139 static inline bool insn_need_byteswap(CPUArchState *env) 140 { 141 /* SYSTEM builds TARGET_BIG_ENDIAN. Need to swap when MSR[LE] is set */ 142 return !!(env->msr & ((target_ulong)1 << MSR_LE)); 143 } 144 145 static uint32_t ppc_ldl_code(CPUArchState *env, target_ulong addr) 146 { 147 uint32_t insn = cpu_ldl_code(env, addr); 148 149 if (insn_need_byteswap(env)) { 150 insn = bswap32(insn); 151 } 152 153 return insn; 154 } 155 #endif 156 157 static void ppc_excp_debug_sw_tlb(CPUPPCState *env, int excp) 158 { 159 const char *es; 160 target_ulong *miss, *cmp; 161 int en; 162 163 if (!qemu_loglevel_mask(CPU_LOG_MMU)) { 164 return; 165 } 166 167 if (excp == POWERPC_EXCP_IFTLB) { 168 es = "I"; 169 en = 'I'; 170 miss = &env->spr[SPR_IMISS]; 171 cmp = &env->spr[SPR_ICMP]; 172 } else { 173 if (excp == POWERPC_EXCP_DLTLB) { 174 es = "DL"; 175 } else { 176 es = "DS"; 177 } 178 en = 'D'; 179 miss = &env->spr[SPR_DMISS]; 180 cmp = &env->spr[SPR_DCMP]; 181 } 182 qemu_log("6xx %sTLB miss: %cM " TARGET_FMT_lx " %cC " 183 TARGET_FMT_lx " H1 " TARGET_FMT_lx " H2 " 184 TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp, 185 env->spr[SPR_HASH1], env->spr[SPR_HASH2], 186 env->error_code); 187 } 188 189 #ifdef TARGET_PPC64 190 static int powerpc_reset_wakeup(CPUPPCState *env, int excp, target_ulong *msr) 191 { 192 /* We no longer are in a PM state */ 193 env->resume_as_sreset = false; 194 195 /* Pretend to be returning from doze always as we don't lose state */ 196 *msr |= SRR1_WS_NOLOSS; 197 198 /* Machine checks are sent normally */ 199 if (excp == POWERPC_EXCP_MCHECK) { 200 return excp; 201 } 202 switch (excp) { 203 case POWERPC_EXCP_RESET: 204 *msr |= SRR1_WAKERESET; 205 break; 206 case POWERPC_EXCP_EXTERNAL: 207 *msr |= SRR1_WAKEEE; 208 break; 209 case POWERPC_EXCP_DECR: 210 *msr |= SRR1_WAKEDEC; 211 break; 212 case POWERPC_EXCP_SDOOR: 213 *msr |= SRR1_WAKEDBELL; 214 break; 215 case POWERPC_EXCP_SDOOR_HV: 216 *msr |= SRR1_WAKEHDBELL; 217 break; 218 case POWERPC_EXCP_HV_MAINT: 219 *msr |= SRR1_WAKEHMI; 220 break; 221 case POWERPC_EXCP_HVIRT: 222 *msr |= SRR1_WAKEHVI; 223 break; 224 default: 225 cpu_abort(env_cpu(env), 226 "Unsupported exception %d in Power Save mode\n", excp); 227 } 228 return POWERPC_EXCP_RESET; 229 } 230 231 /* 232 * AIL - Alternate Interrupt Location, a mode that allows interrupts to be 233 * taken with the MMU on, and which uses an alternate location (e.g., so the 234 * kernel/hv can map the vectors there with an effective address). 235 * 236 * An interrupt is considered to be taken "with AIL" or "AIL applies" if they 237 * are delivered in this way. AIL requires the LPCR to be set to enable this 238 * mode, and then a number of conditions have to be true for AIL to apply. 239 * 240 * First of all, SRESET, MCE, and HMI are always delivered without AIL, because 241 * they specifically want to be in real mode (e.g., the MCE might be signaling 242 * a SLB multi-hit which requires SLB flush before the MMU can be enabled). 243 * 244 * After that, behaviour depends on the current MSR[IR], MSR[DR], MSR[HV], 245 * whether or not the interrupt changes MSR[HV] from 0 to 1, and the current 246 * radix mode (LPCR[HR]). 247 * 248 * POWER8, POWER9 with LPCR[HR]=0 249 * | LPCR[AIL] | MSR[IR||DR] | MSR[HV] | new MSR[HV] | AIL | 250 * +-----------+-------------+---------+-------------+-----+ 251 * | a | 00/01/10 | x | x | 0 | 252 * | a | 11 | 0 | 1 | 0 | 253 * | a | 11 | 1 | 1 | a | 254 * | a | 11 | 0 | 0 | a | 255 * +-------------------------------------------------------+ 256 * 257 * POWER9 with LPCR[HR]=1 258 * | LPCR[AIL] | MSR[IR||DR] | MSR[HV] | new MSR[HV] | AIL | 259 * +-----------+-------------+---------+-------------+-----+ 260 * | a | 00/01/10 | x | x | 0 | 261 * | a | 11 | x | x | a | 262 * +-------------------------------------------------------+ 263 * 264 * The difference with POWER9 being that MSR[HV] 0->1 interrupts can be sent to 265 * the hypervisor in AIL mode if the guest is radix. This is good for 266 * performance but allows the guest to influence the AIL of hypervisor 267 * interrupts using its MSR, and also the hypervisor must disallow guest 268 * interrupts (MSR[HV] 0->0) from using AIL if the hypervisor does not want to 269 * use AIL for its MSR[HV] 0->1 interrupts. 270 * 271 * POWER10 addresses those issues with a new LPCR[HAIL] bit that is applied to 272 * interrupts that begin execution with MSR[HV]=1 (so both MSR[HV] 0->1 and 273 * MSR[HV] 1->1). 274 * 275 * HAIL=1 is equivalent to AIL=3, for interrupts delivered with MSR[HV]=1. 276 * 277 * POWER10 behaviour is 278 * | LPCR[AIL] | LPCR[HAIL] | MSR[IR||DR] | MSR[HV] | new MSR[HV] | AIL | 279 * +-----------+------------+-------------+---------+-------------+-----+ 280 * | a | h | 00/01/10 | 0 | 0 | 0 | 281 * | a | h | 11 | 0 | 0 | a | 282 * | a | h | x | 0 | 1 | h | 283 * | a | h | 00/01/10 | 1 | 1 | 0 | 284 * | a | h | 11 | 1 | 1 | h | 285 * +--------------------------------------------------------------------+ 286 */ 287 static void ppc_excp_apply_ail(PowerPCCPU *cpu, int excp, target_ulong msr, 288 target_ulong *new_msr, target_ulong *vector) 289 { 290 PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); 291 CPUPPCState *env = &cpu->env; 292 bool mmu_all_on = ((msr >> MSR_IR) & 1) && ((msr >> MSR_DR) & 1); 293 bool hv_escalation = !(msr & MSR_HVB) && (*new_msr & MSR_HVB); 294 int ail = 0; 295 296 if (excp == POWERPC_EXCP_MCHECK || 297 excp == POWERPC_EXCP_RESET || 298 excp == POWERPC_EXCP_HV_MAINT) { 299 /* SRESET, MCE, HMI never apply AIL */ 300 return; 301 } 302 303 if (!(pcc->lpcr_mask & LPCR_AIL)) { 304 /* This CPU does not have AIL */ 305 return; 306 } 307 308 /* P8 & P9 */ 309 if (!(pcc->lpcr_mask & LPCR_HAIL)) { 310 if (!mmu_all_on) { 311 /* AIL only works if MSR[IR] and MSR[DR] are both enabled. */ 312 return; 313 } 314 if (hv_escalation && !(env->spr[SPR_LPCR] & LPCR_HR)) { 315 /* 316 * AIL does not work if there is a MSR[HV] 0->1 transition and the 317 * partition is in HPT mode. For radix guests, such interrupts are 318 * allowed to be delivered to the hypervisor in ail mode. 319 */ 320 return; 321 } 322 323 ail = (env->spr[SPR_LPCR] & LPCR_AIL) >> LPCR_AIL_SHIFT; 324 if (ail == 0) { 325 return; 326 } 327 if (ail == 1) { 328 /* AIL=1 is reserved, treat it like AIL=0 */ 329 return; 330 } 331 332 /* P10 and up */ 333 } else { 334 if (!mmu_all_on && !hv_escalation) { 335 /* 336 * AIL works for HV interrupts even with guest MSR[IR/DR] disabled. 337 * Guest->guest and HV->HV interrupts do require MMU on. 338 */ 339 return; 340 } 341 342 if (*new_msr & MSR_HVB) { 343 if (!(env->spr[SPR_LPCR] & LPCR_HAIL)) { 344 /* HV interrupts depend on LPCR[HAIL] */ 345 return; 346 } 347 ail = 3; /* HAIL=1 gives AIL=3 behaviour for HV interrupts */ 348 } else { 349 ail = (env->spr[SPR_LPCR] & LPCR_AIL) >> LPCR_AIL_SHIFT; 350 } 351 if (ail == 0) { 352 return; 353 } 354 if (ail == 1 || ail == 2) { 355 /* AIL=1 and AIL=2 are reserved, treat them like AIL=0 */ 356 return; 357 } 358 } 359 360 /* 361 * AIL applies, so the new MSR gets IR and DR set, and an offset applied 362 * to the new IP. 363 */ 364 *new_msr |= (1 << MSR_IR) | (1 << MSR_DR); 365 366 if (excp != POWERPC_EXCP_SYSCALL_VECTORED) { 367 if (ail == 2) { 368 *vector |= 0x0000000000018000ull; 369 } else if (ail == 3) { 370 *vector |= 0xc000000000004000ull; 371 } 372 } else { 373 /* 374 * scv AIL is a little different. AIL=2 does not change the address, 375 * only the MSR. AIL=3 replaces the 0x17000 base with 0xc...3000. 376 */ 377 if (ail == 3) { 378 *vector &= ~0x0000000000017000ull; /* Un-apply the base offset */ 379 *vector |= 0xc000000000003000ull; /* Apply scv's AIL=3 offset */ 380 } 381 } 382 } 383 #endif /* TARGET_PPC64 */ 384 385 static void powerpc_reset_excp_state(PowerPCCPU *cpu) 386 { 387 CPUState *cs = CPU(cpu); 388 CPUPPCState *env = &cpu->env; 389 390 /* Reset exception state */ 391 cs->exception_index = POWERPC_EXCP_NONE; 392 env->error_code = 0; 393 } 394 395 static void powerpc_set_excp_state(PowerPCCPU *cpu, target_ulong vector, 396 target_ulong msr) 397 { 398 CPUPPCState *env = &cpu->env; 399 400 assert((msr & env->msr_mask) == msr); 401 402 /* 403 * We don't use hreg_store_msr here as already have treated any 404 * special case that could occur. Just store MSR and update hflags 405 * 406 * Note: We *MUST* not use hreg_store_msr() as-is anyway because it will 407 * prevent setting of the HV bit which some exceptions might need to do. 408 */ 409 env->nip = vector; 410 env->msr = msr; 411 hreg_compute_hflags(env); 412 ppc_maybe_interrupt(env); 413 414 powerpc_reset_excp_state(cpu); 415 416 /* 417 * Any interrupt is context synchronizing, check if TCG TLB needs 418 * a delayed flush on ppc64 419 */ 420 check_tlb_flush(env, false); 421 422 /* Reset the reservation */ 423 env->reserve_addr = -1; 424 } 425 426 static void powerpc_mcheck_checkstop(CPUPPCState *env) 427 { 428 CPUState *cs = env_cpu(env); 429 430 if (FIELD_EX64(env->msr, MSR, ME)) { 431 return; 432 } 433 434 /* Machine check exception is not enabled. Enter checkstop state. */ 435 fprintf(stderr, "Machine check while not allowed. " 436 "Entering checkstop state\n"); 437 if (qemu_log_separate()) { 438 qemu_log("Machine check while not allowed. " 439 "Entering checkstop state\n"); 440 } 441 cs->halted = 1; 442 cpu_interrupt_exittb(cs); 443 } 444 445 static void powerpc_excp_40x(PowerPCCPU *cpu, int excp) 446 { 447 CPUPPCState *env = &cpu->env; 448 target_ulong msr, new_msr, vector; 449 int srr0 = SPR_SRR0, srr1 = SPR_SRR1; 450 451 /* new srr1 value excluding must-be-zero bits */ 452 msr = env->msr & ~0x783f0000ULL; 453 454 /* new interrupt handler msr preserves ME unless explicitly overridden */ 455 new_msr = env->msr & (((target_ulong)1 << MSR_ME)); 456 457 /* HV emu assistance interrupt only exists on server arch 2.05 or later */ 458 if (excp == POWERPC_EXCP_HV_EMU) { 459 excp = POWERPC_EXCP_PROGRAM; 460 } 461 462 vector = env->excp_vectors[excp]; 463 if (vector == (target_ulong)-1ULL) { 464 cpu_abort(env_cpu(env), 465 "Raised an exception without defined vector %d\n", excp); 466 } 467 vector |= env->excp_prefix; 468 469 switch (excp) { 470 case POWERPC_EXCP_CRITICAL: /* Critical input */ 471 srr0 = SPR_40x_SRR2; 472 srr1 = SPR_40x_SRR3; 473 break; 474 case POWERPC_EXCP_MCHECK: /* Machine check exception */ 475 powerpc_mcheck_checkstop(env); 476 /* machine check exceptions don't have ME set */ 477 new_msr &= ~((target_ulong)1 << MSR_ME); 478 srr0 = SPR_40x_SRR2; 479 srr1 = SPR_40x_SRR3; 480 break; 481 case POWERPC_EXCP_DSI: /* Data storage exception */ 482 trace_ppc_excp_dsi(env->spr[SPR_40x_ESR], env->spr[SPR_40x_DEAR]); 483 break; 484 case POWERPC_EXCP_ISI: /* Instruction storage exception */ 485 trace_ppc_excp_isi(msr, env->nip); 486 break; 487 case POWERPC_EXCP_EXTERNAL: /* External input */ 488 break; 489 case POWERPC_EXCP_ALIGN: /* Alignment exception */ 490 break; 491 case POWERPC_EXCP_PROGRAM: /* Program exception */ 492 switch (env->error_code & ~0xF) { 493 case POWERPC_EXCP_FP: 494 if (!FIELD_EX64_FE(env->msr) || !FIELD_EX64(env->msr, MSR, FP)) { 495 trace_ppc_excp_fp_ignore(); 496 powerpc_reset_excp_state(cpu); 497 return; 498 } 499 env->spr[SPR_40x_ESR] = ESR_FP; 500 break; 501 case POWERPC_EXCP_INVAL: 502 trace_ppc_excp_inval(env->nip); 503 env->spr[SPR_40x_ESR] = ESR_PIL; 504 break; 505 case POWERPC_EXCP_PRIV: 506 env->spr[SPR_40x_ESR] = ESR_PPR; 507 break; 508 case POWERPC_EXCP_TRAP: 509 env->spr[SPR_40x_ESR] = ESR_PTR; 510 break; 511 default: 512 cpu_abort(env_cpu(env), "Invalid program exception %d. Aborting\n", 513 env->error_code); 514 break; 515 } 516 break; 517 case POWERPC_EXCP_SYSCALL: /* System call exception */ 518 dump_syscall(env); 519 520 /* 521 * We need to correct the NIP which in this case is supposed 522 * to point to the next instruction 523 */ 524 env->nip += 4; 525 break; 526 case POWERPC_EXCP_FIT: /* Fixed-interval timer interrupt */ 527 trace_ppc_excp_print("FIT"); 528 break; 529 case POWERPC_EXCP_WDT: /* Watchdog timer interrupt */ 530 trace_ppc_excp_print("WDT"); 531 break; 532 case POWERPC_EXCP_DTLB: /* Data TLB error */ 533 case POWERPC_EXCP_ITLB: /* Instruction TLB error */ 534 break; 535 case POWERPC_EXCP_PIT: /* Programmable interval timer interrupt */ 536 trace_ppc_excp_print("PIT"); 537 break; 538 case POWERPC_EXCP_DEBUG: /* Debug interrupt */ 539 cpu_abort(env_cpu(env), "%s exception not implemented\n", 540 powerpc_excp_name(excp)); 541 break; 542 default: 543 cpu_abort(env_cpu(env), "Invalid PowerPC exception %d. Aborting\n", 544 excp); 545 break; 546 } 547 548 env->spr[srr0] = env->nip; 549 env->spr[srr1] = msr; 550 powerpc_set_excp_state(cpu, vector, new_msr); 551 } 552 553 static void powerpc_excp_6xx(PowerPCCPU *cpu, int excp) 554 { 555 CPUPPCState *env = &cpu->env; 556 target_ulong msr, new_msr, vector; 557 558 /* new srr1 value excluding must-be-zero bits */ 559 msr = env->msr & ~0x783f0000ULL; 560 561 /* new interrupt handler msr preserves ME unless explicitly overridden */ 562 new_msr = env->msr & ((target_ulong)1 << MSR_ME); 563 564 /* HV emu assistance interrupt only exists on server arch 2.05 or later */ 565 if (excp == POWERPC_EXCP_HV_EMU) { 566 excp = POWERPC_EXCP_PROGRAM; 567 } 568 569 vector = env->excp_vectors[excp]; 570 if (vector == (target_ulong)-1ULL) { 571 cpu_abort(env_cpu(env), 572 "Raised an exception without defined vector %d\n", excp); 573 } 574 vector |= env->excp_prefix; 575 576 switch (excp) { 577 case POWERPC_EXCP_CRITICAL: /* Critical input */ 578 break; 579 case POWERPC_EXCP_MCHECK: /* Machine check exception */ 580 powerpc_mcheck_checkstop(env); 581 /* machine check exceptions don't have ME set */ 582 new_msr &= ~((target_ulong)1 << MSR_ME); 583 break; 584 case POWERPC_EXCP_DSI: /* Data storage exception */ 585 trace_ppc_excp_dsi(env->spr[SPR_DSISR], env->spr[SPR_DAR]); 586 break; 587 case POWERPC_EXCP_ISI: /* Instruction storage exception */ 588 trace_ppc_excp_isi(msr, env->nip); 589 msr |= env->error_code; 590 break; 591 case POWERPC_EXCP_EXTERNAL: /* External input */ 592 break; 593 case POWERPC_EXCP_ALIGN: /* Alignment exception */ 594 /* Get rS/rD and rA from faulting opcode */ 595 /* 596 * Note: the opcode fields will not be set properly for a 597 * direct store load/store, but nobody cares as nobody 598 * actually uses direct store segments. 599 */ 600 env->spr[SPR_DSISR] |= (env->error_code & 0x03FF0000) >> 16; 601 break; 602 case POWERPC_EXCP_PROGRAM: /* Program exception */ 603 switch (env->error_code & ~0xF) { 604 case POWERPC_EXCP_FP: 605 if (!FIELD_EX64_FE(env->msr) || !FIELD_EX64(env->msr, MSR, FP)) { 606 trace_ppc_excp_fp_ignore(); 607 powerpc_reset_excp_state(cpu); 608 return; 609 } 610 /* 611 * NIP always points to the faulting instruction for FP exceptions, 612 * so always use store_next and claim we are precise in the MSR. 613 */ 614 msr |= 0x00100000; 615 break; 616 case POWERPC_EXCP_INVAL: 617 trace_ppc_excp_inval(env->nip); 618 msr |= 0x00080000; 619 break; 620 case POWERPC_EXCP_PRIV: 621 msr |= 0x00040000; 622 break; 623 case POWERPC_EXCP_TRAP: 624 msr |= 0x00020000; 625 break; 626 default: 627 /* Should never occur */ 628 cpu_abort(env_cpu(env), "Invalid program exception %d. Aborting\n", 629 env->error_code); 630 break; 631 } 632 break; 633 case POWERPC_EXCP_SYSCALL: /* System call exception */ 634 dump_syscall(env); 635 636 /* 637 * We need to correct the NIP which in this case is supposed 638 * to point to the next instruction 639 */ 640 env->nip += 4; 641 break; 642 case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */ 643 case POWERPC_EXCP_DECR: /* Decrementer exception */ 644 break; 645 case POWERPC_EXCP_DTLB: /* Data TLB error */ 646 case POWERPC_EXCP_ITLB: /* Instruction TLB error */ 647 break; 648 case POWERPC_EXCP_RESET: /* System reset exception */ 649 if (FIELD_EX64(env->msr, MSR, POW)) { 650 cpu_abort(env_cpu(env), 651 "Trying to deliver power-saving system reset exception " 652 "%d with no HV support\n", excp); 653 } 654 break; 655 case POWERPC_EXCP_TRACE: /* Trace exception */ 656 break; 657 case POWERPC_EXCP_IFTLB: /* Instruction fetch TLB error */ 658 case POWERPC_EXCP_DLTLB: /* Data load TLB miss */ 659 case POWERPC_EXCP_DSTLB: /* Data store TLB miss */ 660 /* Swap temporary saved registers with GPRs */ 661 if (!(new_msr & ((target_ulong)1 << MSR_TGPR))) { 662 new_msr |= (target_ulong)1 << MSR_TGPR; 663 hreg_swap_gpr_tgpr(env); 664 } 665 666 ppc_excp_debug_sw_tlb(env, excp); 667 668 msr |= env->crf[0] << 28; 669 msr |= env->error_code; /* key, D/I, S/L bits */ 670 /* Set way using a LRU mechanism */ 671 msr |= ((env->last_way + 1) & (env->nb_ways - 1)) << 17; 672 break; 673 case POWERPC_EXCP_FPA: /* Floating-point assist exception */ 674 case POWERPC_EXCP_DABR: /* Data address breakpoint */ 675 case POWERPC_EXCP_IABR: /* Instruction address breakpoint */ 676 case POWERPC_EXCP_SMI: /* System management interrupt */ 677 case POWERPC_EXCP_MEXTBR: /* Maskable external breakpoint */ 678 case POWERPC_EXCP_NMEXTBR: /* Non maskable external breakpoint */ 679 cpu_abort(env_cpu(env), "%s exception not implemented\n", 680 powerpc_excp_name(excp)); 681 break; 682 default: 683 cpu_abort(env_cpu(env), "Invalid PowerPC exception %d. Aborting\n", 684 excp); 685 break; 686 } 687 688 if (ppc_interrupts_little_endian(cpu, !!(new_msr & MSR_HVB))) { 689 new_msr |= (target_ulong)1 << MSR_LE; 690 } 691 env->spr[SPR_SRR0] = env->nip; 692 env->spr[SPR_SRR1] = msr; 693 powerpc_set_excp_state(cpu, vector, new_msr); 694 } 695 696 static void powerpc_excp_7xx(PowerPCCPU *cpu, int excp) 697 { 698 CPUPPCState *env = &cpu->env; 699 target_ulong msr, new_msr, vector; 700 701 /* new srr1 value excluding must-be-zero bits */ 702 msr = env->msr & ~0x783f0000ULL; 703 704 /* new interrupt handler msr preserves ME unless explicitly overridden */ 705 new_msr = env->msr & ((target_ulong)1 << MSR_ME); 706 707 /* HV emu assistance interrupt only exists on server arch 2.05 or later */ 708 if (excp == POWERPC_EXCP_HV_EMU) { 709 excp = POWERPC_EXCP_PROGRAM; 710 } 711 712 vector = env->excp_vectors[excp]; 713 if (vector == (target_ulong)-1ULL) { 714 cpu_abort(env_cpu(env), 715 "Raised an exception without defined vector %d\n", excp); 716 } 717 vector |= env->excp_prefix; 718 719 switch (excp) { 720 case POWERPC_EXCP_MCHECK: /* Machine check exception */ 721 powerpc_mcheck_checkstop(env); 722 /* machine check exceptions don't have ME set */ 723 new_msr &= ~((target_ulong)1 << MSR_ME); 724 break; 725 case POWERPC_EXCP_DSI: /* Data storage exception */ 726 trace_ppc_excp_dsi(env->spr[SPR_DSISR], env->spr[SPR_DAR]); 727 break; 728 case POWERPC_EXCP_ISI: /* Instruction storage exception */ 729 trace_ppc_excp_isi(msr, env->nip); 730 msr |= env->error_code; 731 break; 732 case POWERPC_EXCP_EXTERNAL: /* External input */ 733 break; 734 case POWERPC_EXCP_ALIGN: /* Alignment exception */ 735 /* Get rS/rD and rA from faulting opcode */ 736 /* 737 * Note: the opcode fields will not be set properly for a 738 * direct store load/store, but nobody cares as nobody 739 * actually uses direct store segments. 740 */ 741 env->spr[SPR_DSISR] |= (env->error_code & 0x03FF0000) >> 16; 742 break; 743 case POWERPC_EXCP_PROGRAM: /* Program exception */ 744 switch (env->error_code & ~0xF) { 745 case POWERPC_EXCP_FP: 746 if (!FIELD_EX64_FE(env->msr) || !FIELD_EX64(env->msr, MSR, FP)) { 747 trace_ppc_excp_fp_ignore(); 748 powerpc_reset_excp_state(cpu); 749 return; 750 } 751 /* 752 * NIP always points to the faulting instruction for FP exceptions, 753 * so always use store_next and claim we are precise in the MSR. 754 */ 755 msr |= 0x00100000; 756 break; 757 case POWERPC_EXCP_INVAL: 758 trace_ppc_excp_inval(env->nip); 759 msr |= 0x00080000; 760 break; 761 case POWERPC_EXCP_PRIV: 762 msr |= 0x00040000; 763 break; 764 case POWERPC_EXCP_TRAP: 765 msr |= 0x00020000; 766 break; 767 default: 768 /* Should never occur */ 769 cpu_abort(env_cpu(env), "Invalid program exception %d. Aborting\n", 770 env->error_code); 771 break; 772 } 773 break; 774 case POWERPC_EXCP_SYSCALL: /* System call exception */ 775 { 776 int lev = env->error_code; 777 778 if (lev == 1 && cpu->vhyp) { 779 dump_hcall(env); 780 } else { 781 dump_syscall(env); 782 } 783 784 /* 785 * We need to correct the NIP which in this case is supposed 786 * to point to the next instruction 787 */ 788 env->nip += 4; 789 790 /* 791 * The Virtual Open Firmware (VOF) relies on the 'sc 1' 792 * instruction to communicate with QEMU. The pegasos2 machine 793 * uses VOF and the 7xx CPUs, so although the 7xx don't have 794 * HV mode, we need to keep hypercall support. 795 */ 796 if (lev == 1 && cpu->vhyp) { 797 PPCVirtualHypervisorClass *vhc = 798 PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp); 799 vhc->hypercall(cpu->vhyp, cpu); 800 powerpc_reset_excp_state(cpu); 801 return; 802 } 803 804 break; 805 } 806 case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */ 807 case POWERPC_EXCP_DECR: /* Decrementer exception */ 808 break; 809 case POWERPC_EXCP_RESET: /* System reset exception */ 810 if (FIELD_EX64(env->msr, MSR, POW)) { 811 cpu_abort(env_cpu(env), 812 "Trying to deliver power-saving system reset exception " 813 "%d with no HV support\n", excp); 814 } 815 break; 816 case POWERPC_EXCP_TRACE: /* Trace exception */ 817 break; 818 case POWERPC_EXCP_IFTLB: /* Instruction fetch TLB error */ 819 case POWERPC_EXCP_DLTLB: /* Data load TLB miss */ 820 case POWERPC_EXCP_DSTLB: /* Data store TLB miss */ 821 ppc_excp_debug_sw_tlb(env, excp); 822 msr |= env->crf[0] << 28; 823 msr |= env->error_code; /* key, D/I, S/L bits */ 824 /* Set way using a LRU mechanism */ 825 msr |= ((env->last_way + 1) & (env->nb_ways - 1)) << 17; 826 break; 827 case POWERPC_EXCP_IABR: /* Instruction address breakpoint */ 828 case POWERPC_EXCP_SMI: /* System management interrupt */ 829 case POWERPC_EXCP_THERM: /* Thermal interrupt */ 830 case POWERPC_EXCP_PERFM: /* Embedded performance monitor interrupt */ 831 cpu_abort(env_cpu(env), "%s exception not implemented\n", 832 powerpc_excp_name(excp)); 833 break; 834 default: 835 cpu_abort(env_cpu(env), "Invalid PowerPC exception %d. Aborting\n", 836 excp); 837 break; 838 } 839 840 if (ppc_interrupts_little_endian(cpu, !!(new_msr & MSR_HVB))) { 841 new_msr |= (target_ulong)1 << MSR_LE; 842 } 843 env->spr[SPR_SRR0] = env->nip; 844 env->spr[SPR_SRR1] = msr; 845 powerpc_set_excp_state(cpu, vector, new_msr); 846 } 847 848 static void powerpc_excp_74xx(PowerPCCPU *cpu, int excp) 849 { 850 CPUPPCState *env = &cpu->env; 851 target_ulong msr, new_msr, vector; 852 853 /* new srr1 value excluding must-be-zero bits */ 854 msr = env->msr & ~0x783f0000ULL; 855 856 /* new interrupt handler msr preserves ME unless explicitly overridden */ 857 new_msr = env->msr & ((target_ulong)1 << MSR_ME); 858 859 /* HV emu assistance interrupt only exists on server arch 2.05 or later */ 860 if (excp == POWERPC_EXCP_HV_EMU) { 861 excp = POWERPC_EXCP_PROGRAM; 862 } 863 864 vector = env->excp_vectors[excp]; 865 if (vector == (target_ulong)-1ULL) { 866 cpu_abort(env_cpu(env), 867 "Raised an exception without defined vector %d\n", excp); 868 } 869 vector |= env->excp_prefix; 870 871 switch (excp) { 872 case POWERPC_EXCP_MCHECK: /* Machine check exception */ 873 powerpc_mcheck_checkstop(env); 874 /* machine check exceptions don't have ME set */ 875 new_msr &= ~((target_ulong)1 << MSR_ME); 876 break; 877 case POWERPC_EXCP_DSI: /* Data storage exception */ 878 trace_ppc_excp_dsi(env->spr[SPR_DSISR], env->spr[SPR_DAR]); 879 break; 880 case POWERPC_EXCP_ISI: /* Instruction storage exception */ 881 trace_ppc_excp_isi(msr, env->nip); 882 msr |= env->error_code; 883 break; 884 case POWERPC_EXCP_EXTERNAL: /* External input */ 885 break; 886 case POWERPC_EXCP_ALIGN: /* Alignment exception */ 887 /* Get rS/rD and rA from faulting opcode */ 888 /* 889 * Note: the opcode fields will not be set properly for a 890 * direct store load/store, but nobody cares as nobody 891 * actually uses direct store segments. 892 */ 893 env->spr[SPR_DSISR] |= (env->error_code & 0x03FF0000) >> 16; 894 break; 895 case POWERPC_EXCP_PROGRAM: /* Program exception */ 896 switch (env->error_code & ~0xF) { 897 case POWERPC_EXCP_FP: 898 if (!FIELD_EX64_FE(env->msr) || !FIELD_EX64(env->msr, MSR, FP)) { 899 trace_ppc_excp_fp_ignore(); 900 powerpc_reset_excp_state(cpu); 901 return; 902 } 903 /* 904 * NIP always points to the faulting instruction for FP exceptions, 905 * so always use store_next and claim we are precise in the MSR. 906 */ 907 msr |= 0x00100000; 908 break; 909 case POWERPC_EXCP_INVAL: 910 trace_ppc_excp_inval(env->nip); 911 msr |= 0x00080000; 912 break; 913 case POWERPC_EXCP_PRIV: 914 msr |= 0x00040000; 915 break; 916 case POWERPC_EXCP_TRAP: 917 msr |= 0x00020000; 918 break; 919 default: 920 /* Should never occur */ 921 cpu_abort(env_cpu(env), "Invalid program exception %d. Aborting\n", 922 env->error_code); 923 break; 924 } 925 break; 926 case POWERPC_EXCP_SYSCALL: /* System call exception */ 927 { 928 int lev = env->error_code; 929 930 if (lev == 1 && cpu->vhyp) { 931 dump_hcall(env); 932 } else { 933 dump_syscall(env); 934 } 935 936 /* 937 * We need to correct the NIP which in this case is supposed 938 * to point to the next instruction 939 */ 940 env->nip += 4; 941 942 /* 943 * The Virtual Open Firmware (VOF) relies on the 'sc 1' 944 * instruction to communicate with QEMU. The pegasos2 machine 945 * uses VOF and the 74xx CPUs, so although the 74xx don't have 946 * HV mode, we need to keep hypercall support. 947 */ 948 if (lev == 1 && cpu->vhyp) { 949 PPCVirtualHypervisorClass *vhc = 950 PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp); 951 vhc->hypercall(cpu->vhyp, cpu); 952 powerpc_reset_excp_state(cpu); 953 return; 954 } 955 956 break; 957 } 958 case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */ 959 case POWERPC_EXCP_DECR: /* Decrementer exception */ 960 break; 961 case POWERPC_EXCP_RESET: /* System reset exception */ 962 if (FIELD_EX64(env->msr, MSR, POW)) { 963 cpu_abort(env_cpu(env), 964 "Trying to deliver power-saving system reset " 965 "exception %d with no HV support\n", excp); 966 } 967 break; 968 case POWERPC_EXCP_TRACE: /* Trace exception */ 969 break; 970 case POWERPC_EXCP_VPU: /* Vector unavailable exception */ 971 break; 972 case POWERPC_EXCP_IABR: /* Instruction address breakpoint */ 973 case POWERPC_EXCP_SMI: /* System management interrupt */ 974 case POWERPC_EXCP_THERM: /* Thermal interrupt */ 975 case POWERPC_EXCP_PERFM: /* Embedded performance monitor interrupt */ 976 case POWERPC_EXCP_VPUA: /* Vector assist exception */ 977 cpu_abort(env_cpu(env), "%s exception not implemented\n", 978 powerpc_excp_name(excp)); 979 break; 980 default: 981 cpu_abort(env_cpu(env), "Invalid PowerPC exception %d. Aborting\n", 982 excp); 983 break; 984 } 985 986 if (ppc_interrupts_little_endian(cpu, !!(new_msr & MSR_HVB))) { 987 new_msr |= (target_ulong)1 << MSR_LE; 988 } 989 env->spr[SPR_SRR0] = env->nip; 990 env->spr[SPR_SRR1] = msr; 991 powerpc_set_excp_state(cpu, vector, new_msr); 992 } 993 994 static void powerpc_excp_booke(PowerPCCPU *cpu, int excp) 995 { 996 CPUPPCState *env = &cpu->env; 997 target_ulong msr, new_msr, vector; 998 int srr0 = SPR_SRR0, srr1 = SPR_SRR1; 999 1000 /* 1001 * Book E does not play games with certain bits of xSRR1 being MSR save 1002 * bits and others being error status. xSRR1 is the old MSR, period. 1003 */ 1004 msr = env->msr; 1005 1006 /* new interrupt handler msr preserves ME unless explicitly overridden */ 1007 new_msr = env->msr & ((target_ulong)1 << MSR_ME); 1008 1009 /* HV emu assistance interrupt only exists on server arch 2.05 or later */ 1010 if (excp == POWERPC_EXCP_HV_EMU) { 1011 excp = POWERPC_EXCP_PROGRAM; 1012 } 1013 1014 #ifdef TARGET_PPC64 1015 /* 1016 * SPEU and VPU share the same IVOR but they exist in different 1017 * processors. SPEU is e500v1/2 only and VPU is e6500 only. 1018 */ 1019 if (excp == POWERPC_EXCP_VPU) { 1020 excp = POWERPC_EXCP_SPEU; 1021 } 1022 #endif 1023 1024 vector = env->excp_vectors[excp]; 1025 if (vector == (target_ulong)-1ULL) { 1026 cpu_abort(env_cpu(env), 1027 "Raised an exception without defined vector %d\n", excp); 1028 } 1029 vector |= env->excp_prefix; 1030 1031 switch (excp) { 1032 case POWERPC_EXCP_CRITICAL: /* Critical input */ 1033 srr0 = SPR_BOOKE_CSRR0; 1034 srr1 = SPR_BOOKE_CSRR1; 1035 break; 1036 case POWERPC_EXCP_MCHECK: /* Machine check exception */ 1037 powerpc_mcheck_checkstop(env); 1038 /* machine check exceptions don't have ME set */ 1039 new_msr &= ~((target_ulong)1 << MSR_ME); 1040 1041 /* FIXME: choose one or the other based on CPU type */ 1042 srr0 = SPR_BOOKE_MCSRR0; 1043 srr1 = SPR_BOOKE_MCSRR1; 1044 1045 env->spr[SPR_BOOKE_CSRR0] = env->nip; 1046 env->spr[SPR_BOOKE_CSRR1] = msr; 1047 1048 break; 1049 case POWERPC_EXCP_DSI: /* Data storage exception */ 1050 trace_ppc_excp_dsi(env->spr[SPR_BOOKE_ESR], env->spr[SPR_BOOKE_DEAR]); 1051 break; 1052 case POWERPC_EXCP_ISI: /* Instruction storage exception */ 1053 trace_ppc_excp_isi(msr, env->nip); 1054 break; 1055 case POWERPC_EXCP_EXTERNAL: /* External input */ 1056 if (env->mpic_proxy) { 1057 CPUState *cs = env_cpu(env); 1058 /* IACK the IRQ on delivery */ 1059 env->spr[SPR_BOOKE_EPR] = ldl_phys(cs->as, env->mpic_iack); 1060 } 1061 break; 1062 case POWERPC_EXCP_ALIGN: /* Alignment exception */ 1063 break; 1064 case POWERPC_EXCP_PROGRAM: /* Program exception */ 1065 switch (env->error_code & ~0xF) { 1066 case POWERPC_EXCP_FP: 1067 if (!FIELD_EX64_FE(env->msr) || !FIELD_EX64(env->msr, MSR, FP)) { 1068 trace_ppc_excp_fp_ignore(); 1069 powerpc_reset_excp_state(cpu); 1070 return; 1071 } 1072 /* 1073 * NIP always points to the faulting instruction for FP exceptions, 1074 * so always use store_next and claim we are precise in the MSR. 1075 */ 1076 msr |= 0x00100000; 1077 env->spr[SPR_BOOKE_ESR] = ESR_FP; 1078 break; 1079 case POWERPC_EXCP_INVAL: 1080 trace_ppc_excp_inval(env->nip); 1081 msr |= 0x00080000; 1082 env->spr[SPR_BOOKE_ESR] = ESR_PIL; 1083 break; 1084 case POWERPC_EXCP_PRIV: 1085 msr |= 0x00040000; 1086 env->spr[SPR_BOOKE_ESR] = ESR_PPR; 1087 break; 1088 case POWERPC_EXCP_TRAP: 1089 msr |= 0x00020000; 1090 env->spr[SPR_BOOKE_ESR] = ESR_PTR; 1091 break; 1092 default: 1093 /* Should never occur */ 1094 cpu_abort(env_cpu(env), "Invalid program exception %d. Aborting\n", 1095 env->error_code); 1096 break; 1097 } 1098 break; 1099 case POWERPC_EXCP_SYSCALL: /* System call exception */ 1100 dump_syscall(env); 1101 1102 /* 1103 * We need to correct the NIP which in this case is supposed 1104 * to point to the next instruction 1105 */ 1106 env->nip += 4; 1107 break; 1108 case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */ 1109 case POWERPC_EXCP_APU: /* Auxiliary processor unavailable */ 1110 case POWERPC_EXCP_DECR: /* Decrementer exception */ 1111 break; 1112 case POWERPC_EXCP_FIT: /* Fixed-interval timer interrupt */ 1113 /* FIT on 4xx */ 1114 trace_ppc_excp_print("FIT"); 1115 break; 1116 case POWERPC_EXCP_WDT: /* Watchdog timer interrupt */ 1117 trace_ppc_excp_print("WDT"); 1118 srr0 = SPR_BOOKE_CSRR0; 1119 srr1 = SPR_BOOKE_CSRR1; 1120 break; 1121 case POWERPC_EXCP_DTLB: /* Data TLB error */ 1122 case POWERPC_EXCP_ITLB: /* Instruction TLB error */ 1123 break; 1124 case POWERPC_EXCP_DEBUG: /* Debug interrupt */ 1125 if (env->flags & POWERPC_FLAG_DE) { 1126 /* FIXME: choose one or the other based on CPU type */ 1127 srr0 = SPR_BOOKE_DSRR0; 1128 srr1 = SPR_BOOKE_DSRR1; 1129 1130 env->spr[SPR_BOOKE_CSRR0] = env->nip; 1131 env->spr[SPR_BOOKE_CSRR1] = msr; 1132 1133 /* DBSR already modified by caller */ 1134 } else { 1135 cpu_abort(env_cpu(env), 1136 "Debug exception triggered on unsupported model\n"); 1137 } 1138 break; 1139 case POWERPC_EXCP_SPEU: /* SPE/embedded floating-point unavailable/VPU */ 1140 env->spr[SPR_BOOKE_ESR] = ESR_SPV; 1141 break; 1142 case POWERPC_EXCP_DOORI: /* Embedded doorbell interrupt */ 1143 break; 1144 case POWERPC_EXCP_DOORCI: /* Embedded doorbell critical interrupt */ 1145 srr0 = SPR_BOOKE_CSRR0; 1146 srr1 = SPR_BOOKE_CSRR1; 1147 break; 1148 case POWERPC_EXCP_RESET: /* System reset exception */ 1149 if (FIELD_EX64(env->msr, MSR, POW)) { 1150 cpu_abort(env_cpu(env), 1151 "Trying to deliver power-saving system reset " 1152 "exception %d with no HV support\n", excp); 1153 } 1154 break; 1155 case POWERPC_EXCP_EFPDI: /* Embedded floating-point data interrupt */ 1156 case POWERPC_EXCP_EFPRI: /* Embedded floating-point round interrupt */ 1157 cpu_abort(env_cpu(env), "%s exception not implemented\n", 1158 powerpc_excp_name(excp)); 1159 break; 1160 default: 1161 cpu_abort(env_cpu(env), "Invalid PowerPC exception %d. Aborting\n", 1162 excp); 1163 break; 1164 } 1165 1166 #ifdef TARGET_PPC64 1167 if (env->spr[SPR_BOOKE_EPCR] & EPCR_ICM) { 1168 /* Cat.64-bit: EPCR.ICM is copied to MSR.CM */ 1169 new_msr |= (target_ulong)1 << MSR_CM; 1170 } else { 1171 vector = (uint32_t)vector; 1172 } 1173 #endif 1174 1175 env->spr[srr0] = env->nip; 1176 env->spr[srr1] = msr; 1177 powerpc_set_excp_state(cpu, vector, new_msr); 1178 } 1179 1180 /* 1181 * When running a nested HV guest under vhyp, external interrupts are 1182 * delivered as HVIRT. 1183 */ 1184 static bool books_vhyp_promotes_external_to_hvirt(PowerPCCPU *cpu) 1185 { 1186 if (cpu->vhyp) { 1187 return vhyp_cpu_in_nested(cpu); 1188 } 1189 return false; 1190 } 1191 1192 #ifdef TARGET_PPC64 1193 /* 1194 * When running under vhyp, hcalls are always intercepted and sent to the 1195 * vhc->hypercall handler. 1196 */ 1197 static bool books_vhyp_handles_hcall(PowerPCCPU *cpu) 1198 { 1199 if (cpu->vhyp) { 1200 return !vhyp_cpu_in_nested(cpu); 1201 } 1202 return false; 1203 } 1204 1205 /* 1206 * When running a nested KVM HV guest under vhyp, HV exceptions are not 1207 * delivered to the guest (because there is no concept of HV support), but 1208 * rather they are sent to the vhyp to exit from the L2 back to the L1 and 1209 * return from the H_ENTER_NESTED hypercall. 1210 */ 1211 static bool books_vhyp_handles_hv_excp(PowerPCCPU *cpu) 1212 { 1213 if (cpu->vhyp) { 1214 return vhyp_cpu_in_nested(cpu); 1215 } 1216 return false; 1217 } 1218 1219 #ifdef CONFIG_TCG 1220 static bool is_prefix_insn(CPUPPCState *env, uint32_t insn) 1221 { 1222 if (!(env->insns_flags2 & PPC2_ISA310)) { 1223 return false; 1224 } 1225 return ((insn & 0xfc000000) == 0x04000000); 1226 } 1227 1228 static bool is_prefix_insn_excp(PowerPCCPU *cpu, int excp) 1229 { 1230 CPUPPCState *env = &cpu->env; 1231 1232 if (!(env->insns_flags2 & PPC2_ISA310)) { 1233 return false; 1234 } 1235 1236 if (!tcg_enabled()) { 1237 /* 1238 * This does not load instructions and set the prefix bit correctly 1239 * for injected interrupts with KVM. That may have to be discovered 1240 * and set by the KVM layer before injecting. 1241 */ 1242 return false; 1243 } 1244 1245 switch (excp) { 1246 case POWERPC_EXCP_MCHECK: 1247 if (!(env->error_code & PPC_BIT(42))) { 1248 /* 1249 * Fetch attempt caused a machine check, so attempting to fetch 1250 * again would cause a recursive machine check. 1251 */ 1252 return false; 1253 } 1254 break; 1255 case POWERPC_EXCP_HDSI: 1256 /* HDSI PRTABLE_FAULT has the originating access type in error_code */ 1257 if ((env->spr[SPR_HDSISR] & DSISR_PRTABLE_FAULT) && 1258 (env->error_code == MMU_INST_FETCH)) { 1259 /* 1260 * Fetch failed due to partition scope translation, so prefix 1261 * indication is not relevant (and attempting to load the 1262 * instruction at NIP would cause recursive faults with the same 1263 * translation). 1264 */ 1265 return false; 1266 } 1267 break; 1268 1269 case POWERPC_EXCP_DSI: 1270 case POWERPC_EXCP_DSEG: 1271 case POWERPC_EXCP_ALIGN: 1272 case POWERPC_EXCP_PROGRAM: 1273 case POWERPC_EXCP_FPU: 1274 case POWERPC_EXCP_TRACE: 1275 case POWERPC_EXCP_HV_EMU: 1276 case POWERPC_EXCP_VPU: 1277 case POWERPC_EXCP_VSXU: 1278 case POWERPC_EXCP_FU: 1279 case POWERPC_EXCP_HV_FU: 1280 break; 1281 default: 1282 return false; 1283 } 1284 1285 return is_prefix_insn(env, ppc_ldl_code(env, env->nip)); 1286 } 1287 #else 1288 static bool is_prefix_insn_excp(PowerPCCPU *cpu, int excp) 1289 { 1290 return false; 1291 } 1292 #endif 1293 1294 static void powerpc_excp_books(PowerPCCPU *cpu, int excp) 1295 { 1296 CPUPPCState *env = &cpu->env; 1297 target_ulong msr, new_msr, vector; 1298 int srr0 = SPR_SRR0, srr1 = SPR_SRR1, lev = -1; 1299 1300 /* new srr1 value excluding must-be-zero bits */ 1301 msr = env->msr & ~0x783f0000ULL; 1302 1303 /* 1304 * new interrupt handler msr preserves HV and ME unless explicitly 1305 * overridden 1306 */ 1307 new_msr = env->msr & (((target_ulong)1 << MSR_ME) | MSR_HVB); 1308 1309 /* 1310 * check for special resume at 0x100 from doze/nap/sleep/winkle on 1311 * P7/P8/P9 1312 */ 1313 if (env->resume_as_sreset) { 1314 excp = powerpc_reset_wakeup(env, excp, &msr); 1315 } 1316 1317 /* 1318 * We don't want to generate a Hypervisor Emulation Assistance 1319 * Interrupt if we don't have HVB in msr_mask (PAPR mode), 1320 * unless running a nested-hv guest, in which case the L1 1321 * kernel wants the interrupt. 1322 */ 1323 if (excp == POWERPC_EXCP_HV_EMU && !(env->msr_mask & MSR_HVB) && 1324 !books_vhyp_handles_hv_excp(cpu)) { 1325 excp = POWERPC_EXCP_PROGRAM; 1326 } 1327 1328 vector = env->excp_vectors[excp]; 1329 if (vector == (target_ulong)-1ULL) { 1330 cpu_abort(env_cpu(env), 1331 "Raised an exception without defined vector %d\n", excp); 1332 } 1333 vector |= env->excp_prefix; 1334 1335 if (is_prefix_insn_excp(cpu, excp)) { 1336 msr |= PPC_BIT(34); 1337 } 1338 1339 switch (excp) { 1340 case POWERPC_EXCP_MCHECK: /* Machine check exception */ 1341 powerpc_mcheck_checkstop(env); 1342 if (env->msr_mask & MSR_HVB) { 1343 /* 1344 * ISA specifies HV, but can be delivered to guest with HV 1345 * clear (e.g., see FWNMI in PAPR). 1346 */ 1347 new_msr |= (target_ulong)MSR_HVB; 1348 1349 /* HV machine check exceptions don't have ME set */ 1350 new_msr &= ~((target_ulong)1 << MSR_ME); 1351 } 1352 1353 msr |= env->error_code; 1354 break; 1355 1356 case POWERPC_EXCP_DSI: /* Data storage exception */ 1357 trace_ppc_excp_dsi(env->spr[SPR_DSISR], env->spr[SPR_DAR]); 1358 break; 1359 case POWERPC_EXCP_ISI: /* Instruction storage exception */ 1360 trace_ppc_excp_isi(msr, env->nip); 1361 msr |= env->error_code; 1362 break; 1363 case POWERPC_EXCP_EXTERNAL: /* External input */ 1364 { 1365 bool lpes0; 1366 1367 /* LPES0 is only taken into consideration if we support HV mode */ 1368 if (!env->has_hv_mode) { 1369 break; 1370 } 1371 lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0); 1372 if (!lpes0) { 1373 new_msr |= (target_ulong)MSR_HVB; 1374 new_msr |= env->msr & ((target_ulong)1 << MSR_RI); 1375 srr0 = SPR_HSRR0; 1376 srr1 = SPR_HSRR1; 1377 } 1378 break; 1379 } 1380 case POWERPC_EXCP_ALIGN: /* Alignment exception */ 1381 /* Optional DSISR update was removed from ISA v3.0 */ 1382 if (!(env->insns_flags2 & PPC2_ISA300)) { 1383 /* Get rS/rD and rA from faulting opcode */ 1384 /* 1385 * Note: the opcode fields will not be set properly for a 1386 * direct store load/store, but nobody cares as nobody 1387 * actually uses direct store segments. 1388 */ 1389 env->spr[SPR_DSISR] |= (env->error_code & 0x03FF0000) >> 16; 1390 } 1391 break; 1392 case POWERPC_EXCP_PROGRAM: /* Program exception */ 1393 switch (env->error_code & ~0xF) { 1394 case POWERPC_EXCP_FP: 1395 if (!FIELD_EX64_FE(env->msr) || !FIELD_EX64(env->msr, MSR, FP)) { 1396 trace_ppc_excp_fp_ignore(); 1397 powerpc_reset_excp_state(cpu); 1398 return; 1399 } 1400 /* 1401 * NIP always points to the faulting instruction for FP exceptions, 1402 * so always use store_next and claim we are precise in the MSR. 1403 */ 1404 msr |= 0x00100000; 1405 break; 1406 case POWERPC_EXCP_INVAL: 1407 trace_ppc_excp_inval(env->nip); 1408 msr |= 0x00080000; 1409 break; 1410 case POWERPC_EXCP_PRIV: 1411 msr |= 0x00040000; 1412 break; 1413 case POWERPC_EXCP_TRAP: 1414 msr |= 0x00020000; 1415 break; 1416 default: 1417 /* Should never occur */ 1418 cpu_abort(env_cpu(env), "Invalid program exception %d. Aborting\n", 1419 env->error_code); 1420 break; 1421 } 1422 break; 1423 case POWERPC_EXCP_SYSCALL: /* System call exception */ 1424 lev = env->error_code; 1425 1426 if (lev == 1 && cpu->vhyp) { 1427 dump_hcall(env); 1428 } else { 1429 dump_syscall(env); 1430 } 1431 1432 /* 1433 * We need to correct the NIP which in this case is supposed 1434 * to point to the next instruction 1435 */ 1436 env->nip += 4; 1437 1438 /* "PAPR mode" built-in hypercall emulation */ 1439 if (lev == 1 && books_vhyp_handles_hcall(cpu)) { 1440 PPCVirtualHypervisorClass *vhc = 1441 PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp); 1442 vhc->hypercall(cpu->vhyp, cpu); 1443 powerpc_reset_excp_state(cpu); 1444 return; 1445 } 1446 if (env->insns_flags2 & PPC2_ISA310) { 1447 /* ISAv3.1 puts LEV into SRR1 */ 1448 msr |= lev << 20; 1449 } 1450 if (lev == 1) { 1451 new_msr |= (target_ulong)MSR_HVB; 1452 } 1453 break; 1454 case POWERPC_EXCP_SYSCALL_VECTORED: /* scv exception */ 1455 lev = env->error_code; 1456 dump_syscall(env); 1457 env->nip += 4; 1458 new_msr |= env->msr & ((target_ulong)1 << MSR_EE); 1459 new_msr |= env->msr & ((target_ulong)1 << MSR_RI); 1460 1461 vector += lev * 0x20; 1462 1463 env->lr = env->nip; 1464 env->ctr = msr; 1465 break; 1466 case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */ 1467 case POWERPC_EXCP_DECR: /* Decrementer exception */ 1468 break; 1469 case POWERPC_EXCP_RESET: /* System reset exception */ 1470 /* A power-saving exception sets ME, otherwise it is unchanged */ 1471 if (FIELD_EX64(env->msr, MSR, POW)) { 1472 /* indicate that we resumed from power save mode */ 1473 msr |= 0x10000; 1474 new_msr |= ((target_ulong)1 << MSR_ME); 1475 } 1476 if (env->msr_mask & MSR_HVB) { 1477 /* 1478 * ISA specifies HV, but can be delivered to guest with HV 1479 * clear (e.g., see FWNMI in PAPR, NMI injection in QEMU). 1480 */ 1481 new_msr |= (target_ulong)MSR_HVB; 1482 } else { 1483 if (FIELD_EX64(env->msr, MSR, POW)) { 1484 cpu_abort(env_cpu(env), 1485 "Trying to deliver power-saving system reset " 1486 "exception %d with no HV support\n", excp); 1487 } 1488 } 1489 break; 1490 case POWERPC_EXCP_TRACE: /* Trace exception */ 1491 msr |= env->error_code; 1492 /* fall through */ 1493 case POWERPC_EXCP_DSEG: /* Data segment exception */ 1494 case POWERPC_EXCP_ISEG: /* Instruction segment exception */ 1495 case POWERPC_EXCP_SDOOR: /* Doorbell interrupt */ 1496 case POWERPC_EXCP_PERFM: /* Performance monitor interrupt */ 1497 break; 1498 case POWERPC_EXCP_HISI: /* Hypervisor instruction storage exception */ 1499 msr |= env->error_code; 1500 /* fall through */ 1501 case POWERPC_EXCP_HDECR: /* Hypervisor decrementer exception */ 1502 case POWERPC_EXCP_HDSI: /* Hypervisor data storage exception */ 1503 case POWERPC_EXCP_SDOOR_HV: /* Hypervisor Doorbell interrupt */ 1504 case POWERPC_EXCP_HVIRT: /* Hypervisor virtualization */ 1505 srr0 = SPR_HSRR0; 1506 srr1 = SPR_HSRR1; 1507 new_msr |= (target_ulong)MSR_HVB; 1508 new_msr |= env->msr & ((target_ulong)1 << MSR_RI); 1509 break; 1510 #ifdef CONFIG_TCG 1511 case POWERPC_EXCP_HV_EMU: { 1512 uint32_t insn = ppc_ldl_code(env, env->nip); 1513 env->spr[SPR_HEIR] = insn; 1514 if (is_prefix_insn(env, insn)) { 1515 uint32_t insn2 = ppc_ldl_code(env, env->nip + 4); 1516 env->spr[SPR_HEIR] <<= 32; 1517 env->spr[SPR_HEIR] |= insn2; 1518 } 1519 srr0 = SPR_HSRR0; 1520 srr1 = SPR_HSRR1; 1521 new_msr |= (target_ulong)MSR_HVB; 1522 new_msr |= env->msr & ((target_ulong)1 << MSR_RI); 1523 break; 1524 } 1525 #endif 1526 case POWERPC_EXCP_VPU: /* Vector unavailable exception */ 1527 case POWERPC_EXCP_VSXU: /* VSX unavailable exception */ 1528 case POWERPC_EXCP_FU: /* Facility unavailable exception */ 1529 env->spr[SPR_FSCR] |= ((target_ulong)env->error_code << 56); 1530 break; 1531 case POWERPC_EXCP_HV_FU: /* Hypervisor Facility Unavailable Exception */ 1532 env->spr[SPR_HFSCR] |= ((target_ulong)env->error_code << FSCR_IC_POS); 1533 srr0 = SPR_HSRR0; 1534 srr1 = SPR_HSRR1; 1535 new_msr |= (target_ulong)MSR_HVB; 1536 new_msr |= env->msr & ((target_ulong)1 << MSR_RI); 1537 break; 1538 case POWERPC_EXCP_PERFM_EBB: /* Performance Monitor EBB Exception */ 1539 case POWERPC_EXCP_EXTERNAL_EBB: /* External EBB Exception */ 1540 env->spr[SPR_BESCR] &= ~BESCR_GE; 1541 1542 /* 1543 * Save NIP for rfebb insn in SPR_EBBRR. Next nip is 1544 * stored in the EBB Handler SPR_EBBHR. 1545 */ 1546 env->spr[SPR_EBBRR] = env->nip; 1547 powerpc_set_excp_state(cpu, env->spr[SPR_EBBHR], env->msr); 1548 1549 /* 1550 * This exception is handled in userspace. No need to proceed. 1551 */ 1552 return; 1553 case POWERPC_EXCP_THERM: /* Thermal interrupt */ 1554 case POWERPC_EXCP_VPUA: /* Vector assist exception */ 1555 case POWERPC_EXCP_MAINT: /* Maintenance exception */ 1556 case POWERPC_EXCP_HV_MAINT: /* Hypervisor Maintenance exception */ 1557 cpu_abort(env_cpu(env), "%s exception not implemented\n", 1558 powerpc_excp_name(excp)); 1559 break; 1560 default: 1561 cpu_abort(env_cpu(env), "Invalid PowerPC exception %d. Aborting\n", 1562 excp); 1563 break; 1564 } 1565 1566 if (ppc_interrupts_little_endian(cpu, !!(new_msr & MSR_HVB))) { 1567 new_msr |= (target_ulong)1 << MSR_LE; 1568 } 1569 new_msr |= (target_ulong)1 << MSR_SF; 1570 1571 if (excp != POWERPC_EXCP_SYSCALL_VECTORED) { 1572 env->spr[srr0] = env->nip; 1573 env->spr[srr1] = msr; 1574 } 1575 1576 if ((new_msr & MSR_HVB) && books_vhyp_handles_hv_excp(cpu)) { 1577 PPCVirtualHypervisorClass *vhc = 1578 PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp); 1579 /* Deliver interrupt to L1 by returning from the H_ENTER_NESTED call */ 1580 vhc->deliver_hv_excp(cpu, excp); 1581 powerpc_reset_excp_state(cpu); 1582 } else { 1583 /* Sanity check */ 1584 if (!(env->msr_mask & MSR_HVB) && srr0 == SPR_HSRR0) { 1585 cpu_abort(env_cpu(env), "Trying to deliver HV exception (HSRR) %d " 1586 "with no HV support\n", excp); 1587 } 1588 /* This can update new_msr and vector if AIL applies */ 1589 ppc_excp_apply_ail(cpu, excp, msr, &new_msr, &vector); 1590 powerpc_set_excp_state(cpu, vector, new_msr); 1591 } 1592 } 1593 #else 1594 static inline void powerpc_excp_books(PowerPCCPU *cpu, int excp) 1595 { 1596 g_assert_not_reached(); 1597 } 1598 #endif /* TARGET_PPC64 */ 1599 1600 static void powerpc_excp(PowerPCCPU *cpu, int excp) 1601 { 1602 CPUPPCState *env = &cpu->env; 1603 1604 if (excp <= POWERPC_EXCP_NONE || excp >= POWERPC_EXCP_NB) { 1605 cpu_abort(env_cpu(env), "Invalid PowerPC exception %d. Aborting\n", 1606 excp); 1607 } 1608 1609 qemu_log_mask(CPU_LOG_INT, "Raise exception at " TARGET_FMT_lx 1610 " => %s (%d) error=%02x\n", env->nip, powerpc_excp_name(excp), 1611 excp, env->error_code); 1612 env->excp_stats[excp]++; 1613 1614 switch (env->excp_model) { 1615 case POWERPC_EXCP_40x: 1616 powerpc_excp_40x(cpu, excp); 1617 break; 1618 case POWERPC_EXCP_6xx: 1619 powerpc_excp_6xx(cpu, excp); 1620 break; 1621 case POWERPC_EXCP_7xx: 1622 powerpc_excp_7xx(cpu, excp); 1623 break; 1624 case POWERPC_EXCP_74xx: 1625 powerpc_excp_74xx(cpu, excp); 1626 break; 1627 case POWERPC_EXCP_BOOKE: 1628 powerpc_excp_booke(cpu, excp); 1629 break; 1630 case POWERPC_EXCP_970: 1631 case POWERPC_EXCP_POWER7: 1632 case POWERPC_EXCP_POWER8: 1633 case POWERPC_EXCP_POWER9: 1634 case POWERPC_EXCP_POWER10: 1635 powerpc_excp_books(cpu, excp); 1636 break; 1637 default: 1638 g_assert_not_reached(); 1639 } 1640 } 1641 1642 void ppc_cpu_do_interrupt(CPUState *cs) 1643 { 1644 PowerPCCPU *cpu = POWERPC_CPU(cs); 1645 1646 powerpc_excp(cpu, cs->exception_index); 1647 } 1648 1649 #ifdef TARGET_PPC64 1650 #define P7_UNUSED_INTERRUPTS \ 1651 (PPC_INTERRUPT_RESET | PPC_INTERRUPT_HVIRT | PPC_INTERRUPT_CEXT | \ 1652 PPC_INTERRUPT_WDT | PPC_INTERRUPT_CDOORBELL | PPC_INTERRUPT_FIT | \ 1653 PPC_INTERRUPT_PIT | PPC_INTERRUPT_DOORBELL | PPC_INTERRUPT_HDOORBELL | \ 1654 PPC_INTERRUPT_THERM | PPC_INTERRUPT_EBB) 1655 1656 static int p7_interrupt_powersave(CPUPPCState *env) 1657 { 1658 if ((env->pending_interrupts & PPC_INTERRUPT_EXT) && 1659 (env->spr[SPR_LPCR] & LPCR_P7_PECE0)) { 1660 return PPC_INTERRUPT_EXT; 1661 } 1662 if ((env->pending_interrupts & PPC_INTERRUPT_DECR) && 1663 (env->spr[SPR_LPCR] & LPCR_P7_PECE1)) { 1664 return PPC_INTERRUPT_DECR; 1665 } 1666 if ((env->pending_interrupts & PPC_INTERRUPT_MCK) && 1667 (env->spr[SPR_LPCR] & LPCR_P7_PECE2)) { 1668 return PPC_INTERRUPT_MCK; 1669 } 1670 if ((env->pending_interrupts & PPC_INTERRUPT_HMI) && 1671 (env->spr[SPR_LPCR] & LPCR_P7_PECE2)) { 1672 return PPC_INTERRUPT_HMI; 1673 } 1674 if (env->pending_interrupts & PPC_INTERRUPT_RESET) { 1675 return PPC_INTERRUPT_RESET; 1676 } 1677 return 0; 1678 } 1679 1680 static int p7_next_unmasked_interrupt(CPUPPCState *env) 1681 { 1682 CPUState *cs = env_cpu(env); 1683 1684 /* Ignore MSR[EE] when coming out of some power management states */ 1685 bool msr_ee = FIELD_EX64(env->msr, MSR, EE) || env->resume_as_sreset; 1686 1687 assert((env->pending_interrupts & P7_UNUSED_INTERRUPTS) == 0); 1688 1689 if (cs->halted) { 1690 /* LPCR[PECE] controls which interrupts can exit power-saving mode */ 1691 return p7_interrupt_powersave(env); 1692 } 1693 1694 /* Machine check exception */ 1695 if (env->pending_interrupts & PPC_INTERRUPT_MCK) { 1696 return PPC_INTERRUPT_MCK; 1697 } 1698 1699 /* Hypervisor decrementer exception */ 1700 if (env->pending_interrupts & PPC_INTERRUPT_HDECR) { 1701 /* LPCR will be clear when not supported so this will work */ 1702 bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE); 1703 if ((msr_ee || !FIELD_EX64_HV(env->msr)) && hdice) { 1704 /* HDEC clears on delivery */ 1705 return PPC_INTERRUPT_HDECR; 1706 } 1707 } 1708 1709 /* External interrupt can ignore MSR:EE under some circumstances */ 1710 if (env->pending_interrupts & PPC_INTERRUPT_EXT) { 1711 bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0); 1712 bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC); 1713 /* HEIC blocks delivery to the hypervisor */ 1714 if ((msr_ee && !(heic && FIELD_EX64_HV(env->msr) && 1715 !FIELD_EX64(env->msr, MSR, PR))) || 1716 (env->has_hv_mode && !FIELD_EX64_HV(env->msr) && !lpes0)) { 1717 return PPC_INTERRUPT_EXT; 1718 } 1719 } 1720 if (msr_ee != 0) { 1721 /* Decrementer exception */ 1722 if (env->pending_interrupts & PPC_INTERRUPT_DECR) { 1723 return PPC_INTERRUPT_DECR; 1724 } 1725 if (env->pending_interrupts & PPC_INTERRUPT_PERFM) { 1726 return PPC_INTERRUPT_PERFM; 1727 } 1728 } 1729 1730 return 0; 1731 } 1732 1733 #define P8_UNUSED_INTERRUPTS \ 1734 (PPC_INTERRUPT_RESET | PPC_INTERRUPT_DEBUG | PPC_INTERRUPT_HVIRT | \ 1735 PPC_INTERRUPT_CEXT | PPC_INTERRUPT_WDT | PPC_INTERRUPT_CDOORBELL | \ 1736 PPC_INTERRUPT_FIT | PPC_INTERRUPT_PIT | PPC_INTERRUPT_THERM) 1737 1738 static int p8_interrupt_powersave(CPUPPCState *env) 1739 { 1740 if ((env->pending_interrupts & PPC_INTERRUPT_EXT) && 1741 (env->spr[SPR_LPCR] & LPCR_P8_PECE2)) { 1742 return PPC_INTERRUPT_EXT; 1743 } 1744 if ((env->pending_interrupts & PPC_INTERRUPT_DECR) && 1745 (env->spr[SPR_LPCR] & LPCR_P8_PECE3)) { 1746 return PPC_INTERRUPT_DECR; 1747 } 1748 if ((env->pending_interrupts & PPC_INTERRUPT_MCK) && 1749 (env->spr[SPR_LPCR] & LPCR_P8_PECE4)) { 1750 return PPC_INTERRUPT_MCK; 1751 } 1752 if ((env->pending_interrupts & PPC_INTERRUPT_HMI) && 1753 (env->spr[SPR_LPCR] & LPCR_P8_PECE4)) { 1754 return PPC_INTERRUPT_HMI; 1755 } 1756 if ((env->pending_interrupts & PPC_INTERRUPT_DOORBELL) && 1757 (env->spr[SPR_LPCR] & LPCR_P8_PECE0)) { 1758 return PPC_INTERRUPT_DOORBELL; 1759 } 1760 if ((env->pending_interrupts & PPC_INTERRUPT_HDOORBELL) && 1761 (env->spr[SPR_LPCR] & LPCR_P8_PECE1)) { 1762 return PPC_INTERRUPT_HDOORBELL; 1763 } 1764 if (env->pending_interrupts & PPC_INTERRUPT_RESET) { 1765 return PPC_INTERRUPT_RESET; 1766 } 1767 return 0; 1768 } 1769 1770 static int p8_next_unmasked_interrupt(CPUPPCState *env) 1771 { 1772 CPUState *cs = env_cpu(env); 1773 1774 /* Ignore MSR[EE] when coming out of some power management states */ 1775 bool msr_ee = FIELD_EX64(env->msr, MSR, EE) || env->resume_as_sreset; 1776 1777 assert((env->pending_interrupts & P8_UNUSED_INTERRUPTS) == 0); 1778 1779 if (cs->halted) { 1780 /* LPCR[PECE] controls which interrupts can exit power-saving mode */ 1781 return p8_interrupt_powersave(env); 1782 } 1783 1784 /* Machine check exception */ 1785 if (env->pending_interrupts & PPC_INTERRUPT_MCK) { 1786 return PPC_INTERRUPT_MCK; 1787 } 1788 1789 /* Hypervisor decrementer exception */ 1790 if (env->pending_interrupts & PPC_INTERRUPT_HDECR) { 1791 /* LPCR will be clear when not supported so this will work */ 1792 bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE); 1793 if ((msr_ee || !FIELD_EX64_HV(env->msr)) && hdice) { 1794 /* HDEC clears on delivery */ 1795 return PPC_INTERRUPT_HDECR; 1796 } 1797 } 1798 1799 /* External interrupt can ignore MSR:EE under some circumstances */ 1800 if (env->pending_interrupts & PPC_INTERRUPT_EXT) { 1801 bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0); 1802 bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC); 1803 /* HEIC blocks delivery to the hypervisor */ 1804 if ((msr_ee && !(heic && FIELD_EX64_HV(env->msr) && 1805 !FIELD_EX64(env->msr, MSR, PR))) || 1806 (env->has_hv_mode && !FIELD_EX64_HV(env->msr) && !lpes0)) { 1807 return PPC_INTERRUPT_EXT; 1808 } 1809 } 1810 if (msr_ee != 0) { 1811 /* Decrementer exception */ 1812 if (env->pending_interrupts & PPC_INTERRUPT_DECR) { 1813 return PPC_INTERRUPT_DECR; 1814 } 1815 if (env->pending_interrupts & PPC_INTERRUPT_DOORBELL) { 1816 return PPC_INTERRUPT_DOORBELL; 1817 } 1818 if (env->pending_interrupts & PPC_INTERRUPT_HDOORBELL) { 1819 return PPC_INTERRUPT_HDOORBELL; 1820 } 1821 if (env->pending_interrupts & PPC_INTERRUPT_PERFM) { 1822 return PPC_INTERRUPT_PERFM; 1823 } 1824 /* EBB exception */ 1825 if (env->pending_interrupts & PPC_INTERRUPT_EBB) { 1826 /* 1827 * EBB exception must be taken in problem state and 1828 * with BESCR_GE set. 1829 */ 1830 if (FIELD_EX64(env->msr, MSR, PR) && 1831 (env->spr[SPR_BESCR] & BESCR_GE)) { 1832 return PPC_INTERRUPT_EBB; 1833 } 1834 } 1835 } 1836 1837 return 0; 1838 } 1839 1840 #define P9_UNUSED_INTERRUPTS \ 1841 (PPC_INTERRUPT_RESET | PPC_INTERRUPT_DEBUG | PPC_INTERRUPT_CEXT | \ 1842 PPC_INTERRUPT_WDT | PPC_INTERRUPT_CDOORBELL | PPC_INTERRUPT_FIT | \ 1843 PPC_INTERRUPT_PIT | PPC_INTERRUPT_THERM) 1844 1845 static int p9_interrupt_powersave(CPUPPCState *env) 1846 { 1847 /* External Exception */ 1848 if ((env->pending_interrupts & PPC_INTERRUPT_EXT) && 1849 (env->spr[SPR_LPCR] & LPCR_EEE)) { 1850 bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC); 1851 if (!heic || !FIELD_EX64_HV(env->msr) || 1852 FIELD_EX64(env->msr, MSR, PR)) { 1853 return PPC_INTERRUPT_EXT; 1854 } 1855 } 1856 /* Decrementer Exception */ 1857 if ((env->pending_interrupts & PPC_INTERRUPT_DECR) && 1858 (env->spr[SPR_LPCR] & LPCR_DEE)) { 1859 return PPC_INTERRUPT_DECR; 1860 } 1861 /* Machine Check or Hypervisor Maintenance Exception */ 1862 if (env->spr[SPR_LPCR] & LPCR_OEE) { 1863 if (env->pending_interrupts & PPC_INTERRUPT_MCK) { 1864 return PPC_INTERRUPT_MCK; 1865 } 1866 if (env->pending_interrupts & PPC_INTERRUPT_HMI) { 1867 return PPC_INTERRUPT_HMI; 1868 } 1869 } 1870 /* Privileged Doorbell Exception */ 1871 if ((env->pending_interrupts & PPC_INTERRUPT_DOORBELL) && 1872 (env->spr[SPR_LPCR] & LPCR_PDEE)) { 1873 return PPC_INTERRUPT_DOORBELL; 1874 } 1875 /* Hypervisor Doorbell Exception */ 1876 if ((env->pending_interrupts & PPC_INTERRUPT_HDOORBELL) && 1877 (env->spr[SPR_LPCR] & LPCR_HDEE)) { 1878 return PPC_INTERRUPT_HDOORBELL; 1879 } 1880 /* Hypervisor virtualization exception */ 1881 if ((env->pending_interrupts & PPC_INTERRUPT_HVIRT) && 1882 (env->spr[SPR_LPCR] & LPCR_HVEE)) { 1883 return PPC_INTERRUPT_HVIRT; 1884 } 1885 if (env->pending_interrupts & PPC_INTERRUPT_RESET) { 1886 return PPC_INTERRUPT_RESET; 1887 } 1888 return 0; 1889 } 1890 1891 static int p9_next_unmasked_interrupt(CPUPPCState *env) 1892 { 1893 CPUState *cs = env_cpu(env); 1894 1895 /* Ignore MSR[EE] when coming out of some power management states */ 1896 bool msr_ee = FIELD_EX64(env->msr, MSR, EE) || env->resume_as_sreset; 1897 1898 assert((env->pending_interrupts & P9_UNUSED_INTERRUPTS) == 0); 1899 1900 if (cs->halted) { 1901 if (env->spr[SPR_PSSCR] & PSSCR_EC) { 1902 /* 1903 * When PSSCR[EC] is set, LPCR[PECE] controls which interrupts can 1904 * wakeup the processor 1905 */ 1906 return p9_interrupt_powersave(env); 1907 } else { 1908 /* 1909 * When it's clear, any system-caused exception exits power-saving 1910 * mode, even the ones that gate on MSR[EE]. 1911 */ 1912 msr_ee = true; 1913 } 1914 } 1915 1916 /* Machine check exception */ 1917 if (env->pending_interrupts & PPC_INTERRUPT_MCK) { 1918 return PPC_INTERRUPT_MCK; 1919 } 1920 1921 /* Hypervisor decrementer exception */ 1922 if (env->pending_interrupts & PPC_INTERRUPT_HDECR) { 1923 /* LPCR will be clear when not supported so this will work */ 1924 bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE); 1925 if ((msr_ee || !FIELD_EX64_HV(env->msr)) && hdice) { 1926 /* HDEC clears on delivery */ 1927 return PPC_INTERRUPT_HDECR; 1928 } 1929 } 1930 1931 /* Hypervisor virtualization interrupt */ 1932 if (env->pending_interrupts & PPC_INTERRUPT_HVIRT) { 1933 /* LPCR will be clear when not supported so this will work */ 1934 bool hvice = !!(env->spr[SPR_LPCR] & LPCR_HVICE); 1935 if ((msr_ee || !FIELD_EX64_HV(env->msr)) && hvice) { 1936 return PPC_INTERRUPT_HVIRT; 1937 } 1938 } 1939 1940 /* External interrupt can ignore MSR:EE under some circumstances */ 1941 if (env->pending_interrupts & PPC_INTERRUPT_EXT) { 1942 bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0); 1943 bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC); 1944 /* HEIC blocks delivery to the hypervisor */ 1945 if ((msr_ee && !(heic && FIELD_EX64_HV(env->msr) && 1946 !FIELD_EX64(env->msr, MSR, PR))) || 1947 (env->has_hv_mode && !FIELD_EX64_HV(env->msr) && !lpes0)) { 1948 return PPC_INTERRUPT_EXT; 1949 } 1950 } 1951 if (msr_ee != 0) { 1952 /* Decrementer exception */ 1953 if (env->pending_interrupts & PPC_INTERRUPT_DECR) { 1954 return PPC_INTERRUPT_DECR; 1955 } 1956 if (env->pending_interrupts & PPC_INTERRUPT_DOORBELL) { 1957 return PPC_INTERRUPT_DOORBELL; 1958 } 1959 if (env->pending_interrupts & PPC_INTERRUPT_HDOORBELL) { 1960 return PPC_INTERRUPT_HDOORBELL; 1961 } 1962 if (env->pending_interrupts & PPC_INTERRUPT_PERFM) { 1963 return PPC_INTERRUPT_PERFM; 1964 } 1965 /* EBB exception */ 1966 if (env->pending_interrupts & PPC_INTERRUPT_EBB) { 1967 /* 1968 * EBB exception must be taken in problem state and 1969 * with BESCR_GE set. 1970 */ 1971 if (FIELD_EX64(env->msr, MSR, PR) && 1972 (env->spr[SPR_BESCR] & BESCR_GE)) { 1973 return PPC_INTERRUPT_EBB; 1974 } 1975 } 1976 } 1977 1978 return 0; 1979 } 1980 #endif /* TARGET_PPC64 */ 1981 1982 static int ppc_next_unmasked_interrupt(CPUPPCState *env) 1983 { 1984 #ifdef TARGET_PPC64 1985 switch (env->excp_model) { 1986 case POWERPC_EXCP_POWER7: 1987 return p7_next_unmasked_interrupt(env); 1988 case POWERPC_EXCP_POWER8: 1989 return p8_next_unmasked_interrupt(env); 1990 case POWERPC_EXCP_POWER9: 1991 case POWERPC_EXCP_POWER10: 1992 return p9_next_unmasked_interrupt(env); 1993 default: 1994 break; 1995 } 1996 #endif 1997 bool async_deliver; 1998 1999 /* External reset */ 2000 if (env->pending_interrupts & PPC_INTERRUPT_RESET) { 2001 return PPC_INTERRUPT_RESET; 2002 } 2003 /* Machine check exception */ 2004 if (env->pending_interrupts & PPC_INTERRUPT_MCK) { 2005 return PPC_INTERRUPT_MCK; 2006 } 2007 #if 0 /* TODO */ 2008 /* External debug exception */ 2009 if (env->pending_interrupts & PPC_INTERRUPT_DEBUG) { 2010 return PPC_INTERRUPT_DEBUG; 2011 } 2012 #endif 2013 2014 /* 2015 * For interrupts that gate on MSR:EE, we need to do something a 2016 * bit more subtle, as we need to let them through even when EE is 2017 * clear when coming out of some power management states (in order 2018 * for them to become a 0x100). 2019 */ 2020 async_deliver = FIELD_EX64(env->msr, MSR, EE) || env->resume_as_sreset; 2021 2022 /* Hypervisor decrementer exception */ 2023 if (env->pending_interrupts & PPC_INTERRUPT_HDECR) { 2024 /* LPCR will be clear when not supported so this will work */ 2025 bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE); 2026 if ((async_deliver || !FIELD_EX64_HV(env->msr)) && hdice) { 2027 /* HDEC clears on delivery */ 2028 return PPC_INTERRUPT_HDECR; 2029 } 2030 } 2031 2032 /* Hypervisor virtualization interrupt */ 2033 if (env->pending_interrupts & PPC_INTERRUPT_HVIRT) { 2034 /* LPCR will be clear when not supported so this will work */ 2035 bool hvice = !!(env->spr[SPR_LPCR] & LPCR_HVICE); 2036 if ((async_deliver || !FIELD_EX64_HV(env->msr)) && hvice) { 2037 return PPC_INTERRUPT_HVIRT; 2038 } 2039 } 2040 2041 /* External interrupt can ignore MSR:EE under some circumstances */ 2042 if (env->pending_interrupts & PPC_INTERRUPT_EXT) { 2043 bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0); 2044 bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC); 2045 /* HEIC blocks delivery to the hypervisor */ 2046 if ((async_deliver && !(heic && FIELD_EX64_HV(env->msr) && 2047 !FIELD_EX64(env->msr, MSR, PR))) || 2048 (env->has_hv_mode && !FIELD_EX64_HV(env->msr) && !lpes0)) { 2049 return PPC_INTERRUPT_EXT; 2050 } 2051 } 2052 if (FIELD_EX64(env->msr, MSR, CE)) { 2053 /* External critical interrupt */ 2054 if (env->pending_interrupts & PPC_INTERRUPT_CEXT) { 2055 return PPC_INTERRUPT_CEXT; 2056 } 2057 } 2058 if (async_deliver != 0) { 2059 /* Watchdog timer on embedded PowerPC */ 2060 if (env->pending_interrupts & PPC_INTERRUPT_WDT) { 2061 return PPC_INTERRUPT_WDT; 2062 } 2063 if (env->pending_interrupts & PPC_INTERRUPT_CDOORBELL) { 2064 return PPC_INTERRUPT_CDOORBELL; 2065 } 2066 /* Fixed interval timer on embedded PowerPC */ 2067 if (env->pending_interrupts & PPC_INTERRUPT_FIT) { 2068 return PPC_INTERRUPT_FIT; 2069 } 2070 /* Programmable interval timer on embedded PowerPC */ 2071 if (env->pending_interrupts & PPC_INTERRUPT_PIT) { 2072 return PPC_INTERRUPT_PIT; 2073 } 2074 /* Decrementer exception */ 2075 if (env->pending_interrupts & PPC_INTERRUPT_DECR) { 2076 return PPC_INTERRUPT_DECR; 2077 } 2078 if (env->pending_interrupts & PPC_INTERRUPT_DOORBELL) { 2079 return PPC_INTERRUPT_DOORBELL; 2080 } 2081 if (env->pending_interrupts & PPC_INTERRUPT_HDOORBELL) { 2082 return PPC_INTERRUPT_HDOORBELL; 2083 } 2084 if (env->pending_interrupts & PPC_INTERRUPT_PERFM) { 2085 return PPC_INTERRUPT_PERFM; 2086 } 2087 /* Thermal interrupt */ 2088 if (env->pending_interrupts & PPC_INTERRUPT_THERM) { 2089 return PPC_INTERRUPT_THERM; 2090 } 2091 /* EBB exception */ 2092 if (env->pending_interrupts & PPC_INTERRUPT_EBB) { 2093 /* 2094 * EBB exception must be taken in problem state and 2095 * with BESCR_GE set. 2096 */ 2097 if (FIELD_EX64(env->msr, MSR, PR) && 2098 (env->spr[SPR_BESCR] & BESCR_GE)) { 2099 return PPC_INTERRUPT_EBB; 2100 } 2101 } 2102 } 2103 2104 return 0; 2105 } 2106 2107 /* 2108 * Sets CPU_INTERRUPT_HARD if there is at least one unmasked interrupt to be 2109 * delivered and clears CPU_INTERRUPT_HARD otherwise. 2110 * 2111 * This method is called by ppc_set_interrupt when an interrupt is raised or 2112 * lowered, and should also be called whenever an interrupt masking condition 2113 * is changed, e.g.: 2114 * - When relevant bits of MSR are altered, like EE, HV, PR, etc.; 2115 * - When relevant bits of LPCR are altered, like PECE, HDICE, HVICE, etc.; 2116 * - When PSSCR[EC] or env->resume_as_sreset are changed; 2117 * - When cs->halted is changed and the CPU has a different interrupt masking 2118 * logic in power-saving mode (e.g., POWER7/8/9/10); 2119 */ 2120 void ppc_maybe_interrupt(CPUPPCState *env) 2121 { 2122 CPUState *cs = env_cpu(env); 2123 BQL_LOCK_GUARD(); 2124 2125 if (ppc_next_unmasked_interrupt(env)) { 2126 cpu_interrupt(cs, CPU_INTERRUPT_HARD); 2127 } else { 2128 cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD); 2129 } 2130 } 2131 2132 #ifdef TARGET_PPC64 2133 static void p7_deliver_interrupt(CPUPPCState *env, int interrupt) 2134 { 2135 PowerPCCPU *cpu = env_archcpu(env); 2136 2137 switch (interrupt) { 2138 case PPC_INTERRUPT_MCK: /* Machine check exception */ 2139 env->pending_interrupts &= ~PPC_INTERRUPT_MCK; 2140 powerpc_excp(cpu, POWERPC_EXCP_MCHECK); 2141 break; 2142 2143 case PPC_INTERRUPT_HDECR: /* Hypervisor decrementer exception */ 2144 /* HDEC clears on delivery */ 2145 env->pending_interrupts &= ~PPC_INTERRUPT_HDECR; 2146 powerpc_excp(cpu, POWERPC_EXCP_HDECR); 2147 break; 2148 2149 case PPC_INTERRUPT_EXT: 2150 if (books_vhyp_promotes_external_to_hvirt(cpu)) { 2151 powerpc_excp(cpu, POWERPC_EXCP_HVIRT); 2152 } else { 2153 powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL); 2154 } 2155 break; 2156 2157 case PPC_INTERRUPT_DECR: /* Decrementer exception */ 2158 powerpc_excp(cpu, POWERPC_EXCP_DECR); 2159 break; 2160 case PPC_INTERRUPT_PERFM: 2161 env->pending_interrupts &= ~PPC_INTERRUPT_PERFM; 2162 powerpc_excp(cpu, POWERPC_EXCP_PERFM); 2163 break; 2164 case 0: 2165 /* 2166 * This is a bug ! It means that has_work took us out of halt without 2167 * anything to deliver while in a PM state that requires getting 2168 * out via a 0x100 2169 * 2170 * This means we will incorrectly execute past the power management 2171 * instruction instead of triggering a reset. 2172 * 2173 * It generally means a discrepancy between the wakeup conditions in the 2174 * processor has_work implementation and the logic in this function. 2175 */ 2176 assert(!env->resume_as_sreset); 2177 break; 2178 default: 2179 cpu_abort(env_cpu(env), "Invalid PowerPC interrupt %d. Aborting\n", 2180 interrupt); 2181 } 2182 } 2183 2184 static void p8_deliver_interrupt(CPUPPCState *env, int interrupt) 2185 { 2186 PowerPCCPU *cpu = env_archcpu(env); 2187 2188 switch (interrupt) { 2189 case PPC_INTERRUPT_MCK: /* Machine check exception */ 2190 env->pending_interrupts &= ~PPC_INTERRUPT_MCK; 2191 powerpc_excp(cpu, POWERPC_EXCP_MCHECK); 2192 break; 2193 2194 case PPC_INTERRUPT_HDECR: /* Hypervisor decrementer exception */ 2195 /* HDEC clears on delivery */ 2196 env->pending_interrupts &= ~PPC_INTERRUPT_HDECR; 2197 powerpc_excp(cpu, POWERPC_EXCP_HDECR); 2198 break; 2199 2200 case PPC_INTERRUPT_EXT: 2201 if (books_vhyp_promotes_external_to_hvirt(cpu)) { 2202 powerpc_excp(cpu, POWERPC_EXCP_HVIRT); 2203 } else { 2204 powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL); 2205 } 2206 break; 2207 2208 case PPC_INTERRUPT_DECR: /* Decrementer exception */ 2209 powerpc_excp(cpu, POWERPC_EXCP_DECR); 2210 break; 2211 case PPC_INTERRUPT_DOORBELL: 2212 env->pending_interrupts &= ~PPC_INTERRUPT_DOORBELL; 2213 if (is_book3s_arch2x(env)) { 2214 powerpc_excp(cpu, POWERPC_EXCP_SDOOR); 2215 } else { 2216 powerpc_excp(cpu, POWERPC_EXCP_DOORI); 2217 } 2218 break; 2219 case PPC_INTERRUPT_HDOORBELL: 2220 env->pending_interrupts &= ~PPC_INTERRUPT_HDOORBELL; 2221 powerpc_excp(cpu, POWERPC_EXCP_SDOOR_HV); 2222 break; 2223 case PPC_INTERRUPT_PERFM: 2224 env->pending_interrupts &= ~PPC_INTERRUPT_PERFM; 2225 powerpc_excp(cpu, POWERPC_EXCP_PERFM); 2226 break; 2227 case PPC_INTERRUPT_EBB: /* EBB exception */ 2228 env->pending_interrupts &= ~PPC_INTERRUPT_EBB; 2229 if (env->spr[SPR_BESCR] & BESCR_PMEO) { 2230 powerpc_excp(cpu, POWERPC_EXCP_PERFM_EBB); 2231 } else if (env->spr[SPR_BESCR] & BESCR_EEO) { 2232 powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL_EBB); 2233 } 2234 break; 2235 case 0: 2236 /* 2237 * This is a bug ! It means that has_work took us out of halt without 2238 * anything to deliver while in a PM state that requires getting 2239 * out via a 0x100 2240 * 2241 * This means we will incorrectly execute past the power management 2242 * instruction instead of triggering a reset. 2243 * 2244 * It generally means a discrepancy between the wakeup conditions in the 2245 * processor has_work implementation and the logic in this function. 2246 */ 2247 assert(!env->resume_as_sreset); 2248 break; 2249 default: 2250 cpu_abort(env_cpu(env), "Invalid PowerPC interrupt %d. Aborting\n", 2251 interrupt); 2252 } 2253 } 2254 2255 static void p9_deliver_interrupt(CPUPPCState *env, int interrupt) 2256 { 2257 PowerPCCPU *cpu = env_archcpu(env); 2258 CPUState *cs = env_cpu(env); 2259 2260 if (cs->halted && !(env->spr[SPR_PSSCR] & PSSCR_EC) && 2261 !FIELD_EX64(env->msr, MSR, EE)) { 2262 /* 2263 * A pending interrupt took us out of power-saving, but MSR[EE] says 2264 * that we should return to NIP+4 instead of delivering it. 2265 */ 2266 return; 2267 } 2268 2269 switch (interrupt) { 2270 case PPC_INTERRUPT_MCK: /* Machine check exception */ 2271 env->pending_interrupts &= ~PPC_INTERRUPT_MCK; 2272 powerpc_excp(cpu, POWERPC_EXCP_MCHECK); 2273 break; 2274 2275 case PPC_INTERRUPT_HDECR: /* Hypervisor decrementer exception */ 2276 /* HDEC clears on delivery */ 2277 env->pending_interrupts &= ~PPC_INTERRUPT_HDECR; 2278 powerpc_excp(cpu, POWERPC_EXCP_HDECR); 2279 break; 2280 case PPC_INTERRUPT_HVIRT: /* Hypervisor virtualization interrupt */ 2281 powerpc_excp(cpu, POWERPC_EXCP_HVIRT); 2282 break; 2283 2284 case PPC_INTERRUPT_EXT: 2285 if (books_vhyp_promotes_external_to_hvirt(cpu)) { 2286 powerpc_excp(cpu, POWERPC_EXCP_HVIRT); 2287 } else { 2288 powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL); 2289 } 2290 break; 2291 2292 case PPC_INTERRUPT_DECR: /* Decrementer exception */ 2293 powerpc_excp(cpu, POWERPC_EXCP_DECR); 2294 break; 2295 case PPC_INTERRUPT_DOORBELL: 2296 env->pending_interrupts &= ~PPC_INTERRUPT_DOORBELL; 2297 powerpc_excp(cpu, POWERPC_EXCP_SDOOR); 2298 break; 2299 case PPC_INTERRUPT_HDOORBELL: 2300 env->pending_interrupts &= ~PPC_INTERRUPT_HDOORBELL; 2301 powerpc_excp(cpu, POWERPC_EXCP_SDOOR_HV); 2302 break; 2303 case PPC_INTERRUPT_PERFM: 2304 env->pending_interrupts &= ~PPC_INTERRUPT_PERFM; 2305 powerpc_excp(cpu, POWERPC_EXCP_PERFM); 2306 break; 2307 case PPC_INTERRUPT_EBB: /* EBB exception */ 2308 env->pending_interrupts &= ~PPC_INTERRUPT_EBB; 2309 if (env->spr[SPR_BESCR] & BESCR_PMEO) { 2310 powerpc_excp(cpu, POWERPC_EXCP_PERFM_EBB); 2311 } else if (env->spr[SPR_BESCR] & BESCR_EEO) { 2312 powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL_EBB); 2313 } 2314 break; 2315 case 0: 2316 /* 2317 * This is a bug ! It means that has_work took us out of halt without 2318 * anything to deliver while in a PM state that requires getting 2319 * out via a 0x100 2320 * 2321 * This means we will incorrectly execute past the power management 2322 * instruction instead of triggering a reset. 2323 * 2324 * It generally means a discrepancy between the wakeup conditions in the 2325 * processor has_work implementation and the logic in this function. 2326 */ 2327 assert(!env->resume_as_sreset); 2328 break; 2329 default: 2330 cpu_abort(env_cpu(env), "Invalid PowerPC interrupt %d. Aborting\n", 2331 interrupt); 2332 } 2333 } 2334 #endif /* TARGET_PPC64 */ 2335 2336 static void ppc_deliver_interrupt(CPUPPCState *env, int interrupt) 2337 { 2338 #ifdef TARGET_PPC64 2339 switch (env->excp_model) { 2340 case POWERPC_EXCP_POWER7: 2341 return p7_deliver_interrupt(env, interrupt); 2342 case POWERPC_EXCP_POWER8: 2343 return p8_deliver_interrupt(env, interrupt); 2344 case POWERPC_EXCP_POWER9: 2345 case POWERPC_EXCP_POWER10: 2346 return p9_deliver_interrupt(env, interrupt); 2347 default: 2348 break; 2349 } 2350 #endif 2351 PowerPCCPU *cpu = env_archcpu(env); 2352 2353 switch (interrupt) { 2354 case PPC_INTERRUPT_RESET: /* External reset */ 2355 env->pending_interrupts &= ~PPC_INTERRUPT_RESET; 2356 powerpc_excp(cpu, POWERPC_EXCP_RESET); 2357 break; 2358 case PPC_INTERRUPT_MCK: /* Machine check exception */ 2359 env->pending_interrupts &= ~PPC_INTERRUPT_MCK; 2360 powerpc_excp(cpu, POWERPC_EXCP_MCHECK); 2361 break; 2362 2363 case PPC_INTERRUPT_HDECR: /* Hypervisor decrementer exception */ 2364 /* HDEC clears on delivery */ 2365 env->pending_interrupts &= ~PPC_INTERRUPT_HDECR; 2366 powerpc_excp(cpu, POWERPC_EXCP_HDECR); 2367 break; 2368 case PPC_INTERRUPT_HVIRT: /* Hypervisor virtualization interrupt */ 2369 powerpc_excp(cpu, POWERPC_EXCP_HVIRT); 2370 break; 2371 2372 case PPC_INTERRUPT_EXT: 2373 if (books_vhyp_promotes_external_to_hvirt(cpu)) { 2374 powerpc_excp(cpu, POWERPC_EXCP_HVIRT); 2375 } else { 2376 powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL); 2377 } 2378 break; 2379 case PPC_INTERRUPT_CEXT: /* External critical interrupt */ 2380 powerpc_excp(cpu, POWERPC_EXCP_CRITICAL); 2381 break; 2382 2383 case PPC_INTERRUPT_WDT: /* Watchdog timer on embedded PowerPC */ 2384 env->pending_interrupts &= ~PPC_INTERRUPT_WDT; 2385 powerpc_excp(cpu, POWERPC_EXCP_WDT); 2386 break; 2387 case PPC_INTERRUPT_CDOORBELL: 2388 env->pending_interrupts &= ~PPC_INTERRUPT_CDOORBELL; 2389 powerpc_excp(cpu, POWERPC_EXCP_DOORCI); 2390 break; 2391 case PPC_INTERRUPT_FIT: /* Fixed interval timer on embedded PowerPC */ 2392 env->pending_interrupts &= ~PPC_INTERRUPT_FIT; 2393 powerpc_excp(cpu, POWERPC_EXCP_FIT); 2394 break; 2395 case PPC_INTERRUPT_PIT: /* Programmable interval timer on embedded ppc */ 2396 env->pending_interrupts &= ~PPC_INTERRUPT_PIT; 2397 powerpc_excp(cpu, POWERPC_EXCP_PIT); 2398 break; 2399 case PPC_INTERRUPT_DECR: /* Decrementer exception */ 2400 if (ppc_decr_clear_on_delivery(env)) { 2401 env->pending_interrupts &= ~PPC_INTERRUPT_DECR; 2402 } 2403 powerpc_excp(cpu, POWERPC_EXCP_DECR); 2404 break; 2405 case PPC_INTERRUPT_DOORBELL: 2406 env->pending_interrupts &= ~PPC_INTERRUPT_DOORBELL; 2407 if (is_book3s_arch2x(env)) { 2408 powerpc_excp(cpu, POWERPC_EXCP_SDOOR); 2409 } else { 2410 powerpc_excp(cpu, POWERPC_EXCP_DOORI); 2411 } 2412 break; 2413 case PPC_INTERRUPT_HDOORBELL: 2414 env->pending_interrupts &= ~PPC_INTERRUPT_HDOORBELL; 2415 powerpc_excp(cpu, POWERPC_EXCP_SDOOR_HV); 2416 break; 2417 case PPC_INTERRUPT_PERFM: 2418 env->pending_interrupts &= ~PPC_INTERRUPT_PERFM; 2419 powerpc_excp(cpu, POWERPC_EXCP_PERFM); 2420 break; 2421 case PPC_INTERRUPT_THERM: /* Thermal interrupt */ 2422 env->pending_interrupts &= ~PPC_INTERRUPT_THERM; 2423 powerpc_excp(cpu, POWERPC_EXCP_THERM); 2424 break; 2425 case PPC_INTERRUPT_EBB: /* EBB exception */ 2426 env->pending_interrupts &= ~PPC_INTERRUPT_EBB; 2427 if (env->spr[SPR_BESCR] & BESCR_PMEO) { 2428 powerpc_excp(cpu, POWERPC_EXCP_PERFM_EBB); 2429 } else if (env->spr[SPR_BESCR] & BESCR_EEO) { 2430 powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL_EBB); 2431 } 2432 break; 2433 case 0: 2434 /* 2435 * This is a bug ! It means that has_work took us out of halt without 2436 * anything to deliver while in a PM state that requires getting 2437 * out via a 0x100 2438 * 2439 * This means we will incorrectly execute past the power management 2440 * instruction instead of triggering a reset. 2441 * 2442 * It generally means a discrepancy between the wakeup conditions in the 2443 * processor has_work implementation and the logic in this function. 2444 */ 2445 assert(!env->resume_as_sreset); 2446 break; 2447 default: 2448 cpu_abort(env_cpu(env), "Invalid PowerPC interrupt %d. Aborting\n", 2449 interrupt); 2450 } 2451 } 2452 2453 void ppc_cpu_do_system_reset(CPUState *cs) 2454 { 2455 PowerPCCPU *cpu = POWERPC_CPU(cs); 2456 2457 powerpc_excp(cpu, POWERPC_EXCP_RESET); 2458 } 2459 2460 void ppc_cpu_do_fwnmi_machine_check(CPUState *cs, target_ulong vector) 2461 { 2462 PowerPCCPU *cpu = POWERPC_CPU(cs); 2463 CPUPPCState *env = &cpu->env; 2464 target_ulong msr = 0; 2465 2466 /* 2467 * Set MSR and NIP for the handler, SRR0/1, DAR and DSISR have already 2468 * been set by KVM. 2469 */ 2470 msr = (1ULL << MSR_ME); 2471 msr |= env->msr & (1ULL << MSR_SF); 2472 if (ppc_interrupts_little_endian(cpu, false)) { 2473 msr |= (1ULL << MSR_LE); 2474 } 2475 2476 /* Anything for nested required here? MSR[HV] bit? */ 2477 2478 powerpc_set_excp_state(cpu, vector, msr); 2479 } 2480 2481 bool ppc_cpu_exec_interrupt(CPUState *cs, int interrupt_request) 2482 { 2483 CPUPPCState *env = cpu_env(cs); 2484 int interrupt; 2485 2486 if ((interrupt_request & CPU_INTERRUPT_HARD) == 0) { 2487 return false; 2488 } 2489 2490 interrupt = ppc_next_unmasked_interrupt(env); 2491 if (interrupt == 0) { 2492 return false; 2493 } 2494 2495 ppc_deliver_interrupt(env, interrupt); 2496 if (env->pending_interrupts == 0) { 2497 cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD); 2498 } 2499 return true; 2500 } 2501 2502 #endif /* !CONFIG_USER_ONLY */ 2503 2504 /*****************************************************************************/ 2505 /* Exceptions processing helpers */ 2506 2507 void raise_exception_err_ra(CPUPPCState *env, uint32_t exception, 2508 uint32_t error_code, uintptr_t raddr) 2509 { 2510 CPUState *cs = env_cpu(env); 2511 2512 cs->exception_index = exception; 2513 env->error_code = error_code; 2514 cpu_loop_exit_restore(cs, raddr); 2515 } 2516 2517 void raise_exception_err(CPUPPCState *env, uint32_t exception, 2518 uint32_t error_code) 2519 { 2520 raise_exception_err_ra(env, exception, error_code, 0); 2521 } 2522 2523 void raise_exception(CPUPPCState *env, uint32_t exception) 2524 { 2525 raise_exception_err_ra(env, exception, 0, 0); 2526 } 2527 2528 void raise_exception_ra(CPUPPCState *env, uint32_t exception, 2529 uintptr_t raddr) 2530 { 2531 raise_exception_err_ra(env, exception, 0, raddr); 2532 } 2533 2534 #ifdef CONFIG_TCG 2535 void helper_raise_exception_err(CPUPPCState *env, uint32_t exception, 2536 uint32_t error_code) 2537 { 2538 raise_exception_err_ra(env, exception, error_code, 0); 2539 } 2540 2541 void helper_raise_exception(CPUPPCState *env, uint32_t exception) 2542 { 2543 raise_exception_err_ra(env, exception, 0, 0); 2544 } 2545 2546 #ifndef CONFIG_USER_ONLY 2547 void helper_store_msr(CPUPPCState *env, target_ulong val) 2548 { 2549 uint32_t excp = hreg_store_msr(env, val, 0); 2550 2551 if (excp != 0) { 2552 cpu_interrupt_exittb(env_cpu(env)); 2553 raise_exception(env, excp); 2554 } 2555 } 2556 2557 void helper_ppc_maybe_interrupt(CPUPPCState *env) 2558 { 2559 ppc_maybe_interrupt(env); 2560 } 2561 2562 #ifdef TARGET_PPC64 2563 void helper_scv(CPUPPCState *env, uint32_t lev) 2564 { 2565 if (env->spr[SPR_FSCR] & (1ull << FSCR_SCV)) { 2566 raise_exception_err(env, POWERPC_EXCP_SYSCALL_VECTORED, lev); 2567 } else { 2568 raise_exception_err(env, POWERPC_EXCP_FU, FSCR_IC_SCV); 2569 } 2570 } 2571 2572 void helper_pminsn(CPUPPCState *env, uint32_t insn) 2573 { 2574 CPUState *cs = env_cpu(env); 2575 2576 cs->halted = 1; 2577 2578 /* Condition for waking up at 0x100 */ 2579 env->resume_as_sreset = (insn != PPC_PM_STOP) || 2580 (env->spr[SPR_PSSCR] & PSSCR_EC); 2581 2582 /* HDECR is not to wake from PM state, it may have already fired */ 2583 if (env->resume_as_sreset) { 2584 PowerPCCPU *cpu = env_archcpu(env); 2585 ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 0); 2586 } 2587 2588 ppc_maybe_interrupt(env); 2589 } 2590 #endif /* TARGET_PPC64 */ 2591 2592 static void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr) 2593 { 2594 /* MSR:POW cannot be set by any form of rfi */ 2595 msr &= ~(1ULL << MSR_POW); 2596 2597 /* MSR:TGPR cannot be set by any form of rfi */ 2598 if (env->flags & POWERPC_FLAG_TGPR) 2599 msr &= ~(1ULL << MSR_TGPR); 2600 2601 #ifdef TARGET_PPC64 2602 /* Switching to 32-bit ? Crop the nip */ 2603 if (!msr_is_64bit(env, msr)) { 2604 nip = (uint32_t)nip; 2605 } 2606 #else 2607 nip = (uint32_t)nip; 2608 #endif 2609 /* XXX: beware: this is false if VLE is supported */ 2610 env->nip = nip & ~((target_ulong)0x00000003); 2611 hreg_store_msr(env, msr, 1); 2612 trace_ppc_excp_rfi(env->nip, env->msr); 2613 /* 2614 * No need to raise an exception here, as rfi is always the last 2615 * insn of a TB 2616 */ 2617 cpu_interrupt_exittb(env_cpu(env)); 2618 /* Reset the reservation */ 2619 env->reserve_addr = -1; 2620 2621 /* Context synchronizing: check if TCG TLB needs flush */ 2622 check_tlb_flush(env, false); 2623 } 2624 2625 void helper_rfi(CPUPPCState *env) 2626 { 2627 do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1] & 0xfffffffful); 2628 } 2629 2630 #ifdef TARGET_PPC64 2631 void helper_rfid(CPUPPCState *env) 2632 { 2633 /* 2634 * The architecture defines a number of rules for which bits can 2635 * change but in practice, we handle this in hreg_store_msr() 2636 * which will be called by do_rfi(), so there is no need to filter 2637 * here 2638 */ 2639 do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1]); 2640 } 2641 2642 void helper_rfscv(CPUPPCState *env) 2643 { 2644 do_rfi(env, env->lr, env->ctr); 2645 } 2646 2647 void helper_hrfid(CPUPPCState *env) 2648 { 2649 do_rfi(env, env->spr[SPR_HSRR0], env->spr[SPR_HSRR1]); 2650 } 2651 2652 void helper_rfebb(CPUPPCState *env, target_ulong s) 2653 { 2654 target_ulong msr = env->msr; 2655 2656 /* 2657 * Handling of BESCR bits 32:33 according to PowerISA v3.1: 2658 * 2659 * "If BESCR 32:33 != 0b00 the instruction is treated as if 2660 * the instruction form were invalid." 2661 */ 2662 if (env->spr[SPR_BESCR] & BESCR_INVALID) { 2663 raise_exception_err(env, POWERPC_EXCP_PROGRAM, 2664 POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL); 2665 } 2666 2667 env->nip = env->spr[SPR_EBBRR]; 2668 2669 /* Switching to 32-bit ? Crop the nip */ 2670 if (!msr_is_64bit(env, msr)) { 2671 env->nip = (uint32_t)env->spr[SPR_EBBRR]; 2672 } 2673 2674 if (s) { 2675 env->spr[SPR_BESCR] |= BESCR_GE; 2676 } else { 2677 env->spr[SPR_BESCR] &= ~BESCR_GE; 2678 } 2679 } 2680 2681 /* 2682 * Triggers or queues an 'ebb_excp' EBB exception. All checks 2683 * but FSCR, HFSCR and msr_pr must be done beforehand. 2684 * 2685 * PowerISA v3.1 isn't clear about whether an EBB should be 2686 * postponed or cancelled if the EBB facility is unavailable. 2687 * Our assumption here is that the EBB is cancelled if both 2688 * FSCR and HFSCR EBB facilities aren't available. 2689 */ 2690 static void do_ebb(CPUPPCState *env, int ebb_excp) 2691 { 2692 PowerPCCPU *cpu = env_archcpu(env); 2693 2694 /* 2695 * FSCR_EBB and FSCR_IC_EBB are the same bits used with 2696 * HFSCR. 2697 */ 2698 helper_fscr_facility_check(env, FSCR_EBB, 0, FSCR_IC_EBB); 2699 helper_hfscr_facility_check(env, FSCR_EBB, "EBB", FSCR_IC_EBB); 2700 2701 if (ebb_excp == POWERPC_EXCP_PERFM_EBB) { 2702 env->spr[SPR_BESCR] |= BESCR_PMEO; 2703 } else if (ebb_excp == POWERPC_EXCP_EXTERNAL_EBB) { 2704 env->spr[SPR_BESCR] |= BESCR_EEO; 2705 } 2706 2707 if (FIELD_EX64(env->msr, MSR, PR)) { 2708 powerpc_excp(cpu, ebb_excp); 2709 } else { 2710 ppc_set_irq(cpu, PPC_INTERRUPT_EBB, 1); 2711 } 2712 } 2713 2714 void raise_ebb_perfm_exception(CPUPPCState *env) 2715 { 2716 bool perfm_ebb_enabled = env->spr[SPR_POWER_MMCR0] & MMCR0_EBE && 2717 env->spr[SPR_BESCR] & BESCR_PME && 2718 env->spr[SPR_BESCR] & BESCR_GE; 2719 2720 if (!perfm_ebb_enabled) { 2721 return; 2722 } 2723 2724 do_ebb(env, POWERPC_EXCP_PERFM_EBB); 2725 } 2726 #endif /* TARGET_PPC64 */ 2727 2728 /*****************************************************************************/ 2729 /* Embedded PowerPC specific helpers */ 2730 void helper_40x_rfci(CPUPPCState *env) 2731 { 2732 do_rfi(env, env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3]); 2733 } 2734 2735 void helper_rfci(CPUPPCState *env) 2736 { 2737 do_rfi(env, env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1]); 2738 } 2739 2740 void helper_rfdi(CPUPPCState *env) 2741 { 2742 /* FIXME: choose CSRR1 or DSRR1 based on cpu type */ 2743 do_rfi(env, env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1]); 2744 } 2745 2746 void helper_rfmci(CPUPPCState *env) 2747 { 2748 /* FIXME: choose CSRR1 or MCSRR1 based on cpu type */ 2749 do_rfi(env, env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1]); 2750 } 2751 #endif /* !CONFIG_USER_ONLY */ 2752 2753 void helper_tw(CPUPPCState *env, target_ulong arg1, target_ulong arg2, 2754 uint32_t flags) 2755 { 2756 if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) || 2757 ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) || 2758 ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) || 2759 ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) || 2760 ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) { 2761 raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, 2762 POWERPC_EXCP_TRAP, GETPC()); 2763 } 2764 } 2765 2766 #ifdef TARGET_PPC64 2767 void helper_td(CPUPPCState *env, target_ulong arg1, target_ulong arg2, 2768 uint32_t flags) 2769 { 2770 if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) || 2771 ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) || 2772 ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) || 2773 ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) || 2774 ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01))))) { 2775 raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, 2776 POWERPC_EXCP_TRAP, GETPC()); 2777 } 2778 } 2779 #endif /* TARGET_PPC64 */ 2780 2781 static uint32_t helper_SIMON_LIKE_32_64(uint32_t x, uint64_t key, uint32_t lane) 2782 { 2783 const uint16_t c = 0xfffc; 2784 const uint64_t z0 = 0xfa2561cdf44ac398ULL; 2785 uint16_t z = 0, temp; 2786 uint16_t k[32], eff_k[32], xleft[33], xright[33], fxleft[32]; 2787 2788 for (int i = 3; i >= 0; i--) { 2789 k[i] = key & 0xffff; 2790 key >>= 16; 2791 } 2792 xleft[0] = x & 0xffff; 2793 xright[0] = (x >> 16) & 0xffff; 2794 2795 for (int i = 0; i < 28; i++) { 2796 z = (z0 >> (63 - i)) & 1; 2797 temp = ror16(k[i + 3], 3) ^ k[i + 1]; 2798 k[i + 4] = c ^ z ^ k[i] ^ temp ^ ror16(temp, 1); 2799 } 2800 2801 for (int i = 0; i < 8; i++) { 2802 eff_k[4 * i + 0] = k[4 * i + ((0 + lane) % 4)]; 2803 eff_k[4 * i + 1] = k[4 * i + ((1 + lane) % 4)]; 2804 eff_k[4 * i + 2] = k[4 * i + ((2 + lane) % 4)]; 2805 eff_k[4 * i + 3] = k[4 * i + ((3 + lane) % 4)]; 2806 } 2807 2808 for (int i = 0; i < 32; i++) { 2809 fxleft[i] = (rol16(xleft[i], 1) & 2810 rol16(xleft[i], 8)) ^ rol16(xleft[i], 2); 2811 xleft[i + 1] = xright[i] ^ fxleft[i] ^ eff_k[i]; 2812 xright[i + 1] = xleft[i]; 2813 } 2814 2815 return (((uint32_t)xright[32]) << 16) | xleft[32]; 2816 } 2817 2818 static uint64_t hash_digest(uint64_t ra, uint64_t rb, uint64_t key) 2819 { 2820 uint64_t stage0_h = 0ULL, stage0_l = 0ULL; 2821 uint64_t stage1_h, stage1_l; 2822 2823 for (int i = 0; i < 4; i++) { 2824 stage0_h |= ror64(rb & 0xff, 8 * (2 * i + 1)); 2825 stage0_h |= ((ra >> 32) & 0xff) << (8 * 2 * i); 2826 stage0_l |= ror64((rb >> 32) & 0xff, 8 * (2 * i + 1)); 2827 stage0_l |= (ra & 0xff) << (8 * 2 * i); 2828 rb >>= 8; 2829 ra >>= 8; 2830 } 2831 2832 stage1_h = (uint64_t)helper_SIMON_LIKE_32_64(stage0_h >> 32, key, 0) << 32; 2833 stage1_h |= helper_SIMON_LIKE_32_64(stage0_h, key, 1); 2834 stage1_l = (uint64_t)helper_SIMON_LIKE_32_64(stage0_l >> 32, key, 2) << 32; 2835 stage1_l |= helper_SIMON_LIKE_32_64(stage0_l, key, 3); 2836 2837 return stage1_h ^ stage1_l; 2838 } 2839 2840 static void do_hash(CPUPPCState *env, target_ulong ea, target_ulong ra, 2841 target_ulong rb, uint64_t key, bool store) 2842 { 2843 uint64_t calculated_hash = hash_digest(ra, rb, key), loaded_hash; 2844 2845 if (store) { 2846 cpu_stq_data_ra(env, ea, calculated_hash, GETPC()); 2847 } else { 2848 loaded_hash = cpu_ldq_data_ra(env, ea, GETPC()); 2849 if (loaded_hash != calculated_hash) { 2850 raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, 2851 POWERPC_EXCP_TRAP, GETPC()); 2852 } 2853 } 2854 } 2855 2856 #include "qemu/guest-random.h" 2857 2858 #ifdef TARGET_PPC64 2859 #define HELPER_HASH(op, key, store, dexcr_aspect) \ 2860 void helper_##op(CPUPPCState *env, target_ulong ea, target_ulong ra, \ 2861 target_ulong rb) \ 2862 { \ 2863 if (env->msr & R_MSR_PR_MASK) { \ 2864 if (!(env->spr[SPR_DEXCR] & R_DEXCR_PRO_##dexcr_aspect##_MASK || \ 2865 env->spr[SPR_HDEXCR] & R_HDEXCR_ENF_##dexcr_aspect##_MASK)) \ 2866 return; \ 2867 } else if (!(env->msr & R_MSR_HV_MASK)) { \ 2868 if (!(env->spr[SPR_DEXCR] & R_DEXCR_PNH_##dexcr_aspect##_MASK || \ 2869 env->spr[SPR_HDEXCR] & R_HDEXCR_ENF_##dexcr_aspect##_MASK)) \ 2870 return; \ 2871 } else if (!(env->msr & R_MSR_S_MASK)) { \ 2872 if (!(env->spr[SPR_HDEXCR] & R_HDEXCR_HNU_##dexcr_aspect##_MASK)) \ 2873 return; \ 2874 } \ 2875 \ 2876 do_hash(env, ea, ra, rb, key, store); \ 2877 } 2878 #else 2879 #define HELPER_HASH(op, key, store, dexcr_aspect) \ 2880 void helper_##op(CPUPPCState *env, target_ulong ea, target_ulong ra, \ 2881 target_ulong rb) \ 2882 { \ 2883 do_hash(env, ea, ra, rb, key, store); \ 2884 } 2885 #endif /* TARGET_PPC64 */ 2886 2887 HELPER_HASH(HASHST, env->spr[SPR_HASHKEYR], true, NPHIE) 2888 HELPER_HASH(HASHCHK, env->spr[SPR_HASHKEYR], false, NPHIE) 2889 HELPER_HASH(HASHSTP, env->spr[SPR_HASHPKEYR], true, PHIE) 2890 HELPER_HASH(HASHCHKP, env->spr[SPR_HASHPKEYR], false, PHIE) 2891 2892 #ifndef CONFIG_USER_ONLY 2893 /* Embedded.Processor Control */ 2894 static int dbell2irq(target_ulong rb) 2895 { 2896 int msg = rb & DBELL_TYPE_MASK; 2897 int irq = -1; 2898 2899 switch (msg) { 2900 case DBELL_TYPE_DBELL: 2901 irq = PPC_INTERRUPT_DOORBELL; 2902 break; 2903 case DBELL_TYPE_DBELL_CRIT: 2904 irq = PPC_INTERRUPT_CDOORBELL; 2905 break; 2906 case DBELL_TYPE_G_DBELL: 2907 case DBELL_TYPE_G_DBELL_CRIT: 2908 case DBELL_TYPE_G_DBELL_MC: 2909 /* XXX implement */ 2910 default: 2911 break; 2912 } 2913 2914 return irq; 2915 } 2916 2917 void helper_msgclr(CPUPPCState *env, target_ulong rb) 2918 { 2919 int irq = dbell2irq(rb); 2920 2921 if (irq < 0) { 2922 return; 2923 } 2924 2925 ppc_set_irq(env_archcpu(env), irq, 0); 2926 } 2927 2928 void helper_msgsnd(target_ulong rb) 2929 { 2930 int irq = dbell2irq(rb); 2931 int pir = rb & DBELL_PIRTAG_MASK; 2932 CPUState *cs; 2933 2934 if (irq < 0) { 2935 return; 2936 } 2937 2938 bql_lock(); 2939 CPU_FOREACH(cs) { 2940 PowerPCCPU *cpu = POWERPC_CPU(cs); 2941 CPUPPCState *cenv = &cpu->env; 2942 2943 if ((rb & DBELL_BRDCAST) || (cenv->spr[SPR_BOOKE_PIR] == pir)) { 2944 ppc_set_irq(cpu, irq, 1); 2945 } 2946 } 2947 bql_unlock(); 2948 } 2949 2950 /* Server Processor Control */ 2951 2952 static bool dbell_type_server(target_ulong rb) 2953 { 2954 /* 2955 * A Directed Hypervisor Doorbell message is sent only if the 2956 * message type is 5. All other types are reserved and the 2957 * instruction is a no-op 2958 */ 2959 return (rb & DBELL_TYPE_MASK) == DBELL_TYPE_DBELL_SERVER; 2960 } 2961 2962 void helper_book3s_msgclr(CPUPPCState *env, target_ulong rb) 2963 { 2964 if (!dbell_type_server(rb)) { 2965 return; 2966 } 2967 2968 ppc_set_irq(env_archcpu(env), PPC_INTERRUPT_HDOORBELL, 0); 2969 } 2970 2971 static void book3s_msgsnd_common(int pir, int irq) 2972 { 2973 CPUState *cs; 2974 2975 bql_lock(); 2976 CPU_FOREACH(cs) { 2977 PowerPCCPU *cpu = POWERPC_CPU(cs); 2978 CPUPPCState *cenv = &cpu->env; 2979 2980 /* TODO: broadcast message to all threads of the same processor */ 2981 if (cenv->spr_cb[SPR_PIR].default_value == pir) { 2982 ppc_set_irq(cpu, irq, 1); 2983 } 2984 } 2985 bql_unlock(); 2986 } 2987 2988 void helper_book3s_msgsnd(target_ulong rb) 2989 { 2990 int pir = rb & DBELL_PROCIDTAG_MASK; 2991 2992 if (!dbell_type_server(rb)) { 2993 return; 2994 } 2995 2996 book3s_msgsnd_common(pir, PPC_INTERRUPT_HDOORBELL); 2997 } 2998 2999 #ifdef TARGET_PPC64 3000 void helper_book3s_msgclrp(CPUPPCState *env, target_ulong rb) 3001 { 3002 helper_hfscr_facility_check(env, HFSCR_MSGP, "msgclrp", HFSCR_IC_MSGP); 3003 3004 if (!dbell_type_server(rb)) { 3005 return; 3006 } 3007 3008 ppc_set_irq(env_archcpu(env), PPC_INTERRUPT_DOORBELL, 0); 3009 } 3010 3011 /* 3012 * sends a message to another thread on the same 3013 * multi-threaded processor 3014 */ 3015 void helper_book3s_msgsndp(CPUPPCState *env, target_ulong rb) 3016 { 3017 CPUState *cs = env_cpu(env); 3018 PowerPCCPU *cpu = env_archcpu(env); 3019 CPUState *ccs; 3020 uint32_t nr_threads = cs->nr_threads; 3021 int ttir = rb & PPC_BITMASK(57, 63); 3022 3023 helper_hfscr_facility_check(env, HFSCR_MSGP, "msgsndp", HFSCR_IC_MSGP); 3024 3025 if (!(env->flags & POWERPC_FLAG_SMT_1LPAR)) { 3026 nr_threads = 1; /* msgsndp behaves as 1-thread in LPAR-per-thread mode*/ 3027 } 3028 3029 if (!dbell_type_server(rb) || ttir >= nr_threads) { 3030 return; 3031 } 3032 3033 if (nr_threads == 1) { 3034 ppc_set_irq(cpu, PPC_INTERRUPT_DOORBELL, 1); 3035 return; 3036 } 3037 3038 /* Does iothread need to be locked for walking CPU list? */ 3039 bql_lock(); 3040 THREAD_SIBLING_FOREACH(cs, ccs) { 3041 PowerPCCPU *ccpu = POWERPC_CPU(ccs); 3042 uint32_t thread_id = ppc_cpu_tir(ccpu); 3043 3044 if (ttir == thread_id) { 3045 ppc_set_irq(ccpu, PPC_INTERRUPT_DOORBELL, 1); 3046 bql_unlock(); 3047 return; 3048 } 3049 } 3050 3051 g_assert_not_reached(); 3052 } 3053 #endif /* TARGET_PPC64 */ 3054 3055 /* Single-step tracing */ 3056 void helper_book3s_trace(CPUPPCState *env, target_ulong prev_ip) 3057 { 3058 uint32_t error_code = 0; 3059 if (env->insns_flags2 & PPC2_ISA207S) { 3060 /* Load/store reporting, SRR1[35, 36] and SDAR, are not implemented. */ 3061 env->spr[SPR_POWER_SIAR] = prev_ip; 3062 error_code = PPC_BIT(33); 3063 } 3064 raise_exception_err(env, POWERPC_EXCP_TRACE, error_code); 3065 } 3066 3067 void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr, 3068 MMUAccessType access_type, 3069 int mmu_idx, uintptr_t retaddr) 3070 { 3071 CPUPPCState *env = cpu_env(cs); 3072 uint32_t insn; 3073 3074 /* Restore state and reload the insn we executed, for filling in DSISR. */ 3075 cpu_restore_state(cs, retaddr); 3076 insn = ppc_ldl_code(env, env->nip); 3077 3078 switch (env->mmu_model) { 3079 case POWERPC_MMU_SOFT_4xx: 3080 env->spr[SPR_40x_DEAR] = vaddr; 3081 break; 3082 case POWERPC_MMU_BOOKE: 3083 case POWERPC_MMU_BOOKE206: 3084 env->spr[SPR_BOOKE_DEAR] = vaddr; 3085 break; 3086 default: 3087 env->spr[SPR_DAR] = vaddr; 3088 break; 3089 } 3090 3091 cs->exception_index = POWERPC_EXCP_ALIGN; 3092 env->error_code = insn & 0x03FF0000; 3093 cpu_loop_exit(cs); 3094 } 3095 3096 void ppc_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, 3097 vaddr vaddr, unsigned size, 3098 MMUAccessType access_type, 3099 int mmu_idx, MemTxAttrs attrs, 3100 MemTxResult response, uintptr_t retaddr) 3101 { 3102 CPUPPCState *env = cpu_env(cs); 3103 3104 switch (env->excp_model) { 3105 #if defined(TARGET_PPC64) 3106 case POWERPC_EXCP_POWER8: 3107 case POWERPC_EXCP_POWER9: 3108 case POWERPC_EXCP_POWER10: 3109 /* 3110 * Machine check codes can be found in processor User Manual or 3111 * Linux or skiboot source. 3112 */ 3113 if (access_type == MMU_DATA_LOAD) { 3114 env->spr[SPR_DAR] = vaddr; 3115 env->spr[SPR_DSISR] = PPC_BIT(57); 3116 env->error_code = PPC_BIT(42); 3117 3118 } else if (access_type == MMU_DATA_STORE) { 3119 /* 3120 * MCE for stores in POWER is asynchronous so hardware does 3121 * not set DAR, but QEMU can do better. 3122 */ 3123 env->spr[SPR_DAR] = vaddr; 3124 env->error_code = PPC_BIT(36) | PPC_BIT(43) | PPC_BIT(45); 3125 env->error_code |= PPC_BIT(42); 3126 3127 } else { /* Fetch */ 3128 /* 3129 * is_prefix_insn_excp() tests !PPC_BIT(42) to avoid fetching 3130 * the instruction, so that must always be clear for fetches. 3131 */ 3132 env->error_code = PPC_BIT(36) | PPC_BIT(44) | PPC_BIT(45); 3133 } 3134 break; 3135 #endif 3136 default: 3137 /* 3138 * TODO: Check behaviour for other CPUs, for now do nothing. 3139 * Could add a basic MCE even if real hardware ignores. 3140 */ 3141 return; 3142 } 3143 3144 cs->exception_index = POWERPC_EXCP_MCHECK; 3145 cpu_loop_exit_restore(cs, retaddr); 3146 } 3147 3148 void ppc_cpu_debug_excp_handler(CPUState *cs) 3149 { 3150 #if defined(TARGET_PPC64) 3151 CPUPPCState *env = cpu_env(cs); 3152 3153 if (env->insns_flags2 & PPC2_ISA207S) { 3154 if (cs->watchpoint_hit) { 3155 if (cs->watchpoint_hit->flags & BP_CPU) { 3156 env->spr[SPR_DAR] = cs->watchpoint_hit->hitaddr; 3157 env->spr[SPR_DSISR] = PPC_BIT(41); 3158 cs->watchpoint_hit = NULL; 3159 raise_exception(env, POWERPC_EXCP_DSI); 3160 } 3161 cs->watchpoint_hit = NULL; 3162 } else if (cpu_breakpoint_test(cs, env->nip, BP_CPU)) { 3163 raise_exception_err(env, POWERPC_EXCP_TRACE, 3164 PPC_BIT(33) | PPC_BIT(43)); 3165 } 3166 } 3167 #endif 3168 } 3169 3170 bool ppc_cpu_debug_check_breakpoint(CPUState *cs) 3171 { 3172 #if defined(TARGET_PPC64) 3173 CPUPPCState *env = cpu_env(cs); 3174 3175 if (env->insns_flags2 & PPC2_ISA207S) { 3176 target_ulong priv; 3177 3178 priv = env->spr[SPR_CIABR] & PPC_BITMASK(62, 63); 3179 switch (priv) { 3180 case 0x1: /* problem */ 3181 return env->msr & ((target_ulong)1 << MSR_PR); 3182 case 0x2: /* supervisor */ 3183 return (!(env->msr & ((target_ulong)1 << MSR_PR)) && 3184 !(env->msr & ((target_ulong)1 << MSR_HV))); 3185 case 0x3: /* hypervisor */ 3186 return (!(env->msr & ((target_ulong)1 << MSR_PR)) && 3187 (env->msr & ((target_ulong)1 << MSR_HV))); 3188 default: 3189 g_assert_not_reached(); 3190 } 3191 } 3192 #endif 3193 3194 return false; 3195 } 3196 3197 bool ppc_cpu_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp) 3198 { 3199 #if defined(TARGET_PPC64) 3200 CPUPPCState *env = cpu_env(cs); 3201 3202 if (env->insns_flags2 & PPC2_ISA207S) { 3203 if (wp == env->dawr0_watchpoint) { 3204 uint32_t dawrx = env->spr[SPR_DAWRX0]; 3205 bool wt = extract32(dawrx, PPC_BIT_NR(59), 1); 3206 bool wti = extract32(dawrx, PPC_BIT_NR(60), 1); 3207 bool hv = extract32(dawrx, PPC_BIT_NR(61), 1); 3208 bool sv = extract32(dawrx, PPC_BIT_NR(62), 1); 3209 bool pr = extract32(dawrx, PPC_BIT_NR(62), 1); 3210 3211 if ((env->msr & ((target_ulong)1 << MSR_PR)) && !pr) { 3212 return false; 3213 } else if ((env->msr & ((target_ulong)1 << MSR_HV)) && !hv) { 3214 return false; 3215 } else if (!sv) { 3216 return false; 3217 } 3218 3219 if (!wti) { 3220 if (env->msr & ((target_ulong)1 << MSR_DR)) { 3221 if (!wt) { 3222 return false; 3223 } 3224 } else { 3225 if (wt) { 3226 return false; 3227 } 3228 } 3229 } 3230 3231 return true; 3232 } 3233 } 3234 #endif 3235 3236 return false; 3237 } 3238 3239 #endif /* !CONFIG_USER_ONLY */ 3240 #endif /* CONFIG_TCG */ 3241