1 /* 2 * PowerPC exception emulation helpers for QEMU. 3 * 4 * Copyright (c) 2003-2007 Jocelyn Mayer 5 * 6 * This library is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU Lesser General Public 8 * License as published by the Free Software Foundation; either 9 * version 2.1 of the License, or (at your option) any later version. 10 * 11 * This library is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * Lesser General Public License for more details. 15 * 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 #include "qemu/osdep.h" 20 #include "qemu/main-loop.h" 21 #include "cpu.h" 22 #include "exec/exec-all.h" 23 #include "internal.h" 24 #include "helper_regs.h" 25 26 #ifdef CONFIG_TCG 27 #include "exec/helper-proto.h" 28 #include "exec/cpu_ldst.h" 29 #endif 30 31 /* #define DEBUG_OP */ 32 /* #define DEBUG_SOFTWARE_TLB */ 33 /* #define DEBUG_EXCEPTIONS */ 34 35 #ifdef DEBUG_EXCEPTIONS 36 # define LOG_EXCP(...) qemu_log(__VA_ARGS__) 37 #else 38 # define LOG_EXCP(...) 
do { } while (0) 39 #endif 40 41 /*****************************************************************************/ 42 /* Exception processing */ 43 #if defined(CONFIG_USER_ONLY) 44 void ppc_cpu_do_interrupt(CPUState *cs) 45 { 46 PowerPCCPU *cpu = POWERPC_CPU(cs); 47 CPUPPCState *env = &cpu->env; 48 49 cs->exception_index = POWERPC_EXCP_NONE; 50 env->error_code = 0; 51 } 52 53 static void ppc_hw_interrupt(CPUPPCState *env) 54 { 55 CPUState *cs = env_cpu(env); 56 57 cs->exception_index = POWERPC_EXCP_NONE; 58 env->error_code = 0; 59 } 60 #else /* defined(CONFIG_USER_ONLY) */ 61 static inline void dump_syscall(CPUPPCState *env) 62 { 63 qemu_log_mask(CPU_LOG_INT, "syscall r0=%016" PRIx64 64 " r3=%016" PRIx64 " r4=%016" PRIx64 " r5=%016" PRIx64 65 " r6=%016" PRIx64 " r7=%016" PRIx64 " r8=%016" PRIx64 66 " nip=" TARGET_FMT_lx "\n", 67 ppc_dump_gpr(env, 0), ppc_dump_gpr(env, 3), 68 ppc_dump_gpr(env, 4), ppc_dump_gpr(env, 5), 69 ppc_dump_gpr(env, 6), ppc_dump_gpr(env, 7), 70 ppc_dump_gpr(env, 8), env->nip); 71 } 72 73 static inline void dump_syscall_vectored(CPUPPCState *env) 74 { 75 qemu_log_mask(CPU_LOG_INT, "syscall r0=%016" PRIx64 76 " r3=%016" PRIx64 " r4=%016" PRIx64 " r5=%016" PRIx64 77 " r6=%016" PRIx64 " r7=%016" PRIx64 " r8=%016" PRIx64 78 " nip=" TARGET_FMT_lx "\n", 79 ppc_dump_gpr(env, 0), ppc_dump_gpr(env, 3), 80 ppc_dump_gpr(env, 4), ppc_dump_gpr(env, 5), 81 ppc_dump_gpr(env, 6), ppc_dump_gpr(env, 7), 82 ppc_dump_gpr(env, 8), env->nip); 83 } 84 85 static inline void dump_hcall(CPUPPCState *env) 86 { 87 qemu_log_mask(CPU_LOG_INT, "hypercall r3=%016" PRIx64 88 " r4=%016" PRIx64 " r5=%016" PRIx64 " r6=%016" PRIx64 89 " r7=%016" PRIx64 " r8=%016" PRIx64 " r9=%016" PRIx64 90 " r10=%016" PRIx64 " r11=%016" PRIx64 " r12=%016" PRIx64 91 " nip=" TARGET_FMT_lx "\n", 92 ppc_dump_gpr(env, 3), ppc_dump_gpr(env, 4), 93 ppc_dump_gpr(env, 5), ppc_dump_gpr(env, 6), 94 ppc_dump_gpr(env, 7), ppc_dump_gpr(env, 8), 95 ppc_dump_gpr(env, 9), ppc_dump_gpr(env, 10), 96 
ppc_dump_gpr(env, 11), ppc_dump_gpr(env, 12), 97 env->nip); 98 } 99 100 static int powerpc_reset_wakeup(CPUState *cs, CPUPPCState *env, int excp, 101 target_ulong *msr) 102 { 103 /* We no longer are in a PM state */ 104 env->resume_as_sreset = false; 105 106 /* Pretend to be returning from doze always as we don't lose state */ 107 *msr |= SRR1_WS_NOLOSS; 108 109 /* Machine checks are sent normally */ 110 if (excp == POWERPC_EXCP_MCHECK) { 111 return excp; 112 } 113 switch (excp) { 114 case POWERPC_EXCP_RESET: 115 *msr |= SRR1_WAKERESET; 116 break; 117 case POWERPC_EXCP_EXTERNAL: 118 *msr |= SRR1_WAKEEE; 119 break; 120 case POWERPC_EXCP_DECR: 121 *msr |= SRR1_WAKEDEC; 122 break; 123 case POWERPC_EXCP_SDOOR: 124 *msr |= SRR1_WAKEDBELL; 125 break; 126 case POWERPC_EXCP_SDOOR_HV: 127 *msr |= SRR1_WAKEHDBELL; 128 break; 129 case POWERPC_EXCP_HV_MAINT: 130 *msr |= SRR1_WAKEHMI; 131 break; 132 case POWERPC_EXCP_HVIRT: 133 *msr |= SRR1_WAKEHVI; 134 break; 135 default: 136 cpu_abort(cs, "Unsupported exception %d in Power Save mode\n", 137 excp); 138 } 139 return POWERPC_EXCP_RESET; 140 } 141 142 /* 143 * AIL - Alternate Interrupt Location, a mode that allows interrupts to be 144 * taken with the MMU on, and which uses an alternate location (e.g., so the 145 * kernel/hv can map the vectors there with an effective address). 146 * 147 * An interrupt is considered to be taken "with AIL" or "AIL applies" if they 148 * are delivered in this way. AIL requires the LPCR to be set to enable this 149 * mode, and then a number of conditions have to be true for AIL to apply. 150 * 151 * First of all, SRESET, MCE, and HMI are always delivered without AIL, because 152 * they specifically want to be in real mode (e.g., the MCE might be signaling 153 * a SLB multi-hit which requires SLB flush before the MMU can be enabled). 
154 * 155 * After that, behaviour depends on the current MSR[IR], MSR[DR], MSR[HV], 156 * whether or not the interrupt changes MSR[HV] from 0 to 1, and the current 157 * radix mode (LPCR[HR]). 158 * 159 * POWER8, POWER9 with LPCR[HR]=0 160 * | LPCR[AIL] | MSR[IR||DR] | MSR[HV] | new MSR[HV] | AIL | 161 * +-----------+-------------+---------+-------------+-----+ 162 * | a | 00/01/10 | x | x | 0 | 163 * | a | 11 | 0 | 1 | 0 | 164 * | a | 11 | 1 | 1 | a | 165 * | a | 11 | 0 | 0 | a | 166 * +-------------------------------------------------------+ 167 * 168 * POWER9 with LPCR[HR]=1 169 * | LPCR[AIL] | MSR[IR||DR] | MSR[HV] | new MSR[HV] | AIL | 170 * +-----------+-------------+---------+-------------+-----+ 171 * | a | 00/01/10 | x | x | 0 | 172 * | a | 11 | x | x | a | 173 * +-------------------------------------------------------+ 174 * 175 * The difference with POWER9 being that MSR[HV] 0->1 interrupts can be sent to 176 * the hypervisor in AIL mode if the guest is radix. This is good for 177 * performance but allows the guest to influence the AIL of hypervisor 178 * interrupts using its MSR, and also the hypervisor must disallow guest 179 * interrupts (MSR[HV] 0->0) from using AIL if the hypervisor does not want to 180 * use AIL for its MSR[HV] 0->1 interrupts. 181 * 182 * POWER10 addresses those issues with a new LPCR[HAIL] bit that is applied to 183 * interrupts that begin execution with MSR[HV]=1 (so both MSR[HV] 0->1 and 184 * MSR[HV] 1->1). 185 * 186 * HAIL=1 is equivalent to AIL=3, for interrupts delivered with MSR[HV]=1. 
 *
 * POWER10 behaviour is
 * | LPCR[AIL] | LPCR[HAIL] | MSR[IR||DR] | MSR[HV] | new MSR[HV] | AIL |
 * +-----------+------------+-------------+---------+-------------+-----+
 * | a         | h          | 00/01/10    | 0       | 0           | 0   |
 * | a         | h          | 11          | 0       | 0           | a   |
 * | a         | h          | x           | 0       | 1           | h   |
 * | a         | h          | 00/01/10    | 1       | 1           | 0   |
 * | a         | h          | 11          | 1       | 1           | h   |
 * +--------------------------------------------------------------------+
 */
static inline void ppc_excp_apply_ail(PowerPCCPU *cpu, int excp_model, int excp,
                                      target_ulong msr,
                                      target_ulong *new_msr,
                                      target_ulong *vector)
{
#if defined(TARGET_PPC64)
    CPUPPCState *env = &cpu->env;
    bool mmu_all_on = ((msr >> MSR_IR) & 1) && ((msr >> MSR_DR) & 1);
    bool hv_escalation = !(msr & MSR_HVB) && (*new_msr & MSR_HVB);
    /* Resolved AIL level (0..3); stays 0 when AIL does not apply. */
    int ail = 0;

    if (excp == POWERPC_EXCP_MCHECK ||
        excp == POWERPC_EXCP_RESET ||
        excp == POWERPC_EXCP_HV_MAINT) {
        /* SRESET, MCE, HMI never apply AIL */
        return;
    }

    if (excp_model == POWERPC_EXCP_POWER8 ||
        excp_model == POWERPC_EXCP_POWER9) {
        if (!mmu_all_on) {
            /* AIL only works if MSR[IR] and MSR[DR] are both enabled. */
            return;
        }
        if (hv_escalation && !(env->spr[SPR_LPCR] & LPCR_HR)) {
            /*
             * AIL does not work if there is a MSR[HV] 0->1 transition and the
             * partition is in HPT mode. For radix guests, such interrupts are
             * allowed to be delivered to the hypervisor in ail mode.
             */
            return;
        }

        ail = (env->spr[SPR_LPCR] & LPCR_AIL) >> LPCR_AIL_SHIFT;
        if (ail == 0) {
            return;
        }
        if (ail == 1) {
            /* AIL=1 is reserved, treat it like AIL=0 */
            return;
        }

    } else if (excp_model == POWERPC_EXCP_POWER10) {
        if (!mmu_all_on && !hv_escalation) {
            /*
             * AIL works for HV interrupts even with guest MSR[IR/DR] disabled.
             * Guest->guest and HV->HV interrupts do require MMU on.
             */
            return;
        }

        if (*new_msr & MSR_HVB) {
            if (!(env->spr[SPR_LPCR] & LPCR_HAIL)) {
                /* HV interrupts depend on LPCR[HAIL] */
                return;
            }
            ail = 3; /* HAIL=1 gives AIL=3 behaviour for HV interrupts */
        } else {
            ail = (env->spr[SPR_LPCR] & LPCR_AIL) >> LPCR_AIL_SHIFT;
        }
        if (ail == 0) {
            return;
        }
        if (ail == 1 || ail == 2) {
            /* AIL=1 and AIL=2 are reserved, treat them like AIL=0 */
            return;
        }
    } else {
        /* Other processors do not support AIL */
        return;
    }

    /*
     * AIL applies, so the new MSR gets IR and DR set, and an offset applied
     * to the new IP.
     */
    *new_msr |= (1 << MSR_IR) | (1 << MSR_DR);

    if (excp != POWERPC_EXCP_SYSCALL_VECTORED) {
        if (ail == 2) {
            *vector |= 0x0000000000018000ull;
        } else if (ail == 3) {
            *vector |= 0xc000000000004000ull;
        }
    } else {
        /*
         * scv AIL is a little different. AIL=2 does not change the address,
         * only the MSR. AIL=3 replaces the 0x17000 base with 0xc...3000.
         */
        if (ail == 3) {
            *vector &= ~0x0000000000017000ull; /* Un-apply the base offset */
            *vector |= 0xc000000000003000ull; /* Apply scv's AIL=3 offset */
        }
    }
#endif
}

/*
 * Install the final machine state for interrupt delivery: MSR (masked by
 * msr_mask), NIP, and cleared pending-exception bookkeeping.
 */
static inline void powerpc_set_excp_state(PowerPCCPU *cpu,
                                          target_ulong vector, target_ulong msr)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;

    /*
     * We don't use hreg_store_msr here as already have treated any
     * special case that could occur. Just store MSR and update hflags
     *
     * Note: We *MUST* not use hreg_store_msr() as-is anyway because it
     * will prevent setting of the HV bit which some exceptions might need
     * to do.
     */
    env->msr = msr & env->msr_mask;
    hreg_compute_hflags(env);
    env->nip = vector;
    /* Reset exception state */
    cs->exception_index = POWERPC_EXCP_NONE;
    env->error_code = 0;

    /* Reset the reservation */
    env->reserve_addr = -1;

    /*
     * Any interrupt is context synchronizing, check if TCG TLB needs
     * a delayed flush on ppc64
     */
    check_tlb_flush(env, false);
}

/*
 * Note that this function should be greatly optimized when called
 * with a constant excp, from ppc_hw_interrupt
 */
static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    target_ulong msr, new_msr, vector;
    /* srr0/srr1: save/restore SPRs; asrr0/asrr1: optional alternates (-1 = none) */
    int srr0, srr1, asrr0, asrr1, lev = -1;
    bool lpes0;

    qemu_log_mask(CPU_LOG_INT, "Raise exception at " TARGET_FMT_lx
                  " => %08x (%02x)\n", env->nip, excp, env->error_code);

    /* new srr1 value excluding must-be-zero bits */
    if (excp_model == POWERPC_EXCP_BOOKE) {
        msr = env->msr;
    } else {
        msr = env->msr & ~0x783f0000ULL;
    }

    /*
     * new interrupt handler msr preserves existing HV and ME unless
     * explicitly overriden
     */
    new_msr = env->msr & (((target_ulong)1 << MSR_ME) | MSR_HVB);

    /* target registers */
    srr0 = SPR_SRR0;
    srr1 = SPR_SRR1;
    asrr0 = -1;
    asrr1 = -1;

    /*
     * check for special resume at 0x100 from doze/nap/sleep/winkle on
     * P7/P8/P9
     */
    if (env->resume_as_sreset) {
        excp = powerpc_reset_wakeup(cs, env, excp, &msr);
    }

    /*
     * Exception targeting modifiers
     *
     * LPES0 is supported on POWER7/8/9
     * LPES1 is not supported (old iSeries mode)
     *
     * On anything else, we behave as if LPES0 is 1
     * (externals don't alter MSR:HV)
     */
#if defined(TARGET_PPC64)
    if (excp_model == POWERPC_EXCP_POWER7 ||
        excp_model == POWERPC_EXCP_POWER8 ||
        excp_model == POWERPC_EXCP_POWER9 ||
        excp_model == POWERPC_EXCP_POWER10) {
        lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
    } else
#endif /* defined(TARGET_PPC64) */
    {
        lpes0 = true;
    }

    /*
     * Hypervisor emulation assistance interrupt only exists on server
     * arch 2.05 server or later. We also don't want to generate it if
     * we don't have HVB in msr_mask (PAPR mode).
     */
    if (excp == POWERPC_EXCP_HV_EMU
#if defined(TARGET_PPC64)
        && !(mmu_is_64bit(env->mmu_model) && (env->msr_mask & MSR_HVB))
#endif /* defined(TARGET_PPC64) */
        ) {
        excp = POWERPC_EXCP_PROGRAM;
    }

    switch (excp) {
    case POWERPC_EXCP_NONE:
        /* Should never happen */
        return;
    case POWERPC_EXCP_CRITICAL:    /* Critical input */
        switch (excp_model) {
        case POWERPC_EXCP_40x:
            srr0 = SPR_40x_SRR2;
            srr1 = SPR_40x_SRR3;
            break;
        case POWERPC_EXCP_BOOKE:
            srr0 = SPR_BOOKE_CSRR0;
            srr1 = SPR_BOOKE_CSRR1;
            break;
        case POWERPC_EXCP_G2:
            break;
        default:
            goto excp_invalid;
        }
        break;
    case POWERPC_EXCP_MCHECK:    /* Machine check exception */
        if (msr_me == 0) {
            /*
             * Machine check exception is not enabled. Enter
             * checkstop state.
             */
            fprintf(stderr, "Machine check while not allowed. "
                    "Entering checkstop state\n");
            if (qemu_log_separate()) {
                qemu_log("Machine check while not allowed. "
                         "Entering checkstop state\n");
            }
            cs->halted = 1;
            cpu_interrupt_exittb(cs);
        }
        if (env->msr_mask & MSR_HVB) {
            /*
             * ISA specifies HV, but can be delivered to guest with HV
             * clear (e.g., see FWNMI in PAPR).
             */
            new_msr |= (target_ulong)MSR_HVB;
        }

        /* machine check exceptions don't have ME set */
        new_msr &= ~((target_ulong)1 << MSR_ME);

        /* XXX: should also have something loaded in DAR / DSISR */
        switch (excp_model) {
        case POWERPC_EXCP_40x:
            srr0 = SPR_40x_SRR2;
            srr1 = SPR_40x_SRR3;
            break;
        case POWERPC_EXCP_BOOKE:
            /* FIXME: choose one or the other based on CPU type */
            srr0 = SPR_BOOKE_MCSRR0;
            srr1 = SPR_BOOKE_MCSRR1;
            asrr0 = SPR_BOOKE_CSRR0;
            asrr1 = SPR_BOOKE_CSRR1;
            break;
        default:
            break;
        }
        break;
    case POWERPC_EXCP_DSI:       /* Data storage exception */
        LOG_EXCP("DSI exception: DSISR=" TARGET_FMT_lx" DAR=" TARGET_FMT_lx
                 "\n", env->spr[SPR_DSISR], env->spr[SPR_DAR]);
        break;
    case POWERPC_EXCP_ISI:       /* Instruction storage exception */
        LOG_EXCP("ISI exception: msr=" TARGET_FMT_lx ", nip=" TARGET_FMT_lx
                 "\n", msr, env->nip);
        msr |= env->error_code;
        break;
    case POWERPC_EXCP_EXTERNAL:  /* External input */
        /* NOTE(review): cs already equals CPU(cpu); redundant but harmless */
        cs = CPU(cpu);

        if (!lpes0) {
            new_msr |= (target_ulong)MSR_HVB;
            new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
            srr0 = SPR_HSRR0;
            srr1 = SPR_HSRR1;
        }
        if (env->mpic_proxy) {
            /* IACK the IRQ on delivery */
            env->spr[SPR_BOOKE_EPR] = ldl_phys(cs->as, env->mpic_iack);
        }
        break;
    case POWERPC_EXCP_ALIGN:     /* Alignment exception */
        /* Get rS/rD and rA from faulting opcode */
        /*
         * Note: the opcode fields will not be set properly for a
         * direct store load/store, but nobody cares as nobody
         * actually uses direct store segments.
         */
        env->spr[SPR_DSISR] |= (env->error_code & 0x03FF0000) >> 16;
        break;
    case POWERPC_EXCP_PROGRAM:   /* Program exception */
        switch (env->error_code & ~0xF) {
        case POWERPC_EXCP_FP:
            if ((msr_fe0 == 0 && msr_fe1 == 0) || msr_fp == 0) {
                LOG_EXCP("Ignore floating point exception\n");
                cs->exception_index = POWERPC_EXCP_NONE;
                env->error_code = 0;
                return;
            }

            /*
             * FP exceptions always have NIP pointing to the faulting
             * instruction, so always use store_next and claim we are
             * precise in the MSR.
             */
            msr |= 0x00100000;
            env->spr[SPR_BOOKE_ESR] = ESR_FP;
            break;
        case POWERPC_EXCP_INVAL:
            LOG_EXCP("Invalid instruction at " TARGET_FMT_lx "\n", env->nip);
            msr |= 0x00080000;
            env->spr[SPR_BOOKE_ESR] = ESR_PIL;
            break;
        case POWERPC_EXCP_PRIV:
            msr |= 0x00040000;
            env->spr[SPR_BOOKE_ESR] = ESR_PPR;
            break;
        case POWERPC_EXCP_TRAP:
            msr |= 0x00020000;
            env->spr[SPR_BOOKE_ESR] = ESR_PTR;
            break;
        default:
            /* Should never occur */
            cpu_abort(cs, "Invalid program exception %d. Aborting\n",
                      env->error_code);
            break;
        }
        break;
    case POWERPC_EXCP_SYSCALL:   /* System call exception */
        /* lev == 1 selects the hypervisor (hypercall) path below */
        lev = env->error_code;

        if ((lev == 1) && cpu->vhyp) {
            dump_hcall(env);
        } else {
            dump_syscall(env);
        }

        /*
         * We need to correct the NIP which in this case is supposed
         * to point to the next instruction
         */
        env->nip += 4;

        /* "PAPR mode" built-in hypercall emulation */
        if ((lev == 1) && cpu->vhyp) {
            PPCVirtualHypervisorClass *vhc =
                PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
            vhc->hypercall(cpu->vhyp, cpu);
            return;
        }
        if (lev == 1) {
            new_msr |= (target_ulong)MSR_HVB;
        }
        break;
    case POWERPC_EXCP_SYSCALL_VECTORED: /* scv exception */
        lev = env->error_code;
        dump_syscall_vectored(env);
        env->nip += 4;
        /* scv preserves the caller's EE and RI in the new MSR */
        new_msr |= env->msr & ((target_ulong)1 << MSR_EE);
        new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
        break;
    case POWERPC_EXCP_FPU:       /* Floating-point unavailable exception */
    case POWERPC_EXCP_APU:       /* Auxiliary processor unavailable */
    case POWERPC_EXCP_DECR:      /* Decrementer exception */
        break;
    case POWERPC_EXCP_FIT:       /* Fixed-interval timer interrupt */
        /* FIT on 4xx */
        LOG_EXCP("FIT exception\n");
        break;
    case POWERPC_EXCP_WDT:       /* Watchdog timer interrupt */
        LOG_EXCP("WDT exception\n");
        switch (excp_model) {
        case POWERPC_EXCP_BOOKE:
            srr0 = SPR_BOOKE_CSRR0;
            srr1 = SPR_BOOKE_CSRR1;
            break;
        default:
            break;
        }
        break;
    case POWERPC_EXCP_DTLB:      /* Data TLB error */
    case POWERPC_EXCP_ITLB:      /* Instruction TLB error */
        break;
    case POWERPC_EXCP_DEBUG:     /* Debug interrupt */
        if (env->flags & POWERPC_FLAG_DE) {
            /* FIXME: choose one or the other based on CPU type */
            srr0 = SPR_BOOKE_DSRR0;
            srr1 = SPR_BOOKE_DSRR1;
            asrr0 = SPR_BOOKE_CSRR0;
            asrr1 = SPR_BOOKE_CSRR1;
            /* DBSR already modified by caller */
        } else {
            cpu_abort(cs, "Debug exception triggered on unsupported model\n");
        }
        break;
    case POWERPC_EXCP_SPEU:      /* SPE/embedded floating-point unavailable */
        env->spr[SPR_BOOKE_ESR] = ESR_SPV;
        break;
    case POWERPC_EXCP_EFPDI:     /* Embedded floating-point data interrupt */
        /* XXX: TODO */
        cpu_abort(cs, "Embedded floating point data exception "
                  "is not implemented yet !\n");
        env->spr[SPR_BOOKE_ESR] = ESR_SPV;
        break;
    case POWERPC_EXCP_EFPRI:     /* Embedded floating-point round interrupt */
        /* XXX: TODO */
        cpu_abort(cs, "Embedded floating point round exception "
                  "is not implemented yet !\n");
        env->spr[SPR_BOOKE_ESR] = ESR_SPV;
        break;
    case POWERPC_EXCP_EPERFM:    /* Embedded performance monitor interrupt */
        /* XXX: TODO */
        cpu_abort(cs,
                  "Performance counter exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_DOORI:     /* Embedded doorbell interrupt */
        break;
    case POWERPC_EXCP_DOORCI:    /* Embedded doorbell critical interrupt */
        srr0 = SPR_BOOKE_CSRR0;
        srr1 = SPR_BOOKE_CSRR1;
        break;
    case POWERPC_EXCP_RESET:     /* System reset exception */
        /* A power-saving exception sets ME, otherwise it is unchanged */
        if (msr_pow) {
            /* indicate that we resumed from power save mode */
            msr |= 0x10000;
            new_msr |= ((target_ulong)1 << MSR_ME);
        }
        if (env->msr_mask & MSR_HVB) {
            /*
             * ISA specifies HV, but can be delivered to guest with HV
             * clear (e.g., see FWNMI in PAPR, NMI injection in QEMU).
             */
            new_msr |= (target_ulong)MSR_HVB;
        } else {
            if (msr_pow) {
                cpu_abort(cs, "Trying to deliver power-saving system reset "
                          "exception %d with no HV support\n", excp);
            }
        }
        break;
    case POWERPC_EXCP_DSEG:      /* Data segment exception */
    case POWERPC_EXCP_ISEG:      /* Instruction segment exception */
    case POWERPC_EXCP_TRACE:     /* Trace exception */
        break;
    case POWERPC_EXCP_HISI:      /* Hypervisor instruction storage exception */
        msr |= env->error_code;
        /* fall through */
    case POWERPC_EXCP_HDECR:     /* Hypervisor decrementer exception */
    case POWERPC_EXCP_HDSI:      /* Hypervisor data storage exception */
    case POWERPC_EXCP_HDSEG:     /* Hypervisor data segment exception */
    case POWERPC_EXCP_HISEG:     /* Hypervisor instruction segment exception */
    case POWERPC_EXCP_SDOOR_HV:  /* Hypervisor Doorbell interrupt */
    case POWERPC_EXCP_HV_EMU:
    case POWERPC_EXCP_HVIRT:     /* Hypervisor virtualization */
        srr0 = SPR_HSRR0;
        srr1 = SPR_HSRR1;
        new_msr |= (target_ulong)MSR_HVB;
        new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
        break;
    case POWERPC_EXCP_VPU:       /* Vector unavailable exception */
    case POWERPC_EXCP_VSXU:      /* VSX unavailable exception */
    case POWERPC_EXCP_FU:        /* Facility unavailable exception */
#ifdef TARGET_PPC64
        env->spr[SPR_FSCR] |= ((target_ulong)env->error_code << 56);
#endif
        break;
    case POWERPC_EXCP_HV_FU:     /* Hypervisor Facility Unavailable Exception */
#ifdef TARGET_PPC64
        env->spr[SPR_HFSCR] |= ((target_ulong)env->error_code << FSCR_IC_POS);
        srr0 = SPR_HSRR0;
        srr1 = SPR_HSRR1;
        new_msr |= (target_ulong)MSR_HVB;
        new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
#endif
        break;
    case POWERPC_EXCP_PIT:       /* Programmable interval timer interrupt */
        LOG_EXCP("PIT exception\n");
        break;
    case POWERPC_EXCP_IO:        /* IO error exception */
        /* XXX: TODO */
        cpu_abort(cs, "601 IO error exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_RUNM:      /* Run mode exception */
        /* XXX: TODO */
        cpu_abort(cs, "601 run mode exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_EMUL:      /* Emulation trap exception */
        /* XXX: TODO */
        cpu_abort(cs, "602 emulation trap exception "
                  "is not implemented yet !\n");
        break;
    case POWERPC_EXCP_IFTLB:     /* Instruction fetch TLB error */
        switch (excp_model) {
        case POWERPC_EXCP_602:
        case POWERPC_EXCP_603:
        case POWERPC_EXCP_603E:
        case POWERPC_EXCP_G2:
            goto tlb_miss_tgpr;
        case POWERPC_EXCP_7x5:
            goto tlb_miss;
        case POWERPC_EXCP_74xx:
            goto tlb_miss_74xx;
        default:
            cpu_abort(cs, "Invalid instruction TLB miss exception\n");
            break;
        }
        break;
    case POWERPC_EXCP_DLTLB:     /* Data load TLB miss */
        switch (excp_model) {
        case POWERPC_EXCP_602:
        case POWERPC_EXCP_603:
        case POWERPC_EXCP_603E:
        case POWERPC_EXCP_G2:
            goto tlb_miss_tgpr;
        case POWERPC_EXCP_7x5:
            goto tlb_miss;
        case POWERPC_EXCP_74xx:
            goto tlb_miss_74xx;
        default:
            cpu_abort(cs, "Invalid data load TLB miss exception\n");
            break;
        }
        break;
    case POWERPC_EXCP_DSTLB:     /* Data store TLB miss */
        /* The labels below are shared by the IFTLB/DLTLB cases via goto */
        switch (excp_model) {
        case POWERPC_EXCP_602:
        case POWERPC_EXCP_603:
        case POWERPC_EXCP_603E:
        case POWERPC_EXCP_G2:
        tlb_miss_tgpr:
            /* Swap temporary saved registers with GPRs */
            if (!(new_msr & ((target_ulong)1 << MSR_TGPR))) {
                new_msr |= (target_ulong)1 << MSR_TGPR;
                hreg_swap_gpr_tgpr(env);
            }
            goto tlb_miss;
        case POWERPC_EXCP_7x5:
        tlb_miss:
#if defined(DEBUG_SOFTWARE_TLB)
            if (qemu_log_enabled()) {
                const char *es;
                target_ulong *miss, *cmp;
                int en;

                if (excp == POWERPC_EXCP_IFTLB) {
                    es = "I";
                    en = 'I';
                    miss = &env->spr[SPR_IMISS];
                    cmp = &env->spr[SPR_ICMP];
                } else {
                    if (excp == POWERPC_EXCP_DLTLB) {
                        es = "DL";
                    } else {
                        es = "DS";
                    }
                    en = 'D';
                    miss = &env->spr[SPR_DMISS];
                    cmp = &env->spr[SPR_DCMP];
                }
                qemu_log("6xx %sTLB miss: %cM " TARGET_FMT_lx " %cC "
                         TARGET_FMT_lx " H1 " TARGET_FMT_lx " H2 "
                         TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp,
                         env->spr[SPR_HASH1], env->spr[SPR_HASH2],
                         env->error_code);
            }
#endif
            msr |= env->crf[0] << 28;
            msr |= env->error_code; /* key, D/I, S/L bits */
            /* Set way using a LRU mechanism */
            msr |= ((env->last_way + 1) & (env->nb_ways - 1)) << 17;
            break;
        case POWERPC_EXCP_74xx:
        tlb_miss_74xx:
#if defined(DEBUG_SOFTWARE_TLB)
            if (qemu_log_enabled()) {
                const char *es;
                target_ulong *miss, *cmp;
                int en;

                if (excp == POWERPC_EXCP_IFTLB) {
                    es = "I";
                    en = 'I';
                    miss = &env->spr[SPR_TLBMISS];
                    cmp = &env->spr[SPR_PTEHI];
                } else {
                    if (excp == POWERPC_EXCP_DLTLB) {
                        es = "DL";
                    } else {
                        es = "DS";
                    }
                    en = 'D';
                    miss = &env->spr[SPR_TLBMISS];
                    cmp = &env->spr[SPR_PTEHI];
                }
                qemu_log("74xx %sTLB miss: %cM " TARGET_FMT_lx " %cC "
                         TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp,
                         env->error_code);
            }
#endif
            msr |= env->error_code; /* key bit */
            break;
        default:
            cpu_abort(cs, "Invalid data store TLB miss exception\n");
            break;
        }
        break;
    case POWERPC_EXCP_FPA:       /* Floating-point assist exception */
        /* XXX: TODO */
        cpu_abort(cs, "Floating point assist exception "
                  "is not implemented yet !\n");
        break;
    case POWERPC_EXCP_DABR:      /* Data address breakpoint */
        /* XXX: TODO */
        cpu_abort(cs, "DABR exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_IABR:      /* Instruction address breakpoint */
        /* XXX: TODO */
        cpu_abort(cs, "IABR exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_SMI:       /* System management interrupt */
        /* XXX: TODO */
        cpu_abort(cs, "SMI exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_THERM:     /* Thermal interrupt */
        /* XXX: TODO */
        cpu_abort(cs, "Thermal management exception "
                  "is not implemented yet !\n");
        break;
    case POWERPC_EXCP_PERFM:     /* Embedded performance monitor interrupt */
        /* XXX: TODO */
        cpu_abort(cs,
                  "Performance counter exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_VPUA:      /* Vector assist exception */
        /* XXX: TODO */
        cpu_abort(cs, "VPU assist exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_SOFTP:     /* Soft patch exception */
        /* XXX: TODO */
        cpu_abort(cs,
                  "970 soft-patch exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_MAINT:     /* Maintenance exception */
        /* XXX: TODO */
        cpu_abort(cs,
                  "970 maintenance exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_MEXTBR:    /* Maskable external breakpoint */
        /* XXX: TODO */
        cpu_abort(cs, "Maskable external exception "
                  "is not implemented yet !\n");
        break;
    case POWERPC_EXCP_NMEXTBR:   /* Non maskable external breakpoint */
        /* XXX: TODO */
        cpu_abort(cs, "Non maskable external exception "
                  "is not implemented yet !\n");
        break;
    default:
    excp_invalid:
        cpu_abort(cs, "Invalid PowerPC exception %d. Aborting\n", excp);
        break;
    }

    /* Sanity check */
    if (!(env->msr_mask & MSR_HVB)) {
        if (new_msr & MSR_HVB) {
            cpu_abort(cs, "Trying to deliver HV exception (MSR) %d with "
                      "no HV support\n", excp);
        }
        if (srr0 == SPR_HSRR0) {
            cpu_abort(cs, "Trying to deliver HV exception (HSRR) %d with "
                      "no HV support\n", excp);
        }
    }

    /*
     * Sort out endianness of interrupt, this differs depending on the
     * CPU, the HV mode, etc...
     */
#ifdef TARGET_PPC64
    if (excp_model == POWERPC_EXCP_POWER7) {
        if (!(new_msr & MSR_HVB) && (env->spr[SPR_LPCR] & LPCR_ILE)) {
            new_msr |= (target_ulong)1 << MSR_LE;
        }
    } else if (excp_model == POWERPC_EXCP_POWER8) {
        if (new_msr & MSR_HVB) {
            if (env->spr[SPR_HID0] & HID0_HILE) {
                new_msr |= (target_ulong)1 << MSR_LE;
            }
        } else if (env->spr[SPR_LPCR] & LPCR_ILE) {
            new_msr |= (target_ulong)1 << MSR_LE;
        }
    } else if (excp_model == POWERPC_EXCP_POWER9 ||
               excp_model == POWERPC_EXCP_POWER10) {
        if (new_msr & MSR_HVB) {
            if (env->spr[SPR_HID0] & HID0_POWER9_HILE) {
                new_msr |= (target_ulong)1 << MSR_LE;
            }
        } else if (env->spr[SPR_LPCR] & LPCR_ILE) {
            new_msr |= (target_ulong)1 << MSR_LE;
        }
    } else if (msr_ile) {
        new_msr |= (target_ulong)1 << MSR_LE;
    }
#else
    if (msr_ile) {
        new_msr |= (target_ulong)1 << MSR_LE;
    }
#endif

    vector = env->excp_vectors[excp];
    if (vector == (target_ulong)-1ULL) {
        cpu_abort(cs, "Raised an exception without defined vector %d\n",
                  excp);
    }

    vector |= env->excp_prefix;

    /* If any alternate SRR register are defined, duplicate saved values */
    if (asrr0 != -1) {
        env->spr[asrr0] = env->nip;
    }
    if (asrr1 != -1) {
        env->spr[asrr1] = msr;
    }

#if defined(TARGET_PPC64)
    if (excp_model == POWERPC_EXCP_BOOKE) {
        if (env->spr[SPR_BOOKE_EPCR] & EPCR_ICM) {
            /* Cat.64-bit: EPCR.ICM is copied to MSR.CM */
            new_msr |= (target_ulong)1 << MSR_CM;
        } else {
            vector = (uint32_t)vector;
        }
    } else {
        if (!msr_isf && !mmu_is_64bit(env->mmu_model)) {
            vector = (uint32_t)vector;
        } else {
            new_msr |= (target_ulong)1 << MSR_SF;
        }
    }
#endif

    if (excp != POWERPC_EXCP_SYSCALL_VECTORED) {
        /* Save PC */
        env->spr[srr0] = env->nip;

        /* Save MSR */
        env->spr[srr1] = msr;

#if defined(TARGET_PPC64)
    } else {
        /* scv saves state in LR/CTR instead of SRR0/SRR1 */
        vector += lev * 0x20;

        env->lr = env->nip;
        env->ctr = msr;
#endif
    }

    /* This can update new_msr and vector if AIL applies */
    ppc_excp_apply_ail(cpu, excp_model, excp, msr, &new_msr, &vector);

    powerpc_set_excp_state(cpu, vector, new_msr);
}

/* Deliver the exception currently latched in cs->exception_index. */
void ppc_cpu_do_interrupt(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;

    powerpc_excp(cpu, env->excp_model, cs->exception_index);
}

static void ppc_hw_interrupt(CPUPPCState *env)
{
    PowerPCCPU *cpu = env_archcpu(env);
    bool async_deliver;

    /* External reset */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_RESET)) {
        env->pending_interrupts &= ~(1 << PPC_INTERRUPT_RESET);
        powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_RESET);
        return;
    }
    /* Machine check exception */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_MCK)) {
        env->pending_interrupts &= ~(1 << PPC_INTERRUPT_MCK);
        powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_MCHECK);
        return;
    }
#if 0 /* TODO */
    /* External debug exception */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_DEBUG)) {
        env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DEBUG);
        powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DEBUG);
        return;
    }
#endif

    /*
     * For interrupts that gate on MSR:EE, we need to do something a
     * bit more subtle, as we need to let them through even when EE is
     * clear when coming out of some power management states (in order
     * for them to become a 0x100.
     */
    async_deliver = (msr_ee != 0) || env->resume_as_sreset;

    /* Hypervisor decrementer exception */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDECR)) {
        /* LPCR will be clear when not supported so this will work */
        bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE);
        if ((async_deliver || msr_hv == 0) && hdice) {
            /* HDEC clears on delivery */
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDECR);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_HDECR);
            return;
        }
    }

    /* Hypervisor virtualization interrupt */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_HVIRT)) {
        /* LPCR will be clear when not supported so this will work */
        bool hvice = !!(env->spr[SPR_LPCR] & LPCR_HVICE);
        if ((async_deliver || msr_hv == 0) && hvice) {
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_HVIRT);
            return;
        }
    }

    /* External interrupt can ignore MSR:EE under some circumstances */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_EXT)) {
        bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
        bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC);
        /* HEIC blocks delivery to the hypervisor */
        if ((async_deliver && !(heic && msr_hv && !msr_pr)) ||
            (env->has_hv_mode && msr_hv == 0 && !lpes0)) {
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_EXTERNAL);
            return;
        }
    }
    if (msr_ce != 0) {
        /* External critical interrupt */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_CEXT)) {
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_CRITICAL);
            return;
        }
    }
    /* Remaining interrupts are gated on MSR:EE (or PM-state wakeup) */
    if (async_deliver != 0) {
        /* Watchdog timer on embedded PowerPC */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_WDT)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_WDT);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_WDT);
            return;
        }
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_CDOORBELL)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_CDOORBELL);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DOORCI);
            return;
        }
        /* Fixed interval timer on embedded PowerPC */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_FIT)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_FIT);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_FIT);
            return;
        }
        /* Programmable interval timer on embedded PowerPC */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_PIT)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PIT);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_PIT);
            return;
        }
        /* Decrementer exception */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_DECR)) {
            if (ppc_decr_clear_on_delivery(env)) {
                env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DECR);
            }
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DECR);
            return;
        }
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_DOORBELL)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DOORBELL);
            if (is_book3s_arch2x(env)) {
                powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_SDOOR);
            } else {
                powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DOORI);
            }
            return;
        }
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDOORBELL)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDOORBELL);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_SDOOR_HV);
            return;
        }
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_PERFM)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PERFM);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_PERFM);
            return;
        }
        /* Thermal interrupt */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_THERM)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_THERM);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_THERM);
            return;
        }
    }

    if (env->resume_as_sreset) {
        /*
         * This
is a bug ! It means that has_work took us out of halt without 1118 * anything to deliver while in a PM state that requires getting 1119 * out via a 0x100 1120 * 1121 * This means we will incorrectly execute past the power management 1122 * instruction instead of triggering a reset. 1123 * 1124 * It generally means a discrepancy between the wakeup conditions in the 1125 * processor has_work implementation and the logic in this function. 1126 */ 1127 cpu_abort(env_cpu(env), 1128 "Wakeup from PM state but interrupt Undelivered"); 1129 } 1130 } 1131 1132 void ppc_cpu_do_system_reset(CPUState *cs) 1133 { 1134 PowerPCCPU *cpu = POWERPC_CPU(cs); 1135 CPUPPCState *env = &cpu->env; 1136 1137 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_RESET); 1138 } 1139 1140 void ppc_cpu_do_fwnmi_machine_check(CPUState *cs, target_ulong vector) 1141 { 1142 PowerPCCPU *cpu = POWERPC_CPU(cs); 1143 CPUPPCState *env = &cpu->env; 1144 PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); 1145 target_ulong msr = 0; 1146 1147 /* 1148 * Set MSR and NIP for the handler, SRR0/1, DAR and DSISR have already 1149 * been set by KVM. 
1150 */ 1151 msr = (1ULL << MSR_ME); 1152 msr |= env->msr & (1ULL << MSR_SF); 1153 if (!(*pcc->interrupts_big_endian)(cpu)) { 1154 msr |= (1ULL << MSR_LE); 1155 } 1156 1157 powerpc_set_excp_state(cpu, vector, msr); 1158 } 1159 #endif /* !CONFIG_USER_ONLY */ 1160 1161 bool ppc_cpu_exec_interrupt(CPUState *cs, int interrupt_request) 1162 { 1163 PowerPCCPU *cpu = POWERPC_CPU(cs); 1164 CPUPPCState *env = &cpu->env; 1165 1166 if (interrupt_request & CPU_INTERRUPT_HARD) { 1167 ppc_hw_interrupt(env); 1168 if (env->pending_interrupts == 0) { 1169 cs->interrupt_request &= ~CPU_INTERRUPT_HARD; 1170 } 1171 return true; 1172 } 1173 return false; 1174 } 1175 1176 #if defined(DEBUG_OP) 1177 static void cpu_dump_rfi(target_ulong RA, target_ulong msr) 1178 { 1179 qemu_log("Return from exception at " TARGET_FMT_lx " with flags " 1180 TARGET_FMT_lx "\n", RA, msr); 1181 } 1182 #endif 1183 1184 /*****************************************************************************/ 1185 /* Exceptions processing helpers */ 1186 1187 void raise_exception_err_ra(CPUPPCState *env, uint32_t exception, 1188 uint32_t error_code, uintptr_t raddr) 1189 { 1190 CPUState *cs = env_cpu(env); 1191 1192 cs->exception_index = exception; 1193 env->error_code = error_code; 1194 cpu_loop_exit_restore(cs, raddr); 1195 } 1196 1197 void raise_exception_err(CPUPPCState *env, uint32_t exception, 1198 uint32_t error_code) 1199 { 1200 raise_exception_err_ra(env, exception, error_code, 0); 1201 } 1202 1203 void raise_exception(CPUPPCState *env, uint32_t exception) 1204 { 1205 raise_exception_err_ra(env, exception, 0, 0); 1206 } 1207 1208 void raise_exception_ra(CPUPPCState *env, uint32_t exception, 1209 uintptr_t raddr) 1210 { 1211 raise_exception_err_ra(env, exception, 0, raddr); 1212 } 1213 1214 #ifdef CONFIG_TCG 1215 void helper_raise_exception_err(CPUPPCState *env, uint32_t exception, 1216 uint32_t error_code) 1217 { 1218 raise_exception_err_ra(env, exception, error_code, 0); 1219 } 1220 1221 void 
helper_raise_exception(CPUPPCState *env, uint32_t exception) 1222 { 1223 raise_exception_err_ra(env, exception, 0, 0); 1224 } 1225 #endif 1226 1227 #if !defined(CONFIG_USER_ONLY) 1228 #ifdef CONFIG_TCG 1229 void helper_store_msr(CPUPPCState *env, target_ulong val) 1230 { 1231 uint32_t excp = hreg_store_msr(env, val, 0); 1232 1233 if (excp != 0) { 1234 CPUState *cs = env_cpu(env); 1235 cpu_interrupt_exittb(cs); 1236 raise_exception(env, excp); 1237 } 1238 } 1239 1240 #if defined(TARGET_PPC64) 1241 void helper_scv(CPUPPCState *env, uint32_t lev) 1242 { 1243 if (env->spr[SPR_FSCR] & (1ull << FSCR_SCV)) { 1244 raise_exception_err(env, POWERPC_EXCP_SYSCALL_VECTORED, lev); 1245 } else { 1246 raise_exception_err(env, POWERPC_EXCP_FU, FSCR_IC_SCV); 1247 } 1248 } 1249 1250 void helper_pminsn(CPUPPCState *env, powerpc_pm_insn_t insn) 1251 { 1252 CPUState *cs; 1253 1254 cs = env_cpu(env); 1255 cs->halted = 1; 1256 1257 /* 1258 * The architecture specifies that HDEC interrupts are discarded 1259 * in PM states 1260 */ 1261 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDECR); 1262 1263 /* Condition for waking up at 0x100 */ 1264 env->resume_as_sreset = (insn != PPC_PM_STOP) || 1265 (env->spr[SPR_PSSCR] & PSSCR_EC); 1266 } 1267 #endif /* defined(TARGET_PPC64) */ 1268 #endif /* CONFIG_TCG */ 1269 1270 static inline void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr) 1271 { 1272 CPUState *cs = env_cpu(env); 1273 1274 /* MSR:POW cannot be set by any form of rfi */ 1275 msr &= ~(1ULL << MSR_POW); 1276 1277 #if defined(TARGET_PPC64) 1278 /* Switching to 32-bit ? 
Crop the nip */ 1279 if (!msr_is_64bit(env, msr)) { 1280 nip = (uint32_t)nip; 1281 } 1282 #else 1283 nip = (uint32_t)nip; 1284 #endif 1285 /* XXX: beware: this is false if VLE is supported */ 1286 env->nip = nip & ~((target_ulong)0x00000003); 1287 hreg_store_msr(env, msr, 1); 1288 #if defined(DEBUG_OP) 1289 cpu_dump_rfi(env->nip, env->msr); 1290 #endif 1291 /* 1292 * No need to raise an exception here, as rfi is always the last 1293 * insn of a TB 1294 */ 1295 cpu_interrupt_exittb(cs); 1296 /* Reset the reservation */ 1297 env->reserve_addr = -1; 1298 1299 /* Context synchronizing: check if TCG TLB needs flush */ 1300 check_tlb_flush(env, false); 1301 } 1302 1303 #ifdef CONFIG_TCG 1304 void helper_rfi(CPUPPCState *env) 1305 { 1306 do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1] & 0xfffffffful); 1307 } 1308 1309 #define MSR_BOOK3S_MASK 1310 #if defined(TARGET_PPC64) 1311 void helper_rfid(CPUPPCState *env) 1312 { 1313 /* 1314 * The architecture defines a number of rules for which bits can 1315 * change but in practice, we handle this in hreg_store_msr() 1316 * which will be called by do_rfi(), so there is no need to filter 1317 * here 1318 */ 1319 do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1]); 1320 } 1321 1322 void helper_rfscv(CPUPPCState *env) 1323 { 1324 do_rfi(env, env->lr, env->ctr); 1325 } 1326 1327 void helper_hrfid(CPUPPCState *env) 1328 { 1329 do_rfi(env, env->spr[SPR_HSRR0], env->spr[SPR_HSRR1]); 1330 } 1331 #endif 1332 1333 /*****************************************************************************/ 1334 /* Embedded PowerPC specific helpers */ 1335 void helper_40x_rfci(CPUPPCState *env) 1336 { 1337 do_rfi(env, env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3]); 1338 } 1339 1340 void helper_rfci(CPUPPCState *env) 1341 { 1342 do_rfi(env, env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1]); 1343 } 1344 1345 void helper_rfdi(CPUPPCState *env) 1346 { 1347 /* FIXME: choose CSRR1 or DSRR1 based on cpu type */ 1348 do_rfi(env, env->spr[SPR_BOOKE_DSRR0], 
env->spr[SPR_BOOKE_DSRR1]); 1349 } 1350 1351 void helper_rfmci(CPUPPCState *env) 1352 { 1353 /* FIXME: choose CSRR1 or MCSRR1 based on cpu type */ 1354 do_rfi(env, env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1]); 1355 } 1356 #endif /* CONFIG_TCG */ 1357 #endif /* !defined(CONFIG_USER_ONLY) */ 1358 1359 #ifdef CONFIG_TCG 1360 void helper_tw(CPUPPCState *env, target_ulong arg1, target_ulong arg2, 1361 uint32_t flags) 1362 { 1363 if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) || 1364 ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) || 1365 ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) || 1366 ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) || 1367 ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) { 1368 raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, 1369 POWERPC_EXCP_TRAP, GETPC()); 1370 } 1371 } 1372 1373 #if defined(TARGET_PPC64) 1374 void helper_td(CPUPPCState *env, target_ulong arg1, target_ulong arg2, 1375 uint32_t flags) 1376 { 1377 if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) || 1378 ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) || 1379 ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) || 1380 ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) || 1381 ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01))))) { 1382 raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, 1383 POWERPC_EXCP_TRAP, GETPC()); 1384 } 1385 } 1386 #endif 1387 #endif 1388 1389 #if !defined(CONFIG_USER_ONLY) 1390 /*****************************************************************************/ 1391 /* PowerPC 601 specific instructions (POWER bridge) */ 1392 1393 #ifdef CONFIG_TCG 1394 void helper_rfsvc(CPUPPCState *env) 1395 { 1396 do_rfi(env, env->lr, env->ctr & 0x0000FFFF); 1397 } 1398 1399 /* Embedded.Processor Control */ 1400 static int dbell2irq(target_ulong rb) 1401 { 1402 int msg = rb & DBELL_TYPE_MASK; 1403 int irq = -1; 1404 1405 switch (msg) { 1406 case DBELL_TYPE_DBELL: 1407 irq = PPC_INTERRUPT_DOORBELL; 1408 break; 1409 
case DBELL_TYPE_DBELL_CRIT: 1410 irq = PPC_INTERRUPT_CDOORBELL; 1411 break; 1412 case DBELL_TYPE_G_DBELL: 1413 case DBELL_TYPE_G_DBELL_CRIT: 1414 case DBELL_TYPE_G_DBELL_MC: 1415 /* XXX implement */ 1416 default: 1417 break; 1418 } 1419 1420 return irq; 1421 } 1422 1423 void helper_msgclr(CPUPPCState *env, target_ulong rb) 1424 { 1425 int irq = dbell2irq(rb); 1426 1427 if (irq < 0) { 1428 return; 1429 } 1430 1431 env->pending_interrupts &= ~(1 << irq); 1432 } 1433 1434 void helper_msgsnd(target_ulong rb) 1435 { 1436 int irq = dbell2irq(rb); 1437 int pir = rb & DBELL_PIRTAG_MASK; 1438 CPUState *cs; 1439 1440 if (irq < 0) { 1441 return; 1442 } 1443 1444 qemu_mutex_lock_iothread(); 1445 CPU_FOREACH(cs) { 1446 PowerPCCPU *cpu = POWERPC_CPU(cs); 1447 CPUPPCState *cenv = &cpu->env; 1448 1449 if ((rb & DBELL_BRDCAST) || (cenv->spr[SPR_BOOKE_PIR] == pir)) { 1450 cenv->pending_interrupts |= 1 << irq; 1451 cpu_interrupt(cs, CPU_INTERRUPT_HARD); 1452 } 1453 } 1454 qemu_mutex_unlock_iothread(); 1455 } 1456 1457 /* Server Processor Control */ 1458 1459 static bool dbell_type_server(target_ulong rb) 1460 { 1461 /* 1462 * A Directed Hypervisor Doorbell message is sent only if the 1463 * message type is 5. 
All other types are reserved and the 1464 * instruction is a no-op 1465 */ 1466 return (rb & DBELL_TYPE_MASK) == DBELL_TYPE_DBELL_SERVER; 1467 } 1468 1469 void helper_book3s_msgclr(CPUPPCState *env, target_ulong rb) 1470 { 1471 if (!dbell_type_server(rb)) { 1472 return; 1473 } 1474 1475 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDOORBELL); 1476 } 1477 1478 static void book3s_msgsnd_common(int pir, int irq) 1479 { 1480 CPUState *cs; 1481 1482 qemu_mutex_lock_iothread(); 1483 CPU_FOREACH(cs) { 1484 PowerPCCPU *cpu = POWERPC_CPU(cs); 1485 CPUPPCState *cenv = &cpu->env; 1486 1487 /* TODO: broadcast message to all threads of the same processor */ 1488 if (cenv->spr_cb[SPR_PIR].default_value == pir) { 1489 cenv->pending_interrupts |= 1 << irq; 1490 cpu_interrupt(cs, CPU_INTERRUPT_HARD); 1491 } 1492 } 1493 qemu_mutex_unlock_iothread(); 1494 } 1495 1496 void helper_book3s_msgsnd(target_ulong rb) 1497 { 1498 int pir = rb & DBELL_PROCIDTAG_MASK; 1499 1500 if (!dbell_type_server(rb)) { 1501 return; 1502 } 1503 1504 book3s_msgsnd_common(pir, PPC_INTERRUPT_HDOORBELL); 1505 } 1506 1507 #if defined(TARGET_PPC64) 1508 void helper_book3s_msgclrp(CPUPPCState *env, target_ulong rb) 1509 { 1510 helper_hfscr_facility_check(env, HFSCR_MSGP, "msgclrp", HFSCR_IC_MSGP); 1511 1512 if (!dbell_type_server(rb)) { 1513 return; 1514 } 1515 1516 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DOORBELL); 1517 } 1518 1519 /* 1520 * sends a message to other threads that are on the same 1521 * multi-threaded processor 1522 */ 1523 void helper_book3s_msgsndp(CPUPPCState *env, target_ulong rb) 1524 { 1525 int pir = env->spr_cb[SPR_PIR].default_value; 1526 1527 helper_hfscr_facility_check(env, HFSCR_MSGP, "msgsndp", HFSCR_IC_MSGP); 1528 1529 if (!dbell_type_server(rb)) { 1530 return; 1531 } 1532 1533 /* TODO: TCG supports only one thread */ 1534 1535 book3s_msgsnd_common(pir, PPC_INTERRUPT_DOORBELL); 1536 } 1537 #endif 1538 #endif /* CONFIG_TCG */ 1539 #endif 1540 1541 #ifdef CONFIG_TCG 1542 void 
ppc_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr, 1543 MMUAccessType access_type, 1544 int mmu_idx, uintptr_t retaddr) 1545 { 1546 CPUPPCState *env = cs->env_ptr; 1547 uint32_t insn; 1548 1549 /* Restore state and reload the insn we executed, for filling in DSISR. */ 1550 cpu_restore_state(cs, retaddr, true); 1551 insn = cpu_ldl_code(env, env->nip); 1552 1553 cs->exception_index = POWERPC_EXCP_ALIGN; 1554 env->error_code = insn & 0x03FF0000; 1555 cpu_loop_exit(cs); 1556 } 1557 #endif 1558