/*
 *  PowerPC exception emulation helpers for QEMU.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"

#include "helper_regs.h"

//#define DEBUG_OP
//#define DEBUG_SOFTWARE_TLB
//#define DEBUG_EXCEPTIONS

#ifdef DEBUG_EXCEPTIONS
# define LOG_EXCP(...) qemu_log(__VA_ARGS__)
#else
# define LOG_EXCP(...) do { } while (0)
#endif

/*****************************************************************************/
/* Exception processing */
#if defined(CONFIG_USER_ONLY)
void ppc_cpu_do_interrupt(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;

    cs->exception_index = POWERPC_EXCP_NONE;
    env->error_code = 0;
}

static void ppc_hw_interrupt(CPUPPCState *env)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));

    cs->exception_index = POWERPC_EXCP_NONE;
    env->error_code = 0;
}
#else /* defined(CONFIG_USER_ONLY) */
static inline void dump_syscall(CPUPPCState *env)
{
    qemu_log_mask(CPU_LOG_INT, "syscall r0=%016" PRIx64 " r3=%016" PRIx64
                  " r4=%016" PRIx64 " r5=%016" PRIx64 " r6=%016" PRIx64
                  " nip=" TARGET_FMT_lx "\n",
                  ppc_dump_gpr(env, 0), ppc_dump_gpr(env, 3),
                  ppc_dump_gpr(env, 4), ppc_dump_gpr(env, 5),
                  ppc_dump_gpr(env, 6), env->nip);
}

/* Note that this function should be greatly optimized
 * when called with a constant excp, from ppc_hw_interrupt
 */
static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    target_ulong msr, new_msr, vector;
    int srr0, srr1, asrr0, asrr1, lev, ail;
    bool lpes0;

    qemu_log_mask(CPU_LOG_INT, "Raise exception at " TARGET_FMT_lx
                  " => %08x (%02x)\n", env->nip, excp, env->error_code);

    /* new srr1 value excluding must-be-zero bits */
    if (excp_model == POWERPC_EXCP_BOOKE) {
        msr = env->msr;
    } else {
        msr = env->msr & ~0x783f0000ULL;
    }

    /* new interrupt handler msr preserves existing HV and ME unless
     * explicitly overridden
     */
    new_msr = env->msr & (((target_ulong)1 << MSR_ME) | MSR_HVB);

    /* target registers */
    srr0 = SPR_SRR0;
    srr1 = SPR_SRR1;
    asrr0 = -1;
    asrr1 = -1;

    /* check for special resume at 0x100 from doze/nap/sleep/winkle on P7/P8 */
    if (env->in_pm_state) {
        env->in_pm_state = false;

        /* Pretend to be returning from doze always as we don't lose state */
        msr |= (0x1ull << (63 - 47));

        /* Non-machine check exceptions are routed to 0x100 with a wakeup
         * cause encoded in SRR1
         */
        if (excp != POWERPC_EXCP_MCHECK) {
            switch (excp) {
            case POWERPC_EXCP_RESET:
                msr |= 0x4ull << (63 - 45);
                break;
            case POWERPC_EXCP_EXTERNAL:
                msr |= 0x8ull << (63 - 45);
                break;
            case POWERPC_EXCP_DECR:
                msr |= 0x6ull << (63 - 45);
                break;
            case POWERPC_EXCP_SDOOR:
                msr |= 0x5ull << (63 - 45);
                break;
            case POWERPC_EXCP_SDOOR_HV:
                msr |= 0x3ull << (63 - 45);
                break;
            case POWERPC_EXCP_HV_MAINT:
                msr |= 0xaull << (63 - 45);
                break;
            default:
                cpu_abort(cs, "Unsupported exception %d in Power Save mode\n",
                          excp);
            }
            excp = POWERPC_EXCP_RESET;
        }
    }

    /* Exception targeting modifiers
     *
     * LPES0 is supported on POWER7/8
     * LPES1 is not supported (old iSeries mode)
     *
     * On anything else, we behave as if LPES0 is 1
     * (externals don't alter MSR:HV)
     *
     * AIL is initialized here but can be cleared by
     * selected exceptions
     */
#if defined(TARGET_PPC64)
    if (excp_model == POWERPC_EXCP_POWER7 ||
        excp_model == POWERPC_EXCP_POWER8) {
        lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
        if (excp_model == POWERPC_EXCP_POWER8) {
            ail = (env->spr[SPR_LPCR] & LPCR_AIL) >> LPCR_AIL_SHIFT;
        } else {
            ail = 0;
        }
    } else
#endif /* defined(TARGET_PPC64) */
    {
        lpes0 = true;
        ail = 0;
    }

    /* Hypervisor emulation assistance interrupt only exists on server
     * arch 2.05 or later. We also don't want to generate it if we
     * don't have HVB in msr_mask (PAPR mode).
     */
    if (excp == POWERPC_EXCP_HV_EMU
#if defined(TARGET_PPC64)
        && !((env->mmu_model & POWERPC_MMU_64) && (env->msr_mask & MSR_HVB))
#endif /* defined(TARGET_PPC64) */

        ) {
        excp = POWERPC_EXCP_PROGRAM;
    }

    switch (excp) {
    case POWERPC_EXCP_NONE:
        /* Should never happen */
        return;
    case POWERPC_EXCP_CRITICAL:    /* Critical input */
        switch (excp_model) {
        case POWERPC_EXCP_40x:
            srr0 = SPR_40x_SRR2;
            srr1 = SPR_40x_SRR3;
            break;
        case POWERPC_EXCP_BOOKE:
            srr0 = SPR_BOOKE_CSRR0;
            srr1 = SPR_BOOKE_CSRR1;
            break;
        case POWERPC_EXCP_G2:
            break;
        default:
            goto excp_invalid;
        }
        break;
    case POWERPC_EXCP_MCHECK:    /* Machine check exception */
        if (msr_me == 0) {
            /* Machine check exception is not enabled.
             * Enter checkstop state.
             */
            fprintf(stderr, "Machine check while not allowed. "
                    "Entering checkstop state\n");
            if (qemu_log_separate()) {
                qemu_log("Machine check while not allowed. "
                         "Entering checkstop state\n");
            }
            cs->halted = 1;
            cpu_interrupt_exittb(cs);
        }
        if (env->msr_mask & MSR_HVB) {
            /* ISA specifies HV, but can be delivered to guest with HV clear
             * (e.g., see FWNMI in PAPR).
             */
            new_msr |= (target_ulong)MSR_HVB;
        }
        ail = 0;

        /* machine check exceptions don't have ME set */
        new_msr &= ~((target_ulong)1 << MSR_ME);

        /* XXX: should also have something loaded in DAR / DSISR */
        switch (excp_model) {
        case POWERPC_EXCP_40x:
            srr0 = SPR_40x_SRR2;
            srr1 = SPR_40x_SRR3;
            break;
        case POWERPC_EXCP_BOOKE:
            /* FIXME: choose one or the other based on CPU type */
            srr0 = SPR_BOOKE_MCSRR0;
            srr1 = SPR_BOOKE_MCSRR1;
            asrr0 = SPR_BOOKE_CSRR0;
            asrr1 = SPR_BOOKE_CSRR1;
            break;
        default:
            break;
        }
        break;
    case POWERPC_EXCP_DSI:       /* Data storage exception */
        LOG_EXCP("DSI exception: DSISR=" TARGET_FMT_lx " DAR=" TARGET_FMT_lx
                 "\n", env->spr[SPR_DSISR], env->spr[SPR_DAR]);
        break;
    case POWERPC_EXCP_ISI:       /* Instruction storage exception */
        LOG_EXCP("ISI exception: msr=" TARGET_FMT_lx ", nip=" TARGET_FMT_lx
                 "\n", msr, env->nip);
        msr |= env->error_code;
        break;
    case POWERPC_EXCP_EXTERNAL:  /* External input */
        cs = CPU(cpu);

        if (!lpes0) {
            new_msr |= (target_ulong)MSR_HVB;
            new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
            srr0 = SPR_HSRR0;
            srr1 = SPR_HSRR1;
        }
        if (env->mpic_proxy) {
            /* IACK the IRQ on delivery */
            env->spr[SPR_BOOKE_EPR] = ldl_phys(cs->as, env->mpic_iack);
        }
        break;
    case POWERPC_EXCP_ALIGN:     /* Alignment exception */
        /* Get rS/rD and rA from faulting opcode */
        /* Note: the opcode fields will not be set properly for a direct
         * store load/store, but nobody cares as nobody actually uses
         * direct store segments.
         */
        env->spr[SPR_DSISR] |= (env->error_code & 0x03FF0000) >> 16;
        break;
    case POWERPC_EXCP_PROGRAM:   /* Program exception */
        switch (env->error_code & ~0xF) {
        case POWERPC_EXCP_FP:
            if ((msr_fe0 == 0 && msr_fe1 == 0) || msr_fp == 0) {
                LOG_EXCP("Ignore floating point exception\n");
                cs->exception_index = POWERPC_EXCP_NONE;
                env->error_code = 0;
                return;
            }

            /* FP exceptions always have NIP pointing to the faulting
             * instruction, so always use store_next and claim we are
             * precise in the MSR.
             */
            msr |= 0x00100000;
            env->spr[SPR_BOOKE_ESR] = ESR_FP;
            break;
        case POWERPC_EXCP_INVAL:
            LOG_EXCP("Invalid instruction at " TARGET_FMT_lx "\n", env->nip);
            msr |= 0x00080000;
            env->spr[SPR_BOOKE_ESR] = ESR_PIL;
            break;
        case POWERPC_EXCP_PRIV:
            msr |= 0x00040000;
            env->spr[SPR_BOOKE_ESR] = ESR_PPR;
            break;
        case POWERPC_EXCP_TRAP:
            msr |= 0x00020000;
            env->spr[SPR_BOOKE_ESR] = ESR_PTR;
            break;
        default:
            /* Should never occur */
            cpu_abort(cs, "Invalid program exception %d. Aborting\n",
                      env->error_code);
            break;
        }
        break;
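    /* Note: for the system call exception below, env->error_code holds the
     * LEV field of the sc instruction; LEV=1 requests a hypervisor call,
     * which is either handled by the built-in "PAPR mode" hypercall
     * emulation or delivered with MSR:HV set.
     */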
    case POWERPC_EXCP_SYSCALL:   /* System call exception */
        dump_syscall(env);
        lev = env->error_code;

        /* We need to correct the NIP which in this case is supposed
         * to point to the next instruction
         */
        env->nip += 4;

        /* "PAPR mode" built-in hypercall emulation */
        if ((lev == 1) && cpu->vhyp) {
            PPCVirtualHypervisorClass *vhc =
                PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
            vhc->hypercall(cpu->vhyp, cpu);
            return;
        }
        if (lev == 1) {
            new_msr |= (target_ulong)MSR_HVB;
        }
        break;
    case POWERPC_EXCP_FPU:       /* Floating-point unavailable exception */
    case POWERPC_EXCP_APU:       /* Auxiliary processor unavailable */
    case POWERPC_EXCP_DECR:      /* Decrementer exception */
        break;
    case POWERPC_EXCP_FIT:       /* Fixed-interval timer interrupt */
        /* FIT on 4xx */
        LOG_EXCP("FIT exception\n");
        break;
    case POWERPC_EXCP_WDT:       /* Watchdog timer interrupt */
        LOG_EXCP("WDT exception\n");
        switch (excp_model) {
        case POWERPC_EXCP_BOOKE:
            srr0 = SPR_BOOKE_CSRR0;
            srr1 = SPR_BOOKE_CSRR1;
            break;
        default:
            break;
        }
        break;
    case POWERPC_EXCP_DTLB:      /* Data TLB error */
    case POWERPC_EXCP_ITLB:      /* Instruction TLB error */
        break;
    case POWERPC_EXCP_DEBUG:     /* Debug interrupt */
        switch (excp_model) {
        case POWERPC_EXCP_BOOKE:
            /* FIXME: choose one or the other based on CPU type */
            srr0 = SPR_BOOKE_DSRR0;
            srr1 = SPR_BOOKE_DSRR1;
            asrr0 = SPR_BOOKE_CSRR0;
            asrr1 = SPR_BOOKE_CSRR1;
            break;
        default:
            break;
        }
        /* XXX: TODO */
        cpu_abort(cs, "Debug exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_SPEU:      /* SPE/embedded floating-point unavailable */
        env->spr[SPR_BOOKE_ESR] = ESR_SPV;
        break;
    case POWERPC_EXCP_EFPDI:     /* Embedded floating-point data interrupt */
        /* XXX: TODO */
        cpu_abort(cs, "Embedded floating point data exception "
                  "is not implemented yet !\n");
        env->spr[SPR_BOOKE_ESR] = ESR_SPV;
        break;
    case POWERPC_EXCP_EFPRI:     /* Embedded floating-point round interrupt */
        /* XXX: TODO */
        cpu_abort(cs, "Embedded floating point round exception "
                  "is not implemented yet !\n");
        env->spr[SPR_BOOKE_ESR] = ESR_SPV;
        break;
    case POWERPC_EXCP_EPERFM:    /* Embedded performance monitor interrupt */
        /* XXX: TODO */
        cpu_abort(cs,
                  "Performance counter exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_DOORI:     /* Embedded doorbell interrupt */
        break;
    case POWERPC_EXCP_DOORCI:    /* Embedded doorbell critical interrupt */
        srr0 = SPR_BOOKE_CSRR0;
        srr1 = SPR_BOOKE_CSRR1;
        break;
    case POWERPC_EXCP_RESET:     /* System reset exception */
        /* A power-saving exception sets ME, otherwise it is unchanged */
        if (msr_pow) {
            /* indicate that we resumed from power save mode */
            msr |= 0x10000;
            new_msr |= ((target_ulong)1 << MSR_ME);
        }
        if (env->msr_mask & MSR_HVB) {
            /* ISA specifies HV, but can be delivered to guest with HV clear
             * (e.g., see FWNMI in PAPR, NMI injection in QEMU).
             */
            new_msr |= (target_ulong)MSR_HVB;
        } else {
            if (msr_pow) {
                cpu_abort(cs, "Trying to deliver power-saving system reset "
                          "exception %d with no HV support\n", excp);
            }
        }
        ail = 0;
        break;
    case POWERPC_EXCP_DSEG:      /* Data segment exception */
    case POWERPC_EXCP_ISEG:      /* Instruction segment exception */
    case POWERPC_EXCP_TRACE:     /* Trace exception */
        break;
    case POWERPC_EXCP_HDECR:     /* Hypervisor decrementer exception */
    case POWERPC_EXCP_HDSI:      /* Hypervisor data storage exception */
    case POWERPC_EXCP_HISI:      /* Hypervisor instruction storage exception */
    case POWERPC_EXCP_HDSEG:     /* Hypervisor data segment exception */
    case POWERPC_EXCP_HISEG:     /* Hypervisor instruction segment exception */
    case POWERPC_EXCP_HV_EMU:
        srr0 = SPR_HSRR0;
        srr1 = SPR_HSRR1;
        new_msr |= (target_ulong)MSR_HVB;
        new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
        break;
    case POWERPC_EXCP_VPU:       /* Vector unavailable exception */
    case POWERPC_EXCP_VSXU:      /* VSX unavailable exception */
    case POWERPC_EXCP_FU:        /* Facility unavailable exception */
#ifdef TARGET_PPC64
        env->spr[SPR_FSCR] |= ((target_ulong)env->error_code << 56);
#endif
        break;
    case POWERPC_EXCP_PIT:       /* Programmable interval timer interrupt */
        LOG_EXCP("PIT exception\n");
        break;
    case POWERPC_EXCP_IO:        /* IO error exception */
        /* XXX: TODO */
        cpu_abort(cs, "601 IO error exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_RUNM:      /* Run mode exception */
        /* XXX: TODO */
        cpu_abort(cs, "601 run mode exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_EMUL:      /* Emulation trap exception */
        /* XXX: TODO */
        cpu_abort(cs, "602 emulation trap exception "
                  "is not implemented yet !\n");
        break;
    case POWERPC_EXCP_IFTLB:     /* Instruction fetch TLB error */
        switch (excp_model) {
        case POWERPC_EXCP_602:
        case POWERPC_EXCP_603:
        case POWERPC_EXCP_603E:
        case POWERPC_EXCP_G2:
            goto tlb_miss_tgpr;
        case POWERPC_EXCP_7x5:
            goto tlb_miss;
        case POWERPC_EXCP_74xx:
            goto tlb_miss_74xx;
        default:
            cpu_abort(cs, "Invalid instruction TLB miss exception\n");
            break;
        }
        break;
    case POWERPC_EXCP_DLTLB:     /* Data load TLB miss */
        switch (excp_model) {
        case POWERPC_EXCP_602:
        case POWERPC_EXCP_603:
        case POWERPC_EXCP_603E:
        case POWERPC_EXCP_G2:
            goto tlb_miss_tgpr;
        case POWERPC_EXCP_7x5:
            goto tlb_miss;
        case POWERPC_EXCP_74xx:
            goto tlb_miss_74xx;
        default:
            cpu_abort(cs, "Invalid data load TLB miss exception\n");
            break;
        }
        break;
    case POWERPC_EXCP_DSTLB:     /* Data store TLB miss */
        switch (excp_model) {
        case POWERPC_EXCP_602:
        case POWERPC_EXCP_603:
        case POWERPC_EXCP_603E:
        case POWERPC_EXCP_G2:
        tlb_miss_tgpr:
            /* Swap temporary saved registers with GPRs */
            if (!(new_msr & ((target_ulong)1 << MSR_TGPR))) {
                new_msr |= (target_ulong)1 << MSR_TGPR;
                hreg_swap_gpr_tgpr(env);
            }
            goto tlb_miss;
        case POWERPC_EXCP_7x5:
        tlb_miss:
#if defined(DEBUG_SOFTWARE_TLB)
            if (qemu_log_enabled()) {
                const char *es;
                target_ulong *miss, *cmp;
                int en;

                if (excp == POWERPC_EXCP_IFTLB) {
                    es = "I";
                    en = 'I';
                    miss = &env->spr[SPR_IMISS];
                    cmp = &env->spr[SPR_ICMP];
                } else {
                    if (excp == POWERPC_EXCP_DLTLB) {
                        es = "DL";
                    } else {
                        es = "DS";
                    }
                    en = 'D';
                    miss = &env->spr[SPR_DMISS];
                    cmp = &env->spr[SPR_DCMP];
                }
                qemu_log("6xx %sTLB miss: %cM " TARGET_FMT_lx " %cC "
                         TARGET_FMT_lx " H1 " TARGET_FMT_lx " H2 "
                         TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp,
                         env->spr[SPR_HASH1], env->spr[SPR_HASH2],
                         env->error_code);
            }
#endif
            msr |= env->crf[0] << 28;
            msr |= env->error_code; /* key, D/I, S/L bits */
            /* Set way using a LRU mechanism */
            msr |= ((env->last_way + 1) & (env->nb_ways - 1)) << 17;
            break;
        case POWERPC_EXCP_74xx:
        tlb_miss_74xx:
#if defined(DEBUG_SOFTWARE_TLB)
            if (qemu_log_enabled()) {
                const char *es;
                target_ulong *miss, *cmp;
                int en;

                if (excp == POWERPC_EXCP_IFTLB) {
                    es = "I";
                    en = 'I';
                    miss = &env->spr[SPR_TLBMISS];
                    cmp = &env->spr[SPR_PTEHI];
                } else {
                    if (excp == POWERPC_EXCP_DLTLB) {
                        es = "DL";
                    } else {
                        es = "DS";
                    }
                    en = 'D';
                    miss = &env->spr[SPR_TLBMISS];
                    cmp = &env->spr[SPR_PTEHI];
                }
                qemu_log("74xx %sTLB miss: %cM " TARGET_FMT_lx " %cC "
                         TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp,
                         env->error_code);
            }
#endif
            msr |= env->error_code; /* key bit */
            break;
        default:
            cpu_abort(cs, "Invalid data store TLB miss exception\n");
            break;
        }
        break;
    case POWERPC_EXCP_FPA:       /* Floating-point assist exception */
        /* XXX: TODO */
        cpu_abort(cs, "Floating point assist exception "
                  "is not implemented yet !\n");
        break;
    case POWERPC_EXCP_DABR:      /* Data address breakpoint */
        /* XXX: TODO */
        cpu_abort(cs, "DABR exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_IABR:      /* Instruction address breakpoint */
        /* XXX: TODO */
        cpu_abort(cs, "IABR exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_SMI:       /* System management interrupt */
        /* XXX: TODO */
        cpu_abort(cs, "SMI exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_THERM:     /* Thermal interrupt */
        /* XXX: TODO */
        cpu_abort(cs, "Thermal management exception "
                  "is not implemented yet !\n");
        break;
    case POWERPC_EXCP_PERFM:     /* Embedded performance monitor interrupt */
        /* XXX: TODO */
        cpu_abort(cs,
                  "Performance counter exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_VPUA:      /* Vector assist exception */
        /* XXX: TODO */
        cpu_abort(cs, "VPU assist exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_SOFTP:     /* Soft patch exception */
        /* XXX: TODO */
        cpu_abort(cs,
                  "970 soft-patch exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_MAINT:     /* Maintenance exception */
        /* XXX: TODO */
        cpu_abort(cs,
                  "970 maintenance exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_MEXTBR:    /* Maskable external breakpoint */
        /* XXX: TODO */
        cpu_abort(cs, "Maskable external exception "
                  "is not implemented yet !\n");
        break;
    case POWERPC_EXCP_NMEXTBR:   /* Non maskable external breakpoint */
        /* XXX: TODO */
        cpu_abort(cs, "Non maskable external exception "
                  "is not implemented yet !\n");
        break;
    default:
    excp_invalid:
        cpu_abort(cs, "Invalid PowerPC exception %d. Aborting\n", excp);
        break;
    }

    /* Save PC */
    env->spr[srr0] = env->nip;

    /* Save MSR */
    env->spr[srr1] = msr;

    /* Sanity check */
    if (!(env->msr_mask & MSR_HVB)) {
        if (new_msr & MSR_HVB) {
            cpu_abort(cs, "Trying to deliver HV exception (MSR) %d with "
                      "no HV support\n", excp);
        }
        if (srr0 == SPR_HSRR0) {
            cpu_abort(cs, "Trying to deliver HV exception (HSRR) %d with "
                      "no HV support\n", excp);
        }
    }

    /* If any alternate SRR registers are defined, duplicate saved values */
    if (asrr0 != -1) {
        env->spr[asrr0] = env->spr[srr0];
    }
    if (asrr1 != -1) {
        env->spr[asrr1] = env->spr[srr1];
    }

    /* Sort out endianness of interrupt, this differs depending on the
     * CPU, the HV mode, etc...
     */
#ifdef TARGET_PPC64
    if (excp_model == POWERPC_EXCP_POWER7) {
        if (!(new_msr & MSR_HVB) && (env->spr[SPR_LPCR] & LPCR_ILE)) {
            new_msr |= (target_ulong)1 << MSR_LE;
        }
    } else if (excp_model == POWERPC_EXCP_POWER8) {
        if (new_msr & MSR_HVB) {
            if (env->spr[SPR_HID0] & (HID0_HILE | HID0_POWER9_HILE)) {
                new_msr |= (target_ulong)1 << MSR_LE;
            }
        } else if (env->spr[SPR_LPCR] & LPCR_ILE) {
            new_msr |= (target_ulong)1 << MSR_LE;
        }
    } else if (msr_ile) {
        new_msr |= (target_ulong)1 << MSR_LE;
    }
#else
    if (msr_ile) {
        new_msr |= (target_ulong)1 << MSR_LE;
    }
#endif

    /* Jump to handler */
    vector = env->excp_vectors[excp];
    if (vector == (target_ulong)-1ULL) {
        cpu_abort(cs, "Raised an exception without defined vector %d\n",
                  excp);
    }
    vector |= env->excp_prefix;

    /* AIL only works if there is no HV transition and we are running with
     * translations enabled
     */
    if (!((msr >> MSR_IR) & 1) || !((msr >> MSR_DR) & 1) ||
        ((new_msr & MSR_HVB) && !(msr & MSR_HVB))) {
        ail = 0;
    }
    /* Handle AIL */
    if (ail) {
        new_msr |= (1 << MSR_IR) | (1 << MSR_DR);
        switch (ail) {
        case AIL_0001_8000:
            vector |= 0x18000;
            break;
        case AIL_C000_0000_0000_4000:
            vector |= 0xc000000000004000ull;
            break;
        default:
            cpu_abort(cs, "Invalid AIL combination %d\n", ail);
            break;
        }
    }

#if defined(TARGET_PPC64)
    if (excp_model == POWERPC_EXCP_BOOKE) {
        if (env->spr[SPR_BOOKE_EPCR] & EPCR_ICM) {
            /* Cat.64-bit: EPCR.ICM is copied to MSR.CM */
            new_msr |= (target_ulong)1 << MSR_CM;
        } else {
            vector = (uint32_t)vector;
        }
    } else {
        if (!msr_isf && !(env->mmu_model & POWERPC_MMU_64)) {
            vector = (uint32_t)vector;
        } else {
            new_msr |= (target_ulong)1 << MSR_SF;
        }
    }
#endif
    /* We don't use hreg_store_msr here as we have already handled any
     * special case that could occur. Just store MSR and update hflags.
     *
     * Note: We *MUST* not use hreg_store_msr() as-is anyway because it
     * will prevent setting of the HV bit which some exceptions might need
     * to do.
     */
    env->msr = new_msr & env->msr_mask;
    hreg_compute_hflags(env);
    env->nip = vector;
    /* Reset exception state */
    cs->exception_index = POWERPC_EXCP_NONE;
    env->error_code = 0;

    /* Reset the reservation */
    env->reserve_addr = -1;

    /* Any interrupt is context synchronizing, check if TCG TLB
     * needs a delayed flush on ppc64
     */
    check_tlb_flush(env, false);
}

void ppc_cpu_do_interrupt(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;

    powerpc_excp(cpu, env->excp_model, cs->exception_index);
}

static void ppc_hw_interrupt(CPUPPCState *env)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
#if 0
    CPUState *cs = CPU(cpu);

    qemu_log_mask(CPU_LOG_INT, "%s: %p pending %08x req %08x me %d ee %d\n",
                  __func__, env, env->pending_interrupts,
                  cs->interrupt_request, (int)msr_me, (int)msr_ee);
#endif
    /* External reset */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_RESET)) {
        env->pending_interrupts &= ~(1 << PPC_INTERRUPT_RESET);
        powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_RESET);
        return;
    }
    /* Machine check exception */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_MCK)) {
        env->pending_interrupts &= ~(1 << PPC_INTERRUPT_MCK);
        powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_MCHECK);
        return;
    }
#if 0 /* TODO */
    /* External debug exception */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_DEBUG)) {
        env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DEBUG);
        powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DEBUG);
        return;
    }
#endif
    /* Hypervisor decrementer exception */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDECR)) {
        /* LPCR will be clear when not supported so this will work */
        bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE);
        if ((msr_ee != 0 || msr_hv == 0) && hdice) {
            /* HDEC clears on delivery */
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDECR);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_HDECR);
            return;
        }
    }
    /* External interrupts can ignore MSR:EE under some circumstances */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_EXT)) {
        bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
        if (msr_ee != 0 || (env->has_hv_mode && msr_hv == 0 && !lpes0)) {
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_EXTERNAL);
            return;
        }
    }
    if (msr_ce != 0) {
        /* External critical interrupt */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_CEXT)) {
            /* Taking a critical external interrupt does not clear the external
             * critical interrupt status
             */
#if 0
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_CEXT);
#endif
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_CRITICAL);
            return;
        }
    }
    if (msr_ee != 0) {
        /* Watchdog timer on embedded PowerPC */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_WDT)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_WDT);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_WDT);
            return;
        }
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_CDOORBELL)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_CDOORBELL);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DOORCI);
            return;
        }
        /* Fixed interval timer on embedded PowerPC */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_FIT)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_FIT);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_FIT);
            return;
        }
        /* Programmable interval timer on embedded PowerPC */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_PIT)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PIT);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_PIT);
            return;
        }
        /* Decrementer exception */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_DECR)) {
            if (ppc_decr_clear_on_delivery(env)) {
                env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DECR);
            }
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DECR);
            return;
        }
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_DOORBELL)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DOORBELL);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DOORI);
            return;
        }
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_PERFM)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PERFM);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_PERFM);
            return;
        }
        /* Thermal interrupt */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_THERM)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_THERM);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_THERM);
            return;
        }
    }
}

void ppc_cpu_do_system_reset(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;

    powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_RESET);
}
#endif /* !CONFIG_USER_ONLY */

bool ppc_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;

    if (interrupt_request & CPU_INTERRUPT_HARD) {
        ppc_hw_interrupt(env);
        if (env->pending_interrupts == 0) {
            cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
        }
        return true;
    }
    return false;
}

#if defined(DEBUG_OP)
static void cpu_dump_rfi(target_ulong RA, target_ulong msr)
{
    qemu_log("Return from exception at " TARGET_FMT_lx " with flags "
             TARGET_FMT_lx "\n", RA, msr);
}
#endif

/*****************************************************************************/
/* Exceptions processing helpers */

void raise_exception_err_ra(CPUPPCState *env, uint32_t exception,
                            uint32_t error_code, uintptr_t raddr)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));

    cs->exception_index = exception;
    env->error_code = error_code;
    cpu_loop_exit_restore(cs, raddr);
}

void raise_exception_err(CPUPPCState *env, uint32_t exception,
                         uint32_t error_code)
{
    raise_exception_err_ra(env, exception, error_code, 0);
}

void raise_exception(CPUPPCState *env, uint32_t exception)
{
    raise_exception_err_ra(env, exception, 0, 0);
}

void raise_exception_ra(CPUPPCState *env, uint32_t exception,
                        uintptr_t raddr)
{
    raise_exception_err_ra(env, exception, 0, raddr);
}

void helper_raise_exception_err(CPUPPCState *env, uint32_t exception,
                                uint32_t error_code)
{
    raise_exception_err_ra(env, exception, error_code, 0);
}

void helper_raise_exception(CPUPPCState *env, uint32_t exception)
{
    raise_exception_err_ra(env, exception, 0, 0);
}

#if !defined(CONFIG_USER_ONLY)
void helper_store_msr(CPUPPCState *env, target_ulong val)
{
    uint32_t excp = hreg_store_msr(env, val, 0);

    if (excp != 0) {
        CPUState *cs = CPU(ppc_env_get_cpu(env));
        cpu_interrupt_exittb(cs);
        raise_exception(env, excp);
    }
}

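/* Power-saving instructions (doze/nap/sleep/rvwinkle) are funnelled through
 * helper_pminsn() below: the CPU is halted and env->in_pm_state is set, so
 * the next interrupt delivered by powerpc_excp() is redirected to vector
 * 0x100 with the wakeup cause encoded in SRR1 (see the in_pm_state handling
 * at the top of powerpc_excp()).
 */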
#if defined(TARGET_PPC64)
void helper_pminsn(CPUPPCState *env, powerpc_pm_insn_t insn)
{
    CPUState *cs;

    cs = CPU(ppc_env_get_cpu(env));
    cs->halted = 1;
    env->in_pm_state = true;

    /* The architecture specifies that HDEC interrupts are
     * discarded in PM states
     */
    env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDECR);

    /* Technically, nap doesn't set EE, but if we don't set it
     * then ppc_hw_interrupt() won't deliver. We could add some
     * other tests there based on LPCR but it's simpler to just
     * whack EE in. It will be cleared by the 0x100 at wakeup
     * anyway. It will still be observable by the guest in SRR1
     * but this doesn't seem to be a problem.
     */
    env->msr |= (1ull << MSR_EE);
    raise_exception(env, EXCP_HLT);
}
#endif /* defined(TARGET_PPC64) */

static inline void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));

    /* MSR:POW cannot be set by any form of rfi */
    msr &= ~(1ULL << MSR_POW);

#if defined(TARGET_PPC64)
    /* Switching to 32-bit ? Crop the nip */
    if (!msr_is_64bit(env, msr)) {
        nip = (uint32_t)nip;
    }
#else
    nip = (uint32_t)nip;
#endif
    /* XXX: beware: this is false if VLE is supported */
    env->nip = nip & ~((target_ulong)0x00000003);
    hreg_store_msr(env, msr, 1);
#if defined(DEBUG_OP)
    cpu_dump_rfi(env->nip, env->msr);
#endif
    /* No need to raise an exception here,
     * as rfi is always the last insn of a TB
     */
    cpu_interrupt_exittb(cs);
    /* Reset the reservation */
    env->reserve_addr = -1;

    /* Context synchronizing: check if TCG TLB needs flush */
    check_tlb_flush(env, false);
}

void helper_rfi(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1] & 0xfffffffful);
}

#define MSR_BOOK3S_MASK
#if defined(TARGET_PPC64)
void helper_rfid(CPUPPCState *env)
{
    /* The architecture defines a number of rules for which bits
     * can change but in practice, we handle this in hreg_store_msr()
     * which will be called by do_rfi(), so there is no need to filter
     * here
     */
    do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1]);
}

void helper_hrfid(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_HSRR0], env->spr[SPR_HSRR1]);
}
#endif

/*****************************************************************************/
/* Embedded PowerPC specific helpers */
void helper_40x_rfci(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3]);
}

void helper_rfci(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1]);
}

void helper_rfdi(CPUPPCState *env)
{
    /* FIXME: choose CSRR1 or DSRR1 based on cpu type */
    do_rfi(env, env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1]);
}

void helper_rfmci(CPUPPCState *env)
{
    /* FIXME: choose CSRR1 or MCSRR1 based on cpu type */
    do_rfi(env, env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1]);
}
#endif

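/* Conditional trap helpers: the low five bits of 'flags' carry the TO field
 * of the tw/twi/td/tdi instruction. 0x10 traps on signed less-than, 0x08 on
 * signed greater-than, 0x04 on equality, 0x02 on unsigned less-than and
 * 0x01 on unsigned greater-than; any matching condition raises a TRAP-type
 * program exception.
 */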
void helper_tw(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
               uint32_t flags)
{
    if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
                  ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
                  ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
                  ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
                  ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_TRAP, GETPC());
    }
}

#if defined(TARGET_PPC64)
void helper_td(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
               uint32_t flags)
{
    if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
                  ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
                  ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
                  ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
                  ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01))))) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_TRAP, GETPC());
    }
}
#endif

#if !defined(CONFIG_USER_ONLY)
/*****************************************************************************/
/* PowerPC 601 specific instructions (POWER bridge) */

void helper_rfsvc(CPUPPCState *env)
{
    do_rfi(env, env->lr, env->ctr & 0x0000FFFF);
}

/* Embedded.Processor Control */
static int dbell2irq(target_ulong rb)
{
    int msg = rb & DBELL_TYPE_MASK;
    int irq = -1;

    switch (msg) {
    case DBELL_TYPE_DBELL:
        irq = PPC_INTERRUPT_DOORBELL;
        break;
    case DBELL_TYPE_DBELL_CRIT:
        irq = PPC_INTERRUPT_CDOORBELL;
        break;
    case DBELL_TYPE_G_DBELL:
    case DBELL_TYPE_G_DBELL_CRIT:
    case DBELL_TYPE_G_DBELL_MC:
        /* XXX implement */
    default:
        break;
    }

    return irq;
}

void helper_msgclr(CPUPPCState *env, target_ulong rb)
{
    int irq = dbell2irq(rb);

    if (irq < 0) {
        return;
    }

    env->pending_interrupts &= ~(1 << irq);
}

void helper_msgsnd(target_ulong rb)
{
    int irq = dbell2irq(rb);
    int pir = rb & DBELL_PIRTAG_MASK;
    CPUState *cs;

    if (irq < 0) {
        return;
    }

    qemu_mutex_lock_iothread();
    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);
        CPUPPCState *cenv = &cpu->env;

        if ((rb & DBELL_BRDCAST) || (cenv->spr[SPR_BOOKE_PIR] == pir)) {
            cenv->pending_interrupts |= 1 << irq;
            cpu_interrupt(cs, CPU_INTERRUPT_HARD);
        }
    }
    qemu_mutex_unlock_iothread();
}
#endif