/*
 * PowerPC exception emulation helpers for QEMU.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "internal.h"
#include "helper_regs.h"

/* Compile-time debug switches; all off by default. */
/* #define DEBUG_OP */
/* #define DEBUG_SOFTWARE_TLB */
/* #define DEBUG_EXCEPTIONS */

#ifdef DEBUG_EXCEPTIONS
# define LOG_EXCP(...) qemu_log(__VA_ARGS__)
#else
# define LOG_EXCP(...) do { } while (0)
#endif

/*****************************************************************************/
/* Exception processing */
#if defined(CONFIG_USER_ONLY)
/*
 * User-mode emulation: exceptions are handled entirely by the host
 * emulation loop (linux-user/bsd-user), so "delivering" an interrupt
 * here only discards the pending exception state.
 */
void ppc_cpu_do_interrupt(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;

    cs->exception_index = POWERPC_EXCP_NONE;
    env->error_code = 0;
}

/* User-mode stub: no hardware interrupts exist; just clear the state. */
static void ppc_hw_interrupt(CPUPPCState *env)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = POWERPC_EXCP_NONE;
    env->error_code = 0;
}
#else /* defined(CONFIG_USER_ONLY) */
/*
 * Log the system-call number (r0), the first arguments (r3..r6) and the
 * NIP when CPU_LOG_INT logging is enabled.
 */
static inline void dump_syscall(CPUPPCState *env)
{
    qemu_log_mask(CPU_LOG_INT, "syscall r0=%016" PRIx64 " r3=%016" PRIx64
                  " r4=%016" PRIx64 " r5=%016" PRIx64 " r6=%016" PRIx64
                  " nip=" TARGET_FMT_lx "\n",
                  ppc_dump_gpr(env, 0), ppc_dump_gpr(env, 3),
                  ppc_dump_gpr(env, 4), ppc_dump_gpr(env, 5),
                  ppc_dump_gpr(env, 6), env->nip);
}

/*
 * Convert an interrupt taken while the core sits in a power-management
 * state (doze/nap/sleep/winkle or stop on P7/P8/P9) into the System
 * Reset interrupt (vector 0x100) mandated by the architecture, encoding
 * the wakeup reason into the saved-MSR (SRR1) image via *msr.
 *
 * Returns the exception to actually deliver: the original @excp for a
 * machine check (which is sent normally), POWERPC_EXCP_RESET otherwise.
 * The bit fields written here land in SRR1 bits 42:45 (wake reason) and
 * bit 47 — presumably the "no state lost / returning from doze"
 * indication; see the Power ISA SRR1 wakeup encoding — TODO confirm.
 */
static int powerpc_reset_wakeup(CPUState *cs, CPUPPCState *env, int excp,
                                target_ulong *msr)
{
    /* We no longer are in a PM state */
    env->resume_as_sreset = false;

    /* Pretend to be returning from doze always as we don't lose state */
    *msr |= (0x1ull << (63 - 47));

    /* Machine checks are sent normally */
    if (excp == POWERPC_EXCP_MCHECK) {
        return excp;
    }
    switch (excp) {
    case POWERPC_EXCP_RESET:
        *msr |= 0x4ull << (63 - 45);
        break;
    case POWERPC_EXCP_EXTERNAL:
        *msr |= 0x8ull << (63 - 45);
        break;
    case POWERPC_EXCP_DECR:
        *msr |= 0x6ull << (63 - 45);
        break;
    case POWERPC_EXCP_SDOOR:
        *msr |= 0x5ull << (63 - 45);
        break;
    case POWERPC_EXCP_SDOOR_HV:
        *msr |= 0x3ull << (63 - 45);
        break;
    case POWERPC_EXCP_HV_MAINT:
        *msr |= 0xaull << (63 - 45);
        break;
    case POWERPC_EXCP_HVIRT:
        *msr |= 0x9ull << (63 - 45);
        break;
    default:
        cpu_abort(cs, "Unsupported exception %d in Power Save mode\n",
                  excp);
    }
    return POWERPC_EXCP_RESET;
}

static
/*
 * Map the LPCR:AIL (Alternate Interrupt Location) mode to the offset
 * OR-ed into the interrupt vector address; aborts on a reserved AIL
 * encoding.
 */
uint64_t ppc_excp_vector_offset(CPUState *cs, int ail)
{
    uint64_t offset = 0;

    switch (ail) {
    case AIL_NONE:
        break;
    case AIL_0001_8000:
        offset = 0x18000;
        break;
    case AIL_C000_0000_0000_4000:
        offset = 0xc000000000004000ull;
        break;
    default:
        cpu_abort(cs, "Invalid AIL combination %d\n", ail);
        break;
    }

    return offset;
}

/*
 * Deliver exception @excp to the CPU according to the exception model
 * @excp_model: compute the saved-MSR (SRR1) image and the new handler
 * MSR, pick the SRR/HSRR/CSRR save-register pair, apply per-exception
 * side effects, then jump to the (possibly AIL-relocated) vector.
 *
 * Note that this function should be greatly optimized when called
 * with a constant excp, from ppc_hw_interrupt
 */
static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    target_ulong msr, new_msr, vector;
    int srr0, srr1, asrr0, asrr1, lev, ail;
    bool lpes0;

    qemu_log_mask(CPU_LOG_INT, "Raise exception at " TARGET_FMT_lx
                  " => %08x (%02x)\n", env->nip, excp, env->error_code);

    /* new srr1 value excluding must-be-zero bits */
    if (excp_model == POWERPC_EXCP_BOOKE) {
        msr = env->msr;
    } else {
        msr = env->msr & ~0x783f0000ULL;
    }

    /*
     * new interrupt handler msr preserves existing HV and ME unless
     * explicitly overridden
     */
    new_msr = env->msr & (((target_ulong)1 << MSR_ME) | MSR_HVB);

    /* target registers; asrr0/1 == -1 means no alternate pair */
    srr0 = SPR_SRR0;
    srr1 = SPR_SRR1;
    asrr0 = -1;
    asrr1 = -1;

    /*
     * check for special resume at 0x100 from doze/nap/sleep/winkle on
     * P7/P8/P9
     */
    if (env->resume_as_sreset) {
        excp = powerpc_reset_wakeup(cs, env, excp, &msr);
    }

    /*
     * Exception targeting modifiers
     *
     * LPES0 is supported on POWER7/8/9
     * LPES1 is not supported (old iSeries mode)
     *
     * On anything else, we behave as if LPES0 is 1
     * (externals don't alter MSR:HV)
     *
     * AIL is initialized here but can be cleared by
     * selected exceptions
     */
#if defined(TARGET_PPC64)
    if (excp_model == POWERPC_EXCP_POWER7 ||
        excp_model == POWERPC_EXCP_POWER8 ||
        excp_model == POWERPC_EXCP_POWER9) {
        lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
        if (excp_model != POWERPC_EXCP_POWER7) {
            ail = (env->spr[SPR_LPCR] & LPCR_AIL) >> LPCR_AIL_SHIFT;
        } else {
            ail = 0;
        }
    } else
#endif /* defined(TARGET_PPC64) */
    {
        lpes0 = true;
        ail = 0;
    }

    /*
     * Hypervisor emulation assistance interrupt only exists on server
     * arch 2.05 server or later. We also don't want to generate it if
     * we don't have HVB in msr_mask (PAPR mode).
     */
    if (excp == POWERPC_EXCP_HV_EMU
#if defined(TARGET_PPC64)
        && !((env->mmu_model & POWERPC_MMU_64) && (env->msr_mask & MSR_HVB))
#endif /* defined(TARGET_PPC64) */

        ) {
        excp = POWERPC_EXCP_PROGRAM;
    }

    switch (excp) {
    case POWERPC_EXCP_NONE:
        /* Should never happen */
        return;
    case POWERPC_EXCP_CRITICAL:    /* Critical input */
        switch (excp_model) {
        case POWERPC_EXCP_40x:
            srr0 = SPR_40x_SRR2;
            srr1 = SPR_40x_SRR3;
            break;
        case POWERPC_EXCP_BOOKE:
            srr0 = SPR_BOOKE_CSRR0;
            srr1 = SPR_BOOKE_CSRR1;
            break;
        case POWERPC_EXCP_G2:
            break;
        default:
            goto excp_invalid;
        }
        break;
    case POWERPC_EXCP_MCHECK:    /* Machine check exception */
        if (msr_me == 0) {
            /*
             * Machine check exception is not enabled. Enter
             * checkstop state.
             */
            fprintf(stderr, "Machine check while not allowed. "
                    "Entering checkstop state\n");
            if (qemu_log_separate()) {
                qemu_log("Machine check while not allowed. "
                         "Entering checkstop state\n");
            }
            cs->halted = 1;
            cpu_interrupt_exittb(cs);
        }
        if (env->msr_mask & MSR_HVB) {
            /*
             * ISA specifies HV, but can be delivered to guest with HV
             * clear (e.g., see FWNMI in PAPR).
             */
            new_msr |= (target_ulong)MSR_HVB;
        }
        ail = 0;

        /* machine check exceptions don't have ME set */
        new_msr &= ~((target_ulong)1 << MSR_ME);

        /* XXX: should also have something loaded in DAR / DSISR */
        switch (excp_model) {
        case POWERPC_EXCP_40x:
            srr0 = SPR_40x_SRR2;
            srr1 = SPR_40x_SRR3;
            break;
        case POWERPC_EXCP_BOOKE:
            /* FIXME: choose one or the other based on CPU type */
            srr0 = SPR_BOOKE_MCSRR0;
            srr1 = SPR_BOOKE_MCSRR1;
            asrr0 = SPR_BOOKE_CSRR0;
            asrr1 = SPR_BOOKE_CSRR1;
            break;
        default:
            break;
        }
        break;
    case POWERPC_EXCP_DSI:       /* Data storage exception */
        LOG_EXCP("DSI exception: DSISR=" TARGET_FMT_lx" DAR=" TARGET_FMT_lx
                 "\n", env->spr[SPR_DSISR], env->spr[SPR_DAR]);
        break;
    case POWERPC_EXCP_ISI:       /* Instruction storage exception */
        LOG_EXCP("ISI exception: msr=" TARGET_FMT_lx ", nip=" TARGET_FMT_lx
                 "\n", msr, env->nip);
        /* error_code carries the SRR1 fault-status bits for ISI */
        msr |= env->error_code;
        break;
    case POWERPC_EXCP_EXTERNAL:  /* External input */
        cs = CPU(cpu);

        if (!lpes0) {
            /* LPES0=0: externals are delivered to the hypervisor */
            new_msr |= (target_ulong)MSR_HVB;
            new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
            srr0 = SPR_HSRR0;
            srr1 = SPR_HSRR1;
        }
        if (env->mpic_proxy) {
            /* IACK the IRQ on delivery */
            env->spr[SPR_BOOKE_EPR] = ldl_phys(cs->as, env->mpic_iack);
        }
        break;
    case POWERPC_EXCP_ALIGN:     /* Alignment exception */
        /* Get rS/rD and rA from faulting opcode */
        /*
         * Note: the opcode fields will not be set properly for a
         * direct store load/store, but nobody cares as nobody
         * actually uses direct store segments.
         */
        env->spr[SPR_DSISR] |= (env->error_code & 0x03FF0000) >> 16;
        break;
    case POWERPC_EXCP_PROGRAM:   /* Program exception */
        switch (env->error_code & ~0xF) {
        case POWERPC_EXCP_FP:
            if ((msr_fe0 == 0 && msr_fe1 == 0) || msr_fp == 0) {
                /* FP exceptions disabled in MSR: swallow the interrupt */
                LOG_EXCP("Ignore floating point exception\n");
                cs->exception_index = POWERPC_EXCP_NONE;
                env->error_code = 0;
                return;
            }

            /*
             * FP exceptions always have NIP pointing to the faulting
             * instruction, so always use store_next and claim we are
             * precise in the MSR.
             */
            msr |= 0x00100000;
            env->spr[SPR_BOOKE_ESR] = ESR_FP;
            break;
        case POWERPC_EXCP_INVAL:
            LOG_EXCP("Invalid instruction at " TARGET_FMT_lx "\n", env->nip);
            msr |= 0x00080000;
            env->spr[SPR_BOOKE_ESR] = ESR_PIL;
            break;
        case POWERPC_EXCP_PRIV:
            msr |= 0x00040000;
            env->spr[SPR_BOOKE_ESR] = ESR_PPR;
            break;
        case POWERPC_EXCP_TRAP:
            msr |= 0x00020000;
            env->spr[SPR_BOOKE_ESR] = ESR_PTR;
            break;
        default:
            /* Should never occur */
            cpu_abort(cs, "Invalid program exception %d. Aborting\n",
                      env->error_code);
            break;
        }
        break;
    case POWERPC_EXCP_SYSCALL:   /* System call exception */
        dump_syscall(env);
        lev = env->error_code;

        /*
         * We need to correct the NIP which in this case is supposed
         * to point to the next instruction
         */
        env->nip += 4;

        /* "PAPR mode" built-in hypercall emulation */
        if ((lev == 1) && cpu->vhyp) {
            PPCVirtualHypervisorClass *vhc =
                PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
            vhc->hypercall(cpu->vhyp, cpu);
            return;
        }
        if (lev == 1) {
            /* sc 1: hypervisor call, handler runs with HV set */
            new_msr |= (target_ulong)MSR_HVB;
        }
        break;
    case POWERPC_EXCP_FPU:       /* Floating-point unavailable exception */
    case POWERPC_EXCP_APU:       /* Auxiliary processor unavailable */
    case POWERPC_EXCP_DECR:      /* Decrementer exception */
        break;
    case POWERPC_EXCP_FIT:       /* Fixed-interval timer interrupt */
        /* FIT on 4xx */
        LOG_EXCP("FIT exception\n");
        break;
    case POWERPC_EXCP_WDT:       /* Watchdog timer interrupt */
        LOG_EXCP("WDT exception\n");
        switch (excp_model) {
        case POWERPC_EXCP_BOOKE:
            srr0 = SPR_BOOKE_CSRR0;
            srr1 = SPR_BOOKE_CSRR1;
            break;
        default:
            break;
        }
        break;
    case POWERPC_EXCP_DTLB:      /* Data TLB error */
    case POWERPC_EXCP_ITLB:      /* Instruction TLB error */
        break;
    case POWERPC_EXCP_DEBUG:     /* Debug interrupt */
        if (env->flags & POWERPC_FLAG_DE) {
            /* FIXME: choose one or the other based on CPU type */
            srr0 = SPR_BOOKE_DSRR0;
            srr1 = SPR_BOOKE_DSRR1;
            asrr0 = SPR_BOOKE_CSRR0;
            asrr1 = SPR_BOOKE_CSRR1;
            /* DBSR already modified by caller */
        } else {
            cpu_abort(cs, "Debug exception triggered on unsupported model\n");
        }
        break;
    case POWERPC_EXCP_SPEU:      /* SPE/embedded floating-point unavailable */
        env->spr[SPR_BOOKE_ESR] = ESR_SPV;
        break;
    case POWERPC_EXCP_EFPDI:     /* Embedded floating-point data interrupt */
        /* XXX: TODO */
        cpu_abort(cs, "Embedded floating point data exception "
                  "is not implemented yet !\n");
        env->spr[SPR_BOOKE_ESR] = ESR_SPV;
        break;
    case POWERPC_EXCP_EFPRI:     /* Embedded floating-point round interrupt */
        /* XXX: TODO */
        cpu_abort(cs, "Embedded floating point round exception "
                  "is not implemented yet !\n");
        env->spr[SPR_BOOKE_ESR] = ESR_SPV;
        break;
    case POWERPC_EXCP_EPERFM:    /* Embedded performance monitor interrupt */
        /* XXX: TODO */
        cpu_abort(cs,
                  "Performance counter exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_DOORI:     /* Embedded doorbell interrupt */
        break;
    case POWERPC_EXCP_DOORCI:    /* Embedded doorbell critical interrupt */
        srr0 = SPR_BOOKE_CSRR0;
        srr1 = SPR_BOOKE_CSRR1;
        break;
    case POWERPC_EXCP_RESET:     /* System reset exception */
        /* A power-saving exception sets ME, otherwise it is unchanged */
        if (msr_pow) {
            /* indicate that we resumed from power save mode */
            msr |= 0x10000;
            new_msr |= ((target_ulong)1 << MSR_ME);
        }
        if (env->msr_mask & MSR_HVB) {
            /*
             * ISA specifies HV, but can be delivered to guest with HV
             * clear (e.g., see FWNMI in PAPR, NMI injection in QEMU).
             */
            new_msr |= (target_ulong)MSR_HVB;
        } else {
            if (msr_pow) {
                cpu_abort(cs, "Trying to deliver power-saving system reset "
                          "exception %d with no HV support\n", excp);
            }
        }
        ail = 0;
        break;
    case POWERPC_EXCP_DSEG:      /* Data segment exception */
    case POWERPC_EXCP_ISEG:      /* Instruction segment exception */
    case POWERPC_EXCP_TRACE:     /* Trace exception */
        break;
    case POWERPC_EXCP_HDECR:     /* Hypervisor decrementer exception */
    case POWERPC_EXCP_HDSI:      /* Hypervisor data storage exception */
    case POWERPC_EXCP_HISI:      /* Hypervisor instruction storage exception */
    case POWERPC_EXCP_HDSEG:     /* Hypervisor data segment exception */
    case POWERPC_EXCP_HISEG:     /* Hypervisor instruction segment exception */
    case POWERPC_EXCP_SDOOR_HV:  /* Hypervisor Doorbell interrupt */
    case POWERPC_EXCP_HV_EMU:
    case POWERPC_EXCP_HVIRT:     /* Hypervisor virtualization */
        /* All HV-class interrupts save state in HSRR0/1 and set HV */
        srr0 = SPR_HSRR0;
        srr1 = SPR_HSRR1;
        new_msr |= (target_ulong)MSR_HVB;
        new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
        break;
    case POWERPC_EXCP_VPU:       /* Vector unavailable exception */
    case POWERPC_EXCP_VSXU:      /* VSX unavailable exception */
    case POWERPC_EXCP_FU:        /* Facility unavailable exception */
#ifdef TARGET_PPC64
        /* error_code is the FSCR facility code, stored in FSCR[IC] */
        env->spr[SPR_FSCR] |= ((target_ulong)env->error_code << 56);
#endif
        break;
    case POWERPC_EXCP_PIT:       /* Programmable interval timer interrupt */
        LOG_EXCP("PIT exception\n");
        break;
    case POWERPC_EXCP_IO:        /* IO error exception */
        /* XXX: TODO */
        cpu_abort(cs, "601 IO error exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_RUNM:      /* Run mode exception */
        /* XXX: TODO */
        cpu_abort(cs, "601 run mode exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_EMUL:      /* Emulation trap exception */
        /* XXX: TODO */
        cpu_abort(cs, "602 emulation trap exception "
                  "is not implemented yet !\n");
        break;
    case POWERPC_EXCP_IFTLB:     /* Instruction fetch TLB error */
        switch (excp_model) {
        case POWERPC_EXCP_602:
        case POWERPC_EXCP_603:
        case POWERPC_EXCP_603E:
        case POWERPC_EXCP_G2:
            goto tlb_miss_tgpr;
        case POWERPC_EXCP_7x5:
            goto tlb_miss;
        case POWERPC_EXCP_74xx:
            goto tlb_miss_74xx;
        default:
            cpu_abort(cs, "Invalid instruction TLB miss exception\n");
            break;
        }
        break;
    case POWERPC_EXCP_DLTLB:     /* Data load TLB miss */
        switch (excp_model) {
        case POWERPC_EXCP_602:
        case POWERPC_EXCP_603:
        case POWERPC_EXCP_603E:
        case POWERPC_EXCP_G2:
            goto tlb_miss_tgpr;
        case POWERPC_EXCP_7x5:
            goto tlb_miss;
        case POWERPC_EXCP_74xx:
            goto tlb_miss_74xx;
        default:
            cpu_abort(cs, "Invalid data load TLB miss exception\n");
            break;
        }
        break;
    case POWERPC_EXCP_DSTLB:     /* Data store TLB miss */
        switch (excp_model) {
        case POWERPC_EXCP_602:
        case POWERPC_EXCP_603:
        case POWERPC_EXCP_603E:
        case POWERPC_EXCP_G2:
        tlb_miss_tgpr:
            /* Swap temporary saved registers with GPRs */
            if (!(new_msr & ((target_ulong)1 << MSR_TGPR))) {
                new_msr |= (target_ulong)1 << MSR_TGPR;
                hreg_swap_gpr_tgpr(env);
            }
            goto tlb_miss;
        case POWERPC_EXCP_7x5:
        tlb_miss:
#if defined(DEBUG_SOFTWARE_TLB)
            if (qemu_log_enabled()) {
                const char *es;
                target_ulong *miss, *cmp;
                int en;

                if (excp == POWERPC_EXCP_IFTLB) {
                    es = "I";
                    en = 'I';
                    miss = &env->spr[SPR_IMISS];
                    cmp = &env->spr[SPR_ICMP];
                } else {
                    if (excp == POWERPC_EXCP_DLTLB) {
                        es = "DL";
                    } else {
                        es = "DS";
                    }
                    en = 'D';
                    miss = &env->spr[SPR_DMISS];
                    cmp = &env->spr[SPR_DCMP];
                }
                qemu_log("6xx %sTLB miss: %cM " TARGET_FMT_lx " %cC "
                         TARGET_FMT_lx " H1 " TARGET_FMT_lx " H2 "
                         TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp,
                         env->spr[SPR_HASH1], env->spr[SPR_HASH2],
                         env->error_code);
            }
#endif
            /* 6xx software TLB miss: stash CR0 and fault info in SRR1 */
            msr |= env->crf[0] << 28;
            msr |= env->error_code; /* key, D/I, S/L bits */
            /* Set way using a LRU mechanism */
            msr |= ((env->last_way + 1) & (env->nb_ways - 1)) << 17;
            break;
        case POWERPC_EXCP_74xx:
        tlb_miss_74xx:
#if defined(DEBUG_SOFTWARE_TLB)
            if (qemu_log_enabled()) {
                const char *es;
                target_ulong *miss, *cmp;
                int en;

                if (excp == POWERPC_EXCP_IFTLB) {
                    es = "I";
                    en = 'I';
                    miss = &env->spr[SPR_TLBMISS];
                    cmp = &env->spr[SPR_PTEHI];
                } else {
                    if (excp == POWERPC_EXCP_DLTLB) {
                        es = "DL";
                    } else {
                        es = "DS";
                    }
                    en = 'D';
                    miss = &env->spr[SPR_TLBMISS];
                    cmp = &env->spr[SPR_PTEHI];
                }
                qemu_log("74xx %sTLB miss: %cM " TARGET_FMT_lx " %cC "
                         TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp,
                         env->error_code);
            }
#endif
            msr |= env->error_code; /* key bit */
            break;
        default:
            cpu_abort(cs, "Invalid data store TLB miss exception\n");
            break;
        }
        break;
    case POWERPC_EXCP_FPA:       /* Floating-point assist exception */
        /* XXX: TODO */
        cpu_abort(cs, "Floating point assist exception "
                  "is not implemented yet !\n");
        break;
    case POWERPC_EXCP_DABR:      /* Data address breakpoint */
        /* XXX: TODO */
        cpu_abort(cs, "DABR exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_IABR:      /* Instruction address breakpoint */
        /* XXX: TODO */
        cpu_abort(cs, "IABR exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_SMI:       /* System management interrupt */
        /* XXX: TODO */
        cpu_abort(cs, "SMI exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_THERM:     /* Thermal interrupt */
        /* XXX: TODO */
        cpu_abort(cs, "Thermal management exception "
                  "is not implemented yet !\n");
        break;
    case POWERPC_EXCP_PERFM:     /* Embedded performance monitor interrupt */
        /* XXX: TODO */
        cpu_abort(cs,
                  "Performance counter exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_VPUA:      /* Vector assist exception */
        /* XXX: TODO */
        cpu_abort(cs, "VPU assist exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_SOFTP:     /* Soft patch exception */
        /* XXX: TODO */
        cpu_abort(cs,
                  "970 soft-patch exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_MAINT:     /* Maintenance exception */
        /* XXX: TODO */
        cpu_abort(cs,
                  "970 maintenance exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_MEXTBR:    /* Maskable external breakpoint */
        /* XXX: TODO */
        cpu_abort(cs, "Maskable external exception "
                  "is not implemented yet !\n");
        break;
    case POWERPC_EXCP_NMEXTBR:   /* Non maskable external breakpoint */
        /* XXX: TODO */
        cpu_abort(cs, "Non maskable external exception "
                  "is not implemented yet !\n");
        break;
    default:
    excp_invalid:
        cpu_abort(cs, "Invalid PowerPC exception %d. Aborting\n", excp);
        break;
    }

    /* Save PC */
    env->spr[srr0] = env->nip;

    /* Save MSR */
    env->spr[srr1] = msr;

    /* Sanity check */
    if (!(env->msr_mask & MSR_HVB)) {
        if (new_msr & MSR_HVB) {
            cpu_abort(cs, "Trying to deliver HV exception (MSR) %d with "
                      "no HV support\n", excp);
        }
        if (srr0 == SPR_HSRR0) {
            cpu_abort(cs, "Trying to deliver HV exception (HSRR) %d with "
                      "no HV support\n", excp);
        }
    }

    /* If any alternate SRR register are defined, duplicate saved values */
    if (asrr0 != -1) {
        env->spr[asrr0] = env->spr[srr0];
    }
    if (asrr1 != -1) {
        env->spr[asrr1] = env->spr[srr1];
    }

    /*
     * Sort out endianness of interrupt, this differs depending on the
     * CPU, the HV mode, etc...
     */
#ifdef TARGET_PPC64
    if (excp_model == POWERPC_EXCP_POWER7) {
        if (!(new_msr & MSR_HVB) && (env->spr[SPR_LPCR] & LPCR_ILE)) {
            new_msr |= (target_ulong)1 << MSR_LE;
        }
    } else if (excp_model == POWERPC_EXCP_POWER8) {
        if (new_msr & MSR_HVB) {
            if (env->spr[SPR_HID0] & HID0_HILE) {
                new_msr |= (target_ulong)1 << MSR_LE;
            }
        } else if (env->spr[SPR_LPCR] & LPCR_ILE) {
            new_msr |= (target_ulong)1 << MSR_LE;
        }
    } else if (excp_model == POWERPC_EXCP_POWER9) {
        if (new_msr & MSR_HVB) {
            if (env->spr[SPR_HID0] & HID0_POWER9_HILE) {
                new_msr |= (target_ulong)1 << MSR_LE;
            }
        } else if (env->spr[SPR_LPCR] & LPCR_ILE) {
            new_msr |= (target_ulong)1 << MSR_LE;
        }
    } else if (msr_ile) {
        new_msr |= (target_ulong)1 << MSR_LE;
    }
#else
    if (msr_ile) {
        new_msr |= (target_ulong)1 << MSR_LE;
    }
#endif

    /* Jump to handler */
    vector = env->excp_vectors[excp];
    if (vector == (target_ulong)-1ULL) {
        cpu_abort(cs, "Raised an exception without defined vector %d\n",
                  excp);
    }
    vector |= env->excp_prefix;

    /*
     * AIL only works if there is no HV transition and we are running
     * with translations enabled
     */
    if (!((msr >> MSR_IR) & 1) || !((msr >> MSR_DR) & 1) ||
        ((new_msr & MSR_HVB) && !(msr & MSR_HVB))) {
        ail = 0;
    }
    /* Handle AIL */
    if (ail) {
        new_msr |= (1 << MSR_IR) | (1 << MSR_DR);
        vector |= ppc_excp_vector_offset(cs, ail);
    }

#if defined(TARGET_PPC64)
    if (excp_model == POWERPC_EXCP_BOOKE) {
        if (env->spr[SPR_BOOKE_EPCR] & EPCR_ICM) {
            /* Cat.64-bit: EPCR.ICM is copied to MSR.CM */
            new_msr |= (target_ulong)1 << MSR_CM;
        } else {
            vector = (uint32_t)vector;
        }
    } else {
        if (!msr_isf && !(env->mmu_model & POWERPC_MMU_64)) {
            vector = (uint32_t)vector;
        } else {
            new_msr |= (target_ulong)1 << MSR_SF;
        }
    }
#endif
    /*
     * We don't use hreg_store_msr here as
     * already have treated any
     * special case that could occur. Just store MSR and update hflags
     *
     * Note: We *MUST* not use hreg_store_msr() as-is anyway because it
     * will prevent setting of the HV bit which some exceptions might need
     * to do.
     */
    env->msr = new_msr & env->msr_mask;
    hreg_compute_hflags(env);
    env->nip = vector;
    /* Reset exception state */
    cs->exception_index = POWERPC_EXCP_NONE;
    env->error_code = 0;

    /* Reset the reservation */
    env->reserve_addr = -1;

    /*
     * Any interrupt is context synchronizing, check if TCG TLB needs
     * a delayed flush on ppc64
     */
    check_tlb_flush(env, false);
}

/* Deliver the exception recorded in cs->exception_index. */
void ppc_cpu_do_interrupt(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;

    powerpc_excp(cpu, env->excp_model, cs->exception_index);
}

/*
 * Deliver the highest-priority pending hardware interrupt, if any may
 * be taken in the current MSR/LPCR state. The textual order of the
 * checks below defines the priority; every taken branch returns.
 * Edge-style sources are removed from env->pending_interrupts on
 * delivery, level-style ones (e.g. EXT) are not.
 */
static void ppc_hw_interrupt(CPUPPCState *env)
{
    PowerPCCPU *cpu = env_archcpu(env);
    bool async_deliver;

    /* External reset */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_RESET)) {
        env->pending_interrupts &= ~(1 << PPC_INTERRUPT_RESET);
        powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_RESET);
        return;
    }
    /* Machine check exception */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_MCK)) {
        env->pending_interrupts &= ~(1 << PPC_INTERRUPT_MCK);
        powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_MCHECK);
        return;
    }
#if 0 /* TODO */
    /* External debug exception */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_DEBUG)) {
        env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DEBUG);
        powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DEBUG);
        return;
    }
#endif

    /*
     * For interrupts that gate on MSR:EE, we need to do something a
     * bit more subtle, as we need to let them through even when EE is
     * clear when coming out of some power management states (in order
     * for them to become a 0x100).
     */
    async_deliver = (msr_ee != 0) || env->resume_as_sreset;

    /* Hypervisor decrementer exception */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDECR)) {
        /* LPCR will be clear when not supported so this will work */
        bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE);
        if ((async_deliver || msr_hv == 0) && hdice) {
            /* HDEC clears on delivery */
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDECR);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_HDECR);
            return;
        }
    }

    /* Hypervisor virtualization interrupt */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_HVIRT)) {
        /* LPCR will be clear when not supported so this will work */
        bool hvice = !!(env->spr[SPR_LPCR] & LPCR_HVICE);
        if ((async_deliver || msr_hv == 0) && hvice) {
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_HVIRT);
            return;
        }
    }

    /* External interrupt can ignore MSR:EE under some circumstances */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_EXT)) {
        bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
        bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC);
        /* HEIC blocks delivery to the hypervisor */
        if ((async_deliver && !(heic && msr_hv && !msr_pr)) ||
            (env->has_hv_mode && msr_hv == 0 && !lpes0)) {
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_EXTERNAL);
            return;
        }
    }
    if (msr_ce != 0) {
        /* External critical interrupt */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_CEXT)) {
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_CRITICAL);
            return;
        }
    }
    if (async_deliver != 0) {
        /* Watchdog timer on embedded PowerPC */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_WDT)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_WDT);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_WDT);
            return;
        }
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_CDOORBELL)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_CDOORBELL);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DOORCI);
            return;
        }
        /* Fixed interval timer on embedded PowerPC */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_FIT)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_FIT);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_FIT);
            return;
        }
        /* Programmable interval timer on embedded PowerPC */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_PIT)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PIT);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_PIT);
            return;
        }
        /* Decrementer exception */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_DECR)) {
            if (ppc_decr_clear_on_delivery(env)) {
                env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DECR);
            }
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DECR);
            return;
        }
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_DOORBELL)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DOORBELL);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DOORI);
            return;
        }
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDOORBELL)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDOORBELL);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_SDOOR_HV);
            return;
        }
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_PERFM)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PERFM);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_PERFM);
            return;
        }
        /* Thermal interrupt */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_THERM)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_THERM);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_THERM);
            return;
        }
    }

    if (env->resume_as_sreset) {
        /*
         * This is a bug ! It means that has_work took us out of halt without
         * anything to deliver while in a PM state that requires getting
         * out via a 0x100
         *
         * This means we will incorrectly execute past the power management
         * instruction instead of triggering a reset.
         *
         * It generally means a discrepancy between the wakeup conditions in
         * the processor has_work implementation and the logic in this
         * function.
         */
        cpu_abort(env_cpu(env),
                  "Wakeup from PM state but interrupt Undelivered");
    }
}

/* Inject a System Reset interrupt (e.g. NMI injection from the monitor). */
void ppc_cpu_do_system_reset(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;

    powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_RESET);
}
#endif /* !CONFIG_USER_ONLY */

/*
 * TCG hook: try to take a pending hardware interrupt. Returns true if
 * CPU_INTERRUPT_HARD was handled (the flag is only cleared once no
 * interrupts remain pending).
 */
bool ppc_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;

    if (interrupt_request & CPU_INTERRUPT_HARD) {
        ppc_hw_interrupt(env);
        if (env->pending_interrupts == 0) {
            cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
        }
        return true;
    }
    return false;
}

#if defined(DEBUG_OP)
/* Debug-only: trace the target NIP and MSR of a return-from-interrupt. */
static void cpu_dump_rfi(target_ulong RA, target_ulong msr)
{
    qemu_log("Return from exception at " TARGET_FMT_lx " with flags "
             TARGET_FMT_lx "\n", RA, msr);
}
#endif

/*****************************************************************************/
/* Exceptions processing helpers */

/*
 * Record @exception/@error_code on the CPU and longjmp out of the TCG
 * translation loop, restoring guest state from host return address
 * @raddr (0 means "no unwinding needed").
 */
void raise_exception_err_ra(CPUPPCState *env, uint32_t exception,
                            uint32_t error_code, uintptr_t raddr)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = exception;
    env->error_code = error_code;
    cpu_loop_exit_restore(cs, raddr);
}

void raise_exception_err(CPUPPCState *env, uint32_t exception,
                         uint32_t error_code)
{
    raise_exception_err_ra(env, exception, error_code, 0);
}

void raise_exception(CPUPPCState *env, uint32_t exception)
{
    raise_exception_err_ra(env, exception, 0, 0);
}

void raise_exception_ra(CPUPPCState *env, uint32_t exception,
                        uintptr_t raddr)
{
    raise_exception_err_ra(env, exception, 0, raddr);
}

void helper_raise_exception_err(CPUPPCState *env, uint32_t exception,
                                uint32_t error_code)
{
    raise_exception_err_ra(env, exception, error_code, 0);
}

void helper_raise_exception(CPUPPCState *env, uint32_t exception)
{
    raise_exception_err_ra(env, exception, 0, 0);
}

#if !defined(CONFIG_USER_ONLY)
/*
 * mtmsr helper: store @val to the MSR; if hreg_store_msr() reports a
 * resulting exception (e.g. power management), exit the current TB and
 * raise it.
 */
void helper_store_msr(CPUPPCState *env, target_ulong val)
{
    uint32_t excp = hreg_store_msr(env, val, 0);

    if (excp != 0) {
        CPUState *cs = env_cpu(env);
        cpu_interrupt_exittb(cs);
        raise_exception(env, excp);
    }
}

#if defined(TARGET_PPC64)
/*
 * Power-management instruction (doze/nap/sleep/winkle/stop): halt the
 * CPU and decide whether wakeup must be turned into a System Reset
 * (0x100) by powerpc_reset_wakeup().
 */
void helper_pminsn(CPUPPCState *env, powerpc_pm_insn_t insn)
{
    CPUState *cs;

    cs = env_cpu(env);
    cs->halted = 1;

    /*
     * The architecture specifies that HDEC interrupts are discarded
     * in PM states
     */
    env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDECR);

    /* Condition for waking up at 0x100 */
    env->resume_as_sreset = (insn != PPC_PM_STOP) ||
        (env->spr[SPR_PSSCR] & PSSCR_EC);
}
#endif /* defined(TARGET_PPC64) */

/*
 * Common body of all return-from-interrupt instructions: restore NIP
 * and MSR from the given save registers, then force an exit from the
 * current TB. Context synchronizing.
 */
static inline void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr)
{
    CPUState *cs = env_cpu(env);

    /* MSR:POW cannot be set by any form of rfi */
    msr &= ~(1ULL << MSR_POW);

#if defined(TARGET_PPC64)
    /* Switching to 32-bit ? Crop the nip */
    if (!msr_is_64bit(env, msr)) {
        nip = (uint32_t)nip;
    }
#else
    nip = (uint32_t)nip;
#endif
    /* XXX: beware: this is false if VLE is supported */
    env->nip = nip & ~((target_ulong)0x00000003);
    hreg_store_msr(env, msr, 1);
#if defined(DEBUG_OP)
    cpu_dump_rfi(env->nip, env->msr);
#endif
    /*
     * No need to raise an exception here, as rfi is always the last
     * insn of a TB
     */
    cpu_interrupt_exittb(cs);
    /* Reset the reservation */
    env->reserve_addr = -1;

    /* Context synchronizing: check if TCG TLB needs flush */
    check_tlb_flush(env, false);
}

void helper_rfi(CPUPPCState *env)
{
    /* 32-bit rfi: only the low word of SRR1 is restored into the MSR */
    do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1] & 0xfffffffful);
}

#define MSR_BOOK3S_MASK
#if defined(TARGET_PPC64)
void helper_rfid(CPUPPCState *env)
{
    /*
     * The architecture defines a number of rules for which bits can
     * change but in practice, we handle this in hreg_store_msr()
     * which will be called by do_rfi(), so there is no need to filter
     * here
     */
    do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1]);
}

void helper_hrfid(CPUPPCState *env)
{
    /* Return from a hypervisor interrupt: restore from HSRR0/HSRR1 */
    do_rfi(env, env->spr[SPR_HSRR0], env->spr[SPR_HSRR1]);
}
#endif

/*****************************************************************************/
/* Embedded PowerPC specific helpers */
void helper_40x_rfci(CPUPPCState *env)
{
    /* 40x return from critical interrupt uses SRR2/SRR3 */
    do_rfi(env, env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3]);
}

void helper_rfci(CPUPPCState *env)
{
    /* BookE return from critical interrupt */
    do_rfi(env, env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1]);
}

void helper_rfdi(CPUPPCState *env)
{
    /* FIXME: choose CSRR1 or DSRR1 based on cpu type */
    do_rfi(env, env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1]);
}

void helper_rfmci(CPUPPCState *env)
{
    /* FIXME: choose CSRR1 or
MCSRR1 based on cpu type */ 1124 do_rfi(env, env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1]); 1125 } 1126 #endif 1127 1128 void helper_tw(CPUPPCState *env, target_ulong arg1, target_ulong arg2, 1129 uint32_t flags) 1130 { 1131 if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) || 1132 ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) || 1133 ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) || 1134 ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) || 1135 ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) { 1136 raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, 1137 POWERPC_EXCP_TRAP, GETPC()); 1138 } 1139 } 1140 1141 #if defined(TARGET_PPC64) 1142 void helper_td(CPUPPCState *env, target_ulong arg1, target_ulong arg2, 1143 uint32_t flags) 1144 { 1145 if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) || 1146 ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) || 1147 ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) || 1148 ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) || 1149 ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01))))) { 1150 raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, 1151 POWERPC_EXCP_TRAP, GETPC()); 1152 } 1153 } 1154 #endif 1155 1156 #if !defined(CONFIG_USER_ONLY) 1157 /*****************************************************************************/ 1158 /* PowerPC 601 specific instructions (POWER bridge) */ 1159 1160 void helper_rfsvc(CPUPPCState *env) 1161 { 1162 do_rfi(env, env->lr, env->ctr & 0x0000FFFF); 1163 } 1164 1165 /* Embedded.Processor Control */ 1166 static int dbell2irq(target_ulong rb) 1167 { 1168 int msg = rb & DBELL_TYPE_MASK; 1169 int irq = -1; 1170 1171 switch (msg) { 1172 case DBELL_TYPE_DBELL: 1173 irq = PPC_INTERRUPT_DOORBELL; 1174 break; 1175 case DBELL_TYPE_DBELL_CRIT: 1176 irq = PPC_INTERRUPT_CDOORBELL; 1177 break; 1178 case DBELL_TYPE_G_DBELL: 1179 case DBELL_TYPE_G_DBELL_CRIT: 1180 case DBELL_TYPE_G_DBELL_MC: 1181 /* XXX implement */ 1182 default: 1183 break; 1184 } 1185 
1186 return irq; 1187 } 1188 1189 void helper_msgclr(CPUPPCState *env, target_ulong rb) 1190 { 1191 int irq = dbell2irq(rb); 1192 1193 if (irq < 0) { 1194 return; 1195 } 1196 1197 env->pending_interrupts &= ~(1 << irq); 1198 } 1199 1200 void helper_msgsnd(target_ulong rb) 1201 { 1202 int irq = dbell2irq(rb); 1203 int pir = rb & DBELL_PIRTAG_MASK; 1204 CPUState *cs; 1205 1206 if (irq < 0) { 1207 return; 1208 } 1209 1210 qemu_mutex_lock_iothread(); 1211 CPU_FOREACH(cs) { 1212 PowerPCCPU *cpu = POWERPC_CPU(cs); 1213 CPUPPCState *cenv = &cpu->env; 1214 1215 if ((rb & DBELL_BRDCAST) || (cenv->spr[SPR_BOOKE_PIR] == pir)) { 1216 cenv->pending_interrupts |= 1 << irq; 1217 cpu_interrupt(cs, CPU_INTERRUPT_HARD); 1218 } 1219 } 1220 qemu_mutex_unlock_iothread(); 1221 } 1222 1223 /* Server Processor Control */ 1224 static int book3s_dbell2irq(target_ulong rb) 1225 { 1226 int msg = rb & DBELL_TYPE_MASK; 1227 1228 /* 1229 * A Directed Hypervisor Doorbell message is sent only if the 1230 * message type is 5. All other types are reserved and the 1231 * instruction is a no-op 1232 */ 1233 return msg == DBELL_TYPE_DBELL_SERVER ? 
PPC_INTERRUPT_HDOORBELL : -1; 1234 } 1235 1236 void helper_book3s_msgclr(CPUPPCState *env, target_ulong rb) 1237 { 1238 int irq = book3s_dbell2irq(rb); 1239 1240 if (irq < 0) { 1241 return; 1242 } 1243 1244 env->pending_interrupts &= ~(1 << irq); 1245 } 1246 1247 void helper_book3s_msgsnd(target_ulong rb) 1248 { 1249 int irq = book3s_dbell2irq(rb); 1250 int pir = rb & DBELL_PROCIDTAG_MASK; 1251 CPUState *cs; 1252 1253 if (irq < 0) { 1254 return; 1255 } 1256 1257 qemu_mutex_lock_iothread(); 1258 CPU_FOREACH(cs) { 1259 PowerPCCPU *cpu = POWERPC_CPU(cs); 1260 CPUPPCState *cenv = &cpu->env; 1261 1262 /* TODO: broadcast message to all threads of the same processor */ 1263 if (cenv->spr_cb[SPR_PIR].default_value == pir) { 1264 cenv->pending_interrupts |= 1 << irq; 1265 cpu_interrupt(cs, CPU_INTERRUPT_HARD); 1266 } 1267 } 1268 qemu_mutex_unlock_iothread(); 1269 } 1270 #endif 1271 1272 void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr, 1273 MMUAccessType access_type, 1274 int mmu_idx, uintptr_t retaddr) 1275 { 1276 CPUPPCState *env = cs->env_ptr; 1277 uint32_t insn; 1278 1279 /* Restore state and reload the insn we executed, for filling in DSISR. */ 1280 cpu_restore_state(cs, retaddr, true); 1281 insn = cpu_ldl_code(env, env->nip); 1282 1283 cs->exception_index = POWERPC_EXCP_ALIGN; 1284 env->error_code = insn & 0x03FF0000; 1285 cpu_loop_exit(cs); 1286 } 1287