/*
 * PowerPC exception emulation helpers for QEMU.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "internal.h"
#include "helper_regs.h"

/* #define DEBUG_OP */
/* #define DEBUG_SOFTWARE_TLB */
/* #define DEBUG_EXCEPTIONS */

#ifdef DEBUG_EXCEPTIONS
# define LOG_EXCP(...) qemu_log(__VA_ARGS__)
#else
# define LOG_EXCP(...) do { } while (0)
#endif

/*****************************************************************************/
/* Exception processing */
#if defined(CONFIG_USER_ONLY)
void ppc_cpu_do_interrupt(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;

    cs->exception_index = POWERPC_EXCP_NONE;
    env->error_code = 0;
}

static void ppc_hw_interrupt(CPUPPCState *env)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));

    cs->exception_index = POWERPC_EXCP_NONE;
    env->error_code = 0;
}
#else /* defined(CONFIG_USER_ONLY) */
static inline void dump_syscall(CPUPPCState *env)
{
    qemu_log_mask(CPU_LOG_INT, "syscall r0=%016" PRIx64 " r3=%016" PRIx64
                  " r4=%016" PRIx64 " r5=%016" PRIx64 " r6=%016" PRIx64
                  " nip=" TARGET_FMT_lx "\n",
                  ppc_dump_gpr(env, 0), ppc_dump_gpr(env, 3),
                  ppc_dump_gpr(env, 4), ppc_dump_gpr(env, 5),
                  ppc_dump_gpr(env, 6), env->nip);
}

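/*
 * When the core is woken from a power-saving state, the interrupt that
 * caused the wakeup must be presented as a system reset (vector 0x100).
 * This helper rewrites the exception accordingly and records the original
 * cause in the SRR1 wake reason field (the "0xN << (63 - 45)" values below,
 * i.e. SRR1[42:45] in IBM bit numbering), while claiming that no state was
 * lost, as if resuming from doze.  Machine checks are passed through as
 * machine checks rather than being converted.
 */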
static int powerpc_reset_wakeup(CPUState *cs, CPUPPCState *env, int excp,
                                target_ulong *msr)
{
    /* We no longer are in a PM state */
    env->resume_as_sreset = false;

    /* Pretend to be returning from doze always as we don't lose state */
    *msr |= (0x1ull << (63 - 47));

    /* Machine checks are sent normally */
    if (excp == POWERPC_EXCP_MCHECK) {
        return excp;
    }
    switch (excp) {
    case POWERPC_EXCP_RESET:
        *msr |= 0x4ull << (63 - 45);
        break;
    case POWERPC_EXCP_EXTERNAL:
        *msr |= 0x8ull << (63 - 45);
        break;
    case POWERPC_EXCP_DECR:
        *msr |= 0x6ull << (63 - 45);
        break;
    case POWERPC_EXCP_SDOOR:
        *msr |= 0x5ull << (63 - 45);
        break;
    case POWERPC_EXCP_SDOOR_HV:
        *msr |= 0x3ull << (63 - 45);
        break;
    case POWERPC_EXCP_HV_MAINT:
        *msr |= 0xaull << (63 - 45);
        break;
    case POWERPC_EXCP_HVIRT:
        *msr |= 0x9ull << (63 - 45);
        break;
    default:
        cpu_abort(cs, "Unsupported exception %d in Power Save mode\n",
                  excp);
    }
    return POWERPC_EXCP_RESET;
}

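/*
 * Alternate Interrupt Location: when LPCR[AIL] selects one of the modes
 * handled below, interrupts are delivered with instruction and data
 * relocation enabled and the vector address is offset by the value
 * returned here rather than being taken at the legacy real-mode location.
 */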
static uint64_t ppc_excp_vector_offset(CPUState *cs, int ail)
{
    uint64_t offset = 0;

    switch (ail) {
    case AIL_0001_8000:
        offset = 0x18000;
        break;
    case AIL_C000_0000_0000_4000:
        offset = 0xc000000000004000ull;
        break;
    default:
        cpu_abort(cs, "Invalid AIL combination %d\n", ail);
        break;
    }

    return offset;
}

/*
 * Note that this function should be greatly optimized when called
 * with a constant excp, from ppc_hw_interrupt
 */
static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    target_ulong msr, new_msr, vector;
    int srr0, srr1, asrr0, asrr1, lev, ail;
    bool lpes0;

    qemu_log_mask(CPU_LOG_INT, "Raise exception at " TARGET_FMT_lx
                  " => %08x (%02x)\n", env->nip, excp, env->error_code);

    /* new srr1 value excluding must-be-zero bits */
    if (excp_model == POWERPC_EXCP_BOOKE) {
        msr = env->msr;
    } else {
        msr = env->msr & ~0x783f0000ULL;
    }

    /*
     * new interrupt handler msr preserves existing HV and ME unless
     * explicitly overridden
     */
    new_msr = env->msr & (((target_ulong)1 << MSR_ME) | MSR_HVB);

    /* target registers */
    srr0 = SPR_SRR0;
    srr1 = SPR_SRR1;
    asrr0 = -1;
    asrr1 = -1;

    /*
     * check for special resume at 0x100 from doze/nap/sleep/winkle on
     * P7/P8/P9
     */
    if (env->resume_as_sreset) {
        excp = powerpc_reset_wakeup(cs, env, excp, &msr);
    }

    /*
     * Exception targeting modifiers
     *
     * LPES0 is supported on POWER7/8/9
     * LPES1 is not supported (old iSeries mode)
     *
     * On anything else, we behave as if LPES0 is 1
     * (externals don't alter MSR:HV)
     *
     * AIL is initialized here but can be cleared by
     * selected exceptions
     */
#if defined(TARGET_PPC64)
    if (excp_model == POWERPC_EXCP_POWER7 ||
        excp_model == POWERPC_EXCP_POWER8 ||
        excp_model == POWERPC_EXCP_POWER9) {
        lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
        if (excp_model != POWERPC_EXCP_POWER7) {
            ail = (env->spr[SPR_LPCR] & LPCR_AIL) >> LPCR_AIL_SHIFT;
        } else {
            ail = 0;
        }
    } else
#endif /* defined(TARGET_PPC64) */
    {
        lpes0 = true;
        ail = 0;
    }

    /*
     * Hypervisor emulation assistance interrupt only exists on server
     * arch 2.05 or later. We also don't want to generate it if we
     * don't have HVB in msr_mask (PAPR mode).
     */
    if (excp == POWERPC_EXCP_HV_EMU
#if defined(TARGET_PPC64)
        && !((env->mmu_model & POWERPC_MMU_64) && (env->msr_mask & MSR_HVB))
#endif /* defined(TARGET_PPC64) */

    ) {
        excp = POWERPC_EXCP_PROGRAM;
    }

    switch (excp) {
    case POWERPC_EXCP_NONE:
        /* Should never happen */
        return;
    case POWERPC_EXCP_CRITICAL: /* Critical input */
        switch (excp_model) {
        case POWERPC_EXCP_40x:
            srr0 = SPR_40x_SRR2;
            srr1 = SPR_40x_SRR3;
            break;
        case POWERPC_EXCP_BOOKE:
            srr0 = SPR_BOOKE_CSRR0;
            srr1 = SPR_BOOKE_CSRR1;
            break;
        case POWERPC_EXCP_G2:
            break;
        default:
            goto excp_invalid;
        }
        break;
    case POWERPC_EXCP_MCHECK: /* Machine check exception */
        if (msr_me == 0) {
            /*
             * Machine check exception is not enabled. Enter
             * checkstop state.
             */
            fprintf(stderr, "Machine check while not allowed. "
                    "Entering checkstop state\n");
            if (qemu_log_separate()) {
                qemu_log("Machine check while not allowed. "
                         "Entering checkstop state\n");
            }
            cs->halted = 1;
            cpu_interrupt_exittb(cs);
        }
        if (env->msr_mask & MSR_HVB) {
            /*
             * ISA specifies HV, but can be delivered to guest with HV
             * clear (e.g., see FWNMI in PAPR).
             */
            new_msr |= (target_ulong)MSR_HVB;
        }
        ail = 0;

        /* machine check exceptions don't have ME set */
        new_msr &= ~((target_ulong)1 << MSR_ME);

        /* XXX: should also have something loaded in DAR / DSISR */
        switch (excp_model) {
        case POWERPC_EXCP_40x:
            srr0 = SPR_40x_SRR2;
            srr1 = SPR_40x_SRR3;
            break;
        case POWERPC_EXCP_BOOKE:
            /* FIXME: choose one or the other based on CPU type */
            srr0 = SPR_BOOKE_MCSRR0;
            srr1 = SPR_BOOKE_MCSRR1;
            asrr0 = SPR_BOOKE_CSRR0;
            asrr1 = SPR_BOOKE_CSRR1;
            break;
        default:
            break;
        }
        break;
    case POWERPC_EXCP_DSI: /* Data storage exception */
        LOG_EXCP("DSI exception: DSISR=" TARGET_FMT_lx" DAR=" TARGET_FMT_lx
                 "\n", env->spr[SPR_DSISR], env->spr[SPR_DAR]);
        break;
    case POWERPC_EXCP_ISI: /* Instruction storage exception */
        LOG_EXCP("ISI exception: msr=" TARGET_FMT_lx ", nip=" TARGET_FMT_lx
                 "\n", msr, env->nip);
        msr |= env->error_code;
        break;
    case POWERPC_EXCP_EXTERNAL: /* External input */
        cs = CPU(cpu);

        if (!lpes0) {
            new_msr |= (target_ulong)MSR_HVB;
            new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
            srr0 = SPR_HSRR0;
            srr1 = SPR_HSRR1;
        }
        if (env->mpic_proxy) {
            /* IACK the IRQ on delivery */
            env->spr[SPR_BOOKE_EPR] = ldl_phys(cs->as, env->mpic_iack);
        }
        break;
    case POWERPC_EXCP_ALIGN: /* Alignment exception */
        /* Get rS/rD and rA from faulting opcode */
        /*
         * Note: the opcode fields will not be set properly for a
         * direct store load/store, but nobody cares as nobody
         * actually uses direct store segments.
         */
        env->spr[SPR_DSISR] |= (env->error_code & 0x03FF0000) >> 16;
        break;
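    /*
     * For program interrupts the cause is flagged in the saved SRR1: the
     * constants OR-ed into msr below correspond to SRR1 bits 43 (IEEE FP
     * enabled exception), 44 (illegal instruction), 45 (privileged
     * instruction) and 46 (trap) in IBM bit numbering; BookE parts report
     * the same information through ESR instead.
     */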
    case POWERPC_EXCP_PROGRAM: /* Program exception */
        switch (env->error_code & ~0xF) {
        case POWERPC_EXCP_FP:
            if ((msr_fe0 == 0 && msr_fe1 == 0) || msr_fp == 0) {
                LOG_EXCP("Ignore floating point exception\n");
                cs->exception_index = POWERPC_EXCP_NONE;
                env->error_code = 0;
                return;
            }

            /*
             * FP exceptions always have NIP pointing to the faulting
             * instruction, so always use store_next and claim we are
             * precise in the MSR.
             */
            msr |= 0x00100000;
            env->spr[SPR_BOOKE_ESR] = ESR_FP;
            break;
        case POWERPC_EXCP_INVAL:
            LOG_EXCP("Invalid instruction at " TARGET_FMT_lx "\n", env->nip);
            msr |= 0x00080000;
            env->spr[SPR_BOOKE_ESR] = ESR_PIL;
            break;
        case POWERPC_EXCP_PRIV:
            msr |= 0x00040000;
            env->spr[SPR_BOOKE_ESR] = ESR_PPR;
            break;
        case POWERPC_EXCP_TRAP:
            msr |= 0x00020000;
            env->spr[SPR_BOOKE_ESR] = ESR_PTR;
            break;
        default:
            /* Should never occur */
            cpu_abort(cs, "Invalid program exception %d. Aborting\n",
                      env->error_code);
            break;
        }
        break;
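    /*
     * For system calls, error_code carries the LEV field of the sc
     * instruction.  "sc 1" (lev == 1) is the hypercall protocol: with a
     * virtual hypervisor (PAPR machines) QEMU handles the hypercall
     * directly, otherwise the interrupt is directed to the hypervisor by
     * setting MSR:HV.
     */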
    case POWERPC_EXCP_SYSCALL: /* System call exception */
        dump_syscall(env);
        lev = env->error_code;

        /*
         * We need to correct the NIP which in this case is supposed
         * to point to the next instruction
         */
        env->nip += 4;

        /* "PAPR mode" built-in hypercall emulation */
        if ((lev == 1) && cpu->vhyp) {
            PPCVirtualHypervisorClass *vhc =
                PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
            vhc->hypercall(cpu->vhyp, cpu);
            return;
        }
        if (lev == 1) {
            new_msr |= (target_ulong)MSR_HVB;
        }
        break;
    case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */
    case POWERPC_EXCP_APU: /* Auxiliary processor unavailable */
    case POWERPC_EXCP_DECR: /* Decrementer exception */
        break;
    case POWERPC_EXCP_FIT: /* Fixed-interval timer interrupt */
        /* FIT on 4xx */
        LOG_EXCP("FIT exception\n");
        break;
    case POWERPC_EXCP_WDT: /* Watchdog timer interrupt */
        LOG_EXCP("WDT exception\n");
        switch (excp_model) {
        case POWERPC_EXCP_BOOKE:
            srr0 = SPR_BOOKE_CSRR0;
            srr1 = SPR_BOOKE_CSRR1;
            break;
        default:
            break;
        }
        break;
    case POWERPC_EXCP_DTLB: /* Data TLB error */
    case POWERPC_EXCP_ITLB: /* Instruction TLB error */
        break;
    case POWERPC_EXCP_DEBUG: /* Debug interrupt */
        if (env->flags & POWERPC_FLAG_DE) {
            /* FIXME: choose one or the other based on CPU type */
            srr0 = SPR_BOOKE_DSRR0;
            srr1 = SPR_BOOKE_DSRR1;
            asrr0 = SPR_BOOKE_CSRR0;
            asrr1 = SPR_BOOKE_CSRR1;
            /* DBSR already modified by caller */
        } else {
            cpu_abort(cs, "Debug exception triggered on unsupported model\n");
        }
        break;
    case POWERPC_EXCP_SPEU: /* SPE/embedded floating-point unavailable */
        env->spr[SPR_BOOKE_ESR] = ESR_SPV;
        break;
    case POWERPC_EXCP_EFPDI: /* Embedded floating-point data interrupt */
        /* XXX: TODO */
        cpu_abort(cs, "Embedded floating point data exception "
                  "is not implemented yet !\n");
        env->spr[SPR_BOOKE_ESR] = ESR_SPV;
        break;
    case POWERPC_EXCP_EFPRI: /* Embedded floating-point round interrupt */
        /* XXX: TODO */
        cpu_abort(cs, "Embedded floating point round exception "
                  "is not implemented yet !\n");
        env->spr[SPR_BOOKE_ESR] = ESR_SPV;
        break;
    case POWERPC_EXCP_EPERFM: /* Embedded performance monitor interrupt */
        /* XXX: TODO */
        cpu_abort(cs,
                  "Performance counter exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_DOORI: /* Embedded doorbell interrupt */
        break;
    case POWERPC_EXCP_DOORCI: /* Embedded doorbell critical interrupt */
        srr0 = SPR_BOOKE_CSRR0;
        srr1 = SPR_BOOKE_CSRR1;
        break;
    case POWERPC_EXCP_RESET: /* System reset exception */
        /* A power-saving exception sets ME, otherwise it is unchanged */
        if (msr_pow) {
            /* indicate that we resumed from power save mode */
            msr |= 0x10000;
            new_msr |= ((target_ulong)1 << MSR_ME);
        }
        if (env->msr_mask & MSR_HVB) {
            /*
             * ISA specifies HV, but can be delivered to guest with HV
             * clear (e.g., see FWNMI in PAPR, NMI injection in QEMU).
             */
            new_msr |= (target_ulong)MSR_HVB;
        } else {
            if (msr_pow) {
                cpu_abort(cs, "Trying to deliver power-saving system reset "
                          "exception %d with no HV support\n", excp);
            }
        }
        ail = 0;
        break;
    case POWERPC_EXCP_DSEG: /* Data segment exception */
    case POWERPC_EXCP_ISEG: /* Instruction segment exception */
    case POWERPC_EXCP_TRACE: /* Trace exception */
        break;
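    /*
     * The hypervisor-class interrupts below are delivered with MSR:HV set
     * and use HSRR0/HSRR1 rather than SRR0/SRR1; MSR:RI is additionally
     * carried over from the interrupted context.
     */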
    case POWERPC_EXCP_HDECR: /* Hypervisor decrementer exception */
    case POWERPC_EXCP_HDSI: /* Hypervisor data storage exception */
    case POWERPC_EXCP_HISI: /* Hypervisor instruction storage exception */
    case POWERPC_EXCP_HDSEG: /* Hypervisor data segment exception */
    case POWERPC_EXCP_HISEG: /* Hypervisor instruction segment exception */
    case POWERPC_EXCP_SDOOR_HV: /* Hypervisor Doorbell interrupt */
    case POWERPC_EXCP_HV_EMU:
    case POWERPC_EXCP_HVIRT: /* Hypervisor virtualization */
        srr0 = SPR_HSRR0;
        srr1 = SPR_HSRR1;
        new_msr |= (target_ulong)MSR_HVB;
        new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
        break;
    case POWERPC_EXCP_VPU: /* Vector unavailable exception */
    case POWERPC_EXCP_VSXU: /* VSX unavailable exception */
    case POWERPC_EXCP_FU: /* Facility unavailable exception */
#ifdef TARGET_PPC64
        env->spr[SPR_FSCR] |= ((target_ulong)env->error_code << 56);
#endif
        break;
    case POWERPC_EXCP_PIT: /* Programmable interval timer interrupt */
        LOG_EXCP("PIT exception\n");
        break;
    case POWERPC_EXCP_IO: /* IO error exception */
        /* XXX: TODO */
        cpu_abort(cs, "601 IO error exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_RUNM: /* Run mode exception */
        /* XXX: TODO */
        cpu_abort(cs, "601 run mode exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_EMUL: /* Emulation trap exception */
        /* XXX: TODO */
        cpu_abort(cs, "602 emulation trap exception "
                  "is not implemented yet !\n");
        break;
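    /*
     * The 602/603/G2, 7x5 and 74xx families reload TLB entries in
     * software.  The TLB miss exceptions below therefore only prepare the
     * state the miss handler expects: on 6xx/7x5, CR0, the access key and
     * D/I, S/L flags plus a round-robin way selection are folded into the
     * saved SRR1; on 74xx only the key bit is; and 602/603/G2 additionally
     * switch to the shadow TGPRs before vectoring to the handler.
     */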
    case POWERPC_EXCP_IFTLB: /* Instruction fetch TLB error */
        switch (excp_model) {
        case POWERPC_EXCP_602:
        case POWERPC_EXCP_603:
        case POWERPC_EXCP_603E:
        case POWERPC_EXCP_G2:
            goto tlb_miss_tgpr;
        case POWERPC_EXCP_7x5:
            goto tlb_miss;
        case POWERPC_EXCP_74xx:
            goto tlb_miss_74xx;
        default:
            cpu_abort(cs, "Invalid instruction TLB miss exception\n");
            break;
        }
        break;
    case POWERPC_EXCP_DLTLB: /* Data load TLB miss */
        switch (excp_model) {
        case POWERPC_EXCP_602:
        case POWERPC_EXCP_603:
        case POWERPC_EXCP_603E:
        case POWERPC_EXCP_G2:
            goto tlb_miss_tgpr;
        case POWERPC_EXCP_7x5:
            goto tlb_miss;
        case POWERPC_EXCP_74xx:
            goto tlb_miss_74xx;
        default:
            cpu_abort(cs, "Invalid data load TLB miss exception\n");
            break;
        }
        break;
    case POWERPC_EXCP_DSTLB: /* Data store TLB miss */
        switch (excp_model) {
        case POWERPC_EXCP_602:
        case POWERPC_EXCP_603:
        case POWERPC_EXCP_603E:
        case POWERPC_EXCP_G2:
        tlb_miss_tgpr:
            /* Swap temporary saved registers with GPRs */
            if (!(new_msr & ((target_ulong)1 << MSR_TGPR))) {
                new_msr |= (target_ulong)1 << MSR_TGPR;
                hreg_swap_gpr_tgpr(env);
            }
            goto tlb_miss;
        case POWERPC_EXCP_7x5:
        tlb_miss:
#if defined(DEBUG_SOFTWARE_TLB)
            if (qemu_log_enabled()) {
                const char *es;
                target_ulong *miss, *cmp;
                int en;

                if (excp == POWERPC_EXCP_IFTLB) {
                    es = "I";
                    en = 'I';
                    miss = &env->spr[SPR_IMISS];
                    cmp = &env->spr[SPR_ICMP];
                } else {
                    if (excp == POWERPC_EXCP_DLTLB) {
                        es = "DL";
                    } else {
                        es = "DS";
                    }
                    en = 'D';
                    miss = &env->spr[SPR_DMISS];
                    cmp = &env->spr[SPR_DCMP];
                }
                qemu_log("6xx %sTLB miss: %cM " TARGET_FMT_lx " %cC "
                         TARGET_FMT_lx " H1 " TARGET_FMT_lx " H2 "
                         TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp,
                         env->spr[SPR_HASH1], env->spr[SPR_HASH2],
                         env->error_code);
            }
#endif
            msr |= env->crf[0] << 28;
            msr |= env->error_code; /* key, D/I, S/L bits */
            /* Set way using a LRU mechanism */
            msr |= ((env->last_way + 1) & (env->nb_ways - 1)) << 17;
            break;
        case POWERPC_EXCP_74xx:
        tlb_miss_74xx:
#if defined(DEBUG_SOFTWARE_TLB)
            if (qemu_log_enabled()) {
                const char *es;
                target_ulong *miss, *cmp;
                int en;

                if (excp == POWERPC_EXCP_IFTLB) {
                    es = "I";
                    en = 'I';
                    miss = &env->spr[SPR_TLBMISS];
                    cmp = &env->spr[SPR_PTEHI];
                } else {
                    if (excp == POWERPC_EXCP_DLTLB) {
                        es = "DL";
                    } else {
                        es = "DS";
                    }
                    en = 'D';
                    miss = &env->spr[SPR_TLBMISS];
                    cmp = &env->spr[SPR_PTEHI];
                }
                qemu_log("74xx %sTLB miss: %cM " TARGET_FMT_lx " %cC "
                         TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp,
                         env->error_code);
            }
#endif
            msr |= env->error_code; /* key bit */
            break;
        default:
            cpu_abort(cs, "Invalid data store TLB miss exception\n");
            break;
        }
        break;
    case POWERPC_EXCP_FPA: /* Floating-point assist exception */
        /* XXX: TODO */
        cpu_abort(cs, "Floating point assist exception "
                  "is not implemented yet !\n");
        break;
    case POWERPC_EXCP_DABR: /* Data address breakpoint */
        /* XXX: TODO */
        cpu_abort(cs, "DABR exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_IABR: /* Instruction address breakpoint */
        /* XXX: TODO */
        cpu_abort(cs, "IABR exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_SMI: /* System management interrupt */
        /* XXX: TODO */
        cpu_abort(cs, "SMI exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_THERM: /* Thermal interrupt */
        /* XXX: TODO */
        cpu_abort(cs, "Thermal management exception "
                  "is not implemented yet !\n");
        break;
    case POWERPC_EXCP_PERFM: /* Embedded performance monitor interrupt */
        /* XXX: TODO */
        cpu_abort(cs,
                  "Performance counter exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_VPUA: /* Vector assist exception */
        /* XXX: TODO */
        cpu_abort(cs, "VPU assist exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_SOFTP: /* Soft patch exception */
        /* XXX: TODO */
        cpu_abort(cs,
                  "970 soft-patch exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_MAINT: /* Maintenance exception */
        /* XXX: TODO */
        cpu_abort(cs,
                  "970 maintenance exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_MEXTBR: /* Maskable external breakpoint */
        /* XXX: TODO */
        cpu_abort(cs, "Maskable external exception "
                  "is not implemented yet !\n");
        break;
    case POWERPC_EXCP_NMEXTBR: /* Non maskable external breakpoint */
        /* XXX: TODO */
        cpu_abort(cs, "Non maskable external exception "
                  "is not implemented yet !\n");
        break;
    default:
    excp_invalid:
        cpu_abort(cs, "Invalid PowerPC exception %d. Aborting\n", excp);
        break;
    }

    /* Save PC */
    env->spr[srr0] = env->nip;

    /* Save MSR */
    env->spr[srr1] = msr;

    /* Sanity check */
    if (!(env->msr_mask & MSR_HVB)) {
        if (new_msr & MSR_HVB) {
            cpu_abort(cs, "Trying to deliver HV exception (MSR) %d with "
                      "no HV support\n", excp);
        }
        if (srr0 == SPR_HSRR0) {
            cpu_abort(cs, "Trying to deliver HV exception (HSRR) %d with "
                      "no HV support\n", excp);
        }
    }

    /* If any alternate SRR registers are defined, duplicate saved values */
    if (asrr0 != -1) {
        env->spr[asrr0] = env->spr[srr0];
    }
    if (asrr1 != -1) {
        env->spr[asrr1] = env->spr[srr1];
    }

    /*
     * Sort out endianness of interrupt, this differs depending on the
     * CPU, the HV mode, etc...
     */
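    /*
     * Concretely: POWER7 honours LPCR:ILE for non-HV delivery only;
     * POWER8/POWER9 use HID0:HILE when the interrupt is taken in HV mode
     * and LPCR:ILE otherwise; all other CPUs fall back to MSR:ILE.
     */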
#ifdef TARGET_PPC64
    if (excp_model == POWERPC_EXCP_POWER7) {
        if (!(new_msr & MSR_HVB) && (env->spr[SPR_LPCR] & LPCR_ILE)) {
            new_msr |= (target_ulong)1 << MSR_LE;
        }
    } else if (excp_model == POWERPC_EXCP_POWER8) {
        if (new_msr & MSR_HVB) {
            if (env->spr[SPR_HID0] & HID0_HILE) {
                new_msr |= (target_ulong)1 << MSR_LE;
            }
        } else if (env->spr[SPR_LPCR] & LPCR_ILE) {
            new_msr |= (target_ulong)1 << MSR_LE;
        }
    } else if (excp_model == POWERPC_EXCP_POWER9) {
        if (new_msr & MSR_HVB) {
            if (env->spr[SPR_HID0] & HID0_POWER9_HILE) {
                new_msr |= (target_ulong)1 << MSR_LE;
            }
        } else if (env->spr[SPR_LPCR] & LPCR_ILE) {
            new_msr |= (target_ulong)1 << MSR_LE;
        }
    } else if (msr_ile) {
        new_msr |= (target_ulong)1 << MSR_LE;
    }
#else
    if (msr_ile) {
        new_msr |= (target_ulong)1 << MSR_LE;
    }
#endif

    /* Jump to handler */
    vector = env->excp_vectors[excp];
    if (vector == (target_ulong)-1ULL) {
        cpu_abort(cs, "Raised an exception without defined vector %d\n",
                  excp);
    }
    vector |= env->excp_prefix;

    /*
     * AIL only works if there is no HV transition and we are running
     * with translations enabled
     */
    if (!((msr >> MSR_IR) & 1) || !((msr >> MSR_DR) & 1) ||
        ((new_msr & MSR_HVB) && !(msr & MSR_HVB))) {
        ail = 0;
    }
    /* Handle AIL */
    if (ail) {
        new_msr |= (1 << MSR_IR) | (1 << MSR_DR);
        vector |= ppc_excp_vector_offset(cs, ail);
    }

#if defined(TARGET_PPC64)
    if (excp_model == POWERPC_EXCP_BOOKE) {
        if (env->spr[SPR_BOOKE_EPCR] & EPCR_ICM) {
            /* Cat.64-bit: EPCR.ICM is copied to MSR.CM */
            new_msr |= (target_ulong)1 << MSR_CM;
        } else {
            vector = (uint32_t)vector;
        }
    } else {
        if (!msr_isf && !(env->mmu_model & POWERPC_MMU_64)) {
            vector = (uint32_t)vector;
        } else {
            new_msr |= (target_ulong)1 << MSR_SF;
        }
    }
#endif
    /*
     * We don't use hreg_store_msr here as we already have treated any
     * special case that could occur. Just store MSR and update hflags
     *
     * Note: We *MUST* not use hreg_store_msr() as-is anyway because it
     * will prevent setting of the HV bit which some exceptions might need
     * to do.
     */
    env->msr = new_msr & env->msr_mask;
    hreg_compute_hflags(env);
    env->nip = vector;
    /* Reset exception state */
    cs->exception_index = POWERPC_EXCP_NONE;
    env->error_code = 0;

    /* Reset the reservation */
    env->reserve_addr = -1;

    /*
     * Any interrupt is context synchronizing, check if TCG TLB needs
     * a delayed flush on ppc64
     */
    check_tlb_flush(env, false);
}

void ppc_cpu_do_interrupt(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;

    powerpc_excp(cpu, env->excp_model, cs->exception_index);
}

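/*
 * Deliver the highest-priority pending hardware interrupt.  The order of
 * the checks below is the delivery priority: system reset, machine check,
 * the hypervisor decrementer and virtualization interrupts, external and
 * critical inputs, then the remaining asynchronous sources (watchdog,
 * doorbells, fixed-interval and programmable timers, decrementer,
 * performance monitor, thermal), which are only considered when MSR:EE is
 * set or we are resuming from a power-saving state.
 */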
static void ppc_hw_interrupt(CPUPPCState *env)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
    bool async_deliver;

    /* External reset */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_RESET)) {
        env->pending_interrupts &= ~(1 << PPC_INTERRUPT_RESET);
        powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_RESET);
        return;
    }
    /* Machine check exception */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_MCK)) {
        env->pending_interrupts &= ~(1 << PPC_INTERRUPT_MCK);
        powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_MCHECK);
        return;
    }
#if 0 /* TODO */
    /* External debug exception */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_DEBUG)) {
        env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DEBUG);
        powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DEBUG);
        return;
    }
#endif

    /*
     * For interrupts that gate on MSR:EE, we need to do something a
     * bit more subtle, as we need to let them through even when EE is
     * clear when coming out of some power management states (in order
     * for them to become a 0x100).
     */
    async_deliver = (msr_ee != 0) || env->resume_as_sreset;

    /* Hypervisor decrementer exception */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDECR)) {
        /* LPCR will be clear when not supported so this will work */
        bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE);
        if ((async_deliver || msr_hv == 0) && hdice) {
            /* HDEC clears on delivery */
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDECR);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_HDECR);
            return;
        }
    }

    /* Hypervisor virtualization interrupt */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_HVIRT)) {
        /* LPCR will be clear when not supported so this will work */
        bool hvice = !!(env->spr[SPR_LPCR] & LPCR_HVICE);
        if ((async_deliver || msr_hv == 0) && hvice) {
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_HVIRT);
            return;
        }
    }

    /* External interrupt can ignore MSR:EE under some circumstances */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_EXT)) {
        bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
        bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC);
        /* HEIC blocks delivery to the hypervisor */
        if ((async_deliver && !(heic && msr_hv && !msr_pr)) ||
            (env->has_hv_mode && msr_hv == 0 && !lpes0)) {
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_EXTERNAL);
            return;
        }
    }
    if (msr_ce != 0) {
        /* External critical interrupt */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_CEXT)) {
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_CRITICAL);
            return;
        }
    }
    if (async_deliver != 0) {
        /* Watchdog timer on embedded PowerPC */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_WDT)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_WDT);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_WDT);
            return;
        }
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_CDOORBELL)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_CDOORBELL);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DOORCI);
            return;
        }
        /* Fixed interval timer on embedded PowerPC */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_FIT)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_FIT);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_FIT);
            return;
        }
        /* Programmable interval timer on embedded PowerPC */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_PIT)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PIT);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_PIT);
            return;
        }
        /* Decrementer exception */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_DECR)) {
            if (ppc_decr_clear_on_delivery(env)) {
                env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DECR);
            }
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DECR);
            return;
        }
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_DOORBELL)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DOORBELL);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DOORI);
            return;
        }
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDOORBELL)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDOORBELL);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_SDOOR_HV);
            return;
        }
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_PERFM)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PERFM);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_PERFM);
            return;
        }
        /* Thermal interrupt */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_THERM)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_THERM);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_THERM);
            return;
        }
    }

    if (env->resume_as_sreset) {
        /*
         * This is a bug! It means that has_work took us out of halt
         * without anything to deliver while in a PM state that requires
         * getting out via a 0x100.
         *
         * This means we will incorrectly execute past the power management
         * instruction instead of triggering a reset.
         *
         * It generally means a discrepancy between the wakeup conditions in
         * the processor has_work implementation and the logic in this
         * function.
         */
        cpu_abort(CPU(ppc_env_get_cpu(env)),
                  "Wakeup from PM state but interrupt Undelivered");
    }
}

void ppc_cpu_do_system_reset(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;

    powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_RESET);
}
#endif /* !CONFIG_USER_ONLY */

bool ppc_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;

    if (interrupt_request & CPU_INTERRUPT_HARD) {
        ppc_hw_interrupt(env);
        if (env->pending_interrupts == 0) {
            cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
        }
        return true;
    }
    return false;
}

#if defined(DEBUG_OP)
static void cpu_dump_rfi(target_ulong RA, target_ulong msr)
{
    qemu_log("Return from exception at " TARGET_FMT_lx " with flags "
             TARGET_FMT_lx "\n", RA, msr);
}
#endif

/*****************************************************************************/
/* Exceptions processing helpers */

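/*
 * raddr is the host return address within the translated code that raised
 * the exception (typically GETPC() in the calling helper);
 * cpu_loop_exit_restore() uses it to resynchronize the guest state before
 * unwinding to the CPU loop.  A raddr of 0 means the state is already up
 * to date.
 */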
void raise_exception_err_ra(CPUPPCState *env, uint32_t exception,
                            uint32_t error_code, uintptr_t raddr)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));

    cs->exception_index = exception;
    env->error_code = error_code;
    cpu_loop_exit_restore(cs, raddr);
}

void raise_exception_err(CPUPPCState *env, uint32_t exception,
                         uint32_t error_code)
{
    raise_exception_err_ra(env, exception, error_code, 0);
}

void raise_exception(CPUPPCState *env, uint32_t exception)
{
    raise_exception_err_ra(env, exception, 0, 0);
}

void raise_exception_ra(CPUPPCState *env, uint32_t exception,
                        uintptr_t raddr)
{
    raise_exception_err_ra(env, exception, 0, raddr);
}

void helper_raise_exception_err(CPUPPCState *env, uint32_t exception,
                                uint32_t error_code)
{
    raise_exception_err_ra(env, exception, error_code, 0);
}

void helper_raise_exception(CPUPPCState *env, uint32_t exception)
{
    raise_exception_err_ra(env, exception, 0, 0);
}

#if !defined(CONFIG_USER_ONLY)
void helper_store_msr(CPUPPCState *env, target_ulong val)
{
    uint32_t excp = hreg_store_msr(env, val, 0);

    if (excp != 0) {
        CPUState *cs = CPU(ppc_env_get_cpu(env));
        cpu_interrupt_exittb(cs);
        raise_exception(env, excp);
    }
}

#if defined(TARGET_PPC64)
void helper_pminsn(CPUPPCState *env, powerpc_pm_insn_t insn)
{
    CPUState *cs;

    cs = CPU(ppc_env_get_cpu(env));
    cs->halted = 1;

    /*
     * The architecture specifies that HDEC interrupts are discarded
     * in PM states
     */
    env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDECR);

    /* Condition for waking up at 0x100 */
    env->resume_as_sreset = (insn != PPC_PM_STOP) ||
        (env->spr[SPR_PSSCR] & PSSCR_EC);
}
#endif /* defined(TARGET_PPC64) */

static inline void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));

    /* MSR:POW cannot be set by any form of rfi */
    msr &= ~(1ULL << MSR_POW);

#if defined(TARGET_PPC64)
    /* Switching to 32-bit? Crop the nip */
    if (!msr_is_64bit(env, msr)) {
        nip = (uint32_t)nip;
    }
#else
    nip = (uint32_t)nip;
#endif
    /* XXX: beware: this is false if VLE is supported */
    env->nip = nip & ~((target_ulong)0x00000003);
    hreg_store_msr(env, msr, 1);
#if defined(DEBUG_OP)
    cpu_dump_rfi(env->nip, env->msr);
#endif
    /*
     * No need to raise an exception here, as rfi is always the last
     * insn of a TB
     */
    cpu_interrupt_exittb(cs);
    /* Reset the reservation */
    env->reserve_addr = -1;

    /* Context synchronizing: check if TCG TLB needs flush */
    check_tlb_flush(env, false);
}

void helper_rfi(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1] & 0xfffffffful);
}

#define MSR_BOOK3S_MASK
#if defined(TARGET_PPC64)
void helper_rfid(CPUPPCState *env)
{
    /*
     * The architecture defines a number of rules for which bits can
     * change but in practice, we handle this in hreg_store_msr()
     * which will be called by do_rfi(), so there is no need to filter
     * here
     */
    do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1]);
}

void helper_hrfid(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_HSRR0], env->spr[SPR_HSRR1]);
}
#endif

/*****************************************************************************/
/* Embedded PowerPC specific helpers */
void helper_40x_rfci(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3]);
}

void helper_rfci(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1]);
}

void helper_rfdi(CPUPPCState *env)
{
    /* FIXME: choose CSRR1 or DSRR1 based on cpu type */
    do_rfi(env, env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1]);
}

void helper_rfmci(CPUPPCState *env)
{
    /* FIXME: choose CSRR1 or MCSRR1 based on cpu type */
    do_rfi(env, env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1]);
}
#endif

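/*
 * tw/td trap helpers: "flags" is the TO field of the trap instruction,
 * one bit per comparison of arg1 (rA) with arg2: 0x10 signed less than,
 * 0x08 signed greater than, 0x04 equal, 0x02 unsigned less than, 0x01
 * unsigned greater than.  If any selected condition holds, a trap-type
 * program interrupt is raised.
 */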
void helper_tw(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
               uint32_t flags)
{
    if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
                  ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
                  ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
                  ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
                  ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_TRAP, GETPC());
    }
}

#if defined(TARGET_PPC64)
void helper_td(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
               uint32_t flags)
{
    if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
                  ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
                  ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
                  ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
                  ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01))))) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_TRAP, GETPC());
    }
}
#endif

#if !defined(CONFIG_USER_ONLY)
/*****************************************************************************/
/* PowerPC 601 specific instructions (POWER bridge) */

void helper_rfsvc(CPUPPCState *env)
{
    do_rfi(env, env->lr, env->ctr & 0x0000FFFF);
}

/* Embedded.Processor Control */
static int dbell2irq(target_ulong rb)
{
    int msg = rb & DBELL_TYPE_MASK;
    int irq = -1;

    switch (msg) {
    case DBELL_TYPE_DBELL:
        irq = PPC_INTERRUPT_DOORBELL;
        break;
    case DBELL_TYPE_DBELL_CRIT:
        irq = PPC_INTERRUPT_CDOORBELL;
        break;
    case DBELL_TYPE_G_DBELL:
    case DBELL_TYPE_G_DBELL_CRIT:
    case DBELL_TYPE_G_DBELL_MC:
        /* XXX implement */
    default:
        break;
    }

    return irq;
}

void helper_msgclr(CPUPPCState *env, target_ulong rb)
{
    int irq = dbell2irq(rb);

    if (irq < 0) {
        return;
    }

    env->pending_interrupts &= ~(1 << irq);
}

void helper_msgsnd(target_ulong rb)
{
    int irq = dbell2irq(rb);
    int pir = rb & DBELL_PIRTAG_MASK;
    CPUState *cs;

    if (irq < 0) {
        return;
    }

    qemu_mutex_lock_iothread();
    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);
        CPUPPCState *cenv = &cpu->env;

        if ((rb & DBELL_BRDCAST) || (cenv->spr[SPR_BOOKE_PIR] == pir)) {
            cenv->pending_interrupts |= 1 << irq;
            cpu_interrupt(cs, CPU_INTERRUPT_HARD);
        }
    }
    qemu_mutex_unlock_iothread();
}

/* Server Processor Control */
static int book3s_dbell2irq(target_ulong rb)
{
    int msg = rb & DBELL_TYPE_MASK;

    /*
     * A Directed Hypervisor Doorbell message is sent only if the
     * message type is 5. All other types are reserved and the
     * instruction is a no-op
     */
    return msg == DBELL_TYPE_DBELL_SERVER ? PPC_INTERRUPT_HDOORBELL : -1;
}

void helper_book3s_msgclr(CPUPPCState *env, target_ulong rb)
{
    int irq = book3s_dbell2irq(rb);

    if (irq < 0) {
        return;
    }

    env->pending_interrupts &= ~(1 << irq);
}

void helper_book3s_msgsnd(target_ulong rb)
{
    int irq = book3s_dbell2irq(rb);
    int pir = rb & DBELL_PROCIDTAG_MASK;
    CPUState *cs;

    if (irq < 0) {
        return;
    }

    qemu_mutex_lock_iothread();
    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);
        CPUPPCState *cenv = &cpu->env;

        /* TODO: broadcast message to all threads of the same processor */
        if (cenv->spr_cb[SPR_PIR].default_value == pir) {
            cenv->pending_interrupts |= 1 << irq;
            cpu_interrupt(cs, CPU_INTERRUPT_HARD);
        }
    }
    qemu_mutex_unlock_iothread();
}
#endif

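/*
 * Alignment interrupts need the rS/rD and rA fields of the faulting
 * opcode so that powerpc_excp() can copy them into DSISR; error_code
 * therefore carries instruction bits 6-15 (the 0x03FF0000 mask), which
 * the POWERPC_EXCP_ALIGN case shifts into the low bits of DSISR.
 */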
void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr)
{
    CPUPPCState *env = cs->env_ptr;
    uint32_t insn;

    /* Restore state and reload the insn we executed, for filling in DSISR. */
    cpu_restore_state(cs, retaddr, true);
    insn = cpu_ldl_code(env, env->nip);

    cs->exception_index = POWERPC_EXCP_ALIGN;
    env->error_code = insn & 0x03FF0000;
    cpu_loop_exit(cs);
}