/*
 *  PowerPC exception emulation helpers for QEMU.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "internal.h"
#include "helper_regs.h"

/* #define DEBUG_OP */
/* #define DEBUG_SOFTWARE_TLB */
/* #define DEBUG_EXCEPTIONS */

#ifdef DEBUG_EXCEPTIONS
# define LOG_EXCP(...) qemu_log(__VA_ARGS__)
#else
# define LOG_EXCP(...) do { } while (0)
#endif

/*****************************************************************************/
/* Exception processing */
#if defined(CONFIG_USER_ONLY)
void ppc_cpu_do_interrupt(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;

    cs->exception_index = POWERPC_EXCP_NONE;
    env->error_code = 0;
}

static void ppc_hw_interrupt(CPUPPCState *env)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = POWERPC_EXCP_NONE;
    env->error_code = 0;
}
#else /* defined(CONFIG_USER_ONLY) */
static inline void dump_syscall(CPUPPCState *env)
{
    qemu_log_mask(CPU_LOG_INT, "syscall r0=%016" PRIx64 " r3=%016" PRIx64
                  " r4=%016" PRIx64 " r5=%016" PRIx64 " r6=%016" PRIx64
                  " nip=" TARGET_FMT_lx "\n",
                  ppc_dump_gpr(env, 0), ppc_dump_gpr(env, 3),
                  ppc_dump_gpr(env, 4), ppc_dump_gpr(env, 5),
                  ppc_dump_gpr(env, 6), env->nip);
}

static int powerpc_reset_wakeup(CPUState *cs, CPUPPCState *env, int excp,
                                target_ulong *msr)
{
    /* We are no longer in a PM state */
    env->resume_as_sreset = false;

    /* Always pretend we are returning from doze, as we don't lose state */
    *msr |= (0x1ull << (63 - 47));

    /* Machine checks are sent normally */
    if (excp == POWERPC_EXCP_MCHECK) {
        return excp;
    }
    switch (excp) {
    case POWERPC_EXCP_RESET:
        *msr |= 0x4ull << (63 - 45);
        break;
    case POWERPC_EXCP_EXTERNAL:
        *msr |= 0x8ull << (63 - 45);
        break;
    case POWERPC_EXCP_DECR:
        *msr |= 0x6ull << (63 - 45);
        break;
    case POWERPC_EXCP_SDOOR:
        *msr |= 0x5ull << (63 - 45);
        break;
    case POWERPC_EXCP_SDOOR_HV:
        *msr |= 0x3ull << (63 - 45);
        break;
    case POWERPC_EXCP_HV_MAINT:
        *msr |= 0xaull << (63 - 45);
        break;
    case POWERPC_EXCP_HVIRT:
        *msr |= 0x9ull << (63 - 45);
        break;
    default:
        cpu_abort(cs, "Unsupported exception %d in Power Save mode\n",
                  excp);
    }
    return POWERPC_EXCP_RESET;
}

static uint64_t ppc_excp_vector_offset(CPUState *cs, int ail)
{
    uint64_t offset = 0;

    switch (ail) {
    case AIL_NONE:
        break;
    case AIL_0001_8000:
        offset = 0x18000;
        break;
    case AIL_C000_0000_0000_4000:
        offset = 0xc000000000004000ull;
        break;
    default:
        cpu_abort(cs, "Invalid AIL combination %d\n", ail);
        break;
    }

    return offset;
}
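
/*
 * powerpc_excp() below is the common delivery path for all exceptions:
 * it picks the save/restore register pair (SRR0/SRR1 or a model-specific
 * alternative), builds the SRR1 image from the current MSR, computes the
 * MSR the handler will run with, and branches to the (possibly
 * AIL-relocated) vector.
 */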

/*
 * Note that this function should be greatly optimized when called
 * with a constant excp, from ppc_hw_interrupt
 */
static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    target_ulong msr, new_msr, vector;
    int srr0, srr1, asrr0, asrr1, lev, ail;
    bool lpes0;

    qemu_log_mask(CPU_LOG_INT, "Raise exception at " TARGET_FMT_lx
                  " => %08x (%02x)\n", env->nip, excp, env->error_code);

    /* new srr1 value excluding must-be-zero bits */
    if (excp_model == POWERPC_EXCP_BOOKE) {
        msr = env->msr;
    } else {
        msr = env->msr & ~0x783f0000ULL;
    }

    /*
     * new interrupt handler msr preserves existing HV and ME unless
     * explicitly overridden
     */
    new_msr = env->msr & (((target_ulong)1 << MSR_ME) | MSR_HVB);

    /* target registers */
    srr0 = SPR_SRR0;
    srr1 = SPR_SRR1;
    asrr0 = -1;
    asrr1 = -1;

    /*
     * check for special resume at 0x100 from doze/nap/sleep/winkle on
     * P7/P8/P9
     */
    if (env->resume_as_sreset) {
        excp = powerpc_reset_wakeup(cs, env, excp, &msr);
    }

    /*
     * Exception targeting modifiers
     *
     * LPES0 is supported on POWER7/8/9
     * LPES1 is not supported (old iSeries mode)
     *
     * On anything else, we behave as if LPES0 is 1
     * (externals don't alter MSR:HV)
     *
     * AIL is initialized here but can be cleared by
     * selected exceptions
     */
#if defined(TARGET_PPC64)
    if (excp_model == POWERPC_EXCP_POWER7 ||
        excp_model == POWERPC_EXCP_POWER8 ||
        excp_model == POWERPC_EXCP_POWER9) {
        lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
        if (excp_model != POWERPC_EXCP_POWER7) {
            ail = (env->spr[SPR_LPCR] & LPCR_AIL) >> LPCR_AIL_SHIFT;
        } else {
            ail = 0;
        }
    } else
#endif /* defined(TARGET_PPC64) */
    {
        lpes0 = true;
        ail = 0;
    }

    /*
     * Hypervisor emulation assistance interrupt only exists on server
     * arch 2.05 or later. We also don't want to generate it if we
     * don't have HVB in msr_mask (PAPR mode).
     */
    if (excp == POWERPC_EXCP_HV_EMU
#if defined(TARGET_PPC64)
        && !((env->mmu_model & POWERPC_MMU_64) && (env->msr_mask & MSR_HVB))
#endif /* defined(TARGET_PPC64) */

        ) {
        excp = POWERPC_EXCP_PROGRAM;
    }

    switch (excp) {
    case POWERPC_EXCP_NONE:
        /* Should never happen */
        return;
    case POWERPC_EXCP_CRITICAL:    /* Critical input */
        switch (excp_model) {
        case POWERPC_EXCP_40x:
            srr0 = SPR_40x_SRR2;
            srr1 = SPR_40x_SRR3;
            break;
        case POWERPC_EXCP_BOOKE:
            srr0 = SPR_BOOKE_CSRR0;
            srr1 = SPR_BOOKE_CSRR1;
            break;
        case POWERPC_EXCP_G2:
            break;
        default:
            goto excp_invalid;
        }
        break;
    case POWERPC_EXCP_MCHECK:    /* Machine check exception */
        if (msr_me == 0) {
            /*
             * Machine check exception is not enabled. Enter
             * checkstop state.
             */
            fprintf(stderr, "Machine check while not allowed. "
                    "Entering checkstop state\n");
            if (qemu_log_separate()) {
                qemu_log("Machine check while not allowed. "
                         "Entering checkstop state\n");
            }
            cs->halted = 1;
            cpu_interrupt_exittb(cs);
        }
        if (env->msr_mask & MSR_HVB) {
            /*
             * ISA specifies HV, but can be delivered to guest with HV
             * clear (e.g., see FWNMI in PAPR).
             */
            new_msr |= (target_ulong)MSR_HVB;
        }
        ail = 0;

        /* machine check exceptions don't have ME set */
        new_msr &= ~((target_ulong)1 << MSR_ME);

        /* XXX: should also have something loaded in DAR / DSISR */
        switch (excp_model) {
        case POWERPC_EXCP_40x:
            srr0 = SPR_40x_SRR2;
            srr1 = SPR_40x_SRR3;
            break;
        case POWERPC_EXCP_BOOKE:
            /* FIXME: choose one or the other based on CPU type */
            srr0 = SPR_BOOKE_MCSRR0;
            srr1 = SPR_BOOKE_MCSRR1;
            asrr0 = SPR_BOOKE_CSRR0;
            asrr1 = SPR_BOOKE_CSRR1;
            break;
        default:
            break;
        }
        break;
    case POWERPC_EXCP_DSI:       /* Data storage exception */
        LOG_EXCP("DSI exception: DSISR=" TARGET_FMT_lx " DAR=" TARGET_FMT_lx
                 "\n", env->spr[SPR_DSISR], env->spr[SPR_DAR]);
        break;
    case POWERPC_EXCP_ISI:       /* Instruction storage exception */
        LOG_EXCP("ISI exception: msr=" TARGET_FMT_lx ", nip=" TARGET_FMT_lx
                 "\n", msr, env->nip);
        msr |= env->error_code;
        break;
    case POWERPC_EXCP_EXTERNAL:  /* External input */
        cs = CPU(cpu);

        if (!lpes0) {
            new_msr |= (target_ulong)MSR_HVB;
            new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
            srr0 = SPR_HSRR0;
            srr1 = SPR_HSRR1;
        }
        if (env->mpic_proxy) {
            /* IACK the IRQ on delivery */
            env->spr[SPR_BOOKE_EPR] = ldl_phys(cs->as, env->mpic_iack);
        }
        break;
    case POWERPC_EXCP_ALIGN:     /* Alignment exception */
        /* Get rS/rD and rA from faulting opcode */
        /*
         * Note: the opcode fields will not be set properly for a
         * direct store load/store, but nobody cares as nobody
         * actually uses direct store segments.
         */
        env->spr[SPR_DSISR] |= (env->error_code & 0x03FF0000) >> 16;
        break;
    case POWERPC_EXCP_PROGRAM:   /* Program exception */
        switch (env->error_code & ~0xF) {
        case POWERPC_EXCP_FP:
            if ((msr_fe0 == 0 && msr_fe1 == 0) || msr_fp == 0) {
                LOG_EXCP("Ignore floating point exception\n");
                cs->exception_index = POWERPC_EXCP_NONE;
                env->error_code = 0;
                return;
            }

            /*
             * FP exceptions always have NIP pointing to the faulting
             * instruction, so always use store_next and claim we are
             * precise in the MSR.
             */
            msr |= 0x00100000;
            env->spr[SPR_BOOKE_ESR] = ESR_FP;
            break;
        case POWERPC_EXCP_INVAL:
            LOG_EXCP("Invalid instruction at " TARGET_FMT_lx "\n", env->nip);
            msr |= 0x00080000;
            env->spr[SPR_BOOKE_ESR] = ESR_PIL;
            break;
        case POWERPC_EXCP_PRIV:
            msr |= 0x00040000;
            env->spr[SPR_BOOKE_ESR] = ESR_PPR;
            break;
        case POWERPC_EXCP_TRAP:
            msr |= 0x00020000;
            env->spr[SPR_BOOKE_ESR] = ESR_PTR;
            break;
        default:
            /* Should never occur */
            cpu_abort(cs, "Invalid program exception %d. Aborting\n",
                      env->error_code);
            break;
        }
        break;
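    /*
     * For system calls, error_code holds the LEV field of the sc
     * instruction: LEV=1 requests the hypervisor, handled either by the
     * built-in hypercall emulation (PAPR guests) or by setting MSR[HV].
     */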
    case POWERPC_EXCP_SYSCALL:   /* System call exception */
        dump_syscall(env);
        lev = env->error_code;

        /*
         * We need to correct the NIP which in this case is supposed
         * to point to the next instruction
         */
        env->nip += 4;

        /* "PAPR mode" built-in hypercall emulation */
        if ((lev == 1) && cpu->vhyp) {
            PPCVirtualHypervisorClass *vhc =
                PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
            vhc->hypercall(cpu->vhyp, cpu);
            return;
        }
        if (lev == 1) {
            new_msr |= (target_ulong)MSR_HVB;
        }
        break;
    case POWERPC_EXCP_FPU:       /* Floating-point unavailable exception */
    case POWERPC_EXCP_APU:       /* Auxiliary processor unavailable */
    case POWERPC_EXCP_DECR:      /* Decrementer exception */
        break;
    case POWERPC_EXCP_FIT:       /* Fixed-interval timer interrupt */
        /* FIT on 4xx */
        LOG_EXCP("FIT exception\n");
        break;
    case POWERPC_EXCP_WDT:       /* Watchdog timer interrupt */
        LOG_EXCP("WDT exception\n");
        switch (excp_model) {
        case POWERPC_EXCP_BOOKE:
            srr0 = SPR_BOOKE_CSRR0;
            srr1 = SPR_BOOKE_CSRR1;
            break;
        default:
            break;
        }
        break;
    case POWERPC_EXCP_DTLB:      /* Data TLB error */
    case POWERPC_EXCP_ITLB:      /* Instruction TLB error */
        break;
    case POWERPC_EXCP_DEBUG:     /* Debug interrupt */
        if (env->flags & POWERPC_FLAG_DE) {
            /* FIXME: choose one or the other based on CPU type */
            srr0 = SPR_BOOKE_DSRR0;
            srr1 = SPR_BOOKE_DSRR1;
            asrr0 = SPR_BOOKE_CSRR0;
            asrr1 = SPR_BOOKE_CSRR1;
            /* DBSR already modified by caller */
        } else {
            cpu_abort(cs, "Debug exception triggered on unsupported model\n");
        }
        break;
    case POWERPC_EXCP_SPEU:      /* SPE/embedded floating-point unavailable */
        env->spr[SPR_BOOKE_ESR] = ESR_SPV;
        break;
    case POWERPC_EXCP_EFPDI:     /* Embedded floating-point data interrupt */
        /* XXX: TODO */
        cpu_abort(cs, "Embedded floating point data exception "
                  "is not implemented yet!\n");
        env->spr[SPR_BOOKE_ESR] = ESR_SPV;
        break;
    case POWERPC_EXCP_EFPRI:     /* Embedded floating-point round interrupt */
        /* XXX: TODO */
        cpu_abort(cs, "Embedded floating point round exception "
                  "is not implemented yet!\n");
        env->spr[SPR_BOOKE_ESR] = ESR_SPV;
        break;
    case POWERPC_EXCP_EPERFM:    /* Embedded performance monitor interrupt */
        /* XXX: TODO */
        cpu_abort(cs,
                  "Performance counter exception is not implemented yet!\n");
        break;
    case POWERPC_EXCP_DOORI:     /* Embedded doorbell interrupt */
        break;
    case POWERPC_EXCP_DOORCI:    /* Embedded doorbell critical interrupt */
        srr0 = SPR_BOOKE_CSRR0;
        srr1 = SPR_BOOKE_CSRR1;
        break;
    case POWERPC_EXCP_RESET:     /* System reset exception */
        /* A power-saving exception sets ME, otherwise it is unchanged */
        if (msr_pow) {
            /* indicate that we resumed from power save mode */
            msr |= 0x10000;
            new_msr |= ((target_ulong)1 << MSR_ME);
        }
        if (env->msr_mask & MSR_HVB) {
            /*
             * ISA specifies HV, but can be delivered to guest with HV
             * clear (e.g., see FWNMI in PAPR, NMI injection in QEMU).
             */
            new_msr |= (target_ulong)MSR_HVB;
        } else {
            if (msr_pow) {
                cpu_abort(cs, "Trying to deliver power-saving system reset "
                          "exception %d with no HV support\n", excp);
            }
        }
        ail = 0;
        break;
    case POWERPC_EXCP_DSEG:      /* Data segment exception */
    case POWERPC_EXCP_ISEG:      /* Instruction segment exception */
    case POWERPC_EXCP_TRACE:     /* Trace exception */
        break;
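    /*
     * The following are hypervisor-class interrupts: they are taken in
     * hypervisor state, so they use HSRR0/HSRR1 and force MSR[HV] on.
     */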
    case POWERPC_EXCP_HDECR:     /* Hypervisor decrementer exception */
    case POWERPC_EXCP_HDSI:      /* Hypervisor data storage exception */
    case POWERPC_EXCP_HISI:      /* Hypervisor instruction storage exception */
    case POWERPC_EXCP_HDSEG:     /* Hypervisor data segment exception */
    case POWERPC_EXCP_HISEG:     /* Hypervisor instruction segment exception */
    case POWERPC_EXCP_SDOOR_HV:  /* Hypervisor Doorbell interrupt */
    case POWERPC_EXCP_HV_EMU:
    case POWERPC_EXCP_HVIRT:     /* Hypervisor virtualization */
        srr0 = SPR_HSRR0;
        srr1 = SPR_HSRR1;
        new_msr |= (target_ulong)MSR_HVB;
        new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
        break;
    case POWERPC_EXCP_VPU:       /* Vector unavailable exception */
    case POWERPC_EXCP_VSXU:      /* VSX unavailable exception */
    case POWERPC_EXCP_FU:        /* Facility unavailable exception */
#ifdef TARGET_PPC64
        env->spr[SPR_FSCR] |= ((target_ulong)env->error_code << 56);
#endif
        break;
    case POWERPC_EXCP_HV_FU:     /* Hypervisor Facility Unavailable Exception */
#ifdef TARGET_PPC64
        env->spr[SPR_HFSCR] |= ((target_ulong)env->error_code << FSCR_IC_POS);
        srr0 = SPR_HSRR0;
        srr1 = SPR_HSRR1;
        new_msr |= (target_ulong)MSR_HVB;
        new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
#endif
        break;
    case POWERPC_EXCP_PIT:       /* Programmable interval timer interrupt */
        LOG_EXCP("PIT exception\n");
        break;
    case POWERPC_EXCP_IO:        /* IO error exception */
        /* XXX: TODO */
        cpu_abort(cs, "601 IO error exception is not implemented yet!\n");
        break;
    case POWERPC_EXCP_RUNM:      /* Run mode exception */
        /* XXX: TODO */
        cpu_abort(cs, "601 run mode exception is not implemented yet!\n");
        break;
    case POWERPC_EXCP_EMUL:      /* Emulation trap exception */
        /* XXX: TODO */
        cpu_abort(cs, "602 emulation trap exception "
                  "is not implemented yet!\n");
        break;
    case POWERPC_EXCP_IFTLB:     /* Instruction fetch TLB error */
        switch (excp_model) {
        case POWERPC_EXCP_602:
        case POWERPC_EXCP_603:
        case POWERPC_EXCP_603E:
        case POWERPC_EXCP_G2:
            goto tlb_miss_tgpr;
        case POWERPC_EXCP_7x5:
            goto tlb_miss;
        case POWERPC_EXCP_74xx:
            goto tlb_miss_74xx;
        default:
            cpu_abort(cs, "Invalid instruction TLB miss exception\n");
            break;
        }
        break;
    case POWERPC_EXCP_DLTLB:     /* Data load TLB miss */
        switch (excp_model) {
        case POWERPC_EXCP_602:
        case POWERPC_EXCP_603:
        case POWERPC_EXCP_603E:
        case POWERPC_EXCP_G2:
            goto tlb_miss_tgpr;
        case POWERPC_EXCP_7x5:
            goto tlb_miss;
        case POWERPC_EXCP_74xx:
            goto tlb_miss_74xx;
        default:
            cpu_abort(cs, "Invalid data load TLB miss exception\n");
            break;
        }
        break;
    case POWERPC_EXCP_DSTLB:     /* Data store TLB miss */
        switch (excp_model) {
        case POWERPC_EXCP_602:
        case POWERPC_EXCP_603:
        case POWERPC_EXCP_603E:
        case POWERPC_EXCP_G2:
        tlb_miss_tgpr:
            /* Swap temporary saved registers with GPRs */
            if (!(new_msr & ((target_ulong)1 << MSR_TGPR))) {
                new_msr |= (target_ulong)1 << MSR_TGPR;
                hreg_swap_gpr_tgpr(env);
            }
            goto tlb_miss;
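        /*
         * 6xx/7x5 software TLB reload: build an SRR1 image holding CR0,
         * the key, D/I and S/L bits from error_code, plus the way selected
         * for replacement, for the guest miss handler (which also consults
         * the IMISS/DMISS, ICMP/DCMP and HASH1/HASH2 SPRs).
         */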
        case POWERPC_EXCP_7x5:
        tlb_miss:
#if defined(DEBUG_SOFTWARE_TLB)
            if (qemu_log_enabled()) {
                const char *es;
                target_ulong *miss, *cmp;
                int en;

                if (excp == POWERPC_EXCP_IFTLB) {
                    es = "I";
                    en = 'I';
                    miss = &env->spr[SPR_IMISS];
                    cmp = &env->spr[SPR_ICMP];
                } else {
                    if (excp == POWERPC_EXCP_DLTLB) {
                        es = "DL";
                    } else {
                        es = "DS";
                    }
                    en = 'D';
                    miss = &env->spr[SPR_DMISS];
                    cmp = &env->spr[SPR_DCMP];
                }
                qemu_log("6xx %sTLB miss: %cM " TARGET_FMT_lx " %cC "
                         TARGET_FMT_lx " H1 " TARGET_FMT_lx " H2 "
                         TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp,
                         env->spr[SPR_HASH1], env->spr[SPR_HASH2],
                         env->error_code);
            }
#endif
            msr |= env->crf[0] << 28;
            msr |= env->error_code;    /* key, D/I, S/L bits */
            /* Set way using a LRU mechanism */
            msr |= ((env->last_way + 1) & (env->nb_ways - 1)) << 17;
            break;
        case POWERPC_EXCP_74xx:
        tlb_miss_74xx:
#if defined(DEBUG_SOFTWARE_TLB)
            if (qemu_log_enabled()) {
                const char *es;
                target_ulong *miss, *cmp;
                int en;

                if (excp == POWERPC_EXCP_IFTLB) {
                    es = "I";
                    en = 'I';
                    miss = &env->spr[SPR_TLBMISS];
                    cmp = &env->spr[SPR_PTEHI];
                } else {
                    if (excp == POWERPC_EXCP_DLTLB) {
                        es = "DL";
                    } else {
                        es = "DS";
                    }
                    en = 'D';
                    miss = &env->spr[SPR_TLBMISS];
                    cmp = &env->spr[SPR_PTEHI];
                }
                qemu_log("74xx %sTLB miss: %cM " TARGET_FMT_lx " %cC "
                         TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp,
                         env->error_code);
            }
#endif
            msr |= env->error_code;    /* key bit */
            break;
        default:
            cpu_abort(cs, "Invalid data store TLB miss exception\n");
            break;
        }
        break;
    case POWERPC_EXCP_FPA:       /* Floating-point assist exception */
        /* XXX: TODO */
        cpu_abort(cs, "Floating point assist exception "
                  "is not implemented yet!\n");
        break;
    case POWERPC_EXCP_DABR:      /* Data address breakpoint */
        /* XXX: TODO */
        cpu_abort(cs, "DABR exception is not implemented yet!\n");
        break;
    case POWERPC_EXCP_IABR:      /* Instruction address breakpoint */
        /* XXX: TODO */
        cpu_abort(cs, "IABR exception is not implemented yet!\n");
        break;
    case POWERPC_EXCP_SMI:       /* System management interrupt */
        /* XXX: TODO */
        cpu_abort(cs, "SMI exception is not implemented yet!\n");
        break;
    case POWERPC_EXCP_THERM:     /* Thermal interrupt */
        /* XXX: TODO */
        cpu_abort(cs, "Thermal management exception "
                  "is not implemented yet!\n");
        break;
    case POWERPC_EXCP_PERFM:     /* Embedded performance monitor interrupt */
        /* XXX: TODO */
        cpu_abort(cs,
                  "Performance counter exception is not implemented yet!\n");
        break;
    case POWERPC_EXCP_VPUA:      /* Vector assist exception */
        /* XXX: TODO */
        cpu_abort(cs, "VPU assist exception is not implemented yet!\n");
        break;
    case POWERPC_EXCP_SOFTP:     /* Soft patch exception */
        /* XXX: TODO */
        cpu_abort(cs,
                  "970 soft-patch exception is not implemented yet!\n");
        break;
    case POWERPC_EXCP_MAINT:     /* Maintenance exception */
        /* XXX: TODO */
        cpu_abort(cs,
                  "970 maintenance exception is not implemented yet!\n");
        break;
    case POWERPC_EXCP_MEXTBR:    /* Maskable external breakpoint */
        /* XXX: TODO */
        cpu_abort(cs, "Maskable external exception "
                  "is not implemented yet!\n");
        break;
    case POWERPC_EXCP_NMEXTBR:   /* Non maskable external breakpoint */
        /* XXX: TODO */
        cpu_abort(cs, "Non maskable external exception "
                  "is not implemented yet!\n");
        break;
    default:
    excp_invalid:
        cpu_abort(cs, "Invalid PowerPC exception %d. Aborting\n", excp);
        break;
    }

    /* Save PC */
    env->spr[srr0] = env->nip;

    /* Save MSR */
    env->spr[srr1] = msr;

    /* Sanity check */
    if (!(env->msr_mask & MSR_HVB)) {
        if (new_msr & MSR_HVB) {
            cpu_abort(cs, "Trying to deliver HV exception (MSR) %d with "
                      "no HV support\n", excp);
        }
        if (srr0 == SPR_HSRR0) {
            cpu_abort(cs, "Trying to deliver HV exception (HSRR) %d with "
                      "no HV support\n", excp);
        }
    }

    /* If any alternate SRR registers are defined, duplicate saved values */
    if (asrr0 != -1) {
        env->spr[asrr0] = env->spr[srr0];
    }
    if (asrr1 != -1) {
        env->spr[asrr1] = env->spr[srr1];
    }

    /*
     * Sort out the endianness of the interrupt; this differs depending
     * on the CPU, the HV mode, etc...
     */
#ifdef TARGET_PPC64
    if (excp_model == POWERPC_EXCP_POWER7) {
        if (!(new_msr & MSR_HVB) && (env->spr[SPR_LPCR] & LPCR_ILE)) {
            new_msr |= (target_ulong)1 << MSR_LE;
        }
    } else if (excp_model == POWERPC_EXCP_POWER8) {
        if (new_msr & MSR_HVB) {
            if (env->spr[SPR_HID0] & HID0_HILE) {
                new_msr |= (target_ulong)1 << MSR_LE;
            }
        } else if (env->spr[SPR_LPCR] & LPCR_ILE) {
            new_msr |= (target_ulong)1 << MSR_LE;
        }
    } else if (excp_model == POWERPC_EXCP_POWER9) {
        if (new_msr & MSR_HVB) {
            if (env->spr[SPR_HID0] & HID0_POWER9_HILE) {
                new_msr |= (target_ulong)1 << MSR_LE;
            }
        } else if (env->spr[SPR_LPCR] & LPCR_ILE) {
            new_msr |= (target_ulong)1 << MSR_LE;
        }
    } else if (msr_ile) {
        new_msr |= (target_ulong)1 << MSR_LE;
    }
#else
    if (msr_ile) {
        new_msr |= (target_ulong)1 << MSR_LE;
    }
#endif

    /* Jump to handler */
    vector = env->excp_vectors[excp];
    if (vector == (target_ulong)-1ULL) {
        cpu_abort(cs, "Raised an exception without defined vector %d\n",
                  excp);
    }
    vector |= env->excp_prefix;

    /*
     * AIL only works if there is no HV transition and we are running
     * with translations enabled
     */
    if (!((msr >> MSR_IR) & 1) || !((msr >> MSR_DR) & 1) ||
        ((new_msr & MSR_HVB) && !(msr & MSR_HVB))) {
        ail = 0;
    }
    /* Handle AIL */
    if (ail) {
        new_msr |= (1 << MSR_IR) | (1 << MSR_DR);
        vector |= ppc_excp_vector_offset(cs, ail);
    }

#if defined(TARGET_PPC64)
    if (excp_model == POWERPC_EXCP_BOOKE) {
        if (env->spr[SPR_BOOKE_EPCR] & EPCR_ICM) {
            /* Cat.64-bit: EPCR.ICM is copied to MSR.CM */
            new_msr |= (target_ulong)1 << MSR_CM;
        } else {
            vector = (uint32_t)vector;
        }
    } else {
        if (!msr_isf && !(env->mmu_model & POWERPC_MMU_64)) {
            vector = (uint32_t)vector;
        } else {
            new_msr |= (target_ulong)1 << MSR_SF;
        }
    }
#endif
    /*
     * We don't use hreg_store_msr here as we have already treated any
     * special case that could occur. Just store MSR and update hflags
     *
     * Note: We *MUST* not use hreg_store_msr() as-is anyway because it
     * will prevent setting of the HV bit which some exceptions might need
     * to do.
     */
    env->msr = new_msr & env->msr_mask;
    hreg_compute_hflags(env);
    env->nip = vector;
    /* Reset exception state */
    cs->exception_index = POWERPC_EXCP_NONE;
    env->error_code = 0;

    /* Reset the reservation */
    env->reserve_addr = -1;

    /*
     * Any interrupt is context synchronizing, check if TCG TLB needs
     * a delayed flush on ppc64
     */
    check_tlb_flush(env, false);
}

void ppc_cpu_do_interrupt(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;

    powerpc_excp(cpu, env->excp_model, cs->exception_index);
}

static void ppc_hw_interrupt(CPUPPCState *env)
{
    PowerPCCPU *cpu = env_archcpu(env);
    bool async_deliver;

    /* External reset */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_RESET)) {
        env->pending_interrupts &= ~(1 << PPC_INTERRUPT_RESET);
        powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_RESET);
        return;
    }
    /* Machine check exception */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_MCK)) {
        env->pending_interrupts &= ~(1 << PPC_INTERRUPT_MCK);
        powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_MCHECK);
        return;
    }
#if 0 /* TODO */
    /* External debug exception */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_DEBUG)) {
        env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DEBUG);
        powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DEBUG);
        return;
    }
#endif

    /*
     * For interrupts that gate on MSR:EE, we need to do something a
     * bit more subtle, as we need to let them through even when EE is
     * clear while coming out of some power management states (in
     * order for them to become a 0x100).
     */
    async_deliver = (msr_ee != 0) || env->resume_as_sreset;

    /* Hypervisor decrementer exception */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDECR)) {
        /* LPCR will be clear when not supported so this will work */
        bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE);
        if ((async_deliver || msr_hv == 0) && hdice) {
            /* HDEC clears on delivery */
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDECR);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_HDECR);
            return;
        }
    }

    /* Hypervisor virtualization interrupt */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_HVIRT)) {
        /* LPCR will be clear when not supported so this will work */
        bool hvice = !!(env->spr[SPR_LPCR] & LPCR_HVICE);
        if ((async_deliver || msr_hv == 0) && hvice) {
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_HVIRT);
            return;
        }
    }

    /* External interrupt can ignore MSR:EE under some circumstances */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_EXT)) {
        bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
        bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC);
        /* HEIC blocks delivery to the hypervisor */
        if ((async_deliver && !(heic && msr_hv && !msr_pr)) ||
            (env->has_hv_mode && msr_hv == 0 && !lpes0)) {
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_EXTERNAL);
            return;
        }
    }
    if (msr_ce != 0) {
        /* External critical interrupt */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_CEXT)) {
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_CRITICAL);
            return;
        }
    }
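    /*
     * The remaining asynchronous interrupts are all gated on
     * async_deliver (MSR:EE set, or resuming as a system reset) and are
     * checked in a fixed order; the first pending one is delivered.
     */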
    if (async_deliver != 0) {
        /* Watchdog timer on embedded PowerPC */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_WDT)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_WDT);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_WDT);
            return;
        }
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_CDOORBELL)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_CDOORBELL);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DOORCI);
            return;
        }
        /* Fixed interval timer on embedded PowerPC */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_FIT)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_FIT);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_FIT);
            return;
        }
        /* Programmable interval timer on embedded PowerPC */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_PIT)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PIT);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_PIT);
            return;
        }
        /* Decrementer exception */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_DECR)) {
            if (ppc_decr_clear_on_delivery(env)) {
                env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DECR);
            }
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DECR);
            return;
        }
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_DOORBELL)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DOORBELL);
            if (is_book3s_arch2x(env)) {
                powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_SDOOR);
            } else {
                powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DOORI);
            }
            return;
        }
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDOORBELL)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDOORBELL);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_SDOOR_HV);
            return;
        }
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_PERFM)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PERFM);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_PERFM);
            return;
        }
        /* Thermal interrupt */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_THERM)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_THERM);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_THERM);
            return;
        }
    }

    if (env->resume_as_sreset) {
        /*
         * This is a bug! It means that has_work took us out of halt
         * without anything to deliver while in a PM state that
         * requires getting out via a 0x100.
         *
         * This means we will incorrectly execute past the power
         * management instruction instead of triggering a reset.
         *
         * It generally means a discrepancy between the wakeup
         * conditions in the processor has_work implementation and the
         * logic in this function.
         */
        cpu_abort(env_cpu(env),
                  "Wakeup from PM state but interrupt undelivered");
    }
}

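/*
 * Raise a system reset exception on behalf of the board emulation code,
 * e.g. for NMI injection (see the FWNMI comments above).
 */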
void ppc_cpu_do_system_reset(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;

    powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_RESET);
}
#endif /* !CONFIG_USER_ONLY */

bool ppc_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;

    if (interrupt_request & CPU_INTERRUPT_HARD) {
        ppc_hw_interrupt(env);
        if (env->pending_interrupts == 0) {
            cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
        }
        return true;
    }
    return false;
}

#if defined(DEBUG_OP)
static void cpu_dump_rfi(target_ulong RA, target_ulong msr)
{
    qemu_log("Return from exception at " TARGET_FMT_lx " with flags "
             TARGET_FMT_lx "\n", RA, msr);
}
#endif

/*****************************************************************************/
/* Exceptions processing helpers */

void raise_exception_err_ra(CPUPPCState *env, uint32_t exception,
                            uint32_t error_code, uintptr_t raddr)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = exception;
    env->error_code = error_code;
    cpu_loop_exit_restore(cs, raddr);
}

void raise_exception_err(CPUPPCState *env, uint32_t exception,
                         uint32_t error_code)
{
    raise_exception_err_ra(env, exception, error_code, 0);
}

void raise_exception(CPUPPCState *env, uint32_t exception)
{
    raise_exception_err_ra(env, exception, 0, 0);
}

void raise_exception_ra(CPUPPCState *env, uint32_t exception,
                        uintptr_t raddr)
{
    raise_exception_err_ra(env, exception, 0, raddr);
}

void helper_raise_exception_err(CPUPPCState *env, uint32_t exception,
                                uint32_t error_code)
{
    raise_exception_err_ra(env, exception, error_code, 0);
}

void helper_raise_exception(CPUPPCState *env, uint32_t exception)
{
    raise_exception_err_ra(env, exception, 0, 0);
}

#if !defined(CONFIG_USER_ONLY)
void helper_store_msr(CPUPPCState *env, target_ulong val)
{
    uint32_t excp = hreg_store_msr(env, val, 0);

    if (excp != 0) {
        CPUState *cs = env_cpu(env);
        cpu_interrupt_exittb(cs);
        raise_exception(env, excp);
    }
}

#if defined(TARGET_PPC64)
void helper_pminsn(CPUPPCState *env, powerpc_pm_insn_t insn)
{
    CPUState *cs;

    cs = env_cpu(env);
    cs->halted = 1;

    /*
     * The architecture specifies that HDEC interrupts are discarded
     * in PM states
     */
    env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDECR);

    /* Condition for waking up at 0x100 */
    env->resume_as_sreset = (insn != PPC_PM_STOP) ||
        (env->spr[SPR_PSSCR] & PSSCR_EC);
}
#endif /* defined(TARGET_PPC64) */

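/*
 * Common return-from-interrupt path: reload NIP and MSR from the given
 * save/restore pair. MSR:POW can never be set this way, and the operation
 * is context synchronizing, so the TB is exited and any delayed TLB flush
 * is performed.
 */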
static inline void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr)
{
    CPUState *cs = env_cpu(env);

    /* MSR:POW cannot be set by any form of rfi */
    msr &= ~(1ULL << MSR_POW);

#if defined(TARGET_PPC64)
    /* Switching to 32-bit? Crop the nip */
    if (!msr_is_64bit(env, msr)) {
        nip = (uint32_t)nip;
    }
#else
    nip = (uint32_t)nip;
#endif
    /* XXX: beware: this is false if VLE is supported */
    env->nip = nip & ~((target_ulong)0x00000003);
    hreg_store_msr(env, msr, 1);
#if defined(DEBUG_OP)
    cpu_dump_rfi(env->nip, env->msr);
#endif
    /*
     * No need to raise an exception here, as rfi is always the last
     * insn of a TB
     */
    cpu_interrupt_exittb(cs);
    /* Reset the reservation */
    env->reserve_addr = -1;

    /* Context synchronizing: check if TCG TLB needs flush */
    check_tlb_flush(env, false);
}

void helper_rfi(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1] & 0xfffffffful);
}

#define MSR_BOOK3S_MASK
#if defined(TARGET_PPC64)
void helper_rfid(CPUPPCState *env)
{
    /*
     * The architecture defines a number of rules for which bits can
     * change but in practice, we handle this in hreg_store_msr()
     * which will be called by do_rfi(), so there is no need to filter
     * here
     */
    do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1]);
}

void helper_hrfid(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_HSRR0], env->spr[SPR_HSRR1]);
}
#endif

/*****************************************************************************/
/* Embedded PowerPC specific helpers */
void helper_40x_rfci(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3]);
}

void helper_rfci(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1]);
}

void helper_rfdi(CPUPPCState *env)
{
    /* FIXME: choose CSRR1 or DSRR1 based on cpu type */
    do_rfi(env, env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1]);
}

void helper_rfmci(CPUPPCState *env)
{
    /* FIXME: choose CSRR1 or MCSRR1 based on cpu type */
    do_rfi(env, env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1]);
}
#endif

void helper_tw(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
               uint32_t flags)
{
    if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
                  ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
                  ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
                  ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
                  ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_TRAP, GETPC());
    }
}

#if defined(TARGET_PPC64)
void helper_td(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
               uint32_t flags)
{
    if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
                  ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
                  ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
                  ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
                  ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01))))) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_TRAP, GETPC());
    }
}
#endif

#if !defined(CONFIG_USER_ONLY)
/*****************************************************************************/
/* PowerPC 601 specific instructions (POWER bridge) */

void helper_rfsvc(CPUPPCState *env)
{
    do_rfi(env, env->lr, env->ctr & 0x0000FFFF);
}

/* Embedded.Processor Control */
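/*
 * Map the doorbell message type encoded in rb (the msgsnd/msgclr operand)
 * to the corresponding pending interrupt; guest doorbell messages are not
 * implemented and yield -1, which the callers ignore.
 */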
static int dbell2irq(target_ulong rb)
{
    int msg = rb & DBELL_TYPE_MASK;
    int irq = -1;

    switch (msg) {
    case DBELL_TYPE_DBELL:
        irq = PPC_INTERRUPT_DOORBELL;
        break;
    case DBELL_TYPE_DBELL_CRIT:
        irq = PPC_INTERRUPT_CDOORBELL;
        break;
    case DBELL_TYPE_G_DBELL:
    case DBELL_TYPE_G_DBELL_CRIT:
    case DBELL_TYPE_G_DBELL_MC:
        /* XXX implement */
    default:
        break;
    }

    return irq;
}

void helper_msgclr(CPUPPCState *env, target_ulong rb)
{
    int irq = dbell2irq(rb);

    if (irq < 0) {
        return;
    }

    env->pending_interrupts &= ~(1 << irq);
}

void helper_msgsnd(target_ulong rb)
{
    int irq = dbell2irq(rb);
    int pir = rb & DBELL_PIRTAG_MASK;
    CPUState *cs;

    if (irq < 0) {
        return;
    }

    qemu_mutex_lock_iothread();
    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);
        CPUPPCState *cenv = &cpu->env;

        if ((rb & DBELL_BRDCAST) || (cenv->spr[SPR_BOOKE_PIR] == pir)) {
            cenv->pending_interrupts |= 1 << irq;
            cpu_interrupt(cs, CPU_INTERRUPT_HARD);
        }
    }
    qemu_mutex_unlock_iothread();
}

/* Server Processor Control */

static bool dbell_type_server(target_ulong rb)
{
    /*
     * A Directed Hypervisor Doorbell message is sent only if the
     * message type is 5. All other types are reserved and the
     * instruction is a no-op
     */
    return (rb & DBELL_TYPE_MASK) == DBELL_TYPE_DBELL_SERVER;
}

void helper_book3s_msgclr(CPUPPCState *env, target_ulong rb)
{
    if (!dbell_type_server(rb)) {
        return;
    }

    env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDOORBELL);
}

static void book3s_msgsnd_common(int pir, int irq)
{
    CPUState *cs;

    qemu_mutex_lock_iothread();
    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);
        CPUPPCState *cenv = &cpu->env;

        /* TODO: broadcast message to all threads of the same processor */
        if (cenv->spr_cb[SPR_PIR].default_value == pir) {
            cenv->pending_interrupts |= 1 << irq;
            cpu_interrupt(cs, CPU_INTERRUPT_HARD);
        }
    }
    qemu_mutex_unlock_iothread();
}

void helper_book3s_msgsnd(target_ulong rb)
{
    int pir = rb & DBELL_PROCIDTAG_MASK;

    if (!dbell_type_server(rb)) {
        return;
    }

    book3s_msgsnd_common(pir, PPC_INTERRUPT_HDOORBELL);
}

#if defined(TARGET_PPC64)
void helper_book3s_msgclrp(CPUPPCState *env, target_ulong rb)
{
    helper_hfscr_facility_check(env, HFSCR_MSGP, "msgclrp", HFSCR_IC_MSGP);

    if (!dbell_type_server(rb)) {
        return;
    }

    env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DOORBELL);
}

/*
 * sends a message to other threads that are on the same
 * multi-threaded processor
 */
void helper_book3s_msgsndp(CPUPPCState *env, target_ulong rb)
{
    int pir = env->spr_cb[SPR_PIR].default_value;

    helper_hfscr_facility_check(env, HFSCR_MSGP, "msgsndp", HFSCR_IC_MSGP);

    if (!dbell_type_server(rb)) {
        return;
    }

    /* TODO: TCG supports only one thread */

    book3s_msgsnd_common(pir, PPC_INTERRUPT_DOORBELL);
}
#endif
#endif

void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr)
{
    CPUPPCState *env = cs->env_ptr;
    uint32_t insn;

    /* Restore state and reload the insn we executed, for filling in DSISR. */
    cpu_restore_state(cs, retaddr, true);
    insn = cpu_ldl_code(env, env->nip);

    cs->exception_index = POWERPC_EXCP_ALIGN;
    env->error_code = insn & 0x03FF0000;
    cpu_loop_exit(cs);
}