/*
 * PowerPC exception emulation helpers for QEMU.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "internal.h"
#include "helper_regs.h"

/* #define DEBUG_OP */
/* #define DEBUG_SOFTWARE_TLB */
/* #define DEBUG_EXCEPTIONS */

#ifdef DEBUG_EXCEPTIONS
# define LOG_EXCP(...) qemu_log(__VA_ARGS__)
#else
# define LOG_EXCP(...) do { } while (0)
#endif

/*****************************************************************************/
/* Exception processing */
#if defined(CONFIG_USER_ONLY)
void ppc_cpu_do_interrupt(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;

    cs->exception_index = POWERPC_EXCP_NONE;
    env->error_code = 0;
}

static void ppc_hw_interrupt(CPUPPCState *env)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = POWERPC_EXCP_NONE;
    env->error_code = 0;
}
#else /* defined(CONFIG_USER_ONLY) */
static inline void dump_syscall(CPUPPCState *env)
{
    qemu_log_mask(CPU_LOG_INT, "syscall r0=%016" PRIx64
                  " r3=%016" PRIx64 " r4=%016" PRIx64 " r5=%016" PRIx64
                  " r6=%016" PRIx64 " r7=%016" PRIx64 " r8=%016" PRIx64
                  " nip=" TARGET_FMT_lx "\n",
                  ppc_dump_gpr(env, 0), ppc_dump_gpr(env, 3),
                  ppc_dump_gpr(env, 4), ppc_dump_gpr(env, 5),
                  ppc_dump_gpr(env, 6), ppc_dump_gpr(env, 7),
                  ppc_dump_gpr(env, 8), env->nip);
}

static inline void dump_hcall(CPUPPCState *env)
{
    qemu_log_mask(CPU_LOG_INT, "hypercall r3=%016" PRIx64
                  " r4=%016" PRIx64 " r5=%016" PRIx64 " r6=%016" PRIx64
                  " r7=%016" PRIx64 " r8=%016" PRIx64 " r9=%016" PRIx64
                  " r10=%016" PRIx64 " r11=%016" PRIx64 " r12=%016" PRIx64
                  " nip=" TARGET_FMT_lx "\n",
                  ppc_dump_gpr(env, 3), ppc_dump_gpr(env, 4),
                  ppc_dump_gpr(env, 5), ppc_dump_gpr(env, 6),
                  ppc_dump_gpr(env, 7), ppc_dump_gpr(env, 8),
                  ppc_dump_gpr(env, 9), ppc_dump_gpr(env, 10),
                  ppc_dump_gpr(env, 11), ppc_dump_gpr(env, 12),
                  env->nip);
}

static int powerpc_reset_wakeup(CPUState *cs, CPUPPCState *env, int excp,
                                target_ulong *msr)
{
    /* We are no longer in a PM state */
    env->resume_as_sreset = false;

    /* Pretend to be returning from doze always as we don't lose state */
    *msr |= (0x1ull << (63 - 47));
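    /*
     * The shift constants here use big-endian (IBM) SRR1 bit numbering:
     * (63 - 47) puts the doze indication in SRR1 bit 47, and the
     * (63 - 45) shifts below place the wake reason in SRR1[42:45].
     */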
    /* Machine checks are sent normally */
    if (excp == POWERPC_EXCP_MCHECK) {
        return excp;
    }
    switch (excp) {
    case POWERPC_EXCP_RESET:
        *msr |= 0x4ull << (63 - 45);
        break;
    case POWERPC_EXCP_EXTERNAL:
        *msr |= 0x8ull << (63 - 45);
        break;
    case POWERPC_EXCP_DECR:
        *msr |= 0x6ull << (63 - 45);
        break;
    case POWERPC_EXCP_SDOOR:
        *msr |= 0x5ull << (63 - 45);
        break;
    case POWERPC_EXCP_SDOOR_HV:
        *msr |= 0x3ull << (63 - 45);
        break;
    case POWERPC_EXCP_HV_MAINT:
        *msr |= 0xaull << (63 - 45);
        break;
    case POWERPC_EXCP_HVIRT:
        *msr |= 0x9ull << (63 - 45);
        break;
    default:
        cpu_abort(cs, "Unsupported exception %d in Power Save mode\n",
                  excp);
    }
    return POWERPC_EXCP_RESET;
}

static uint64_t ppc_excp_vector_offset(CPUState *cs, int ail)
{
    uint64_t offset = 0;

    switch (ail) {
    case AIL_NONE:
        break;
    case AIL_0001_8000:
        offset = 0x18000;
        break;
    case AIL_C000_0000_0000_4000:
        offset = 0xc000000000004000ull;
        break;
    default:
        cpu_abort(cs, "Invalid AIL combination %d\n", ail);
        break;
    }

    return offset;
}

static inline void powerpc_set_excp_state(PowerPCCPU *cpu,
                                          target_ulong vector, target_ulong msr)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;

    /*
     * We don't use hreg_store_msr here as we have already treated any
     * special case that could occur. Just store MSR and update hflags
     *
     * Note: We *MUST* not use hreg_store_msr() as-is anyway because it
     * will prevent setting of the HV bit which some exceptions might need
     * to do.
     */
    env->msr = msr & env->msr_mask;
    hreg_compute_hflags(env);
    env->nip = vector;
    /* Reset exception state */
    cs->exception_index = POWERPC_EXCP_NONE;
    env->error_code = 0;

    /* Reset the reservation */
    env->reserve_addr = -1;

    /*
     * Any interrupt is context synchronizing, check if TCG TLB needs
     * a delayed flush on ppc64
     */
    check_tlb_flush(env, false);
}

/*
 * Note that this function should be greatly optimized when called
 * with a constant excp, from ppc_hw_interrupt
 */
static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    target_ulong msr, new_msr, vector;
    int srr0, srr1, asrr0, asrr1, lev, ail;
    bool lpes0;

    qemu_log_mask(CPU_LOG_INT, "Raise exception at " TARGET_FMT_lx
                  " => %08x (%02x)\n", env->nip, excp, env->error_code);

    /* new srr1 value excluding must-be-zero bits */
    if (excp_model == POWERPC_EXCP_BOOKE) {
        msr = env->msr;
    } else {
        msr = env->msr & ~0x783f0000ULL;
    }

    /*
     * new interrupt handler msr preserves existing HV and ME unless
     * explicitly overridden
     */
    new_msr = env->msr & (((target_ulong)1 << MSR_ME) | MSR_HVB);

    /* target registers */
    srr0 = SPR_SRR0;
    srr1 = SPR_SRR1;
    asrr0 = -1;
    asrr1 = -1;

    /*
     * check for special resume at 0x100 from doze/nap/sleep/winkle on
     * P7/P8/P9
     */
    if (env->resume_as_sreset) {
        excp = powerpc_reset_wakeup(cs, env, excp, &msr);
    }

    /*
     * Exception targeting modifiers
     *
     * LPES0 is supported on POWER7/8/9
     * LPES1 is not supported (old iSeries mode)
     *
     * On anything else, we behave as if LPES0 is 1
     * (externals don't alter MSR:HV)
     *
     * AIL is initialized here but can be cleared by
     * selected exceptions
     */
#if defined(TARGET_PPC64)
    if (excp_model == POWERPC_EXCP_POWER7 ||
        excp_model == POWERPC_EXCP_POWER8 ||
        excp_model == POWERPC_EXCP_POWER9) {
        lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
        if (excp_model != POWERPC_EXCP_POWER7) {
            ail = (env->spr[SPR_LPCR] & LPCR_AIL) >> LPCR_AIL_SHIFT;
        } else {
            ail = 0;
        }
    } else
#endif /* defined(TARGET_PPC64) */
    {
        lpes0 = true;
        ail = 0;
    }
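    /*
     * ail here is LPCR[AIL], the Alternate Interrupt Location field; its
     * vector relocation and MSR[IR]/[DR] effect are applied further down,
     * once the target vector is known.
     */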
    /*
     * Hypervisor emulation assistance interrupt only exists on server
     * arch 2.05 or later. We also don't want to generate it if we
     * don't have HVB in msr_mask (PAPR mode).
     */
    if (excp == POWERPC_EXCP_HV_EMU
#if defined(TARGET_PPC64)
        && !((env->mmu_model & POWERPC_MMU_64) && (env->msr_mask & MSR_HVB))
#endif /* defined(TARGET_PPC64) */

        ) {
        excp = POWERPC_EXCP_PROGRAM;
    }

    switch (excp) {
    case POWERPC_EXCP_NONE:
        /* Should never happen */
        return;
    case POWERPC_EXCP_CRITICAL:    /* Critical input */
        switch (excp_model) {
        case POWERPC_EXCP_40x:
            srr0 = SPR_40x_SRR2;
            srr1 = SPR_40x_SRR3;
            break;
        case POWERPC_EXCP_BOOKE:
            srr0 = SPR_BOOKE_CSRR0;
            srr1 = SPR_BOOKE_CSRR1;
            break;
        case POWERPC_EXCP_G2:
            break;
        default:
            goto excp_invalid;
        }
        break;
    case POWERPC_EXCP_MCHECK:    /* Machine check exception */
        if (msr_me == 0) {
            /*
             * Machine check exception is not enabled. Enter
             * checkstop state.
             */
            fprintf(stderr, "Machine check while not allowed. "
                    "Entering checkstop state\n");
            if (qemu_log_separate()) {
                qemu_log("Machine check while not allowed. "
                         "Entering checkstop state\n");
            }
            cs->halted = 1;
            cpu_interrupt_exittb(cs);
        }
        if (env->msr_mask & MSR_HVB) {
            /*
             * ISA specifies HV, but can be delivered to guest with HV
             * clear (e.g., see FWNMI in PAPR).
             */
            new_msr |= (target_ulong)MSR_HVB;
        }
        ail = 0;

        /* machine check exceptions don't have ME set */
        new_msr &= ~((target_ulong)1 << MSR_ME);

        /* XXX: should also have something loaded in DAR / DSISR */
        switch (excp_model) {
        case POWERPC_EXCP_40x:
            srr0 = SPR_40x_SRR2;
            srr1 = SPR_40x_SRR3;
            break;
        case POWERPC_EXCP_BOOKE:
            /* FIXME: choose one or the other based on CPU type */
            srr0 = SPR_BOOKE_MCSRR0;
            srr1 = SPR_BOOKE_MCSRR1;
            asrr0 = SPR_BOOKE_CSRR0;
            asrr1 = SPR_BOOKE_CSRR1;
            break;
        default:
            break;
        }
        break;
    case POWERPC_EXCP_DSI:       /* Data storage exception */
        LOG_EXCP("DSI exception: DSISR=" TARGET_FMT_lx" DAR=" TARGET_FMT_lx
                 "\n", env->spr[SPR_DSISR], env->spr[SPR_DAR]);
        break;
    case POWERPC_EXCP_ISI:       /* Instruction storage exception */
        LOG_EXCP("ISI exception: msr=" TARGET_FMT_lx ", nip=" TARGET_FMT_lx
                 "\n", msr, env->nip);
        msr |= env->error_code;
        break;
    case POWERPC_EXCP_EXTERNAL:  /* External input */
        cs = CPU(cpu);

        if (!lpes0) {
            new_msr |= (target_ulong)MSR_HVB;
            new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
            srr0 = SPR_HSRR0;
            srr1 = SPR_HSRR1;
        }
        if (env->mpic_proxy) {
            /* IACK the IRQ on delivery */
            env->spr[SPR_BOOKE_EPR] = ldl_phys(cs->as, env->mpic_iack);
        }
        break;
    case POWERPC_EXCP_ALIGN:     /* Alignment exception */
        /* Get rS/rD and rA from faulting opcode */
        /*
         * Note: the opcode fields will not be set properly for a
         * direct store load/store, but nobody cares as nobody
         * actually uses direct store segments.
         */
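        /*
         * error_code was filled in by ppc_cpu_do_unaligned_access() at the
         * bottom of this file with the rS/rD and rA fields of the faulting
         * opcode (insn & 0x03FF0000); they are copied into the low bits of
         * DSISR here.
         */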
        env->spr[SPR_DSISR] |= (env->error_code & 0x03FF0000) >> 16;
        break;
    case POWERPC_EXCP_PROGRAM:   /* Program exception */
        switch (env->error_code & ~0xF) {
        case POWERPC_EXCP_FP:
            if ((msr_fe0 == 0 && msr_fe1 == 0) || msr_fp == 0) {
                LOG_EXCP("Ignore floating point exception\n");
                cs->exception_index = POWERPC_EXCP_NONE;
                env->error_code = 0;
                return;
            }

            /*
             * FP exceptions always have NIP pointing to the faulting
             * instruction, so always use store_next and claim we are
             * precise in the MSR.
             */
            msr |= 0x00100000;
            env->spr[SPR_BOOKE_ESR] = ESR_FP;
            break;
        case POWERPC_EXCP_INVAL:
            LOG_EXCP("Invalid instruction at " TARGET_FMT_lx "\n", env->nip);
            msr |= 0x00080000;
            env->spr[SPR_BOOKE_ESR] = ESR_PIL;
            break;
        case POWERPC_EXCP_PRIV:
            msr |= 0x00040000;
            env->spr[SPR_BOOKE_ESR] = ESR_PPR;
            break;
        case POWERPC_EXCP_TRAP:
            msr |= 0x00020000;
            env->spr[SPR_BOOKE_ESR] = ESR_PTR;
            break;
        default:
            /* Should never occur */
            cpu_abort(cs, "Invalid program exception %d. Aborting\n",
                      env->error_code);
            break;
        }
        break;
    case POWERPC_EXCP_SYSCALL:   /* System call exception */
        lev = env->error_code;
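        /*
         * lev carries the LEV field of the executed sc instruction;
         * lev == 1 ("sc 1") is the hypercall form and, when a virtual
         * hypervisor is registered (PAPR), is handled directly below.
         */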
        if ((lev == 1) && cpu->vhyp) {
            dump_hcall(env);
        } else {
            dump_syscall(env);
        }

        /*
         * We need to correct the NIP which in this case is supposed
         * to point to the next instruction
         */
        env->nip += 4;

        /* "PAPR mode" built-in hypercall emulation */
        if ((lev == 1) && cpu->vhyp) {
            PPCVirtualHypervisorClass *vhc =
                PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
            vhc->hypercall(cpu->vhyp, cpu);
            return;
        }
        if (lev == 1) {
            new_msr |= (target_ulong)MSR_HVB;
        }
        break;
    case POWERPC_EXCP_FPU:       /* Floating-point unavailable exception */
    case POWERPC_EXCP_APU:       /* Auxiliary processor unavailable */
    case POWERPC_EXCP_DECR:      /* Decrementer exception */
        break;
    case POWERPC_EXCP_FIT:       /* Fixed-interval timer interrupt */
        /* FIT on 4xx */
        LOG_EXCP("FIT exception\n");
        break;
    case POWERPC_EXCP_WDT:       /* Watchdog timer interrupt */
        LOG_EXCP("WDT exception\n");
        switch (excp_model) {
        case POWERPC_EXCP_BOOKE:
            srr0 = SPR_BOOKE_CSRR0;
            srr1 = SPR_BOOKE_CSRR1;
            break;
        default:
            break;
        }
        break;
    case POWERPC_EXCP_DTLB:      /* Data TLB error */
    case POWERPC_EXCP_ITLB:      /* Instruction TLB error */
        break;
    case POWERPC_EXCP_DEBUG:     /* Debug interrupt */
        if (env->flags & POWERPC_FLAG_DE) {
            /* FIXME: choose one or the other based on CPU type */
            srr0 = SPR_BOOKE_DSRR0;
            srr1 = SPR_BOOKE_DSRR1;
            asrr0 = SPR_BOOKE_CSRR0;
            asrr1 = SPR_BOOKE_CSRR1;
            /* DBSR already modified by caller */
        } else {
            cpu_abort(cs, "Debug exception triggered on unsupported model\n");
        }
        break;
    case POWERPC_EXCP_SPEU:      /* SPE/embedded floating-point unavailable */
        env->spr[SPR_BOOKE_ESR] = ESR_SPV;
        break;
    case POWERPC_EXCP_EFPDI:     /* Embedded floating-point data interrupt */
        /* XXX: TODO */
        cpu_abort(cs, "Embedded floating point data exception "
                  "is not implemented yet !\n");
        env->spr[SPR_BOOKE_ESR] = ESR_SPV;
        break;
    case POWERPC_EXCP_EFPRI:     /* Embedded floating-point round interrupt */
        /* XXX: TODO */
        cpu_abort(cs, "Embedded floating point round exception "
                  "is not implemented yet !\n");
        env->spr[SPR_BOOKE_ESR] = ESR_SPV;
        break;
    case POWERPC_EXCP_EPERFM:    /* Embedded performance monitor interrupt */
        /* XXX: TODO */
        cpu_abort(cs,
                  "Performance counter exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_DOORI:     /* Embedded doorbell interrupt */
        break;
    case POWERPC_EXCP_DOORCI:    /* Embedded doorbell critical interrupt */
        srr0 = SPR_BOOKE_CSRR0;
        srr1 = SPR_BOOKE_CSRR1;
        break;
    case POWERPC_EXCP_RESET:     /* System reset exception */
        /* A power-saving exception sets ME, otherwise it is unchanged */
        if (msr_pow) {
            /* indicate that we resumed from power save mode */
            msr |= 0x10000;
            new_msr |= ((target_ulong)1 << MSR_ME);
        }
        if (env->msr_mask & MSR_HVB) {
            /*
             * ISA specifies HV, but can be delivered to guest with HV
             * clear (e.g., see FWNMI in PAPR, NMI injection in QEMU).
             */
            new_msr |= (target_ulong)MSR_HVB;
        } else {
            if (msr_pow) {
                cpu_abort(cs, "Trying to deliver power-saving system reset "
                          "exception %d with no HV support\n", excp);
            }
        }
        ail = 0;
        break;
    case POWERPC_EXCP_DSEG:      /* Data segment exception */
    case POWERPC_EXCP_ISEG:      /* Instruction segment exception */
    case POWERPC_EXCP_TRACE:     /* Trace exception */
        break;
    case POWERPC_EXCP_HDECR:     /* Hypervisor decrementer exception */
    case POWERPC_EXCP_HDSI:      /* Hypervisor data storage exception */
    case POWERPC_EXCP_HISI:      /* Hypervisor instruction storage exception */
    case POWERPC_EXCP_HDSEG:     /* Hypervisor data segment exception */
    case POWERPC_EXCP_HISEG:     /* Hypervisor instruction segment exception */
    case POWERPC_EXCP_SDOOR_HV:  /* Hypervisor Doorbell interrupt */
    case POWERPC_EXCP_HV_EMU:
    case POWERPC_EXCP_HVIRT:     /* Hypervisor virtualization */
        srr0 = SPR_HSRR0;
        srr1 = SPR_HSRR1;
        new_msr |= (target_ulong)MSR_HVB;
        new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
        break;
    case POWERPC_EXCP_VPU:       /* Vector unavailable exception */
    case POWERPC_EXCP_VSXU:      /* VSX unavailable exception */
    case POWERPC_EXCP_FU:        /* Facility unavailable exception */
#ifdef TARGET_PPC64
        env->spr[SPR_FSCR] |= ((target_ulong)env->error_code << 56);
#endif
        break;
    case POWERPC_EXCP_HV_FU:     /* Hypervisor Facility Unavailable Exception */
#ifdef TARGET_PPC64
        env->spr[SPR_HFSCR] |= ((target_ulong)env->error_code << FSCR_IC_POS);
        srr0 = SPR_HSRR0;
        srr1 = SPR_HSRR1;
        new_msr |= (target_ulong)MSR_HVB;
        new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
#endif
        break;
    case POWERPC_EXCP_PIT:       /* Programmable interval timer interrupt */
        LOG_EXCP("PIT exception\n");
        break;
    case POWERPC_EXCP_IO:        /* IO error exception */
        /* XXX: TODO */
        cpu_abort(cs, "601 IO error exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_RUNM:      /* Run mode exception */
        /* XXX: TODO */
        cpu_abort(cs, "601 run mode exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_EMUL:      /* Emulation trap exception */
        /* XXX: TODO */
        cpu_abort(cs, "602 emulation trap exception "
                  "is not implemented yet !\n");
        break;
    case POWERPC_EXCP_IFTLB:     /* Instruction fetch TLB error */
        switch (excp_model) {
        case POWERPC_EXCP_602:
        case POWERPC_EXCP_603:
        case POWERPC_EXCP_603E:
        case POWERPC_EXCP_G2:
            goto tlb_miss_tgpr;
        case POWERPC_EXCP_7x5:
            goto tlb_miss;
        case POWERPC_EXCP_74xx:
            goto tlb_miss_74xx;
        default:
            cpu_abort(cs, "Invalid instruction TLB miss exception\n");
            break;
        }
        break;
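    /*
     * The 602/603/G2 software TLB miss handlers run with MSR[TGPR] set so
     * that GPRs 0-3 are replaced by the temporary GPRs; tlb_miss_tgpr
     * below swaps those in before falling through to the common setup.
     */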
    case POWERPC_EXCP_DLTLB:     /* Data load TLB miss */
        switch (excp_model) {
        case POWERPC_EXCP_602:
        case POWERPC_EXCP_603:
        case POWERPC_EXCP_603E:
        case POWERPC_EXCP_G2:
            goto tlb_miss_tgpr;
        case POWERPC_EXCP_7x5:
            goto tlb_miss;
        case POWERPC_EXCP_74xx:
            goto tlb_miss_74xx;
        default:
            cpu_abort(cs, "Invalid data load TLB miss exception\n");
            break;
        }
        break;
    case POWERPC_EXCP_DSTLB:     /* Data store TLB miss */
        switch (excp_model) {
        case POWERPC_EXCP_602:
        case POWERPC_EXCP_603:
        case POWERPC_EXCP_603E:
        case POWERPC_EXCP_G2:
        tlb_miss_tgpr:
            /* Swap temporary saved registers with GPRs */
            if (!(new_msr & ((target_ulong)1 << MSR_TGPR))) {
                new_msr |= (target_ulong)1 << MSR_TGPR;
                hreg_swap_gpr_tgpr(env);
            }
            goto tlb_miss;
        case POWERPC_EXCP_7x5:
        tlb_miss:
#if defined(DEBUG_SOFTWARE_TLB)
            if (qemu_log_enabled()) {
                const char *es;
                target_ulong *miss, *cmp;
                int en;

                if (excp == POWERPC_EXCP_IFTLB) {
                    es = "I";
                    en = 'I';
                    miss = &env->spr[SPR_IMISS];
                    cmp = &env->spr[SPR_ICMP];
                } else {
                    if (excp == POWERPC_EXCP_DLTLB) {
                        es = "DL";
                    } else {
                        es = "DS";
                    }
                    en = 'D';
                    miss = &env->spr[SPR_DMISS];
                    cmp = &env->spr[SPR_DCMP];
                }
                qemu_log("6xx %sTLB miss: %cM " TARGET_FMT_lx " %cC "
                         TARGET_FMT_lx " H1 " TARGET_FMT_lx " H2 "
                         TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp,
                         env->spr[SPR_HASH1], env->spr[SPR_HASH2],
                         env->error_code);
            }
#endif
            msr |= env->crf[0] << 28;
            msr |= env->error_code; /* key, D/I, S/L bits */
            /* Set way using a LRU mechanism */
            msr |= ((env->last_way + 1) & (env->nb_ways - 1)) << 17;
            break;
        case POWERPC_EXCP_74xx:
        tlb_miss_74xx:
#if defined(DEBUG_SOFTWARE_TLB)
            if (qemu_log_enabled()) {
                const char *es;
                target_ulong *miss, *cmp;
                int en;

                if (excp == POWERPC_EXCP_IFTLB) {
                    es = "I";
                    en = 'I';
                    miss = &env->spr[SPR_TLBMISS];
                    cmp = &env->spr[SPR_PTEHI];
                } else {
                    if (excp == POWERPC_EXCP_DLTLB) {
                        es = "DL";
                    } else {
                        es = "DS";
                    }
                    en = 'D';
                    miss = &env->spr[SPR_TLBMISS];
                    cmp = &env->spr[SPR_PTEHI];
                }
                qemu_log("74xx %sTLB miss: %cM " TARGET_FMT_lx " %cC "
                         TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp,
                         env->error_code);
            }
#endif
            msr |= env->error_code; /* key bit */
            break;
        default:
            cpu_abort(cs, "Invalid data store TLB miss exception\n");
            break;
        }
        break;
    case POWERPC_EXCP_FPA:       /* Floating-point assist exception */
        /* XXX: TODO */
        cpu_abort(cs, "Floating point assist exception "
                  "is not implemented yet !\n");
        break;
    case POWERPC_EXCP_DABR:      /* Data address breakpoint */
        /* XXX: TODO */
        cpu_abort(cs, "DABR exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_IABR:      /* Instruction address breakpoint */
        /* XXX: TODO */
        cpu_abort(cs, "IABR exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_SMI:       /* System management interrupt */
        /* XXX: TODO */
        cpu_abort(cs, "SMI exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_THERM:     /* Thermal interrupt */
        /* XXX: TODO */
        cpu_abort(cs, "Thermal management exception "
                  "is not implemented yet !\n");
        break;
    case POWERPC_EXCP_PERFM:     /* Embedded performance monitor interrupt */
        /* XXX: TODO */
        cpu_abort(cs,
                  "Performance counter exception is not implemented yet !\n");
        break;
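    /*
     * Like the cases above, the remaining model-specific interrupts are
     * unimplemented stubs in TCG and abort emulation if ever raised.
     */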
    case POWERPC_EXCP_VPUA:      /* Vector assist exception */
        /* XXX: TODO */
        cpu_abort(cs, "VPU assist exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_SOFTP:     /* Soft patch exception */
        /* XXX: TODO */
        cpu_abort(cs,
                  "970 soft-patch exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_MAINT:     /* Maintenance exception */
        /* XXX: TODO */
        cpu_abort(cs,
                  "970 maintenance exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_MEXTBR:    /* Maskable external breakpoint */
        /* XXX: TODO */
        cpu_abort(cs, "Maskable external exception "
                  "is not implemented yet !\n");
        break;
    case POWERPC_EXCP_NMEXTBR:   /* Non maskable external breakpoint */
        /* XXX: TODO */
        cpu_abort(cs, "Non maskable external exception "
                  "is not implemented yet !\n");
        break;
    default:
    excp_invalid:
        cpu_abort(cs, "Invalid PowerPC exception %d. Aborting\n", excp);
        break;
    }

    /* Save PC */
    env->spr[srr0] = env->nip;

    /* Save MSR */
    env->spr[srr1] = msr;

    /* Sanity check */
    if (!(env->msr_mask & MSR_HVB)) {
        if (new_msr & MSR_HVB) {
            cpu_abort(cs, "Trying to deliver HV exception (MSR) %d with "
                      "no HV support\n", excp);
        }
        if (srr0 == SPR_HSRR0) {
            cpu_abort(cs, "Trying to deliver HV exception (HSRR) %d with "
                      "no HV support\n", excp);
        }
    }

    /* If any alternate SRR registers are defined, duplicate saved values */
    if (asrr0 != -1) {
        env->spr[asrr0] = env->spr[srr0];
    }
    if (asrr1 != -1) {
        env->spr[asrr1] = env->spr[srr1];
    }

    /*
     * Sort out endianness of interrupt, this differs depending on the
     * CPU, the HV mode, etc...
     */
#ifdef TARGET_PPC64
    if (excp_model == POWERPC_EXCP_POWER7) {
        if (!(new_msr & MSR_HVB) && (env->spr[SPR_LPCR] & LPCR_ILE)) {
            new_msr |= (target_ulong)1 << MSR_LE;
        }
    } else if (excp_model == POWERPC_EXCP_POWER8) {
        if (new_msr & MSR_HVB) {
            if (env->spr[SPR_HID0] & HID0_HILE) {
                new_msr |= (target_ulong)1 << MSR_LE;
            }
        } else if (env->spr[SPR_LPCR] & LPCR_ILE) {
            new_msr |= (target_ulong)1 << MSR_LE;
        }
    } else if (excp_model == POWERPC_EXCP_POWER9) {
        if (new_msr & MSR_HVB) {
            if (env->spr[SPR_HID0] & HID0_POWER9_HILE) {
                new_msr |= (target_ulong)1 << MSR_LE;
            }
        } else if (env->spr[SPR_LPCR] & LPCR_ILE) {
            new_msr |= (target_ulong)1 << MSR_LE;
        }
    } else if (msr_ile) {
        new_msr |= (target_ulong)1 << MSR_LE;
    }
#else
    if (msr_ile) {
        new_msr |= (target_ulong)1 << MSR_LE;
    }
#endif

    /* Jump to handler */
    vector = env->excp_vectors[excp];
    if (vector == (target_ulong)-1ULL) {
        cpu_abort(cs, "Raised an exception without defined vector %d\n",
                  excp);
    }
    vector |= env->excp_prefix;

    /*
     * AIL only works if there is no HV transition and we are running
     * with translations enabled
     */
    if (!((msr >> MSR_IR) & 1) || !((msr >> MSR_DR) & 1) ||
        ((new_msr & MSR_HVB) && !(msr & MSR_HVB))) {
        ail = 0;
    }
    /* Handle AIL */
    if (ail) {
        new_msr |= (1 << MSR_IR) | (1 << MSR_DR);
        vector |= ppc_excp_vector_offset(cs, ail);
    }
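    /*
     * Select the handler's addressing mode: 64-bit BookS CPUs enter the
     * handler with MSR[SF] set, BookE copies EPCR[ICM] into MSR[CM], and
     * 32-bit CPUs get the vector truncated to 32 bits.
     */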
#if defined(TARGET_PPC64)
    if (excp_model == POWERPC_EXCP_BOOKE) {
        if (env->spr[SPR_BOOKE_EPCR] & EPCR_ICM) {
            /* Cat.64-bit: EPCR.ICM is copied to MSR.CM */
            new_msr |= (target_ulong)1 << MSR_CM;
        } else {
            vector = (uint32_t)vector;
        }
    } else {
        if (!msr_isf && !(env->mmu_model & POWERPC_MMU_64)) {
            vector = (uint32_t)vector;
        } else {
            new_msr |= (target_ulong)1 << MSR_SF;
        }
    }
#endif

    powerpc_set_excp_state(cpu, vector, new_msr);
}

void ppc_cpu_do_interrupt(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;

    powerpc_excp(cpu, env->excp_model, cs->exception_index);
}

static void ppc_hw_interrupt(CPUPPCState *env)
{
    PowerPCCPU *cpu = env_archcpu(env);
    bool async_deliver;

    /* External reset */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_RESET)) {
        env->pending_interrupts &= ~(1 << PPC_INTERRUPT_RESET);
        powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_RESET);
        return;
    }
    /* Machine check exception */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_MCK)) {
        env->pending_interrupts &= ~(1 << PPC_INTERRUPT_MCK);
        powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_MCHECK);
        return;
    }
#if 0 /* TODO */
    /* External debug exception */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_DEBUG)) {
        env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DEBUG);
        powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DEBUG);
        return;
    }
#endif

    /*
     * For interrupts that gate on MSR:EE, we need to do something a
     * bit more subtle, as we need to let them through even when EE is
     * clear when coming out of some power management states (in order
     * for them to become a 0x100).
     */
    async_deliver = (msr_ee != 0) || env->resume_as_sreset;
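    /*
     * The order of the checks below is the effective delivery priority:
     * hypervisor interrupts first, then external and critical inputs,
     * and finally the EE-gated timers, doorbells and monitor interrupts.
     */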
    /* Hypervisor decrementer exception */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDECR)) {
        /* LPCR will be clear when not supported so this will work */
        bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE);
        if ((async_deliver || msr_hv == 0) && hdice) {
            /* HDEC clears on delivery */
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDECR);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_HDECR);
            return;
        }
    }

    /* Hypervisor virtualization interrupt */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_HVIRT)) {
        /* LPCR will be clear when not supported so this will work */
        bool hvice = !!(env->spr[SPR_LPCR] & LPCR_HVICE);
        if ((async_deliver || msr_hv == 0) && hvice) {
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_HVIRT);
            return;
        }
    }

    /* External interrupt can ignore MSR:EE under some circumstances */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_EXT)) {
        bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
        bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC);
        /* HEIC blocks delivery to the hypervisor */
        if ((async_deliver && !(heic && msr_hv && !msr_pr)) ||
            (env->has_hv_mode && msr_hv == 0 && !lpes0)) {
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_EXTERNAL);
            return;
        }
    }
    if (msr_ce != 0) {
        /* External critical interrupt */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_CEXT)) {
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_CRITICAL);
            return;
        }
    }
    if (async_deliver != 0) {
        /* Watchdog timer on embedded PowerPC */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_WDT)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_WDT);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_WDT);
            return;
        }
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_CDOORBELL)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_CDOORBELL);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DOORCI);
            return;
        }
        /* Fixed interval timer on embedded PowerPC */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_FIT)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_FIT);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_FIT);
            return;
        }
        /* Programmable interval timer on embedded PowerPC */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_PIT)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PIT);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_PIT);
            return;
        }
        /* Decrementer exception */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_DECR)) {
            if (ppc_decr_clear_on_delivery(env)) {
                env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DECR);
            }
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DECR);
            return;
        }
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_DOORBELL)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DOORBELL);
            if (is_book3s_arch2x(env)) {
                powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_SDOOR);
            } else {
                powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DOORI);
            }
            return;
        }
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDOORBELL)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDOORBELL);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_SDOOR_HV);
            return;
        }
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_PERFM)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PERFM);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_PERFM);
            return;
        }
        /* Thermal interrupt */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_THERM)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_THERM);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_THERM);
            return;
        }
    }

    if (env->resume_as_sreset) {
        /*
         * This is a bug! It means that has_work took us out of halt without
         * anything to deliver while in a PM state that requires getting
         * out via a 0x100
         *
         * This means we will incorrectly execute past the power management
         * instruction instead of triggering a reset.
         *
         * It generally means a discrepancy between the wakeup conditions in
         * the processor has_work implementation and the logic in this
         * function.
         */
        cpu_abort(env_cpu(env),
                  "Wakeup from PM state but interrupt undelivered");
    }
}

void ppc_cpu_do_system_reset(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;

    powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_RESET);
}
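/*
 * FWNMI machine checks are delivered to the vector the guest registered
 * (in PAPR, via the "ibm,nmi-register" RTAS call); the caller passes that
 * vector in.
 */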
void ppc_cpu_do_fwnmi_machine_check(CPUState *cs, target_ulong vector)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
    target_ulong msr = 0;

    /*
     * Set MSR and NIP for the handler, SRR0/1, DAR and DSISR have already
     * been set by KVM.
     */
    msr = (1ULL << MSR_ME);
    msr |= env->msr & (1ULL << MSR_SF);
    if (!(*pcc->interrupts_big_endian)(cpu)) {
        msr |= (1ULL << MSR_LE);
    }

    powerpc_set_excp_state(cpu, vector, msr);
}
#endif /* !CONFIG_USER_ONLY */

bool ppc_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;

    if (interrupt_request & CPU_INTERRUPT_HARD) {
        ppc_hw_interrupt(env);
        if (env->pending_interrupts == 0) {
            cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
        }
        return true;
    }
    return false;
}

#if defined(DEBUG_OP)
static void cpu_dump_rfi(target_ulong RA, target_ulong msr)
{
    qemu_log("Return from exception at " TARGET_FMT_lx " with flags "
             TARGET_FMT_lx "\n", RA, msr);
}
#endif

/*****************************************************************************/
/* Exceptions processing helpers */

void raise_exception_err_ra(CPUPPCState *env, uint32_t exception,
                            uint32_t error_code, uintptr_t raddr)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = exception;
    env->error_code = error_code;
    cpu_loop_exit_restore(cs, raddr);
}

void raise_exception_err(CPUPPCState *env, uint32_t exception,
                         uint32_t error_code)
{
    raise_exception_err_ra(env, exception, error_code, 0);
}

void raise_exception(CPUPPCState *env, uint32_t exception)
{
    raise_exception_err_ra(env, exception, 0, 0);
}

void raise_exception_ra(CPUPPCState *env, uint32_t exception,
                        uintptr_t raddr)
{
    raise_exception_err_ra(env, exception, 0, raddr);
}

void helper_raise_exception_err(CPUPPCState *env, uint32_t exception,
                                uint32_t error_code)
{
    raise_exception_err_ra(env, exception, error_code, 0);
}

void helper_raise_exception(CPUPPCState *env, uint32_t exception)
{
    raise_exception_err_ra(env, exception, 0, 0);
}

#if !defined(CONFIG_USER_ONLY)
void helper_store_msr(CPUPPCState *env, target_ulong val)
{
    uint32_t excp = hreg_store_msr(env, val, 0);

    if (excp != 0) {
        CPUState *cs = env_cpu(env);
        cpu_interrupt_exittb(cs);
        raise_exception(env, excp);
    }
}

#if defined(TARGET_PPC64)
void helper_pminsn(CPUPPCState *env, powerpc_pm_insn_t insn)
{
    CPUState *cs;

    cs = env_cpu(env);
    cs->halted = 1;

    /*
     * The architecture specifies that HDEC interrupts are discarded
     * in PM states
     */
    env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDECR);

    /* Condition for waking up at 0x100 */
    env->resume_as_sreset = (insn != PPC_PM_STOP) ||
        (env->spr[SPR_PSSCR] & PSSCR_EC);
}
#endif /* defined(TARGET_PPC64) */
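/*
 * Common return-from-interrupt path: load NIP and MSR from the given
 * save/restore register pair and perform the context synchronisation
 * required by the rfi family of instructions.
 */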
static inline void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr)
{
    CPUState *cs = env_cpu(env);

    /* MSR:POW cannot be set by any form of rfi */
    msr &= ~(1ULL << MSR_POW);

#if defined(TARGET_PPC64)
    /* Switching to 32-bit ? Crop the nip */
    if (!msr_is_64bit(env, msr)) {
        nip = (uint32_t)nip;
    }
#else
    nip = (uint32_t)nip;
#endif
    /* XXX: beware: this is false if VLE is supported */
    env->nip = nip & ~((target_ulong)0x00000003);
    hreg_store_msr(env, msr, 1);
#if defined(DEBUG_OP)
    cpu_dump_rfi(env->nip, env->msr);
#endif
    /*
     * No need to raise an exception here, as rfi is always the last
     * insn of a TB
     */
    cpu_interrupt_exittb(cs);
    /* Reset the reservation */
    env->reserve_addr = -1;

    /* Context synchronizing: check if TCG TLB needs flush */
    check_tlb_flush(env, false);
}

void helper_rfi(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1] & 0xfffffffful);
}

#define MSR_BOOK3S_MASK
#if defined(TARGET_PPC64)
void helper_rfid(CPUPPCState *env)
{
    /*
     * The architecture defines a number of rules for which bits can
     * change but in practice, we handle this in hreg_store_msr()
     * which will be called by do_rfi(), so there is no need to filter
     * here
     */
    do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1]);
}

void helper_hrfid(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_HSRR0], env->spr[SPR_HSRR1]);
}
#endif

/*****************************************************************************/
/* Embedded PowerPC specific helpers */
void helper_40x_rfci(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3]);
}

void helper_rfci(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1]);
}

void helper_rfdi(CPUPPCState *env)
{
    /* FIXME: choose CSRR1 or DSRR1 based on cpu type */
    do_rfi(env, env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1]);
}

void helper_rfmci(CPUPPCState *env)
{
    /* FIXME: choose CSRR1 or MCSRR1 based on cpu type */
    do_rfi(env, env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1]);
}
#endif

void helper_tw(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
               uint32_t flags)
{
    if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
                  ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
                  ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
                  ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
                  ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_TRAP, GETPC());
    }
}

#if defined(TARGET_PPC64)
void helper_td(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
               uint32_t flags)
{
    if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
                  ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
                  ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
                  ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
                  ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01))))) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_TRAP, GETPC());
    }
}
#endif

#if !defined(CONFIG_USER_ONLY)
/*****************************************************************************/
/* PowerPC 601 specific instructions (POWER bridge) */

void helper_rfsvc(CPUPPCState *env)
{
    do_rfi(env, env->lr, env->ctr & 0x0000FFFF);
}

/* Embedded.Processor Control */
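/*
 * Map the doorbell message type encoded in rB by msgsnd/msgclr to the
 * pending interrupt it raises; the guest doorbell types are not
 * implemented and map to -1, which the callers treat as "ignore".
 */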
static int dbell2irq(target_ulong rb)
{
    int msg = rb & DBELL_TYPE_MASK;
    int irq = -1;

    switch (msg) {
    case DBELL_TYPE_DBELL:
        irq = PPC_INTERRUPT_DOORBELL;
        break;
    case DBELL_TYPE_DBELL_CRIT:
        irq = PPC_INTERRUPT_CDOORBELL;
        break;
    case DBELL_TYPE_G_DBELL:
    case DBELL_TYPE_G_DBELL_CRIT:
    case DBELL_TYPE_G_DBELL_MC:
        /* XXX implement */
    default:
        break;
    }

    return irq;
}

void helper_msgclr(CPUPPCState *env, target_ulong rb)
{
    int irq = dbell2irq(rb);

    if (irq < 0) {
        return;
    }

    env->pending_interrupts &= ~(1 << irq);
}

void helper_msgsnd(target_ulong rb)
{
    int irq = dbell2irq(rb);
    int pir = rb & DBELL_PIRTAG_MASK;
    CPUState *cs;

    if (irq < 0) {
        return;
    }

    qemu_mutex_lock_iothread();
    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);
        CPUPPCState *cenv = &cpu->env;

        if ((rb & DBELL_BRDCAST) || (cenv->spr[SPR_BOOKE_PIR] == pir)) {
            cenv->pending_interrupts |= 1 << irq;
            cpu_interrupt(cs, CPU_INTERRUPT_HARD);
        }
    }
    qemu_mutex_unlock_iothread();
}

/* Server Processor Control */

static bool dbell_type_server(target_ulong rb)
{
    /*
     * A Directed Hypervisor Doorbell message is sent only if the
     * message type is 5. All other types are reserved and the
     * instruction is a no-op
     */
    return (rb & DBELL_TYPE_MASK) == DBELL_TYPE_DBELL_SERVER;
}

void helper_book3s_msgclr(CPUPPCState *env, target_ulong rb)
{
    if (!dbell_type_server(rb)) {
        return;
    }

    env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDOORBELL);
}

static void book3s_msgsnd_common(int pir, int irq)
{
    CPUState *cs;

    qemu_mutex_lock_iothread();
    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);
        CPUPPCState *cenv = &cpu->env;

        /* TODO: broadcast message to all threads of the same processor */
        if (cenv->spr_cb[SPR_PIR].default_value == pir) {
            cenv->pending_interrupts |= 1 << irq;
            cpu_interrupt(cs, CPU_INTERRUPT_HARD);
        }
    }
    qemu_mutex_unlock_iothread();
}

void helper_book3s_msgsnd(target_ulong rb)
{
    int pir = rb & DBELL_PROCIDTAG_MASK;

    if (!dbell_type_server(rb)) {
        return;
    }

    book3s_msgsnd_common(pir, PPC_INTERRUPT_HDOORBELL);
}

#if defined(TARGET_PPC64)
void helper_book3s_msgclrp(CPUPPCState *env, target_ulong rb)
{
    helper_hfscr_facility_check(env, HFSCR_MSGP, "msgclrp", HFSCR_IC_MSGP);

    if (!dbell_type_server(rb)) {
        return;
    }

    env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DOORBELL);
}

/*
 * sends a message to other threads that are on the same
 * multi-threaded processor
 */
void helper_book3s_msgsndp(CPUPPCState *env, target_ulong rb)
{
    int pir = env->spr_cb[SPR_PIR].default_value;

    helper_hfscr_facility_check(env, HFSCR_MSGP, "msgsndp", HFSCR_IC_MSGP);

    if (!dbell_type_server(rb)) {
        return;
    }

    /* TODO: TCG supports only one thread */

    book3s_msgsnd_common(pir, PPC_INTERRUPT_DOORBELL);
}
#endif
#endif
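/*
 * Alignment faults re-read the faulting instruction so that its opcode
 * fields can be exposed through DSISR (see the POWERPC_EXCP_ALIGN case
 * in powerpc_excp() above).
 */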
void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr)
{
    CPUPPCState *env = cs->env_ptr;
    uint32_t insn;

    /* Restore state and reload the insn we executed, for filling in DSISR. */
    cpu_restore_state(cs, retaddr, true);
    insn = cpu_ldl_code(env, env->nip);

    cs->exception_index = POWERPC_EXCP_ALIGN;
    env->error_code = insn & 0x03FF0000;
    cpu_loop_exit(cs);
}