/*
 * PowerPC exception emulation helpers for QEMU.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "internal.h"
#include "helper_regs.h"

/* #define DEBUG_OP */
/* #define DEBUG_SOFTWARE_TLB */
/* #define DEBUG_EXCEPTIONS */

#ifdef DEBUG_EXCEPTIONS
#  define LOG_EXCP(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_EXCP(...) do { } while (0)
#endif

/*****************************************************************************/
/* Exception processing */
#if defined(CONFIG_USER_ONLY)
void ppc_cpu_do_interrupt(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;

    cs->exception_index = POWERPC_EXCP_NONE;
    env->error_code = 0;
}

static void ppc_hw_interrupt(CPUPPCState *env)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = POWERPC_EXCP_NONE;
    env->error_code = 0;
}
#else /* defined(CONFIG_USER_ONLY) */
static inline void dump_syscall(CPUPPCState *env)
{
    qemu_log_mask(CPU_LOG_INT, "syscall r0=%016" PRIx64
                  " r3=%016" PRIx64 " r4=%016" PRIx64 " r5=%016" PRIx64
                  " r6=%016" PRIx64 " r7=%016" PRIx64 " r8=%016" PRIx64
                  " nip=" TARGET_FMT_lx "\n",
                  ppc_dump_gpr(env, 0), ppc_dump_gpr(env, 3),
                  ppc_dump_gpr(env, 4), ppc_dump_gpr(env, 5),
                  ppc_dump_gpr(env, 6), ppc_dump_gpr(env, 7),
                  ppc_dump_gpr(env, 8), env->nip);
}

static inline void dump_hcall(CPUPPCState *env)
{
    qemu_log_mask(CPU_LOG_INT, "hypercall r3=%016" PRIx64
                  " r4=%016" PRIx64 " r5=%016" PRIx64 " r6=%016" PRIx64
                  " r7=%016" PRIx64 " r8=%016" PRIx64 " r9=%016" PRIx64
                  " r10=%016" PRIx64 " r11=%016" PRIx64 " r12=%016" PRIx64
                  " nip=" TARGET_FMT_lx "\n",
                  ppc_dump_gpr(env, 3), ppc_dump_gpr(env, 4),
                  ppc_dump_gpr(env, 5), ppc_dump_gpr(env, 6),
                  ppc_dump_gpr(env, 7), ppc_dump_gpr(env, 8),
                  ppc_dump_gpr(env, 9), ppc_dump_gpr(env, 10),
                  ppc_dump_gpr(env, 11), ppc_dump_gpr(env, 12),
                  env->nip);
}

static int powerpc_reset_wakeup(CPUState *cs, CPUPPCState *env, int excp,
                                target_ulong *msr)
{
    /* We no longer are in a PM state */
    env->resume_as_sreset = false;

    /* Pretend to be returning from doze always as we don't lose state */
    *msr |= (0x1ull << (63 - 47));

    /* Machine checks are sent normally */
    if (excp == POWERPC_EXCP_MCHECK) {
        return excp;
    }
    switch (excp) {
    case POWERPC_EXCP_RESET:
        *msr |= 0x4ull << (63 - 45);
        break;
    case POWERPC_EXCP_EXTERNAL:
        *msr |= 0x8ull << (63 - 45);
        break;
    case POWERPC_EXCP_DECR:
        *msr |= 0x6ull << (63 - 45);
        break;
    case POWERPC_EXCP_SDOOR:
        *msr |= 0x5ull << (63 - 45);
        break;
    case POWERPC_EXCP_SDOOR_HV:
        *msr |= 0x3ull << (63 - 45);
        break;
    case POWERPC_EXCP_HV_MAINT:
        *msr |= 0xaull << (63 - 45);
        break;
    case POWERPC_EXCP_HVIRT:
        *msr |= 0x9ull << (63 - 45);
        break;
    default:
        cpu_abort(cs, "Unsupported exception %d in Power Save mode\n",
                  excp);
    }
    return POWERPC_EXCP_RESET;
}

static uint64_t ppc_excp_vector_offset(CPUState *cs, int ail)
{
    uint64_t offset = 0;

    switch (ail) {
    case AIL_NONE:
        break;
    case AIL_0001_8000:
        offset = 0x18000;
        break;
    case AIL_C000_0000_0000_4000:
        offset = 0xc000000000004000ull;
        break;
    default:
        cpu_abort(cs, "Invalid AIL combination %d\n", ail);
        break;
    }

    return offset;
}

static inline void powerpc_set_excp_state(PowerPCCPU *cpu,
                                          target_ulong vector, target_ulong msr)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;

    /*
     * We don't use hreg_store_msr here as we have already treated any
     * special case that could occur. Just store MSR and update hflags
     *
     * Note: We *MUST* not use hreg_store_msr() as-is anyway because it
     * will prevent setting of the HV bit which some exceptions might need
     * to do.
     */
    env->msr = msr & env->msr_mask;
    hreg_compute_hflags(env);
    env->nip = vector;
    /* Reset exception state */
    cs->exception_index = POWERPC_EXCP_NONE;
    env->error_code = 0;

    /* Reset the reservation */
    env->reserve_addr = -1;

    /*
     * Any interrupt is context synchronizing, check if TCG TLB needs
     * a delayed flush on ppc64
     */
    check_tlb_flush(env, false);
}

/*
 * Note that this function should be greatly optimized when called
 * with a constant excp, from ppc_hw_interrupt
 */
static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    target_ulong msr, new_msr, vector;
    int srr0, srr1, asrr0, asrr1, lev, ail;
    bool lpes0;

    qemu_log_mask(CPU_LOG_INT, "Raise exception at " TARGET_FMT_lx
                  " => %08x (%02x)\n", env->nip, excp, env->error_code);

    /* new srr1 value excluding must-be-zero bits */
    if (excp_model == POWERPC_EXCP_BOOKE) {
        msr = env->msr;
    } else {
        msr = env->msr & ~0x783f0000ULL;
    }

    /*
     * new interrupt handler msr preserves existing HV and ME unless
     * explicitly overridden
     */
    new_msr = env->msr & (((target_ulong)1 << MSR_ME) | MSR_HVB);

    /* target registers */
    srr0 = SPR_SRR0;
    srr1 = SPR_SRR1;
    asrr0 = -1;
    asrr1 = -1;

    /*
     * check for special resume at 0x100 from doze/nap/sleep/winkle on
     * P7/P8/P9
     */
    if (env->resume_as_sreset) {
        excp = powerpc_reset_wakeup(cs, env, excp, &msr);
    }

    /*
     * Exception targeting modifiers
     *
     * LPES0 is supported on POWER7/8/9
     * LPES1 is not supported (old iSeries mode)
     *
     * On anything else, we behave as if LPES0 is 1
     * (externals don't alter MSR:HV)
     *
     * AIL is initialized here but can be cleared by
     * selected exceptions
     */
#if defined(TARGET_PPC64)
    if (excp_model == POWERPC_EXCP_POWER7 ||
        excp_model == POWERPC_EXCP_POWER8 ||
        excp_model == POWERPC_EXCP_POWER9) {
        lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
        if (excp_model != POWERPC_EXCP_POWER7) {
            ail = (env->spr[SPR_LPCR] & LPCR_AIL) >> LPCR_AIL_SHIFT;
        } else {
            ail = 0;
        }
    } else
#endif /* defined(TARGET_PPC64) */
    {
        lpes0 = true;
        ail = 0;
    }

    /*
     * Hypervisor emulation assistance interrupt only exists on server
     * arch 2.05 or later. We also don't want to generate it if
     * we don't have HVB in msr_mask (PAPR mode).
     */
    if (excp == POWERPC_EXCP_HV_EMU
#if defined(TARGET_PPC64)
        && !((env->mmu_model & POWERPC_MMU_64) && (env->msr_mask & MSR_HVB))
#endif /* defined(TARGET_PPC64) */

    ) {
        excp = POWERPC_EXCP_PROGRAM;
    }

    switch (excp) {
    case POWERPC_EXCP_NONE:
        /* Should never happen */
        return;
    case POWERPC_EXCP_CRITICAL:    /* Critical input */
        switch (excp_model) {
        case POWERPC_EXCP_40x:
            srr0 = SPR_40x_SRR2;
            srr1 = SPR_40x_SRR3;
            break;
        case POWERPC_EXCP_BOOKE:
            srr0 = SPR_BOOKE_CSRR0;
            srr1 = SPR_BOOKE_CSRR1;
            break;
        case POWERPC_EXCP_G2:
            break;
        default:
            goto excp_invalid;
        }
        break;
    case POWERPC_EXCP_MCHECK:    /* Machine check exception */
        if (msr_me == 0) {
            /*
             * Machine check exception is not enabled. Enter
             * checkstop state.
             */
            fprintf(stderr, "Machine check while not allowed. "
                    "Entering checkstop state\n");
            if (qemu_log_separate()) {
                qemu_log("Machine check while not allowed. "
                         "Entering checkstop state\n");
            }
            cs->halted = 1;
            cpu_interrupt_exittb(cs);
        }
        if (env->msr_mask & MSR_HVB) {
            /*
             * ISA specifies HV, but can be delivered to guest with HV
             * clear (e.g., see FWNMI in PAPR).
             */
            new_msr |= (target_ulong)MSR_HVB;
        }
        ail = 0;

        /* machine check exceptions don't have ME set */
        new_msr &= ~((target_ulong)1 << MSR_ME);

        /* XXX: should also have something loaded in DAR / DSISR */
        switch (excp_model) {
        case POWERPC_EXCP_40x:
            srr0 = SPR_40x_SRR2;
            srr1 = SPR_40x_SRR3;
            break;
        case POWERPC_EXCP_BOOKE:
            /* FIXME: choose one or the other based on CPU type */
            srr0 = SPR_BOOKE_MCSRR0;
            srr1 = SPR_BOOKE_MCSRR1;
            asrr0 = SPR_BOOKE_CSRR0;
            asrr1 = SPR_BOOKE_CSRR1;
            break;
        default:
            break;
        }
        break;
    case POWERPC_EXCP_DSI:       /* Data storage exception */
        LOG_EXCP("DSI exception: DSISR=" TARGET_FMT_lx" DAR=" TARGET_FMT_lx
                 "\n", env->spr[SPR_DSISR], env->spr[SPR_DAR]);
        break;
    case POWERPC_EXCP_ISI:       /* Instruction storage exception */
        LOG_EXCP("ISI exception: msr=" TARGET_FMT_lx ", nip=" TARGET_FMT_lx
                 "\n", msr, env->nip);
        msr |= env->error_code;
        break;
    case POWERPC_EXCP_EXTERNAL:  /* External input */
        cs = CPU(cpu);

        if (!lpes0) {
            new_msr |= (target_ulong)MSR_HVB;
            new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
            srr0 = SPR_HSRR0;
            srr1 = SPR_HSRR1;
        }
        if (env->mpic_proxy) {
            /* IACK the IRQ on delivery */
            env->spr[SPR_BOOKE_EPR] = ldl_phys(cs->as, env->mpic_iack);
        }
        break;
    case POWERPC_EXCP_ALIGN:     /* Alignment exception */
        /* Get rS/rD and rA from faulting opcode */
        /*
         * Note: the opcode fields will not be set properly for a
         * direct store load/store, but nobody cares as nobody
         * actually uses direct store segments.
         */
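        /*
         * env->error_code holds the masked opcode fields (0x03FF0000)
         * that were captured by ppc_cpu_do_unaligned_access() when the
         * alignment fault was raised.
         */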
        env->spr[SPR_DSISR] |= (env->error_code & 0x03FF0000) >> 16;
        break;
    case POWERPC_EXCP_PROGRAM:   /* Program exception */
        switch (env->error_code & ~0xF) {
        case POWERPC_EXCP_FP:
            if ((msr_fe0 == 0 && msr_fe1 == 0) || msr_fp == 0) {
                LOG_EXCP("Ignore floating point exception\n");
                cs->exception_index = POWERPC_EXCP_NONE;
                env->error_code = 0;
                return;
            }

            /*
             * FP exceptions always have NIP pointing to the faulting
             * instruction, so always use store_next and claim we are
             * precise in the MSR.
             */
            msr |= 0x00100000;
            env->spr[SPR_BOOKE_ESR] = ESR_FP;
            break;
        case POWERPC_EXCP_INVAL:
            LOG_EXCP("Invalid instruction at " TARGET_FMT_lx "\n", env->nip);
            msr |= 0x00080000;
            env->spr[SPR_BOOKE_ESR] = ESR_PIL;
            break;
        case POWERPC_EXCP_PRIV:
            msr |= 0x00040000;
            env->spr[SPR_BOOKE_ESR] = ESR_PPR;
            break;
        case POWERPC_EXCP_TRAP:
            msr |= 0x00020000;
            env->spr[SPR_BOOKE_ESR] = ESR_PTR;
            break;
        default:
            /* Should never occur */
            cpu_abort(cs, "Invalid program exception %d. Aborting\n",
                      env->error_code);
            break;
        }
        break;
    case POWERPC_EXCP_SYSCALL:   /* System call exception */
        lev = env->error_code;

        if ((lev == 1) && cpu->vhyp) {
            dump_hcall(env);
        } else {
            dump_syscall(env);
        }

        /*
         * We need to correct the NIP which in this case is supposed
         * to point to the next instruction
         */
        env->nip += 4;

        /* "PAPR mode" built-in hypercall emulation */
        if ((lev == 1) && cpu->vhyp) {
            PPCVirtualHypervisorClass *vhc =
                PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
            vhc->hypercall(cpu->vhyp, cpu);
            return;
        }
        if (lev == 1) {
            new_msr |= (target_ulong)MSR_HVB;
        }
        break;
    case POWERPC_EXCP_FPU:       /* Floating-point unavailable exception */
    case POWERPC_EXCP_APU:       /* Auxiliary processor unavailable */
    case POWERPC_EXCP_DECR:      /* Decrementer exception */
        break;
    case POWERPC_EXCP_FIT:       /* Fixed-interval timer interrupt */
        /* FIT on 4xx */
        LOG_EXCP("FIT exception\n");
        break;
    case POWERPC_EXCP_WDT:       /* Watchdog timer interrupt */
        LOG_EXCP("WDT exception\n");
        switch (excp_model) {
        case POWERPC_EXCP_BOOKE:
            srr0 = SPR_BOOKE_CSRR0;
            srr1 = SPR_BOOKE_CSRR1;
            break;
        default:
            break;
        }
        break;
    case POWERPC_EXCP_DTLB:      /* Data TLB error */
    case POWERPC_EXCP_ITLB:      /* Instruction TLB error */
        break;
    case POWERPC_EXCP_DEBUG:     /* Debug interrupt */
        if (env->flags & POWERPC_FLAG_DE) {
            /* FIXME: choose one or the other based on CPU type */
            srr0 = SPR_BOOKE_DSRR0;
            srr1 = SPR_BOOKE_DSRR1;
            asrr0 = SPR_BOOKE_CSRR0;
            asrr1 = SPR_BOOKE_CSRR1;
            /* DBSR already modified by caller */
        } else {
            cpu_abort(cs, "Debug exception triggered on unsupported model\n");
        }
        break;
    case POWERPC_EXCP_SPEU:      /* SPE/embedded floating-point unavailable */
        env->spr[SPR_BOOKE_ESR] = ESR_SPV;
        break;
    case POWERPC_EXCP_EFPDI:     /* Embedded floating-point data interrupt */
        /* XXX: TODO */
        cpu_abort(cs, "Embedded floating point data exception "
                  "is not implemented yet !\n");
        env->spr[SPR_BOOKE_ESR] = ESR_SPV;
        break;
    case POWERPC_EXCP_EFPRI:     /* Embedded floating-point round interrupt */
        /* XXX: TODO */
        cpu_abort(cs, "Embedded floating point round exception "
                  "is not implemented yet !\n");
        env->spr[SPR_BOOKE_ESR] = ESR_SPV;
        break;
    case POWERPC_EXCP_EPERFM:    /* Embedded performance monitor interrupt */
        /* XXX: TODO */
        cpu_abort(cs,
                  "Performance counter exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_DOORI:     /* Embedded doorbell interrupt */
        break;
    case POWERPC_EXCP_DOORCI:    /* Embedded doorbell critical interrupt */
        srr0 = SPR_BOOKE_CSRR0;
        srr1 = SPR_BOOKE_CSRR1;
        break;
    case POWERPC_EXCP_RESET:     /* System reset exception */
        /* A power-saving exception sets ME, otherwise it is unchanged */
        if (msr_pow) {
            /* indicate that we resumed from power save mode */
            msr |= 0x10000;
            new_msr |= ((target_ulong)1 << MSR_ME);
        }
        if (env->msr_mask & MSR_HVB) {
            /*
             * ISA specifies HV, but can be delivered to guest with HV
             * clear (e.g., see FWNMI in PAPR, NMI injection in QEMU).
             */
            new_msr |= (target_ulong)MSR_HVB;
        } else {
            if (msr_pow) {
                cpu_abort(cs, "Trying to deliver power-saving system reset "
                          "exception %d with no HV support\n", excp);
            }
        }
        ail = 0;
        break;
    case POWERPC_EXCP_DSEG:      /* Data segment exception */
    case POWERPC_EXCP_ISEG:      /* Instruction segment exception */
    case POWERPC_EXCP_TRACE:     /* Trace exception */
        break;
    case POWERPC_EXCP_HISI:      /* Hypervisor instruction storage exception */
        msr |= env->error_code;
    case POWERPC_EXCP_HDECR:     /* Hypervisor decrementer exception */
    case POWERPC_EXCP_HDSI:      /* Hypervisor data storage exception */
    case POWERPC_EXCP_HDSEG:     /* Hypervisor data segment exception */
    case POWERPC_EXCP_HISEG:     /* Hypervisor instruction segment exception */
    case POWERPC_EXCP_SDOOR_HV:  /* Hypervisor Doorbell interrupt */
    case POWERPC_EXCP_HV_EMU:
    case POWERPC_EXCP_HVIRT:     /* Hypervisor virtualization */
        srr0 = SPR_HSRR0;
        srr1 = SPR_HSRR1;
        new_msr |= (target_ulong)MSR_HVB;
        new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
        break;
    case POWERPC_EXCP_VPU:       /* Vector unavailable exception */
    case POWERPC_EXCP_VSXU:      /* VSX unavailable exception */
    case POWERPC_EXCP_FU:        /* Facility unavailable exception */
#ifdef TARGET_PPC64
        env->spr[SPR_FSCR] |= ((target_ulong)env->error_code << 56);
#endif
        break;
    case POWERPC_EXCP_HV_FU:     /* Hypervisor Facility Unavailable Exception */
#ifdef TARGET_PPC64
        env->spr[SPR_HFSCR] |= ((target_ulong)env->error_code << FSCR_IC_POS);
        srr0 = SPR_HSRR0;
        srr1 = SPR_HSRR1;
        new_msr |= (target_ulong)MSR_HVB;
        new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
#endif
        break;
    case POWERPC_EXCP_PIT:       /* Programmable interval timer interrupt */
        LOG_EXCP("PIT exception\n");
        break;
    case POWERPC_EXCP_IO:        /* IO error exception */
        /* XXX: TODO */
        cpu_abort(cs, "601 IO error exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_RUNM:      /* Run mode exception */
        /* XXX: TODO */
        cpu_abort(cs, "601 run mode exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_EMUL:      /* Emulation trap exception */
        /* XXX: TODO */
        cpu_abort(cs, "602 emulation trap exception "
                  "is not implemented yet !\n");
        break;
    case POWERPC_EXCP_IFTLB:     /* Instruction fetch TLB error */
        switch (excp_model) {
        case POWERPC_EXCP_602:
        case POWERPC_EXCP_603:
        case POWERPC_EXCP_603E:
        case POWERPC_EXCP_G2:
            goto tlb_miss_tgpr;
        case POWERPC_EXCP_7x5:
            goto tlb_miss;
        case POWERPC_EXCP_74xx:
            goto tlb_miss_74xx;
        default:
            cpu_abort(cs, "Invalid instruction TLB miss exception\n");
            break;
        }
        break;
    case POWERPC_EXCP_DLTLB:     /* Data load TLB miss */
        switch (excp_model) {
        case POWERPC_EXCP_602:
        case POWERPC_EXCP_603:
        case POWERPC_EXCP_603E:
        case POWERPC_EXCP_G2:
            goto tlb_miss_tgpr;
        case POWERPC_EXCP_7x5:
            goto tlb_miss;
        case POWERPC_EXCP_74xx:
            goto tlb_miss_74xx;
        default:
            cpu_abort(cs, "Invalid data load TLB miss exception\n");
            break;
        }
        break;
    case POWERPC_EXCP_DSTLB:     /* Data store TLB miss */
        switch (excp_model) {
        case POWERPC_EXCP_602:
        case POWERPC_EXCP_603:
        case POWERPC_EXCP_603E:
        case POWERPC_EXCP_G2:
        tlb_miss_tgpr:
            /* Swap temporary saved registers with GPRs */
            if (!(new_msr & ((target_ulong)1 << MSR_TGPR))) {
                new_msr |= (target_ulong)1 << MSR_TGPR;
                hreg_swap_gpr_tgpr(env);
            }
            goto tlb_miss;
        case POWERPC_EXCP_7x5:
        tlb_miss:
#if defined(DEBUG_SOFTWARE_TLB)
            if (qemu_log_enabled()) {
                const char *es;
                target_ulong *miss, *cmp;
                int en;

                if (excp == POWERPC_EXCP_IFTLB) {
                    es = "I";
                    en = 'I';
                    miss = &env->spr[SPR_IMISS];
                    cmp = &env->spr[SPR_ICMP];
                } else {
                    if (excp == POWERPC_EXCP_DLTLB) {
                        es = "DL";
                    } else {
                        es = "DS";
                    }
                    en = 'D';
                    miss = &env->spr[SPR_DMISS];
                    cmp = &env->spr[SPR_DCMP];
                }
                qemu_log("6xx %sTLB miss: %cM " TARGET_FMT_lx " %cC "
                         TARGET_FMT_lx " H1 " TARGET_FMT_lx " H2 "
                         TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp,
                         env->spr[SPR_HASH1], env->spr[SPR_HASH2],
                         env->error_code);
            }
#endif
            msr |= env->crf[0] << 28;
            msr |= env->error_code; /* key, D/I, S/L bits */
            /* Set way using a LRU mechanism */
            msr |= ((env->last_way + 1) & (env->nb_ways - 1)) << 17;
            break;
        case POWERPC_EXCP_74xx:
        tlb_miss_74xx:
#if defined(DEBUG_SOFTWARE_TLB)
            if (qemu_log_enabled()) {
                const char *es;
                target_ulong *miss, *cmp;
                int en;

                if (excp == POWERPC_EXCP_IFTLB) {
                    es = "I";
                    en = 'I';
                    miss = &env->spr[SPR_TLBMISS];
                    cmp = &env->spr[SPR_PTEHI];
                } else {
                    if (excp == POWERPC_EXCP_DLTLB) {
                        es = "DL";
                    } else {
                        es = "DS";
                    }
                    en = 'D';
                    miss = &env->spr[SPR_TLBMISS];
                    cmp = &env->spr[SPR_PTEHI];
                }
                qemu_log("74xx %sTLB miss: %cM " TARGET_FMT_lx " %cC "
                         TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp,
                         env->error_code);
            }
#endif
            msr |= env->error_code; /* key bit */
            break;
        default:
            cpu_abort(cs, "Invalid data store TLB miss exception\n");
            break;
        }
        break;
    case POWERPC_EXCP_FPA:       /* Floating-point assist exception */
        /* XXX: TODO */
        cpu_abort(cs, "Floating point assist exception "
                  "is not implemented yet !\n");
        break;
    case POWERPC_EXCP_DABR:      /* Data address breakpoint */
        /* XXX: TODO */
        cpu_abort(cs, "DABR exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_IABR:      /* Instruction address breakpoint */
        /* XXX: TODO */
        cpu_abort(cs, "IABR exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_SMI:       /* System management interrupt */
        /* XXX: TODO */
        cpu_abort(cs, "SMI exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_THERM:     /* Thermal interrupt */
        /* XXX: TODO */
        cpu_abort(cs, "Thermal management exception "
                  "is not implemented yet !\n");
        break;
    case POWERPC_EXCP_PERFM:     /* Embedded performance monitor interrupt */
        /* XXX: TODO */
        cpu_abort(cs,
                  "Performance counter exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_VPUA:      /* Vector assist exception */
        /* XXX: TODO */
        cpu_abort(cs, "VPU assist exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_SOFTP:     /* Soft patch exception */
        /* XXX: TODO */
        cpu_abort(cs,
                  "970 soft-patch exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_MAINT:     /* Maintenance exception */
        /* XXX: TODO */
        cpu_abort(cs,
                  "970 maintenance exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_MEXTBR:    /* Maskable external breakpoint */
        /* XXX: TODO */
        cpu_abort(cs, "Maskable external exception "
                  "is not implemented yet !\n");
        break;
    case POWERPC_EXCP_NMEXTBR:   /* Non maskable external breakpoint */
        /* XXX: TODO */
        cpu_abort(cs, "Non maskable external exception "
                  "is not implemented yet !\n");
        break;
    default:
    excp_invalid:
        cpu_abort(cs, "Invalid PowerPC exception %d. Aborting\n", excp);
        break;
    }

    /* Save PC */
    env->spr[srr0] = env->nip;

    /* Save MSR */
    env->spr[srr1] = msr;

    /* Sanity check */
    if (!(env->msr_mask & MSR_HVB)) {
        if (new_msr & MSR_HVB) {
            cpu_abort(cs, "Trying to deliver HV exception (MSR) %d with "
                      "no HV support\n", excp);
        }
        if (srr0 == SPR_HSRR0) {
            cpu_abort(cs, "Trying to deliver HV exception (HSRR) %d with "
                      "no HV support\n", excp);
        }
    }

    /* If any alternate SRR registers are defined, duplicate saved values */
    if (asrr0 != -1) {
        env->spr[asrr0] = env->spr[srr0];
    }
    if (asrr1 != -1) {
        env->spr[asrr1] = env->spr[srr1];
    }

    /*
     * Sort out endianness of the interrupt; this differs depending on the
     * CPU, the HV mode, etc...
     */
#ifdef TARGET_PPC64
    if (excp_model == POWERPC_EXCP_POWER7) {
        if (!(new_msr & MSR_HVB) && (env->spr[SPR_LPCR] & LPCR_ILE)) {
            new_msr |= (target_ulong)1 << MSR_LE;
        }
    } else if (excp_model == POWERPC_EXCP_POWER8) {
        if (new_msr & MSR_HVB) {
            if (env->spr[SPR_HID0] & HID0_HILE) {
                new_msr |= (target_ulong)1 << MSR_LE;
            }
        } else if (env->spr[SPR_LPCR] & LPCR_ILE) {
            new_msr |= (target_ulong)1 << MSR_LE;
        }
    } else if (excp_model == POWERPC_EXCP_POWER9) {
        if (new_msr & MSR_HVB) {
            if (env->spr[SPR_HID0] & HID0_POWER9_HILE) {
                new_msr |= (target_ulong)1 << MSR_LE;
            }
        } else if (env->spr[SPR_LPCR] & LPCR_ILE) {
            new_msr |= (target_ulong)1 << MSR_LE;
        }
    } else if (msr_ile) {
        new_msr |= (target_ulong)1 << MSR_LE;
    }
#else
    if (msr_ile) {
        new_msr |= (target_ulong)1 << MSR_LE;
    }
#endif

    /* Jump to handler */
    vector = env->excp_vectors[excp];
    if (vector == (target_ulong)-1ULL) {
        cpu_abort(cs, "Raised an exception without defined vector %d\n",
                  excp);
    }
    vector |= env->excp_prefix;

    /*
     * AIL only works if there is no HV transition and we are running
     * with translations enabled
     */
    if (!((msr >> MSR_IR) & 1) || !((msr >> MSR_DR) & 1) ||
        ((new_msr & MSR_HVB) && !(msr & MSR_HVB))) {
        ail = 0;
    }
    /* Handle AIL */
    if (ail) {
        new_msr |= (1 << MSR_IR) | (1 << MSR_DR);
        vector |= ppc_excp_vector_offset(cs, ail);
    }

#if defined(TARGET_PPC64)
    if (excp_model == POWERPC_EXCP_BOOKE) {
        if (env->spr[SPR_BOOKE_EPCR] & EPCR_ICM) {
            /* Cat.64-bit: EPCR.ICM is copied to MSR.CM */
            new_msr |= (target_ulong)1 << MSR_CM;
        } else {
            vector = (uint32_t)vector;
        }
    } else {
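        /*
         * With MSR[ISF] clear on a CPU without a 64-bit MMU model the
         * handler runs in 32-bit mode and the vector is truncated;
         * otherwise MSR[SF] is set so the handler runs in 64-bit mode.
         */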
        if (!msr_isf && !(env->mmu_model & POWERPC_MMU_64)) {
            vector = (uint32_t)vector;
        } else {
            new_msr |= (target_ulong)1 << MSR_SF;
        }
    }
#endif

    powerpc_set_excp_state(cpu, vector, new_msr);
}

void ppc_cpu_do_interrupt(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;

    powerpc_excp(cpu, env->excp_model, cs->exception_index);
}

static void ppc_hw_interrupt(CPUPPCState *env)
{
    PowerPCCPU *cpu = env_archcpu(env);
    bool async_deliver;

    /* External reset */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_RESET)) {
        env->pending_interrupts &= ~(1 << PPC_INTERRUPT_RESET);
        powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_RESET);
        return;
    }
    /* Machine check exception */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_MCK)) {
        env->pending_interrupts &= ~(1 << PPC_INTERRUPT_MCK);
        powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_MCHECK);
        return;
    }
#if 0 /* TODO */
    /* External debug exception */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_DEBUG)) {
        env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DEBUG);
        powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DEBUG);
        return;
    }
#endif

    /*
     * For interrupts that gate on MSR:EE, we need to do something a
     * bit more subtle, as we need to let them through even when EE is
     * clear when coming out of some power management states (in order
     * for them to become a 0x100).
     */
    async_deliver = (msr_ee != 0) || env->resume_as_sreset;

    /* Hypervisor decrementer exception */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDECR)) {
        /* LPCR will be clear when not supported so this will work */
        bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE);
        if ((async_deliver || msr_hv == 0) && hdice) {
            /* HDEC clears on delivery */
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDECR);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_HDECR);
            return;
        }
    }

    /* Hypervisor virtualization interrupt */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_HVIRT)) {
        /* LPCR will be clear when not supported so this will work */
        bool hvice = !!(env->spr[SPR_LPCR] & LPCR_HVICE);
        if ((async_deliver || msr_hv == 0) && hvice) {
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_HVIRT);
            return;
        }
    }

    /* External interrupt can ignore MSR:EE under some circumstances */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_EXT)) {
        bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
        bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC);
        /* HEIC blocks delivery to the hypervisor */
        if ((async_deliver && !(heic && msr_hv && !msr_pr)) ||
            (env->has_hv_mode && msr_hv == 0 && !lpes0)) {
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_EXTERNAL);
            return;
        }
    }
    if (msr_ce != 0) {
        /* External critical interrupt */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_CEXT)) {
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_CRITICAL);
            return;
        }
    }
    if (async_deliver != 0) {
        /* Watchdog timer on embedded PowerPC */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_WDT)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_WDT);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_WDT);
            return;
        }
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_CDOORBELL)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_CDOORBELL);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DOORCI);
            return;
        }
        /* Fixed interval timer on embedded PowerPC */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_FIT)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_FIT);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_FIT);
            return;
        }
        /* Programmable interval timer on embedded PowerPC */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_PIT)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PIT);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_PIT);
            return;
        }
        /* Decrementer exception */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_DECR)) {
            if (ppc_decr_clear_on_delivery(env)) {
                env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DECR);
            }
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DECR);
            return;
        }
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_DOORBELL)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DOORBELL);
            if (is_book3s_arch2x(env)) {
                powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_SDOOR);
            } else {
                powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DOORI);
            }
            return;
        }
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDOORBELL)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDOORBELL);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_SDOOR_HV);
            return;
        }
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_PERFM)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PERFM);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_PERFM);
            return;
        }
        /* Thermal interrupt */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_THERM)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_THERM);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_THERM);
            return;
        }
    }

    if (env->resume_as_sreset) {
        /*
         * This is a bug! It means that has_work took us out of halt without
         * anything to deliver while in a PM state that requires getting
         * out via a 0x100
         *
         * This means we will incorrectly execute past the power management
         * instruction instead of triggering a reset.
         *
         * It generally means a discrepancy between the wakeup conditions in
         * the processor has_work implementation and the logic in this
         * function.
         */
        cpu_abort(env_cpu(env),
                  "Wakeup from PM state but interrupt Undelivered");
    }
}

void ppc_cpu_do_system_reset(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;

    powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_RESET);
}

void ppc_cpu_do_fwnmi_machine_check(CPUState *cs, target_ulong vector)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
    target_ulong msr = 0;

    /*
     * Set MSR and NIP for the handler, SRR0/1, DAR and DSISR have already
     * been set by KVM.
     */
    msr = (1ULL << MSR_ME);
    msr |= env->msr & (1ULL << MSR_SF);
    if (!(*pcc->interrupts_big_endian)(cpu)) {
        msr |= (1ULL << MSR_LE);
    }

    powerpc_set_excp_state(cpu, vector, msr);
}
#endif /* !CONFIG_USER_ONLY */

bool ppc_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;

    if (interrupt_request & CPU_INTERRUPT_HARD) {
        ppc_hw_interrupt(env);
        if (env->pending_interrupts == 0) {
            cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
        }
        return true;
    }
    return false;
}

#if defined(DEBUG_OP)
static void cpu_dump_rfi(target_ulong RA, target_ulong msr)
{
    qemu_log("Return from exception at " TARGET_FMT_lx " with flags "
             TARGET_FMT_lx "\n", RA, msr);
}
#endif

/*****************************************************************************/
/* Exceptions processing helpers */

void raise_exception_err_ra(CPUPPCState *env, uint32_t exception,
                            uint32_t error_code, uintptr_t raddr)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = exception;
    env->error_code = error_code;
    cpu_loop_exit_restore(cs, raddr);
}

void raise_exception_err(CPUPPCState *env, uint32_t exception,
                         uint32_t error_code)
{
    raise_exception_err_ra(env, exception, error_code, 0);
}

void raise_exception(CPUPPCState *env, uint32_t exception)
{
    raise_exception_err_ra(env, exception, 0, 0);
}

void raise_exception_ra(CPUPPCState *env, uint32_t exception,
                        uintptr_t raddr)
{
    raise_exception_err_ra(env, exception, 0, raddr);
}

void helper_raise_exception_err(CPUPPCState *env, uint32_t exception,
                                uint32_t error_code)
{
    raise_exception_err_ra(env, exception, error_code, 0);
}

void helper_raise_exception(CPUPPCState *env, uint32_t exception)
{
    raise_exception_err_ra(env, exception, 0, 0);
}

#if !defined(CONFIG_USER_ONLY)
void helper_store_msr(CPUPPCState *env, target_ulong val)
{
    uint32_t excp = hreg_store_msr(env, val, 0);

    if (excp != 0) {
        CPUState *cs = env_cpu(env);
        cpu_interrupt_exittb(cs);
        raise_exception(env, excp);
    }
}

#if defined(TARGET_PPC64)
void helper_pminsn(CPUPPCState *env, powerpc_pm_insn_t insn)
{
    CPUState *cs;

    cs = env_cpu(env);
    cs->halted = 1;

    /*
     * The architecture specifies that HDEC interrupts are discarded
     * in PM states
     */
    env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDECR);

    /* Condition for waking up at 0x100 */
    env->resume_as_sreset = (insn != PPC_PM_STOP) ||
        (env->spr[SPR_PSSCR] & PSSCR_EC);
}
#endif /* defined(TARGET_PPC64) */

static inline void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr)
{
    CPUState *cs = env_cpu(env);

    /* MSR:POW cannot be set by any form of rfi */
    msr &= ~(1ULL << MSR_POW);

#if defined(TARGET_PPC64)
    /* Switching to 32-bit ? Crop the nip */
    if (!msr_is_64bit(env, msr)) {
        nip = (uint32_t)nip;
    }
#else
    nip = (uint32_t)nip;
#endif
    /* XXX: beware: this is false if VLE is supported */
    env->nip = nip & ~((target_ulong)0x00000003);
    hreg_store_msr(env, msr, 1);
#if defined(DEBUG_OP)
    cpu_dump_rfi(env->nip, env->msr);
#endif
    /*
     * No need to raise an exception here, as rfi is always the last
     * insn of a TB
     */
    cpu_interrupt_exittb(cs);
    /* Reset the reservation */
    env->reserve_addr = -1;

    /* Context synchronizing: check if TCG TLB needs flush */
    check_tlb_flush(env, false);
}

void helper_rfi(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1] & 0xfffffffful);
}

#define MSR_BOOK3S_MASK
#if defined(TARGET_PPC64)
void helper_rfid(CPUPPCState *env)
{
    /*
     * The architecture defines a number of rules for which bits can
     * change but in practice, we handle this in hreg_store_msr()
     * which will be called by do_rfi(), so there is no need to filter
     * here
     */
    do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1]);
}

void helper_hrfid(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_HSRR0], env->spr[SPR_HSRR1]);
}
#endif

/*****************************************************************************/
/* Embedded PowerPC specific helpers */
void helper_40x_rfci(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3]);
}

void helper_rfci(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1]);
}

void helper_rfdi(CPUPPCState *env)
{
    /* FIXME: choose CSRR1 or DSRR1 based on cpu type */
    do_rfi(env, env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1]);
}

void helper_rfmci(CPUPPCState *env)
{
    /* FIXME: choose CSRR1 or MCSRR1 based on cpu type */
    do_rfi(env, env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1]);
}
#endif

void helper_tw(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
               uint32_t flags)
{
    if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
                  ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
                  ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
                  ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
                  ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_TRAP, GETPC());
    }
}

#if defined(TARGET_PPC64)
void helper_td(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
               uint32_t flags)
{
    if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
                  ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
                  ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
                  ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
                  ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01))))) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_TRAP, GETPC());
    }
}
#endif

#if !defined(CONFIG_USER_ONLY)
/*****************************************************************************/
/* PowerPC 601 specific instructions (POWER bridge) */

void helper_rfsvc(CPUPPCState *env)
{
    do_rfi(env, env->lr, env->ctr & 0x0000FFFF);
}

/* Embedded.Processor Control */
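/*
 * Map the doorbell message type encoded in rB (msgsnd/msgclr) to the
 * corresponding pending-interrupt bit; guest doorbell types are not
 * implemented and, like reserved types, return -1 (no-op).
 */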
static int dbell2irq(target_ulong rb)
{
    int msg = rb & DBELL_TYPE_MASK;
    int irq = -1;

    switch (msg) {
    case DBELL_TYPE_DBELL:
        irq = PPC_INTERRUPT_DOORBELL;
        break;
    case DBELL_TYPE_DBELL_CRIT:
        irq = PPC_INTERRUPT_CDOORBELL;
        break;
    case DBELL_TYPE_G_DBELL:
    case DBELL_TYPE_G_DBELL_CRIT:
    case DBELL_TYPE_G_DBELL_MC:
        /* XXX implement */
    default:
        break;
    }

    return irq;
}

void helper_msgclr(CPUPPCState *env, target_ulong rb)
{
    int irq = dbell2irq(rb);

    if (irq < 0) {
        return;
    }

    env->pending_interrupts &= ~(1 << irq);
}

void helper_msgsnd(target_ulong rb)
{
    int irq = dbell2irq(rb);
    int pir = rb & DBELL_PIRTAG_MASK;
    CPUState *cs;

    if (irq < 0) {
        return;
    }

    qemu_mutex_lock_iothread();
    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);
        CPUPPCState *cenv = &cpu->env;

        if ((rb & DBELL_BRDCAST) || (cenv->spr[SPR_BOOKE_PIR] == pir)) {
            cenv->pending_interrupts |= 1 << irq;
            cpu_interrupt(cs, CPU_INTERRUPT_HARD);
        }
    }
    qemu_mutex_unlock_iothread();
}

/* Server Processor Control */

static bool dbell_type_server(target_ulong rb)
{
    /*
     * A Directed Hypervisor Doorbell message is sent only if the
     * message type is 5. All other types are reserved and the
     * instruction is a no-op
     */
    return (rb & DBELL_TYPE_MASK) == DBELL_TYPE_DBELL_SERVER;
}

void helper_book3s_msgclr(CPUPPCState *env, target_ulong rb)
{
    if (!dbell_type_server(rb)) {
        return;
    }

    env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDOORBELL);
}

static void book3s_msgsnd_common(int pir, int irq)
{
    CPUState *cs;

    qemu_mutex_lock_iothread();
    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);
        CPUPPCState *cenv = &cpu->env;

        /* TODO: broadcast message to all threads of the same processor */
        if (cenv->spr_cb[SPR_PIR].default_value == pir) {
            cenv->pending_interrupts |= 1 << irq;
            cpu_interrupt(cs, CPU_INTERRUPT_HARD);
        }
    }
    qemu_mutex_unlock_iothread();
}

void helper_book3s_msgsnd(target_ulong rb)
{
    int pir = rb & DBELL_PROCIDTAG_MASK;

    if (!dbell_type_server(rb)) {
        return;
    }

    book3s_msgsnd_common(pir, PPC_INTERRUPT_HDOORBELL);
}

#if defined(TARGET_PPC64)
void helper_book3s_msgclrp(CPUPPCState *env, target_ulong rb)
{
    helper_hfscr_facility_check(env, HFSCR_MSGP, "msgclrp", HFSCR_IC_MSGP);

    if (!dbell_type_server(rb)) {
        return;
    }

    env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DOORBELL);
}

/*
 * sends a message to other threads that are on the same
 * multi-threaded processor
 */
void helper_book3s_msgsndp(CPUPPCState *env, target_ulong rb)
{
    int pir = env->spr_cb[SPR_PIR].default_value;

    helper_hfscr_facility_check(env, HFSCR_MSGP, "msgsndp", HFSCR_IC_MSGP);

    if (!dbell_type_server(rb)) {
        return;
    }

    /* TODO: TCG supports only one thread */

    book3s_msgsnd_common(pir, PPC_INTERRUPT_DOORBELL);
}
#endif
#endif

void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr)
{
    CPUPPCState *env = cs->env_ptr;
    uint32_t insn;

    /* Restore state and reload the insn we executed, for filling in DSISR. */
    cpu_restore_state(cs, retaddr, true);
    insn = cpu_ldl_code(env, env->nip);

    cs->exception_index = POWERPC_EXCP_ALIGN;
    env->error_code = insn & 0x03FF0000;
    cpu_loop_exit(cs);
}