1 /* 2 * PowerPC exception emulation helpers for QEMU. 3 * 4 * Copyright (c) 2003-2007 Jocelyn Mayer 5 * 6 * This library is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU Lesser General Public 8 * License as published by the Free Software Foundation; either 9 * version 2 of the License, or (at your option) any later version. 10 * 11 * This library is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * Lesser General Public License for more details. 15 * 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 #include "qemu/osdep.h" 20 #include "qemu/main-loop.h" 21 #include "cpu.h" 22 #include "exec/helper-proto.h" 23 #include "exec/exec-all.h" 24 #include "exec/cpu_ldst.h" 25 #include "internal.h" 26 #include "helper_regs.h" 27 28 /* #define DEBUG_OP */ 29 /* #define DEBUG_SOFTWARE_TLB */ 30 /* #define DEBUG_EXCEPTIONS */ 31 32 #ifdef DEBUG_EXCEPTIONS 33 # define LOG_EXCP(...) qemu_log(__VA_ARGS__) 34 #else 35 # define LOG_EXCP(...) 
do { } while (0) 36 #endif 37 38 /*****************************************************************************/ 39 /* Exception processing */ 40 #if defined(CONFIG_USER_ONLY) 41 void ppc_cpu_do_interrupt(CPUState *cs) 42 { 43 PowerPCCPU *cpu = POWERPC_CPU(cs); 44 CPUPPCState *env = &cpu->env; 45 46 cs->exception_index = POWERPC_EXCP_NONE; 47 env->error_code = 0; 48 } 49 50 static void ppc_hw_interrupt(CPUPPCState *env) 51 { 52 CPUState *cs = env_cpu(env); 53 54 cs->exception_index = POWERPC_EXCP_NONE; 55 env->error_code = 0; 56 } 57 #else /* defined(CONFIG_USER_ONLY) */ 58 static inline void dump_syscall(CPUPPCState *env) 59 { 60 qemu_log_mask(CPU_LOG_INT, "syscall r0=%016" PRIx64 61 " r3=%016" PRIx64 " r4=%016" PRIx64 " r5=%016" PRIx64 62 " r6=%016" PRIx64 " r7=%016" PRIx64 " r8=%016" PRIx64 63 " nip=" TARGET_FMT_lx "\n", 64 ppc_dump_gpr(env, 0), ppc_dump_gpr(env, 3), 65 ppc_dump_gpr(env, 4), ppc_dump_gpr(env, 5), 66 ppc_dump_gpr(env, 6), ppc_dump_gpr(env, 7), 67 ppc_dump_gpr(env, 8), env->nip); 68 } 69 70 static inline void dump_syscall_vectored(CPUPPCState *env) 71 { 72 qemu_log_mask(CPU_LOG_INT, "syscall r0=%016" PRIx64 73 " r3=%016" PRIx64 " r4=%016" PRIx64 " r5=%016" PRIx64 74 " r6=%016" PRIx64 " r7=%016" PRIx64 " r8=%016" PRIx64 75 " nip=" TARGET_FMT_lx "\n", 76 ppc_dump_gpr(env, 0), ppc_dump_gpr(env, 3), 77 ppc_dump_gpr(env, 4), ppc_dump_gpr(env, 5), 78 ppc_dump_gpr(env, 6), ppc_dump_gpr(env, 7), 79 ppc_dump_gpr(env, 8), env->nip); 80 } 81 82 static inline void dump_hcall(CPUPPCState *env) 83 { 84 qemu_log_mask(CPU_LOG_INT, "hypercall r3=%016" PRIx64 85 " r4=%016" PRIx64 " r5=%016" PRIx64 " r6=%016" PRIx64 86 " r7=%016" PRIx64 " r8=%016" PRIx64 " r9=%016" PRIx64 87 " r10=%016" PRIx64 " r11=%016" PRIx64 " r12=%016" PRIx64 88 " nip=" TARGET_FMT_lx "\n", 89 ppc_dump_gpr(env, 3), ppc_dump_gpr(env, 4), 90 ppc_dump_gpr(env, 5), ppc_dump_gpr(env, 6), 91 ppc_dump_gpr(env, 7), ppc_dump_gpr(env, 8), 92 ppc_dump_gpr(env, 9), ppc_dump_gpr(env, 10), 93 
ppc_dump_gpr(env, 11), ppc_dump_gpr(env, 12), 94 env->nip); 95 } 96 97 static int powerpc_reset_wakeup(CPUState *cs, CPUPPCState *env, int excp, 98 target_ulong *msr) 99 { 100 /* We no longer are in a PM state */ 101 env->resume_as_sreset = false; 102 103 /* Pretend to be returning from doze always as we don't lose state */ 104 *msr |= SRR1_WS_NOLOSS; 105 106 /* Machine checks are sent normally */ 107 if (excp == POWERPC_EXCP_MCHECK) { 108 return excp; 109 } 110 switch (excp) { 111 case POWERPC_EXCP_RESET: 112 *msr |= SRR1_WAKERESET; 113 break; 114 case POWERPC_EXCP_EXTERNAL: 115 *msr |= SRR1_WAKEEE; 116 break; 117 case POWERPC_EXCP_DECR: 118 *msr |= SRR1_WAKEDEC; 119 break; 120 case POWERPC_EXCP_SDOOR: 121 *msr |= SRR1_WAKEDBELL; 122 break; 123 case POWERPC_EXCP_SDOOR_HV: 124 *msr |= SRR1_WAKEHDBELL; 125 break; 126 case POWERPC_EXCP_HV_MAINT: 127 *msr |= SRR1_WAKEHMI; 128 break; 129 case POWERPC_EXCP_HVIRT: 130 *msr |= SRR1_WAKEHVI; 131 break; 132 default: 133 cpu_abort(cs, "Unsupported exception %d in Power Save mode\n", 134 excp); 135 } 136 return POWERPC_EXCP_RESET; 137 } 138 139 static uint64_t ppc_excp_vector_offset(CPUState *cs, int ail) 140 { 141 uint64_t offset = 0; 142 143 switch (ail) { 144 case AIL_NONE: 145 break; 146 case AIL_0001_8000: 147 offset = 0x18000; 148 break; 149 case AIL_C000_0000_0000_4000: 150 offset = 0xc000000000004000ull; 151 break; 152 default: 153 cpu_abort(cs, "Invalid AIL combination %d\n", ail); 154 break; 155 } 156 157 return offset; 158 } 159 160 static inline void powerpc_set_excp_state(PowerPCCPU *cpu, 161 target_ulong vector, target_ulong msr) 162 { 163 CPUState *cs = CPU(cpu); 164 CPUPPCState *env = &cpu->env; 165 166 /* 167 * We don't use hreg_store_msr here as already have treated any 168 * special case that could occur. Just store MSR and update hflags 169 * 170 * Note: We *MUST* not use hreg_store_msr() as-is anyway because it 171 * will prevent setting of the HV bit which some exceptions might need 172 * to do. 
173 */ 174 env->msr = msr & env->msr_mask; 175 hreg_compute_hflags(env); 176 env->nip = vector; 177 /* Reset exception state */ 178 cs->exception_index = POWERPC_EXCP_NONE; 179 env->error_code = 0; 180 181 /* Reset the reservation */ 182 env->reserve_addr = -1; 183 184 /* 185 * Any interrupt is context synchronizing, check if TCG TLB needs 186 * a delayed flush on ppc64 187 */ 188 check_tlb_flush(env, false); 189 } 190 191 /* 192 * Note that this function should be greatly optimized when called 193 * with a constant excp, from ppc_hw_interrupt 194 */ 195 static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp) 196 { 197 CPUState *cs = CPU(cpu); 198 CPUPPCState *env = &cpu->env; 199 target_ulong msr, new_msr, vector; 200 int srr0, srr1, asrr0, asrr1, lev = -1, ail; 201 bool lpes0; 202 203 qemu_log_mask(CPU_LOG_INT, "Raise exception at " TARGET_FMT_lx 204 " => %08x (%02x)\n", env->nip, excp, env->error_code); 205 206 /* new srr1 value excluding must-be-zero bits */ 207 if (excp_model == POWERPC_EXCP_BOOKE) { 208 msr = env->msr; 209 } else { 210 msr = env->msr & ~0x783f0000ULL; 211 } 212 213 /* 214 * new interrupt handler msr preserves existing HV and ME unless 215 * explicitly overriden 216 */ 217 new_msr = env->msr & (((target_ulong)1 << MSR_ME) | MSR_HVB); 218 219 /* target registers */ 220 srr0 = SPR_SRR0; 221 srr1 = SPR_SRR1; 222 asrr0 = -1; 223 asrr1 = -1; 224 225 /* 226 * check for special resume at 0x100 from doze/nap/sleep/winkle on 227 * P7/P8/P9 228 */ 229 if (env->resume_as_sreset) { 230 excp = powerpc_reset_wakeup(cs, env, excp, &msr); 231 } 232 233 /* 234 * Exception targeting modifiers 235 * 236 * LPES0 is supported on POWER7/8/9 237 * LPES1 is not supported (old iSeries mode) 238 * 239 * On anything else, we behave as if LPES0 is 1 240 * (externals don't alter MSR:HV) 241 * 242 * AIL is initialized here but can be cleared by 243 * selected exceptions 244 */ 245 #if defined(TARGET_PPC64) 246 if (excp_model == POWERPC_EXCP_POWER7 || 247 
excp_model == POWERPC_EXCP_POWER8 || 248 excp_model == POWERPC_EXCP_POWER9) { 249 lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0); 250 if (excp_model != POWERPC_EXCP_POWER7) { 251 ail = (env->spr[SPR_LPCR] & LPCR_AIL) >> LPCR_AIL_SHIFT; 252 } else { 253 ail = 0; 254 } 255 } else 256 #endif /* defined(TARGET_PPC64) */ 257 { 258 lpes0 = true; 259 ail = 0; 260 } 261 262 /* 263 * Hypervisor emulation assistance interrupt only exists on server 264 * arch 2.05 server or later. We also don't want to generate it if 265 * we don't have HVB in msr_mask (PAPR mode). 266 */ 267 if (excp == POWERPC_EXCP_HV_EMU 268 #if defined(TARGET_PPC64) 269 && !((env->mmu_model & POWERPC_MMU_64) && (env->msr_mask & MSR_HVB)) 270 #endif /* defined(TARGET_PPC64) */ 271 272 ) { 273 excp = POWERPC_EXCP_PROGRAM; 274 } 275 276 switch (excp) { 277 case POWERPC_EXCP_NONE: 278 /* Should never happen */ 279 return; 280 case POWERPC_EXCP_CRITICAL: /* Critical input */ 281 switch (excp_model) { 282 case POWERPC_EXCP_40x: 283 srr0 = SPR_40x_SRR2; 284 srr1 = SPR_40x_SRR3; 285 break; 286 case POWERPC_EXCP_BOOKE: 287 srr0 = SPR_BOOKE_CSRR0; 288 srr1 = SPR_BOOKE_CSRR1; 289 break; 290 case POWERPC_EXCP_G2: 291 break; 292 default: 293 goto excp_invalid; 294 } 295 break; 296 case POWERPC_EXCP_MCHECK: /* Machine check exception */ 297 if (msr_me == 0) { 298 /* 299 * Machine check exception is not enabled. Enter 300 * checkstop state. 301 */ 302 fprintf(stderr, "Machine check while not allowed. " 303 "Entering checkstop state\n"); 304 if (qemu_log_separate()) { 305 qemu_log("Machine check while not allowed. " 306 "Entering checkstop state\n"); 307 } 308 cs->halted = 1; 309 cpu_interrupt_exittb(cs); 310 } 311 if (env->msr_mask & MSR_HVB) { 312 /* 313 * ISA specifies HV, but can be delivered to guest with HV 314 * clear (e.g., see FWNMI in PAPR). 
315 */ 316 new_msr |= (target_ulong)MSR_HVB; 317 } 318 ail = 0; 319 320 /* machine check exceptions don't have ME set */ 321 new_msr &= ~((target_ulong)1 << MSR_ME); 322 323 /* XXX: should also have something loaded in DAR / DSISR */ 324 switch (excp_model) { 325 case POWERPC_EXCP_40x: 326 srr0 = SPR_40x_SRR2; 327 srr1 = SPR_40x_SRR3; 328 break; 329 case POWERPC_EXCP_BOOKE: 330 /* FIXME: choose one or the other based on CPU type */ 331 srr0 = SPR_BOOKE_MCSRR0; 332 srr1 = SPR_BOOKE_MCSRR1; 333 asrr0 = SPR_BOOKE_CSRR0; 334 asrr1 = SPR_BOOKE_CSRR1; 335 break; 336 default: 337 break; 338 } 339 break; 340 case POWERPC_EXCP_DSI: /* Data storage exception */ 341 LOG_EXCP("DSI exception: DSISR=" TARGET_FMT_lx" DAR=" TARGET_FMT_lx 342 "\n", env->spr[SPR_DSISR], env->spr[SPR_DAR]); 343 break; 344 case POWERPC_EXCP_ISI: /* Instruction storage exception */ 345 LOG_EXCP("ISI exception: msr=" TARGET_FMT_lx ", nip=" TARGET_FMT_lx 346 "\n", msr, env->nip); 347 msr |= env->error_code; 348 break; 349 case POWERPC_EXCP_EXTERNAL: /* External input */ 350 cs = CPU(cpu); 351 352 if (!lpes0) { 353 new_msr |= (target_ulong)MSR_HVB; 354 new_msr |= env->msr & ((target_ulong)1 << MSR_RI); 355 srr0 = SPR_HSRR0; 356 srr1 = SPR_HSRR1; 357 } 358 if (env->mpic_proxy) { 359 /* IACK the IRQ on delivery */ 360 env->spr[SPR_BOOKE_EPR] = ldl_phys(cs->as, env->mpic_iack); 361 } 362 break; 363 case POWERPC_EXCP_ALIGN: /* Alignment exception */ 364 /* Get rS/rD and rA from faulting opcode */ 365 /* 366 * Note: the opcode fields will not be set properly for a 367 * direct store load/store, but nobody cares as nobody 368 * actually uses direct store segments. 
369 */ 370 env->spr[SPR_DSISR] |= (env->error_code & 0x03FF0000) >> 16; 371 break; 372 case POWERPC_EXCP_PROGRAM: /* Program exception */ 373 switch (env->error_code & ~0xF) { 374 case POWERPC_EXCP_FP: 375 if ((msr_fe0 == 0 && msr_fe1 == 0) || msr_fp == 0) { 376 LOG_EXCP("Ignore floating point exception\n"); 377 cs->exception_index = POWERPC_EXCP_NONE; 378 env->error_code = 0; 379 return; 380 } 381 382 /* 383 * FP exceptions always have NIP pointing to the faulting 384 * instruction, so always use store_next and claim we are 385 * precise in the MSR. 386 */ 387 msr |= 0x00100000; 388 env->spr[SPR_BOOKE_ESR] = ESR_FP; 389 break; 390 case POWERPC_EXCP_INVAL: 391 LOG_EXCP("Invalid instruction at " TARGET_FMT_lx "\n", env->nip); 392 msr |= 0x00080000; 393 env->spr[SPR_BOOKE_ESR] = ESR_PIL; 394 break; 395 case POWERPC_EXCP_PRIV: 396 msr |= 0x00040000; 397 env->spr[SPR_BOOKE_ESR] = ESR_PPR; 398 break; 399 case POWERPC_EXCP_TRAP: 400 msr |= 0x00020000; 401 env->spr[SPR_BOOKE_ESR] = ESR_PTR; 402 break; 403 default: 404 /* Should never occur */ 405 cpu_abort(cs, "Invalid program exception %d. 
Aborting\n", 406 env->error_code); 407 break; 408 } 409 break; 410 case POWERPC_EXCP_SYSCALL: /* System call exception */ 411 lev = env->error_code; 412 413 if ((lev == 1) && cpu->vhyp) { 414 dump_hcall(env); 415 } else { 416 dump_syscall(env); 417 } 418 419 /* 420 * We need to correct the NIP which in this case is supposed 421 * to point to the next instruction 422 */ 423 env->nip += 4; 424 425 /* "PAPR mode" built-in hypercall emulation */ 426 if ((lev == 1) && cpu->vhyp) { 427 PPCVirtualHypervisorClass *vhc = 428 PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp); 429 vhc->hypercall(cpu->vhyp, cpu); 430 return; 431 } 432 if (lev == 1) { 433 new_msr |= (target_ulong)MSR_HVB; 434 } 435 break; 436 case POWERPC_EXCP_SYSCALL_VECTORED: /* scv exception */ 437 lev = env->error_code; 438 dump_syscall_vectored(env); 439 env->nip += 4; 440 new_msr |= env->msr & ((target_ulong)1 << MSR_EE); 441 new_msr |= env->msr & ((target_ulong)1 << MSR_RI); 442 break; 443 case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */ 444 case POWERPC_EXCP_APU: /* Auxiliary processor unavailable */ 445 case POWERPC_EXCP_DECR: /* Decrementer exception */ 446 break; 447 case POWERPC_EXCP_FIT: /* Fixed-interval timer interrupt */ 448 /* FIT on 4xx */ 449 LOG_EXCP("FIT exception\n"); 450 break; 451 case POWERPC_EXCP_WDT: /* Watchdog timer interrupt */ 452 LOG_EXCP("WDT exception\n"); 453 switch (excp_model) { 454 case POWERPC_EXCP_BOOKE: 455 srr0 = SPR_BOOKE_CSRR0; 456 srr1 = SPR_BOOKE_CSRR1; 457 break; 458 default: 459 break; 460 } 461 break; 462 case POWERPC_EXCP_DTLB: /* Data TLB error */ 463 case POWERPC_EXCP_ITLB: /* Instruction TLB error */ 464 break; 465 case POWERPC_EXCP_DEBUG: /* Debug interrupt */ 466 if (env->flags & POWERPC_FLAG_DE) { 467 /* FIXME: choose one or the other based on CPU type */ 468 srr0 = SPR_BOOKE_DSRR0; 469 srr1 = SPR_BOOKE_DSRR1; 470 asrr0 = SPR_BOOKE_CSRR0; 471 asrr1 = SPR_BOOKE_CSRR1; 472 /* DBSR already modified by caller */ 473 } else { 474 cpu_abort(cs, "Debug 
exception triggered on unsupported model\n"); 475 } 476 break; 477 case POWERPC_EXCP_SPEU: /* SPE/embedded floating-point unavailable */ 478 env->spr[SPR_BOOKE_ESR] = ESR_SPV; 479 break; 480 case POWERPC_EXCP_EFPDI: /* Embedded floating-point data interrupt */ 481 /* XXX: TODO */ 482 cpu_abort(cs, "Embedded floating point data exception " 483 "is not implemented yet !\n"); 484 env->spr[SPR_BOOKE_ESR] = ESR_SPV; 485 break; 486 case POWERPC_EXCP_EFPRI: /* Embedded floating-point round interrupt */ 487 /* XXX: TODO */ 488 cpu_abort(cs, "Embedded floating point round exception " 489 "is not implemented yet !\n"); 490 env->spr[SPR_BOOKE_ESR] = ESR_SPV; 491 break; 492 case POWERPC_EXCP_EPERFM: /* Embedded performance monitor interrupt */ 493 /* XXX: TODO */ 494 cpu_abort(cs, 495 "Performance counter exception is not implemented yet !\n"); 496 break; 497 case POWERPC_EXCP_DOORI: /* Embedded doorbell interrupt */ 498 break; 499 case POWERPC_EXCP_DOORCI: /* Embedded doorbell critical interrupt */ 500 srr0 = SPR_BOOKE_CSRR0; 501 srr1 = SPR_BOOKE_CSRR1; 502 break; 503 case POWERPC_EXCP_RESET: /* System reset exception */ 504 /* A power-saving exception sets ME, otherwise it is unchanged */ 505 if (msr_pow) { 506 /* indicate that we resumed from power save mode */ 507 msr |= 0x10000; 508 new_msr |= ((target_ulong)1 << MSR_ME); 509 } 510 if (env->msr_mask & MSR_HVB) { 511 /* 512 * ISA specifies HV, but can be delivered to guest with HV 513 * clear (e.g., see FWNMI in PAPR, NMI injection in QEMU). 
514 */ 515 new_msr |= (target_ulong)MSR_HVB; 516 } else { 517 if (msr_pow) { 518 cpu_abort(cs, "Trying to deliver power-saving system reset " 519 "exception %d with no HV support\n", excp); 520 } 521 } 522 ail = 0; 523 break; 524 case POWERPC_EXCP_DSEG: /* Data segment exception */ 525 case POWERPC_EXCP_ISEG: /* Instruction segment exception */ 526 case POWERPC_EXCP_TRACE: /* Trace exception */ 527 break; 528 case POWERPC_EXCP_HISI: /* Hypervisor instruction storage exception */ 529 msr |= env->error_code; 530 case POWERPC_EXCP_HDECR: /* Hypervisor decrementer exception */ 531 case POWERPC_EXCP_HDSI: /* Hypervisor data storage exception */ 532 case POWERPC_EXCP_HDSEG: /* Hypervisor data segment exception */ 533 case POWERPC_EXCP_HISEG: /* Hypervisor instruction segment exception */ 534 case POWERPC_EXCP_SDOOR_HV: /* Hypervisor Doorbell interrupt */ 535 case POWERPC_EXCP_HV_EMU: 536 case POWERPC_EXCP_HVIRT: /* Hypervisor virtualization */ 537 srr0 = SPR_HSRR0; 538 srr1 = SPR_HSRR1; 539 new_msr |= (target_ulong)MSR_HVB; 540 new_msr |= env->msr & ((target_ulong)1 << MSR_RI); 541 break; 542 case POWERPC_EXCP_VPU: /* Vector unavailable exception */ 543 case POWERPC_EXCP_VSXU: /* VSX unavailable exception */ 544 case POWERPC_EXCP_FU: /* Facility unavailable exception */ 545 #ifdef TARGET_PPC64 546 env->spr[SPR_FSCR] |= ((target_ulong)env->error_code << 56); 547 #endif 548 break; 549 case POWERPC_EXCP_HV_FU: /* Hypervisor Facility Unavailable Exception */ 550 #ifdef TARGET_PPC64 551 env->spr[SPR_HFSCR] |= ((target_ulong)env->error_code << FSCR_IC_POS); 552 srr0 = SPR_HSRR0; 553 srr1 = SPR_HSRR1; 554 new_msr |= (target_ulong)MSR_HVB; 555 new_msr |= env->msr & ((target_ulong)1 << MSR_RI); 556 #endif 557 break; 558 case POWERPC_EXCP_PIT: /* Programmable interval timer interrupt */ 559 LOG_EXCP("PIT exception\n"); 560 break; 561 case POWERPC_EXCP_IO: /* IO error exception */ 562 /* XXX: TODO */ 563 cpu_abort(cs, "601 IO error exception is not implemented yet !\n"); 564 break; 
565 case POWERPC_EXCP_RUNM: /* Run mode exception */ 566 /* XXX: TODO */ 567 cpu_abort(cs, "601 run mode exception is not implemented yet !\n"); 568 break; 569 case POWERPC_EXCP_EMUL: /* Emulation trap exception */ 570 /* XXX: TODO */ 571 cpu_abort(cs, "602 emulation trap exception " 572 "is not implemented yet !\n"); 573 break; 574 case POWERPC_EXCP_IFTLB: /* Instruction fetch TLB error */ 575 switch (excp_model) { 576 case POWERPC_EXCP_602: 577 case POWERPC_EXCP_603: 578 case POWERPC_EXCP_603E: 579 case POWERPC_EXCP_G2: 580 goto tlb_miss_tgpr; 581 case POWERPC_EXCP_7x5: 582 goto tlb_miss; 583 case POWERPC_EXCP_74xx: 584 goto tlb_miss_74xx; 585 default: 586 cpu_abort(cs, "Invalid instruction TLB miss exception\n"); 587 break; 588 } 589 break; 590 case POWERPC_EXCP_DLTLB: /* Data load TLB miss */ 591 switch (excp_model) { 592 case POWERPC_EXCP_602: 593 case POWERPC_EXCP_603: 594 case POWERPC_EXCP_603E: 595 case POWERPC_EXCP_G2: 596 goto tlb_miss_tgpr; 597 case POWERPC_EXCP_7x5: 598 goto tlb_miss; 599 case POWERPC_EXCP_74xx: 600 goto tlb_miss_74xx; 601 default: 602 cpu_abort(cs, "Invalid data load TLB miss exception\n"); 603 break; 604 } 605 break; 606 case POWERPC_EXCP_DSTLB: /* Data store TLB miss */ 607 switch (excp_model) { 608 case POWERPC_EXCP_602: 609 case POWERPC_EXCP_603: 610 case POWERPC_EXCP_603E: 611 case POWERPC_EXCP_G2: 612 tlb_miss_tgpr: 613 /* Swap temporary saved registers with GPRs */ 614 if (!(new_msr & ((target_ulong)1 << MSR_TGPR))) { 615 new_msr |= (target_ulong)1 << MSR_TGPR; 616 hreg_swap_gpr_tgpr(env); 617 } 618 goto tlb_miss; 619 case POWERPC_EXCP_7x5: 620 tlb_miss: 621 #if defined(DEBUG_SOFTWARE_TLB) 622 if (qemu_log_enabled()) { 623 const char *es; 624 target_ulong *miss, *cmp; 625 int en; 626 627 if (excp == POWERPC_EXCP_IFTLB) { 628 es = "I"; 629 en = 'I'; 630 miss = &env->spr[SPR_IMISS]; 631 cmp = &env->spr[SPR_ICMP]; 632 } else { 633 if (excp == POWERPC_EXCP_DLTLB) { 634 es = "DL"; 635 } else { 636 es = "DS"; 637 } 638 en = 'D'; 639 
miss = &env->spr[SPR_DMISS]; 640 cmp = &env->spr[SPR_DCMP]; 641 } 642 qemu_log("6xx %sTLB miss: %cM " TARGET_FMT_lx " %cC " 643 TARGET_FMT_lx " H1 " TARGET_FMT_lx " H2 " 644 TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp, 645 env->spr[SPR_HASH1], env->spr[SPR_HASH2], 646 env->error_code); 647 } 648 #endif 649 msr |= env->crf[0] << 28; 650 msr |= env->error_code; /* key, D/I, S/L bits */ 651 /* Set way using a LRU mechanism */ 652 msr |= ((env->last_way + 1) & (env->nb_ways - 1)) << 17; 653 break; 654 case POWERPC_EXCP_74xx: 655 tlb_miss_74xx: 656 #if defined(DEBUG_SOFTWARE_TLB) 657 if (qemu_log_enabled()) { 658 const char *es; 659 target_ulong *miss, *cmp; 660 int en; 661 662 if (excp == POWERPC_EXCP_IFTLB) { 663 es = "I"; 664 en = 'I'; 665 miss = &env->spr[SPR_TLBMISS]; 666 cmp = &env->spr[SPR_PTEHI]; 667 } else { 668 if (excp == POWERPC_EXCP_DLTLB) { 669 es = "DL"; 670 } else { 671 es = "DS"; 672 } 673 en = 'D'; 674 miss = &env->spr[SPR_TLBMISS]; 675 cmp = &env->spr[SPR_PTEHI]; 676 } 677 qemu_log("74xx %sTLB miss: %cM " TARGET_FMT_lx " %cC " 678 TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp, 679 env->error_code); 680 } 681 #endif 682 msr |= env->error_code; /* key bit */ 683 break; 684 default: 685 cpu_abort(cs, "Invalid data store TLB miss exception\n"); 686 break; 687 } 688 break; 689 case POWERPC_EXCP_FPA: /* Floating-point assist exception */ 690 /* XXX: TODO */ 691 cpu_abort(cs, "Floating point assist exception " 692 "is not implemented yet !\n"); 693 break; 694 case POWERPC_EXCP_DABR: /* Data address breakpoint */ 695 /* XXX: TODO */ 696 cpu_abort(cs, "DABR exception is not implemented yet !\n"); 697 break; 698 case POWERPC_EXCP_IABR: /* Instruction address breakpoint */ 699 /* XXX: TODO */ 700 cpu_abort(cs, "IABR exception is not implemented yet !\n"); 701 break; 702 case POWERPC_EXCP_SMI: /* System management interrupt */ 703 /* XXX: TODO */ 704 cpu_abort(cs, "SMI exception is not implemented yet !\n"); 705 break; 706 case POWERPC_EXCP_THERM: /* 
Thermal interrupt */ 707 /* XXX: TODO */ 708 cpu_abort(cs, "Thermal management exception " 709 "is not implemented yet !\n"); 710 break; 711 case POWERPC_EXCP_PERFM: /* Embedded performance monitor interrupt */ 712 /* XXX: TODO */ 713 cpu_abort(cs, 714 "Performance counter exception is not implemented yet !\n"); 715 break; 716 case POWERPC_EXCP_VPUA: /* Vector assist exception */ 717 /* XXX: TODO */ 718 cpu_abort(cs, "VPU assist exception is not implemented yet !\n"); 719 break; 720 case POWERPC_EXCP_SOFTP: /* Soft patch exception */ 721 /* XXX: TODO */ 722 cpu_abort(cs, 723 "970 soft-patch exception is not implemented yet !\n"); 724 break; 725 case POWERPC_EXCP_MAINT: /* Maintenance exception */ 726 /* XXX: TODO */ 727 cpu_abort(cs, 728 "970 maintenance exception is not implemented yet !\n"); 729 break; 730 case POWERPC_EXCP_MEXTBR: /* Maskable external breakpoint */ 731 /* XXX: TODO */ 732 cpu_abort(cs, "Maskable external exception " 733 "is not implemented yet !\n"); 734 break; 735 case POWERPC_EXCP_NMEXTBR: /* Non maskable external breakpoint */ 736 /* XXX: TODO */ 737 cpu_abort(cs, "Non maskable external exception " 738 "is not implemented yet !\n"); 739 break; 740 default: 741 excp_invalid: 742 cpu_abort(cs, "Invalid PowerPC exception %d. Aborting\n", excp); 743 break; 744 } 745 746 /* Sanity check */ 747 if (!(env->msr_mask & MSR_HVB)) { 748 if (new_msr & MSR_HVB) { 749 cpu_abort(cs, "Trying to deliver HV exception (MSR) %d with " 750 "no HV support\n", excp); 751 } 752 if (srr0 == SPR_HSRR0) { 753 cpu_abort(cs, "Trying to deliver HV exception (HSRR) %d with " 754 "no HV support\n", excp); 755 } 756 } 757 758 /* 759 * Sort out endianness of interrupt, this differs depending on the 760 * CPU, the HV mode, etc... 
761 */ 762 #ifdef TARGET_PPC64 763 if (excp_model == POWERPC_EXCP_POWER7) { 764 if (!(new_msr & MSR_HVB) && (env->spr[SPR_LPCR] & LPCR_ILE)) { 765 new_msr |= (target_ulong)1 << MSR_LE; 766 } 767 } else if (excp_model == POWERPC_EXCP_POWER8) { 768 if (new_msr & MSR_HVB) { 769 if (env->spr[SPR_HID0] & HID0_HILE) { 770 new_msr |= (target_ulong)1 << MSR_LE; 771 } 772 } else if (env->spr[SPR_LPCR] & LPCR_ILE) { 773 new_msr |= (target_ulong)1 << MSR_LE; 774 } 775 } else if (excp_model == POWERPC_EXCP_POWER9) { 776 if (new_msr & MSR_HVB) { 777 if (env->spr[SPR_HID0] & HID0_POWER9_HILE) { 778 new_msr |= (target_ulong)1 << MSR_LE; 779 } 780 } else if (env->spr[SPR_LPCR] & LPCR_ILE) { 781 new_msr |= (target_ulong)1 << MSR_LE; 782 } 783 } else if (msr_ile) { 784 new_msr |= (target_ulong)1 << MSR_LE; 785 } 786 #else 787 if (msr_ile) { 788 new_msr |= (target_ulong)1 << MSR_LE; 789 } 790 #endif 791 792 /* 793 * AIL only works if there is no HV transition and we are running 794 * with translations enabled 795 */ 796 if (!((msr >> MSR_IR) & 1) || !((msr >> MSR_DR) & 1) || 797 ((new_msr & MSR_HVB) && !(msr & MSR_HVB))) { 798 ail = 0; 799 } 800 801 vector = env->excp_vectors[excp]; 802 if (vector == (target_ulong)-1ULL) { 803 cpu_abort(cs, "Raised an exception without defined vector %d\n", 804 excp); 805 } 806 807 vector |= env->excp_prefix; 808 809 /* If any alternate SRR register are defined, duplicate saved values */ 810 if (asrr0 != -1) { 811 env->spr[asrr0] = env->nip; 812 } 813 if (asrr1 != -1) { 814 env->spr[asrr1] = msr; 815 } 816 817 #if defined(TARGET_PPC64) 818 if (excp_model == POWERPC_EXCP_BOOKE) { 819 if (env->spr[SPR_BOOKE_EPCR] & EPCR_ICM) { 820 /* Cat.64-bit: EPCR.ICM is copied to MSR.CM */ 821 new_msr |= (target_ulong)1 << MSR_CM; 822 } else { 823 vector = (uint32_t)vector; 824 } 825 } else { 826 if (!msr_isf && !(env->mmu_model & POWERPC_MMU_64)) { 827 vector = (uint32_t)vector; 828 } else { 829 new_msr |= (target_ulong)1 << MSR_SF; 830 } 831 } 832 #endif 833 834 
if (excp != POWERPC_EXCP_SYSCALL_VECTORED) { 835 /* Save PC */ 836 env->spr[srr0] = env->nip; 837 838 /* Save MSR */ 839 env->spr[srr1] = msr; 840 841 /* Handle AIL */ 842 if (ail) { 843 new_msr |= (1 << MSR_IR) | (1 << MSR_DR); 844 vector |= ppc_excp_vector_offset(cs, ail); 845 } 846 847 #if defined(TARGET_PPC64) 848 } else { 849 /* scv AIL is a little different */ 850 if (ail) { 851 new_msr |= (1 << MSR_IR) | (1 << MSR_DR); 852 } 853 if (ail == AIL_C000_0000_0000_4000) { 854 vector |= 0xc000000000003000ull; 855 } else { 856 vector |= 0x0000000000017000ull; 857 } 858 vector += lev * 0x20; 859 860 env->lr = env->nip; 861 env->ctr = msr; 862 #endif 863 } 864 865 powerpc_set_excp_state(cpu, vector, new_msr); 866 } 867 868 void ppc_cpu_do_interrupt(CPUState *cs) 869 { 870 PowerPCCPU *cpu = POWERPC_CPU(cs); 871 CPUPPCState *env = &cpu->env; 872 873 powerpc_excp(cpu, env->excp_model, cs->exception_index); 874 } 875 876 static void ppc_hw_interrupt(CPUPPCState *env) 877 { 878 PowerPCCPU *cpu = env_archcpu(env); 879 bool async_deliver; 880 881 /* External reset */ 882 if (env->pending_interrupts & (1 << PPC_INTERRUPT_RESET)) { 883 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_RESET); 884 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_RESET); 885 return; 886 } 887 /* Machine check exception */ 888 if (env->pending_interrupts & (1 << PPC_INTERRUPT_MCK)) { 889 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_MCK); 890 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_MCHECK); 891 return; 892 } 893 #if 0 /* TODO */ 894 /* External debug exception */ 895 if (env->pending_interrupts & (1 << PPC_INTERRUPT_DEBUG)) { 896 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DEBUG); 897 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DEBUG); 898 return; 899 } 900 #endif 901 902 /* 903 * For interrupts that gate on MSR:EE, we need to do something a 904 * bit more subtle, as we need to let them through even when EE is 905 * clear when coming out of some power management states (in order 
906 * for them to become a 0x100). 907 */ 908 async_deliver = (msr_ee != 0) || env->resume_as_sreset; 909 910 /* Hypervisor decrementer exception */ 911 if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDECR)) { 912 /* LPCR will be clear when not supported so this will work */ 913 bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE); 914 if ((async_deliver || msr_hv == 0) && hdice) { 915 /* HDEC clears on delivery */ 916 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDECR); 917 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_HDECR); 918 return; 919 } 920 } 921 922 /* Hypervisor virtualization interrupt */ 923 if (env->pending_interrupts & (1 << PPC_INTERRUPT_HVIRT)) { 924 /* LPCR will be clear when not supported so this will work */ 925 bool hvice = !!(env->spr[SPR_LPCR] & LPCR_HVICE); 926 if ((async_deliver || msr_hv == 0) && hvice) { 927 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_HVIRT); 928 return; 929 } 930 } 931 932 /* External interrupt can ignore MSR:EE under some circumstances */ 933 if (env->pending_interrupts & (1 << PPC_INTERRUPT_EXT)) { 934 bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0); 935 bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC); 936 /* HEIC blocks delivery to the hypervisor */ 937 if ((async_deliver && !(heic && msr_hv && !msr_pr)) || 938 (env->has_hv_mode && msr_hv == 0 && !lpes0)) { 939 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_EXTERNAL); 940 return; 941 } 942 } 943 if (msr_ce != 0) { 944 /* External critical interrupt */ 945 if (env->pending_interrupts & (1 << PPC_INTERRUPT_CEXT)) { 946 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_CRITICAL); 947 return; 948 } 949 } 950 if (async_deliver != 0) { 951 /* Watchdog timer on embedded PowerPC */ 952 if (env->pending_interrupts & (1 << PPC_INTERRUPT_WDT)) { 953 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_WDT); 954 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_WDT); 955 return; 956 } 957 if (env->pending_interrupts & (1 << PPC_INTERRUPT_CDOORBELL)) { 958 env->pending_interrupts 
&= ~(1 << PPC_INTERRUPT_CDOORBELL); 959 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DOORCI); 960 return; 961 } 962 /* Fixed interval timer on embedded PowerPC */ 963 if (env->pending_interrupts & (1 << PPC_INTERRUPT_FIT)) { 964 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_FIT); 965 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_FIT); 966 return; 967 } 968 /* Programmable interval timer on embedded PowerPC */ 969 if (env->pending_interrupts & (1 << PPC_INTERRUPT_PIT)) { 970 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PIT); 971 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_PIT); 972 return; 973 } 974 /* Decrementer exception */ 975 if (env->pending_interrupts & (1 << PPC_INTERRUPT_DECR)) { 976 if (ppc_decr_clear_on_delivery(env)) { 977 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DECR); 978 } 979 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DECR); 980 return; 981 } 982 if (env->pending_interrupts & (1 << PPC_INTERRUPT_DOORBELL)) { 983 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DOORBELL); 984 if (is_book3s_arch2x(env)) { 985 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_SDOOR); 986 } else { 987 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DOORI); 988 } 989 return; 990 } 991 if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDOORBELL)) { 992 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDOORBELL); 993 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_SDOOR_HV); 994 return; 995 } 996 if (env->pending_interrupts & (1 << PPC_INTERRUPT_PERFM)) { 997 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PERFM); 998 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_PERFM); 999 return; 1000 } 1001 /* Thermal interrupt */ 1002 if (env->pending_interrupts & (1 << PPC_INTERRUPT_THERM)) { 1003 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_THERM); 1004 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_THERM); 1005 return; 1006 } 1007 } 1008 1009 if (env->resume_as_sreset) { 1010 /* 1011 * This is a bug ! 
It means that has_work took us out of halt without 1012 * anything to deliver while in a PM state that requires getting 1013 * out via a 0x100 1014 * 1015 * This means we will incorrectly execute past the power management 1016 * instruction instead of triggering a reset. 1017 * 1018 * It generally means a discrepancy between the wakeup conditions in the 1019 * processor has_work implementation and the logic in this function. 1020 */ 1021 cpu_abort(env_cpu(env), 1022 "Wakeup from PM state but interrupt Undelivered"); 1023 } 1024 } 1025 1026 void ppc_cpu_do_system_reset(CPUState *cs) 1027 { 1028 PowerPCCPU *cpu = POWERPC_CPU(cs); 1029 CPUPPCState *env = &cpu->env; 1030 1031 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_RESET); 1032 } 1033 1034 void ppc_cpu_do_fwnmi_machine_check(CPUState *cs, target_ulong vector) 1035 { 1036 PowerPCCPU *cpu = POWERPC_CPU(cs); 1037 CPUPPCState *env = &cpu->env; 1038 PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); 1039 target_ulong msr = 0; 1040 1041 /* 1042 * Set MSR and NIP for the handler, SRR0/1, DAR and DSISR have already 1043 * been set by KVM. 
 */
    msr = (1ULL << MSR_ME);             /* handler runs with MC enabled */
    msr |= env->msr & (1ULL << MSR_SF); /* preserve current 64-bit mode */
    if (!(*pcc->interrupts_big_endian)(cpu)) {
        msr |= (1ULL << MSR_LE);        /* interrupts taken little-endian */
    }

    powerpc_set_excp_state(cpu, vector, msr);
}
#endif /* !CONFIG_USER_ONLY */

/*
 * cpu-exec hook: attempt to deliver one pending hardware interrupt.
 * Returns true when CPU_INTERRUPT_HARD was acted upon; the request flag
 * is only cleared once no interrupt remains pending.
 */
bool ppc_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;

    if (interrupt_request & CPU_INTERRUPT_HARD) {
        ppc_hw_interrupt(env);
        if (env->pending_interrupts == 0) {
            /* nothing left to deliver: drop the hard-interrupt request */
            cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
        }
        return true;
    }
    return false;
}

#if defined(DEBUG_OP)
/* Debug-only trace of a return-from-interrupt target and MSR. */
static void cpu_dump_rfi(target_ulong RA, target_ulong msr)
{
    qemu_log("Return from exception at " TARGET_FMT_lx " with flags "
             TARGET_FMT_lx "\n", RA, msr);
}
#endif

/*****************************************************************************/
/* Exceptions processing helpers */

/*
 * Raise @exception with @error_code, restoring guest state from host
 * return address @raddr (0 = no unwinding needed).  Does not return:
 * leaves via cpu_loop_exit_restore().
 */
void raise_exception_err_ra(CPUPPCState *env, uint32_t exception,
                            uint32_t error_code, uintptr_t raddr)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = exception;
    env->error_code = error_code;
    cpu_loop_exit_restore(cs, raddr);
}

/* As raise_exception_err_ra() with no unwinding. */
void raise_exception_err(CPUPPCState *env, uint32_t exception,
                         uint32_t error_code)
{
    raise_exception_err_ra(env, exception, error_code, 0);
}

/* As raise_exception_err_ra() with no error code and no unwinding. */
void raise_exception(CPUPPCState *env, uint32_t exception)
{
    raise_exception_err_ra(env, exception, 0, 0);
}

/* As raise_exception_err_ra() with no error code. */
void raise_exception_ra(CPUPPCState *env, uint32_t exception,
                        uintptr_t raddr)
{
    raise_exception_err_ra(env, exception, 0, raddr);
}

/* TCG helper wrapper around raise_exception_err_ra(). */
void helper_raise_exception_err(CPUPPCState *env, uint32_t exception,
                                uint32_t error_code)
{
    raise_exception_err_ra(env, exception, error_code, 0);
}

/* TCG helper: raise @exception with no error code. */
void helper_raise_exception(CPUPPCState *env,
 uint32_t exception)
{
    raise_exception_err_ra(env, exception, 0, 0);
}

#if !defined(CONFIG_USER_ONLY)
/*
 * Write @val to the MSR.  hreg_store_msr() returns a non-zero exception
 * number when the write must raise one; in that case force an exit from
 * the current translation block and raise it.
 */
void helper_store_msr(CPUPPCState *env, target_ulong val)
{
    uint32_t excp = hreg_store_msr(env, val, 0);

    if (excp != 0) {
        CPUState *cs = env_cpu(env);
        /* the MSR change can invalidate the current TB */
        cpu_interrupt_exittb(cs);
        raise_exception(env, excp);
    }
}

#if defined(TARGET_PPC64)
/*
 * Power-management instruction: halt the CPU and record whether the
 * wakeup must be taken as a system reset (0x100).
 */
void helper_pminsn(CPUPPCState *env, powerpc_pm_insn_t insn)
{
    CPUState *cs;

    cs = env_cpu(env);
    cs->halted = 1;

    /*
     * The architecture specifies that HDEC interrupts are discarded
     * in PM states
     */
    env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDECR);

    /* Condition for waking up at 0x100 */
    env->resume_as_sreset = (insn != PPC_PM_STOP) ||
        (env->spr[SPR_PSSCR] & PSSCR_EC);
}
#endif /* defined(TARGET_PPC64) */

/*
 * Common tail of every return-from-interrupt flavour: install @nip and
 * @msr into the CPU state and resynchronize execution.
 */
static inline void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr)
{
    CPUState *cs = env_cpu(env);

    /* MSR:POW cannot be set by any form of rfi */
    msr &= ~(1ULL << MSR_POW);

#if defined(TARGET_PPC64)
    /* Switching to 32-bit ?
Crop the nip */ 1160 if (!msr_is_64bit(env, msr)) { 1161 nip = (uint32_t)nip; 1162 } 1163 #else 1164 nip = (uint32_t)nip; 1165 #endif 1166 /* XXX: beware: this is false if VLE is supported */ 1167 env->nip = nip & ~((target_ulong)0x00000003); 1168 hreg_store_msr(env, msr, 1); 1169 #if defined(DEBUG_OP) 1170 cpu_dump_rfi(env->nip, env->msr); 1171 #endif 1172 /* 1173 * No need to raise an exception here, as rfi is always the last 1174 * insn of a TB 1175 */ 1176 cpu_interrupt_exittb(cs); 1177 /* Reset the reservation */ 1178 env->reserve_addr = -1; 1179 1180 /* Context synchronizing: check if TCG TLB needs flush */ 1181 check_tlb_flush(env, false); 1182 } 1183 1184 void helper_rfi(CPUPPCState *env) 1185 { 1186 do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1] & 0xfffffffful); 1187 } 1188 1189 #define MSR_BOOK3S_MASK 1190 #if defined(TARGET_PPC64) 1191 void helper_rfid(CPUPPCState *env) 1192 { 1193 /* 1194 * The architecture defines a number of rules for which bits can 1195 * change but in practice, we handle this in hreg_store_msr() 1196 * which will be called by do_rfi(), so there is no need to filter 1197 * here 1198 */ 1199 do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1]); 1200 } 1201 1202 void helper_rfscv(CPUPPCState *env) 1203 { 1204 do_rfi(env, env->lr, env->ctr); 1205 } 1206 1207 void helper_hrfid(CPUPPCState *env) 1208 { 1209 do_rfi(env, env->spr[SPR_HSRR0], env->spr[SPR_HSRR1]); 1210 } 1211 #endif 1212 1213 /*****************************************************************************/ 1214 /* Embedded PowerPC specific helpers */ 1215 void helper_40x_rfci(CPUPPCState *env) 1216 { 1217 do_rfi(env, env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3]); 1218 } 1219 1220 void helper_rfci(CPUPPCState *env) 1221 { 1222 do_rfi(env, env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1]); 1223 } 1224 1225 void helper_rfdi(CPUPPCState *env) 1226 { 1227 /* FIXME: choose CSRR1 or DSRR1 based on cpu type */ 1228 do_rfi(env, env->spr[SPR_BOOKE_DSRR0], 
env->spr[SPR_BOOKE_DSRR1]); 1229 } 1230 1231 void helper_rfmci(CPUPPCState *env) 1232 { 1233 /* FIXME: choose CSRR1 or MCSRR1 based on cpu type */ 1234 do_rfi(env, env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1]); 1235 } 1236 #endif 1237 1238 void helper_tw(CPUPPCState *env, target_ulong arg1, target_ulong arg2, 1239 uint32_t flags) 1240 { 1241 if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) || 1242 ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) || 1243 ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) || 1244 ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) || 1245 ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) { 1246 raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, 1247 POWERPC_EXCP_TRAP, GETPC()); 1248 } 1249 } 1250 1251 #if defined(TARGET_PPC64) 1252 void helper_td(CPUPPCState *env, target_ulong arg1, target_ulong arg2, 1253 uint32_t flags) 1254 { 1255 if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) || 1256 ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) || 1257 ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) || 1258 ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) || 1259 ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01))))) { 1260 raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, 1261 POWERPC_EXCP_TRAP, GETPC()); 1262 } 1263 } 1264 #endif 1265 1266 #if !defined(CONFIG_USER_ONLY) 1267 /*****************************************************************************/ 1268 /* PowerPC 601 specific instructions (POWER bridge) */ 1269 1270 void helper_rfsvc(CPUPPCState *env) 1271 { 1272 do_rfi(env, env->lr, env->ctr & 0x0000FFFF); 1273 } 1274 1275 /* Embedded.Processor Control */ 1276 static int dbell2irq(target_ulong rb) 1277 { 1278 int msg = rb & DBELL_TYPE_MASK; 1279 int irq = -1; 1280 1281 switch (msg) { 1282 case DBELL_TYPE_DBELL: 1283 irq = PPC_INTERRUPT_DOORBELL; 1284 break; 1285 case DBELL_TYPE_DBELL_CRIT: 1286 irq = PPC_INTERRUPT_CDOORBELL; 1287 break; 1288 case DBELL_TYPE_G_DBELL: 1289 case 
 DBELL_TYPE_G_DBELL_CRIT:
    case DBELL_TYPE_G_DBELL_MC:
        /* XXX implement */
    default:
        /* reserved/unimplemented types map to no interrupt */
        break;
    }

    return irq;
}

/* msgclr: clear the pending doorbell interrupt selected by message @rb. */
void helper_msgclr(CPUPPCState *env, target_ulong rb)
{
    int irq = dbell2irq(rb);

    if (irq < 0) {
        /* reserved message type: no-op */
        return;
    }

    env->pending_interrupts &= ~(1 << irq);
}

/*
 * msgsnd: post a doorbell interrupt to every CPU whose BookE PIR matches
 * the tag in @rb, or to all CPUs when the broadcast bit is set.
 */
void helper_msgsnd(target_ulong rb)
{
    int irq = dbell2irq(rb);
    int pir = rb & DBELL_PIRTAG_MASK;
    CPUState *cs;

    if (irq < 0) {
        /* reserved message type: no-op */
        return;
    }

    /* hold the iothread lock while poking other CPUs */
    qemu_mutex_lock_iothread();
    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);
        CPUPPCState *cenv = &cpu->env;

        if ((rb & DBELL_BRDCAST) || (cenv->spr[SPR_BOOKE_PIR] == pir)) {
            cenv->pending_interrupts |= 1 << irq;
            cpu_interrupt(cs, CPU_INTERRUPT_HARD);
        }
    }
    qemu_mutex_unlock_iothread();
}

/* Server Processor Control */

/* True when @rb encodes a Directed Hypervisor Doorbell message. */
static bool dbell_type_server(target_ulong rb)
{
    /*
     * A Directed Hypervisor Doorbell message is sent only if the
     * message type is 5.
 All other types are reserved and the
     * instruction is a no-op
     */
    return (rb & DBELL_TYPE_MASK) == DBELL_TYPE_DBELL_SERVER;
}

/* book3s msgclr: clear a pending hypervisor doorbell interrupt. */
void helper_book3s_msgclr(CPUPPCState *env, target_ulong rb)
{
    if (!dbell_type_server(rb)) {
        return;
    }

    env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDOORBELL);
}

/* Post interrupt @irq to the CPU whose PIR default value equals @pir. */
static void book3s_msgsnd_common(int pir, int irq)
{
    CPUState *cs;

    /* hold the iothread lock while poking other CPUs */
    qemu_mutex_lock_iothread();
    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);
        CPUPPCState *cenv = &cpu->env;

        /* TODO: broadcast message to all threads of the same processor */
        if (cenv->spr_cb[SPR_PIR].default_value == pir) {
            cenv->pending_interrupts |= 1 << irq;
            cpu_interrupt(cs, CPU_INTERRUPT_HARD);
        }
    }
    qemu_mutex_unlock_iothread();
}

/* book3s msgsnd: send a hypervisor doorbell to the CPU tagged in @rb. */
void helper_book3s_msgsnd(target_ulong rb)
{
    int pir = rb & DBELL_PROCIDTAG_MASK;

    if (!dbell_type_server(rb)) {
        /* reserved message type: no-op */
        return;
    }

    book3s_msgsnd_common(pir, PPC_INTERRUPT_HDOORBELL);
}

#if defined(TARGET_PPC64)
/* msgclrp: clear a pending doorbell; access gated by HFSCR MSGP. */
void helper_book3s_msgclrp(CPUPPCState *env, target_ulong rb)
{
    helper_hfscr_facility_check(env, HFSCR_MSGP, "msgclrp", HFSCR_IC_MSGP);

    if (!dbell_type_server(rb)) {
        return;
    }

    env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DOORBELL);
}

/*
 * sends a message to other threads that are on the same
 * multi-threaded processor
 */
void helper_book3s_msgsndp(CPUPPCState *env, target_ulong rb)
{
    int pir = env->spr_cb[SPR_PIR].default_value;

    helper_hfscr_facility_check(env, HFSCR_MSGP, "msgsndp", HFSCR_IC_MSGP);

    if (!dbell_type_server(rb)) {
        return;
    }

    /* TODO: TCG supports only one thread */

    book3s_msgsnd_common(pir, PPC_INTERRUPT_DOORBELL);
}
#endif
#endif

/* Unaligned-access fault: raise an alignment interrupt for the insn at nip. */
void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr
 vaddr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr)
{
    CPUPPCState *env = cs->env_ptr;
    uint32_t insn;

    /* Restore state and reload the insn we executed, for filling in DSISR. */
    cpu_restore_state(cs, retaddr, true);
    insn = cpu_ldl_code(env, env->nip);

    cs->exception_index = POWERPC_EXCP_ALIGN;
    /* keep only the instruction bits (mask 0x03FF0000) used to build DSISR */
    env->error_code = insn & 0x03FF0000;
    cpu_loop_exit(cs);
}