xref: /openbmc/qemu/target/ppc/excp_helper.c (revision ac12b601)
1 /*
2  *  PowerPC exception emulation helpers for QEMU.
3  *
4  *  Copyright (c) 2003-2007 Jocelyn Mayer
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #include "qemu/osdep.h"
20 #include "qemu/main-loop.h"
21 #include "cpu.h"
22 #include "exec/helper-proto.h"
23 #include "exec/exec-all.h"
24 #include "exec/cpu_ldst.h"
25 #include "internal.h"
26 #include "helper_regs.h"
27 
28 /* #define DEBUG_OP */
29 /* #define DEBUG_SOFTWARE_TLB */
30 /* #define DEBUG_EXCEPTIONS */
31 
32 #ifdef DEBUG_EXCEPTIONS
33 #  define LOG_EXCP(...) qemu_log(__VA_ARGS__)
34 #else
35 #  define LOG_EXCP(...) do { } while (0)
36 #endif
37 
38 /*****************************************************************************/
39 /* Exception processing */
40 #if defined(CONFIG_USER_ONLY)
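/*
 * Editor's note (assumption): in user-only mode the actual exception handling
 * is done by the per-target cpu loop (e.g. the linux-user loop), so these
 * stubs only need to clear the pending exception state.
 */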
41 void ppc_cpu_do_interrupt(CPUState *cs)
42 {
43     PowerPCCPU *cpu = POWERPC_CPU(cs);
44     CPUPPCState *env = &cpu->env;
45 
46     cs->exception_index = POWERPC_EXCP_NONE;
47     env->error_code = 0;
48 }
49 
50 static void ppc_hw_interrupt(CPUPPCState *env)
51 {
52     CPUState *cs = env_cpu(env);
53 
54     cs->exception_index = POWERPC_EXCP_NONE;
55     env->error_code = 0;
56 }
57 #else /* defined(CONFIG_USER_ONLY) */
58 static inline void dump_syscall(CPUPPCState *env)
59 {
60     qemu_log_mask(CPU_LOG_INT, "syscall r0=%016" PRIx64
61                   " r3=%016" PRIx64 " r4=%016" PRIx64 " r5=%016" PRIx64
62                   " r6=%016" PRIx64 " r7=%016" PRIx64 " r8=%016" PRIx64
63                   " nip=" TARGET_FMT_lx "\n",
64                   ppc_dump_gpr(env, 0), ppc_dump_gpr(env, 3),
65                   ppc_dump_gpr(env, 4), ppc_dump_gpr(env, 5),
66                   ppc_dump_gpr(env, 6), ppc_dump_gpr(env, 7),
67                   ppc_dump_gpr(env, 8), env->nip);
68 }
69 
70 static inline void dump_syscall_vectored(CPUPPCState *env)
71 {
72     qemu_log_mask(CPU_LOG_INT, "syscall r0=%016" PRIx64
73                   " r3=%016" PRIx64 " r4=%016" PRIx64 " r5=%016" PRIx64
74                   " r6=%016" PRIx64 " r7=%016" PRIx64 " r8=%016" PRIx64
75                   " nip=" TARGET_FMT_lx "\n",
76                   ppc_dump_gpr(env, 0), ppc_dump_gpr(env, 3),
77                   ppc_dump_gpr(env, 4), ppc_dump_gpr(env, 5),
78                   ppc_dump_gpr(env, 6), ppc_dump_gpr(env, 7),
79                   ppc_dump_gpr(env, 8), env->nip);
80 }
81 
82 static inline void dump_hcall(CPUPPCState *env)
83 {
84     qemu_log_mask(CPU_LOG_INT, "hypercall r3=%016" PRIx64
85                   " r4=%016" PRIx64 " r5=%016" PRIx64 " r6=%016" PRIx64
86                   " r7=%016" PRIx64 " r8=%016" PRIx64 " r9=%016" PRIx64
87                   " r10=%016" PRIx64 " r11=%016" PRIx64 " r12=%016" PRIx64
88                   " nip=" TARGET_FMT_lx "\n",
89                   ppc_dump_gpr(env, 3), ppc_dump_gpr(env, 4),
90                   ppc_dump_gpr(env, 5), ppc_dump_gpr(env, 6),
91                   ppc_dump_gpr(env, 7), ppc_dump_gpr(env, 8),
92                   ppc_dump_gpr(env, 9), ppc_dump_gpr(env, 10),
93                   ppc_dump_gpr(env, 11), ppc_dump_gpr(env, 12),
94                   env->nip);
95 }
96 
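/*
 * When a CPU wakes from a power-saving state via a system reset, the SRR1
 * "wake reason" bits tell the OS which event caused the wakeup. This helper
 * converts the pending exception into those bits and redirects delivery to
 * the 0x100 vector (machine checks excepted, which are delivered normally).
 */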
97 static int powerpc_reset_wakeup(CPUState *cs, CPUPPCState *env, int excp,
98                                 target_ulong *msr)
99 {
100     /* We are no longer in a PM state */
101     env->resume_as_sreset = false;
102 
103     /* Always pretend to be returning from doze, as we don't lose state */
104     *msr |= SRR1_WS_NOLOSS;
105 
106     /* Machine checks are sent normally */
107     if (excp == POWERPC_EXCP_MCHECK) {
108         return excp;
109     }
110     switch (excp) {
111     case POWERPC_EXCP_RESET:
112         *msr |= SRR1_WAKERESET;
113         break;
114     case POWERPC_EXCP_EXTERNAL:
115         *msr |= SRR1_WAKEEE;
116         break;
117     case POWERPC_EXCP_DECR:
118         *msr |= SRR1_WAKEDEC;
119         break;
120     case POWERPC_EXCP_SDOOR:
121         *msr |= SRR1_WAKEDBELL;
122         break;
123     case POWERPC_EXCP_SDOOR_HV:
124         *msr |= SRR1_WAKEHDBELL;
125         break;
126     case POWERPC_EXCP_HV_MAINT:
127         *msr |= SRR1_WAKEHMI;
128         break;
129     case POWERPC_EXCP_HVIRT:
130         *msr |= SRR1_WAKEHVI;
131         break;
132     default:
133         cpu_abort(cs, "Unsupported exception %d in Power Save mode\n",
134                   excp);
135     }
136     return POWERPC_EXCP_RESET;
137 }
138 
139 /*
140  * AIL - Alternate Interrupt Location, a mode that allows interrupts to be
141  * taken with the MMU on, and which uses an alternate location (e.g., so the
142  * kernel/hv can map the vectors there with an effective address).
143  *
144  * An interrupt is considered to be taken "with AIL" or "AIL applies" if it
145  * is delivered in this way. AIL requires the LPCR to be set to enable this
146  * mode, and then a number of conditions have to be true for AIL to apply.
147  *
148  * First of all, SRESET, MCE, and HMI are always delivered without AIL, because
149  * they specifically want to be in real mode (e.g., the MCE might be signaling
150  * an SLB multi-hit, which requires an SLB flush before the MMU can be enabled).
151  *
152  * After that, behaviour depends on the current MSR[IR], MSR[DR], MSR[HV],
153  * whether or not the interrupt changes MSR[HV] from 0 to 1, and the current
154  * radix mode (LPCR[HR]).
155  *
156  * POWER8, POWER9 with LPCR[HR]=0
157  * | LPCR[AIL] | MSR[IR||DR] | MSR[HV] | new MSR[HV] | AIL |
158  * +-----------+-------------+---------+-------------+-----+
159  * | a         | 00/01/10    | x       | x           | 0   |
160  * | a         | 11          | 0       | 1           | 0   |
161  * | a         | 11          | 1       | 1           | a   |
162  * | a         | 11          | 0       | 0           | a   |
163  * +-------------------------------------------------------+
164  *
165  * POWER9 with LPCR[HR]=1
166  * | LPCR[AIL] | MSR[IR||DR] | MSR[HV] | new MSR[HV] | AIL |
167  * +-----------+-------------+---------+-------------+-----+
168  * | a         | 00/01/10    | x       | x           | 0   |
169  * | a         | 11          | x       | x           | a   |
170  * +-------------------------------------------------------+
171  *
172  * The difference with POWER9 is that MSR[HV] 0->1 interrupts can be sent to
173  * the hypervisor in AIL mode if the guest is radix. This is good for
174  * performance, but it allows the guest to influence the AIL of hypervisor
175  * interrupts using its MSR; it also means the hypervisor must disallow guest
176  * interrupts (MSR[HV] 0->0) from using AIL if it does not want to use AIL
177  * for its MSR[HV] 0->1 interrupts.
178  *
179  * POWER10 addresses those issues with a new LPCR[HAIL] bit that is applied to
180  * interrupts that begin execution with MSR[HV]=1 (so both MSR[HV] 0->1 and
181  * MSR[HV] 1->1).
182  *
183  * HAIL=1 is equivalent to AIL=3, for interrupts delivered with MSR[HV]=1.
184  *
185  * POWER10 behaviour is
186  * | LPCR[AIL] | LPCR[HAIL] | MSR[IR||DR] | MSR[HV] | new MSR[HV] | AIL |
187  * +-----------+------------+-------------+---------+-------------+-----+
188  * | a         | h          | 00/01/10    | 0       | 0           | 0   |
189  * | a         | h          | 11          | 0       | 0           | a   |
190  * | a         | h          | x           | 0       | 1           | h   |
191  * | a         | h          | 00/01/10    | 1       | 1           | 0   |
192  * | a         | h          | 11          | 1       | 1           | h   |
193  * +--------------------------------------------------------------------+
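 *
 * Worked example (illustrative): when AIL=3 applies, an external interrupt
 * that would normally vector to 0x500 is instead taken at
 * 0xc000000000004500 with MSR[IR]=MSR[DR]=1, matching the AIL=3 offset
 * applied in the code below.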
194  */
195 static inline void ppc_excp_apply_ail(PowerPCCPU *cpu, int excp_model, int excp,
196                                       target_ulong msr,
197                                       target_ulong *new_msr,
198                                       target_ulong *vector)
199 {
200 #if defined(TARGET_PPC64)
201     CPUPPCState *env = &cpu->env;
202     bool mmu_all_on = ((msr >> MSR_IR) & 1) && ((msr >> MSR_DR) & 1);
203     bool hv_escalation = !(msr & MSR_HVB) && (*new_msr & MSR_HVB);
204     int ail = 0;
205 
206     if (excp == POWERPC_EXCP_MCHECK ||
207         excp == POWERPC_EXCP_RESET ||
208         excp == POWERPC_EXCP_HV_MAINT) {
209         /* SRESET, MCE, HMI never apply AIL */
210         return;
211     }
212 
213     if (excp_model == POWERPC_EXCP_POWER8 ||
214         excp_model == POWERPC_EXCP_POWER9) {
215         if (!mmu_all_on) {
216             /* AIL only works if MSR[IR] and MSR[DR] are both enabled. */
217             return;
218         }
219         if (hv_escalation && !(env->spr[SPR_LPCR] & LPCR_HR)) {
220             /*
221              * AIL does not work if there is a MSR[HV] 0->1 transition and the
222              * partition is in HPT mode. For radix guests, such interrupts are
223              * allowed to be delivered to the hypervisor in ail mode.
224              */
225             return;
226         }
227 
228         ail = (env->spr[SPR_LPCR] & LPCR_AIL) >> LPCR_AIL_SHIFT;
229         if (ail == 0) {
230             return;
231         }
232         if (ail == 1) {
233             /* AIL=1 is reserved, treat it like AIL=0 */
234             return;
235         }
236 
237     } else if (excp_model == POWERPC_EXCP_POWER10) {
238         if (!mmu_all_on && !hv_escalation) {
239             /*
240              * AIL works for HV interrupts even with guest MSR[IR/DR] disabled.
241              * Guest->guest and HV->HV interrupts do require MMU on.
242              */
243             return;
244         }
245 
246         if (*new_msr & MSR_HVB) {
247             if (!(env->spr[SPR_LPCR] & LPCR_HAIL)) {
248                 /* HV interrupts depend on LPCR[HAIL] */
249                 return;
250             }
251             ail = 3; /* HAIL=1 gives AIL=3 behaviour for HV interrupts */
252         } else {
253             ail = (env->spr[SPR_LPCR] & LPCR_AIL) >> LPCR_AIL_SHIFT;
254         }
255         if (ail == 0) {
256             return;
257         }
258         if (ail == 1 || ail == 2) {
259             /* AIL=1 and AIL=2 are reserved, treat them like AIL=0 */
260             return;
261         }
262     } else {
263         /* Other processors do not support AIL */
264         return;
265     }
266 
267     /*
268      * AIL applies, so the new MSR gets IR and DR set, and an offset applied
269      * to the new IP.
270      */
271     *new_msr |= (1 << MSR_IR) | (1 << MSR_DR);
272 
273     if (excp != POWERPC_EXCP_SYSCALL_VECTORED) {
274         if (ail == 2) {
275             *vector |= 0x0000000000018000ull;
276         } else if (ail == 3) {
277             *vector |= 0xc000000000004000ull;
278         }
279     } else {
280         /*
281          * scv AIL is a little different. AIL=2 does not change the address,
282          * only the MSR. AIL=3 replaces the 0x17000 base with 0xc...3000.
283          */
284         if (ail == 3) {
285             *vector &= ~0x0000000000017000ull; /* Un-apply the base offset */
286             *vector |= 0xc000000000003000ull; /* Apply scv's AIL=3 offset */
287         }
288     }
289 #endif
290 }
291 
292 static inline void powerpc_set_excp_state(PowerPCCPU *cpu,
293                                           target_ulong vector, target_ulong msr)
294 {
295     CPUState *cs = CPU(cpu);
296     CPUPPCState *env = &cpu->env;
297 
298     /*
299      * We don't use hreg_store_msr here as we have already handled any
300      * special case that could occur. Just store the MSR and update hflags.
301      *
302      * Note: we *MUST NOT* use hreg_store_msr() as-is anyway, because it
303      * would prevent setting the HV bit, which some exceptions might need
304      * to do.
305      */
306     env->msr = msr & env->msr_mask;
307     hreg_compute_hflags(env);
308     env->nip = vector;
309     /* Reset exception state */
310     cs->exception_index = POWERPC_EXCP_NONE;
311     env->error_code = 0;
312 
313     /* Reset the reservation */
314     env->reserve_addr = -1;
315 
316     /*
317      * Any interrupt is context synchronizing, check if TCG TLB needs
318      * a delayed flush on ppc64
319      */
320     check_tlb_flush(env, false);
321 }
322 
323 /*
324  * Note that this function should be greatly optimized when called
325  * with a constant excp, from ppc_hw_interrupt
326  */
327 static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
328 {
329     CPUState *cs = CPU(cpu);
330     CPUPPCState *env = &cpu->env;
331     target_ulong msr, new_msr, vector;
332     int srr0, srr1, asrr0, asrr1, lev = -1;
333     bool lpes0;
334 
335     qemu_log_mask(CPU_LOG_INT, "Raise exception at " TARGET_FMT_lx
336                   " => %08x (%02x)\n", env->nip, excp, env->error_code);
337 
338     /* new srr1 value excluding must-be-zero bits */
339     if (excp_model == POWERPC_EXCP_BOOKE) {
340         msr = env->msr;
341     } else {
342         msr = env->msr & ~0x783f0000ULL;
343     }
344 
345     /*
346      * new interrupt handler msr preserves existing HV and ME unless
347      * explicitly overridden
348      */
349     new_msr = env->msr & (((target_ulong)1 << MSR_ME) | MSR_HVB);
350 
351     /* target registers */
352     srr0 = SPR_SRR0;
353     srr1 = SPR_SRR1;
354     asrr0 = -1;
355     asrr1 = -1;
356 
357     /*
358      * check for special resume at 0x100 from doze/nap/sleep/winkle on
359      * P7/P8/P9
360      */
361     if (env->resume_as_sreset) {
362         excp = powerpc_reset_wakeup(cs, env, excp, &msr);
363     }
364 
365     /*
366      * Exception targeting modifiers
367      *
368      * LPES0 is supported on POWER7/8/9
369      * LPES1 is not supported (old iSeries mode)
370      *
371      * On anything else, we behave as if LPES0 is 1
372      * (externals don't alter MSR:HV)
373      */
374 #if defined(TARGET_PPC64)
375     if (excp_model == POWERPC_EXCP_POWER7 ||
376         excp_model == POWERPC_EXCP_POWER8 ||
377         excp_model == POWERPC_EXCP_POWER9 ||
378         excp_model == POWERPC_EXCP_POWER10) {
379         lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
380     } else
381 #endif /* defined(TARGET_PPC64) */
382     {
383         lpes0 = true;
384     }
385 
386     /*
387      * Hypervisor emulation assistance interrupt only exists on server
388      * arch 2.05 or later. We also don't want to generate it if
389      * we don't have HVB in msr_mask (PAPR mode).
390      */
391     if (excp == POWERPC_EXCP_HV_EMU
392 #if defined(TARGET_PPC64)
393         && !(mmu_is_64bit(env->mmu_model) && (env->msr_mask & MSR_HVB))
394 #endif /* defined(TARGET_PPC64) */
395 
396     ) {
397         excp = POWERPC_EXCP_PROGRAM;
398     }
399 
400     switch (excp) {
401     case POWERPC_EXCP_NONE:
402         /* Should never happen */
403         return;
404     case POWERPC_EXCP_CRITICAL:    /* Critical input                         */
405         switch (excp_model) {
406         case POWERPC_EXCP_40x:
407             srr0 = SPR_40x_SRR2;
408             srr1 = SPR_40x_SRR3;
409             break;
410         case POWERPC_EXCP_BOOKE:
411             srr0 = SPR_BOOKE_CSRR0;
412             srr1 = SPR_BOOKE_CSRR1;
413             break;
414         case POWERPC_EXCP_G2:
415             break;
416         default:
417             goto excp_invalid;
418         }
419         break;
420     case POWERPC_EXCP_MCHECK:    /* Machine check exception                  */
421         if (msr_me == 0) {
422             /*
423              * Machine check exception is not enabled.  Enter
424              * checkstop state.
425              */
426             fprintf(stderr, "Machine check while not allowed. "
427                     "Entering checkstop state\n");
428             if (qemu_log_separate()) {
429                 qemu_log("Machine check while not allowed. "
430                         "Entering checkstop state\n");
431             }
432             cs->halted = 1;
433             cpu_interrupt_exittb(cs);
434         }
435         if (env->msr_mask & MSR_HVB) {
436             /*
437              * ISA specifies HV, but can be delivered to guest with HV
438              * clear (e.g., see FWNMI in PAPR).
439              */
440             new_msr |= (target_ulong)MSR_HVB;
441         }
442 
443         /* machine check exceptions don't have ME set */
444         new_msr &= ~((target_ulong)1 << MSR_ME);
445 
446         /* XXX: should also have something loaded in DAR / DSISR */
447         switch (excp_model) {
448         case POWERPC_EXCP_40x:
449             srr0 = SPR_40x_SRR2;
450             srr1 = SPR_40x_SRR3;
451             break;
452         case POWERPC_EXCP_BOOKE:
453             /* FIXME: choose one or the other based on CPU type */
454             srr0 = SPR_BOOKE_MCSRR0;
455             srr1 = SPR_BOOKE_MCSRR1;
456             asrr0 = SPR_BOOKE_CSRR0;
457             asrr1 = SPR_BOOKE_CSRR1;
458             break;
459         default:
460             break;
461         }
462         break;
463     case POWERPC_EXCP_DSI:       /* Data storage exception                   */
464         LOG_EXCP("DSI exception: DSISR=" TARGET_FMT_lx" DAR=" TARGET_FMT_lx
465                  "\n", env->spr[SPR_DSISR], env->spr[SPR_DAR]);
466         break;
467     case POWERPC_EXCP_ISI:       /* Instruction storage exception            */
468         LOG_EXCP("ISI exception: msr=" TARGET_FMT_lx ", nip=" TARGET_FMT_lx
469                  "\n", msr, env->nip);
470         msr |= env->error_code;
471         break;
472     case POWERPC_EXCP_EXTERNAL:  /* External input                           */
473         cs = CPU(cpu);
474 
475         if (!lpes0) {
476             new_msr |= (target_ulong)MSR_HVB;
477             new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
478             srr0 = SPR_HSRR0;
479             srr1 = SPR_HSRR1;
480         }
481         if (env->mpic_proxy) {
482             /* IACK the IRQ on delivery */
483             env->spr[SPR_BOOKE_EPR] = ldl_phys(cs->as, env->mpic_iack);
484         }
485         break;
486     case POWERPC_EXCP_ALIGN:     /* Alignment exception                      */
487         /* Get rS/rD and rA from faulting opcode */
488         /*
489          * Note: the opcode fields will not be set properly for a
490          * direct store load/store, but nobody cares as nobody
491          * actually uses direct store segments.
492          */
493         env->spr[SPR_DSISR] |= (env->error_code & 0x03FF0000) >> 16;
494         break;
495     case POWERPC_EXCP_PROGRAM:   /* Program exception                        */
496         switch (env->error_code & ~0xF) {
497         case POWERPC_EXCP_FP:
498             if ((msr_fe0 == 0 && msr_fe1 == 0) || msr_fp == 0) {
499                 LOG_EXCP("Ignore floating point exception\n");
500                 cs->exception_index = POWERPC_EXCP_NONE;
501                 env->error_code = 0;
502                 return;
503             }
504 
505             /*
506              * FP exceptions always have NIP pointing to the faulting
507              * instruction, so always use store_next and claim we are
508              * precise in the MSR.
509              */
510             msr |= 0x00100000;
511             env->spr[SPR_BOOKE_ESR] = ESR_FP;
512             break;
513         case POWERPC_EXCP_INVAL:
514             LOG_EXCP("Invalid instruction at " TARGET_FMT_lx "\n", env->nip);
515             msr |= 0x00080000;
516             env->spr[SPR_BOOKE_ESR] = ESR_PIL;
517             break;
518         case POWERPC_EXCP_PRIV:
519             msr |= 0x00040000;
520             env->spr[SPR_BOOKE_ESR] = ESR_PPR;
521             break;
522         case POWERPC_EXCP_TRAP:
523             msr |= 0x00020000;
524             env->spr[SPR_BOOKE_ESR] = ESR_PTR;
525             break;
526         default:
527             /* Should never occur */
528             cpu_abort(cs, "Invalid program exception %d. Aborting\n",
529                       env->error_code);
530             break;
531         }
532         break;
533     case POWERPC_EXCP_SYSCALL:   /* System call exception                    */
534         lev = env->error_code;
535 
536         if ((lev == 1) && cpu->vhyp) {
537             dump_hcall(env);
538         } else {
539             dump_syscall(env);
540         }
541 
542         /*
543          * We need to correct the NIP, which in this case is supposed
544          * to point to the next instruction.
545          */
546         env->nip += 4;
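        /*
         * (Note: SRR0 is saved from env->nip further down, so it will point
         * at the instruction following the sc.)
         */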
547 
548         /* "PAPR mode" built-in hypercall emulation */
549         if ((lev == 1) && cpu->vhyp) {
550             PPCVirtualHypervisorClass *vhc =
551                 PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
552             vhc->hypercall(cpu->vhyp, cpu);
553             return;
554         }
555         if (lev == 1) {
556             new_msr |= (target_ulong)MSR_HVB;
557         }
558         break;
559     case POWERPC_EXCP_SYSCALL_VECTORED: /* scv exception                     */
560         lev = env->error_code;
561         dump_syscall_vectored(env);
562         env->nip += 4;
563         new_msr |= env->msr & ((target_ulong)1 << MSR_EE);
564         new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
565         break;
566     case POWERPC_EXCP_FPU:       /* Floating-point unavailable exception     */
567     case POWERPC_EXCP_APU:       /* Auxiliary processor unavailable          */
568     case POWERPC_EXCP_DECR:      /* Decrementer exception                    */
569         break;
570     case POWERPC_EXCP_FIT:       /* Fixed-interval timer interrupt           */
571         /* FIT on 4xx */
572         LOG_EXCP("FIT exception\n");
573         break;
574     case POWERPC_EXCP_WDT:       /* Watchdog timer interrupt                 */
575         LOG_EXCP("WDT exception\n");
576         switch (excp_model) {
577         case POWERPC_EXCP_BOOKE:
578             srr0 = SPR_BOOKE_CSRR0;
579             srr1 = SPR_BOOKE_CSRR1;
580             break;
581         default:
582             break;
583         }
584         break;
585     case POWERPC_EXCP_DTLB:      /* Data TLB error                           */
586     case POWERPC_EXCP_ITLB:      /* Instruction TLB error                    */
587         break;
588     case POWERPC_EXCP_DEBUG:     /* Debug interrupt                          */
589         if (env->flags & POWERPC_FLAG_DE) {
590             /* FIXME: choose one or the other based on CPU type */
591             srr0 = SPR_BOOKE_DSRR0;
592             srr1 = SPR_BOOKE_DSRR1;
593             asrr0 = SPR_BOOKE_CSRR0;
594             asrr1 = SPR_BOOKE_CSRR1;
595             /* DBSR already modified by caller */
596         } else {
597             cpu_abort(cs, "Debug exception triggered on unsupported model\n");
598         }
599         break;
600     case POWERPC_EXCP_SPEU:      /* SPE/embedded floating-point unavailable  */
601         env->spr[SPR_BOOKE_ESR] = ESR_SPV;
602         break;
603     case POWERPC_EXCP_EFPDI:     /* Embedded floating-point data interrupt   */
604         /* XXX: TODO */
605         cpu_abort(cs, "Embedded floating point data exception "
606                   "is not implemented yet !\n");
607         env->spr[SPR_BOOKE_ESR] = ESR_SPV;
608         break;
609     case POWERPC_EXCP_EFPRI:     /* Embedded floating-point round interrupt  */
610         /* XXX: TODO */
611         cpu_abort(cs, "Embedded floating point round exception "
612                   "is not implemented yet !\n");
613         env->spr[SPR_BOOKE_ESR] = ESR_SPV;
614         break;
615     case POWERPC_EXCP_EPERFM:    /* Embedded performance monitor interrupt   */
616         /* XXX: TODO */
617         cpu_abort(cs,
618                   "Performance counter exception is not implemented yet !\n");
619         break;
620     case POWERPC_EXCP_DOORI:     /* Embedded doorbell interrupt              */
621         break;
622     case POWERPC_EXCP_DOORCI:    /* Embedded doorbell critical interrupt     */
623         srr0 = SPR_BOOKE_CSRR0;
624         srr1 = SPR_BOOKE_CSRR1;
625         break;
626     case POWERPC_EXCP_RESET:     /* System reset exception                   */
627         /* A power-saving exception sets ME, otherwise it is unchanged */
628         if (msr_pow) {
629             /* indicate that we resumed from power save mode */
630             msr |= 0x10000;
631             new_msr |= ((target_ulong)1 << MSR_ME);
632         }
633         if (env->msr_mask & MSR_HVB) {
634             /*
635              * ISA specifies HV, but can be delivered to guest with HV
636              * clear (e.g., see FWNMI in PAPR, NMI injection in QEMU).
637              */
638             new_msr |= (target_ulong)MSR_HVB;
639         } else {
640             if (msr_pow) {
641                 cpu_abort(cs, "Trying to deliver power-saving system reset "
642                           "exception %d with no HV support\n", excp);
643             }
644         }
645         break;
646     case POWERPC_EXCP_DSEG:      /* Data segment exception                   */
647     case POWERPC_EXCP_ISEG:      /* Instruction segment exception            */
648     case POWERPC_EXCP_TRACE:     /* Trace exception                          */
649         break;
650     case POWERPC_EXCP_HISI:      /* Hypervisor instruction storage exception */
651         msr |= env->error_code;
652         /* fall through */
653     case POWERPC_EXCP_HDECR:     /* Hypervisor decrementer exception         */
654     case POWERPC_EXCP_HDSI:      /* Hypervisor data storage exception        */
655     case POWERPC_EXCP_HDSEG:     /* Hypervisor data segment exception        */
656     case POWERPC_EXCP_HISEG:     /* Hypervisor instruction segment exception */
657     case POWERPC_EXCP_SDOOR_HV:  /* Hypervisor Doorbell interrupt            */
658     case POWERPC_EXCP_HV_EMU:
659     case POWERPC_EXCP_HVIRT:     /* Hypervisor virtualization                */
660         srr0 = SPR_HSRR0;
661         srr1 = SPR_HSRR1;
662         new_msr |= (target_ulong)MSR_HVB;
663         new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
664         break;
665     case POWERPC_EXCP_VPU:       /* Vector unavailable exception             */
666     case POWERPC_EXCP_VSXU:       /* VSX unavailable exception               */
667     case POWERPC_EXCP_FU:         /* Facility unavailable exception          */
668 #ifdef TARGET_PPC64
669         env->spr[SPR_FSCR] |= ((target_ulong)env->error_code << 56);
670 #endif
671         break;
672     case POWERPC_EXCP_HV_FU:     /* Hypervisor Facility Unavailable Exception */
673 #ifdef TARGET_PPC64
674         env->spr[SPR_HFSCR] |= ((target_ulong)env->error_code << FSCR_IC_POS);
675         srr0 = SPR_HSRR0;
676         srr1 = SPR_HSRR1;
677         new_msr |= (target_ulong)MSR_HVB;
678         new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
679 #endif
680         break;
681     case POWERPC_EXCP_PIT:       /* Programmable interval timer interrupt    */
682         LOG_EXCP("PIT exception\n");
683         break;
684     case POWERPC_EXCP_IO:        /* IO error exception                       */
685         /* XXX: TODO */
686         cpu_abort(cs, "601 IO error exception is not implemented yet !\n");
687         break;
688     case POWERPC_EXCP_RUNM:      /* Run mode exception                       */
689         /* XXX: TODO */
690         cpu_abort(cs, "601 run mode exception is not implemented yet !\n");
691         break;
692     case POWERPC_EXCP_EMUL:      /* Emulation trap exception                 */
693         /* XXX: TODO */
694         cpu_abort(cs, "602 emulation trap exception "
695                   "is not implemented yet !\n");
696         break;
697     case POWERPC_EXCP_IFTLB:     /* Instruction fetch TLB error              */
698         switch (excp_model) {
699         case POWERPC_EXCP_602:
700         case POWERPC_EXCP_603:
701         case POWERPC_EXCP_603E:
702         case POWERPC_EXCP_G2:
703             goto tlb_miss_tgpr;
704         case POWERPC_EXCP_7x5:
705             goto tlb_miss;
706         case POWERPC_EXCP_74xx:
707             goto tlb_miss_74xx;
708         default:
709             cpu_abort(cs, "Invalid instruction TLB miss exception\n");
710             break;
711         }
712         break;
713     case POWERPC_EXCP_DLTLB:     /* Data load TLB miss                       */
714         switch (excp_model) {
715         case POWERPC_EXCP_602:
716         case POWERPC_EXCP_603:
717         case POWERPC_EXCP_603E:
718         case POWERPC_EXCP_G2:
719             goto tlb_miss_tgpr;
720         case POWERPC_EXCP_7x5:
721             goto tlb_miss;
722         case POWERPC_EXCP_74xx:
723             goto tlb_miss_74xx;
724         default:
725             cpu_abort(cs, "Invalid data load TLB miss exception\n");
726             break;
727         }
728         break;
729     case POWERPC_EXCP_DSTLB:     /* Data store TLB miss                      */
730         switch (excp_model) {
731         case POWERPC_EXCP_602:
732         case POWERPC_EXCP_603:
733         case POWERPC_EXCP_603E:
734         case POWERPC_EXCP_G2:
735         tlb_miss_tgpr:
736             /* Swap temporary saved registers with GPRs */
737             if (!(new_msr & ((target_ulong)1 << MSR_TGPR))) {
738                 new_msr |= (target_ulong)1 << MSR_TGPR;
739                 hreg_swap_gpr_tgpr(env);
740             }
741             goto tlb_miss;
742         case POWERPC_EXCP_7x5:
743         tlb_miss:
744 #if defined(DEBUG_SOFTWARE_TLB)
745             if (qemu_log_enabled()) {
746                 const char *es;
747                 target_ulong *miss, *cmp;
748                 int en;
749 
750                 if (excp == POWERPC_EXCP_IFTLB) {
751                     es = "I";
752                     en = 'I';
753                     miss = &env->spr[SPR_IMISS];
754                     cmp = &env->spr[SPR_ICMP];
755                 } else {
756                     if (excp == POWERPC_EXCP_DLTLB) {
757                         es = "DL";
758                     } else {
759                         es = "DS";
760                     }
761                     en = 'D';
762                     miss = &env->spr[SPR_DMISS];
763                     cmp = &env->spr[SPR_DCMP];
764                 }
765                 qemu_log("6xx %sTLB miss: %cM " TARGET_FMT_lx " %cC "
766                          TARGET_FMT_lx " H1 " TARGET_FMT_lx " H2 "
767                          TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp,
768                          env->spr[SPR_HASH1], env->spr[SPR_HASH2],
769                          env->error_code);
770             }
771 #endif
772             msr |= env->crf[0] << 28;
773             msr |= env->error_code; /* key, D/I, S/L bits */
774             /* Set way using a LRU mechanism */
775             msr |= ((env->last_way + 1) & (env->nb_ways - 1)) << 17;
776             break;
777         case POWERPC_EXCP_74xx:
778         tlb_miss_74xx:
779 #if defined(DEBUG_SOFTWARE_TLB)
780             if (qemu_log_enabled()) {
781                 const char *es;
782                 target_ulong *miss, *cmp;
783                 int en;
784 
785                 if (excp == POWERPC_EXCP_IFTLB) {
786                     es = "I";
787                     en = 'I';
788                     miss = &env->spr[SPR_TLBMISS];
789                     cmp = &env->spr[SPR_PTEHI];
790                 } else {
791                     if (excp == POWERPC_EXCP_DLTLB) {
792                         es = "DL";
793                     } else {
794                         es = "DS";
795                     }
796                     en = 'D';
797                     miss = &env->spr[SPR_TLBMISS];
798                     cmp = &env->spr[SPR_PTEHI];
799                 }
800                 qemu_log("74xx %sTLB miss: %cM " TARGET_FMT_lx " %cC "
801                          TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp,
802                          env->error_code);
803             }
804 #endif
805             msr |= env->error_code; /* key bit */
806             break;
807         default:
808             cpu_abort(cs, "Invalid data store TLB miss exception\n");
809             break;
810         }
811         break;
812     case POWERPC_EXCP_FPA:       /* Floating-point assist exception          */
813         /* XXX: TODO */
814         cpu_abort(cs, "Floating point assist exception "
815                   "is not implemented yet !\n");
816         break;
817     case POWERPC_EXCP_DABR:      /* Data address breakpoint                  */
818         /* XXX: TODO */
819         cpu_abort(cs, "DABR exception is not implemented yet !\n");
820         break;
821     case POWERPC_EXCP_IABR:      /* Instruction address breakpoint           */
822         /* XXX: TODO */
823         cpu_abort(cs, "IABR exception is not implemented yet !\n");
824         break;
825     case POWERPC_EXCP_SMI:       /* System management interrupt              */
826         /* XXX: TODO */
827         cpu_abort(cs, "SMI exception is not implemented yet !\n");
828         break;
829     case POWERPC_EXCP_THERM:     /* Thermal interrupt                        */
830         /* XXX: TODO */
831         cpu_abort(cs, "Thermal management exception "
832                   "is not implemented yet !\n");
833         break;
834     case POWERPC_EXCP_PERFM:     /* Embedded performance monitor interrupt   */
835         /* XXX: TODO */
836         cpu_abort(cs,
837                   "Performance counter exception is not implemented yet !\n");
838         break;
839     case POWERPC_EXCP_VPUA:      /* Vector assist exception                  */
840         /* XXX: TODO */
841         cpu_abort(cs, "VPU assist exception is not implemented yet !\n");
842         break;
843     case POWERPC_EXCP_SOFTP:     /* Soft patch exception                     */
844         /* XXX: TODO */
845         cpu_abort(cs,
846                   "970 soft-patch exception is not implemented yet !\n");
847         break;
848     case POWERPC_EXCP_MAINT:     /* Maintenance exception                    */
849         /* XXX: TODO */
850         cpu_abort(cs,
851                   "970 maintenance exception is not implemented yet !\n");
852         break;
853     case POWERPC_EXCP_MEXTBR:    /* Maskable external breakpoint             */
854         /* XXX: TODO */
855         cpu_abort(cs, "Maskable external exception "
856                   "is not implemented yet !\n");
857         break;
858     case POWERPC_EXCP_NMEXTBR:   /* Non maskable external breakpoint         */
859         /* XXX: TODO */
860         cpu_abort(cs, "Non maskable external exception "
861                   "is not implemented yet !\n");
862         break;
863     default:
864     excp_invalid:
865         cpu_abort(cs, "Invalid PowerPC exception %d. Aborting\n", excp);
866         break;
867     }
868 
869     /* Sanity check */
870     if (!(env->msr_mask & MSR_HVB)) {
871         if (new_msr & MSR_HVB) {
872             cpu_abort(cs, "Trying to deliver HV exception (MSR) %d with "
873                       "no HV support\n", excp);
874         }
875         if (srr0 == SPR_HSRR0) {
876             cpu_abort(cs, "Trying to deliver HV exception (HSRR) %d with "
877                       "no HV support\n", excp);
878         }
879     }
880 
881     /*
882      * Sort out the endianness of the interrupt; this differs depending
883      * on the CPU, the HV mode, etc.
884      */
885 #ifdef TARGET_PPC64
886     if (excp_model == POWERPC_EXCP_POWER7) {
887         if (!(new_msr & MSR_HVB) && (env->spr[SPR_LPCR] & LPCR_ILE)) {
888             new_msr |= (target_ulong)1 << MSR_LE;
889         }
890     } else if (excp_model == POWERPC_EXCP_POWER8) {
891         if (new_msr & MSR_HVB) {
892             if (env->spr[SPR_HID0] & HID0_HILE) {
893                 new_msr |= (target_ulong)1 << MSR_LE;
894             }
895         } else if (env->spr[SPR_LPCR] & LPCR_ILE) {
896             new_msr |= (target_ulong)1 << MSR_LE;
897         }
898     } else if (excp_model == POWERPC_EXCP_POWER9 ||
899                excp_model == POWERPC_EXCP_POWER10) {
900         if (new_msr & MSR_HVB) {
901             if (env->spr[SPR_HID0] & HID0_POWER9_HILE) {
902                 new_msr |= (target_ulong)1 << MSR_LE;
903             }
904         } else if (env->spr[SPR_LPCR] & LPCR_ILE) {
905             new_msr |= (target_ulong)1 << MSR_LE;
906         }
907     } else if (msr_ile) {
908         new_msr |= (target_ulong)1 << MSR_LE;
909     }
910 #else
911     if (msr_ile) {
912         new_msr |= (target_ulong)1 << MSR_LE;
913     }
914 #endif
915 
916     vector = env->excp_vectors[excp];
917     if (vector == (target_ulong)-1ULL) {
918         cpu_abort(cs, "Raised an exception without defined vector %d\n",
919                   excp);
920     }
921 
922     vector |= env->excp_prefix;
923 
924     /* If any alternate SRR registers are defined, duplicate the saved values */
925     if (asrr0 != -1) {
926         env->spr[asrr0] = env->nip;
927     }
928     if (asrr1 != -1) {
929         env->spr[asrr1] = msr;
930     }
931 
932 #if defined(TARGET_PPC64)
933     if (excp_model == POWERPC_EXCP_BOOKE) {
934         if (env->spr[SPR_BOOKE_EPCR] & EPCR_ICM) {
935             /* Cat.64-bit: EPCR.ICM is copied to MSR.CM */
936             new_msr |= (target_ulong)1 << MSR_CM;
937         } else {
938             vector = (uint32_t)vector;
939         }
940     } else {
941         if (!msr_isf && !mmu_is_64bit(env->mmu_model)) {
942             vector = (uint32_t)vector;
943         } else {
944             new_msr |= (target_ulong)1 << MSR_SF;
945         }
946     }
947 #endif
948 
949     if (excp != POWERPC_EXCP_SYSCALL_VECTORED) {
950         /* Save PC */
951         env->spr[srr0] = env->nip;
952 
953         /* Save MSR */
954         env->spr[srr1] = msr;
955 
956 #if defined(TARGET_PPC64)
957     } else {
958         vector += lev * 0x20;
959 
960         env->lr = env->nip;
961         env->ctr = msr;
962 #endif
963     }
964 
965     /* This can update new_msr and vector if AIL applies */
966     ppc_excp_apply_ail(cpu, excp_model, excp, msr, &new_msr, &vector);
967 
968     powerpc_set_excp_state(cpu, vector, new_msr);
969 }
970 
971 void ppc_cpu_do_interrupt(CPUState *cs)
972 {
973     PowerPCCPU *cpu = POWERPC_CPU(cs);
974     CPUPPCState *env = &cpu->env;
975 
976     powerpc_excp(cpu, env->excp_model, cs->exception_index);
977 }
978 
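/*
 * Deliver the highest-priority pending hardware interrupt, subject to the
 * masking conditions (MSR[EE], LPCR bits, ...) checked below. Interrupts
 * are tested roughly in architectural priority order.
 */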
979 static void ppc_hw_interrupt(CPUPPCState *env)
980 {
981     PowerPCCPU *cpu = env_archcpu(env);
982     bool async_deliver;
983 
984     /* External reset */
985     if (env->pending_interrupts & (1 << PPC_INTERRUPT_RESET)) {
986         env->pending_interrupts &= ~(1 << PPC_INTERRUPT_RESET);
987         powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_RESET);
988         return;
989     }
990     /* Machine check exception */
991     if (env->pending_interrupts & (1 << PPC_INTERRUPT_MCK)) {
992         env->pending_interrupts &= ~(1 << PPC_INTERRUPT_MCK);
993         powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_MCHECK);
994         return;
995     }
996 #if 0 /* TODO */
997     /* External debug exception */
998     if (env->pending_interrupts & (1 << PPC_INTERRUPT_DEBUG)) {
999         env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DEBUG);
1000         powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DEBUG);
1001         return;
1002     }
1003 #endif
1004 
1005     /*
1006      * For interrupts that gate on MSR:EE, we need to be a bit more
1007      * subtle: they must be let through even when EE is clear if we are
1008      * coming out of some power-management states (so that they turn
1009      * into a 0x100 system reset).
1010      */
1011     async_deliver = (msr_ee != 0) || env->resume_as_sreset;
1012 
1013     /* Hypervisor decrementer exception */
1014     if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDECR)) {
1015         /* LPCR will be clear when not supported so this will work */
1016         bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE);
1017         if ((async_deliver || msr_hv == 0) && hdice) {
1018             /* HDEC clears on delivery */
1019             env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDECR);
1020             powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_HDECR);
1021             return;
1022         }
1023     }
1024 
1025     /* Hypervisor virtualization interrupt */
1026     if (env->pending_interrupts & (1 << PPC_INTERRUPT_HVIRT)) {
1027         /* LPCR will be clear when not supported so this will work */
1028         bool hvice = !!(env->spr[SPR_LPCR] & LPCR_HVICE);
1029         if ((async_deliver || msr_hv == 0) && hvice) {
1030             powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_HVIRT);
1031             return;
1032         }
1033     }
1034 
1035     /* External interrupt can ignore MSR:EE under some circumstances */
1036     if (env->pending_interrupts & (1 << PPC_INTERRUPT_EXT)) {
1037         bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
1038         bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC);
1039         /* HEIC blocks delivery to the hypervisor */
1040         if ((async_deliver && !(heic && msr_hv && !msr_pr)) ||
1041             (env->has_hv_mode && msr_hv == 0 && !lpes0)) {
1042             powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_EXTERNAL);
1043             return;
1044         }
1045     }
1046     if (msr_ce != 0) {
1047         /* External critical interrupt */
1048         if (env->pending_interrupts & (1 << PPC_INTERRUPT_CEXT)) {
1049             powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_CRITICAL);
1050             return;
1051         }
1052     }
1053     if (async_deliver != 0) {
1054         /* Watchdog timer on embedded PowerPC */
1055         if (env->pending_interrupts & (1 << PPC_INTERRUPT_WDT)) {
1056             env->pending_interrupts &= ~(1 << PPC_INTERRUPT_WDT);
1057             powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_WDT);
1058             return;
1059         }
1060         if (env->pending_interrupts & (1 << PPC_INTERRUPT_CDOORBELL)) {
1061             env->pending_interrupts &= ~(1 << PPC_INTERRUPT_CDOORBELL);
1062             powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DOORCI);
1063             return;
1064         }
1065         /* Fixed interval timer on embedded PowerPC */
1066         if (env->pending_interrupts & (1 << PPC_INTERRUPT_FIT)) {
1067             env->pending_interrupts &= ~(1 << PPC_INTERRUPT_FIT);
1068             powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_FIT);
1069             return;
1070         }
1071         /* Programmable interval timer on embedded PowerPC */
1072         if (env->pending_interrupts & (1 << PPC_INTERRUPT_PIT)) {
1073             env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PIT);
1074             powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_PIT);
1075             return;
1076         }
1077         /* Decrementer exception */
1078         if (env->pending_interrupts & (1 << PPC_INTERRUPT_DECR)) {
1079             if (ppc_decr_clear_on_delivery(env)) {
1080                 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DECR);
1081             }
1082             powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DECR);
1083             return;
1084         }
1085         if (env->pending_interrupts & (1 << PPC_INTERRUPT_DOORBELL)) {
1086             env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DOORBELL);
1087             if (is_book3s_arch2x(env)) {
1088                 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_SDOOR);
1089             } else {
1090                 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DOORI);
1091             }
1092             return;
1093         }
1094         if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDOORBELL)) {
1095             env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDOORBELL);
1096             powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_SDOOR_HV);
1097             return;
1098         }
1099         if (env->pending_interrupts & (1 << PPC_INTERRUPT_PERFM)) {
1100             env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PERFM);
1101             powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_PERFM);
1102             return;
1103         }
1104         /* Thermal interrupt */
1105         if (env->pending_interrupts & (1 << PPC_INTERRUPT_THERM)) {
1106             env->pending_interrupts &= ~(1 << PPC_INTERRUPT_THERM);
1107             powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_THERM);
1108             return;
1109         }
1110     }
1111 
1112     if (env->resume_as_sreset) {
1113         /*
1114          * This is a bug! It means that has_work took us out of halt without
1115          * anything to deliver while in a PM state that requires getting
1116          * out via a 0x100.
1117          *
1118          * This means we will incorrectly execute past the power management
1119          * instruction instead of triggering a reset.
1120          *
1121          * It generally means a discrepancy between the wakeup conditions in the
1122          * processor has_work implementation and the logic in this function.
1123          */
1124         cpu_abort(env_cpu(env),
1125                   "Wakeup from PM state but interrupt Undelivered");
1126     }
1127 }
1128 
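/*
 * Entry point used when a system reset is injected from outside the CPU
 * (e.g. QEMU's NMI injection).
 */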
1129 void ppc_cpu_do_system_reset(CPUState *cs)
1130 {
1131     PowerPCCPU *cpu = POWERPC_CPU(cs);
1132     CPUPPCState *env = &cpu->env;
1133 
1134     powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_RESET);
1135 }
1136 
1137 void ppc_cpu_do_fwnmi_machine_check(CPUState *cs, target_ulong vector)
1138 {
1139     PowerPCCPU *cpu = POWERPC_CPU(cs);
1140     CPUPPCState *env = &cpu->env;
1141     PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
1142     target_ulong msr = 0;
1143 
1144     /*
1145      * Set MSR and NIP for the handler; SRR0/1, DAR and DSISR have already
1146      * been set by KVM.
1147      */
1148     msr = (1ULL << MSR_ME);
1149     msr |= env->msr & (1ULL << MSR_SF);
1150     if (!(*pcc->interrupts_big_endian)(cpu)) {
1151         msr |= (1ULL << MSR_LE);
1152     }
1153 
1154     powerpc_set_excp_state(cpu, vector, msr);
1155 }
1156 #endif /* !CONFIG_USER_ONLY */
1157 
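/*
 * Called from the main execution loop when an interrupt has been requested;
 * returns true if a hard interrupt request was present and processed.
 */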
1158 bool ppc_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
1159 {
1160     PowerPCCPU *cpu = POWERPC_CPU(cs);
1161     CPUPPCState *env = &cpu->env;
1162 
1163     if (interrupt_request & CPU_INTERRUPT_HARD) {
1164         ppc_hw_interrupt(env);
1165         if (env->pending_interrupts == 0) {
1166             cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
1167         }
1168         return true;
1169     }
1170     return false;
1171 }
1172 
1173 #if defined(DEBUG_OP)
1174 static void cpu_dump_rfi(target_ulong RA, target_ulong msr)
1175 {
1176     qemu_log("Return from exception at " TARGET_FMT_lx " with flags "
1177              TARGET_FMT_lx "\n", RA, msr);
1178 }
1179 #endif
1180 
1181 /*****************************************************************************/
1182 /* Exceptions processing helpers */
1183 
1184 void raise_exception_err_ra(CPUPPCState *env, uint32_t exception,
1185                             uint32_t error_code, uintptr_t raddr)
1186 {
1187     CPUState *cs = env_cpu(env);
1188 
1189     cs->exception_index = exception;
1190     env->error_code = error_code;
1191     cpu_loop_exit_restore(cs, raddr);
1192 }
1193 
1194 void raise_exception_err(CPUPPCState *env, uint32_t exception,
1195                          uint32_t error_code)
1196 {
1197     raise_exception_err_ra(env, exception, error_code, 0);
1198 }
1199 
1200 void raise_exception(CPUPPCState *env, uint32_t exception)
1201 {
1202     raise_exception_err_ra(env, exception, 0, 0);
1203 }
1204 
1205 void raise_exception_ra(CPUPPCState *env, uint32_t exception,
1206                         uintptr_t raddr)
1207 {
1208     raise_exception_err_ra(env, exception, 0, raddr);
1209 }
1210 
1211 void helper_raise_exception_err(CPUPPCState *env, uint32_t exception,
1212                                 uint32_t error_code)
1213 {
1214     raise_exception_err_ra(env, exception, error_code, 0);
1215 }
1216 
1217 void helper_raise_exception(CPUPPCState *env, uint32_t exception)
1218 {
1219     raise_exception_err_ra(env, exception, 0, 0);
1220 }
1221 
1222 #if !defined(CONFIG_USER_ONLY)
1223 void helper_store_msr(CPUPPCState *env, target_ulong val)
1224 {
1225     uint32_t excp = hreg_store_msr(env, val, 0);
1226 
1227     if (excp != 0) {
1228         CPUState *cs = env_cpu(env);
1229         cpu_interrupt_exittb(cs);
1230         raise_exception(env, excp);
1231     }
1232 }
1233 
1234 #if defined(TARGET_PPC64)
1235 void helper_scv(CPUPPCState *env, uint32_t lev)
1236 {
1237     if (env->spr[SPR_FSCR] & (1ull << FSCR_SCV)) {
1238         raise_exception_err(env, POWERPC_EXCP_SYSCALL_VECTORED, lev);
1239     } else {
1240         raise_exception_err(env, POWERPC_EXCP_FU, FSCR_IC_SCV);
1241     }
1242 }
1243 
1244 void helper_pminsn(CPUPPCState *env, powerpc_pm_insn_t insn)
1245 {
1246     CPUState *cs;
1247 
1248     cs = env_cpu(env);
1249     cs->halted = 1;
1250 
1251     /*
1252      * The architecture specifies that HDEC interrupts are discarded
1253      * in PM states
1254      */
1255     env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDECR);
1256 
1257     /* Condition for waking up at 0x100 */
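    /*
     * (stop resumes at 0x100 only if PSSCR[EC] is set; the other
     * power-saving instructions always do.)
     */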
1258     env->resume_as_sreset = (insn != PPC_PM_STOP) ||
1259         (env->spr[SPR_PSSCR] & PSSCR_EC);
1260 }
1261 #endif /* defined(TARGET_PPC64) */
1262 
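/*
 * Common return-from-interrupt path: restore NIP and MSR from the supplied
 * values and resynchronize execution (TB exit, reservation and TLB state).
 */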
1263 static inline void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr)
1264 {
1265     CPUState *cs = env_cpu(env);
1266 
1267     /* MSR:POW cannot be set by any form of rfi */
1268     msr &= ~(1ULL << MSR_POW);
1269 
1270 #if defined(TARGET_PPC64)
1271     /* Switching to 32-bit? Crop the nip */
1272     if (!msr_is_64bit(env, msr)) {
1273         nip = (uint32_t)nip;
1274     }
1275 #else
1276     nip = (uint32_t)nip;
1277 #endif
1278     /* XXX: beware: this is false if VLE is supported */
1279     env->nip = nip & ~((target_ulong)0x00000003);
1280     hreg_store_msr(env, msr, 1);
1281 #if defined(DEBUG_OP)
1282     cpu_dump_rfi(env->nip, env->msr);
1283 #endif
1284     /*
1285      * No need to raise an exception here, as rfi is always the last
1286      * insn of a TB
1287      */
1288     cpu_interrupt_exittb(cs);
1289     /* Reset the reservation */
1290     env->reserve_addr = -1;
1291 
1292     /* Context synchronizing: check if TCG TLB needs flush */
1293     check_tlb_flush(env, false);
1294 }
1295 
1296 void helper_rfi(CPUPPCState *env)
1297 {
1298     do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1] & 0xfffffffful);
1299 }
1300 
1301 #define MSR_BOOK3S_MASK
1302 #if defined(TARGET_PPC64)
1303 void helper_rfid(CPUPPCState *env)
1304 {
1305     /*
1306      * The architecture defines a number of rules for which bits can
1307      * change but in practice, we handle this in hreg_store_msr()
1308      * which will be called by do_rfi(), so there is no need to filter
1309      * here
1310      */
1311     do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1]);
1312 }
1313 
1314 void helper_rfscv(CPUPPCState *env)
1315 {
1316     do_rfi(env, env->lr, env->ctr);
1317 }
1318 
1319 void helper_hrfid(CPUPPCState *env)
1320 {
1321     do_rfi(env, env->spr[SPR_HSRR0], env->spr[SPR_HSRR1]);
1322 }
1323 #endif
1324 
1325 /*****************************************************************************/
1326 /* Embedded PowerPC specific helpers */
1327 void helper_40x_rfci(CPUPPCState *env)
1328 {
1329     do_rfi(env, env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3]);
1330 }
1331 
1332 void helper_rfci(CPUPPCState *env)
1333 {
1334     do_rfi(env, env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1]);
1335 }
1336 
1337 void helper_rfdi(CPUPPCState *env)
1338 {
1339     /* FIXME: choose CSRR1 or DSRR1 based on cpu type */
1340     do_rfi(env, env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1]);
1341 }
1342 
1343 void helper_rfmci(CPUPPCState *env)
1344 {
1345     /* FIXME: choose CSRR1 or MCSRR1 based on cpu type */
1346     do_rfi(env, env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1]);
1347 }
1348 #endif
1349 
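/*
 * tw/td trap helpers: "flags" carries the TO field of the instruction, one
 * bit per trap condition (signed <, signed >, equal, unsigned <, unsigned >)
 * as tested below; a match raises a trap-type program interrupt.
 */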
1350 void helper_tw(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
1351                uint32_t flags)
1352 {
1353     if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
1354                   ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
1355                   ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
1356                   ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
1357                   ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
1358         raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
1359                                POWERPC_EXCP_TRAP, GETPC());
1360     }
1361 }
1362 
1363 #if defined(TARGET_PPC64)
1364 void helper_td(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
1365                uint32_t flags)
1366 {
1367     if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
1368                   ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
1369                   ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
1370                   ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
1371                   ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01))))) {
1372         raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
1373                                POWERPC_EXCP_TRAP, GETPC());
1374     }
1375 }
1376 #endif
1377 
1378 #if !defined(CONFIG_USER_ONLY)
1379 /*****************************************************************************/
1380 /* PowerPC 601 specific instructions (POWER bridge) */
1381 
1382 void helper_rfsvc(CPUPPCState *env)
1383 {
1384     do_rfi(env, env->lr, env->ctr & 0x0000FFFF);
1385 }
1386 
1387 /* Embedded.Processor Control */
1388 static int dbell2irq(target_ulong rb)
1389 {
1390     int msg = rb & DBELL_TYPE_MASK;
1391     int irq = -1;
1392 
1393     switch (msg) {
1394     case DBELL_TYPE_DBELL:
1395         irq = PPC_INTERRUPT_DOORBELL;
1396         break;
1397     case DBELL_TYPE_DBELL_CRIT:
1398         irq = PPC_INTERRUPT_CDOORBELL;
1399         break;
1400     case DBELL_TYPE_G_DBELL:
1401     case DBELL_TYPE_G_DBELL_CRIT:
1402     case DBELL_TYPE_G_DBELL_MC:
1403         /* XXX implement */
1404     default:
1405         break;
1406     }
1407 
1408     return irq;
1409 }
1410 
1411 void helper_msgclr(CPUPPCState *env, target_ulong rb)
1412 {
1413     int irq = dbell2irq(rb);
1414 
1415     if (irq < 0) {
1416         return;
1417     }
1418 
1419     env->pending_interrupts &= ~(1 << irq);
1420 }
1421 
1422 void helper_msgsnd(target_ulong rb)
1423 {
1424     int irq = dbell2irq(rb);
1425     int pir = rb & DBELL_PIRTAG_MASK;
1426     CPUState *cs;
1427 
1428     if (irq < 0) {
1429         return;
1430     }
1431 
1432     qemu_mutex_lock_iothread();
1433     CPU_FOREACH(cs) {
1434         PowerPCCPU *cpu = POWERPC_CPU(cs);
1435         CPUPPCState *cenv = &cpu->env;
1436 
1437         if ((rb & DBELL_BRDCAST) || (cenv->spr[SPR_BOOKE_PIR] == pir)) {
1438             cenv->pending_interrupts |= 1 << irq;
1439             cpu_interrupt(cs, CPU_INTERRUPT_HARD);
1440         }
1441     }
1442     qemu_mutex_unlock_iothread();
1443 }
1444 
1445 /* Server Processor Control */
1446 
1447 static bool dbell_type_server(target_ulong rb)
1448 {
1449     /*
1450      * A Directed Hypervisor Doorbell message is sent only if the
1451      * message type is 5. All other types are reserved and the
1452      * instruction is a no-op
1453      */
1454     return (rb & DBELL_TYPE_MASK) == DBELL_TYPE_DBELL_SERVER;
1455 }
1456 
1457 void helper_book3s_msgclr(CPUPPCState *env, target_ulong rb)
1458 {
1459     if (!dbell_type_server(rb)) {
1460         return;
1461     }
1462 
1463     env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDOORBELL);
1464 }
1465 
1466 static void book3s_msgsnd_common(int pir, int irq)
1467 {
1468     CPUState *cs;
1469 
1470     qemu_mutex_lock_iothread();
1471     CPU_FOREACH(cs) {
1472         PowerPCCPU *cpu = POWERPC_CPU(cs);
1473         CPUPPCState *cenv = &cpu->env;
1474 
1475         /* TODO: broadcast message to all threads of the same processor */
1476         if (cenv->spr_cb[SPR_PIR].default_value == pir) {
1477             cenv->pending_interrupts |= 1 << irq;
1478             cpu_interrupt(cs, CPU_INTERRUPT_HARD);
1479         }
1480     }
1481     qemu_mutex_unlock_iothread();
1482 }
1483 
1484 void helper_book3s_msgsnd(target_ulong rb)
1485 {
1486     int pir = rb & DBELL_PROCIDTAG_MASK;
1487 
1488     if (!dbell_type_server(rb)) {
1489         return;
1490     }
1491 
1492     book3s_msgsnd_common(pir, PPC_INTERRUPT_HDOORBELL);
1493 }
1494 
1495 #if defined(TARGET_PPC64)
1496 void helper_book3s_msgclrp(CPUPPCState *env, target_ulong rb)
1497 {
1498     helper_hfscr_facility_check(env, HFSCR_MSGP, "msgclrp", HFSCR_IC_MSGP);
1499 
1500     if (!dbell_type_server(rb)) {
1501         return;
1502     }
1503 
1504     env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DOORBELL);
1505 }
1506 
1507 /*
1508  * Send a message to other threads that are on the same
1509  * multi-threaded processor.
1510  */
1511 void helper_book3s_msgsndp(CPUPPCState *env, target_ulong rb)
1512 {
1513     int pir = env->spr_cb[SPR_PIR].default_value;
1514 
1515     helper_hfscr_facility_check(env, HFSCR_MSGP, "msgsndp", HFSCR_IC_MSGP);
1516 
1517     if (!dbell_type_server(rb)) {
1518         return;
1519     }
1520 
1521     /* TODO: TCG supports only one thread */
1522 
1523     book3s_msgsnd_common(pir, PPC_INTERRUPT_DOORBELL);
1524 }
1525 #endif
1526 #endif
1527 
1528 void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
1529                                  MMUAccessType access_type,
1530                                  int mmu_idx, uintptr_t retaddr)
1531 {
1532     CPUPPCState *env = cs->env_ptr;
1533     uint32_t insn;
1534 
1535     /* Restore state and reload the insn we executed, for filling in DSISR.  */
1536     cpu_restore_state(cs, retaddr, true);
1537     insn = cpu_ldl_code(env, env->nip);
1538 
1539     cs->exception_index = POWERPC_EXCP_ALIGN;
1540     env->error_code = insn & 0x03FF0000;
1541     cpu_loop_exit(cs);
1542 }
1543