xref: /openbmc/qemu/target/ppc/excp_helper.c (revision 3cfafd31)
1 /*
2  *  PowerPC exception emulation helpers for QEMU.
3  *
4  *  Copyright (c) 2003-2007 Jocelyn Mayer
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #include "qemu/osdep.h"
20 #include "qemu/main-loop.h"
21 #include "cpu.h"
22 #include "exec/exec-all.h"
23 #include "internal.h"
24 #include "helper_regs.h"
25 
26 #ifdef CONFIG_TCG
27 #include "exec/helper-proto.h"
28 #include "exec/cpu_ldst.h"
29 #endif
30 
31 /* #define DEBUG_OP */
32 /* #define DEBUG_SOFTWARE_TLB */
33 /* #define DEBUG_EXCEPTIONS */
34 
35 #ifdef DEBUG_EXCEPTIONS
36 #  define LOG_EXCP(...) qemu_log(__VA_ARGS__)
37 #else
38 #  define LOG_EXCP(...) do { } while (0)
39 #endif
40 
41 /*****************************************************************************/
42 /* Exception processing */
43 #if defined(CONFIG_USER_ONLY)
44 void ppc_cpu_do_interrupt(CPUState *cs)
45 {
46     PowerPCCPU *cpu = POWERPC_CPU(cs);
47     CPUPPCState *env = &cpu->env;
48 
49     cs->exception_index = POWERPC_EXCP_NONE;
50     env->error_code = 0;
51 }
52 
53 static void ppc_hw_interrupt(CPUPPCState *env)
54 {
55     CPUState *cs = env_cpu(env);
56 
57     cs->exception_index = POWERPC_EXCP_NONE;
58     env->error_code = 0;
59 }
60 #else /* defined(CONFIG_USER_ONLY) */
61 static inline void dump_syscall(CPUPPCState *env)
62 {
63     qemu_log_mask(CPU_LOG_INT, "syscall r0=%016" PRIx64
64                   " r3=%016" PRIx64 " r4=%016" PRIx64 " r5=%016" PRIx64
65                   " r6=%016" PRIx64 " r7=%016" PRIx64 " r8=%016" PRIx64
66                   " nip=" TARGET_FMT_lx "\n",
67                   ppc_dump_gpr(env, 0), ppc_dump_gpr(env, 3),
68                   ppc_dump_gpr(env, 4), ppc_dump_gpr(env, 5),
69                   ppc_dump_gpr(env, 6), ppc_dump_gpr(env, 7),
70                   ppc_dump_gpr(env, 8), env->nip);
71 }
72 
73 static inline void dump_hcall(CPUPPCState *env)
74 {
75     qemu_log_mask(CPU_LOG_INT, "hypercall r3=%016" PRIx64
76                   " r4=%016" PRIx64 " r5=%016" PRIx64 " r6=%016" PRIx64
77                   " r7=%016" PRIx64 " r8=%016" PRIx64 " r9=%016" PRIx64
78                   " r10=%016" PRIx64 " r11=%016" PRIx64 " r12=%016" PRIx64
79                   " nip=" TARGET_FMT_lx "\n",
80                   ppc_dump_gpr(env, 3), ppc_dump_gpr(env, 4),
81                   ppc_dump_gpr(env, 5), ppc_dump_gpr(env, 6),
82                   ppc_dump_gpr(env, 7), ppc_dump_gpr(env, 8),
83                   ppc_dump_gpr(env, 9), ppc_dump_gpr(env, 10),
84                   ppc_dump_gpr(env, 11), ppc_dump_gpr(env, 12),
85                   env->nip);
86 }
87 
88 static int powerpc_reset_wakeup(CPUState *cs, CPUPPCState *env, int excp,
89                                 target_ulong *msr)
90 {
91     /* We are no longer in a PM state */
92     env->resume_as_sreset = false;
93 
94     /* Always pretend to be returning from doze, as we don't lose state */
95     *msr |= SRR1_WS_NOLOSS;
96 
97     /* Machine checks are sent normally */
98     if (excp == POWERPC_EXCP_MCHECK) {
99         return excp;
100     }
101     switch (excp) {
102     case POWERPC_EXCP_RESET:
103         *msr |= SRR1_WAKERESET;
104         break;
105     case POWERPC_EXCP_EXTERNAL:
106         *msr |= SRR1_WAKEEE;
107         break;
108     case POWERPC_EXCP_DECR:
109         *msr |= SRR1_WAKEDEC;
110         break;
111     case POWERPC_EXCP_SDOOR:
112         *msr |= SRR1_WAKEDBELL;
113         break;
114     case POWERPC_EXCP_SDOOR_HV:
115         *msr |= SRR1_WAKEHDBELL;
116         break;
117     case POWERPC_EXCP_HV_MAINT:
118         *msr |= SRR1_WAKEHMI;
119         break;
120     case POWERPC_EXCP_HVIRT:
121         *msr |= SRR1_WAKEHVI;
122         break;
123     default:
124         cpu_abort(cs, "Unsupported exception %d in Power Save mode\n",
125                   excp);
126     }
127     return POWERPC_EXCP_RESET;
128 }
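/*
 * Illustrative note (not in the original source): a decrementer interrupt
 * that arrives while the core sleeps with env->resume_as_sreset set is
 * converted by the function above into a 0x100 system reset whose SRR1
 * carries SRR1_WS_NOLOSS | SRR1_WAKEDEC, which is how the OS reset handler
 * can tell both that no state was lost and why it woke up.
 */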
129 
130 /*
131  * AIL - Alternate Interrupt Location, a mode that allows interrupts to be
132  * taken with the MMU on, and which uses an alternate location (e.g., so the
133  * kernel/hv can map the vectors there with an effective address).
134  *
135  * An interrupt is considered to be taken "with AIL", or "AIL applies", if it
136  * is delivered in this way. AIL requires the LPCR to be set to enable this
137  * mode, and then a number of conditions have to be true for AIL to apply.
138  *
139  * First of all, SRESET, MCE, and HMI are always delivered without AIL, because
140  * they specifically want to be in real mode (e.g., the MCE might be signaling
141  * an SLB multi-hit, which requires an SLB flush before the MMU can be enabled).
142  *
143  * After that, behaviour depends on the current MSR[IR], MSR[DR], MSR[HV],
144  * whether or not the interrupt changes MSR[HV] from 0 to 1, and the current
145  * radix mode (LPCR[HR]).
146  *
147  * POWER8, POWER9 with LPCR[HR]=0
148  * | LPCR[AIL] | MSR[IR||DR] | MSR[HV] | new MSR[HV] | AIL |
149  * +-----------+-------------+---------+-------------+-----+
150  * | a         | 00/01/10    | x       | x           | 0   |
151  * | a         | 11          | 0       | 1           | 0   |
152  * | a         | 11          | 1       | 1           | a   |
153  * | a         | 11          | 0       | 0           | a   |
154  * +-------------------------------------------------------+
155  *
156  * POWER9 with LPCR[HR]=1
157  * | LPCR[AIL] | MSR[IR||DR] | MSR[HV] | new MSR[HV] | AIL |
158  * +-----------+-------------+---------+-------------+-----+
159  * | a         | 00/01/10    | x       | x           | 0   |
160  * | a         | 11          | x       | x           | a   |
161  * +-------------------------------------------------------+
162  *
163  * The difference with POWER9 is that MSR[HV] 0->1 interrupts can be sent to
164  * the hypervisor in AIL mode if the guest is radix. This is good for
165  * performance, but it allows the guest to influence the AIL of hypervisor
166  * interrupts using its MSR; it also means the hypervisor must disallow guest
167  * interrupts (MSR[HV] 0->0) from using AIL if it does not want to use AIL
168  * for its own MSR[HV] 0->1 interrupts.
169  *
170  * POWER10 addresses those issues with a new LPCR[HAIL] bit that is applied to
171  * interrupts that begin execution with MSR[HV]=1 (so both MSR[HV] 0->1 and
172  * MSR[HV] 1->1).
173  *
174  * HAIL=1 is equivalent to AIL=3, for interrupts delivered with MSR[HV]=1.
175  *
176  * POWER10 behaviour is
177  * | LPCR[AIL] | LPCR[HAIL] | MSR[IR||DR] | MSR[HV] | new MSR[HV] | AIL |
178  * +-----------+------------+-------------+---------+-------------+-----+
179  * | a         | h          | 00/01/10    | 0       | 0           | 0   |
180  * | a         | h          | 11          | 0       | 0           | a   |
181  * | a         | h          | x           | 0       | 1           | h   |
182  * | a         | h          | 00/01/10    | 1       | 1           | 0   |
183  * | a         | h          | 11          | 1       | 1           | h   |
184  * +--------------------------------------------------------------------+
185  */
186 static inline void ppc_excp_apply_ail(PowerPCCPU *cpu, int excp_model, int excp,
187                                       target_ulong msr,
188                                       target_ulong *new_msr,
189                                       target_ulong *vector)
190 {
191 #if defined(TARGET_PPC64)
192     CPUPPCState *env = &cpu->env;
193     bool mmu_all_on = ((msr >> MSR_IR) & 1) && ((msr >> MSR_DR) & 1);
194     bool hv_escalation = !(msr & MSR_HVB) && (*new_msr & MSR_HVB);
195     int ail = 0;
196 
197     if (excp == POWERPC_EXCP_MCHECK ||
198         excp == POWERPC_EXCP_RESET ||
199         excp == POWERPC_EXCP_HV_MAINT) {
200         /* SRESET, MCE, HMI never apply AIL */
201         return;
202     }
203 
204     if (excp_model == POWERPC_EXCP_POWER8 ||
205         excp_model == POWERPC_EXCP_POWER9) {
206         if (!mmu_all_on) {
207             /* AIL only works if MSR[IR] and MSR[DR] are both enabled. */
208             return;
209         }
210         if (hv_escalation && !(env->spr[SPR_LPCR] & LPCR_HR)) {
211             /*
212              * AIL does not work if there is an MSR[HV] 0->1 transition and
213              * the partition is in HPT mode. For radix guests, such interrupts
214              * are allowed to be delivered to the hypervisor in AIL mode.
215              */
216             return;
217         }
218 
219         ail = (env->spr[SPR_LPCR] & LPCR_AIL) >> LPCR_AIL_SHIFT;
220         if (ail == 0) {
221             return;
222         }
223         if (ail == 1) {
224             /* AIL=1 is reserved, treat it like AIL=0 */
225             return;
226         }
227 
228     } else if (excp_model == POWERPC_EXCP_POWER10) {
229         if (!mmu_all_on && !hv_escalation) {
230             /*
231              * AIL works for HV interrupts even with guest MSR[IR/DR] disabled.
232              * Guest->guest and HV->HV interrupts do require MMU on.
233              */
234             return;
235         }
236 
237         if (*new_msr & MSR_HVB) {
238             if (!(env->spr[SPR_LPCR] & LPCR_HAIL)) {
239                 /* HV interrupts depend on LPCR[HAIL] */
240                 return;
241             }
242             ail = 3; /* HAIL=1 gives AIL=3 behaviour for HV interrupts */
243         } else {
244             ail = (env->spr[SPR_LPCR] & LPCR_AIL) >> LPCR_AIL_SHIFT;
245         }
246         if (ail == 0) {
247             return;
248         }
249         if (ail == 1 || ail == 2) {
250             /* AIL=1 and AIL=2 are reserved, treat them like AIL=0 */
251             return;
252         }
253     } else {
254         /* Other processors do not support AIL */
255         return;
256     }
257 
258     /*
259      * AIL applies, so the new MSR gets IR and DR set, and an offset applied
260      * to the new IP.
261      */
262     *new_msr |= (1 << MSR_IR) | (1 << MSR_DR);
263 
264     if (excp != POWERPC_EXCP_SYSCALL_VECTORED) {
265         if (ail == 2) {
266             *vector |= 0x0000000000018000ull;
267         } else if (ail == 3) {
268             *vector |= 0xc000000000004000ull;
269         }
270     } else {
271         /*
272          * scv AIL is a little different. AIL=2 does not change the address,
273          * only the MSR. AIL=3 replaces the 0x17000 base with 0xc...3000.
274          */
275         if (ail == 3) {
276             *vector &= ~0x0000000000017000ull; /* Un-apply the base offset */
277             *vector |= 0xc000000000003000ull; /* Apply scv's AIL=3 offset */
278         }
279     }
280 #endif
281 }
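/*
 * Worked example (informal, derived from the code above): with
 * LPCR[AIL] = 3, MSR[IR] = MSR[DR] = 1 and no MSR[HV] 0->1 escalation, an
 * external interrupt that would normally be taken at 0x500 in real mode is
 * instead taken at 0xc000000000004500 with the MMU on; with AIL = 2 it is
 * taken at 0x18500. For scv, AIL = 3 moves the 0x17000 + lev * 0x20 entry
 * points to 0xc000000000003000 + lev * 0x20, while AIL = 2 only changes
 * the MSR.
 */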
282 
283 static inline void powerpc_set_excp_state(PowerPCCPU *cpu,
284                                           target_ulong vector, target_ulong msr)
285 {
286     CPUState *cs = CPU(cpu);
287     CPUPPCState *env = &cpu->env;
288 
289     /*
290      * We don't use hreg_store_msr here as we have already handled any
291      * special case that could occur. Just store the MSR and update hflags.
292      *
293      * Note: We *MUST* not use hreg_store_msr() as-is anyway because it
294      * will prevent setting of the HV bit which some exceptions might need
295      * to do.
296      */
297     env->msr = msr & env->msr_mask;
298     hreg_compute_hflags(env);
299     env->nip = vector;
300     /* Reset exception state */
301     cs->exception_index = POWERPC_EXCP_NONE;
302     env->error_code = 0;
303 
304     /* Reset the reservation */
305     env->reserve_addr = -1;
306 
307     /*
308      * Any interrupt is context synchronizing; check if the TCG TLB needs
309      * a delayed flush on ppc64.
310      */
311     check_tlb_flush(env, false);
312 }
313 
314 /*
315  * Note that this function should be greatly optimized when called
316  * with a constant excp, from ppc_hw_interrupt
317  */
318 static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
319 {
320     CPUState *cs = CPU(cpu);
321     CPUPPCState *env = &cpu->env;
322     target_ulong msr, new_msr, vector;
323     int srr0, srr1, asrr0, asrr1, lev = -1;
324 
325     qemu_log_mask(CPU_LOG_INT, "Raise exception at " TARGET_FMT_lx
326                   " => %08x (%02x)\n", env->nip, excp, env->error_code);
327 
328     /* new srr1 value excluding must-be-zero bits */
329     if (excp_model == POWERPC_EXCP_BOOKE) {
330         msr = env->msr;
331     } else {
332         msr = env->msr & ~0x783f0000ULL;
333     }
334 
335     /*
336      * The new interrupt handler MSR preserves existing HV and ME unless
337      * explicitly overridden.
338      */
339     new_msr = env->msr & (((target_ulong)1 << MSR_ME) | MSR_HVB);
340 
341     /* target registers */
342     srr0 = SPR_SRR0;
343     srr1 = SPR_SRR1;
344     asrr0 = -1;
345     asrr1 = -1;
346 
347     /*
348      * check for special resume at 0x100 from doze/nap/sleep/winkle on
349      * P7/P8/P9
350      */
351     if (env->resume_as_sreset) {
352         excp = powerpc_reset_wakeup(cs, env, excp, &msr);
353     }
354 
355     /*
356      * The Hypervisor Emulation Assistance interrupt only exists on server
357      * arch 2.05 or later. We also don't want to generate it if we don't
358      * have HVB in msr_mask (PAPR mode).
359      */
360     if (excp == POWERPC_EXCP_HV_EMU
361 #if defined(TARGET_PPC64)
362         && !(mmu_is_64bit(env->mmu_model) && (env->msr_mask & MSR_HVB))
363 #endif /* defined(TARGET_PPC64) */
364 
365     ) {
366         excp = POWERPC_EXCP_PROGRAM;
367     }
368 
369     switch (excp) {
370     case POWERPC_EXCP_NONE:
371         /* Should never happen */
372         return;
373     case POWERPC_EXCP_CRITICAL:    /* Critical input                         */
374         switch (excp_model) {
375         case POWERPC_EXCP_40x:
376             srr0 = SPR_40x_SRR2;
377             srr1 = SPR_40x_SRR3;
378             break;
379         case POWERPC_EXCP_BOOKE:
380             srr0 = SPR_BOOKE_CSRR0;
381             srr1 = SPR_BOOKE_CSRR1;
382             break;
383         case POWERPC_EXCP_G2:
384             break;
385         default:
386             goto excp_invalid;
387         }
388         break;
389     case POWERPC_EXCP_MCHECK:    /* Machine check exception                  */
390         if (msr_me == 0) {
391             /*
392              * Machine check exception is not enabled.  Enter
393              * checkstop state.
394              */
395             fprintf(stderr, "Machine check while not allowed. "
396                     "Entering checkstop state\n");
397             if (qemu_log_separate()) {
398                 qemu_log("Machine check while not allowed. "
399                         "Entering checkstop state\n");
400             }
401             cs->halted = 1;
402             cpu_interrupt_exittb(cs);
403         }
404         if (env->msr_mask & MSR_HVB) {
405             /*
406              * The ISA specifies HV, but it can be delivered to the guest
407              * with HV clear (e.g., see FWNMI in PAPR).
408              */
409             new_msr |= (target_ulong)MSR_HVB;
410         }
411 
412         /* machine check exceptions don't have ME set */
413         new_msr &= ~((target_ulong)1 << MSR_ME);
414 
415         /* XXX: should also have something loaded in DAR / DSISR */
416         switch (excp_model) {
417         case POWERPC_EXCP_40x:
418             srr0 = SPR_40x_SRR2;
419             srr1 = SPR_40x_SRR3;
420             break;
421         case POWERPC_EXCP_BOOKE:
422             /* FIXME: choose one or the other based on CPU type */
423             srr0 = SPR_BOOKE_MCSRR0;
424             srr1 = SPR_BOOKE_MCSRR1;
425             asrr0 = SPR_BOOKE_CSRR0;
426             asrr1 = SPR_BOOKE_CSRR1;
427             break;
428         default:
429             break;
430         }
431         break;
432     case POWERPC_EXCP_DSI:       /* Data storage exception                   */
433         LOG_EXCP("DSI exception: DSISR=" TARGET_FMT_lx" DAR=" TARGET_FMT_lx
434                  "\n", env->spr[SPR_DSISR], env->spr[SPR_DAR]);
435         break;
436     case POWERPC_EXCP_ISI:       /* Instruction storage exception            */
437         LOG_EXCP("ISI exception: msr=" TARGET_FMT_lx ", nip=" TARGET_FMT_lx
438                  "\n", msr, env->nip);
439         msr |= env->error_code;
440         break;
441     case POWERPC_EXCP_EXTERNAL:  /* External input                           */
442     {
443         bool lpes0;
444 
445         cs = CPU(cpu);
446 
447         /*
448          * Exception targeting modifiers
449          *
450          * LPES0 is supported on POWER7/8/9
451          * LPES1 is not supported (old iSeries mode)
452          *
453          * On anything else, we behave as if LPES0 is 1
454          * (externals don't alter MSR:HV)
455          */
456 #if defined(TARGET_PPC64)
457         if (excp_model == POWERPC_EXCP_POWER7 ||
458             excp_model == POWERPC_EXCP_POWER8 ||
459             excp_model == POWERPC_EXCP_POWER9 ||
460             excp_model == POWERPC_EXCP_POWER10) {
461             lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
462         } else
463 #endif /* defined(TARGET_PPC64) */
464         {
465             lpes0 = true;
466         }
467 
468         if (!lpes0) {
469             new_msr |= (target_ulong)MSR_HVB;
470             new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
471             srr0 = SPR_HSRR0;
472             srr1 = SPR_HSRR1;
473         }
474         if (env->mpic_proxy) {
475             /* IACK the IRQ on delivery */
476             env->spr[SPR_BOOKE_EPR] = ldl_phys(cs->as, env->mpic_iack);
477         }
478         break;
479     }
480     case POWERPC_EXCP_ALIGN:     /* Alignment exception                      */
481         /* Get rS/rD and rA from faulting opcode */
482         /*
483          * Note: the opcode fields will not be set properly for a
484          * direct store load/store, but nobody cares as nobody
485          * actually uses direct store segments.
486          */
487         env->spr[SPR_DSISR] |= (env->error_code & 0x03FF0000) >> 16;
488         break;
489     case POWERPC_EXCP_PROGRAM:   /* Program exception                        */
490         switch (env->error_code & ~0xF) {
491         case POWERPC_EXCP_FP:
492             if ((msr_fe0 == 0 && msr_fe1 == 0) || msr_fp == 0) {
493                 LOG_EXCP("Ignore floating point exception\n");
494                 cs->exception_index = POWERPC_EXCP_NONE;
495                 env->error_code = 0;
496                 return;
497             }
498 
499             /*
500              * FP exceptions always have NIP pointing to the faulting
501              * instruction, so always use store_next and claim we are
502              * precise in the MSR.
503              */
504             msr |= 0x00100000;
505             env->spr[SPR_BOOKE_ESR] = ESR_FP;
506             break;
507         case POWERPC_EXCP_INVAL:
508             LOG_EXCP("Invalid instruction at " TARGET_FMT_lx "\n", env->nip);
509             msr |= 0x00080000;
510             env->spr[SPR_BOOKE_ESR] = ESR_PIL;
511             break;
512         case POWERPC_EXCP_PRIV:
513             msr |= 0x00040000;
514             env->spr[SPR_BOOKE_ESR] = ESR_PPR;
515             break;
516         case POWERPC_EXCP_TRAP:
517             msr |= 0x00020000;
518             env->spr[SPR_BOOKE_ESR] = ESR_PTR;
519             break;
520         default:
521             /* Should never occur */
522             cpu_abort(cs, "Invalid program exception %d. Aborting\n",
523                       env->error_code);
524             break;
525         }
526         break;
527     case POWERPC_EXCP_SYSCALL:   /* System call exception                    */
528         lev = env->error_code;
529 
530         if ((lev == 1) && cpu->vhyp) {
531             dump_hcall(env);
532         } else {
533             dump_syscall(env);
534         }
535 
536         /*
537          * We need to correct the NIP which in this case is supposed
538          * to point to the next instruction
539          */
540         env->nip += 4;
541 
542         /* "PAPR mode" built-in hypercall emulation */
543         if ((lev == 1) && cpu->vhyp) {
544             PPCVirtualHypervisorClass *vhc =
545                 PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
546             vhc->hypercall(cpu->vhyp, cpu);
547             return;
548         }
549         if (lev == 1) {
550             new_msr |= (target_ulong)MSR_HVB;
551         }
552         break;
553     case POWERPC_EXCP_SYSCALL_VECTORED: /* scv exception                     */
554         lev = env->error_code;
555         dump_syscall(env);
556         env->nip += 4;
557         new_msr |= env->msr & ((target_ulong)1 << MSR_EE);
558         new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
559         break;
560     case POWERPC_EXCP_FPU:       /* Floating-point unavailable exception     */
561     case POWERPC_EXCP_APU:       /* Auxiliary processor unavailable          */
562     case POWERPC_EXCP_DECR:      /* Decrementer exception                    */
563         break;
564     case POWERPC_EXCP_FIT:       /* Fixed-interval timer interrupt           */
565         /* FIT on 4xx */
566         LOG_EXCP("FIT exception\n");
567         break;
568     case POWERPC_EXCP_WDT:       /* Watchdog timer interrupt                 */
569         LOG_EXCP("WDT exception\n");
570         switch (excp_model) {
571         case POWERPC_EXCP_BOOKE:
572             srr0 = SPR_BOOKE_CSRR0;
573             srr1 = SPR_BOOKE_CSRR1;
574             break;
575         default:
576             break;
577         }
578         break;
579     case POWERPC_EXCP_DTLB:      /* Data TLB error                           */
580     case POWERPC_EXCP_ITLB:      /* Instruction TLB error                    */
581         break;
582     case POWERPC_EXCP_DEBUG:     /* Debug interrupt                          */
583         if (env->flags & POWERPC_FLAG_DE) {
584             /* FIXME: choose one or the other based on CPU type */
585             srr0 = SPR_BOOKE_DSRR0;
586             srr1 = SPR_BOOKE_DSRR1;
587             asrr0 = SPR_BOOKE_CSRR0;
588             asrr1 = SPR_BOOKE_CSRR1;
589             /* DBSR already modified by caller */
590         } else {
591             cpu_abort(cs, "Debug exception triggered on unsupported model\n");
592         }
593         break;
594     case POWERPC_EXCP_SPEU:      /* SPE/embedded floating-point unavailable  */
595         env->spr[SPR_BOOKE_ESR] = ESR_SPV;
596         break;
597     case POWERPC_EXCP_EFPDI:     /* Embedded floating-point data interrupt   */
598         /* XXX: TODO */
599         cpu_abort(cs, "Embedded floating point data exception "
600                   "is not implemented yet !\n");
601         env->spr[SPR_BOOKE_ESR] = ESR_SPV;
602         break;
603     case POWERPC_EXCP_EFPRI:     /* Embedded floating-point round interrupt  */
604         /* XXX: TODO */
605         cpu_abort(cs, "Embedded floating point round exception "
606                   "is not implemented yet !\n");
607         env->spr[SPR_BOOKE_ESR] = ESR_SPV;
608         break;
609     case POWERPC_EXCP_EPERFM:    /* Embedded performance monitor interrupt   */
610         /* XXX: TODO */
611         cpu_abort(cs,
612                   "Performance counter exception is not implemented yet !\n");
613         break;
614     case POWERPC_EXCP_DOORI:     /* Embedded doorbell interrupt              */
615         break;
616     case POWERPC_EXCP_DOORCI:    /* Embedded doorbell critical interrupt     */
617         srr0 = SPR_BOOKE_CSRR0;
618         srr1 = SPR_BOOKE_CSRR1;
619         break;
620     case POWERPC_EXCP_RESET:     /* System reset exception                   */
621         /* A power-saving exception sets ME, otherwise it is unchanged */
622         if (msr_pow) {
623             /* indicate that we resumed from power save mode */
624             msr |= 0x10000;
625             new_msr |= ((target_ulong)1 << MSR_ME);
626         }
627         if (env->msr_mask & MSR_HVB) {
628             /*
629              * The ISA specifies HV, but it can be delivered to the guest
630              * with HV clear (e.g., see FWNMI in PAPR, NMI injection in QEMU).
631              */
632             new_msr |= (target_ulong)MSR_HVB;
633         } else {
634             if (msr_pow) {
635                 cpu_abort(cs, "Trying to deliver power-saving system reset "
636                           "exception %d with no HV support\n", excp);
637             }
638         }
639         break;
640     case POWERPC_EXCP_DSEG:      /* Data segment exception                   */
641     case POWERPC_EXCP_ISEG:      /* Instruction segment exception            */
642     case POWERPC_EXCP_TRACE:     /* Trace exception                          */
643         break;
644     case POWERPC_EXCP_HISI:      /* Hypervisor instruction storage exception */
645         msr |= env->error_code;
646         /* fall through */
647     case POWERPC_EXCP_HDECR:     /* Hypervisor decrementer exception         */
648     case POWERPC_EXCP_HDSI:      /* Hypervisor data storage exception        */
649     case POWERPC_EXCP_HDSEG:     /* Hypervisor data segment exception        */
650     case POWERPC_EXCP_HISEG:     /* Hypervisor instruction segment exception */
651     case POWERPC_EXCP_SDOOR_HV:  /* Hypervisor Doorbell interrupt            */
652     case POWERPC_EXCP_HV_EMU:
653     case POWERPC_EXCP_HVIRT:     /* Hypervisor virtualization                */
654         srr0 = SPR_HSRR0;
655         srr1 = SPR_HSRR1;
656         new_msr |= (target_ulong)MSR_HVB;
657         new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
658         break;
659     case POWERPC_EXCP_VPU:       /* Vector unavailable exception             */
660     case POWERPC_EXCP_VSXU:       /* VSX unavailable exception               */
661     case POWERPC_EXCP_FU:         /* Facility unavailable exception          */
662 #ifdef TARGET_PPC64
663         env->spr[SPR_FSCR] |= ((target_ulong)env->error_code << 56);
664 #endif
665         break;
666     case POWERPC_EXCP_HV_FU:     /* Hypervisor Facility Unavailable Exception */
667 #ifdef TARGET_PPC64
668         env->spr[SPR_HFSCR] |= ((target_ulong)env->error_code << FSCR_IC_POS);
669         srr0 = SPR_HSRR0;
670         srr1 = SPR_HSRR1;
671         new_msr |= (target_ulong)MSR_HVB;
672         new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
673 #endif
674         break;
675     case POWERPC_EXCP_PIT:       /* Programmable interval timer interrupt    */
676         LOG_EXCP("PIT exception\n");
677         break;
678     case POWERPC_EXCP_IO:        /* IO error exception                       */
679         /* XXX: TODO */
680         cpu_abort(cs, "601 IO error exception is not implemented yet !\n");
681         break;
682     case POWERPC_EXCP_RUNM:      /* Run mode exception                       */
683         /* XXX: TODO */
684         cpu_abort(cs, "601 run mode exception is not implemented yet !\n");
685         break;
686     case POWERPC_EXCP_EMUL:      /* Emulation trap exception                 */
687         /* XXX: TODO */
688         cpu_abort(cs, "602 emulation trap exception "
689                   "is not implemented yet !\n");
690         break;
691     case POWERPC_EXCP_IFTLB:     /* Instruction fetch TLB error              */
692     case POWERPC_EXCP_DLTLB:     /* Data load TLB miss                       */
693     case POWERPC_EXCP_DSTLB:     /* Data store TLB miss                      */
694         switch (excp_model) {
695         case POWERPC_EXCP_602:
696         case POWERPC_EXCP_603:
697         case POWERPC_EXCP_603E:
698         case POWERPC_EXCP_G2:
699             /* Swap temporary saved registers with GPRs */
700             if (!(new_msr & ((target_ulong)1 << MSR_TGPR))) {
701                 new_msr |= (target_ulong)1 << MSR_TGPR;
702                 hreg_swap_gpr_tgpr(env);
703             }
704             /* fall through */
705         case POWERPC_EXCP_7x5:
706 #if defined(DEBUG_SOFTWARE_TLB)
707             if (qemu_log_enabled()) {
708                 const char *es;
709                 target_ulong *miss, *cmp;
710                 int en;
711 
712                 if (excp == POWERPC_EXCP_IFTLB) {
713                     es = "I";
714                     en = 'I';
715                     miss = &env->spr[SPR_IMISS];
716                     cmp = &env->spr[SPR_ICMP];
717                 } else {
718                     if (excp == POWERPC_EXCP_DLTLB) {
719                         es = "DL";
720                     } else {
721                         es = "DS";
722                     }
723                     en = 'D';
724                     miss = &env->spr[SPR_DMISS];
725                     cmp = &env->spr[SPR_DCMP];
726                 }
727                 qemu_log("6xx %sTLB miss: %cM " TARGET_FMT_lx " %cC "
728                          TARGET_FMT_lx " H1 " TARGET_FMT_lx " H2 "
729                          TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp,
730                          env->spr[SPR_HASH1], env->spr[SPR_HASH2],
731                          env->error_code);
732             }
733 #endif
734             msr |= env->crf[0] << 28;
735             msr |= env->error_code; /* key, D/I, S/L bits */
736             /* Set the way using an LRU mechanism */
737             msr |= ((env->last_way + 1) & (env->nb_ways - 1)) << 17;
738             break;
739         case POWERPC_EXCP_74xx:
740 #if defined(DEBUG_SOFTWARE_TLB)
741             if (qemu_log_enabled()) {
742                 const char *es;
743                 target_ulong *miss, *cmp;
744                 int en;
745 
746                 if (excp == POWERPC_EXCP_IFTLB) {
747                     es = "I";
748                     en = 'I';
749                     miss = &env->spr[SPR_TLBMISS];
750                     cmp = &env->spr[SPR_PTEHI];
751                 } else {
752                     if (excp == POWERPC_EXCP_DLTLB) {
753                         es = "DL";
754                     } else {
755                         es = "DS";
756                     }
757                     en = 'D';
758                     miss = &env->spr[SPR_TLBMISS];
759                     cmp = &env->spr[SPR_PTEHI];
760                 }
761                 qemu_log("74xx %sTLB miss: %cM " TARGET_FMT_lx " %cC "
762                          TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp,
763                          env->error_code);
764             }
765 #endif
766             msr |= env->error_code; /* key bit */
767             break;
768         default:
769             cpu_abort(cs, "Invalid TLB miss exception\n");
770             break;
771         }
772         break;
773     case POWERPC_EXCP_FPA:       /* Floating-point assist exception          */
774         /* XXX: TODO */
775         cpu_abort(cs, "Floating point assist exception "
776                   "is not implemented yet !\n");
777         break;
778     case POWERPC_EXCP_DABR:      /* Data address breakpoint                  */
779         /* XXX: TODO */
780         cpu_abort(cs, "DABR exception is not implemented yet !\n");
781         break;
782     case POWERPC_EXCP_IABR:      /* Instruction address breakpoint           */
783         /* XXX: TODO */
784         cpu_abort(cs, "IABR exception is not implemented yet !\n");
785         break;
786     case POWERPC_EXCP_SMI:       /* System management interrupt              */
787         /* XXX: TODO */
788         cpu_abort(cs, "SMI exception is not implemented yet !\n");
789         break;
790     case POWERPC_EXCP_THERM:     /* Thermal interrupt                        */
791         /* XXX: TODO */
792         cpu_abort(cs, "Thermal management exception "
793                   "is not implemented yet !\n");
794         break;
795     case POWERPC_EXCP_PERFM:     /* Embedded performance monitor interrupt   */
796         /* XXX: TODO */
797         cpu_abort(cs,
798                   "Performance counter exception is not implemented yet !\n");
799         break;
800     case POWERPC_EXCP_VPUA:      /* Vector assist exception                  */
801         /* XXX: TODO */
802         cpu_abort(cs, "VPU assist exception is not implemented yet !\n");
803         break;
804     case POWERPC_EXCP_SOFTP:     /* Soft patch exception                     */
805         /* XXX: TODO */
806         cpu_abort(cs,
807                   "970 soft-patch exception is not implemented yet !\n");
808         break;
809     case POWERPC_EXCP_MAINT:     /* Maintenance exception                    */
810         /* XXX: TODO */
811         cpu_abort(cs,
812                   "970 maintenance exception is not implemented yet !\n");
813         break;
814     case POWERPC_EXCP_MEXTBR:    /* Maskable external breakpoint             */
815         /* XXX: TODO */
816         cpu_abort(cs, "Maskable external exception "
817                   "is not implemented yet !\n");
818         break;
819     case POWERPC_EXCP_NMEXTBR:   /* Non maskable external breakpoint         */
820         /* XXX: TODO */
821         cpu_abort(cs, "Non maskable external exception "
822                   "is not implemented yet !\n");
823         break;
824     default:
825     excp_invalid:
826         cpu_abort(cs, "Invalid PowerPC exception %d. Aborting\n", excp);
827         break;
828     }
829 
830     /* Sanity check */
831     if (!(env->msr_mask & MSR_HVB)) {
832         if (new_msr & MSR_HVB) {
833             cpu_abort(cs, "Trying to deliver HV exception (MSR) %d with "
834                       "no HV support\n", excp);
835         }
836         if (srr0 == SPR_HSRR0) {
837             cpu_abort(cs, "Trying to deliver HV exception (HSRR) %d with "
838                       "no HV support\n", excp);
839         }
840     }
841 
842     /*
843      * Sort out the endianness of the interrupt; this differs depending on
844      * the CPU, the HV mode, etc.
845      */
846 #ifdef TARGET_PPC64
847     if (excp_model == POWERPC_EXCP_POWER7) {
848         if (!(new_msr & MSR_HVB) && (env->spr[SPR_LPCR] & LPCR_ILE)) {
849             new_msr |= (target_ulong)1 << MSR_LE;
850         }
851     } else if (excp_model == POWERPC_EXCP_POWER8) {
852         if (new_msr & MSR_HVB) {
853             if (env->spr[SPR_HID0] & HID0_HILE) {
854                 new_msr |= (target_ulong)1 << MSR_LE;
855             }
856         } else if (env->spr[SPR_LPCR] & LPCR_ILE) {
857             new_msr |= (target_ulong)1 << MSR_LE;
858         }
859     } else if (excp_model == POWERPC_EXCP_POWER9 ||
860                excp_model == POWERPC_EXCP_POWER10) {
861         if (new_msr & MSR_HVB) {
862             if (env->spr[SPR_HID0] & HID0_POWER9_HILE) {
863                 new_msr |= (target_ulong)1 << MSR_LE;
864             }
865         } else if (env->spr[SPR_LPCR] & LPCR_ILE) {
866             new_msr |= (target_ulong)1 << MSR_LE;
867         }
868     } else if (msr_ile) {
869         new_msr |= (target_ulong)1 << MSR_LE;
870     }
871 #else
872     if (msr_ile) {
873         new_msr |= (target_ulong)1 << MSR_LE;
874     }
875 #endif
876 
877     vector = env->excp_vectors[excp];
878     if (vector == (target_ulong)-1ULL) {
879         cpu_abort(cs, "Raised an exception without defined vector %d\n",
880                   excp);
881     }
882 
883     vector |= env->excp_prefix;
884 
885     /* If any alternate SRR registers are defined, duplicate the saved values */
886     if (asrr0 != -1) {
887         env->spr[asrr0] = env->nip;
888     }
889     if (asrr1 != -1) {
890         env->spr[asrr1] = msr;
891     }
892 
893 #if defined(TARGET_PPC64)
894     if (excp_model == POWERPC_EXCP_BOOKE) {
895         if (env->spr[SPR_BOOKE_EPCR] & EPCR_ICM) {
896             /* Cat.64-bit: EPCR.ICM is copied to MSR.CM */
897             new_msr |= (target_ulong)1 << MSR_CM;
898         } else {
899             vector = (uint32_t)vector;
900         }
901     } else {
902         if (!msr_isf && !mmu_is_64bit(env->mmu_model)) {
903             vector = (uint32_t)vector;
904         } else {
905             new_msr |= (target_ulong)1 << MSR_SF;
906         }
907     }
908 #endif
909 
910     if (excp != POWERPC_EXCP_SYSCALL_VECTORED) {
911         /* Save PC */
912         env->spr[srr0] = env->nip;
913 
914         /* Save MSR */
915         env->spr[srr1] = msr;
916 
917 #if defined(TARGET_PPC64)
918     } else {
919         vector += lev * 0x20;
920 
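        /*
         * Note: scv does not use SRR0/SRR1; the return address is saved in
         * LR and the old MSR in CTR (helper_rfscv() restores from the same
         * pair).
         */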
921         env->lr = env->nip;
922         env->ctr = msr;
923 #endif
924     }
925 
926     /* This can update new_msr and vector if AIL applies */
927     ppc_excp_apply_ail(cpu, excp_model, excp, msr, &new_msr, &vector);
928 
929     powerpc_set_excp_state(cpu, vector, new_msr);
930 }
931 
932 void ppc_cpu_do_interrupt(CPUState *cs)
933 {
934     PowerPCCPU *cpu = POWERPC_CPU(cs);
935     CPUPPCState *env = &cpu->env;
936 
937     powerpc_excp(cpu, env->excp_model, cs->exception_index);
938 }
939 
940 static void ppc_hw_interrupt(CPUPPCState *env)
941 {
942     PowerPCCPU *cpu = env_archcpu(env);
943     bool async_deliver;
944 
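    /*
     * The checks below are roughly in delivery-priority order: system reset
     * and machine check are always delivered first; everything after them
     * is gated on MSR:EE (via async_deliver) and/or LPCR controls.
     */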
945     /* External reset */
946     if (env->pending_interrupts & (1 << PPC_INTERRUPT_RESET)) {
947         env->pending_interrupts &= ~(1 << PPC_INTERRUPT_RESET);
948         powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_RESET);
949         return;
950     }
951     /* Machine check exception */
952     if (env->pending_interrupts & (1 << PPC_INTERRUPT_MCK)) {
953         env->pending_interrupts &= ~(1 << PPC_INTERRUPT_MCK);
954         powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_MCHECK);
955         return;
956     }
957 #if 0 /* TODO */
958     /* External debug exception */
959     if (env->pending_interrupts & (1 << PPC_INTERRUPT_DEBUG)) {
960         env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DEBUG);
961         powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DEBUG);
962         return;
963     }
964 #endif
965 
966     /*
967      * For interrupts that gate on MSR:EE we need to be a bit more
968      * subtle: they must be let through even when EE is clear if we are
969      * coming out of certain power management states, so that they can
970      * be turned into a 0x100 (system reset).
971      */
972     async_deliver = (msr_ee != 0) || env->resume_as_sreset;
973 
974     /* Hypervisor decrementer exception */
975     if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDECR)) {
976         /* LPCR will be clear when not supported so this will work */
977         bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE);
978         if ((async_deliver || msr_hv == 0) && hdice) {
979             /* HDEC clears on delivery */
980             env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDECR);
981             powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_HDECR);
982             return;
983         }
984     }
985 
986     /* Hypervisor virtualization interrupt */
987     if (env->pending_interrupts & (1 << PPC_INTERRUPT_HVIRT)) {
988         /* LPCR will be clear when not supported so this will work */
989         bool hvice = !!(env->spr[SPR_LPCR] & LPCR_HVICE);
990         if ((async_deliver || msr_hv == 0) && hvice) {
991             powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_HVIRT);
992             return;
993         }
994     }
995 
996     /* External interrupt can ignore MSR:EE under some circumstances */
997     if (env->pending_interrupts & (1 << PPC_INTERRUPT_EXT)) {
998         bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
999         bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC);
1000         /* HEIC blocks delivery to the hypervisor */
1001         if ((async_deliver && !(heic && msr_hv && !msr_pr)) ||
1002             (env->has_hv_mode && msr_hv == 0 && !lpes0)) {
1003             powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_EXTERNAL);
1004             return;
1005         }
1006     }
1007     if (msr_ce != 0) {
1008         /* External critical interrupt */
1009         if (env->pending_interrupts & (1 << PPC_INTERRUPT_CEXT)) {
1010             powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_CRITICAL);
1011             return;
1012         }
1013     }
1014     if (async_deliver != 0) {
1015         /* Watchdog timer on embedded PowerPC */
1016         if (env->pending_interrupts & (1 << PPC_INTERRUPT_WDT)) {
1017             env->pending_interrupts &= ~(1 << PPC_INTERRUPT_WDT);
1018             powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_WDT);
1019             return;
1020         }
1021         if (env->pending_interrupts & (1 << PPC_INTERRUPT_CDOORBELL)) {
1022             env->pending_interrupts &= ~(1 << PPC_INTERRUPT_CDOORBELL);
1023             powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DOORCI);
1024             return;
1025         }
1026         /* Fixed interval timer on embedded PowerPC */
1027         if (env->pending_interrupts & (1 << PPC_INTERRUPT_FIT)) {
1028             env->pending_interrupts &= ~(1 << PPC_INTERRUPT_FIT);
1029             powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_FIT);
1030             return;
1031         }
1032         /* Programmable interval timer on embedded PowerPC */
1033         if (env->pending_interrupts & (1 << PPC_INTERRUPT_PIT)) {
1034             env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PIT);
1035             powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_PIT);
1036             return;
1037         }
1038         /* Decrementer exception */
1039         if (env->pending_interrupts & (1 << PPC_INTERRUPT_DECR)) {
1040             if (ppc_decr_clear_on_delivery(env)) {
1041                 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DECR);
1042             }
1043             powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DECR);
1044             return;
1045         }
1046         if (env->pending_interrupts & (1 << PPC_INTERRUPT_DOORBELL)) {
1047             env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DOORBELL);
1048             if (is_book3s_arch2x(env)) {
1049                 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_SDOOR);
1050             } else {
1051                 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DOORI);
1052             }
1053             return;
1054         }
1055         if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDOORBELL)) {
1056             env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDOORBELL);
1057             powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_SDOOR_HV);
1058             return;
1059         }
1060         if (env->pending_interrupts & (1 << PPC_INTERRUPT_PERFM)) {
1061             env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PERFM);
1062             powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_PERFM);
1063             return;
1064         }
1065         /* Thermal interrupt */
1066         if (env->pending_interrupts & (1 << PPC_INTERRUPT_THERM)) {
1067             env->pending_interrupts &= ~(1 << PPC_INTERRUPT_THERM);
1068             powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_THERM);
1069             return;
1070         }
1071     }
1072 
1073     if (env->resume_as_sreset) {
1074         /*
1075          * This is a bug! It means that has_work took us out of halt without
1076          * anything to deliver while in a PM state that requires getting
1077          * out via a 0x100.
1078          *
1079          * This means we will incorrectly execute past the power management
1080          * instruction instead of triggering a reset.
1081          *
1082          * It generally indicates a discrepancy between the wakeup conditions
1083          * in the processor's has_work implementation and the logic in this function.
1084          */
1085         cpu_abort(env_cpu(env),
1086                   "Wakeup from PM state but interrupt Undelivered");
1087     }
1088 }
1089 
1090 void ppc_cpu_do_system_reset(CPUState *cs)
1091 {
1092     PowerPCCPU *cpu = POWERPC_CPU(cs);
1093     CPUPPCState *env = &cpu->env;
1094 
1095     powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_RESET);
1096 }
1097 
1098 void ppc_cpu_do_fwnmi_machine_check(CPUState *cs, target_ulong vector)
1099 {
1100     PowerPCCPU *cpu = POWERPC_CPU(cs);
1101     CPUPPCState *env = &cpu->env;
1102     target_ulong msr = 0;
1103 
1104     /*
1105      * Set MSR and NIP for the handler; SRR0/1, DAR and DSISR have already
1106      * been set by KVM.
1107      */
1108     msr = (1ULL << MSR_ME);
1109     msr |= env->msr & (1ULL << MSR_SF);
1110     if (ppc_interrupts_little_endian(cpu)) {
1111         msr |= (1ULL << MSR_LE);
1112     }
1113 
1114     powerpc_set_excp_state(cpu, vector, msr);
1115 }
1116 #endif /* !CONFIG_USER_ONLY */
1117 
1118 bool ppc_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
1119 {
1120     PowerPCCPU *cpu = POWERPC_CPU(cs);
1121     CPUPPCState *env = &cpu->env;
1122 
1123     if (interrupt_request & CPU_INTERRUPT_HARD) {
1124         ppc_hw_interrupt(env);
1125         if (env->pending_interrupts == 0) {
1126             cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
1127         }
1128         return true;
1129     }
1130     return false;
1131 }
1132 
1133 #if defined(DEBUG_OP)
1134 static void cpu_dump_rfi(target_ulong RA, target_ulong msr)
1135 {
1136     qemu_log("Return from exception at " TARGET_FMT_lx " with flags "
1137              TARGET_FMT_lx "\n", RA, msr);
1138 }
1139 #endif
1140 
1141 /*****************************************************************************/
1142 /* Exceptions processing helpers */
1143 
1144 void raise_exception_err_ra(CPUPPCState *env, uint32_t exception,
1145                             uint32_t error_code, uintptr_t raddr)
1146 {
1147     CPUState *cs = env_cpu(env);
1148 
1149     cs->exception_index = exception;
1150     env->error_code = error_code;
1151     cpu_loop_exit_restore(cs, raddr);
1152 }
1153 
1154 void raise_exception_err(CPUPPCState *env, uint32_t exception,
1155                          uint32_t error_code)
1156 {
1157     raise_exception_err_ra(env, exception, error_code, 0);
1158 }
1159 
1160 void raise_exception(CPUPPCState *env, uint32_t exception)
1161 {
1162     raise_exception_err_ra(env, exception, 0, 0);
1163 }
1164 
1165 void raise_exception_ra(CPUPPCState *env, uint32_t exception,
1166                         uintptr_t raddr)
1167 {
1168     raise_exception_err_ra(env, exception, 0, raddr);
1169 }
1170 
1171 #ifdef CONFIG_TCG
1172 void helper_raise_exception_err(CPUPPCState *env, uint32_t exception,
1173                                 uint32_t error_code)
1174 {
1175     raise_exception_err_ra(env, exception, error_code, 0);
1176 }
1177 
1178 void helper_raise_exception(CPUPPCState *env, uint32_t exception)
1179 {
1180     raise_exception_err_ra(env, exception, 0, 0);
1181 }
1182 #endif
1183 
1184 #if !defined(CONFIG_USER_ONLY)
1185 #ifdef CONFIG_TCG
1186 void helper_store_msr(CPUPPCState *env, target_ulong val)
1187 {
1188     uint32_t excp = hreg_store_msr(env, val, 0);
1189 
1190     if (excp != 0) {
1191         CPUState *cs = env_cpu(env);
1192         cpu_interrupt_exittb(cs);
1193         raise_exception(env, excp);
1194     }
1195 }
1196 
1197 #if defined(TARGET_PPC64)
1198 void helper_scv(CPUPPCState *env, uint32_t lev)
1199 {
1200     if (env->spr[SPR_FSCR] & (1ull << FSCR_SCV)) {
1201         raise_exception_err(env, POWERPC_EXCP_SYSCALL_VECTORED, lev);
1202     } else {
1203         raise_exception_err(env, POWERPC_EXCP_FU, FSCR_IC_SCV);
1204     }
1205 }
1206 
1207 void helper_pminsn(CPUPPCState *env, powerpc_pm_insn_t insn)
1208 {
1209     CPUState *cs;
1210 
1211     cs = env_cpu(env);
1212     cs->halted = 1;
1213 
1214     /*
1215      * The architecture specifies that HDEC interrupts are discarded
1216      * in PM states
1217      */
1218     env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDECR);
1219 
1220     /* Condition for waking up at 0x100 */
1221     env->resume_as_sreset = (insn != PPC_PM_STOP) ||
1222         (env->spr[SPR_PSSCR] & PSSCR_EC);
1223 }
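/*
 * Example (informal): "stop" with PSSCR[EC] = 1, or any of
 * doze/nap/sleep/winkle, halts the vCPU and sets resume_as_sreset so that
 * the next wakeup event is presented as a 0x100 system reset via
 * powerpc_reset_wakeup(); "stop" with PSSCR[EC] = 0 instead has the wakeup
 * event delivered as the corresponding ordinary interrupt.
 */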
1224 #endif /* defined(TARGET_PPC64) */
1225 #endif /* CONFIG_TCG */
1226 
1227 static inline void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr)
1228 {
1229     CPUState *cs = env_cpu(env);
1230 
1231     /* MSR:POW cannot be set by any form of rfi */
1232     msr &= ~(1ULL << MSR_POW);
1233 
1234 #if defined(TARGET_PPC64)
1235     /* Switching to 32-bit? Crop the nip */
1236     if (!msr_is_64bit(env, msr)) {
1237         nip = (uint32_t)nip;
1238     }
1239 #else
1240     nip = (uint32_t)nip;
1241 #endif
1242     /* XXX: beware: this is false if VLE is supported */
1243     env->nip = nip & ~((target_ulong)0x00000003);
1244     hreg_store_msr(env, msr, 1);
1245 #if defined(DEBUG_OP)
1246     cpu_dump_rfi(env->nip, env->msr);
1247 #endif
1248     /*
1249      * No need to raise an exception here, as rfi is always the last
1250      * insn of a TB
1251      */
1252     cpu_interrupt_exittb(cs);
1253     /* Reset the reservation */
1254     env->reserve_addr = -1;
1255 
1256     /* Context synchronizing: check if TCG TLB needs flush */
1257     check_tlb_flush(env, false);
1258 }
1259 
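/*
 * The rfi-family helpers below differ only in which register pair the PC
 * and MSR are restored from: SRR0/SRR1 (rfi/rfid), LR/CTR (rfscv),
 * HSRR0/HSRR1 (hrfid), and the 40x/BookE critical, debug and machine-check
 * pairs.
 */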
1260 #ifdef CONFIG_TCG
1261 void helper_rfi(CPUPPCState *env)
1262 {
1263     do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1] & 0xfffffffful);
1264 }
1265 
1266 #define MSR_BOOK3S_MASK
1267 #if defined(TARGET_PPC64)
1268 void helper_rfid(CPUPPCState *env)
1269 {
1270     /*
1271      * The architecture defines a number of rules for which bits can
1272      * change, but in practice we handle this in hreg_store_msr(), which
1273      * will be called by do_rfi(), so there is no need to filter
1274      * here.
1275      */
1276     do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1]);
1277 }
1278 
1279 void helper_rfscv(CPUPPCState *env)
1280 {
1281     do_rfi(env, env->lr, env->ctr);
1282 }
1283 
1284 void helper_hrfid(CPUPPCState *env)
1285 {
1286     do_rfi(env, env->spr[SPR_HSRR0], env->spr[SPR_HSRR1]);
1287 }
1288 #endif
1289 
1290 /*****************************************************************************/
1291 /* Embedded PowerPC specific helpers */
1292 void helper_40x_rfci(CPUPPCState *env)
1293 {
1294     do_rfi(env, env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3]);
1295 }
1296 
1297 void helper_rfci(CPUPPCState *env)
1298 {
1299     do_rfi(env, env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1]);
1300 }
1301 
1302 void helper_rfdi(CPUPPCState *env)
1303 {
1304     /* FIXME: choose CSRR1 or DSRR1 based on cpu type */
1305     do_rfi(env, env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1]);
1306 }
1307 
1308 void helper_rfmci(CPUPPCState *env)
1309 {
1310     /* FIXME: choose CSRR1 or MCSRR1 based on cpu type */
1311     do_rfi(env, env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1]);
1312 }
1313 #endif /* CONFIG_TCG */
1314 #endif /* !defined(CONFIG_USER_ONLY) */
1315 
1316 #ifdef CONFIG_TCG
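/*
 * tw/td trap helpers: the TO field (passed in 'flags') selects which
 * comparisons of arg1 and arg2 cause a trap: 0x10 = signed less-than,
 * 0x08 = signed greater-than, 0x04 = equal, 0x02 = unsigned less-than,
 * 0x01 = unsigned greater-than. For instance "tw 31,r0,r0" (TO = 0x1f)
 * always traps because the equality condition is met.
 */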
1317 void helper_tw(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
1318                uint32_t flags)
1319 {
1320     if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
1321                   ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
1322                   ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
1323                   ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
1324                   ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
1325         raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
1326                                POWERPC_EXCP_TRAP, GETPC());
1327     }
1328 }
1329 
1330 #if defined(TARGET_PPC64)
1331 void helper_td(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
1332                uint32_t flags)
1333 {
1334     if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
1335                   ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
1336                   ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
1337                   ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
1338                   ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01))))) {
1339         raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
1340                                POWERPC_EXCP_TRAP, GETPC());
1341     }
1342 }
1343 #endif
1344 #endif
1345 
1346 #if !defined(CONFIG_USER_ONLY)
1347 /*****************************************************************************/
1348 /* PowerPC 601 specific instructions (POWER bridge) */
1349 
1350 #ifdef CONFIG_TCG
1351 void helper_rfsvc(CPUPPCState *env)
1352 {
1353     do_rfi(env, env->lr, env->ctr & 0x0000FFFF);
1354 }
1355 
1356 /* Embedded.Processor Control */
1357 static int dbell2irq(target_ulong rb)
1358 {
1359     int msg = rb & DBELL_TYPE_MASK;
1360     int irq = -1;
1361 
1362     switch (msg) {
1363     case DBELL_TYPE_DBELL:
1364         irq = PPC_INTERRUPT_DOORBELL;
1365         break;
1366     case DBELL_TYPE_DBELL_CRIT:
1367         irq = PPC_INTERRUPT_CDOORBELL;
1368         break;
1369     case DBELL_TYPE_G_DBELL:
1370     case DBELL_TYPE_G_DBELL_CRIT:
1371     case DBELL_TYPE_G_DBELL_MC:
1372         /* XXX implement */
1373     default:
1374         break;
1375     }
1376 
1377     return irq;
1378 }
1379 
1380 void helper_msgclr(CPUPPCState *env, target_ulong rb)
1381 {
1382     int irq = dbell2irq(rb);
1383 
1384     if (irq < 0) {
1385         return;
1386     }
1387 
1388     env->pending_interrupts &= ~(1 << irq);
1389 }
1390 
1391 void helper_msgsnd(target_ulong rb)
1392 {
1393     int irq = dbell2irq(rb);
1394     int pir = rb & DBELL_PIRTAG_MASK;
1395     CPUState *cs;
1396 
1397     if (irq < 0) {
1398         return;
1399     }
1400 
1401     qemu_mutex_lock_iothread();
1402     CPU_FOREACH(cs) {
1403         PowerPCCPU *cpu = POWERPC_CPU(cs);
1404         CPUPPCState *cenv = &cpu->env;
1405 
1406         if ((rb & DBELL_BRDCAST) || (cenv->spr[SPR_BOOKE_PIR] == pir)) {
1407             cenv->pending_interrupts |= 1 << irq;
1408             cpu_interrupt(cs, CPU_INTERRUPT_HARD);
1409         }
1410     }
1411     qemu_mutex_unlock_iothread();
1412 }
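/*
 * Example (informal): a msgsnd whose rb encodes DBELL_TYPE_DBELL with
 * DBELL_BRDCAST set raises PPC_INTERRUPT_DOORBELL on every vCPU; without
 * DBELL_BRDCAST only the vCPU whose SPR_BOOKE_PIR matches the PIR tag in
 * rb is interrupted.
 */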
1413 
1414 /* Server Processor Control */
1415 
1416 static bool dbell_type_server(target_ulong rb)
1417 {
1418     /*
1419      * A Directed Hypervisor Doorbell message is sent only if the
1420      * message type is 5. All other types are reserved and the
1421      * instruction is a no-op
1422      */
1423     return (rb & DBELL_TYPE_MASK) == DBELL_TYPE_DBELL_SERVER;
1424 }
1425 
1426 void helper_book3s_msgclr(CPUPPCState *env, target_ulong rb)
1427 {
1428     if (!dbell_type_server(rb)) {
1429         return;
1430     }
1431 
1432     env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDOORBELL);
1433 }
1434 
1435 static void book3s_msgsnd_common(int pir, int irq)
1436 {
1437     CPUState *cs;
1438 
1439     qemu_mutex_lock_iothread();
1440     CPU_FOREACH(cs) {
1441         PowerPCCPU *cpu = POWERPC_CPU(cs);
1442         CPUPPCState *cenv = &cpu->env;
1443 
1444         /* TODO: broadcast message to all threads of the same processor */
1445         if (cenv->spr_cb[SPR_PIR].default_value == pir) {
1446             cenv->pending_interrupts |= 1 << irq;
1447             cpu_interrupt(cs, CPU_INTERRUPT_HARD);
1448         }
1449     }
1450     qemu_mutex_unlock_iothread();
1451 }
1452 
1453 void helper_book3s_msgsnd(target_ulong rb)
1454 {
1455     int pir = rb & DBELL_PROCIDTAG_MASK;
1456 
1457     if (!dbell_type_server(rb)) {
1458         return;
1459     }
1460 
1461     book3s_msgsnd_common(pir, PPC_INTERRUPT_HDOORBELL);
1462 }
1463 
1464 #if defined(TARGET_PPC64)
1465 void helper_book3s_msgclrp(CPUPPCState *env, target_ulong rb)
1466 {
1467     helper_hfscr_facility_check(env, HFSCR_MSGP, "msgclrp", HFSCR_IC_MSGP);
1468 
1469     if (!dbell_type_server(rb)) {
1470         return;
1471     }
1472 
1473     env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DOORBELL);
1474 }
1475 
1476 /*
1477  * Sends a message to other threads that are on the same
1478  * multi-threaded processor
1479  */
1480 void helper_book3s_msgsndp(CPUPPCState *env, target_ulong rb)
1481 {
1482     int pir = env->spr_cb[SPR_PIR].default_value;
1483 
1484     helper_hfscr_facility_check(env, HFSCR_MSGP, "msgsndp", HFSCR_IC_MSGP);
1485 
1486     if (!dbell_type_server(rb)) {
1487         return;
1488     }
1489 
1490     /* TODO: TCG supports only one thread */
1491 
1492     book3s_msgsnd_common(pir, PPC_INTERRUPT_DOORBELL);
1493 }
1494 #endif
1495 #endif /* CONFIG_TCG */
1496 #endif
1497 
1498 #ifdef CONFIG_TCG
1499 void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
1500                                  MMUAccessType access_type,
1501                                  int mmu_idx, uintptr_t retaddr)
1502 {
1503     CPUPPCState *env = cs->env_ptr;
1504     uint32_t insn;
1505 
1506     /* Restore state and reload the insn we executed, for filling in DSISR.  */
1507     cpu_restore_state(cs, retaddr, true);
1508     insn = cpu_ldl_code(env, env->nip);
1509 
1510     cs->exception_index = POWERPC_EXCP_ALIGN;
1511     env->error_code = insn & 0x03FF0000;
1512     cpu_loop_exit(cs);
1513 }
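/*
 * Note: error_code keeps the opcode's rS/rD and rA fields (bits 0x03FF0000
 * of the instruction); the POWERPC_EXCP_ALIGN case in powerpc_excp() later
 * shifts them into SPR_DSISR for the guest's alignment handler.
 */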
1514 #endif
1515