xref: /openbmc/qemu/target/ppc/excp_helper.c (revision cbb45ff0)
1 /*
2  *  PowerPC exception emulation helpers for QEMU.
3  *
4  *  Copyright (c) 2003-2007 Jocelyn Mayer
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #include "qemu/osdep.h"
20 #include "qemu/main-loop.h"
21 #include "cpu.h"
22 #include "exec/exec-all.h"
23 #include "internal.h"
24 #include "helper_regs.h"
25 
26 #include "trace.h"
27 
28 #ifdef CONFIG_TCG
29 #include "exec/helper-proto.h"
30 #include "exec/cpu_ldst.h"
31 #endif
32 
33 /*****************************************************************************/
34 /* Exception processing */
35 #if !defined(CONFIG_USER_ONLY)
36 
37 static const char *powerpc_excp_name(int excp)
38 {
39     switch (excp) {
40     case POWERPC_EXCP_CRITICAL: return "CRITICAL";
41     case POWERPC_EXCP_MCHECK:   return "MCHECK";
42     case POWERPC_EXCP_DSI:      return "DSI";
43     case POWERPC_EXCP_ISI:      return "ISI";
44     case POWERPC_EXCP_EXTERNAL: return "EXTERNAL";
45     case POWERPC_EXCP_ALIGN:    return "ALIGN";
46     case POWERPC_EXCP_PROGRAM:  return "PROGRAM";
47     case POWERPC_EXCP_FPU:      return "FPU";
48     case POWERPC_EXCP_SYSCALL:  return "SYSCALL";
49     case POWERPC_EXCP_APU:      return "APU";
50     case POWERPC_EXCP_DECR:     return "DECR";
51     case POWERPC_EXCP_FIT:      return "FIT";
52     case POWERPC_EXCP_WDT:      return "WDT";
53     case POWERPC_EXCP_DTLB:     return "DTLB";
54     case POWERPC_EXCP_ITLB:     return "ITLB";
55     case POWERPC_EXCP_DEBUG:    return "DEBUG";
56     case POWERPC_EXCP_SPEU:     return "SPEU";
57     case POWERPC_EXCP_EFPDI:    return "EFPDI";
58     case POWERPC_EXCP_EFPRI:    return "EFPRI";
59     case POWERPC_EXCP_EPERFM:   return "EPERFM";
60     case POWERPC_EXCP_DOORI:    return "DOORI";
61     case POWERPC_EXCP_DOORCI:   return "DOORCI";
62     case POWERPC_EXCP_GDOORI:   return "GDOORI";
63     case POWERPC_EXCP_GDOORCI:  return "GDOORCI";
64     case POWERPC_EXCP_HYPPRIV:  return "HYPPRIV";
65     case POWERPC_EXCP_RESET:    return "RESET";
66     case POWERPC_EXCP_DSEG:     return "DSEG";
67     case POWERPC_EXCP_ISEG:     return "ISEG";
68     case POWERPC_EXCP_HDECR:    return "HDECR";
69     case POWERPC_EXCP_TRACE:    return "TRACE";
70     case POWERPC_EXCP_HDSI:     return "HDSI";
71     case POWERPC_EXCP_HISI:     return "HISI";
72     case POWERPC_EXCP_HDSEG:    return "HDSEG";
73     case POWERPC_EXCP_HISEG:    return "HISEG";
74     case POWERPC_EXCP_VPU:      return "VPU";
75     case POWERPC_EXCP_PIT:      return "PIT";
76     case POWERPC_EXCP_IO:       return "IO";
77     case POWERPC_EXCP_RUNM:     return "RUNM";
78     case POWERPC_EXCP_EMUL:     return "EMUL";
79     case POWERPC_EXCP_IFTLB:    return "IFTLB";
80     case POWERPC_EXCP_DLTLB:    return "DLTLB";
81     case POWERPC_EXCP_DSTLB:    return "DSTLB";
82     case POWERPC_EXCP_FPA:      return "FPA";
83     case POWERPC_EXCP_DABR:     return "DABR";
84     case POWERPC_EXCP_IABR:     return "IABR";
85     case POWERPC_EXCP_SMI:      return "SMI";
86     case POWERPC_EXCP_PERFM:    return "PERFM";
87     case POWERPC_EXCP_THERM:    return "THERM";
88     case POWERPC_EXCP_VPUA:     return "VPUA";
89     case POWERPC_EXCP_SOFTP:    return "SOFTP";
90     case POWERPC_EXCP_MAINT:    return "MAINT";
91     case POWERPC_EXCP_MEXTBR:   return "MEXTBR";
92     case POWERPC_EXCP_NMEXTBR:  return "NMEXTBR";
93     case POWERPC_EXCP_ITLBE:    return "ITLBE";
94     case POWERPC_EXCP_DTLBE:    return "DTLBE";
95     case POWERPC_EXCP_VSXU:     return "VSXU";
96     case POWERPC_EXCP_FU:       return "FU";
97     case POWERPC_EXCP_HV_EMU:   return "HV_EMU";
98     case POWERPC_EXCP_HV_MAINT: return "HV_MAINT";
99     case POWERPC_EXCP_HV_FU:    return "HV_FU";
100     case POWERPC_EXCP_SDOOR:    return "SDOOR";
101     case POWERPC_EXCP_SDOOR_HV: return "SDOOR_HV";
102     case POWERPC_EXCP_HVIRT:    return "HVIRT";
103     case POWERPC_EXCP_SYSCALL_VECTORED: return "SYSCALL_VECTORED";
104     default:
105         g_assert_not_reached();
106     }
107 }
108 
109 static void dump_syscall(CPUPPCState *env)
110 {
111     qemu_log_mask(CPU_LOG_INT, "syscall r0=%016" PRIx64
112                   " r3=%016" PRIx64 " r4=%016" PRIx64 " r5=%016" PRIx64
113                   " r6=%016" PRIx64 " r7=%016" PRIx64 " r8=%016" PRIx64
114                   " nip=" TARGET_FMT_lx "\n",
115                   ppc_dump_gpr(env, 0), ppc_dump_gpr(env, 3),
116                   ppc_dump_gpr(env, 4), ppc_dump_gpr(env, 5),
117                   ppc_dump_gpr(env, 6), ppc_dump_gpr(env, 7),
118                   ppc_dump_gpr(env, 8), env->nip);
119 }
120 
121 static void dump_hcall(CPUPPCState *env)
122 {
123     qemu_log_mask(CPU_LOG_INT, "hypercall r3=%016" PRIx64
124                   " r4=%016" PRIx64 " r5=%016" PRIx64 " r6=%016" PRIx64
125                   " r7=%016" PRIx64 " r8=%016" PRIx64 " r9=%016" PRIx64
126                   " r10=%016" PRIx64 " r11=%016" PRIx64 " r12=%016" PRIx64
127                   " nip=" TARGET_FMT_lx "\n",
128                   ppc_dump_gpr(env, 3), ppc_dump_gpr(env, 4),
129                   ppc_dump_gpr(env, 5), ppc_dump_gpr(env, 6),
130                   ppc_dump_gpr(env, 7), ppc_dump_gpr(env, 8),
131                   ppc_dump_gpr(env, 9), ppc_dump_gpr(env, 10),
132                   ppc_dump_gpr(env, 11), ppc_dump_gpr(env, 12),
133                   env->nip);
134 }
135 
136 static void ppc_excp_debug_sw_tlb(CPUPPCState *env, int excp)
137 {
138     const char *es;
139     target_ulong *miss, *cmp;
140     int en;
141 
142     if (!qemu_loglevel_mask(CPU_LOG_MMU)) {
143         return;
144     }
145 
146     if (excp == POWERPC_EXCP_IFTLB) {
147         es = "I";
148         en = 'I';
149         miss = &env->spr[SPR_IMISS];
150         cmp = &env->spr[SPR_ICMP];
151     } else {
152         if (excp == POWERPC_EXCP_DLTLB) {
153             es = "DL";
154         } else {
155             es = "DS";
156         }
157         en = 'D';
158         miss = &env->spr[SPR_DMISS];
159         cmp = &env->spr[SPR_DCMP];
160     }
161     qemu_log("6xx %sTLB miss: %cM " TARGET_FMT_lx " %cC "
162              TARGET_FMT_lx " H1 " TARGET_FMT_lx " H2 "
163              TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp,
164              env->spr[SPR_HASH1], env->spr[SPR_HASH2],
165              env->error_code);
166 }
167 
168 
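/*
 * When the thread wakes from a power-saving state, the pending event is
 * converted into a system reset delivered at 0x100, with the wake reason
 * encoded in the SRR1 "wake" field (e.g. a pending decrementer wakes the
 * thread with SRR1_WAKEDEC set, as done below).
 */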
169 static int powerpc_reset_wakeup(CPUState *cs, CPUPPCState *env, int excp,
170                                 target_ulong *msr)
171 {
172     /* We are no longer in a PM state */
173     env->resume_as_sreset = false;
174 
175     /* Always pretend to be returning from doze, as we don't lose state */
176     *msr |= SRR1_WS_NOLOSS;
177 
178     /* Machine checks are sent normally */
179     if (excp == POWERPC_EXCP_MCHECK) {
180         return excp;
181     }
182     switch (excp) {
183     case POWERPC_EXCP_RESET:
184         *msr |= SRR1_WAKERESET;
185         break;
186     case POWERPC_EXCP_EXTERNAL:
187         *msr |= SRR1_WAKEEE;
188         break;
189     case POWERPC_EXCP_DECR:
190         *msr |= SRR1_WAKEDEC;
191         break;
192     case POWERPC_EXCP_SDOOR:
193         *msr |= SRR1_WAKEDBELL;
194         break;
195     case POWERPC_EXCP_SDOOR_HV:
196         *msr |= SRR1_WAKEHDBELL;
197         break;
198     case POWERPC_EXCP_HV_MAINT:
199         *msr |= SRR1_WAKEHMI;
200         break;
201     case POWERPC_EXCP_HVIRT:
202         *msr |= SRR1_WAKEHVI;
203         break;
204     default:
205         cpu_abort(cs, "Unsupported exception %d in Power Save mode\n",
206                   excp);
207     }
208     return POWERPC_EXCP_RESET;
209 }
210 
211 /*
212  * AIL - Alternate Interrupt Location, a mode that allows interrupts to be
213  * taken with the MMU on, and which uses an alternate location (e.g., so the
214  * kernel/hv can map the vectors there with an effective address).
215  *
216  * An interrupt is considered to be taken "with AIL" or "AIL applies" if it
217  * is delivered in this way. AIL requires the LPCR to be set to enable this
218  * mode, and then a number of conditions have to be true for AIL to apply.
219  *
220  * First of all, SRESET, MCE, and HMI are always delivered without AIL, because
221  * they specifically want to be in real mode (e.g., the MCE might be signaling
222  * a SLB multi-hit which requires SLB flush before the MMU can be enabled).
223  *
224  * After that, behaviour depends on the current MSR[IR], MSR[DR], MSR[HV],
225  * whether or not the interrupt changes MSR[HV] from 0 to 1, and the current
226  * radix mode (LPCR[HR]).
227  *
228  * POWER8, POWER9 with LPCR[HR]=0
229  * | LPCR[AIL] | MSR[IR||DR] | MSR[HV] | new MSR[HV] | AIL |
230  * +-----------+-------------+---------+-------------+-----+
231  * | a         | 00/01/10    | x       | x           | 0   |
232  * | a         | 11          | 0       | 1           | 0   |
233  * | a         | 11          | 1       | 1           | a   |
234  * | a         | 11          | 0       | 0           | a   |
235  * +-------------------------------------------------------+
236  *
237  * POWER9 with LPCR[HR]=1
238  * | LPCR[AIL] | MSR[IR||DR] | MSR[HV] | new MSR[HV] | AIL |
239  * +-----------+-------------+---------+-------------+-----+
240  * | a         | 00/01/10    | x       | x           | 0   |
241  * | a         | 11          | x       | x           | a   |
242  * +-------------------------------------------------------+
243  *
244  * The difference on POWER9 is that MSR[HV] 0->1 interrupts can be sent to
245  * the hypervisor in AIL mode if the guest is radix. This is good for
246  * performance but allows the guest to influence the AIL of hypervisor
247  * interrupts using its MSR; it also means the hypervisor must disallow guest
248  * interrupts (MSR[HV] 0->0) from using AIL if it does not want to
249  * use AIL for its MSR[HV] 0->1 interrupts.
250  *
251  * POWER10 addresses those issues with a new LPCR[HAIL] bit that is applied to
252  * interrupts that begin execution with MSR[HV]=1 (so both MSR[HV] 0->1 and
253  * MSR[HV] 1->1).
254  *
255  * HAIL=1 is equivalent to AIL=3, for interrupts delivered with MSR[HV]=1.
256  *
257  * POWER10 behaviour is
258  * | LPCR[AIL] | LPCR[HAIL] | MSR[IR||DR] | MSR[HV] | new MSR[HV] | AIL |
259  * +-----------+------------+-------------+---------+-------------+-----+
260  * | a         | h          | 00/01/10    | 0       | 0           | 0   |
261  * | a         | h          | 11          | 0       | 0           | a   |
262  * | a         | h          | x           | 0       | 1           | h   |
263  * | a         | h          | 00/01/10    | 1       | 1           | 0   |
264  * | a         | h          | 11          | 1       | 1           | h   |
265  * +--------------------------------------------------------------------+
266  */
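/*
 * Illustrative example (following the tables above): on a POWER9 radix
 * guest with LPCR[AIL] = 3, an external interrupt (vector 0x500) taken
 * with MSR[IR] = MSR[DR] = 1 is delivered at 0xc000000000004500 with
 * MSR[IR] and MSR[DR] set in the new MSR; with LPCR[AIL] = 0 the same
 * interrupt is taken at 0x500 in real mode.
 */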
267 static void ppc_excp_apply_ail(PowerPCCPU *cpu, int excp_model, int excp,
268                                       target_ulong msr,
269                                       target_ulong *new_msr,
270                                       target_ulong *vector)
271 {
272 #if defined(TARGET_PPC64)
273     CPUPPCState *env = &cpu->env;
274     bool mmu_all_on = ((msr >> MSR_IR) & 1) && ((msr >> MSR_DR) & 1);
275     bool hv_escalation = !(msr & MSR_HVB) && (*new_msr & MSR_HVB);
276     int ail = 0;
277 
278     if (excp == POWERPC_EXCP_MCHECK ||
279         excp == POWERPC_EXCP_RESET ||
280         excp == POWERPC_EXCP_HV_MAINT) {
281         /* SRESET, MCE, HMI never apply AIL */
282         return;
283     }
284 
285     if (excp_model == POWERPC_EXCP_POWER8 ||
286         excp_model == POWERPC_EXCP_POWER9) {
287         if (!mmu_all_on) {
288             /* AIL only works if MSR[IR] and MSR[DR] are both enabled. */
289             return;
290         }
291         if (hv_escalation && !(env->spr[SPR_LPCR] & LPCR_HR)) {
292             /*
293              * AIL does not work if there is a MSR[HV] 0->1 transition and the
294              * partition is in HPT mode. For radix guests, such interrupts are
295              * allowed to be delivered to the hypervisor in AIL mode.
296              */
297             return;
298         }
299 
300         ail = (env->spr[SPR_LPCR] & LPCR_AIL) >> LPCR_AIL_SHIFT;
301         if (ail == 0) {
302             return;
303         }
304         if (ail == 1) {
305             /* AIL=1 is reserved, treat it like AIL=0 */
306             return;
307         }
308 
309     } else if (excp_model == POWERPC_EXCP_POWER10) {
310         if (!mmu_all_on && !hv_escalation) {
311             /*
312              * AIL works for HV interrupts even with guest MSR[IR/DR] disabled.
313              * Guest->guest and HV->HV interrupts do require MMU on.
314              */
315             return;
316         }
317 
318         if (*new_msr & MSR_HVB) {
319             if (!(env->spr[SPR_LPCR] & LPCR_HAIL)) {
320                 /* HV interrupts depend on LPCR[HAIL] */
321                 return;
322             }
323             ail = 3; /* HAIL=1 gives AIL=3 behaviour for HV interrupts */
324         } else {
325             ail = (env->spr[SPR_LPCR] & LPCR_AIL) >> LPCR_AIL_SHIFT;
326         }
327         if (ail == 0) {
328             return;
329         }
330         if (ail == 1 || ail == 2) {
331             /* AIL=1 and AIL=2 are reserved, treat them like AIL=0 */
332             return;
333         }
334     } else {
335         /* Other processors do not support AIL */
336         return;
337     }
338 
339     /*
340      * AIL applies, so the new MSR gets IR and DR set, and an offset applied
341      * to the new IP.
342      */
343     *new_msr |= (1 << MSR_IR) | (1 << MSR_DR);
344 
345     if (excp != POWERPC_EXCP_SYSCALL_VECTORED) {
346         if (ail == 2) {
347             *vector |= 0x0000000000018000ull;
348         } else if (ail == 3) {
349             *vector |= 0xc000000000004000ull;
350         }
351     } else {
352         /*
353          * scv AIL is a little different. AIL=2 does not change the address,
354          * only the MSR. AIL=3 replaces the 0x17000 base with 0xc...3000.
355          */
356         if (ail == 3) {
357             *vector &= ~0x0000000000017000ull; /* Un-apply the base offset */
358             *vector |= 0xc000000000003000ull; /* Apply scv's AIL=3 offset */
359         }
360     }
361 #endif
362 }
363 
364 static void powerpc_set_excp_state(PowerPCCPU *cpu,
365                                           target_ulong vector, target_ulong msr)
366 {
367     CPUState *cs = CPU(cpu);
368     CPUPPCState *env = &cpu->env;
369 
370     /*
371      * We don't use hreg_store_msr here as we have already handled any
372      * special case that could occur. Just store the MSR and update hflags.
373      *
374      * Note: We *MUST* not use hreg_store_msr() as-is anyway because it
375      * will prevent setting of the HV bit which some exceptions might need
376      * to do.
377      */
378     env->msr = msr & env->msr_mask;
379     hreg_compute_hflags(env);
380     env->nip = vector;
381     /* Reset exception state */
382     cs->exception_index = POWERPC_EXCP_NONE;
383     env->error_code = 0;
384 
385     /* Reset the reservation */
386     env->reserve_addr = -1;
387 
388     /*
389      * Any interrupt is context synchronizing, check if TCG TLB needs
390      * a delayed flush on ppc64
391      */
392     check_tlb_flush(env, false);
393 }
394 
395 /*
396  * Note that this function should be greatly optimized when called
397  * with a constant excp, from ppc_hw_interrupt
398  */
399 static inline void powerpc_excp_legacy(PowerPCCPU *cpu, int excp)
400 {
401     CPUState *cs = CPU(cpu);
402     CPUPPCState *env = &cpu->env;
403     int excp_model = env->excp_model;
404     target_ulong msr, new_msr, vector;
405     int srr0, srr1, lev = -1;
406 
407     if (excp <= POWERPC_EXCP_NONE || excp >= POWERPC_EXCP_NB) {
408         cpu_abort(cs, "Invalid PowerPC exception %d. Aborting\n", excp);
409     }
410 
411     qemu_log_mask(CPU_LOG_INT, "Raise exception at " TARGET_FMT_lx
412                   " => %s (%d) error=%02x\n", env->nip, powerpc_excp_name(excp),
413                   excp, env->error_code);
414 
415     /* new srr1 value excluding must-be-zero bits */
416     if (excp_model == POWERPC_EXCP_BOOKE) {
417         msr = env->msr;
418     } else {
419         msr = env->msr & ~0x783f0000ULL;
420     }
421 
422     /*
423      * The new interrupt handler msr preserves existing HV and ME unless
424      * explicitly overridden.
425      */
426     new_msr = env->msr & (((target_ulong)1 << MSR_ME) | MSR_HVB);
427 
428     /* target registers */
429     srr0 = SPR_SRR0;
430     srr1 = SPR_SRR1;
431 
432     /*
433      * check for special resume at 0x100 from doze/nap/sleep/winkle on
434      * P7/P8/P9
435      */
436     if (env->resume_as_sreset) {
437         excp = powerpc_reset_wakeup(cs, env, excp, &msr);
438     }
439 
440     /*
441      * The hypervisor emulation assistance interrupt only exists on server
442      * arch 2.05 or later. We also don't want to generate it if
443      * we don't have HVB in msr_mask (PAPR mode).
444      */
445     if (excp == POWERPC_EXCP_HV_EMU
446 #if defined(TARGET_PPC64)
447         && !(mmu_is_64bit(env->mmu_model) && (env->msr_mask & MSR_HVB))
448 #endif /* defined(TARGET_PPC64) */
449 
450     ) {
451         excp = POWERPC_EXCP_PROGRAM;
452     }
453 
454 #ifdef TARGET_PPC64
455     /*
456      * SPEU and VPU share the same IVOR but they exist in different
457      * processors. SPEU is e500v1/2 only and VPU is e6500 only.
458      */
459     if (excp_model == POWERPC_EXCP_BOOKE && excp == POWERPC_EXCP_VPU) {
460         excp = POWERPC_EXCP_SPEU;
461     }
462 #endif
463 
464     vector = env->excp_vectors[excp];
465     if (vector == (target_ulong)-1ULL) {
466         cpu_abort(cs, "Raised an exception without defined vector %d\n",
467                   excp);
468     }
469 
470     vector |= env->excp_prefix;
471 
472     switch (excp) {
473     case POWERPC_EXCP_CRITICAL:    /* Critical input                         */
474         switch (excp_model) {
475         case POWERPC_EXCP_40x:
476             srr0 = SPR_40x_SRR2;
477             srr1 = SPR_40x_SRR3;
478             break;
479         case POWERPC_EXCP_BOOKE:
480             srr0 = SPR_BOOKE_CSRR0;
481             srr1 = SPR_BOOKE_CSRR1;
482             break;
483         case POWERPC_EXCP_G2:
484             break;
485         default:
486             goto excp_invalid;
487         }
488         break;
489     case POWERPC_EXCP_MCHECK:    /* Machine check exception                  */
490         if (msr_me == 0) {
491             /*
492              * Machine check exception is not enabled.  Enter
493              * checkstop state.
494              */
495             fprintf(stderr, "Machine check while not allowed. "
496                     "Entering checkstop state\n");
497             if (qemu_log_separate()) {
498                 qemu_log("Machine check while not allowed. "
499                         "Entering checkstop state\n");
500             }
501             cs->halted = 1;
502             cpu_interrupt_exittb(cs);
503         }
504         if (env->msr_mask & MSR_HVB) {
505             /*
506              * ISA specifies HV, but can be delivered to guest with HV
507              * clear (e.g., see FWNMI in PAPR).
508              */
509             new_msr |= (target_ulong)MSR_HVB;
510         }
511 
512         /* machine check exceptions don't have ME set */
513         new_msr &= ~((target_ulong)1 << MSR_ME);
514 
515         /* XXX: should also have something loaded in DAR / DSISR */
516         switch (excp_model) {
517         case POWERPC_EXCP_40x:
518             srr0 = SPR_40x_SRR2;
519             srr1 = SPR_40x_SRR3;
520             break;
521         case POWERPC_EXCP_BOOKE:
522             /* FIXME: choose one or the other based on CPU type */
523             srr0 = SPR_BOOKE_MCSRR0;
524             srr1 = SPR_BOOKE_MCSRR1;
525 
526             env->spr[SPR_BOOKE_CSRR0] = env->nip;
527             env->spr[SPR_BOOKE_CSRR1] = msr;
528             break;
529         default:
530             break;
531         }
532         break;
533     case POWERPC_EXCP_DSI:       /* Data storage exception                   */
534         trace_ppc_excp_dsi(env->spr[SPR_DSISR], env->spr[SPR_DAR]);
535         break;
536     case POWERPC_EXCP_ISI:       /* Instruction storage exception            */
537         trace_ppc_excp_isi(msr, env->nip);
538         msr |= env->error_code;
539         break;
540     case POWERPC_EXCP_EXTERNAL:  /* External input                           */
541     {
542         bool lpes0;
543 
544         cs = CPU(cpu);
545 
546         /*
547          * Exception targeting modifiers
548          *
549          * LPES0 is supported on POWER7/8/9
550          * LPES1 is not supported (old iSeries mode)
551          *
552          * On anything else, we behave as if LPES0 is 1
553          * (externals don't alter MSR:HV)
554          */
555 #if defined(TARGET_PPC64)
556         if (excp_model == POWERPC_EXCP_POWER7 ||
557             excp_model == POWERPC_EXCP_POWER8 ||
558             excp_model == POWERPC_EXCP_POWER9 ||
559             excp_model == POWERPC_EXCP_POWER10) {
560             lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
561         } else
562 #endif /* defined(TARGET_PPC64) */
563         {
564             lpes0 = true;
565         }
566 
567         if (!lpes0) {
568             new_msr |= (target_ulong)MSR_HVB;
569             new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
570             srr0 = SPR_HSRR0;
571             srr1 = SPR_HSRR1;
572         }
573         if (env->mpic_proxy) {
574             /* IACK the IRQ on delivery */
575             env->spr[SPR_BOOKE_EPR] = ldl_phys(cs->as, env->mpic_iack);
576         }
577         break;
578     }
579     case POWERPC_EXCP_ALIGN:     /* Alignment exception                      */
580         /* Get rS/rD and rA from faulting opcode */
581         /*
582          * Note: the opcode fields will not be set properly for a
583          * direct store load/store, but nobody cares as nobody
584          * actually uses direct store segments.
585          */
586         env->spr[SPR_DSISR] |= (env->error_code & 0x03FF0000) >> 16;
587         break;
588     case POWERPC_EXCP_PROGRAM:   /* Program exception                        */
589         switch (env->error_code & ~0xF) {
590         case POWERPC_EXCP_FP:
591             if ((msr_fe0 == 0 && msr_fe1 == 0) || msr_fp == 0) {
592                 trace_ppc_excp_fp_ignore();
593                 cs->exception_index = POWERPC_EXCP_NONE;
594                 env->error_code = 0;
595                 return;
596             }
597 
598             /*
599              * FP exceptions always have NIP pointing to the faulting
600              * instruction, so always use store_next and claim we are
601              * precise in the MSR.
602              */
603             msr |= 0x00100000;
604             env->spr[SPR_BOOKE_ESR] = ESR_FP;
605             break;
606         case POWERPC_EXCP_INVAL:
607             trace_ppc_excp_inval(env->nip);
608             msr |= 0x00080000;
609             env->spr[SPR_BOOKE_ESR] = ESR_PIL;
610             break;
611         case POWERPC_EXCP_PRIV:
612             msr |= 0x00040000;
613             env->spr[SPR_BOOKE_ESR] = ESR_PPR;
614             break;
615         case POWERPC_EXCP_TRAP:
616             msr |= 0x00020000;
617             env->spr[SPR_BOOKE_ESR] = ESR_PTR;
618             break;
619         default:
620             /* Should never occur */
621             cpu_abort(cs, "Invalid program exception %d. Aborting\n",
622                       env->error_code);
623             break;
624         }
625         break;
626     case POWERPC_EXCP_SYSCALL:   /* System call exception                    */
627         lev = env->error_code;
628 
629         if ((lev == 1) && cpu->vhyp) {
630             dump_hcall(env);
631         } else {
632             dump_syscall(env);
633         }
634 
635         /*
636          * We need to correct the NIP, which in this case is supposed
637          * to point to the next instruction
638          */
639         env->nip += 4;
640 
641         /* "PAPR mode" built-in hypercall emulation */
642         if ((lev == 1) && cpu->vhyp) {
643             PPCVirtualHypervisorClass *vhc =
644                 PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
645             vhc->hypercall(cpu->vhyp, cpu);
646             return;
647         }
648         if (lev == 1) {
649             new_msr |= (target_ulong)MSR_HVB;
650         }
651         break;
652     case POWERPC_EXCP_SYSCALL_VECTORED: /* scv exception                     */
653         lev = env->error_code;
654         dump_syscall(env);
655         env->nip += 4;
656         new_msr |= env->msr & ((target_ulong)1 << MSR_EE);
657         new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
658 
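        /*
         * Each scv level has its own vector, spaced 0x20 apart: with the
         * usual 0x17000 base, scv 0 is taken at 0x17000 and scv 1 at
         * 0x17020 (before any AIL adjustment below).
         */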
659         vector += lev * 0x20;
660 
661         env->lr = env->nip;
662         env->ctr = msr;
663         break;
664     case POWERPC_EXCP_FPU:       /* Floating-point unavailable exception     */
665     case POWERPC_EXCP_APU:       /* Auxiliary processor unavailable          */
666     case POWERPC_EXCP_DECR:      /* Decrementer exception                    */
667         break;
668     case POWERPC_EXCP_FIT:       /* Fixed-interval timer interrupt           */
669         /* FIT on 4xx */
670         trace_ppc_excp_print("FIT");
671         break;
672     case POWERPC_EXCP_WDT:       /* Watchdog timer interrupt                 */
673         trace_ppc_excp_print("WDT");
674         switch (excp_model) {
675         case POWERPC_EXCP_BOOKE:
676             srr0 = SPR_BOOKE_CSRR0;
677             srr1 = SPR_BOOKE_CSRR1;
678             break;
679         default:
680             break;
681         }
682         break;
683     case POWERPC_EXCP_DTLB:      /* Data TLB error                           */
684     case POWERPC_EXCP_ITLB:      /* Instruction TLB error                    */
685         break;
686     case POWERPC_EXCP_DEBUG:     /* Debug interrupt                          */
687         if (env->flags & POWERPC_FLAG_DE) {
688             /* FIXME: choose one or the other based on CPU type */
689             srr0 = SPR_BOOKE_DSRR0;
690             srr1 = SPR_BOOKE_DSRR1;
691 
692             env->spr[SPR_BOOKE_CSRR0] = env->nip;
693             env->spr[SPR_BOOKE_CSRR1] = msr;
694 
695             /* DBSR already modified by caller */
696         } else {
697             cpu_abort(cs, "Debug exception triggered on unsupported model\n");
698         }
699         break;
700     case POWERPC_EXCP_SPEU:   /* SPE/embedded floating-point unavailable/VPU  */
701         env->spr[SPR_BOOKE_ESR] = ESR_SPV;
702         break;
703     case POWERPC_EXCP_DOORI:     /* Embedded doorbell interrupt              */
704         break;
705     case POWERPC_EXCP_DOORCI:    /* Embedded doorbell critical interrupt     */
706         srr0 = SPR_BOOKE_CSRR0;
707         srr1 = SPR_BOOKE_CSRR1;
708         break;
709     case POWERPC_EXCP_RESET:     /* System reset exception                   */
710         /* A power-saving exception sets ME, otherwise it is unchanged */
711         if (msr_pow) {
712             /* indicate that we resumed from power save mode */
713             msr |= 0x10000;
714             new_msr |= ((target_ulong)1 << MSR_ME);
715         }
716         if (env->msr_mask & MSR_HVB) {
717             /*
718              * ISA specifies HV, but can be delivered to guest with HV
719              * clear (e.g., see FWNMI in PAPR, NMI injection in QEMU).
720              */
721             new_msr |= (target_ulong)MSR_HVB;
722         } else {
723             if (msr_pow) {
724                 cpu_abort(cs, "Trying to deliver power-saving system reset "
725                           "exception %d with no HV support\n", excp);
726             }
727         }
728         break;
729     case POWERPC_EXCP_DSEG:      /* Data segment exception                   */
730     case POWERPC_EXCP_ISEG:      /* Instruction segment exception            */
731     case POWERPC_EXCP_TRACE:     /* Trace exception                          */
732         break;
733     case POWERPC_EXCP_HISI:      /* Hypervisor instruction storage exception */
734         msr |= env->error_code;
735         /* fall through */
736     case POWERPC_EXCP_HDECR:     /* Hypervisor decrementer exception         */
737     case POWERPC_EXCP_HDSI:      /* Hypervisor data storage exception        */
738     case POWERPC_EXCP_HDSEG:     /* Hypervisor data segment exception        */
739     case POWERPC_EXCP_HISEG:     /* Hypervisor instruction segment exception */
740     case POWERPC_EXCP_SDOOR_HV:  /* Hypervisor Doorbell interrupt            */
741     case POWERPC_EXCP_HV_EMU:
742     case POWERPC_EXCP_HVIRT:     /* Hypervisor virtualization                */
743         srr0 = SPR_HSRR0;
744         srr1 = SPR_HSRR1;
745         new_msr |= (target_ulong)MSR_HVB;
746         new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
747         break;
748     case POWERPC_EXCP_VPU:       /* Vector unavailable exception             */
749     case POWERPC_EXCP_VSXU:       /* VSX unavailable exception               */
750     case POWERPC_EXCP_FU:         /* Facility unavailable exception          */
751 #ifdef TARGET_PPC64
752         env->spr[SPR_FSCR] |= ((target_ulong)env->error_code << 56);
753 #endif
754         break;
755     case POWERPC_EXCP_HV_FU:     /* Hypervisor Facility Unavailable Exception */
756 #ifdef TARGET_PPC64
757         env->spr[SPR_HFSCR] |= ((target_ulong)env->error_code << FSCR_IC_POS);
758         srr0 = SPR_HSRR0;
759         srr1 = SPR_HSRR1;
760         new_msr |= (target_ulong)MSR_HVB;
761         new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
762 #endif
763         break;
764     case POWERPC_EXCP_PIT:       /* Programmable interval timer interrupt    */
765         trace_ppc_excp_print("PIT");
766         break;
767     case POWERPC_EXCP_IFTLB:     /* Instruction fetch TLB error              */
768     case POWERPC_EXCP_DLTLB:     /* Data load TLB miss                       */
769     case POWERPC_EXCP_DSTLB:     /* Data store TLB miss                      */
770         switch (excp_model) {
771         case POWERPC_EXCP_602:
772         case POWERPC_EXCP_603:
773         case POWERPC_EXCP_G2:
774             /* Swap temporary saved registers with GPRs */
775             if (!(new_msr & ((target_ulong)1 << MSR_TGPR))) {
776                 new_msr |= (target_ulong)1 << MSR_TGPR;
777                 hreg_swap_gpr_tgpr(env);
778             }
779             /* fall through */
780         case POWERPC_EXCP_7x5:
781             ppc_excp_debug_sw_tlb(env, excp);
782 
783             msr |= env->crf[0] << 28;
784             msr |= env->error_code; /* key, D/I, S/L bits */
785             /* Set way using a LRU mechanism */
786             msr |= ((env->last_way + 1) & (env->nb_ways - 1)) << 17;
787             break;
788         default:
789             cpu_abort(cs, "Invalid TLB miss exception\n");
790             break;
791         }
792         break;
793     case POWERPC_EXCP_EFPDI:     /* Embedded floating-point data interrupt   */
794     case POWERPC_EXCP_EFPRI:     /* Embedded floating-point round interrupt  */
795     case POWERPC_EXCP_EPERFM:    /* Embedded performance monitor interrupt   */
796     case POWERPC_EXCP_IO:        /* IO error exception                       */
797     case POWERPC_EXCP_RUNM:      /* Run mode exception                       */
798     case POWERPC_EXCP_EMUL:      /* Emulation trap exception                 */
799     case POWERPC_EXCP_FPA:       /* Floating-point assist exception          */
800     case POWERPC_EXCP_DABR:      /* Data address breakpoint                  */
801     case POWERPC_EXCP_IABR:      /* Instruction address breakpoint           */
802     case POWERPC_EXCP_SMI:       /* System management interrupt              */
803     case POWERPC_EXCP_THERM:     /* Thermal interrupt                        */
804     case POWERPC_EXCP_PERFM:     /* Embedded performance monitor interrupt   */
805     case POWERPC_EXCP_VPUA:      /* Vector assist exception                  */
806     case POWERPC_EXCP_SOFTP:     /* Soft patch exception                     */
807     case POWERPC_EXCP_MAINT:     /* Maintenance exception                    */
808     case POWERPC_EXCP_MEXTBR:    /* Maskable external breakpoint             */
809     case POWERPC_EXCP_NMEXTBR:   /* Non maskable external breakpoint         */
810         cpu_abort(cs, "%s exception not implemented\n",
811                   powerpc_excp_name(excp));
812         break;
813     default:
814     excp_invalid:
815         cpu_abort(cs, "Invalid PowerPC exception %d. Aborting\n", excp);
816         break;
817     }
818 
819     /* Sanity check */
820     if (!(env->msr_mask & MSR_HVB)) {
821         if (new_msr & MSR_HVB) {
822             cpu_abort(cs, "Trying to deliver HV exception (MSR) %d with "
823                       "no HV support\n", excp);
824         }
825         if (srr0 == SPR_HSRR0) {
826             cpu_abort(cs, "Trying to deliver HV exception (HSRR) %d with "
827                       "no HV support\n", excp);
828         }
829     }
830 
831     /*
832      * Sort out the endianness of the interrupt; this differs depending on
833      * the CPU, the HV mode, etc.
834      */
835     if (ppc_interrupts_little_endian(cpu, !!(new_msr & MSR_HVB))) {
836         new_msr |= (target_ulong)1 << MSR_LE;
837     }
838 
839 #if defined(TARGET_PPC64)
840     if (excp_model == POWERPC_EXCP_BOOKE) {
841         if (env->spr[SPR_BOOKE_EPCR] & EPCR_ICM) {
842             /* Cat.64-bit: EPCR.ICM is copied to MSR.CM */
843             new_msr |= (target_ulong)1 << MSR_CM;
844         } else {
845             vector = (uint32_t)vector;
846         }
847     } else {
848         if (!msr_isf && !mmu_is_64bit(env->mmu_model)) {
849             vector = (uint32_t)vector;
850         } else {
851             new_msr |= (target_ulong)1 << MSR_SF;
852         }
853     }
854 #endif
855 
856     if (excp != POWERPC_EXCP_SYSCALL_VECTORED) {
857         /* Save PC */
858         env->spr[srr0] = env->nip;
859 
860         /* Save MSR */
861         env->spr[srr1] = msr;
862     }
863 
864     /* This can update new_msr and vector if AIL applies */
865     ppc_excp_apply_ail(cpu, excp_model, excp, msr, &new_msr, &vector);
866 
867     powerpc_set_excp_state(cpu, vector, new_msr);
868 }
869 
870 static void powerpc_excp(PowerPCCPU *cpu, int excp)
871 {
872     CPUPPCState *env = &cpu->env;
873 
874     switch (env->excp_model) {
875     default:
876         powerpc_excp_legacy(cpu, excp);
877     }
878 }
879 
880 void ppc_cpu_do_interrupt(CPUState *cs)
881 {
882     PowerPCCPU *cpu = POWERPC_CPU(cs);
883 
884     powerpc_excp(cpu, cs->exception_index);
885 }
886 
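/*
 * Deliver the highest-priority pending interrupt: system reset and machine
 * check are checked first, then hypervisor, external and critical interrupts
 * (each with its own gating conditions), and finally the MSR:EE-gated timer,
 * doorbell, performance monitor and thermal interrupts.
 */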
887 static void ppc_hw_interrupt(CPUPPCState *env)
888 {
889     PowerPCCPU *cpu = env_archcpu(env);
890     bool async_deliver;
891 
892     /* External reset */
893     if (env->pending_interrupts & (1 << PPC_INTERRUPT_RESET)) {
894         env->pending_interrupts &= ~(1 << PPC_INTERRUPT_RESET);
895         powerpc_excp(cpu, POWERPC_EXCP_RESET);
896         return;
897     }
898     /* Machine check exception */
899     if (env->pending_interrupts & (1 << PPC_INTERRUPT_MCK)) {
900         env->pending_interrupts &= ~(1 << PPC_INTERRUPT_MCK);
901         powerpc_excp(cpu, POWERPC_EXCP_MCHECK);
902         return;
903     }
904 #if 0 /* TODO */
905     /* External debug exception */
906     if (env->pending_interrupts & (1 << PPC_INTERRUPT_DEBUG)) {
907         env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DEBUG);
908         powerpc_excp(cpu, POWERPC_EXCP_DEBUG);
909         return;
910     }
911 #endif
912 
913     /*
914      * For interrupts that gate on MSR:EE, we need to do something a bit
915      * more subtle: we must let them through even when EE is clear while
916      * coming out of some power management states, so that they can be
917      * delivered as a 0x100 (system reset) wakeup.
918      */
919     async_deliver = (msr_ee != 0) || env->resume_as_sreset;
920 
921     /* Hypervisor decrementer exception */
922     if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDECR)) {
923         /* LPCR will be clear when not supported so this will work */
924         bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE);
925         if ((async_deliver || msr_hv == 0) && hdice) {
926             /* HDEC clears on delivery */
927             env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDECR);
928             powerpc_excp(cpu, POWERPC_EXCP_HDECR);
929             return;
930         }
931     }
932 
933     /* Hypervisor virtualization interrupt */
934     if (env->pending_interrupts & (1 << PPC_INTERRUPT_HVIRT)) {
935         /* LPCR will be clear when not supported so this will work */
936         bool hvice = !!(env->spr[SPR_LPCR] & LPCR_HVICE);
937         if ((async_deliver || msr_hv == 0) && hvice) {
938             powerpc_excp(cpu, POWERPC_EXCP_HVIRT);
939             return;
940         }
941     }
942 
943     /* External interrupt can ignore MSR:EE under some circumstances */
944     if (env->pending_interrupts & (1 << PPC_INTERRUPT_EXT)) {
945         bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
946         bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC);
947         /* HEIC blocks delivery to the hypervisor */
948         if ((async_deliver && !(heic && msr_hv && !msr_pr)) ||
949             (env->has_hv_mode && msr_hv == 0 && !lpes0)) {
950             powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL);
951             return;
952         }
953     }
954     if (msr_ce != 0) {
955         /* External critical interrupt */
956         if (env->pending_interrupts & (1 << PPC_INTERRUPT_CEXT)) {
957             powerpc_excp(cpu, POWERPC_EXCP_CRITICAL);
958             return;
959         }
960     }
961     if (async_deliver != 0) {
962         /* Watchdog timer on embedded PowerPC */
963         if (env->pending_interrupts & (1 << PPC_INTERRUPT_WDT)) {
964             env->pending_interrupts &= ~(1 << PPC_INTERRUPT_WDT);
965             powerpc_excp(cpu, POWERPC_EXCP_WDT);
966             return;
967         }
968         if (env->pending_interrupts & (1 << PPC_INTERRUPT_CDOORBELL)) {
969             env->pending_interrupts &= ~(1 << PPC_INTERRUPT_CDOORBELL);
970             powerpc_excp(cpu, POWERPC_EXCP_DOORCI);
971             return;
972         }
973         /* Fixed interval timer on embedded PowerPC */
974         if (env->pending_interrupts & (1 << PPC_INTERRUPT_FIT)) {
975             env->pending_interrupts &= ~(1 << PPC_INTERRUPT_FIT);
976             powerpc_excp(cpu, POWERPC_EXCP_FIT);
977             return;
978         }
979         /* Programmable interval timer on embedded PowerPC */
980         if (env->pending_interrupts & (1 << PPC_INTERRUPT_PIT)) {
981             env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PIT);
982             powerpc_excp(cpu, POWERPC_EXCP_PIT);
983             return;
984         }
985         /* Decrementer exception */
986         if (env->pending_interrupts & (1 << PPC_INTERRUPT_DECR)) {
987             if (ppc_decr_clear_on_delivery(env)) {
988                 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DECR);
989             }
990             powerpc_excp(cpu, POWERPC_EXCP_DECR);
991             return;
992         }
993         if (env->pending_interrupts & (1 << PPC_INTERRUPT_DOORBELL)) {
994             env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DOORBELL);
995             if (is_book3s_arch2x(env)) {
996                 powerpc_excp(cpu, POWERPC_EXCP_SDOOR);
997             } else {
998                 powerpc_excp(cpu, POWERPC_EXCP_DOORI);
999             }
1000             return;
1001         }
1002         if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDOORBELL)) {
1003             env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDOORBELL);
1004             powerpc_excp(cpu, POWERPC_EXCP_SDOOR_HV);
1005             return;
1006         }
1007         if (env->pending_interrupts & (1 << PPC_INTERRUPT_PERFM)) {
1008             env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PERFM);
1009             powerpc_excp(cpu, POWERPC_EXCP_PERFM);
1010             return;
1011         }
1012         /* Thermal interrupt */
1013         if (env->pending_interrupts & (1 << PPC_INTERRUPT_THERM)) {
1014             env->pending_interrupts &= ~(1 << PPC_INTERRUPT_THERM);
1015             powerpc_excp(cpu, POWERPC_EXCP_THERM);
1016             return;
1017         }
1018     }
1019 
1020     if (env->resume_as_sreset) {
1021         /*
1022          * This is a bug! It means that has_work took us out of halt without
1023          * anything to deliver while in a PM state that requires getting
1024          * out via a 0x100.
1025          *
1026          * This means we will incorrectly execute past the power management
1027          * instruction instead of triggering a reset.
1028          *
1029          * It generally means a discrepancy between the wakeup conditions in the
1030          * processor has_work implementation and the logic in this function.
1031          */
1032         cpu_abort(env_cpu(env),
1033                   "Wakeup from PM state but interrupt Undelivered");
1034     }
1035 }
1036 
1037 void ppc_cpu_do_system_reset(CPUState *cs)
1038 {
1039     PowerPCCPU *cpu = POWERPC_CPU(cs);
1040 
1041     powerpc_excp(cpu, POWERPC_EXCP_RESET);
1042 }
1043 
1044 void ppc_cpu_do_fwnmi_machine_check(CPUState *cs, target_ulong vector)
1045 {
1046     PowerPCCPU *cpu = POWERPC_CPU(cs);
1047     CPUPPCState *env = &cpu->env;
1048     target_ulong msr = 0;
1049 
1050     /*
1051      * Set MSR and NIP for the handler; SRR0/1, DAR and DSISR have already
1052      * been set by KVM.
1053      */
1054     msr = (1ULL << MSR_ME);
1055     msr |= env->msr & (1ULL << MSR_SF);
1056     if (ppc_interrupts_little_endian(cpu, false)) {
1057         msr |= (1ULL << MSR_LE);
1058     }
1059 
1060     powerpc_set_excp_state(cpu, vector, msr);
1061 }
1062 
1063 bool ppc_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
1064 {
1065     PowerPCCPU *cpu = POWERPC_CPU(cs);
1066     CPUPPCState *env = &cpu->env;
1067 
1068     if (interrupt_request & CPU_INTERRUPT_HARD) {
1069         ppc_hw_interrupt(env);
1070         if (env->pending_interrupts == 0) {
1071             cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
1072         }
1073         return true;
1074     }
1075     return false;
1076 }
1077 
1078 #endif /* !CONFIG_USER_ONLY */
1079 
1080 /*****************************************************************************/
1081 /* Exception processing helpers */
1082 
1083 void raise_exception_err_ra(CPUPPCState *env, uint32_t exception,
1084                             uint32_t error_code, uintptr_t raddr)
1085 {
1086     CPUState *cs = env_cpu(env);
1087 
1088     cs->exception_index = exception;
1089     env->error_code = error_code;
1090     cpu_loop_exit_restore(cs, raddr);
1091 }
1092 
1093 void raise_exception_err(CPUPPCState *env, uint32_t exception,
1094                          uint32_t error_code)
1095 {
1096     raise_exception_err_ra(env, exception, error_code, 0);
1097 }
1098 
1099 void raise_exception(CPUPPCState *env, uint32_t exception)
1100 {
1101     raise_exception_err_ra(env, exception, 0, 0);
1102 }
1103 
1104 void raise_exception_ra(CPUPPCState *env, uint32_t exception,
1105                         uintptr_t raddr)
1106 {
1107     raise_exception_err_ra(env, exception, 0, raddr);
1108 }
1109 
1110 #ifdef CONFIG_TCG
1111 void helper_raise_exception_err(CPUPPCState *env, uint32_t exception,
1112                                 uint32_t error_code)
1113 {
1114     raise_exception_err_ra(env, exception, error_code, 0);
1115 }
1116 
1117 void helper_raise_exception(CPUPPCState *env, uint32_t exception)
1118 {
1119     raise_exception_err_ra(env, exception, 0, 0);
1120 }
1121 #endif
1122 
1123 #if !defined(CONFIG_USER_ONLY)
1124 #ifdef CONFIG_TCG
1125 void helper_store_msr(CPUPPCState *env, target_ulong val)
1126 {
1127     uint32_t excp = hreg_store_msr(env, val, 0);
1128 
1129     if (excp != 0) {
1130         CPUState *cs = env_cpu(env);
1131         cpu_interrupt_exittb(cs);
1132         raise_exception(env, excp);
1133     }
1134 }
1135 
1136 #if defined(TARGET_PPC64)
1137 void helper_scv(CPUPPCState *env, uint32_t lev)
1138 {
1139     if (env->spr[SPR_FSCR] & (1ull << FSCR_SCV)) {
1140         raise_exception_err(env, POWERPC_EXCP_SYSCALL_VECTORED, lev);
1141     } else {
1142         raise_exception_err(env, POWERPC_EXCP_FU, FSCR_IC_SCV);
1143     }
1144 }
1145 
1146 void helper_pminsn(CPUPPCState *env, powerpc_pm_insn_t insn)
1147 {
1148     CPUState *cs;
1149 
1150     cs = env_cpu(env);
1151     cs->halted = 1;
1152 
1153     /* Condition for waking up at 0x100 */
1154     env->resume_as_sreset = (insn != PPC_PM_STOP) ||
1155         (env->spr[SPR_PSSCR] & PSSCR_EC);
1156 }
1157 #endif /* defined(TARGET_PPC64) */
1158 #endif /* CONFIG_TCG */
1159 
1160 static void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr)
1161 {
1162     CPUState *cs = env_cpu(env);
1163 
1164     /* MSR:POW cannot be set by any form of rfi */
1165     msr &= ~(1ULL << MSR_POW);
1166 
1167 #if defined(TARGET_PPC64)
1168     /* Switching to 32-bit ? Crop the nip */
1169     if (!msr_is_64bit(env, msr)) {
1170         nip = (uint32_t)nip;
1171     }
1172 #else
1173     nip = (uint32_t)nip;
1174 #endif
1175     /* XXX: beware: this is false if VLE is supported */
1176     env->nip = nip & ~((target_ulong)0x00000003);
1177     hreg_store_msr(env, msr, 1);
1178     trace_ppc_excp_rfi(env->nip, env->msr);
1179     /*
1180      * No need to raise an exception here, as rfi is always the last
1181      * insn of a TB
1182      */
1183     cpu_interrupt_exittb(cs);
1184     /* Reset the reservation */
1185     env->reserve_addr = -1;
1186 
1187     /* Context synchronizing: check if TCG TLB needs flush */
1188     check_tlb_flush(env, false);
1189 }
1190 
1191 #ifdef CONFIG_TCG
1192 void helper_rfi(CPUPPCState *env)
1193 {
1194     do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1] & 0xfffffffful);
1195 }
1196 
1197 #define MSR_BOOK3S_MASK
1198 #if defined(TARGET_PPC64)
1199 void helper_rfid(CPUPPCState *env)
1200 {
1201     /*
1202      * The architecture defines a number of rules for which bits can
1203      * change, but in practice we handle this in hreg_store_msr()
1204      * which will be called by do_rfi(), so there is no need to filter
1205      * here
1206      */
1207     do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1]);
1208 }
1209 
1210 void helper_rfscv(CPUPPCState *env)
1211 {
1212     do_rfi(env, env->lr, env->ctr);
1213 }
1214 
1215 void helper_hrfid(CPUPPCState *env)
1216 {
1217     do_rfi(env, env->spr[SPR_HSRR0], env->spr[SPR_HSRR1]);
1218 }
1219 #endif
1220 
1221 #if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
1222 void helper_rfebb(CPUPPCState *env, target_ulong s)
1223 {
1224     target_ulong msr = env->msr;
1225 
1226     /*
1227      * Handling of BESCR bits 32:33 according to PowerISA v3.1:
1228      *
1229      * "If BESCR 32:33 != 0b00 the instruction is treated as if
1230      *  the instruction form were invalid."
1231      */
1232     if (env->spr[SPR_BESCR] & BESCR_INVALID) {
1233         raise_exception_err(env, POWERPC_EXCP_PROGRAM,
1234                             POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
1235     }
1236 
1237     env->nip = env->spr[SPR_EBBRR];
1238 
1239     /* Switching to 32-bit ? Crop the nip */
1240     if (!msr_is_64bit(env, msr)) {
1241         env->nip = (uint32_t)env->spr[SPR_EBBRR];
1242     }
1243 
1244     if (s) {
1245         env->spr[SPR_BESCR] |= BESCR_GE;
1246     } else {
1247         env->spr[SPR_BESCR] &= ~BESCR_GE;
1248     }
1249 }
1250 #endif
1251 
1252 /*****************************************************************************/
1253 /* Embedded PowerPC specific helpers */
1254 void helper_40x_rfci(CPUPPCState *env)
1255 {
1256     do_rfi(env, env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3]);
1257 }
1258 
1259 void helper_rfci(CPUPPCState *env)
1260 {
1261     do_rfi(env, env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1]);
1262 }
1263 
1264 void helper_rfdi(CPUPPCState *env)
1265 {
1266     /* FIXME: choose CSRR1 or DSRR1 based on cpu type */
1267     do_rfi(env, env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1]);
1268 }
1269 
1270 void helper_rfmci(CPUPPCState *env)
1271 {
1272     /* FIXME: choose CSRR1 or MCSRR1 based on cpu type */
1273     do_rfi(env, env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1]);
1274 }
1275 #endif /* CONFIG_TCG */
1276 #endif /* !defined(CONFIG_USER_ONLY) */
1277 
1278 #ifdef CONFIG_TCG
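/*
 * tw/td trap when any condition selected by the TO field (flags) holds:
 * 0x10 = signed less than, 0x08 = signed greater than, 0x04 = equal,
 * 0x02 = unsigned less than, 0x01 = unsigned greater than.
 */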
1279 void helper_tw(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
1280                uint32_t flags)
1281 {
1282     if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
1283                   ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
1284                   ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
1285                   ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
1286                   ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
1287         raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
1288                                POWERPC_EXCP_TRAP, GETPC());
1289     }
1290 }
1291 
1292 #if defined(TARGET_PPC64)
1293 void helper_td(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
1294                uint32_t flags)
1295 {
1296     if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
1297                   ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
1298                   ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
1299                   ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
1300                   ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01))))) {
1301         raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
1302                                POWERPC_EXCP_TRAP, GETPC());
1303     }
1304 }
1305 #endif
1306 #endif
1307 
1308 #if !defined(CONFIG_USER_ONLY)
1309 /*****************************************************************************/
1310 /* PowerPC 601 specific instructions (POWER bridge) */
1311 
1312 #ifdef CONFIG_TCG
1313 void helper_rfsvc(CPUPPCState *env)
1314 {
1315     do_rfi(env, env->lr, env->ctr & 0x0000FFFF);
1316 }
1317 
1318 /* Embedded.Processor Control */
1319 static int dbell2irq(target_ulong rb)
1320 {
1321     int msg = rb & DBELL_TYPE_MASK;
1322     int irq = -1;
1323 
1324     switch (msg) {
1325     case DBELL_TYPE_DBELL:
1326         irq = PPC_INTERRUPT_DOORBELL;
1327         break;
1328     case DBELL_TYPE_DBELL_CRIT:
1329         irq = PPC_INTERRUPT_CDOORBELL;
1330         break;
1331     case DBELL_TYPE_G_DBELL:
1332     case DBELL_TYPE_G_DBELL_CRIT:
1333     case DBELL_TYPE_G_DBELL_MC:
1334         /* XXX implement */
1335     default:
1336         break;
1337     }
1338 
1339     return irq;
1340 }
1341 
1342 void helper_msgclr(CPUPPCState *env, target_ulong rb)
1343 {
1344     int irq = dbell2irq(rb);
1345 
1346     if (irq < 0) {
1347         return;
1348     }
1349 
1350     env->pending_interrupts &= ~(1 << irq);
1351 }
1352 
1353 void helper_msgsnd(target_ulong rb)
1354 {
1355     int irq = dbell2irq(rb);
1356     int pir = rb & DBELL_PIRTAG_MASK;
1357     CPUState *cs;
1358 
1359     if (irq < 0) {
1360         return;
1361     }
1362 
1363     qemu_mutex_lock_iothread();
1364     CPU_FOREACH(cs) {
1365         PowerPCCPU *cpu = POWERPC_CPU(cs);
1366         CPUPPCState *cenv = &cpu->env;
1367 
1368         if ((rb & DBELL_BRDCAST) || (cenv->spr[SPR_BOOKE_PIR] == pir)) {
1369             cenv->pending_interrupts |= 1 << irq;
1370             cpu_interrupt(cs, CPU_INTERRUPT_HARD);
1371         }
1372     }
1373     qemu_mutex_unlock_iothread();
1374 }
1375 
1376 /* Server Processor Control */
1377 
1378 static bool dbell_type_server(target_ulong rb)
1379 {
1380     /*
1381      * A Directed Hypervisor Doorbell message is sent only if the
1382      * message type is 5. All other types are reserved and the
1383      * instruction is a no-op.
1384      */
1385     return (rb & DBELL_TYPE_MASK) == DBELL_TYPE_DBELL_SERVER;
1386 }
1387 
1388 void helper_book3s_msgclr(CPUPPCState *env, target_ulong rb)
1389 {
1390     if (!dbell_type_server(rb)) {
1391         return;
1392     }
1393 
1394     env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDOORBELL);
1395 }
1396 
1397 static void book3s_msgsnd_common(int pir, int irq)
1398 {
1399     CPUState *cs;
1400 
1401     qemu_mutex_lock_iothread();
1402     CPU_FOREACH(cs) {
1403         PowerPCCPU *cpu = POWERPC_CPU(cs);
1404         CPUPPCState *cenv = &cpu->env;
1405 
1406         /* TODO: broadcast message to all threads of the same processor */
1407         if (cenv->spr_cb[SPR_PIR].default_value == pir) {
1408             cenv->pending_interrupts |= 1 << irq;
1409             cpu_interrupt(cs, CPU_INTERRUPT_HARD);
1410         }
1411     }
1412     qemu_mutex_unlock_iothread();
1413 }
1414 
1415 void helper_book3s_msgsnd(target_ulong rb)
1416 {
1417     int pir = rb & DBELL_PROCIDTAG_MASK;
1418 
1419     if (!dbell_type_server(rb)) {
1420         return;
1421     }
1422 
1423     book3s_msgsnd_common(pir, PPC_INTERRUPT_HDOORBELL);
1424 }
1425 
1426 #if defined(TARGET_PPC64)
1427 void helper_book3s_msgclrp(CPUPPCState *env, target_ulong rb)
1428 {
1429     helper_hfscr_facility_check(env, HFSCR_MSGP, "msgclrp", HFSCR_IC_MSGP);
1430 
1431     if (!dbell_type_server(rb)) {
1432         return;
1433     }
1434 
1435     env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DOORBELL);
1436 }
1437 
1438 /*
1439  * Sends a message to other threads that are on the same
1440  * multi-threaded processor
1441  */
1442 void helper_book3s_msgsndp(CPUPPCState *env, target_ulong rb)
1443 {
1444     int pir = env->spr_cb[SPR_PIR].default_value;
1445 
1446     helper_hfscr_facility_check(env, HFSCR_MSGP, "msgsndp", HFSCR_IC_MSGP);
1447 
1448     if (!dbell_type_server(rb)) {
1449         return;
1450     }
1451 
1452     /* TODO: TCG supports only one thread */
1453 
1454     book3s_msgsnd_common(pir, PPC_INTERRUPT_DOORBELL);
1455 }
1456 #endif /* TARGET_PPC64 */
1457 
1458 void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
1459                                  MMUAccessType access_type,
1460                                  int mmu_idx, uintptr_t retaddr)
1461 {
1462     CPUPPCState *env = cs->env_ptr;
1463     uint32_t insn;
1464 
1465     /* Restore state and reload the insn we executed, for filling in DSISR.  */
1466     cpu_restore_state(cs, retaddr, true);
1467     insn = cpu_ldl_code(env, env->nip);
1468 
1469     switch (env->mmu_model) {
1470     case POWERPC_MMU_SOFT_4xx:
1471         env->spr[SPR_40x_DEAR] = vaddr;
1472         break;
1473     case POWERPC_MMU_BOOKE:
1474     case POWERPC_MMU_BOOKE206:
1475         env->spr[SPR_BOOKE_DEAR] = vaddr;
1476         break;
1477     default:
1478         env->spr[SPR_DAR] = vaddr;
1479         break;
1480     }
1481 
1482     cs->exception_index = POWERPC_EXCP_ALIGN;
1483     env->error_code = insn & 0x03FF0000;
1484     cpu_loop_exit(cs);
1485 }
1486 #endif /* CONFIG_TCG */
1487 #endif /* !CONFIG_USER_ONLY */
1488