/*
 *  PowerPC exception emulation helpers for QEMU (TCG specific)
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "qemu/log.h"
#include "target/ppc/cpu.h"
#include "accel/tcg/cpu-ldst.h"
#include "exec/helper-proto.h"
#include "system/runstate.h"

#include "helper_regs.h"
#include "hw/ppc/ppc.h"
#include "internal.h"
#include "cpu.h"
#include "trace.h"

/*****************************************************************************/
/* Exceptions processing helpers */

void raise_exception_err_ra(CPUPPCState *env, uint32_t exception,
                            uint32_t error_code, uintptr_t raddr)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = exception;
    env->error_code = error_code;
    cpu_loop_exit_restore(cs, raddr);
}

void helper_raise_exception_err(CPUPPCState *env, uint32_t exception,
                                uint32_t error_code)
{
    raise_exception_err_ra(env, exception, error_code, 0);
}

void helper_raise_exception(CPUPPCState *env, uint32_t exception)
{
    raise_exception_err_ra(env, exception, 0, 0);
}

#ifndef CONFIG_USER_ONLY

static G_NORETURN void raise_exception_err(CPUPPCState *env, uint32_t exception,
                                           uint32_t error_code)
{
    raise_exception_err_ra(env, exception, error_code, 0);
}

static G_NORETURN void raise_exception(CPUPPCState *env, uint32_t exception)
{
    raise_exception_err_ra(env, exception, 0, 0);
}

#endif /* !CONFIG_USER_ONLY */

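/*
 * Conditional trap instructions (tw/twi, td/tdi).  The 5-bit TO field is
 * passed in 'flags'; each bit selects one comparison between arg1 and arg2:
 *   0x10  signed less than       0x08  signed greater than
 *   0x04  equal                  0x02  unsigned less than
 *   0x01  unsigned greater than
 * If any selected comparison holds, a Program interrupt (trap type) is
 * raised.  For example, "tw 31,0,0" (TO = 0b11111) traps unconditionally.
 */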
void helper_TW(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
               uint32_t flags)
{
    if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
                  ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
                  ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
                  ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
                  ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_TRAP, GETPC());
    }
}

#ifdef TARGET_PPC64
void helper_TD(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
               uint32_t flags)
{
    if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
                  ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
                  ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
                  ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
                  ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01))))) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_TRAP, GETPC());
    }
}
#endif /* TARGET_PPC64 */

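/*
 * SIMON-like block function used by the hashst/hashchk implementation below:
 * a 32-round Feistel-style network over the two 16-bit halves of 'x', with a
 * key schedule derived from the four 16-bit words of 'key'.  'lane' rotates
 * the key word ordering so that each 32-bit lane of the final digest is mixed
 * with a different key sequence.
 */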
static uint32_t helper_SIMON_LIKE_32_64(uint32_t x, uint64_t key, uint32_t lane)
{
    const uint16_t c = 0xfffc;
    const uint64_t z0 = 0xfa2561cdf44ac398ULL;
    uint16_t z = 0, temp;
    uint16_t k[32], eff_k[32], xleft[33], xright[33], fxleft[32];

    for (int i = 3; i >= 0; i--) {
        k[i] = key & 0xffff;
        key >>= 16;
    }
    xleft[0] = x & 0xffff;
    xright[0] = (x >> 16) & 0xffff;

    for (int i = 0; i < 28; i++) {
        z = (z0 >> (63 - i)) & 1;
        temp = ror16(k[i + 3], 3) ^ k[i + 1];
        k[i + 4] = c ^ z ^ k[i] ^ temp ^ ror16(temp, 1);
    }

    for (int i = 0; i < 8; i++) {
        eff_k[4 * i + 0] = k[4 * i + ((0 + lane) % 4)];
        eff_k[4 * i + 1] = k[4 * i + ((1 + lane) % 4)];
        eff_k[4 * i + 2] = k[4 * i + ((2 + lane) % 4)];
        eff_k[4 * i + 3] = k[4 * i + ((3 + lane) % 4)];
    }

    for (int i = 0; i < 32; i++) {
        fxleft[i] = (rol16(xleft[i], 1) &
            rol16(xleft[i], 8)) ^ rol16(xleft[i], 2);
        xleft[i + 1] = xright[i] ^ fxleft[i] ^ eff_k[i];
        xright[i + 1] = xleft[i];
    }

    return (((uint32_t)xright[32]) << 16) | xleft[32];
}

static uint64_t hash_digest(uint64_t ra, uint64_t rb, uint64_t key)
{
    uint64_t stage0_h = 0ULL, stage0_l = 0ULL;
    uint64_t stage1_h, stage1_l;

    for (int i = 0; i < 4; i++) {
        stage0_h |= ror64(rb & 0xff, 8 * (2 * i + 1));
        stage0_h |= ((ra >> 32) & 0xff) << (8 * 2 * i);
        stage0_l |= ror64((rb >> 32) & 0xff, 8 * (2 * i + 1));
        stage0_l |= (ra & 0xff) << (8 * 2 * i);
        rb >>= 8;
        ra >>= 8;
    }

    stage1_h = (uint64_t)helper_SIMON_LIKE_32_64(stage0_h >> 32, key, 0) << 32;
    stage1_h |= helper_SIMON_LIKE_32_64(stage0_h, key, 1);
    stage1_l = (uint64_t)helper_SIMON_LIKE_32_64(stage0_l >> 32, key, 2) << 32;
    stage1_l |= helper_SIMON_LIKE_32_64(stage0_l, key, 3);

    return stage1_h ^ stage1_l;
}

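/*
 * Common path for the hash store/check helpers: compute the digest of
 * (ra, rb) under 'key'.  A store writes the digest to the doubleword at
 * 'ea'; a check reloads that doubleword and raises a Program interrupt
 * (trap type) if it does not match the recomputed value, which is how a
 * ROP-protection failure is reported to the guest.
 */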
static void do_hash(CPUPPCState *env, target_ulong ea, target_ulong ra,
                    target_ulong rb, uint64_t key, bool store)
{
    uint64_t calculated_hash = hash_digest(ra, rb, key), loaded_hash;

    if (store) {
        cpu_stq_data_ra(env, ea, calculated_hash, GETPC());
    } else {
        loaded_hash = cpu_ldq_data_ra(env, ea, GETPC());
        if (loaded_hash != calculated_hash) {
            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                POWERPC_EXCP_TRAP, GETPC());
        }
    }
}

#include "qemu/guest-random.h"

#ifdef TARGET_PPC64
#define HELPER_HASH(op, key, store, dexcr_aspect)                             \
void helper_##op(CPUPPCState *env, target_ulong ea, target_ulong ra,          \
                 target_ulong rb)                                             \
{                                                                             \
    if (env->msr & R_MSR_PR_MASK) {                                           \
        if (!(env->spr[SPR_DEXCR] & R_DEXCR_PRO_##dexcr_aspect##_MASK ||      \
            env->spr[SPR_HDEXCR] & R_HDEXCR_ENF_##dexcr_aspect##_MASK))       \
            return;                                                           \
    } else if (!(env->msr & R_MSR_HV_MASK)) {                                 \
        if (!(env->spr[SPR_DEXCR] & R_DEXCR_PNH_##dexcr_aspect##_MASK ||      \
            env->spr[SPR_HDEXCR] & R_HDEXCR_ENF_##dexcr_aspect##_MASK))       \
            return;                                                           \
    } else if (!(env->msr & R_MSR_S_MASK)) {                                  \
        if (!(env->spr[SPR_HDEXCR] & R_HDEXCR_HNU_##dexcr_aspect##_MASK))     \
            return;                                                           \
    }                                                                         \
                                                                              \
    do_hash(env, ea, ra, rb, key, store);                                     \
}
#else
#define HELPER_HASH(op, key, store, dexcr_aspect)                             \
void helper_##op(CPUPPCState *env, target_ulong ea, target_ulong ra,          \
                 target_ulong rb)                                             \
{                                                                             \
    do_hash(env, ea, ra, rb, key, store);                                     \
}
#endif /* TARGET_PPC64 */

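/*
 * The expansions below provide helper_HASHST, helper_HASHCHK, helper_HASHSTP
 * and helper_HASHCHKP.  On 64-bit CPUs the relevant DEXCR/HDEXCR aspect
 * (NPHIE for the problem-state forms, PHIE for the privileged forms) must be
 * enabled for the current privilege level, otherwise the instruction is a
 * no-op.  Typical guest usage (illustrative only) is in function prologues
 * and epilogues, e.g. "hashst r0, -8(r1)" to store a hash of the saved LR
 * and the stack pointer, paired with "hashchk r0, -8(r1)" before returning.
 */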
HELPER_HASH(HASHST, env->spr[SPR_HASHKEYR], true, NPHIE)
HELPER_HASH(HASHCHK, env->spr[SPR_HASHKEYR], false, NPHIE)
HELPER_HASH(HASHSTP, env->spr[SPR_HASHPKEYR], true, PHIE)
HELPER_HASH(HASHCHKP, env->spr[SPR_HASHPKEYR], false, PHIE)

#ifndef CONFIG_USER_ONLY

void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr)
{
    CPUPPCState *env = cpu_env(cs);
    uint32_t insn;

    /* Restore state and reload the insn we executed, for filling in DSISR.  */
    cpu_restore_state(cs, retaddr);
    insn = ppc_ldl_code(env, env->nip);

    switch (env->mmu_model) {
    case POWERPC_MMU_SOFT_4xx:
        env->spr[SPR_40x_DEAR] = vaddr;
        break;
    case POWERPC_MMU_BOOKE:
    case POWERPC_MMU_BOOKE206:
        env->spr[SPR_BOOKE_DEAR] = vaddr;
        break;
    case POWERPC_MMU_REAL:
        if (env->flags & POWERPC_FLAG_PPE42) {
            env->spr[SPR_PPE42_EDR] = vaddr;
            if (access_type == MMU_DATA_STORE) {
                env->spr[SPR_PPE42_ISR] |= PPE42_ISR_ST;
            } else {
                env->spr[SPR_PPE42_ISR] &= ~PPE42_ISR_ST;
            }
        } else {
            env->spr[SPR_DAR] = vaddr;
        }
        break;
    default:
        env->spr[SPR_DAR] = vaddr;
        break;
    }

    cs->exception_index = POWERPC_EXCP_ALIGN;
    env->error_code = insn & 0x03FF0000;
    cpu_loop_exit(cs);
}

void ppc_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr vaddr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr)
{
    CPUPPCState *env = cpu_env(cs);

    switch (env->excp_model) {
#if defined(TARGET_PPC64)
    case POWERPC_EXCP_POWER8:
    case POWERPC_EXCP_POWER9:
    case POWERPC_EXCP_POWER10:
    case POWERPC_EXCP_POWER11:
        /*
         * Machine check codes can be found in processor User Manual or
         * Linux or skiboot source.
         */
        if (access_type == MMU_DATA_LOAD) {
            env->spr[SPR_DAR] = vaddr;
            env->spr[SPR_DSISR] = PPC_BIT(57);
            env->error_code = PPC_BIT(42);

        } else if (access_type == MMU_DATA_STORE) {
            /*
             * MCE for stores in POWER is asynchronous so hardware does
             * not set DAR, but QEMU can do better.
             */
            env->spr[SPR_DAR] = vaddr;
            env->error_code = PPC_BIT(36) | PPC_BIT(43) | PPC_BIT(45);
            env->error_code |= PPC_BIT(42);

        } else { /* Fetch */
            /*
             * is_prefix_insn_excp() tests !PPC_BIT(42) to avoid fetching
             * the instruction, so that must always be clear for fetches.
             */
            env->error_code = PPC_BIT(36) | PPC_BIT(44) | PPC_BIT(45);
        }
        break;
#endif
    default:
        /*
         * TODO: Check behaviour for other CPUs, for now do nothing.
         * Could add a basic MCE even if real hardware ignores.
         */
        return;
    }

    cs->exception_index = POWERPC_EXCP_MCHECK;
    cpu_loop_exit_restore(cs, retaddr);
}

void ppc_cpu_debug_excp_handler(CPUState *cs)
{
#if defined(TARGET_PPC64)
    CPUPPCState *env = cpu_env(cs);

    if (env->insns_flags2 & PPC2_ISA207S) {
        if (cs->watchpoint_hit) {
            if (cs->watchpoint_hit->flags & BP_CPU) {
                env->spr[SPR_DAR] = cs->watchpoint_hit->hitaddr;
                env->spr[SPR_DSISR] = PPC_BIT(41);
                cs->watchpoint_hit = NULL;
                raise_exception(env, POWERPC_EXCP_DSI);
            }
            cs->watchpoint_hit = NULL;
        } else if (cpu_breakpoint_test(cs, env->nip, BP_CPU)) {
            raise_exception_err(env, POWERPC_EXCP_TRACE,
                                PPC_BIT(33) | PPC_BIT(43));
        }
    }
#endif
}

bool ppc_cpu_debug_check_breakpoint(CPUState *cs)
{
#if defined(TARGET_PPC64)
    CPUPPCState *env = cpu_env(cs);

    if (env->insns_flags2 & PPC2_ISA207S) {
        target_ulong priv;

        priv = env->spr[SPR_CIABR] & PPC_BITMASK(62, 63);
        switch (priv) {
        case 0x1: /* problem */
            return env->msr & ((target_ulong)1 << MSR_PR);
        case 0x2: /* supervisor */
            return (!(env->msr & ((target_ulong)1 << MSR_PR)) &&
                    !(env->msr & ((target_ulong)1 << MSR_HV)));
        case 0x3: /* hypervisor */
            return (!(env->msr & ((target_ulong)1 << MSR_PR)) &&
                     (env->msr & ((target_ulong)1 << MSR_HV)));
        default:
            g_assert_not_reached();
        }
    }
#endif

    return false;
}

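/*
 * Data address watchpoint match conditions come from the DAWRXn register.
 * Fields used here, by ISA bit number: WT (59) watchpoint translation,
 * WTI (60) translation ignore, and the HV (61), SV (62) and PR (63)
 * privilege enables.  PPC_BIT_NR() converts the ISA bit number into the
 * bit position that extract32() expects.
 */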
bool ppc_cpu_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
{
#if defined(TARGET_PPC64)
    CPUPPCState *env = cpu_env(cs);
    bool wt, wti, hv, sv, pr;
    uint32_t dawrx;

    if ((env->insns_flags2 & PPC2_ISA207S) &&
        (wp == env->dawr_watchpoint[0])) {
        dawrx = env->spr[SPR_DAWRX0];
    } else if ((env->insns_flags2 & PPC2_ISA310) &&
               (wp == env->dawr_watchpoint[1])) {
        dawrx = env->spr[SPR_DAWRX1];
    } else {
        return false;
    }

    wt = extract32(dawrx, PPC_BIT_NR(59), 1);
    wti = extract32(dawrx, PPC_BIT_NR(60), 1);
    hv = extract32(dawrx, PPC_BIT_NR(61), 1);
    sv = extract32(dawrx, PPC_BIT_NR(62), 1);
    pr = extract32(dawrx, PPC_BIT_NR(63), 1);

    if ((env->msr & ((target_ulong)1 << MSR_PR)) && !pr) {
        return false;
    } else if ((env->msr & ((target_ulong)1 << MSR_HV)) && !hv) {
        return false;
    } else if (!sv) {
        return false;
    }

    if (!wti) {
        if (env->msr & ((target_ulong)1 << MSR_DR)) {
            return wt;
        } else {
            return !wt;
        }
    }

    return true;
#endif

    return false;
}

/*
 * This stops the machine and logs CPU state without killing QEMU (like
 * cpu_abort()) because it is often a guest error as opposed to a QEMU error,
 * so the machine can still be debugged.
 */
G_NORETURN void powerpc_checkstop(CPUPPCState *env, const char *reason)
{
    CPUState *cs = env_cpu(env);
    FILE *f;

    f = qemu_log_trylock();
    if (f) {
        fprintf(f, "Entering checkstop state: %s\n", reason);
        cpu_dump_state(cs, f, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_unlock(f);
    }

419      * This stops the machine and logs CPU state without killing QEMU
420      * (like cpu_abort()) so the machine can still be debugged (because
421      * it is often a guest error).
422      */
423     qemu_system_guest_panicked(NULL);
424     cpu_loop_exit_noexc(cs);
425 }
426 
427 /* Return true iff byteswap is needed to load instruction */
428 static inline bool insn_need_byteswap(CPUArchState *env)
429 {
430     /* SYSTEM builds TARGET_BIG_ENDIAN. Need to swap when MSR[LE] is set */
431     return !!(env->msr & ((target_ulong)1 << MSR_LE));
432 }
433 
434 uint32_t ppc_ldl_code(CPUArchState *env, target_ulong addr)
435 {
436     uint32_t insn = cpu_ldl_code(env, addr);
437 
438     if (insn_need_byteswap(env)) {
439         insn = bswap32(insn);
440     }
441 
442     return insn;
443 }
444 
445 #if defined(TARGET_PPC64)
446 void helper_attn(CPUPPCState *env)
447 {
448     /* POWER attn is unprivileged when enabled by HID, otherwise illegal */
449     if ((*env->check_attn)(env)) {
450         powerpc_checkstop(env, "host executed attn");
451     } else {
452         raise_exception_err(env, POWERPC_EXCP_HV_EMU,
453                             POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
454     }
455 }
456 
457 void helper_scv(CPUPPCState *env, uint32_t lev)
458 {
459     if (env->spr[SPR_FSCR] & (1ull << FSCR_SCV)) {
460         raise_exception_err(env, POWERPC_EXCP_SYSCALL_VECTORED, lev);
461     } else {
462         raise_exception_err(env, POWERPC_EXCP_FU, FSCR_IC_SCV);
463     }
464 }
465 
void helper_pminsn(CPUPPCState *env, uint32_t insn)
{
    CPUState *cs = env_cpu(env);

    cs->halted = 1;

    /* Condition for waking up at 0x100 */
    env->resume_as_sreset = (insn != PPC_PM_STOP) ||
        (env->spr[SPR_PSSCR] & PSSCR_EC);

    /* HDECR must not wake from PM state; it may have already fired */
    if (env->resume_as_sreset) {
        PowerPCCPU *cpu = env_archcpu(env);
        ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 0);
    }

    ppc_maybe_interrupt(env);
}

#endif /* TARGET_PPC64 */

void helper_store_msr(CPUPPCState *env, target_ulong val)
{
    uint32_t excp = hreg_store_msr(env, val, 0);

    if (excp != 0) {
        cpu_interrupt_exittb(env_cpu(env));
        raise_exception(env, excp);
    }
}

void helper_ppc_maybe_interrupt(CPUPPCState *env)
{
    ppc_maybe_interrupt(env);
}

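/*
 * Common tail for all return-from-interrupt flavours: 'nip' and 'msr' come
 * from the save/restore registers of the particular variant (SRR0/SRR1 for
 * rfi/rfid, HSRR0/HSRR1 for hrfid, the CSRR/DSRR/MCSRR pairs for the BookE
 * forms, LR/CTR for rfscv).  The operation is context synchronizing, so the
 * TB is exited and any pending TLB flush is performed.
 */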
static void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr)
{
    /* MSR:POW cannot be set by any form of rfi */
    msr &= ~(1ULL << MSR_POW);

    /* MSR:TGPR cannot be set by any form of rfi */
    if (env->flags & POWERPC_FLAG_TGPR) {
        msr &= ~(1ULL << MSR_TGPR);
    }

#ifdef TARGET_PPC64
    /* Switching to 32-bit ? Crop the nip */
    if (!msr_is_64bit(env, msr)) {
        nip = (uint32_t)nip;
    }
#else
    nip = (uint32_t)nip;
#endif
    /* XXX: beware: this is false if VLE is supported */
    env->nip = nip & ~((target_ulong)0x00000003);
    hreg_store_msr(env, msr, 1);
    trace_ppc_excp_rfi(env->nip, env->msr);
    /*
     * No need to raise an exception here, as rfi is always the last
     * insn of a TB
     */
    cpu_interrupt_exittb(env_cpu(env));
    /* Reset the reservation */
    env->reserve_addr = -1;

    /* Context synchronizing: check if TCG TLB needs flush */
    check_tlb_flush(env, false);
}

void helper_rfi(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1] & 0xfffffffful);
}

#ifdef TARGET_PPC64
void helper_rfid(CPUPPCState *env)
{
    /*
     * The architecture defines a number of rules for which bits can
     * change but in practice, we handle this in hreg_store_msr()
     * which will be called by do_rfi(), so there is no need to filter
     * here
     */
    do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1]);
}

void helper_rfscv(CPUPPCState *env)
{
    do_rfi(env, env->lr, env->ctr);
}

void helper_hrfid(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_HSRR0], env->spr[SPR_HSRR1]);
}

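/*
 * rfebb: return from event-based branch.  The next instruction address is
 * restored from EBBRR, and BESCR[GE] (global enable) is set to the value of
 * the 's' operand, i.e. "rfebb 1" re-enables EBBs while "rfebb 0" leaves
 * them disabled.
 */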
void helper_rfebb(CPUPPCState *env, target_ulong s)
{
    target_ulong msr = env->msr;

    /*
     * Handling of BESCR bits 32:33 according to PowerISA v3.1:
     *
     * "If BESCR 32:33 != 0b00 the instruction is treated as if
     *  the instruction form were invalid."
     */
    if (env->spr[SPR_BESCR] & BESCR_INVALID) {
        raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                            POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
    }

    env->nip = env->spr[SPR_EBBRR];

    /* Switching to 32-bit ? Crop the nip */
    if (!msr_is_64bit(env, msr)) {
        env->nip = (uint32_t)env->spr[SPR_EBBRR];
    }

    if (s) {
        env->spr[SPR_BESCR] |= BESCR_GE;
    } else {
        env->spr[SPR_BESCR] &= ~BESCR_GE;
    }
}

/*
 * Triggers or queues an 'ebb_excp' EBB exception. All checks
 * but FSCR, HFSCR and msr_pr must be done beforehand.
 *
 * PowerISA v3.1 isn't clear about whether an EBB should be
 * postponed or cancelled if the EBB facility is unavailable.
 * Our assumption here is that the EBB is cancelled if both
 * FSCR and HFSCR EBB facilities aren't available.
 */
static void do_ebb(CPUPPCState *env, int ebb_excp)
{
    PowerPCCPU *cpu = env_archcpu(env);

    /*
     * FSCR_EBB and FSCR_IC_EBB are the same bits used with
     * HFSCR.
     */
    helper_fscr_facility_check(env, FSCR_EBB, 0, FSCR_IC_EBB);
    helper_hfscr_facility_check(env, FSCR_EBB, "EBB", FSCR_IC_EBB);

    if (ebb_excp == POWERPC_EXCP_PERFM_EBB) {
        env->spr[SPR_BESCR] |= BESCR_PMEO;
    } else if (ebb_excp == POWERPC_EXCP_EXTERNAL_EBB) {
        env->spr[SPR_BESCR] |= BESCR_EEO;
    }

    if (FIELD_EX64(env->msr, MSR, PR)) {
        powerpc_excp(cpu, ebb_excp);
    } else {
        ppc_set_irq(cpu, PPC_INTERRUPT_EBB, 1);
    }
}

void raise_ebb_perfm_exception(CPUPPCState *env)
{
    bool perfm_ebb_enabled = env->spr[SPR_POWER_MMCR0] & MMCR0_EBE &&
                             env->spr[SPR_BESCR] & BESCR_PME &&
                             env->spr[SPR_BESCR] & BESCR_GE;

    if (!perfm_ebb_enabled) {
        return;
    }

    do_ebb(env, POWERPC_EXCP_PERFM_EBB);
}
#endif /* TARGET_PPC64 */

/*****************************************************************************/
/* Embedded PowerPC specific helpers */
void helper_40x_rfci(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3]);
}

void helper_rfci(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1]);
}

void helper_rfdi(CPUPPCState *env)
{
    /* FIXME: choose CSRR1 or DSRR1 based on cpu type */
    do_rfi(env, env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1]);
}

void helper_rfmci(CPUPPCState *env)
{
    /* FIXME: choose CSRR1 or MCSRR1 based on cpu type */
    do_rfi(env, env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1]);
}

/* Embedded.Processor Control */
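/*
 * BookE msgsnd/msgclr messages: the message type encoded in rb selects which
 * interrupt is raised or cleared.  Only the normal and critical doorbell
 * types are implemented; the guest doorbell types are accepted but ignored.
 */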
static int dbell2irq(target_ulong rb)
{
    int msg = rb & DBELL_TYPE_MASK;
    int irq = -1;

    switch (msg) {
    case DBELL_TYPE_DBELL:
        irq = PPC_INTERRUPT_DOORBELL;
        break;
    case DBELL_TYPE_DBELL_CRIT:
        irq = PPC_INTERRUPT_CDOORBELL;
        break;
    case DBELL_TYPE_G_DBELL:
    case DBELL_TYPE_G_DBELL_CRIT:
    case DBELL_TYPE_G_DBELL_MC:
        /* XXX implement */
    default:
        break;
    }

    return irq;
}

void helper_msgclr(CPUPPCState *env, target_ulong rb)
{
    int irq = dbell2irq(rb);

    if (irq < 0) {
        return;
    }

    ppc_set_irq(env_archcpu(env), irq, 0);
}

void helper_msgsnd(target_ulong rb)
{
    int irq = dbell2irq(rb);
    int pir = rb & DBELL_PIRTAG_MASK;
    CPUState *cs;

    if (irq < 0) {
        return;
    }

    bql_lock();
    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);
        CPUPPCState *cenv = &cpu->env;

        if ((rb & DBELL_BRDCAST_MASK) || (cenv->spr[SPR_BOOKE_PIR] == pir)) {
            ppc_set_irq(cpu, irq, 1);
        }
    }
    bql_unlock();
}

/* Server Processor Control */

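/*
 * Book3S (server) doorbells: msgsnd/msgclr operate on Directed Hypervisor
 * Doorbells, msgsndp/msgclrp on Directed Privileged Doorbells.  The rb
 * operand carries the message type, a broadcast selector and either a PIR
 * (for msgsnd on ISA v3.0 and later) or a thread identifier (TIR) for the
 * per-core forms.
 */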
static bool dbell_type_server(target_ulong rb)
{
    /*
     * A Directed Hypervisor Doorbell message is sent only if the
     * message type is 5. All other types are reserved and the
     * instruction is a no-op
     */
    return (rb & DBELL_TYPE_MASK) == DBELL_TYPE_DBELL_SERVER;
}

static inline bool dbell_bcast_core(target_ulong rb)
{
    return (rb & DBELL_BRDCAST_MASK) == DBELL_BRDCAST_CORE;
}

static inline bool dbell_bcast_subproc(target_ulong rb)
{
    return (rb & DBELL_BRDCAST_MASK) == DBELL_BRDCAST_SUBPROC;
}

/*
 * Send an interrupt to a thread in the same core as the CPU described by env.
 */
static void msgsnd_core_tir(CPUPPCState *env, uint32_t target_tir, int irq)
{
    PowerPCCPU *cpu = env_archcpu(env);
    CPUState *cs = env_cpu(env);

    if (ppc_cpu_lpar_single_threaded(cs)) {
        if (target_tir == 0) {
            ppc_set_irq(cpu, irq, 1);
        }
    } else {
        CPUState *ccs;

        /* Does iothread need to be locked for walking CPU list? */
        bql_lock();
        THREAD_SIBLING_FOREACH(cs, ccs) {
            PowerPCCPU *ccpu = POWERPC_CPU(ccs);
            if (target_tir == ppc_cpu_tir(ccpu)) {
                ppc_set_irq(ccpu, irq, 1);
                break;
            }
        }
        bql_unlock();
    }
}

void helper_book3s_msgclr(CPUPPCState *env, target_ulong rb)
{
    if (!dbell_type_server(rb)) {
        return;
    }

    ppc_set_irq(env_archcpu(env), PPC_INTERRUPT_HDOORBELL, 0);
}

void helper_book3s_msgsnd(CPUPPCState *env, target_ulong rb)
{
    int pir = rb & DBELL_PROCIDTAG_MASK;
    bool brdcast = false;
    CPUState *cs, *ccs;
    PowerPCCPU *cpu;

    if (!dbell_type_server(rb)) {
        return;
    }

    /* POWER8 msgsnd is like msgsndp (targets a thread within core) */
    if (!(env->insns_flags2 & PPC2_ISA300)) {
        msgsnd_core_tir(env, rb & PPC_BITMASK(57, 63), PPC_INTERRUPT_HDOORBELL);
        return;
    }

    /* POWER9 and later msgsnd is a global (targets any thread) */
    cpu = ppc_get_vcpu_by_pir(pir);
    if (!cpu) {
        return;
    }
    cs = CPU(cpu);

    if (dbell_bcast_core(rb) || (dbell_bcast_subproc(rb) &&
                                 (env->flags & POWERPC_FLAG_SMT_1LPAR))) {
        brdcast = true;
    }

    if (ppc_cpu_core_single_threaded(cs) || !brdcast) {
        ppc_set_irq(cpu, PPC_INTERRUPT_HDOORBELL, 1);
        return;
    }

    /*
     * Why is bql needed for walking CPU list? Answer seems to be because ppc
     * irq handling needs it, but ppc_set_irq takes the lock itself if needed,
     * so could this be removed?
     */
    bql_lock();
    THREAD_SIBLING_FOREACH(cs, ccs) {
        ppc_set_irq(POWERPC_CPU(ccs), PPC_INTERRUPT_HDOORBELL, 1);
    }
    bql_unlock();
}

#ifdef TARGET_PPC64
void helper_book3s_msgclrp(CPUPPCState *env, target_ulong rb)
{
    helper_hfscr_facility_check(env, HFSCR_MSGP, "msgclrp", HFSCR_IC_MSGP);

    if (!dbell_type_server(rb)) {
        return;
    }

    ppc_set_irq(env_archcpu(env), PPC_INTERRUPT_DOORBELL, 0);
}

/*
 * Send a message to another thread on the same
 * multi-threaded processor.
 */
void helper_book3s_msgsndp(CPUPPCState *env, target_ulong rb)
{
    helper_hfscr_facility_check(env, HFSCR_MSGP, "msgsndp", HFSCR_IC_MSGP);

    if (!dbell_type_server(rb)) {
        return;
    }

    msgsnd_core_tir(env, rb & PPC_BITMASK(57, 63), PPC_INTERRUPT_DOORBELL);
}
#endif /* TARGET_PPC64 */

/* Single-step tracing */
void helper_book3s_trace(CPUPPCState *env, target_ulong prev_ip)
{
    uint32_t error_code = 0;
    if (env->insns_flags2 & PPC2_ISA207S) {
        /* Load/store reporting, SRR1[35, 36] and SDAR, are not implemented. */
        env->spr[SPR_POWER_SIAR] = prev_ip;
        error_code = PPC_BIT(33);
    }
    raise_exception_err(env, POWERPC_EXCP_TRACE, error_code);
}
#endif /* !CONFIG_USER_ONLY */
864