1 /*
2 * PowerPC exception emulation helpers for QEMU.
3 *
4 * Copyright (c) 2003-2007 Jocelyn Mayer
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "qemu/osdep.h"
20 #include "qemu/main-loop.h"
21 #include "qemu/log.h"
22 #include "sysemu/sysemu.h"
23 #include "sysemu/runstate.h"
24 #include "cpu.h"
25 #include "exec/exec-all.h"
26 #include "internal.h"
27 #include "helper_regs.h"
28 #include "hw/ppc/ppc.h"
29
30 #include "trace.h"
31
32 #ifdef CONFIG_TCG
33 #include "sysemu/tcg.h"
34 #include "exec/helper-proto.h"
35 #include "exec/cpu_ldst.h"
36 #endif
37
38 /*****************************************************************************/
39 /* Exception processing */
40 #ifndef CONFIG_USER_ONLY
41
42 static const char *powerpc_excp_name(int excp)
43 {
44 switch (excp) {
45 case POWERPC_EXCP_CRITICAL: return "CRITICAL";
46 case POWERPC_EXCP_MCHECK: return "MCHECK";
47 case POWERPC_EXCP_DSI: return "DSI";
48 case POWERPC_EXCP_ISI: return "ISI";
49 case POWERPC_EXCP_EXTERNAL: return "EXTERNAL";
50 case POWERPC_EXCP_ALIGN: return "ALIGN";
51 case POWERPC_EXCP_PROGRAM: return "PROGRAM";
52 case POWERPC_EXCP_FPU: return "FPU";
53 case POWERPC_EXCP_SYSCALL: return "SYSCALL";
54 case POWERPC_EXCP_APU: return "APU";
55 case POWERPC_EXCP_DECR: return "DECR";
56 case POWERPC_EXCP_FIT: return "FIT";
57 case POWERPC_EXCP_WDT: return "WDT";
58 case POWERPC_EXCP_DTLB: return "DTLB";
59 case POWERPC_EXCP_ITLB: return "ITLB";
60 case POWERPC_EXCP_DEBUG: return "DEBUG";
61 case POWERPC_EXCP_SPEU: return "SPEU";
62 case POWERPC_EXCP_EFPDI: return "EFPDI";
63 case POWERPC_EXCP_EFPRI: return "EFPRI";
64 case POWERPC_EXCP_EPERFM: return "EPERFM";
65 case POWERPC_EXCP_DOORI: return "DOORI";
66 case POWERPC_EXCP_DOORCI: return "DOORCI";
67 case POWERPC_EXCP_GDOORI: return "GDOORI";
68 case POWERPC_EXCP_GDOORCI: return "GDOORCI";
69 case POWERPC_EXCP_HYPPRIV: return "HYPPRIV";
70 case POWERPC_EXCP_RESET: return "RESET";
71 case POWERPC_EXCP_DSEG: return "DSEG";
72 case POWERPC_EXCP_ISEG: return "ISEG";
73 case POWERPC_EXCP_HDECR: return "HDECR";
74 case POWERPC_EXCP_TRACE: return "TRACE";
75 case POWERPC_EXCP_HDSI: return "HDSI";
76 case POWERPC_EXCP_HISI: return "HISI";
77 case POWERPC_EXCP_HDSEG: return "HDSEG";
78 case POWERPC_EXCP_HISEG: return "HISEG";
79 case POWERPC_EXCP_VPU: return "VPU";
80 case POWERPC_EXCP_PIT: return "PIT";
81 case POWERPC_EXCP_EMUL: return "EMUL";
82 case POWERPC_EXCP_IFTLB: return "IFTLB";
83 case POWERPC_EXCP_DLTLB: return "DLTLB";
84 case POWERPC_EXCP_DSTLB: return "DSTLB";
85 case POWERPC_EXCP_FPA: return "FPA";
86 case POWERPC_EXCP_DABR: return "DABR";
87 case POWERPC_EXCP_IABR: return "IABR";
88 case POWERPC_EXCP_SMI: return "SMI";
89 case POWERPC_EXCP_PERFM: return "PERFM";
90 case POWERPC_EXCP_THERM: return "THERM";
91 case POWERPC_EXCP_VPUA: return "VPUA";
92 case POWERPC_EXCP_SOFTP: return "SOFTP";
93 case POWERPC_EXCP_MAINT: return "MAINT";
94 case POWERPC_EXCP_MEXTBR: return "MEXTBR";
95 case POWERPC_EXCP_NMEXTBR: return "NMEXTBR";
96 case POWERPC_EXCP_ITLBE: return "ITLBE";
97 case POWERPC_EXCP_DTLBE: return "DTLBE";
98 case POWERPC_EXCP_VSXU: return "VSXU";
99 case POWERPC_EXCP_FU: return "FU";
100 case POWERPC_EXCP_HV_EMU: return "HV_EMU";
101 case POWERPC_EXCP_HV_MAINT: return "HV_MAINT";
102 case POWERPC_EXCP_HV_FU: return "HV_FU";
103 case POWERPC_EXCP_SDOOR: return "SDOOR";
104 case POWERPC_EXCP_SDOOR_HV: return "SDOOR_HV";
105 case POWERPC_EXCP_HVIRT: return "HVIRT";
106 case POWERPC_EXCP_SYSCALL_VECTORED: return "SYSCALL_VECTORED";
107 default:
108 g_assert_not_reached();
109 }
110 }
111
112 static void dump_syscall(CPUPPCState *env)
113 {
114 qemu_log_mask(CPU_LOG_INT, "syscall r0=%016" PRIx64
115 " r3=%016" PRIx64 " r4=%016" PRIx64 " r5=%016" PRIx64
116 " r6=%016" PRIx64 " r7=%016" PRIx64 " r8=%016" PRIx64
117 " nip=" TARGET_FMT_lx "\n",
118 ppc_dump_gpr(env, 0), ppc_dump_gpr(env, 3),
119 ppc_dump_gpr(env, 4), ppc_dump_gpr(env, 5),
120 ppc_dump_gpr(env, 6), ppc_dump_gpr(env, 7),
121 ppc_dump_gpr(env, 8), env->nip);
122 }
123
124 static void dump_hcall(CPUPPCState *env)
125 {
126 qemu_log_mask(CPU_LOG_INT, "hypercall r3=%016" PRIx64
127 " r4=%016" PRIx64 " r5=%016" PRIx64 " r6=%016" PRIx64
128 " r7=%016" PRIx64 " r8=%016" PRIx64 " r9=%016" PRIx64
129 " r10=%016" PRIx64 " r11=%016" PRIx64 " r12=%016" PRIx64
130 " nip=" TARGET_FMT_lx "\n",
131 ppc_dump_gpr(env, 3), ppc_dump_gpr(env, 4),
132 ppc_dump_gpr(env, 5), ppc_dump_gpr(env, 6),
133 ppc_dump_gpr(env, 7), ppc_dump_gpr(env, 8),
134 ppc_dump_gpr(env, 9), ppc_dump_gpr(env, 10),
135 ppc_dump_gpr(env, 11), ppc_dump_gpr(env, 12),
136 env->nip);
137 }
138
139 #ifdef CONFIG_TCG
140 /* Return true iff byteswap is needed to load instruction */
141 static inline bool insn_need_byteswap(CPUArchState *env)
142 {
143 /* SYSTEM builds TARGET_BIG_ENDIAN. Need to swap when MSR[LE] is set */
144 return !!(env->msr & ((target_ulong)1 << MSR_LE));
145 }
146
147 static uint32_t ppc_ldl_code(CPUArchState *env, target_ulong addr)
148 {
149 uint32_t insn = cpu_ldl_code(env, addr);
150
151 if (insn_need_byteswap(env)) {
152 insn = bswap32(insn);
153 }
154
155 return insn;
156 }
157
158 #endif
159
160 static void ppc_excp_debug_sw_tlb(CPUPPCState *env, int excp)
161 {
162 const char *es;
163 target_ulong *miss, *cmp;
164 int en;
165
166 if (!qemu_loglevel_mask(CPU_LOG_MMU)) {
167 return;
168 }
169
170 if (excp == POWERPC_EXCP_IFTLB) {
171 es = "I";
172 en = 'I';
173 miss = &env->spr[SPR_IMISS];
174 cmp = &env->spr[SPR_ICMP];
175 } else {
176 if (excp == POWERPC_EXCP_DLTLB) {
177 es = "DL";
178 } else {
179 es = "DS";
180 }
181 en = 'D';
182 miss = &env->spr[SPR_DMISS];
183 cmp = &env->spr[SPR_DCMP];
184 }
185 qemu_log("6xx %sTLB miss: %cM " TARGET_FMT_lx " %cC "
186 TARGET_FMT_lx " H1 " TARGET_FMT_lx " H2 "
187 TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp,
188 env->spr[SPR_HASH1], env->spr[SPR_HASH2],
189 env->error_code);
190 }
191
192 #ifdef TARGET_PPC64
193 static int powerpc_reset_wakeup(CPUPPCState *env, int excp, target_ulong *msr)
194 {
195 /* We no longer are in a PM state */
196 env->resume_as_sreset = false;
197
198 /* Always pretend to be returning from doze, as we don't lose state */
199 *msr |= SRR1_WS_NOLOSS;
200
201 /* Machine checks are sent normally */
202 if (excp == POWERPC_EXCP_MCHECK) {
203 return excp;
204 }
205 switch (excp) {
206 case POWERPC_EXCP_RESET:
207 *msr |= SRR1_WAKERESET;
208 break;
209 case POWERPC_EXCP_EXTERNAL:
210 *msr |= SRR1_WAKEEE;
211 break;
212 case POWERPC_EXCP_DECR:
213 *msr |= SRR1_WAKEDEC;
214 break;
215 case POWERPC_EXCP_SDOOR:
216 *msr |= SRR1_WAKEDBELL;
217 break;
218 case POWERPC_EXCP_SDOOR_HV:
219 *msr |= SRR1_WAKEHDBELL;
220 break;
221 case POWERPC_EXCP_HV_MAINT:
222 *msr |= SRR1_WAKEHMI;
223 break;
224 case POWERPC_EXCP_HVIRT:
225 *msr |= SRR1_WAKEHVI;
226 break;
227 default:
228 cpu_abort(env_cpu(env),
229 "Unsupported exception %d in Power Save mode\n", excp);
230 }
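    /* Any other wake event is delivered to the guest as a system reset */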
231 return POWERPC_EXCP_RESET;
232 }
233
234 /*
235 * AIL - Alternate Interrupt Location, a mode that allows interrupts to be
236 * taken with the MMU on, and which uses an alternate location (e.g., so the
237 * kernel/hv can map the vectors there with an effective address).
238 *
239  * An interrupt is considered to be taken "with AIL" or "AIL applies" if it
240  * is delivered in this way. AIL requires the LPCR to be set to enable this
241 * mode, and then a number of conditions have to be true for AIL to apply.
242 *
243 * First of all, SRESET, MCE, and HMI are always delivered without AIL, because
244 * they specifically want to be in real mode (e.g., the MCE might be signaling
245 * a SLB multi-hit which requires SLB flush before the MMU can be enabled).
246 *
247 * After that, behaviour depends on the current MSR[IR], MSR[DR], MSR[HV],
248 * whether or not the interrupt changes MSR[HV] from 0 to 1, and the current
249 * radix mode (LPCR[HR]).
250 *
251 * POWER8, POWER9 with LPCR[HR]=0
252 * | LPCR[AIL] | MSR[IR||DR] | MSR[HV] | new MSR[HV] | AIL |
253 * +-----------+-------------+---------+-------------+-----+
254 * | a | 00/01/10 | x | x | 0 |
255 * | a | 11 | 0 | 1 | 0 |
256 * | a | 11 | 1 | 1 | a |
257 * | a | 11 | 0 | 0 | a |
258 * +-------------------------------------------------------+
259 *
260 * POWER9 with LPCR[HR]=1
261 * | LPCR[AIL] | MSR[IR||DR] | MSR[HV] | new MSR[HV] | AIL |
262 * +-----------+-------------+---------+-------------+-----+
263 * | a | 00/01/10 | x | x | 0 |
264 * | a | 11 | x | x | a |
265 * +-------------------------------------------------------+
266 *
267  * The difference on POWER9 is that MSR[HV] 0->1 interrupts can be sent to
268 * the hypervisor in AIL mode if the guest is radix. This is good for
269 * performance but allows the guest to influence the AIL of hypervisor
270 * interrupts using its MSR, and also the hypervisor must disallow guest
271 * interrupts (MSR[HV] 0->0) from using AIL if the hypervisor does not want to
272 * use AIL for its MSR[HV] 0->1 interrupts.
273 *
274 * POWER10 addresses those issues with a new LPCR[HAIL] bit that is applied to
275 * interrupts that begin execution with MSR[HV]=1 (so both MSR[HV] 0->1 and
276 * MSR[HV] 1->1).
277 *
278 * HAIL=1 is equivalent to AIL=3, for interrupts delivered with MSR[HV]=1.
279 *
280 * POWER10 behaviour is
281 * | LPCR[AIL] | LPCR[HAIL] | MSR[IR||DR] | MSR[HV] | new MSR[HV] | AIL |
282 * +-----------+------------+-------------+---------+-------------+-----+
283 * | a | h | 00/01/10 | 0 | 0 | 0 |
284 * | a | h | 11 | 0 | 0 | a |
285 * | a | h | x | 0 | 1 | h |
286 * | a | h | 00/01/10 | 1 | 1 | 0 |
287 * | a | h | 11 | 1 | 1 | h |
288 * +--------------------------------------------------------------------+
289 */
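/*
 * As a worked example of the above: with LPCR[AIL]=3 and the conditions
 * met, a decrementer interrupt normally taken at vector 0x900 is instead
 * delivered at effective address 0xc000000000004900 with MSR[IR]=MSR[DR]=1
 * (see the offsets applied below).
 */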
290 static void ppc_excp_apply_ail(PowerPCCPU *cpu, int excp, target_ulong msr,
291 target_ulong *new_msr, target_ulong *vector)
292 {
293 PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
294 CPUPPCState *env = &cpu->env;
295 bool mmu_all_on = ((msr >> MSR_IR) & 1) && ((msr >> MSR_DR) & 1);
296 bool hv_escalation = !(msr & MSR_HVB) && (*new_msr & MSR_HVB);
297 int ail = 0;
298
299 if (excp == POWERPC_EXCP_MCHECK ||
300 excp == POWERPC_EXCP_RESET ||
301 excp == POWERPC_EXCP_HV_MAINT) {
302 /* SRESET, MCE, HMI never apply AIL */
303 return;
304 }
305
306 if (!(pcc->lpcr_mask & LPCR_AIL)) {
307 /* This CPU does not have AIL */
308 return;
309 }
310
311 /* P8 & P9 */
312 if (!(pcc->lpcr_mask & LPCR_HAIL)) {
313 if (!mmu_all_on) {
314 /* AIL only works if MSR[IR] and MSR[DR] are both enabled. */
315 return;
316 }
317 if (hv_escalation && !(env->spr[SPR_LPCR] & LPCR_HR)) {
318 /*
319 * AIL does not work if there is a MSR[HV] 0->1 transition and the
320 * partition is in HPT mode. For radix guests, such interrupts are
321 * allowed to be delivered to the hypervisor in ail mode.
322 */
323 return;
324 }
325
326 ail = (env->spr[SPR_LPCR] & LPCR_AIL) >> LPCR_AIL_SHIFT;
327 if (ail == 0 || ail == 1) {
328 /* AIL=1 is reserved, treat it like AIL=0 */
329 return;
330 }
331
332 /* P10 and up */
333 } else {
334 if (!mmu_all_on && !hv_escalation) {
335 /*
336 * AIL works for HV interrupts even with guest MSR[IR/DR] disabled.
337 * Guest->guest and HV->HV interrupts do require MMU on.
338 */
339 return;
340 }
341
342 if (*new_msr & MSR_HVB) {
343 if (!(env->spr[SPR_LPCR] & LPCR_HAIL)) {
344 /* HV interrupts depend on LPCR[HAIL] */
345 return;
346 }
347 ail = 3; /* HAIL=1 gives AIL=3 behaviour for HV interrupts */
348 } else {
349 ail = (env->spr[SPR_LPCR] & LPCR_AIL) >> LPCR_AIL_SHIFT;
350 }
351 if (ail == 0 || ail == 1 || ail == 2) {
352 /* AIL=1 and AIL=2 are reserved, treat them like AIL=0 */
353 return;
354 }
355 }
356
357 /*
358 * AIL applies, so the new MSR gets IR and DR set, and an offset applied
359 * to the new IP.
360 */
361 *new_msr |= (1 << MSR_IR) | (1 << MSR_DR);
362
363 if (excp != POWERPC_EXCP_SYSCALL_VECTORED) {
364 if (ail == 2) {
365 *vector |= 0x0000000000018000ull;
366 } else if (ail == 3) {
367 *vector |= 0xc000000000004000ull;
368 }
369 } else {
370 /*
371 * scv AIL is a little different. AIL=2 does not change the address,
372 * only the MSR. AIL=3 replaces the 0x17000 base with 0xc...3000.
373 */
374 if (ail == 3) {
375 *vector &= ~0x0000000000017000ull; /* Un-apply the base offset */
376 *vector |= 0xc000000000003000ull; /* Apply scv's AIL=3 offset */
377 }
378 }
379 }
380 #endif /* TARGET_PPC64 */
381
382 static void powerpc_reset_excp_state(PowerPCCPU *cpu)
383 {
384 CPUState *cs = CPU(cpu);
385 CPUPPCState *env = &cpu->env;
386
387 /* Reset exception state */
388 cs->exception_index = POWERPC_EXCP_NONE;
389 env->error_code = 0;
390 }
391
392 static void powerpc_set_excp_state(PowerPCCPU *cpu, target_ulong vector,
393 target_ulong msr)
394 {
395 CPUPPCState *env = &cpu->env;
396
397 assert((msr & env->msr_mask) == msr);
398
399 /*
400  * We don't use hreg_store_msr here as we have already handled any
401  * special case that could occur. Just store MSR and update hflags
402 *
403 * Note: We *MUST* not use hreg_store_msr() as-is anyway because it will
404 * prevent setting of the HV bit which some exceptions might need to do.
405 */
406 env->nip = vector;
407 env->msr = msr;
408 hreg_compute_hflags(env);
409 ppc_maybe_interrupt(env);
410
411 powerpc_reset_excp_state(cpu);
412
413 /*
414 * Any interrupt is context synchronizing, check if TCG TLB needs
415 * a delayed flush on ppc64
416 */
417 check_tlb_flush(env, false);
418
419 /* Reset the reservation */
420 env->reserve_addr = -1;
421 }
422
423 #ifdef CONFIG_TCG
424 /*
425 * This stops the machine and logs CPU state without killing QEMU (like
426 * cpu_abort()) because it is often a guest error as opposed to a QEMU error,
427 * so the machine can still be debugged.
428 */
429 static G_NORETURN void powerpc_checkstop(CPUPPCState *env, const char *reason)
430 {
431 CPUState *cs = env_cpu(env);
432 FILE *f;
433
434 f = qemu_log_trylock();
435 if (f) {
436 fprintf(f, "Entering checkstop state: %s\n", reason);
437 cpu_dump_state(cs, f, CPU_DUMP_FPU | CPU_DUMP_CCOP);
438 qemu_log_unlock(f);
439 }
440
441 /*
442 * This stops the machine and logs CPU state without killing QEMU
443 * (like cpu_abort()) so the machine can still be debugged (because
444 * it is often a guest error).
445 */
446 qemu_system_guest_panicked(NULL);
447 cpu_loop_exit_noexc(cs);
448 }
449
450 #if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
451 void helper_attn(CPUPPCState *env)
452 {
453 /* POWER attn is unprivileged when enabled by HID, otherwise illegal */
454 if ((*env->check_attn)(env)) {
455 powerpc_checkstop(env, "host executed attn");
456 } else {
457 raise_exception_err(env, POWERPC_EXCP_HV_EMU,
458 POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
459 }
460 }
461 #endif
462 #endif /* CONFIG_TCG */
463
464 static void powerpc_mcheck_checkstop(CPUPPCState *env)
465 {
466 /* KVM guests always have MSR[ME] enabled */
467 #ifdef CONFIG_TCG
468 if (FIELD_EX64(env->msr, MSR, ME)) {
469 return;
470 }
471
472 powerpc_checkstop(env, "machine check with MSR[ME]=0");
473 #endif
474 }
475
476 static void powerpc_excp_40x(PowerPCCPU *cpu, int excp)
477 {
478 CPUPPCState *env = &cpu->env;
479 target_ulong msr, new_msr, vector;
480 int srr0 = SPR_SRR0, srr1 = SPR_SRR1;
481
482 /* new srr1 value excluding must-be-zero bits */
483 msr = env->msr & ~0x783f0000ULL;
484
485 /* new interrupt handler msr preserves ME unless explicitly overridden */
486 new_msr = env->msr & (((target_ulong)1 << MSR_ME));
487
488 /* HV emu assistance interrupt only exists on server arch 2.05 or later */
489 if (excp == POWERPC_EXCP_HV_EMU) {
490 excp = POWERPC_EXCP_PROGRAM;
491 }
492
493 vector = env->excp_vectors[excp];
494 if (vector == (target_ulong)-1ULL) {
495 cpu_abort(env_cpu(env),
496 "Raised an exception without defined vector %d\n", excp);
497 }
498 vector |= env->excp_prefix;
499
500 switch (excp) {
501 case POWERPC_EXCP_CRITICAL: /* Critical input */
502 srr0 = SPR_40x_SRR2;
503 srr1 = SPR_40x_SRR3;
504 break;
505 case POWERPC_EXCP_MCHECK: /* Machine check exception */
506 powerpc_mcheck_checkstop(env);
507 /* machine check exceptions don't have ME set */
508 new_msr &= ~((target_ulong)1 << MSR_ME);
509 srr0 = SPR_40x_SRR2;
510 srr1 = SPR_40x_SRR3;
511 break;
512 case POWERPC_EXCP_DSI: /* Data storage exception */
513 trace_ppc_excp_dsi(env->spr[SPR_40x_ESR], env->spr[SPR_40x_DEAR]);
514 break;
515 case POWERPC_EXCP_ISI: /* Instruction storage exception */
516 trace_ppc_excp_isi(msr, env->nip);
517 break;
518 case POWERPC_EXCP_EXTERNAL: /* External input */
519 break;
520 case POWERPC_EXCP_ALIGN: /* Alignment exception */
521 break;
522 case POWERPC_EXCP_PROGRAM: /* Program exception */
523 switch (env->error_code & ~0xF) {
524 case POWERPC_EXCP_FP:
525 if (!FIELD_EX64_FE(env->msr) || !FIELD_EX64(env->msr, MSR, FP)) {
526 trace_ppc_excp_fp_ignore();
527 powerpc_reset_excp_state(cpu);
528 return;
529 }
530 env->spr[SPR_40x_ESR] = ESR_FP;
531 break;
532 case POWERPC_EXCP_INVAL:
533 trace_ppc_excp_inval(env->nip);
534 env->spr[SPR_40x_ESR] = ESR_PIL;
535 break;
536 case POWERPC_EXCP_PRIV:
537 env->spr[SPR_40x_ESR] = ESR_PPR;
538 break;
539 case POWERPC_EXCP_TRAP:
540 env->spr[SPR_40x_ESR] = ESR_PTR;
541 break;
542 default:
543 cpu_abort(env_cpu(env), "Invalid program exception %d. Aborting\n",
544 env->error_code);
545 break;
546 }
547 break;
548 case POWERPC_EXCP_SYSCALL: /* System call exception */
549 dump_syscall(env);
550
551 /*
552 * We need to correct the NIP which in this case is supposed
553 * to point to the next instruction
554 */
555 env->nip += 4;
556 break;
557 case POWERPC_EXCP_FIT: /* Fixed-interval timer interrupt */
558 trace_ppc_excp_print("FIT");
559 break;
560 case POWERPC_EXCP_WDT: /* Watchdog timer interrupt */
561 trace_ppc_excp_print("WDT");
562 break;
563 case POWERPC_EXCP_DTLB: /* Data TLB error */
564 case POWERPC_EXCP_ITLB: /* Instruction TLB error */
565 break;
566 case POWERPC_EXCP_PIT: /* Programmable interval timer interrupt */
567 trace_ppc_excp_print("PIT");
568 break;
569 case POWERPC_EXCP_DEBUG: /* Debug interrupt */
570 cpu_abort(env_cpu(env), "%s exception not implemented\n",
571 powerpc_excp_name(excp));
572 break;
573 default:
574 cpu_abort(env_cpu(env), "Invalid PowerPC exception %d. Aborting\n",
575 excp);
576 break;
577 }
578
579 env->spr[srr0] = env->nip;
580 env->spr[srr1] = msr;
581 powerpc_set_excp_state(cpu, vector, new_msr);
582 }
583
584 static void powerpc_excp_6xx(PowerPCCPU *cpu, int excp)
585 {
586 CPUPPCState *env = &cpu->env;
587 target_ulong msr, new_msr, vector;
588
589 /* new srr1 value excluding must-be-zero bits */
590 msr = env->msr & ~0x783f0000ULL;
591
592 /* new interrupt handler msr preserves ME unless explicitly overridden */
593 new_msr = env->msr & ((target_ulong)1 << MSR_ME);
594
595 /* HV emu assistance interrupt only exists on server arch 2.05 or later */
596 if (excp == POWERPC_EXCP_HV_EMU) {
597 excp = POWERPC_EXCP_PROGRAM;
598 }
599
600 vector = env->excp_vectors[excp];
601 if (vector == (target_ulong)-1ULL) {
602 cpu_abort(env_cpu(env),
603 "Raised an exception without defined vector %d\n", excp);
604 }
605 vector |= env->excp_prefix;
606
607 switch (excp) {
608 case POWERPC_EXCP_CRITICAL: /* Critical input */
609 break;
610 case POWERPC_EXCP_MCHECK: /* Machine check exception */
611 powerpc_mcheck_checkstop(env);
612 /* machine check exceptions don't have ME set */
613 new_msr &= ~((target_ulong)1 << MSR_ME);
614 break;
615 case POWERPC_EXCP_DSI: /* Data storage exception */
616 trace_ppc_excp_dsi(env->spr[SPR_DSISR], env->spr[SPR_DAR]);
617 break;
618 case POWERPC_EXCP_ISI: /* Instruction storage exception */
619 trace_ppc_excp_isi(msr, env->nip);
620 msr |= env->error_code;
621 break;
622 case POWERPC_EXCP_EXTERNAL: /* External input */
623 break;
624 case POWERPC_EXCP_ALIGN: /* Alignment exception */
625 /* Get rS/rD and rA from faulting opcode */
626 /*
627 * Note: the opcode fields will not be set properly for a
628 * direct store load/store, but nobody cares as nobody
629 * actually uses direct store segments.
630 */
631 env->spr[SPR_DSISR] |= (env->error_code & 0x03FF0000) >> 16;
632 break;
633 case POWERPC_EXCP_PROGRAM: /* Program exception */
634 switch (env->error_code & ~0xF) {
635 case POWERPC_EXCP_FP:
636 if (!FIELD_EX64_FE(env->msr) || !FIELD_EX64(env->msr, MSR, FP)) {
637 trace_ppc_excp_fp_ignore();
638 powerpc_reset_excp_state(cpu);
639 return;
640 }
641 /*
642 * NIP always points to the faulting instruction for FP exceptions,
643 * so always use store_next and claim we are precise in the MSR.
644 */
645 msr |= 0x00100000;
646 break;
647 case POWERPC_EXCP_INVAL:
648 trace_ppc_excp_inval(env->nip);
649 msr |= 0x00080000;
650 break;
651 case POWERPC_EXCP_PRIV:
652 msr |= 0x00040000;
653 break;
654 case POWERPC_EXCP_TRAP:
655 msr |= 0x00020000;
656 break;
657 default:
658 /* Should never occur */
659 cpu_abort(env_cpu(env), "Invalid program exception %d. Aborting\n",
660 env->error_code);
661 break;
662 }
663 break;
664 case POWERPC_EXCP_SYSCALL: /* System call exception */
665 dump_syscall(env);
666
667 /*
668 * We need to correct the NIP which in this case is supposed
669 * to point to the next instruction
670 */
671 env->nip += 4;
672 break;
673 case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */
674 case POWERPC_EXCP_DECR: /* Decrementer exception */
675 break;
676 case POWERPC_EXCP_DTLB: /* Data TLB error */
677 case POWERPC_EXCP_ITLB: /* Instruction TLB error */
678 break;
679 case POWERPC_EXCP_RESET: /* System reset exception */
680 if (FIELD_EX64(env->msr, MSR, POW)) {
681 cpu_abort(env_cpu(env),
682 "Trying to deliver power-saving system reset exception "
683 "%d with no HV support\n", excp);
684 }
685 break;
686 case POWERPC_EXCP_TRACE: /* Trace exception */
687 break;
688 case POWERPC_EXCP_IFTLB: /* Instruction fetch TLB error */
689 case POWERPC_EXCP_DLTLB: /* Data load TLB miss */
690 case POWERPC_EXCP_DSTLB: /* Data store TLB miss */
691 /* Swap temporary saved registers with GPRs */
692 if (!(new_msr & ((target_ulong)1 << MSR_TGPR))) {
693 new_msr |= (target_ulong)1 << MSR_TGPR;
694 hreg_swap_gpr_tgpr(env);
695 }
696
697 ppc_excp_debug_sw_tlb(env, excp);
698
699 msr |= env->crf[0] << 28;
700 msr |= env->error_code; /* key, D/I, S/L bits */
701 /* Set way using a LRU mechanism */
702 msr |= ((env->last_way + 1) & (env->nb_ways - 1)) << 17;
703 break;
704 case POWERPC_EXCP_FPA: /* Floating-point assist exception */
705 case POWERPC_EXCP_DABR: /* Data address breakpoint */
706 case POWERPC_EXCP_IABR: /* Instruction address breakpoint */
707 case POWERPC_EXCP_SMI: /* System management interrupt */
708 case POWERPC_EXCP_MEXTBR: /* Maskable external breakpoint */
709 case POWERPC_EXCP_NMEXTBR: /* Non maskable external breakpoint */
710 cpu_abort(env_cpu(env), "%s exception not implemented\n",
711 powerpc_excp_name(excp));
712 break;
713 default:
714 cpu_abort(env_cpu(env), "Invalid PowerPC exception %d. Aborting\n",
715 excp);
716 break;
717 }
718
719 if (ppc_interrupts_little_endian(cpu, !!(new_msr & MSR_HVB))) {
720 new_msr |= (target_ulong)1 << MSR_LE;
721 }
722 env->spr[SPR_SRR0] = env->nip;
723 env->spr[SPR_SRR1] = msr;
724 powerpc_set_excp_state(cpu, vector, new_msr);
725 }
726
727 static void powerpc_excp_7xx(PowerPCCPU *cpu, int excp)
728 {
729 CPUPPCState *env = &cpu->env;
730 target_ulong msr, new_msr, vector;
731
732 /* new srr1 value excluding must-be-zero bits */
733 msr = env->msr & ~0x783f0000ULL;
734
735 /* new interrupt handler msr preserves ME unless explicitly overridden */
736 new_msr = env->msr & ((target_ulong)1 << MSR_ME);
737
738 /* HV emu assistance interrupt only exists on server arch 2.05 or later */
739 if (excp == POWERPC_EXCP_HV_EMU) {
740 excp = POWERPC_EXCP_PROGRAM;
741 }
742
743 vector = env->excp_vectors[excp];
744 if (vector == (target_ulong)-1ULL) {
745 cpu_abort(env_cpu(env),
746 "Raised an exception without defined vector %d\n", excp);
747 }
748 vector |= env->excp_prefix;
749
750 switch (excp) {
751 case POWERPC_EXCP_MCHECK: /* Machine check exception */
752 powerpc_mcheck_checkstop(env);
753 /* machine check exceptions don't have ME set */
754 new_msr &= ~((target_ulong)1 << MSR_ME);
755 break;
756 case POWERPC_EXCP_DSI: /* Data storage exception */
757 trace_ppc_excp_dsi(env->spr[SPR_DSISR], env->spr[SPR_DAR]);
758 break;
759 case POWERPC_EXCP_ISI: /* Instruction storage exception */
760 trace_ppc_excp_isi(msr, env->nip);
761 msr |= env->error_code;
762 break;
763 case POWERPC_EXCP_EXTERNAL: /* External input */
764 break;
765 case POWERPC_EXCP_ALIGN: /* Alignment exception */
766 /* Get rS/rD and rA from faulting opcode */
767 /*
768 * Note: the opcode fields will not be set properly for a
769 * direct store load/store, but nobody cares as nobody
770 * actually uses direct store segments.
771 */
772 env->spr[SPR_DSISR] |= (env->error_code & 0x03FF0000) >> 16;
773 break;
774 case POWERPC_EXCP_PROGRAM: /* Program exception */
775 switch (env->error_code & ~0xF) {
776 case POWERPC_EXCP_FP:
777 if (!FIELD_EX64_FE(env->msr) || !FIELD_EX64(env->msr, MSR, FP)) {
778 trace_ppc_excp_fp_ignore();
779 powerpc_reset_excp_state(cpu);
780 return;
781 }
782 /*
783 * NIP always points to the faulting instruction for FP exceptions,
784 * so always use store_next and claim we are precise in the MSR.
785 */
786 msr |= 0x00100000;
787 break;
788 case POWERPC_EXCP_INVAL:
789 trace_ppc_excp_inval(env->nip);
790 msr |= 0x00080000;
791 break;
792 case POWERPC_EXCP_PRIV:
793 msr |= 0x00040000;
794 break;
795 case POWERPC_EXCP_TRAP:
796 msr |= 0x00020000;
797 break;
798 default:
799 /* Should never occur */
800 cpu_abort(env_cpu(env), "Invalid program exception %d. Aborting\n",
801 env->error_code);
802 break;
803 }
804 break;
805 case POWERPC_EXCP_SYSCALL: /* System call exception */
806 {
807 int lev = env->error_code;
808
809 if (lev == 1 && cpu->vhyp) {
810 dump_hcall(env);
811 } else {
812 dump_syscall(env);
813 }
814
815 /*
816 * We need to correct the NIP which in this case is supposed
817 * to point to the next instruction
818 */
819 env->nip += 4;
820
821 /*
822 * The Virtual Open Firmware (VOF) relies on the 'sc 1'
823 * instruction to communicate with QEMU. The pegasos2 machine
824 * uses VOF and the 7xx CPUs, so although the 7xx don't have
825 * HV mode, we need to keep hypercall support.
826 */
827 if (lev == 1 && cpu->vhyp) {
828 cpu->vhyp_class->hypercall(cpu->vhyp, cpu);
829 powerpc_reset_excp_state(cpu);
830 return;
831 }
832
833 break;
834 }
835 case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */
836 case POWERPC_EXCP_DECR: /* Decrementer exception */
837 break;
838 case POWERPC_EXCP_RESET: /* System reset exception */
839 if (FIELD_EX64(env->msr, MSR, POW)) {
840 cpu_abort(env_cpu(env),
841 "Trying to deliver power-saving system reset exception "
842 "%d with no HV support\n", excp);
843 }
844 break;
845 case POWERPC_EXCP_TRACE: /* Trace exception */
846 break;
847 case POWERPC_EXCP_IFTLB: /* Instruction fetch TLB error */
848 case POWERPC_EXCP_DLTLB: /* Data load TLB miss */
849 case POWERPC_EXCP_DSTLB: /* Data store TLB miss */
850 ppc_excp_debug_sw_tlb(env, excp);
851 msr |= env->crf[0] << 28;
852 msr |= env->error_code; /* key, D/I, S/L bits */
853 /* Set way using a LRU mechanism */
854 msr |= ((env->last_way + 1) & (env->nb_ways - 1)) << 17;
855 break;
856 case POWERPC_EXCP_IABR: /* Instruction address breakpoint */
857 case POWERPC_EXCP_SMI: /* System management interrupt */
858 case POWERPC_EXCP_THERM: /* Thermal interrupt */
859 case POWERPC_EXCP_PERFM: /* Embedded performance monitor interrupt */
860 cpu_abort(env_cpu(env), "%s exception not implemented\n",
861 powerpc_excp_name(excp));
862 break;
863 default:
864 cpu_abort(env_cpu(env), "Invalid PowerPC exception %d. Aborting\n",
865 excp);
866 break;
867 }
868
869 if (ppc_interrupts_little_endian(cpu, !!(new_msr & MSR_HVB))) {
870 new_msr |= (target_ulong)1 << MSR_LE;
871 }
872 env->spr[SPR_SRR0] = env->nip;
873 env->spr[SPR_SRR1] = msr;
874 powerpc_set_excp_state(cpu, vector, new_msr);
875 }
876
877 static void powerpc_excp_74xx(PowerPCCPU *cpu, int excp)
878 {
879 CPUPPCState *env = &cpu->env;
880 target_ulong msr, new_msr, vector;
881
882 /* new srr1 value excluding must-be-zero bits */
883 msr = env->msr & ~0x783f0000ULL;
884
885 /* new interrupt handler msr preserves ME unless explicitly overridden */
886 new_msr = env->msr & ((target_ulong)1 << MSR_ME);
887
888 /* HV emu assistance interrupt only exists on server arch 2.05 or later */
889 if (excp == POWERPC_EXCP_HV_EMU) {
890 excp = POWERPC_EXCP_PROGRAM;
891 }
892
893 vector = env->excp_vectors[excp];
894 if (vector == (target_ulong)-1ULL) {
895 cpu_abort(env_cpu(env),
896 "Raised an exception without defined vector %d\n", excp);
897 }
898 vector |= env->excp_prefix;
899
900 switch (excp) {
901 case POWERPC_EXCP_MCHECK: /* Machine check exception */
902 powerpc_mcheck_checkstop(env);
903 /* machine check exceptions don't have ME set */
904 new_msr &= ~((target_ulong)1 << MSR_ME);
905 break;
906 case POWERPC_EXCP_DSI: /* Data storage exception */
907 trace_ppc_excp_dsi(env->spr[SPR_DSISR], env->spr[SPR_DAR]);
908 break;
909 case POWERPC_EXCP_ISI: /* Instruction storage exception */
910 trace_ppc_excp_isi(msr, env->nip);
911 msr |= env->error_code;
912 break;
913 case POWERPC_EXCP_EXTERNAL: /* External input */
914 break;
915 case POWERPC_EXCP_ALIGN: /* Alignment exception */
916 /* Get rS/rD and rA from faulting opcode */
917 /*
918 * Note: the opcode fields will not be set properly for a
919 * direct store load/store, but nobody cares as nobody
920 * actually uses direct store segments.
921 */
922 env->spr[SPR_DSISR] |= (env->error_code & 0x03FF0000) >> 16;
923 break;
924 case POWERPC_EXCP_PROGRAM: /* Program exception */
925 switch (env->error_code & ~0xF) {
926 case POWERPC_EXCP_FP:
927 if (!FIELD_EX64_FE(env->msr) || !FIELD_EX64(env->msr, MSR, FP)) {
928 trace_ppc_excp_fp_ignore();
929 powerpc_reset_excp_state(cpu);
930 return;
931 }
932 /*
933 * NIP always points to the faulting instruction for FP exceptions,
934 * so always use store_next and claim we are precise in the MSR.
935 */
936 msr |= 0x00100000;
937 break;
938 case POWERPC_EXCP_INVAL:
939 trace_ppc_excp_inval(env->nip);
940 msr |= 0x00080000;
941 break;
942 case POWERPC_EXCP_PRIV:
943 msr |= 0x00040000;
944 break;
945 case POWERPC_EXCP_TRAP:
946 msr |= 0x00020000;
947 break;
948 default:
949 /* Should never occur */
950 cpu_abort(env_cpu(env), "Invalid program exception %d. Aborting\n",
951 env->error_code);
952 break;
953 }
954 break;
955 case POWERPC_EXCP_SYSCALL: /* System call exception */
956 {
957 int lev = env->error_code;
958
959 if (lev == 1 && cpu->vhyp) {
960 dump_hcall(env);
961 } else {
962 dump_syscall(env);
963 }
964
965 /*
966 * We need to correct the NIP which in this case is supposed
967 * to point to the next instruction
968 */
969 env->nip += 4;
970
971 /*
972 * The Virtual Open Firmware (VOF) relies on the 'sc 1'
973 * instruction to communicate with QEMU. The pegasos2 machine
974 * uses VOF and the 74xx CPUs, so although the 74xx don't have
975 * HV mode, we need to keep hypercall support.
976 */
977 if (lev == 1 && cpu->vhyp) {
978 cpu->vhyp_class->hypercall(cpu->vhyp, cpu);
979 powerpc_reset_excp_state(cpu);
980 return;
981 }
982
983 break;
984 }
985 case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */
986 case POWERPC_EXCP_DECR: /* Decrementer exception */
987 break;
988 case POWERPC_EXCP_RESET: /* System reset exception */
989 if (FIELD_EX64(env->msr, MSR, POW)) {
990 cpu_abort(env_cpu(env),
991 "Trying to deliver power-saving system reset "
992 "exception %d with no HV support\n", excp);
993 }
994 break;
995 case POWERPC_EXCP_TRACE: /* Trace exception */
996 break;
997 case POWERPC_EXCP_VPU: /* Vector unavailable exception */
998 break;
999 case POWERPC_EXCP_IABR: /* Instruction address breakpoint */
1000 case POWERPC_EXCP_SMI: /* System management interrupt */
1001 case POWERPC_EXCP_THERM: /* Thermal interrupt */
1002 case POWERPC_EXCP_PERFM: /* Embedded performance monitor interrupt */
1003 case POWERPC_EXCP_VPUA: /* Vector assist exception */
1004 cpu_abort(env_cpu(env), "%s exception not implemented\n",
1005 powerpc_excp_name(excp));
1006 break;
1007 default:
1008 cpu_abort(env_cpu(env), "Invalid PowerPC exception %d. Aborting\n",
1009 excp);
1010 break;
1011 }
1012
1013 if (ppc_interrupts_little_endian(cpu, !!(new_msr & MSR_HVB))) {
1014 new_msr |= (target_ulong)1 << MSR_LE;
1015 }
1016 env->spr[SPR_SRR0] = env->nip;
1017 env->spr[SPR_SRR1] = msr;
1018 powerpc_set_excp_state(cpu, vector, new_msr);
1019 }
1020
1021 static void powerpc_excp_booke(PowerPCCPU *cpu, int excp)
1022 {
1023 CPUPPCState *env = &cpu->env;
1024 target_ulong msr, new_msr, vector;
1025 int srr0 = SPR_SRR0, srr1 = SPR_SRR1;
1026
1027 /*
1028 * Book E does not play games with certain bits of xSRR1 being MSR save
1029 * bits and others being error status. xSRR1 is the old MSR, period.
1030 */
1031 msr = env->msr;
1032
1033 /* new interrupt handler msr preserves ME unless explicitly overridden */
1034 new_msr = env->msr & ((target_ulong)1 << MSR_ME);
1035
1036 /* HV emu assistance interrupt only exists on server arch 2.05 or later */
1037 if (excp == POWERPC_EXCP_HV_EMU) {
1038 excp = POWERPC_EXCP_PROGRAM;
1039 }
1040
1041 #ifdef TARGET_PPC64
1042 /*
1043 * SPEU and VPU share the same IVOR but they exist in different
1044 * processors. SPEU is e500v1/2 only and VPU is e6500 only.
1045 */
1046 if (excp == POWERPC_EXCP_VPU) {
1047 excp = POWERPC_EXCP_SPEU;
1048 }
1049 #endif
1050
1051 vector = env->excp_vectors[excp];
1052 if (vector == (target_ulong)-1ULL) {
1053 cpu_abort(env_cpu(env),
1054 "Raised an exception without defined vector %d\n", excp);
1055 }
1056 vector |= env->excp_prefix;
1057
1058 switch (excp) {
1059 case POWERPC_EXCP_CRITICAL: /* Critical input */
1060 srr0 = SPR_BOOKE_CSRR0;
1061 srr1 = SPR_BOOKE_CSRR1;
1062 break;
1063 case POWERPC_EXCP_MCHECK: /* Machine check exception */
1064 powerpc_mcheck_checkstop(env);
1065 /* machine check exceptions don't have ME set */
1066 new_msr &= ~((target_ulong)1 << MSR_ME);
1067
1068 /* FIXME: choose one or the other based on CPU type */
1069 srr0 = SPR_BOOKE_MCSRR0;
1070 srr1 = SPR_BOOKE_MCSRR1;
1071
1072 env->spr[SPR_BOOKE_CSRR0] = env->nip;
1073 env->spr[SPR_BOOKE_CSRR1] = msr;
1074
1075 break;
1076 case POWERPC_EXCP_DSI: /* Data storage exception */
1077 trace_ppc_excp_dsi(env->spr[SPR_BOOKE_ESR], env->spr[SPR_BOOKE_DEAR]);
1078 break;
1079 case POWERPC_EXCP_ISI: /* Instruction storage exception */
1080 trace_ppc_excp_isi(msr, env->nip);
1081 break;
1082 case POWERPC_EXCP_EXTERNAL: /* External input */
1083 if (env->mpic_proxy) {
1084 CPUState *cs = env_cpu(env);
1085 /* IACK the IRQ on delivery */
1086 env->spr[SPR_BOOKE_EPR] = ldl_phys(cs->as, env->mpic_iack);
1087 }
1088 break;
1089 case POWERPC_EXCP_ALIGN: /* Alignment exception */
1090 break;
1091 case POWERPC_EXCP_PROGRAM: /* Program exception */
1092 switch (env->error_code & ~0xF) {
1093 case POWERPC_EXCP_FP:
1094 if (!FIELD_EX64_FE(env->msr) || !FIELD_EX64(env->msr, MSR, FP)) {
1095 trace_ppc_excp_fp_ignore();
1096 powerpc_reset_excp_state(cpu);
1097 return;
1098 }
1099 /*
1100 * NIP always points to the faulting instruction for FP exceptions,
1101 * so always use store_next and claim we are precise in the MSR.
1102 */
1103 msr |= 0x00100000;
1104 env->spr[SPR_BOOKE_ESR] = ESR_FP;
1105 break;
1106 case POWERPC_EXCP_INVAL:
1107 trace_ppc_excp_inval(env->nip);
1108 msr |= 0x00080000;
1109 env->spr[SPR_BOOKE_ESR] = ESR_PIL;
1110 break;
1111 case POWERPC_EXCP_PRIV:
1112 msr |= 0x00040000;
1113 env->spr[SPR_BOOKE_ESR] = ESR_PPR;
1114 break;
1115 case POWERPC_EXCP_TRAP:
1116 msr |= 0x00020000;
1117 env->spr[SPR_BOOKE_ESR] = ESR_PTR;
1118 break;
1119 default:
1120 /* Should never occur */
1121 cpu_abort(env_cpu(env), "Invalid program exception %d. Aborting\n",
1122 env->error_code);
1123 break;
1124 }
1125 break;
1126 case POWERPC_EXCP_SYSCALL: /* System call exception */
1127 dump_syscall(env);
1128
1129 /*
1130 * We need to correct the NIP which in this case is supposed
1131 * to point to the next instruction
1132 */
1133 env->nip += 4;
1134 break;
1135 case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */
1136 case POWERPC_EXCP_APU: /* Auxiliary processor unavailable */
1137 case POWERPC_EXCP_DECR: /* Decrementer exception */
1138 break;
1139 case POWERPC_EXCP_FIT: /* Fixed-interval timer interrupt */
1140 /* FIT on 4xx */
1141 trace_ppc_excp_print("FIT");
1142 break;
1143 case POWERPC_EXCP_WDT: /* Watchdog timer interrupt */
1144 trace_ppc_excp_print("WDT");
1145 srr0 = SPR_BOOKE_CSRR0;
1146 srr1 = SPR_BOOKE_CSRR1;
1147 break;
1148 case POWERPC_EXCP_DTLB: /* Data TLB error */
1149 case POWERPC_EXCP_ITLB: /* Instruction TLB error */
1150 break;
1151 case POWERPC_EXCP_DEBUG: /* Debug interrupt */
1152 if (env->flags & POWERPC_FLAG_DE) {
1153 /* FIXME: choose one or the other based on CPU type */
1154 srr0 = SPR_BOOKE_DSRR0;
1155 srr1 = SPR_BOOKE_DSRR1;
1156
1157 env->spr[SPR_BOOKE_CSRR0] = env->nip;
1158 env->spr[SPR_BOOKE_CSRR1] = msr;
1159
1160 /* DBSR already modified by caller */
1161 } else {
1162 cpu_abort(env_cpu(env),
1163 "Debug exception triggered on unsupported model\n");
1164 }
1165 break;
1166 case POWERPC_EXCP_SPEU: /* SPE/embedded floating-point unavailable/VPU */
1167 env->spr[SPR_BOOKE_ESR] = ESR_SPV;
1168 break;
1169 case POWERPC_EXCP_DOORI: /* Embedded doorbell interrupt */
1170 break;
1171 case POWERPC_EXCP_DOORCI: /* Embedded doorbell critical interrupt */
1172 srr0 = SPR_BOOKE_CSRR0;
1173 srr1 = SPR_BOOKE_CSRR1;
1174 break;
1175 case POWERPC_EXCP_RESET: /* System reset exception */
1176 if (FIELD_EX64(env->msr, MSR, POW)) {
1177 cpu_abort(env_cpu(env),
1178 "Trying to deliver power-saving system reset "
1179 "exception %d with no HV support\n", excp);
1180 }
1181 break;
1182 case POWERPC_EXCP_EFPDI: /* Embedded floating-point data interrupt */
1183 case POWERPC_EXCP_EFPRI: /* Embedded floating-point round interrupt */
1184 cpu_abort(env_cpu(env), "%s exception not implemented\n",
1185 powerpc_excp_name(excp));
1186 break;
1187 default:
1188 cpu_abort(env_cpu(env), "Invalid PowerPC exception %d. Aborting\n",
1189 excp);
1190 break;
1191 }
1192
1193 #ifdef TARGET_PPC64
1194 if (env->spr[SPR_BOOKE_EPCR] & EPCR_ICM) {
1195 /* Cat.64-bit: EPCR.ICM is copied to MSR.CM */
1196 new_msr |= (target_ulong)1 << MSR_CM;
1197 } else {
1198 vector = (uint32_t)vector;
1199 }
1200 #endif
1201
1202 env->spr[srr0] = env->nip;
1203 env->spr[srr1] = msr;
1204 powerpc_set_excp_state(cpu, vector, new_msr);
1205 }
1206
1207 /*
1208 * When running a nested HV guest under vhyp, external interrupts are
1209 * delivered as HVIRT.
1210 */
1211 static bool books_vhyp_promotes_external_to_hvirt(PowerPCCPU *cpu)
1212 {
1213 if (cpu->vhyp) {
1214 return vhyp_cpu_in_nested(cpu);
1215 }
1216 return false;
1217 }
1218
1219 #ifdef TARGET_PPC64
1220 /*
1221 * When running under vhyp, hcalls are always intercepted and sent to the
1222 * vhc->hypercall handler.
1223 */
1224 static bool books_vhyp_handles_hcall(PowerPCCPU *cpu)
1225 {
1226 if (cpu->vhyp) {
1227 return !vhyp_cpu_in_nested(cpu);
1228 }
1229 return false;
1230 }
1231
1232 /*
1233 * When running a nested KVM HV guest under vhyp, HV exceptions are not
1234 * delivered to the guest (because there is no concept of HV support), but
1235 * rather they are sent to the vhyp to exit from the L2 back to the L1 and
1236 * return from the H_ENTER_NESTED hypercall.
1237 */
1238 static bool books_vhyp_handles_hv_excp(PowerPCCPU *cpu)
1239 {
1240 if (cpu->vhyp) {
1241 return vhyp_cpu_in_nested(cpu);
1242 }
1243 return false;
1244 }
1245
1246 #ifdef CONFIG_TCG
1247 static bool is_prefix_insn(CPUPPCState *env, uint32_t insn)
1248 {
1249 if (!(env->insns_flags2 & PPC2_ISA310)) {
1250 return false;
1251 }
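    /* ISA v3.1 prefixed instructions have primary opcode 1 (bits 0:5) */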
1252 return ((insn & 0xfc000000) == 0x04000000);
1253 }
1254
1255 static bool is_prefix_insn_excp(PowerPCCPU *cpu, int excp)
1256 {
1257 CPUPPCState *env = &cpu->env;
1258
1259 if (!(env->insns_flags2 & PPC2_ISA310)) {
1260 return false;
1261 }
1262
1263 if (!tcg_enabled()) {
1264 /*
1265 * This does not load instructions and set the prefix bit correctly
1266 * for injected interrupts with KVM. That may have to be discovered
1267 * and set by the KVM layer before injecting.
1268 */
1269 return false;
1270 }
1271
1272 switch (excp) {
1273 case POWERPC_EXCP_MCHECK:
1274 if (!(env->error_code & PPC_BIT(42))) {
1275 /*
1276 * Fetch attempt caused a machine check, so attempting to fetch
1277 * again would cause a recursive machine check.
1278 */
1279 return false;
1280 }
1281 break;
1282 case POWERPC_EXCP_HDSI:
1283 /* HDSI PRTABLE_FAULT has the originating access type in error_code */
1284 if ((env->spr[SPR_HDSISR] & DSISR_PRTABLE_FAULT) &&
1285 (env->error_code == MMU_INST_FETCH)) {
1286 /*
1287 * Fetch failed due to partition scope translation, so prefix
1288 * indication is not relevant (and attempting to load the
1289 * instruction at NIP would cause recursive faults with the same
1290 * translation).
1291 */
1292 return false;
1293 }
1294 break;
1295
1296 case POWERPC_EXCP_DSI:
1297 case POWERPC_EXCP_DSEG:
1298 case POWERPC_EXCP_ALIGN:
1299 case POWERPC_EXCP_PROGRAM:
1300 case POWERPC_EXCP_FPU:
1301 case POWERPC_EXCP_TRACE:
1302 case POWERPC_EXCP_HV_EMU:
1303 case POWERPC_EXCP_VPU:
1304 case POWERPC_EXCP_VSXU:
1305 case POWERPC_EXCP_FU:
1306 case POWERPC_EXCP_HV_FU:
1307 break;
1308 default:
1309 return false;
1310 }
1311
1312 return is_prefix_insn(env, ppc_ldl_code(env, env->nip));
1313 }
1314 #else
1315 static bool is_prefix_insn_excp(PowerPCCPU *cpu, int excp)
1316 {
1317 return false;
1318 }
1319 #endif
1320
1321 static void powerpc_excp_books(PowerPCCPU *cpu, int excp)
1322 {
1323 CPUPPCState *env = &cpu->env;
1324 target_ulong msr, new_msr, vector;
1325 int srr0 = SPR_SRR0, srr1 = SPR_SRR1, lev = -1;
1326
1327 /* new srr1 value excluding must-be-zero bits */
1328 msr = env->msr & ~0x783f0000ULL;
1329
1330 /*
1331 * new interrupt handler msr preserves HV and ME unless explicitly
1332 * overridden
1333 */
1334 new_msr = env->msr & (((target_ulong)1 << MSR_ME) | MSR_HVB);
1335
1336 /*
1337 * check for special resume at 0x100 from doze/nap/sleep/winkle on
1338 * P7/P8/P9
1339 */
1340 if (env->resume_as_sreset) {
1341 excp = powerpc_reset_wakeup(env, excp, &msr);
1342 }
1343
1344 /*
1345 * We don't want to generate a Hypervisor Emulation Assistance
1346 * Interrupt if we don't have HVB in msr_mask (PAPR mode),
1347 * unless running a nested-hv guest, in which case the L1
1348 * kernel wants the interrupt.
1349 */
1350 if (excp == POWERPC_EXCP_HV_EMU && !(env->msr_mask & MSR_HVB) &&
1351 !books_vhyp_handles_hv_excp(cpu)) {
1352 excp = POWERPC_EXCP_PROGRAM;
1353 }
1354
1355 vector = env->excp_vectors[excp];
1356 if (vector == (target_ulong)-1ULL) {
1357 cpu_abort(env_cpu(env),
1358 "Raised an exception without defined vector %d\n", excp);
1359 }
1360 vector |= env->excp_prefix;
1361
1362 if (is_prefix_insn_excp(cpu, excp)) {
1363 msr |= PPC_BIT(34);
1364 }
1365
1366 switch (excp) {
1367 case POWERPC_EXCP_MCHECK: /* Machine check exception */
1368 powerpc_mcheck_checkstop(env);
1369 if (env->msr_mask & MSR_HVB) {
1370 /*
1371 * ISA specifies HV, but can be delivered to guest with HV
1372 * clear (e.g., see FWNMI in PAPR).
1373 */
1374 new_msr |= (target_ulong)MSR_HVB;
1375
1376 /* HV machine check exceptions don't have ME set */
1377 new_msr &= ~((target_ulong)1 << MSR_ME);
1378 }
1379
1380 msr |= env->error_code;
1381 break;
1382
1383 case POWERPC_EXCP_DSI: /* Data storage exception */
1384 trace_ppc_excp_dsi(env->spr[SPR_DSISR], env->spr[SPR_DAR]);
1385 break;
1386 case POWERPC_EXCP_ISI: /* Instruction storage exception */
1387 trace_ppc_excp_isi(msr, env->nip);
1388 msr |= env->error_code;
1389 break;
1390 case POWERPC_EXCP_EXTERNAL: /* External input */
1391 {
1392 bool lpes0;
1393
1394 /* LPES0 is only taken into consideration if we support HV mode */
1395 if (!env->has_hv_mode) {
1396 break;
1397 }
1398 lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
1399 if (!lpes0) {
1400 new_msr |= (target_ulong)MSR_HVB;
1401 new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
1402 srr0 = SPR_HSRR0;
1403 srr1 = SPR_HSRR1;
1404 }
1405 break;
1406 }
1407 case POWERPC_EXCP_ALIGN: /* Alignment exception */
1408 /* Optional DSISR update was removed from ISA v3.0 */
1409 if (!(env->insns_flags2 & PPC2_ISA300)) {
1410 /* Get rS/rD and rA from faulting opcode */
1411 /*
1412 * Note: the opcode fields will not be set properly for a
1413 * direct store load/store, but nobody cares as nobody
1414 * actually uses direct store segments.
1415 */
1416 env->spr[SPR_DSISR] |= (env->error_code & 0x03FF0000) >> 16;
1417 }
1418 break;
1419 case POWERPC_EXCP_PROGRAM: /* Program exception */
1420 switch (env->error_code & ~0xF) {
1421 case POWERPC_EXCP_FP:
1422 if (!FIELD_EX64_FE(env->msr) || !FIELD_EX64(env->msr, MSR, FP)) {
1423 trace_ppc_excp_fp_ignore();
1424 powerpc_reset_excp_state(cpu);
1425 return;
1426 }
1427 /*
1428 * NIP always points to the faulting instruction for FP exceptions,
1429 * so always use store_next and claim we are precise in the MSR.
1430 */
1431 msr |= 0x00100000;
1432 break;
1433 case POWERPC_EXCP_INVAL:
1434 trace_ppc_excp_inval(env->nip);
1435 msr |= 0x00080000;
1436 break;
1437 case POWERPC_EXCP_PRIV:
1438 msr |= 0x00040000;
1439 break;
1440 case POWERPC_EXCP_TRAP:
1441 msr |= 0x00020000;
1442 break;
1443 default:
1444 /* Should never occur */
1445 cpu_abort(env_cpu(env), "Invalid program exception %d. Aborting\n",
1446 env->error_code);
1447 break;
1448 }
1449 break;
1450 case POWERPC_EXCP_SYSCALL: /* System call exception */
1451 lev = env->error_code;
1452
1453 if (lev == 1 && cpu->vhyp) {
1454 dump_hcall(env);
1455 } else {
1456 dump_syscall(env);
1457 }
1458
1459 /*
1460 * We need to correct the NIP which in this case is supposed
1461 * to point to the next instruction
1462 */
1463 env->nip += 4;
1464
1465 /* "PAPR mode" built-in hypercall emulation */
1466 if (lev == 1 && books_vhyp_handles_hcall(cpu)) {
1467 cpu->vhyp_class->hypercall(cpu->vhyp, cpu);
1468 powerpc_reset_excp_state(cpu);
1469 return;
1470 }
1471 if (env->insns_flags2 & PPC2_ISA310) {
1472 /* ISAv3.1 puts LEV into SRR1 */
1473 msr |= lev << 20;
1474 }
1475 if (lev == 1) {
1476 new_msr |= (target_ulong)MSR_HVB;
1477 }
1478 break;
1479 case POWERPC_EXCP_SYSCALL_VECTORED: /* scv exception */
1480 lev = env->error_code;
1481 dump_syscall(env);
1482 env->nip += 4;
1483 new_msr |= env->msr & ((target_ulong)1 << MSR_EE);
1484 new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
1485
1486 vector += lev * 0x20;
1487
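        /* scv saves the return address in LR and the old MSR in CTR, not SRR0/SRR1 */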
1488 env->lr = env->nip;
1489 env->ctr = msr;
1490 break;
1491 case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */
1492 case POWERPC_EXCP_DECR: /* Decrementer exception */
1493 break;
1494 case POWERPC_EXCP_RESET: /* System reset exception */
1495 /* A power-saving exception sets ME, otherwise it is unchanged */
1496 if (FIELD_EX64(env->msr, MSR, POW)) {
1497 /* indicate that we resumed from power save mode */
1498 msr |= 0x10000;
1499 new_msr |= ((target_ulong)1 << MSR_ME);
1500 }
1501 if (env->msr_mask & MSR_HVB) {
1502 /*
1503 * ISA specifies HV, but can be delivered to guest with HV
1504 * clear (e.g., see FWNMI in PAPR, NMI injection in QEMU).
1505 */
1506 new_msr |= (target_ulong)MSR_HVB;
1507 } else {
1508 if (FIELD_EX64(env->msr, MSR, POW)) {
1509 cpu_abort(env_cpu(env),
1510 "Trying to deliver power-saving system reset "
1511 "exception %d with no HV support\n", excp);
1512 }
1513 }
1514 break;
1515 case POWERPC_EXCP_TRACE: /* Trace exception */
1516 msr |= env->error_code;
1517 /* fall through */
1518 case POWERPC_EXCP_DSEG: /* Data segment exception */
1519 case POWERPC_EXCP_ISEG: /* Instruction segment exception */
1520 case POWERPC_EXCP_SDOOR: /* Doorbell interrupt */
1521 case POWERPC_EXCP_PERFM: /* Performance monitor interrupt */
1522 break;
1523 case POWERPC_EXCP_HISI: /* Hypervisor instruction storage exception */
1524 msr |= env->error_code;
1525 /* fall through */
1526 case POWERPC_EXCP_HDECR: /* Hypervisor decrementer exception */
1527 case POWERPC_EXCP_HDSI: /* Hypervisor data storage exception */
1528 case POWERPC_EXCP_SDOOR_HV: /* Hypervisor Doorbell interrupt */
1529 case POWERPC_EXCP_HVIRT: /* Hypervisor virtualization */
1530 srr0 = SPR_HSRR0;
1531 srr1 = SPR_HSRR1;
1532 new_msr |= (target_ulong)MSR_HVB;
1533 new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
1534 break;
1535 #ifdef CONFIG_TCG
1536 case POWERPC_EXCP_HV_EMU: {
1537 uint32_t insn = ppc_ldl_code(env, env->nip);
1538 env->spr[SPR_HEIR] = insn;
1539 if (is_prefix_insn(env, insn)) {
1540 uint32_t insn2 = ppc_ldl_code(env, env->nip + 4);
1541 env->spr[SPR_HEIR] <<= 32;
1542 env->spr[SPR_HEIR] |= insn2;
1543 }
1544 srr0 = SPR_HSRR0;
1545 srr1 = SPR_HSRR1;
1546 new_msr |= (target_ulong)MSR_HVB;
1547 new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
1548 break;
1549 }
1550 #endif
1551 case POWERPC_EXCP_VPU: /* Vector unavailable exception */
1552 case POWERPC_EXCP_VSXU: /* VSX unavailable exception */
1553 case POWERPC_EXCP_FU: /* Facility unavailable exception */
1554 env->spr[SPR_FSCR] |= ((target_ulong)env->error_code << 56);
1555 break;
1556 case POWERPC_EXCP_HV_FU: /* Hypervisor Facility Unavailable Exception */
1557 env->spr[SPR_HFSCR] |= ((target_ulong)env->error_code << FSCR_IC_POS);
1558 srr0 = SPR_HSRR0;
1559 srr1 = SPR_HSRR1;
1560 new_msr |= (target_ulong)MSR_HVB;
1561 new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
1562 break;
1563 case POWERPC_EXCP_PERFM_EBB: /* Performance Monitor EBB Exception */
1564 case POWERPC_EXCP_EXTERNAL_EBB: /* External EBB Exception */
1565 env->spr[SPR_BESCR] &= ~BESCR_GE;
1566
1567 /*
1568 * Save NIP for rfebb insn in SPR_EBBRR. Next nip is
1569 * stored in the EBB Handler SPR_EBBHR.
1570 */
1571 env->spr[SPR_EBBRR] = env->nip;
1572 powerpc_set_excp_state(cpu, env->spr[SPR_EBBHR], env->msr);
1573
1574 /*
1575 * This exception is handled in userspace. No need to proceed.
1576 */
1577 return;
1578 case POWERPC_EXCP_THERM: /* Thermal interrupt */
1579 case POWERPC_EXCP_VPUA: /* Vector assist exception */
1580 case POWERPC_EXCP_MAINT: /* Maintenance exception */
1581 case POWERPC_EXCP_HV_MAINT: /* Hypervisor Maintenance exception */
1582 cpu_abort(env_cpu(env), "%s exception not implemented\n",
1583 powerpc_excp_name(excp));
1584 break;
1585 default:
1586 cpu_abort(env_cpu(env), "Invalid PowerPC exception %d. Aborting\n",
1587 excp);
1588 break;
1589 }
1590
1591 if (ppc_interrupts_little_endian(cpu, !!(new_msr & MSR_HVB))) {
1592 new_msr |= (target_ulong)1 << MSR_LE;
1593 }
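    /* Book3S interrupts are always delivered in 64-bit mode */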
1594 new_msr |= (target_ulong)1 << MSR_SF;
1595
1596 if (excp != POWERPC_EXCP_SYSCALL_VECTORED) {
1597 env->spr[srr0] = env->nip;
1598 env->spr[srr1] = msr;
1599 }
1600
1601 if ((new_msr & MSR_HVB) && books_vhyp_handles_hv_excp(cpu)) {
1602 /* Deliver interrupt to L1 by returning from the H_ENTER_NESTED call */
1603 cpu->vhyp_class->deliver_hv_excp(cpu, excp);
1604 powerpc_reset_excp_state(cpu);
1605 } else {
1606 /* Sanity check */
1607 if (!(env->msr_mask & MSR_HVB) && srr0 == SPR_HSRR0) {
1608 cpu_abort(env_cpu(env), "Trying to deliver HV exception (HSRR) %d "
1609 "with no HV support\n", excp);
1610 }
1611 /* This can update new_msr and vector if AIL applies */
1612 ppc_excp_apply_ail(cpu, excp, msr, &new_msr, &vector);
1613 powerpc_set_excp_state(cpu, vector, new_msr);
1614 }
1615 }
1616 #else
1617 static inline void powerpc_excp_books(PowerPCCPU *cpu, int excp)
1618 {
1619 g_assert_not_reached();
1620 }
1621 #endif /* TARGET_PPC64 */
1622
1623 static void powerpc_excp(PowerPCCPU *cpu, int excp)
1624 {
1625 CPUPPCState *env = &cpu->env;
1626
1627 if (excp <= POWERPC_EXCP_NONE || excp >= POWERPC_EXCP_NB) {
1628 cpu_abort(env_cpu(env), "Invalid PowerPC exception %d. Aborting\n",
1629 excp);
1630 }
1631
1632 qemu_log_mask(CPU_LOG_INT, "Raise exception at " TARGET_FMT_lx
1633 " => %s (%d) error=%02x\n", env->nip, powerpc_excp_name(excp),
1634 excp, env->error_code);
1635 env->excp_stats[excp]++;
1636
1637 switch (env->excp_model) {
1638 case POWERPC_EXCP_40x:
1639 powerpc_excp_40x(cpu, excp);
1640 break;
1641 case POWERPC_EXCP_6xx:
1642 powerpc_excp_6xx(cpu, excp);
1643 break;
1644 case POWERPC_EXCP_7xx:
1645 powerpc_excp_7xx(cpu, excp);
1646 break;
1647 case POWERPC_EXCP_74xx:
1648 powerpc_excp_74xx(cpu, excp);
1649 break;
1650 case POWERPC_EXCP_BOOKE:
1651 powerpc_excp_booke(cpu, excp);
1652 break;
1653 case POWERPC_EXCP_970:
1654 case POWERPC_EXCP_POWER7:
1655 case POWERPC_EXCP_POWER8:
1656 case POWERPC_EXCP_POWER9:
1657 case POWERPC_EXCP_POWER10:
1658 case POWERPC_EXCP_POWER11:
1659 powerpc_excp_books(cpu, excp);
1660 break;
1661 default:
1662 g_assert_not_reached();
1663 }
1664 }
1665
1666 void ppc_cpu_do_interrupt(CPUState *cs)
1667 {
1668 PowerPCCPU *cpu = POWERPC_CPU(cs);
1669
1670 powerpc_excp(cpu, cs->exception_index);
1671 }
1672
1673 #ifdef TARGET_PPC64
1674 #define P7_UNUSED_INTERRUPTS \
1675 (PPC_INTERRUPT_RESET | PPC_INTERRUPT_HVIRT | PPC_INTERRUPT_CEXT | \
1676 PPC_INTERRUPT_WDT | PPC_INTERRUPT_CDOORBELL | PPC_INTERRUPT_FIT | \
1677 PPC_INTERRUPT_PIT | PPC_INTERRUPT_DOORBELL | PPC_INTERRUPT_HDOORBELL | \
1678 PPC_INTERRUPT_THERM | PPC_INTERRUPT_EBB)
1679
1680 static int p7_interrupt_powersave(uint32_t pending_interrupts,
1681 target_ulong lpcr)
1682 {
1683 if ((pending_interrupts & PPC_INTERRUPT_EXT) &&
1684 (lpcr & LPCR_P7_PECE0)) {
1685 return PPC_INTERRUPT_EXT;
1686 }
1687 if ((pending_interrupts & PPC_INTERRUPT_DECR) &&
1688 (lpcr & LPCR_P7_PECE1)) {
1689 return PPC_INTERRUPT_DECR;
1690 }
1691 if ((pending_interrupts & PPC_INTERRUPT_MCK) &&
1692 (lpcr & LPCR_P7_PECE2)) {
1693 return PPC_INTERRUPT_MCK;
1694 }
1695 if ((pending_interrupts & PPC_INTERRUPT_HMI) &&
1696 (lpcr & LPCR_P7_PECE2)) {
1697 return PPC_INTERRUPT_HMI;
1698 }
1699 if (pending_interrupts & PPC_INTERRUPT_RESET) {
1700 return PPC_INTERRUPT_RESET;
1701 }
1702 return 0;
1703 }
1704
1705 static int p7_next_unmasked_interrupt(CPUPPCState *env,
1706 uint32_t pending_interrupts,
1707 target_ulong lpcr)
1708 {
1709 CPUState *cs = env_cpu(env);
1710
1711 /* Ignore MSR[EE] when coming out of some power management states */
1712 bool msr_ee = FIELD_EX64(env->msr, MSR, EE) || env->resume_as_sreset;
1713
1714 assert((pending_interrupts & P7_UNUSED_INTERRUPTS) == 0);
1715
1716 if (cs->halted) {
1717 /* LPCR[PECE] controls which interrupts can exit power-saving mode */
1718 return p7_interrupt_powersave(pending_interrupts, lpcr);
1719 }
1720
1721 /* Machine check exception */
1722 if (pending_interrupts & PPC_INTERRUPT_MCK) {
1723 return PPC_INTERRUPT_MCK;
1724 }
1725
1726 /* Hypervisor decrementer exception */
1727 if (pending_interrupts & PPC_INTERRUPT_HDECR) {
1728 /* LPCR will be clear when not supported so this will work */
1729 bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE);
1730 if ((msr_ee || !FIELD_EX64_HV(env->msr)) && hdice) {
1731 /* HDEC clears on delivery */
1732 return PPC_INTERRUPT_HDECR;
1733 }
1734 }
1735
1736 /* External interrupt can ignore MSR:EE under some circumstances */
1737 if (pending_interrupts & PPC_INTERRUPT_EXT) {
1738 bool lpes0 = !!(lpcr & LPCR_LPES0);
1739 bool heic = !!(lpcr & LPCR_HEIC);
1740 /* HEIC blocks delivery to the hypervisor */
1741 if ((msr_ee && !(heic && FIELD_EX64_HV(env->msr) &&
1742 !FIELD_EX64(env->msr, MSR, PR))) ||
1743 (env->has_hv_mode && !FIELD_EX64_HV(env->msr) && !lpes0)) {
1744 return PPC_INTERRUPT_EXT;
1745 }
1746 }
1747 if (msr_ee != 0) {
1748 /* Decrementer exception */
1749 if (pending_interrupts & PPC_INTERRUPT_DECR) {
1750 return PPC_INTERRUPT_DECR;
1751 }
1752 if (pending_interrupts & PPC_INTERRUPT_PERFM) {
1753 return PPC_INTERRUPT_PERFM;
1754 }
1755 }
1756
1757 return 0;
1758 }
1759
1760 #define P8_UNUSED_INTERRUPTS \
1761 (PPC_INTERRUPT_RESET | PPC_INTERRUPT_DEBUG | PPC_INTERRUPT_HVIRT | \
1762 PPC_INTERRUPT_CEXT | PPC_INTERRUPT_WDT | PPC_INTERRUPT_CDOORBELL | \
1763 PPC_INTERRUPT_FIT | PPC_INTERRUPT_PIT | PPC_INTERRUPT_THERM)
1764
1765 static int p8_interrupt_powersave(uint32_t pending_interrupts,
1766 target_ulong lpcr)
1767 {
1768 if ((pending_interrupts & PPC_INTERRUPT_EXT) &&
1769 (lpcr & LPCR_P8_PECE2)) {
1770 return PPC_INTERRUPT_EXT;
1771 }
1772 if ((pending_interrupts & PPC_INTERRUPT_DECR) &&
1773 (lpcr & LPCR_P8_PECE3)) {
1774 return PPC_INTERRUPT_DECR;
1775 }
1776 if ((pending_interrupts & PPC_INTERRUPT_MCK) &&
1777 (lpcr & LPCR_P8_PECE4)) {
1778 return PPC_INTERRUPT_MCK;
1779 }
1780 if ((pending_interrupts & PPC_INTERRUPT_HMI) &&
1781 (lpcr & LPCR_P8_PECE4)) {
1782 return PPC_INTERRUPT_HMI;
1783 }
1784 if ((pending_interrupts & PPC_INTERRUPT_DOORBELL) &&
1785 (lpcr & LPCR_P8_PECE0)) {
1786 return PPC_INTERRUPT_DOORBELL;
1787 }
1788 if ((pending_interrupts & PPC_INTERRUPT_HDOORBELL) &&
1789 (lpcr & LPCR_P8_PECE1)) {
1790 return PPC_INTERRUPT_HDOORBELL;
1791 }
1792 if (pending_interrupts & PPC_INTERRUPT_RESET) {
1793 return PPC_INTERRUPT_RESET;
1794 }
1795 return 0;
1796 }
1797
1798 static int p8_next_unmasked_interrupt(CPUPPCState *env,
1799 uint32_t pending_interrupts,
1800 target_ulong lpcr)
1801 {
1802 CPUState *cs = env_cpu(env);
1803
1804 /* Ignore MSR[EE] when coming out of some power management states */
1805 bool msr_ee = FIELD_EX64(env->msr, MSR, EE) || env->resume_as_sreset;
1806
1807 assert((pending_interrupts & P8_UNUSED_INTERRUPTS) == 0);
1808
1809 if (cs->halted) {
1810 /* LPCR[PECE] controls which interrupts can exit power-saving mode */
1811 return p8_interrupt_powersave(pending_interrupts, lpcr);
1812 }
1813
1814 /* Machine check exception */
1815 if (pending_interrupts & PPC_INTERRUPT_MCK) {
1816 return PPC_INTERRUPT_MCK;
1817 }
1818
1819 /* Hypervisor decrementer exception */
1820 if (pending_interrupts & PPC_INTERRUPT_HDECR) {
1821 /* LPCR will be clear when not supported so this will work */
1822 bool hdice = !!(lpcr & LPCR_HDICE);
1823 if ((msr_ee || !FIELD_EX64_HV(env->msr)) && hdice) {
1824 /* HDEC clears on delivery */
1825 return PPC_INTERRUPT_HDECR;
1826 }
1827 }
1828
1829 /* External interrupt can ignore MSR:EE under some circumstances */
1830 if (pending_interrupts & PPC_INTERRUPT_EXT) {
1831 bool lpes0 = !!(lpcr & LPCR_LPES0);
1832 bool heic = !!(lpcr & LPCR_HEIC);
1833 /* HEIC blocks delivery to the hypervisor */
1834 if ((msr_ee && !(heic && FIELD_EX64_HV(env->msr) &&
1835 !FIELD_EX64(env->msr, MSR, PR))) ||
1836 (env->has_hv_mode && !FIELD_EX64_HV(env->msr) && !lpes0)) {
1837 return PPC_INTERRUPT_EXT;
1838 }
1839 }
1840 if (msr_ee != 0) {
1841 /* Decrementer exception */
1842 if (pending_interrupts & PPC_INTERRUPT_DECR) {
1843 return PPC_INTERRUPT_DECR;
1844 }
1845 if (pending_interrupts & PPC_INTERRUPT_DOORBELL) {
1846 return PPC_INTERRUPT_DOORBELL;
1847 }
1848 if (pending_interrupts & PPC_INTERRUPT_HDOORBELL) {
1849 return PPC_INTERRUPT_HDOORBELL;
1850 }
1851 if (pending_interrupts & PPC_INTERRUPT_PERFM) {
1852 return PPC_INTERRUPT_PERFM;
1853 }
1854 /* EBB exception */
1855 if (pending_interrupts & PPC_INTERRUPT_EBB) {
1856 /*
1857 * EBB exception must be taken in problem state and
1858 * with BESCR_GE set.
1859 */
1860 if (FIELD_EX64(env->msr, MSR, PR) &&
1861 (env->spr[SPR_BESCR] & BESCR_GE)) {
1862 return PPC_INTERRUPT_EBB;
1863 }
1864 }
1865 }
1866
1867 return 0;
1868 }
1869
1870 #define P9_UNUSED_INTERRUPTS \
1871 (PPC_INTERRUPT_RESET | PPC_INTERRUPT_DEBUG | PPC_INTERRUPT_CEXT | \
1872 PPC_INTERRUPT_WDT | PPC_INTERRUPT_CDOORBELL | PPC_INTERRUPT_FIT | \
1873 PPC_INTERRUPT_PIT | PPC_INTERRUPT_THERM)
1874
1875 static int p9_interrupt_powersave(CPUPPCState *env,
1876 uint32_t pending_interrupts,
1877 target_ulong lpcr)
1878 {
1879
1880 /* External Exception */
1881 if ((pending_interrupts & PPC_INTERRUPT_EXT) &&
1882 (lpcr & LPCR_EEE)) {
1883 bool heic = !!(lpcr & LPCR_HEIC);
1884 if (!heic || !FIELD_EX64_HV(env->msr) ||
1885 FIELD_EX64(env->msr, MSR, PR)) {
1886 return PPC_INTERRUPT_EXT;
1887 }
1888 }
1889 /* Decrementer Exception */
1890 if ((pending_interrupts & PPC_INTERRUPT_DECR) &&
1891 (lpcr & LPCR_DEE)) {
1892 return PPC_INTERRUPT_DECR;
1893 }
1894 /* Machine Check or Hypervisor Maintenance Exception */
1895 if (lpcr & LPCR_OEE) {
1896 if (pending_interrupts & PPC_INTERRUPT_MCK) {
1897 return PPC_INTERRUPT_MCK;
1898 }
1899 if (pending_interrupts & PPC_INTERRUPT_HMI) {
1900 return PPC_INTERRUPT_HMI;
1901 }
1902 }
1903 /* Privileged Doorbell Exception */
1904 if ((pending_interrupts & PPC_INTERRUPT_DOORBELL) &&
1905 (lpcr & LPCR_PDEE)) {
1906 return PPC_INTERRUPT_DOORBELL;
1907 }
1908 /* Hypervisor Doorbell Exception */
1909 if ((pending_interrupts & PPC_INTERRUPT_HDOORBELL) &&
1910 (lpcr & LPCR_HDEE)) {
1911 return PPC_INTERRUPT_HDOORBELL;
1912 }
1913 /* Hypervisor virtualization exception */
1914 if ((pending_interrupts & PPC_INTERRUPT_HVIRT) &&
1915 (lpcr & LPCR_HVEE)) {
1916 return PPC_INTERRUPT_HVIRT;
1917 }
1918 if (pending_interrupts & PPC_INTERRUPT_RESET) {
1919 return PPC_INTERRUPT_RESET;
1920 }
1921 return 0;
1922 }
1923
1924 static int p9_next_unmasked_interrupt(CPUPPCState *env,
1925 uint32_t pending_interrupts,
1926 target_ulong lpcr)
1927 {
1928 CPUState *cs = env_cpu(env);
1929
1930 /* Ignore MSR[EE] when coming out of some power management states */
1931 bool msr_ee = FIELD_EX64(env->msr, MSR, EE) || env->resume_as_sreset;
1932
1933 assert((pending_interrupts & P9_UNUSED_INTERRUPTS) == 0);
1934
1935 if (cs->halted) {
1936 if (env->spr[SPR_PSSCR] & PSSCR_EC) {
1937 /*
1938 * When PSSCR[EC] is set, LPCR[PECE] controls which interrupts can
1939 * wakeup the processor
1940 */
1941 return p9_interrupt_powersave(env, pending_interrupts, lpcr);
1942 } else {
1943 /*
1944 * When it's clear, any system-caused exception exits power-saving
1945 * mode, even the ones that gate on MSR[EE].
1946 */
1947 msr_ee = true;
1948 }
1949 }
1950
1951 /* Machine check exception */
1952 if (pending_interrupts & PPC_INTERRUPT_MCK) {
1953 return PPC_INTERRUPT_MCK;
1954 }
1955
1956 /* Hypervisor decrementer exception */
1957 if (pending_interrupts & PPC_INTERRUPT_HDECR) {
1958 /* LPCR will be clear when not supported so this will work */
1959 bool hdice = !!(lpcr & LPCR_HDICE);
1960 if ((msr_ee || !FIELD_EX64_HV(env->msr)) && hdice) {
1961 /* HDEC clears on delivery */
1962 return PPC_INTERRUPT_HDECR;
1963 }
1964 }
1965
1966 /* Hypervisor virtualization interrupt */
1967 if (pending_interrupts & PPC_INTERRUPT_HVIRT) {
1968 /* LPCR will be clear when not supported so this will work */
1969 bool hvice = !!(lpcr & LPCR_HVICE);
1970 if ((msr_ee || !FIELD_EX64_HV(env->msr)) && hvice) {
1971 return PPC_INTERRUPT_HVIRT;
1972 }
1973 }
1974
1975 /* External interrupt can ignore MSR:EE under some circumstances */
1976 if (pending_interrupts & PPC_INTERRUPT_EXT) {
1977 bool lpes0 = !!(lpcr & LPCR_LPES0);
1978 bool heic = !!(lpcr & LPCR_HEIC);
1979 /* HEIC blocks delivery to the hypervisor */
1980 if ((msr_ee && !(heic && FIELD_EX64_HV(env->msr) &&
1981 !FIELD_EX64(env->msr, MSR, PR))) ||
1982 (env->has_hv_mode && !FIELD_EX64_HV(env->msr) && !lpes0)) {
1983 return PPC_INTERRUPT_EXT;
1984 }
1985 }
1986 if (msr_ee != 0) {
1987 /* Decrementer exception */
1988 if (pending_interrupts & PPC_INTERRUPT_DECR) {
1989 return PPC_INTERRUPT_DECR;
1990 }
1991 if (pending_interrupts & PPC_INTERRUPT_DOORBELL) {
1992 return PPC_INTERRUPT_DOORBELL;
1993 }
1994 if (pending_interrupts & PPC_INTERRUPT_HDOORBELL) {
1995 return PPC_INTERRUPT_HDOORBELL;
1996 }
1997 if (pending_interrupts & PPC_INTERRUPT_PERFM) {
1998 return PPC_INTERRUPT_PERFM;
1999 }
2000 /* EBB exception */
2001 if (pending_interrupts & PPC_INTERRUPT_EBB) {
2002 /*
2003 * EBB exception must be taken in problem state and
2004 * with BESCR_GE set.
2005 */
2006 if (FIELD_EX64(env->msr, MSR, PR) &&
2007 (env->spr[SPR_BESCR] & BESCR_GE)) {
2008 return PPC_INTERRUPT_EBB;
2009 }
2010 }
2011 }
2012
2013 return 0;
2014 }
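/*
 * On POWER9 and later the wake behaviour above depends on PSSCR[EC]: with
 * EC=1 only the events enabled in LPCR[PECE] (see p9_interrupt_powersave())
 * can wake the thread, while with EC=0 any system-caused exception wakes
 * it, which is modelled by forcing the msr_ee gate open.
 */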
2015 #endif /* TARGET_PPC64 */
2016
2017 static int ppc_next_unmasked_interrupt(CPUPPCState *env)
2018 {
2019 uint32_t pending_interrupts = env->pending_interrupts;
2020 target_ulong lpcr = env->spr[SPR_LPCR];
2021 bool async_deliver;
2022
2023 #ifdef TARGET_PPC64
2024 switch (env->excp_model) {
2025 case POWERPC_EXCP_POWER7:
2026 return p7_next_unmasked_interrupt(env, pending_interrupts, lpcr);
2027 case POWERPC_EXCP_POWER8:
2028 return p8_next_unmasked_interrupt(env, pending_interrupts, lpcr);
2029 case POWERPC_EXCP_POWER9:
2030 case POWERPC_EXCP_POWER10:
2031 case POWERPC_EXCP_POWER11:
2032 return p9_next_unmasked_interrupt(env, pending_interrupts, lpcr);
2033 default:
2034 break;
2035 }
2036 #endif
2037
2038 /* External reset */
2039 if (pending_interrupts & PPC_INTERRUPT_RESET) {
2040 return PPC_INTERRUPT_RESET;
2041 }
2042 /* Machine check exception */
2043 if (pending_interrupts & PPC_INTERRUPT_MCK) {
2044 return PPC_INTERRUPT_MCK;
2045 }
2046 #if 0 /* TODO */
2047 /* External debug exception */
2048 if (env->pending_interrupts & PPC_INTERRUPT_DEBUG) {
2049 return PPC_INTERRUPT_DEBUG;
2050 }
2051 #endif
2052
2053 /*
2054 * For interrupts that gate on MSR:EE, we need to do something a
2055 * bit more subtle, as we need to let them through even when EE is
2056 * clear when coming out of some power management states (in order
2057 * for them to become a 0x100).
2058 */
2059 async_deliver = FIELD_EX64(env->msr, MSR, EE) || env->resume_as_sreset;
2060
2061 /* Hypervisor decrementer exception */
2062 if (pending_interrupts & PPC_INTERRUPT_HDECR) {
2063 /* LPCR will be clear when not supported so this will work */
2064 bool hdice = !!(lpcr & LPCR_HDICE);
2065 if ((async_deliver || !FIELD_EX64_HV(env->msr)) && hdice) {
2066 /* HDEC clears on delivery */
2067 return PPC_INTERRUPT_HDECR;
2068 }
2069 }
2070
2071 /* Hypervisor virtualization interrupt */
2072 if (pending_interrupts & PPC_INTERRUPT_HVIRT) {
2073 /* LPCR will be clear when not supported so this will work */
2074 bool hvice = !!(lpcr & LPCR_HVICE);
2075 if ((async_deliver || !FIELD_EX64_HV(env->msr)) && hvice) {
2076 return PPC_INTERRUPT_HVIRT;
2077 }
2078 }
2079
2080 /* External interrupt can ignore MSR:EE under some circumstances */
2081 if (pending_interrupts & PPC_INTERRUPT_EXT) {
2082 bool lpes0 = !!(lpcr & LPCR_LPES0);
2083 bool heic = !!(lpcr & LPCR_HEIC);
2084 /* HEIC blocks delivery to the hypervisor */
2085 if ((async_deliver && !(heic && FIELD_EX64_HV(env->msr) &&
2086 !FIELD_EX64(env->msr, MSR, PR))) ||
2087 (env->has_hv_mode && !FIELD_EX64_HV(env->msr) && !lpes0)) {
2088 return PPC_INTERRUPT_EXT;
2089 }
2090 }
2091 if (FIELD_EX64(env->msr, MSR, CE)) {
2092 /* External critical interrupt */
2093 if (pending_interrupts & PPC_INTERRUPT_CEXT) {
2094 return PPC_INTERRUPT_CEXT;
2095 }
2096 }
2097 if (async_deliver != 0) {
2098 /* Watchdog timer on embedded PowerPC */
2099 if (pending_interrupts & PPC_INTERRUPT_WDT) {
2100 return PPC_INTERRUPT_WDT;
2101 }
2102 if (pending_interrupts & PPC_INTERRUPT_CDOORBELL) {
2103 return PPC_INTERRUPT_CDOORBELL;
2104 }
2105 /* Fixed interval timer on embedded PowerPC */
2106 if (pending_interrupts & PPC_INTERRUPT_FIT) {
2107 return PPC_INTERRUPT_FIT;
2108 }
2109 /* Programmable interval timer on embedded PowerPC */
2110 if (pending_interrupts & PPC_INTERRUPT_PIT) {
2111 return PPC_INTERRUPT_PIT;
2112 }
2113 /* Decrementer exception */
2114 if (pending_interrupts & PPC_INTERRUPT_DECR) {
2115 return PPC_INTERRUPT_DECR;
2116 }
2117 if (pending_interrupts & PPC_INTERRUPT_DOORBELL) {
2118 return PPC_INTERRUPT_DOORBELL;
2119 }
2120 if (pending_interrupts & PPC_INTERRUPT_HDOORBELL) {
2121 return PPC_INTERRUPT_HDOORBELL;
2122 }
2123 if (pending_interrupts & PPC_INTERRUPT_PERFM) {
2124 return PPC_INTERRUPT_PERFM;
2125 }
2126 /* Thermal interrupt */
2127 if (pending_interrupts & PPC_INTERRUPT_THERM) {
2128 return PPC_INTERRUPT_THERM;
2129 }
2130 /* EBB exception */
2131 if (pending_interrupts & PPC_INTERRUPT_EBB) {
2132 /*
2133 * EBB exception must be taken in problem state and
2134 * with BESCR_GE set.
2135 */
2136 if (FIELD_EX64(env->msr, MSR, PR) &&
2137 (env->spr[SPR_BESCR] & BESCR_GE)) {
2138 return PPC_INTERRUPT_EBB;
2139 }
2140 }
2141 }
2142
2143 return 0;
2144 }
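/*
 * For the models handled by the generic code above, the effective priority
 * is therefore: RESET, MCK, then HDECR/HVIRT/EXT with their special masking
 * rules, critical external under MSR[CE], and finally the MSR[EE]-gated
 * sources (WDT, critical doorbell, FIT, PIT, DECR, doorbells, PERFM, THERM,
 * EBB) in the order listed.
 */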
2145
2146 /*
2147 * Sets CPU_INTERRUPT_HARD if there is at least one unmasked interrupt to be
2148 * delivered and clears CPU_INTERRUPT_HARD otherwise.
2149 *
2150 * This method is called by ppc_set_interrupt when an interrupt is raised or
2151 * lowered, and should also be called whenever an interrupt masking condition
2152 * is changed, e.g.:
2153 * - When relevant bits of MSR are altered, like EE, HV, PR, etc.;
2154 * - When relevant bits of LPCR are altered, like PECE, HDICE, HVICE, etc.;
2155 * - When PSSCR[EC] or env->resume_as_sreset are changed;
2156 * - When cs->halted is changed and the CPU has a different interrupt masking
2157 * logic in power-saving mode (e.g., POWER7/8/9/10);
2158 */
2159 void ppc_maybe_interrupt(CPUPPCState *env)
2160 {
2161 CPUState *cs = env_cpu(env);
2162 BQL_LOCK_GUARD();
2163
2164 if (ppc_next_unmasked_interrupt(env)) {
2165 cpu_interrupt(cs, CPU_INTERRUPT_HARD);
2166 } else {
2167 cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
2168 }
2169 }
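/*
 * A minimal usage sketch (hypothetical caller, for illustration only): any
 * code that changes one of the masking conditions listed above is expected
 * to re-evaluate the interrupt state, e.g.:
 *
 *     env->spr[SPR_LPCR] = new_lpcr;   // masking condition changed
 *     ppc_maybe_interrupt(env);        // recompute CPU_INTERRUPT_HARD
 */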
2170
2171 #ifdef TARGET_PPC64
2172 static void p7_deliver_interrupt(CPUPPCState *env, int interrupt)
2173 {
2174 PowerPCCPU *cpu = env_archcpu(env);
2175
2176 switch (interrupt) {
2177 case PPC_INTERRUPT_MCK: /* Machine check exception */
2178 env->pending_interrupts &= ~PPC_INTERRUPT_MCK;
2179 powerpc_excp(cpu, POWERPC_EXCP_MCHECK);
2180 break;
2181
2182 case PPC_INTERRUPT_HDECR: /* Hypervisor decrementer exception */
2183 /* HDEC clears on delivery */
2184 env->pending_interrupts &= ~PPC_INTERRUPT_HDECR;
2185 powerpc_excp(cpu, POWERPC_EXCP_HDECR);
2186 break;
2187
2188 case PPC_INTERRUPT_EXT:
2189 if (books_vhyp_promotes_external_to_hvirt(cpu)) {
2190 powerpc_excp(cpu, POWERPC_EXCP_HVIRT);
2191 } else {
2192 powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL);
2193 }
2194 break;
2195
2196 case PPC_INTERRUPT_DECR: /* Decrementer exception */
2197 powerpc_excp(cpu, POWERPC_EXCP_DECR);
2198 break;
2199 case PPC_INTERRUPT_PERFM:
2200 powerpc_excp(cpu, POWERPC_EXCP_PERFM);
2201 break;
2202 case 0:
2203 /*
2204 * This is a bug! It means that has_work took us out of halt without
2205 * anything to deliver while in a PM state that requires getting
2206 * out via a 0x100
2207 *
2208 * This means we will incorrectly execute past the power management
2209 * instruction instead of triggering a reset.
2210 *
2211 * It generally means a discrepancy between the wakeup conditions in the
2212 * processor has_work implementation and the logic in this function.
2213 */
2214 assert(!env->resume_as_sreset);
2215 break;
2216 default:
2217 cpu_abort(env_cpu(env), "Invalid PowerPC interrupt %d. Aborting\n",
2218 interrupt);
2219 }
2220 }
2221
2222 static void p8_deliver_interrupt(CPUPPCState *env, int interrupt)
2223 {
2224 PowerPCCPU *cpu = env_archcpu(env);
2225
2226 switch (interrupt) {
2227 case PPC_INTERRUPT_MCK: /* Machine check exception */
2228 env->pending_interrupts &= ~PPC_INTERRUPT_MCK;
2229 powerpc_excp(cpu, POWERPC_EXCP_MCHECK);
2230 break;
2231
2232 case PPC_INTERRUPT_HDECR: /* Hypervisor decrementer exception */
2233 /* HDEC clears on delivery */
2234 env->pending_interrupts &= ~PPC_INTERRUPT_HDECR;
2235 powerpc_excp(cpu, POWERPC_EXCP_HDECR);
2236 break;
2237
2238 case PPC_INTERRUPT_EXT:
2239 if (books_vhyp_promotes_external_to_hvirt(cpu)) {
2240 powerpc_excp(cpu, POWERPC_EXCP_HVIRT);
2241 } else {
2242 powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL);
2243 }
2244 break;
2245
2246 case PPC_INTERRUPT_DECR: /* Decrementer exception */
2247 powerpc_excp(cpu, POWERPC_EXCP_DECR);
2248 break;
2249 case PPC_INTERRUPT_DOORBELL:
2250 if (!env->resume_as_sreset) {
2251 env->pending_interrupts &= ~PPC_INTERRUPT_DOORBELL;
2252 }
2253 if (is_book3s_arch2x(env)) {
2254 powerpc_excp(cpu, POWERPC_EXCP_SDOOR);
2255 } else {
2256 powerpc_excp(cpu, POWERPC_EXCP_DOORI);
2257 }
2258 break;
2259 case PPC_INTERRUPT_HDOORBELL:
2260 if (!env->resume_as_sreset) {
2261 env->pending_interrupts &= ~PPC_INTERRUPT_HDOORBELL;
2262 }
2263 powerpc_excp(cpu, POWERPC_EXCP_SDOOR_HV);
2264 break;
2265 case PPC_INTERRUPT_PERFM:
2266 powerpc_excp(cpu, POWERPC_EXCP_PERFM);
2267 break;
2268 case PPC_INTERRUPT_EBB: /* EBB exception */
2269 env->pending_interrupts &= ~PPC_INTERRUPT_EBB;
2270 if (env->spr[SPR_BESCR] & BESCR_PMEO) {
2271 powerpc_excp(cpu, POWERPC_EXCP_PERFM_EBB);
2272 } else if (env->spr[SPR_BESCR] & BESCR_EEO) {
2273 powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL_EBB);
2274 }
2275 break;
2276 case 0:
2277 /*
2278 * This is a bug! It means that has_work took us out of halt without
2279 * anything to deliver while in a PM state that requires getting
2280 * out via a 0x100
2281 *
2282 * This means we will incorrectly execute past the power management
2283 * instruction instead of triggering a reset.
2284 *
2285 * It generally means a discrepancy between the wakeup conditions in the
2286 * processor has_work implementation and the logic in this function.
2287 */
2288 assert(!env->resume_as_sreset);
2289 break;
2290 default:
2291 cpu_abort(env_cpu(env), "Invalid PowerPC interrupt %d. Aborting\n",
2292 interrupt);
2293 }
2294 }
2295
2296 static void p9_deliver_interrupt(CPUPPCState *env, int interrupt)
2297 {
2298 PowerPCCPU *cpu = env_archcpu(env);
2299 CPUState *cs = env_cpu(env);
2300
2301 if (cs->halted && !(env->spr[SPR_PSSCR] & PSSCR_EC) &&
2302 !FIELD_EX64(env->msr, MSR, EE)) {
2303 /*
2304 * A pending interrupt took us out of power-saving, but MSR[EE] says
2305 * that we should return to NIP+4 instead of delivering it.
2306 */
2307 return;
2308 }
2309
2310 switch (interrupt) {
2311 case PPC_INTERRUPT_MCK: /* Machine check exception */
2312 env->pending_interrupts &= ~PPC_INTERRUPT_MCK;
2313 powerpc_excp(cpu, POWERPC_EXCP_MCHECK);
2314 break;
2315
2316 case PPC_INTERRUPT_HDECR: /* Hypervisor decrementer exception */
2317 /* HDEC clears on delivery */
2318 /* XXX: should not see an HDEC if resume_as_sreset. assert? */
2319 env->pending_interrupts &= ~PPC_INTERRUPT_HDECR;
2320 powerpc_excp(cpu, POWERPC_EXCP_HDECR);
2321 break;
2322 case PPC_INTERRUPT_HVIRT: /* Hypervisor virtualization interrupt */
2323 powerpc_excp(cpu, POWERPC_EXCP_HVIRT);
2324 break;
2325
2326 case PPC_INTERRUPT_EXT:
2327 if (books_vhyp_promotes_external_to_hvirt(cpu)) {
2328 powerpc_excp(cpu, POWERPC_EXCP_HVIRT);
2329 } else {
2330 powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL);
2331 }
2332 break;
2333
2334 case PPC_INTERRUPT_DECR: /* Decrementer exception */
2335 powerpc_excp(cpu, POWERPC_EXCP_DECR);
2336 break;
2337 case PPC_INTERRUPT_DOORBELL:
2338 if (!env->resume_as_sreset) {
2339 env->pending_interrupts &= ~PPC_INTERRUPT_DOORBELL;
2340 }
2341 powerpc_excp(cpu, POWERPC_EXCP_SDOOR);
2342 break;
2343 case PPC_INTERRUPT_HDOORBELL:
2344 if (!env->resume_as_sreset) {
2345 env->pending_interrupts &= ~PPC_INTERRUPT_HDOORBELL;
2346 }
2347 powerpc_excp(cpu, POWERPC_EXCP_SDOOR_HV);
2348 break;
2349 case PPC_INTERRUPT_PERFM:
2350 powerpc_excp(cpu, POWERPC_EXCP_PERFM);
2351 break;
2352 case PPC_INTERRUPT_EBB: /* EBB exception */
2353 env->pending_interrupts &= ~PPC_INTERRUPT_EBB;
2354 if (env->spr[SPR_BESCR] & BESCR_PMEO) {
2355 powerpc_excp(cpu, POWERPC_EXCP_PERFM_EBB);
2356 } else if (env->spr[SPR_BESCR] & BESCR_EEO) {
2357 powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL_EBB);
2358 }
2359 break;
2360 case 0:
2361 /*
2362 * This is a bug! It means that has_work took us out of halt without
2363 * anything to deliver while in a PM state that requires getting
2364 * out via a 0x100
2365 *
2366 * This means we will incorrectly execute past the power management
2367 * instruction instead of triggering a reset.
2368 *
2369 * It generally means a discrepancy between the wakeup conditions in the
2370 * processor has_work implementation and the logic in this function.
2371 */
2372 assert(!env->resume_as_sreset);
2373 break;
2374 default:
2375 cpu_abort(env_cpu(env), "Invalid PowerPC interrupt %d. Aborting\n",
2376 interrupt);
2377 }
2378 }
2379 #endif /* TARGET_PPC64 */
2380
2381 static void ppc_deliver_interrupt(CPUPPCState *env, int interrupt)
2382 {
2383 #ifdef TARGET_PPC64
2384 switch (env->excp_model) {
2385 case POWERPC_EXCP_POWER7:
2386 return p7_deliver_interrupt(env, interrupt);
2387 case POWERPC_EXCP_POWER8:
2388 return p8_deliver_interrupt(env, interrupt);
2389 case POWERPC_EXCP_POWER9:
2390 case POWERPC_EXCP_POWER10:
2391 case POWERPC_EXCP_POWER11:
2392 return p9_deliver_interrupt(env, interrupt);
2393 default:
2394 break;
2395 }
2396 #endif
2397 PowerPCCPU *cpu = env_archcpu(env);
2398
2399 switch (interrupt) {
2400 case PPC_INTERRUPT_RESET: /* External reset */
2401 env->pending_interrupts &= ~PPC_INTERRUPT_RESET;
2402 powerpc_excp(cpu, POWERPC_EXCP_RESET);
2403 break;
2404 case PPC_INTERRUPT_MCK: /* Machine check exception */
2405 env->pending_interrupts &= ~PPC_INTERRUPT_MCK;
2406 powerpc_excp(cpu, POWERPC_EXCP_MCHECK);
2407 break;
2408
2409 case PPC_INTERRUPT_HDECR: /* Hypervisor decrementer exception */
2410 /* HDEC clears on delivery */
2411 env->pending_interrupts &= ~PPC_INTERRUPT_HDECR;
2412 powerpc_excp(cpu, POWERPC_EXCP_HDECR);
2413 break;
2414 case PPC_INTERRUPT_HVIRT: /* Hypervisor virtualization interrupt */
2415 powerpc_excp(cpu, POWERPC_EXCP_HVIRT);
2416 break;
2417
2418 case PPC_INTERRUPT_EXT:
2419 if (books_vhyp_promotes_external_to_hvirt(cpu)) {
2420 powerpc_excp(cpu, POWERPC_EXCP_HVIRT);
2421 } else {
2422 powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL);
2423 }
2424 break;
2425 case PPC_INTERRUPT_CEXT: /* External critical interrupt */
2426 powerpc_excp(cpu, POWERPC_EXCP_CRITICAL);
2427 break;
2428
2429 case PPC_INTERRUPT_WDT: /* Watchdog timer on embedded PowerPC */
2430 env->pending_interrupts &= ~PPC_INTERRUPT_WDT;
2431 powerpc_excp(cpu, POWERPC_EXCP_WDT);
2432 break;
2433 case PPC_INTERRUPT_CDOORBELL:
2434 env->pending_interrupts &= ~PPC_INTERRUPT_CDOORBELL;
2435 powerpc_excp(cpu, POWERPC_EXCP_DOORCI);
2436 break;
2437 case PPC_INTERRUPT_FIT: /* Fixed interval timer on embedded PowerPC */
2438 env->pending_interrupts &= ~PPC_INTERRUPT_FIT;
2439 powerpc_excp(cpu, POWERPC_EXCP_FIT);
2440 break;
2441 case PPC_INTERRUPT_PIT: /* Programmable interval timer on embedded ppc */
2442 env->pending_interrupts &= ~PPC_INTERRUPT_PIT;
2443 powerpc_excp(cpu, POWERPC_EXCP_PIT);
2444 break;
2445 case PPC_INTERRUPT_DECR: /* Decrementer exception */
2446 if (ppc_decr_clear_on_delivery(env)) {
2447 env->pending_interrupts &= ~PPC_INTERRUPT_DECR;
2448 }
2449 powerpc_excp(cpu, POWERPC_EXCP_DECR);
2450 break;
2451 case PPC_INTERRUPT_DOORBELL:
2452 env->pending_interrupts &= ~PPC_INTERRUPT_DOORBELL;
2453 if (is_book3s_arch2x(env)) {
2454 powerpc_excp(cpu, POWERPC_EXCP_SDOOR);
2455 } else {
2456 powerpc_excp(cpu, POWERPC_EXCP_DOORI);
2457 }
2458 break;
2459 case PPC_INTERRUPT_HDOORBELL:
2460 env->pending_interrupts &= ~PPC_INTERRUPT_HDOORBELL;
2461 powerpc_excp(cpu, POWERPC_EXCP_SDOOR_HV);
2462 break;
2463 case PPC_INTERRUPT_PERFM:
2464 powerpc_excp(cpu, POWERPC_EXCP_PERFM);
2465 break;
2466 case PPC_INTERRUPT_THERM: /* Thermal interrupt */
2467 env->pending_interrupts &= ~PPC_INTERRUPT_THERM;
2468 powerpc_excp(cpu, POWERPC_EXCP_THERM);
2469 break;
2470 case PPC_INTERRUPT_EBB: /* EBB exception */
2471 env->pending_interrupts &= ~PPC_INTERRUPT_EBB;
2472 if (env->spr[SPR_BESCR] & BESCR_PMEO) {
2473 powerpc_excp(cpu, POWERPC_EXCP_PERFM_EBB);
2474 } else if (env->spr[SPR_BESCR] & BESCR_EEO) {
2475 powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL_EBB);
2476 }
2477 break;
2478 case 0:
2479 /*
2480 * This is a bug! It means that has_work took us out of halt without
2481 * anything to deliver while in a PM state that requires getting
2482 * out via a 0x100
2483 *
2484 * This means we will incorrectly execute past the power management
2485 * instruction instead of triggering a reset.
2486 *
2487 * It generally means a discrepancy between the wakeup conditions in the
2488 * processor has_work implementation and the logic in this function.
2489 */
2490 assert(!env->resume_as_sreset);
2491 break;
2492 default:
2493 cpu_abort(env_cpu(env), "Invalid PowerPC interrupt %d. Aborting\n",
2494 interrupt);
2495 }
2496 }
2497
2498 /*
2499 * system reset is not delivered via normal irq method, so have to set
2500 * halted = 0 to resume CPU running if it was halted. Possibly we should
2501 * move it over to using PPC_INTERRUPT_RESET rather than async_run_on_cpu.
2502 */
2503 void ppc_cpu_do_system_reset(CPUState *cs)
2504 {
2505 PowerPCCPU *cpu = POWERPC_CPU(cs);
2506
2507 cs->halted = 0;
2508 powerpc_excp(cpu, POWERPC_EXCP_RESET);
2509 }
2510
2511 void ppc_cpu_do_fwnmi_machine_check(CPUState *cs, target_ulong vector)
2512 {
2513 PowerPCCPU *cpu = POWERPC_CPU(cs);
2514 CPUPPCState *env = &cpu->env;
2515 target_ulong msr = 0;
2516
2517 /*
2518 * Set MSR and NIP for the handler; SRR0/1, DAR and DSISR have already
2519 * been set by KVM.
2520 */
2521 msr = (1ULL << MSR_ME);
2522 msr |= env->msr & (1ULL << MSR_SF);
2523 if (ppc_interrupts_little_endian(cpu, false)) {
2524 msr |= (1ULL << MSR_LE);
2525 }
2526
2527 /* Anything for nested required here? MSR[HV] bit? */
2528
2529 cs->halted = 0;
2530 powerpc_set_excp_state(cpu, vector, msr);
2531 }
2532
2533 bool ppc_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
2534 {
2535 CPUPPCState *env = cpu_env(cs);
2536 int interrupt;
2537
2538 if ((interrupt_request & CPU_INTERRUPT_HARD) == 0) {
2539 return false;
2540 }
2541
2542 interrupt = ppc_next_unmasked_interrupt(env);
2543 if (interrupt == 0) {
2544 return false;
2545 }
2546
2547 ppc_deliver_interrupt(env, interrupt);
2548 if (env->pending_interrupts == 0) {
2549 cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
2550 }
2551 return true;
2552 }
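/*
 * ppc_cpu_exec_interrupt() is the TCG hook invoked from the main execution
 * loop when CPU_INTERRUPT_HARD is set: it re-checks the masking logic,
 * delivers at most one interrupt per call, and only clears
 * CPU_INTERRUPT_HARD once nothing at all is left pending, so still-masked
 * sources keep the flag raised.
 */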
2553
2554 #endif /* !CONFIG_USER_ONLY */
2555
2556 /*****************************************************************************/
2557 /* Exceptions processing helpers */
2558
2559 void raise_exception_err_ra(CPUPPCState *env, uint32_t exception,
2560 uint32_t error_code, uintptr_t raddr)
2561 {
2562 CPUState *cs = env_cpu(env);
2563
2564 cs->exception_index = exception;
2565 env->error_code = error_code;
2566 cpu_loop_exit_restore(cs, raddr);
2567 }
2568
2569 void raise_exception_err(CPUPPCState *env, uint32_t exception,
2570 uint32_t error_code)
2571 {
2572 raise_exception_err_ra(env, exception, error_code, 0);
2573 }
2574
2575 void raise_exception(CPUPPCState *env, uint32_t exception)
2576 {
2577 raise_exception_err_ra(env, exception, 0, 0);
2578 }
2579
2580 void raise_exception_ra(CPUPPCState *env, uint32_t exception,
2581 uintptr_t raddr)
2582 {
2583 raise_exception_err_ra(env, exception, 0, raddr);
2584 }
2585
2586 #ifdef CONFIG_TCG
2587 void helper_raise_exception_err(CPUPPCState *env, uint32_t exception,
2588 uint32_t error_code)
2589 {
2590 raise_exception_err_ra(env, exception, error_code, 0);
2591 }
2592
2593 void helper_raise_exception(CPUPPCState *env, uint32_t exception)
2594 {
2595 raise_exception_err_ra(env, exception, 0, 0);
2596 }
2597
2598 #ifndef CONFIG_USER_ONLY
2599 void helper_store_msr(CPUPPCState *env, target_ulong val)
2600 {
2601 uint32_t excp = hreg_store_msr(env, val, 0);
2602
2603 if (excp != 0) {
2604 cpu_interrupt_exittb(env_cpu(env));
2605 raise_exception(env, excp);
2606 }
2607 }
2608
2609 void helper_ppc_maybe_interrupt(CPUPPCState *env)
2610 {
2611 ppc_maybe_interrupt(env);
2612 }
2613
2614 #ifdef TARGET_PPC64
2615 void helper_scv(CPUPPCState *env, uint32_t lev)
2616 {
2617 if (env->spr[SPR_FSCR] & (1ull << FSCR_SCV)) {
2618 raise_exception_err(env, POWERPC_EXCP_SYSCALL_VECTORED, lev);
2619 } else {
2620 raise_exception_err(env, POWERPC_EXCP_FU, FSCR_IC_SCV);
2621 }
2622 }
2623
2624 void helper_pminsn(CPUPPCState *env, uint32_t insn)
2625 {
2626 CPUState *cs = env_cpu(env);
2627
2628 cs->halted = 1;
2629
2630 /* Condition for waking up at 0x100 */
2631 env->resume_as_sreset = (insn != PPC_PM_STOP) ||
2632 (env->spr[SPR_PSSCR] & PSSCR_EC);
2633
2634 /* HDECR is not supposed to wake from the PM state; it may have already fired */
2635 if (env->resume_as_sreset) {
2636 PowerPCCPU *cpu = env_archcpu(env);
2637 ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 0);
2638 }
2639
2640 ppc_maybe_interrupt(env);
2641 }
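/*
 * Illustrative effect of the logic above (a rough sketch, not exhaustive):
 * a "stop" executed with PSSCR[EC]=1, or any of the other PM instructions,
 * sets resume_as_sreset so the thread is expected to wake through the 0x100
 * system reset path; with PSSCR[EC]=0 the thread wakes on any system-caused
 * exception and continues after the stop instruction, delivering the
 * interrupt only if it is not masked.
 */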
2642 #endif /* TARGET_PPC64 */
2643
2644 static void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr)
2645 {
2646 /* MSR:POW cannot be set by any form of rfi */
2647 msr &= ~(1ULL << MSR_POW);
2648
2649 /* MSR:TGPR cannot be set by any form of rfi */
2650 if (env->flags & POWERPC_FLAG_TGPR)
2651 msr &= ~(1ULL << MSR_TGPR);
2652
2653 #ifdef TARGET_PPC64
2654 /* Switching to 32-bit ? Crop the nip */
2655 if (!msr_is_64bit(env, msr)) {
2656 nip = (uint32_t)nip;
2657 }
2658 #else
2659 nip = (uint32_t)nip;
2660 #endif
2661 /* XXX: beware: this is false if VLE is supported */
2662 env->nip = nip & ~((target_ulong)0x00000003);
2663 hreg_store_msr(env, msr, 1);
2664 trace_ppc_excp_rfi(env->nip, env->msr);
2665 /*
2666 * No need to raise an exception here, as rfi is always the last
2667 * insn of a TB
2668 */
2669 cpu_interrupt_exittb(env_cpu(env));
2670 /* Reset the reservation */
2671 env->reserve_addr = -1;
2672
2673 /* Context synchronizing: check if TCG TLB needs flush */
2674 check_tlb_flush(env, false);
2675 }
2676
2677 void helper_rfi(CPUPPCState *env)
2678 {
2679 do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1] & 0xfffffffful);
2680 }
2681
2682 #ifdef TARGET_PPC64
2683 void helper_rfid(CPUPPCState *env)
2684 {
2685 /*
2686 * The architecture defines a number of rules for which bits can
2687 * change but in practice, we handle this in hreg_store_msr()
2688 * which will be called by do_rfi(), so there is no need to filter
2689 * here
2690 */
2691 do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1]);
2692 }
2693
2694 void helper_rfscv(CPUPPCState *env)
2695 {
2696 do_rfi(env, env->lr, env->ctr);
2697 }
2698
2699 void helper_hrfid(CPUPPCState *env)
2700 {
2701 do_rfi(env, env->spr[SPR_HSRR0], env->spr[SPR_HSRR1]);
2702 }
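/*
 * The rf* helpers above differ only in where the return context comes from:
 * rfi uses SRR0/SRR1 with the MSR value cropped to the low 32 bits, rfid
 * uses SRR0/SRR1 unmodified, rfscv uses LR/CTR and hrfid uses HSRR0/HSRR1;
 * all of them funnel through do_rfi().
 */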
2703
2704 void helper_rfebb(CPUPPCState *env, target_ulong s)
2705 {
2706 target_ulong msr = env->msr;
2707
2708 /*
2709 * Handling of BESCR bits 32:33 according to PowerISA v3.1:
2710 *
2711 * "If BESCR 32:33 != 0b00 the instruction is treated as if
2712 * the instruction form were invalid."
2713 */
2714 if (env->spr[SPR_BESCR] & BESCR_INVALID) {
2715 raise_exception_err(env, POWERPC_EXCP_PROGRAM,
2716 POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
2717 }
2718
2719 env->nip = env->spr[SPR_EBBRR];
2720
2721 /* Switching to 32-bit ? Crop the nip */
2722 if (!msr_is_64bit(env, msr)) {
2723 env->nip = (uint32_t)env->spr[SPR_EBBRR];
2724 }
2725
2726 if (s) {
2727 env->spr[SPR_BESCR] |= BESCR_GE;
2728 } else {
2729 env->spr[SPR_BESCR] &= ~BESCR_GE;
2730 }
2731 }
2732
2733 /*
2734 * Triggers or queues an 'ebb_excp' EBB exception. All checks
2735 * but FSCR, HFSCR and msr_pr must be done beforehand.
2736 *
2737 * PowerISA v3.1 isn't clear about whether an EBB should be
2738 * postponed or cancelled if the EBB facility is unavailable.
2739 * Our assumption here is that the EBB is cancelled if both
2740 * FSCR and HFSCR EBB facilities aren't available.
2741 */
2742 static void do_ebb(CPUPPCState *env, int ebb_excp)
2743 {
2744 PowerPCCPU *cpu = env_archcpu(env);
2745
2746 /*
2747 * FSCR_EBB and FSCR_IC_EBB are the same bits used with
2748 * HFSCR.
2749 */
2750 helper_fscr_facility_check(env, FSCR_EBB, 0, FSCR_IC_EBB);
2751 helper_hfscr_facility_check(env, FSCR_EBB, "EBB", FSCR_IC_EBB);
2752
2753 if (ebb_excp == POWERPC_EXCP_PERFM_EBB) {
2754 env->spr[SPR_BESCR] |= BESCR_PMEO;
2755 } else if (ebb_excp == POWERPC_EXCP_EXTERNAL_EBB) {
2756 env->spr[SPR_BESCR] |= BESCR_EEO;
2757 }
2758
2759 if (FIELD_EX64(env->msr, MSR, PR)) {
2760 powerpc_excp(cpu, ebb_excp);
2761 } else {
2762 ppc_set_irq(cpu, PPC_INTERRUPT_EBB, 1);
2763 }
2764 }
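/*
 * In other words, an event-based branch raised while the thread is already
 * in problem state is taken immediately, otherwise it is recorded as a
 * pending PPC_INTERRUPT_EBB and delivered once the masking conditions
 * (problem state and BESCR[GE]) are met again.
 */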
2765
2766 void raise_ebb_perfm_exception(CPUPPCState *env)
2767 {
2768 bool perfm_ebb_enabled = env->spr[SPR_POWER_MMCR0] & MMCR0_EBE &&
2769 env->spr[SPR_BESCR] & BESCR_PME &&
2770 env->spr[SPR_BESCR] & BESCR_GE;
2771
2772 if (!perfm_ebb_enabled) {
2773 return;
2774 }
2775
2776 do_ebb(env, POWERPC_EXCP_PERFM_EBB);
2777 }
2778 #endif /* TARGET_PPC64 */
2779
2780 /*****************************************************************************/
2781 /* Embedded PowerPC specific helpers */
2782 void helper_40x_rfci(CPUPPCState *env)
2783 {
2784 do_rfi(env, env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3]);
2785 }
2786
2787 void helper_rfci(CPUPPCState *env)
2788 {
2789 do_rfi(env, env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1]);
2790 }
2791
2792 void helper_rfdi(CPUPPCState *env)
2793 {
2794 /* FIXME: choose CSRR1 or DSRR1 based on cpu type */
2795 do_rfi(env, env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1]);
2796 }
2797
2798 void helper_rfmci(CPUPPCState *env)
2799 {
2800 /* FIXME: choose CSRR1 or MCSRR1 based on cpu type */
2801 do_rfi(env, env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1]);
2802 }
2803 #endif /* !CONFIG_USER_ONLY */
2804
2805 void helper_TW(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
2806 uint32_t flags)
2807 {
2808 if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
2809 ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
2810 ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
2811 ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
2812 ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
2813 raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
2814 POWERPC_EXCP_TRAP, GETPC());
2815 }
2816 }
2817
2818 #ifdef TARGET_PPC64
2819 void helper_TD(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
2820 uint32_t flags)
2821 {
2822 if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
2823 ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
2824 ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
2825 ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
2826 ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01))))) {
2827 raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
2828 POWERPC_EXCP_TRAP, GETPC());
2829 }
2830 }
2831 #endif /* TARGET_PPC64 */
2832
2833 static uint32_t helper_SIMON_LIKE_32_64(uint32_t x, uint64_t key, uint32_t lane)
2834 {
2835 const uint16_t c = 0xfffc;
2836 const uint64_t z0 = 0xfa2561cdf44ac398ULL;
2837 uint16_t z = 0, temp;
2838 uint16_t k[32], eff_k[32], xleft[33], xright[33], fxleft[32];
2839
2840 for (int i = 3; i >= 0; i--) {
2841 k[i] = key & 0xffff;
2842 key >>= 16;
2843 }
2844 xleft[0] = x & 0xffff;
2845 xright[0] = (x >> 16) & 0xffff;
2846
2847 for (int i = 0; i < 28; i++) {
2848 z = (z0 >> (63 - i)) & 1;
2849 temp = ror16(k[i + 3], 3) ^ k[i + 1];
2850 k[i + 4] = c ^ z ^ k[i] ^ temp ^ ror16(temp, 1);
2851 }
2852
2853 for (int i = 0; i < 8; i++) {
2854 eff_k[4 * i + 0] = k[4 * i + ((0 + lane) % 4)];
2855 eff_k[4 * i + 1] = k[4 * i + ((1 + lane) % 4)];
2856 eff_k[4 * i + 2] = k[4 * i + ((2 + lane) % 4)];
2857 eff_k[4 * i + 3] = k[4 * i + ((3 + lane) % 4)];
2858 }
2859
2860 for (int i = 0; i < 32; i++) {
2861 fxleft[i] = (rol16(xleft[i], 1) &
2862 rol16(xleft[i], 8)) ^ rol16(xleft[i], 2);
2863 xleft[i + 1] = xright[i] ^ fxleft[i] ^ eff_k[i];
2864 xright[i + 1] = xleft[i];
2865 }
2866
2867 return (((uint32_t)xright[32]) << 16) | xleft[32];
2868 }
2869
2870 static uint64_t hash_digest(uint64_t ra, uint64_t rb, uint64_t key)
2871 {
2872 uint64_t stage0_h = 0ULL, stage0_l = 0ULL;
2873 uint64_t stage1_h, stage1_l;
2874
2875 for (int i = 0; i < 4; i++) {
2876 stage0_h |= ror64(rb & 0xff, 8 * (2 * i + 1));
2877 stage0_h |= ((ra >> 32) & 0xff) << (8 * 2 * i);
2878 stage0_l |= ror64((rb >> 32) & 0xff, 8 * (2 * i + 1));
2879 stage0_l |= (ra & 0xff) << (8 * 2 * i);
2880 rb >>= 8;
2881 ra >>= 8;
2882 }
2883
2884 stage1_h = (uint64_t)helper_SIMON_LIKE_32_64(stage0_h >> 32, key, 0) << 32;
2885 stage1_h |= helper_SIMON_LIKE_32_64(stage0_h, key, 1);
2886 stage1_l = (uint64_t)helper_SIMON_LIKE_32_64(stage0_l >> 32, key, 2) << 32;
2887 stage1_l |= helper_SIMON_LIKE_32_64(stage0_l, key, 3);
2888
2889 return stage1_h ^ stage1_l;
2890 }
2891
2892 static void do_hash(CPUPPCState *env, target_ulong ea, target_ulong ra,
2893 target_ulong rb, uint64_t key, bool store)
2894 {
2895 uint64_t calculated_hash = hash_digest(ra, rb, key), loaded_hash;
2896
2897 if (store) {
2898 cpu_stq_data_ra(env, ea, calculated_hash, GETPC());
2899 } else {
2900 loaded_hash = cpu_ldq_data_ra(env, ea, GETPC());
2901 if (loaded_hash != calculated_hash) {
2902 raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
2903 POWERPC_EXCP_TRAP, GETPC());
2904 }
2905 }
2906 }
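/*
 * A rough usage sketch of the pair above: hashst stores
 * hash_digest(ra, rb, key) at the effective address, and a later hashchk
 * over the same operands and key reloads it and raises a trap-type PROGRAM
 * exception on mismatch. This is the mechanism compilers can use for
 * return-address (ROP) protection of the saved link register.
 */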
2907
2908 #include "qemu/guest-random.h"
2909
2910 #ifdef TARGET_PPC64
2911 #define HELPER_HASH(op, key, store, dexcr_aspect) \
2912 void helper_##op(CPUPPCState *env, target_ulong ea, target_ulong ra, \
2913 target_ulong rb) \
2914 { \
2915 if (env->msr & R_MSR_PR_MASK) { \
2916 if (!(env->spr[SPR_DEXCR] & R_DEXCR_PRO_##dexcr_aspect##_MASK || \
2917 env->spr[SPR_HDEXCR] & R_HDEXCR_ENF_##dexcr_aspect##_MASK)) \
2918 return; \
2919 } else if (!(env->msr & R_MSR_HV_MASK)) { \
2920 if (!(env->spr[SPR_DEXCR] & R_DEXCR_PNH_##dexcr_aspect##_MASK || \
2921 env->spr[SPR_HDEXCR] & R_HDEXCR_ENF_##dexcr_aspect##_MASK)) \
2922 return; \
2923 } else if (!(env->msr & R_MSR_S_MASK)) { \
2924 if (!(env->spr[SPR_HDEXCR] & R_HDEXCR_HNU_##dexcr_aspect##_MASK)) \
2925 return; \
2926 } \
2927 \
2928 do_hash(env, ea, ra, rb, key, store); \
2929 }
2930 #else
2931 #define HELPER_HASH(op, key, store, dexcr_aspect) \
2932 void helper_##op(CPUPPCState *env, target_ulong ea, target_ulong ra, \
2933 target_ulong rb) \
2934 { \
2935 do_hash(env, ea, ra, rb, key, store); \
2936 }
2937 #endif /* TARGET_PPC64 */
2938
2939 HELPER_HASH(HASHST, env->spr[SPR_HASHKEYR], true, NPHIE)
2940 HELPER_HASH(HASHCHK, env->spr[SPR_HASHKEYR], false, NPHIE)
2941 HELPER_HASH(HASHSTP, env->spr[SPR_HASHPKEYR], true, PHIE)
2942 HELPER_HASH(HASHCHKP, env->spr[SPR_HASHPKEYR], false, PHIE)
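/*
 * For reference, each HELPER_HASH() line above expands to a helper such as
 * helper_HASHST(env, ea, ra, rb). On 64-bit targets the DEXCR/HDEXCR aspect
 * named in the macro (NPHIE for hashst/hashchk, PHIE for the privileged
 * hashstp/hashchkp forms) gates whether the hash is computed at all for the
 * current privilege level; when the aspect is not enabled the instruction
 * behaves as a no-op.
 */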
2943
2944 #ifndef CONFIG_USER_ONLY
2945 /* Embedded.Processor Control */
2946 static int dbell2irq(target_ulong rb)
2947 {
2948 int msg = rb & DBELL_TYPE_MASK;
2949 int irq = -1;
2950
2951 switch (msg) {
2952 case DBELL_TYPE_DBELL:
2953 irq = PPC_INTERRUPT_DOORBELL;
2954 break;
2955 case DBELL_TYPE_DBELL_CRIT:
2956 irq = PPC_INTERRUPT_CDOORBELL;
2957 break;
2958 case DBELL_TYPE_G_DBELL:
2959 case DBELL_TYPE_G_DBELL_CRIT:
2960 case DBELL_TYPE_G_DBELL_MC:
2961 /* XXX implement */
2962 default:
2963 break;
2964 }
2965
2966 return irq;
2967 }
2968
2969 void helper_msgclr(CPUPPCState *env, target_ulong rb)
2970 {
2971 int irq = dbell2irq(rb);
2972
2973 if (irq < 0) {
2974 return;
2975 }
2976
2977 ppc_set_irq(env_archcpu(env), irq, 0);
2978 }
2979
2980 void helper_msgsnd(target_ulong rb)
2981 {
2982 int irq = dbell2irq(rb);
2983 int pir = rb & DBELL_PIRTAG_MASK;
2984 CPUState *cs;
2985
2986 if (irq < 0) {
2987 return;
2988 }
2989
2990 bql_lock();
2991 CPU_FOREACH(cs) {
2992 PowerPCCPU *cpu = POWERPC_CPU(cs);
2993 CPUPPCState *cenv = &cpu->env;
2994
2995 if ((rb & DBELL_BRDCAST_MASK) || (cenv->spr[SPR_BOOKE_PIR] == pir)) {
2996 ppc_set_irq(cpu, irq, 1);
2997 }
2998 }
2999 bql_unlock();
3000 }
3001
3002 /* Server Processor Control */
3003
3004 static bool dbell_type_server(target_ulong rb)
3005 {
3006 /*
3007 * A Directed Hypervisor Doorbell message is sent only if the
3008 * message type is 5. All other types are reserved and the
3009 * instruction is a no-op
3010 */
3011 return (rb & DBELL_TYPE_MASK) == DBELL_TYPE_DBELL_SERVER;
3012 }
3013
3014 static inline bool dbell_bcast_core(target_ulong rb)
3015 {
3016 return (rb & DBELL_BRDCAST_MASK) == DBELL_BRDCAST_CORE;
3017 }
3018
3019 static inline bool dbell_bcast_subproc(target_ulong rb)
3020 {
3021 return (rb & DBELL_BRDCAST_MASK) == DBELL_BRDCAST_SUBPROC;
3022 }
3023
3024 /*
3025 * Send an interrupt to a thread in the same core as the CPU owning env.
3026 */
3027 static void msgsnd_core_tir(CPUPPCState *env, uint32_t target_tir, int irq)
3028 {
3029 PowerPCCPU *cpu = env_archcpu(env);
3030 CPUState *cs = env_cpu(env);
3031
3032 if (ppc_cpu_lpar_single_threaded(cs)) {
3033 if (target_tir == 0) {
3034 ppc_set_irq(cpu, irq, 1);
3035 }
3036 } else {
3037 CPUState *ccs;
3038
3039 /* Does iothread need to be locked for walking CPU list? */
3040 bql_lock();
3041 THREAD_SIBLING_FOREACH(cs, ccs) {
3042 PowerPCCPU *ccpu = POWERPC_CPU(ccs);
3043 if (target_tir == ppc_cpu_tir(ccpu)) {
3044 ppc_set_irq(ccpu, irq, 1);
3045 break;
3046 }
3047 }
3048 bql_unlock();
3049 }
3050 }
3051
3052 void helper_book3s_msgclr(CPUPPCState *env, target_ulong rb)
3053 {
3054 if (!dbell_type_server(rb)) {
3055 return;
3056 }
3057
3058 ppc_set_irq(env_archcpu(env), PPC_INTERRUPT_HDOORBELL, 0);
3059 }
3060
3061 void helper_book3s_msgsnd(CPUPPCState *env, target_ulong rb)
3062 {
3063 int pir = rb & DBELL_PROCIDTAG_MASK;
3064 bool brdcast = false;
3065 CPUState *cs, *ccs;
3066 PowerPCCPU *cpu;
3067
3068 if (!dbell_type_server(rb)) {
3069 return;
3070 }
3071
3072 /* POWER8 msgsnd is like msgsndp (targets a thread within core) */
3073 if (!(env->insns_flags2 & PPC2_ISA300)) {
3074 msgsnd_core_tir(env, rb & PPC_BITMASK(57, 63), PPC_INTERRUPT_HDOORBELL);
3075 return;
3076 }
3077
3078 /* POWER9 and later msgsnd is a global (targets any thread) */
3079 cpu = ppc_get_vcpu_by_pir(pir);
3080 if (!cpu) {
3081 return;
3082 }
3083 cs = CPU(cpu);
3084
3085 if (dbell_bcast_core(rb) || (dbell_bcast_subproc(rb) &&
3086 (env->flags & POWERPC_FLAG_SMT_1LPAR))) {
3087 brdcast = true;
3088 }
3089
3090 if (ppc_cpu_core_single_threaded(cs) || !brdcast) {
3091 ppc_set_irq(cpu, PPC_INTERRUPT_HDOORBELL, 1);
3092 return;
3093 }
3094
3095 /*
3096 * Why is bql needed for walking CPU list? Answer seems to be because ppc
3097 * irq handling needs it, but ppc_set_irq takes the lock itself if needed,
3098 * so could this be removed?
3099 */
3100 bql_lock();
3101 THREAD_SIBLING_FOREACH(cs, ccs) {
3102 ppc_set_irq(POWERPC_CPU(ccs), PPC_INTERRUPT_HDOORBELL, 1);
3103 }
3104 bql_unlock();
3105 }
3106
3107 #ifdef TARGET_PPC64
3108 void helper_book3s_msgclrp(CPUPPCState *env, target_ulong rb)
3109 {
3110 helper_hfscr_facility_check(env, HFSCR_MSGP, "msgclrp", HFSCR_IC_MSGP);
3111
3112 if (!dbell_type_server(rb)) {
3113 return;
3114 }
3115
3116 ppc_set_irq(env_archcpu(env), PPC_INTERRUPT_DOORBELL, 0);
3117 }
3118
3119 /*
3120 * sends a message to another thread on the same
3121 * multi-threaded processor
3122 */
3123 void helper_book3s_msgsndp(CPUPPCState *env, target_ulong rb)
3124 {
3125 helper_hfscr_facility_check(env, HFSCR_MSGP, "msgsndp", HFSCR_IC_MSGP);
3126
3127 if (!dbell_type_server(rb)) {
3128 return;
3129 }
3130
3131 msgsnd_core_tir(env, rb & PPC_BITMASK(57, 63), PPC_INTERRUPT_DOORBELL);
3132 }
3133 #endif /* TARGET_PPC64 */
3134
3135 /* Single-step tracing */
3136 void helper_book3s_trace(CPUPPCState *env, target_ulong prev_ip)
3137 {
3138 uint32_t error_code = 0;
3139 if (env->insns_flags2 & PPC2_ISA207S) {
3140 /* Load/store reporting, SRR1[35, 36] and SDAR, are not implemented. */
3141 env->spr[SPR_POWER_SIAR] = prev_ip;
3142 error_code = PPC_BIT(33);
3143 }
3144 raise_exception_err(env, POWERPC_EXCP_TRACE, error_code);
3145 }
3146
3147 void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
3148 MMUAccessType access_type,
3149 int mmu_idx, uintptr_t retaddr)
3150 {
3151 CPUPPCState *env = cpu_env(cs);
3152 uint32_t insn;
3153
3154 /* Restore state and reload the insn we executed, for filling in DSISR. */
3155 cpu_restore_state(cs, retaddr);
3156 insn = ppc_ldl_code(env, env->nip);
3157
3158 switch (env->mmu_model) {
3159 case POWERPC_MMU_SOFT_4xx:
3160 env->spr[SPR_40x_DEAR] = vaddr;
3161 break;
3162 case POWERPC_MMU_BOOKE:
3163 case POWERPC_MMU_BOOKE206:
3164 env->spr[SPR_BOOKE_DEAR] = vaddr;
3165 break;
3166 default:
3167 env->spr[SPR_DAR] = vaddr;
3168 break;
3169 }
3170
3171 cs->exception_index = POWERPC_EXCP_ALIGN;
3172 env->error_code = insn & 0x03FF0000;
3173 cpu_loop_exit(cs);
3174 }
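/*
 * Note on the error code above: the 0x03FF0000 mask keeps the rS/rD and rA
 * fields of the faulting opcode; the per-model powerpc_excp_*() alignment
 * handling can then fold them into DSISR, roughly matching what hardware
 * reports for alignment interrupts on the affected cores.
 */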
3175
3176 void ppc_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
3177 vaddr vaddr, unsigned size,
3178 MMUAccessType access_type,
3179 int mmu_idx, MemTxAttrs attrs,
3180 MemTxResult response, uintptr_t retaddr)
3181 {
3182 CPUPPCState *env = cpu_env(cs);
3183
3184 switch (env->excp_model) {
3185 #if defined(TARGET_PPC64)
3186 case POWERPC_EXCP_POWER8:
3187 case POWERPC_EXCP_POWER9:
3188 case POWERPC_EXCP_POWER10:
3189 case POWERPC_EXCP_POWER11:
3190 /*
3191 * Machine check codes can be found in processor User Manual or
3192 * Linux or skiboot source.
3193 */
3194 if (access_type == MMU_DATA_LOAD) {
3195 env->spr[SPR_DAR] = vaddr;
3196 env->spr[SPR_DSISR] = PPC_BIT(57);
3197 env->error_code = PPC_BIT(42);
3198
3199 } else if (access_type == MMU_DATA_STORE) {
3200 /*
3201 * MCE for stores in POWER is asynchronous so hardware does
3202 * not set DAR, but QEMU can do better.
3203 */
3204 env->spr[SPR_DAR] = vaddr;
3205 env->error_code = PPC_BIT(36) | PPC_BIT(43) | PPC_BIT(45);
3206 env->error_code |= PPC_BIT(42);
3207
3208 } else { /* Fetch */
3209 /*
3210 * is_prefix_insn_excp() tests !PPC_BIT(42) to avoid fetching
3211 * the instruction, so that must always be clear for fetches.
3212 */
3213 env->error_code = PPC_BIT(36) | PPC_BIT(44) | PPC_BIT(45);
3214 }
3215 break;
3216 #endif
3217 default:
3218 /*
3219 * TODO: Check behaviour for other CPUs, for now do nothing.
3220 * Could add a basic MCE even if real hardware ignores.
3221 */
3222 return;
3223 }
3224
3225 cs->exception_index = POWERPC_EXCP_MCHECK;
3226 cpu_loop_exit_restore(cs, retaddr);
3227 }
3228
3229 void ppc_cpu_debug_excp_handler(CPUState *cs)
3230 {
3231 #if defined(TARGET_PPC64)
3232 CPUPPCState *env = cpu_env(cs);
3233
3234 if (env->insns_flags2 & PPC2_ISA207S) {
3235 if (cs->watchpoint_hit) {
3236 if (cs->watchpoint_hit->flags & BP_CPU) {
3237 env->spr[SPR_DAR] = cs->watchpoint_hit->hitaddr;
3238 env->spr[SPR_DSISR] = PPC_BIT(41);
3239 cs->watchpoint_hit = NULL;
3240 raise_exception(env, POWERPC_EXCP_DSI);
3241 }
3242 cs->watchpoint_hit = NULL;
3243 } else if (cpu_breakpoint_test(cs, env->nip, BP_CPU)) {
3244 raise_exception_err(env, POWERPC_EXCP_TRACE,
3245 PPC_BIT(33) | PPC_BIT(43));
3246 }
3247 }
3248 #endif
3249 }
3250
3251 bool ppc_cpu_debug_check_breakpoint(CPUState *cs)
3252 {
3253 #if defined(TARGET_PPC64)
3254 CPUPPCState *env = cpu_env(cs);
3255
3256 if (env->insns_flags2 & PPC2_ISA207S) {
3257 target_ulong priv;
3258
3259 priv = env->spr[SPR_CIABR] & PPC_BITMASK(62, 63);
3260 switch (priv) {
3261 case 0x1: /* problem */
3262 return env->msr & ((target_ulong)1 << MSR_PR);
3263 case 0x2: /* supervisor */
3264 return (!(env->msr & ((target_ulong)1 << MSR_PR)) &&
3265 !(env->msr & ((target_ulong)1 << MSR_HV)));
3266 case 0x3: /* hypervisor */
3267 return (!(env->msr & ((target_ulong)1 << MSR_PR)) &&
3268 (env->msr & ((target_ulong)1 << MSR_HV)));
3269 default:
3270 g_assert_not_reached();
3271 }
3272 }
3273 #endif
3274
3275 return false;
3276 }
3277
3278 bool ppc_cpu_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
3279 {
3280 #if defined(TARGET_PPC64)
3281 CPUPPCState *env = cpu_env(cs);
3282
3283 if (env->insns_flags2 & PPC2_ISA207S) {
3284 if (wp == env->dawr0_watchpoint) {
3285 uint32_t dawrx = env->spr[SPR_DAWRX0];
3286 bool wt = extract32(dawrx, PPC_BIT_NR(59), 1);
3287 bool wti = extract32(dawrx, PPC_BIT_NR(60), 1);
3288 bool hv = extract32(dawrx, PPC_BIT_NR(61), 1);
3289 bool sv = extract32(dawrx, PPC_BIT_NR(62), 1);
3290 bool pr = extract32(dawrx, PPC_BIT_NR(63), 1);
3291
3292 if ((env->msr & ((target_ulong)1 << MSR_PR)) && !pr) {
3293 return false;
3294 } else if ((env->msr & ((target_ulong)1 << MSR_HV)) && !hv) {
3295 return false;
3296 } else if (!sv) {
3297 return false;
3298 }
3299
3300 if (!wti) {
3301 if (env->msr & ((target_ulong)1 << MSR_DR)) {
3302 if (!wt) {
3303 return false;
3304 }
3305 } else {
3306 if (wt) {
3307 return false;
3308 }
3309 }
3310 }
3311
3312 return true;
3313 }
3314 }
3315 #endif
3316
3317 return false;
3318 }
3319
3320 #endif /* !CONFIG_USER_ONLY */
3321 #endif /* CONFIG_TCG */
3322