1 /*
2 * PowerPC exception emulation helpers for QEMU.
3 *
4 * Copyright (c) 2003-2007 Jocelyn Mayer
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "qemu/osdep.h"
20 #include "qemu/main-loop.h"
21 #include "qemu/log.h"
22 #include "sysemu/sysemu.h"
23 #include "sysemu/runstate.h"
24 #include "cpu.h"
25 #include "exec/exec-all.h"
26 #include "internal.h"
27 #include "helper_regs.h"
28 #include "hw/ppc/ppc.h"
29
30 #include "trace.h"
31
32 #ifdef CONFIG_TCG
33 #include "sysemu/tcg.h"
34 #include "exec/helper-proto.h"
35 #include "exec/cpu_ldst.h"
36 #endif
37
38 /*****************************************************************************/
39 /* Exception processing */
40 #ifndef CONFIG_USER_ONLY
41
42 static const char *powerpc_excp_name(int excp)
43 {
44 switch (excp) {
45 case POWERPC_EXCP_CRITICAL: return "CRITICAL";
46 case POWERPC_EXCP_MCHECK: return "MCHECK";
47 case POWERPC_EXCP_DSI: return "DSI";
48 case POWERPC_EXCP_ISI: return "ISI";
49 case POWERPC_EXCP_EXTERNAL: return "EXTERNAL";
50 case POWERPC_EXCP_ALIGN: return "ALIGN";
51 case POWERPC_EXCP_PROGRAM: return "PROGRAM";
52 case POWERPC_EXCP_FPU: return "FPU";
53 case POWERPC_EXCP_SYSCALL: return "SYSCALL";
54 case POWERPC_EXCP_APU: return "APU";
55 case POWERPC_EXCP_DECR: return "DECR";
56 case POWERPC_EXCP_FIT: return "FIT";
57 case POWERPC_EXCP_WDT: return "WDT";
58 case POWERPC_EXCP_DTLB: return "DTLB";
59 case POWERPC_EXCP_ITLB: return "ITLB";
60 case POWERPC_EXCP_DEBUG: return "DEBUG";
61 case POWERPC_EXCP_SPEU: return "SPEU";
62 case POWERPC_EXCP_EFPDI: return "EFPDI";
63 case POWERPC_EXCP_EFPRI: return "EFPRI";
64 case POWERPC_EXCP_EPERFM: return "EPERFM";
65 case POWERPC_EXCP_DOORI: return "DOORI";
66 case POWERPC_EXCP_DOORCI: return "DOORCI";
67 case POWERPC_EXCP_GDOORI: return "GDOORI";
68 case POWERPC_EXCP_GDOORCI: return "GDOORCI";
69 case POWERPC_EXCP_HYPPRIV: return "HYPPRIV";
70 case POWERPC_EXCP_RESET: return "RESET";
71 case POWERPC_EXCP_DSEG: return "DSEG";
72 case POWERPC_EXCP_ISEG: return "ISEG";
73 case POWERPC_EXCP_HDECR: return "HDECR";
74 case POWERPC_EXCP_TRACE: return "TRACE";
75 case POWERPC_EXCP_HDSI: return "HDSI";
76 case POWERPC_EXCP_HISI: return "HISI";
77 case POWERPC_EXCP_HDSEG: return "HDSEG";
78 case POWERPC_EXCP_HISEG: return "HISEG";
79 case POWERPC_EXCP_VPU: return "VPU";
80 case POWERPC_EXCP_PIT: return "PIT";
81 case POWERPC_EXCP_EMUL: return "EMUL";
82 case POWERPC_EXCP_IFTLB: return "IFTLB";
83 case POWERPC_EXCP_DLTLB: return "DLTLB";
84 case POWERPC_EXCP_DSTLB: return "DSTLB";
85 case POWERPC_EXCP_FPA: return "FPA";
86 case POWERPC_EXCP_DABR: return "DABR";
87 case POWERPC_EXCP_IABR: return "IABR";
88 case POWERPC_EXCP_SMI: return "SMI";
89 case POWERPC_EXCP_PERFM: return "PERFM";
90 case POWERPC_EXCP_THERM: return "THERM";
91 case POWERPC_EXCP_VPUA: return "VPUA";
92 case POWERPC_EXCP_SOFTP: return "SOFTP";
93 case POWERPC_EXCP_MAINT: return "MAINT";
94 case POWERPC_EXCP_MEXTBR: return "MEXTBR";
95 case POWERPC_EXCP_NMEXTBR: return "NMEXTBR";
96 case POWERPC_EXCP_ITLBE: return "ITLBE";
97 case POWERPC_EXCP_DTLBE: return "DTLBE";
98 case POWERPC_EXCP_VSXU: return "VSXU";
99 case POWERPC_EXCP_FU: return "FU";
100 case POWERPC_EXCP_HV_EMU: return "HV_EMU";
101 case POWERPC_EXCP_HV_MAINT: return "HV_MAINT";
102 case POWERPC_EXCP_HV_FU: return "HV_FU";
103 case POWERPC_EXCP_SDOOR: return "SDOOR";
104 case POWERPC_EXCP_SDOOR_HV: return "SDOOR_HV";
105 case POWERPC_EXCP_HVIRT: return "HVIRT";
106 case POWERPC_EXCP_SYSCALL_VECTORED: return "SYSCALL_VECTORED";
107 default:
108 g_assert_not_reached();
109 }
110 }
111
112 static void dump_syscall(CPUPPCState *env)
113 {
114 qemu_log_mask(CPU_LOG_INT, "syscall r0=%016" PRIx64
115 " r3=%016" PRIx64 " r4=%016" PRIx64 " r5=%016" PRIx64
116 " r6=%016" PRIx64 " r7=%016" PRIx64 " r8=%016" PRIx64
117 " nip=" TARGET_FMT_lx "\n",
118 ppc_dump_gpr(env, 0), ppc_dump_gpr(env, 3),
119 ppc_dump_gpr(env, 4), ppc_dump_gpr(env, 5),
120 ppc_dump_gpr(env, 6), ppc_dump_gpr(env, 7),
121 ppc_dump_gpr(env, 8), env->nip);
122 }
123
124 static void dump_hcall(CPUPPCState *env)
125 {
126 qemu_log_mask(CPU_LOG_INT, "hypercall r3=%016" PRIx64
127 " r4=%016" PRIx64 " r5=%016" PRIx64 " r6=%016" PRIx64
128 " r7=%016" PRIx64 " r8=%016" PRIx64 " r9=%016" PRIx64
129 " r10=%016" PRIx64 " r11=%016" PRIx64 " r12=%016" PRIx64
130 " nip=" TARGET_FMT_lx "\n",
131 ppc_dump_gpr(env, 3), ppc_dump_gpr(env, 4),
132 ppc_dump_gpr(env, 5), ppc_dump_gpr(env, 6),
133 ppc_dump_gpr(env, 7), ppc_dump_gpr(env, 8),
134 ppc_dump_gpr(env, 9), ppc_dump_gpr(env, 10),
135 ppc_dump_gpr(env, 11), ppc_dump_gpr(env, 12),
136 env->nip);
137 }
138
139 #ifdef CONFIG_TCG
140 /* Return true iff byteswap is needed to load instruction */
141 static inline bool insn_need_byteswap(CPUArchState *env)
142 {
143 /* System builds are TARGET_BIG_ENDIAN, so we need to swap when MSR[LE] is set */
144 return !!(env->msr & ((target_ulong)1 << MSR_LE));
145 }
146
147 static uint32_t ppc_ldl_code(CPUArchState *env, target_ulong addr)
148 {
149 uint32_t insn = cpu_ldl_code(env, addr);
150
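/* cpu_ldl_code() fetches in the target's build-time (big-endian) order, so swap for MSR[LE] guests */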
151 if (insn_need_byteswap(env)) {
152 insn = bswap32(insn);
153 }
154
155 return insn;
156 }
157
158 #endif
159
160 static void ppc_excp_debug_sw_tlb(CPUPPCState *env, int excp)
161 {
162 const char *es;
163 target_ulong *miss, *cmp;
164 int en;
165
166 if (!qemu_loglevel_mask(CPU_LOG_MMU)) {
167 return;
168 }
169
170 if (excp == POWERPC_EXCP_IFTLB) {
171 es = "I";
172 en = 'I';
173 miss = &env->spr[SPR_IMISS];
174 cmp = &env->spr[SPR_ICMP];
175 } else {
176 if (excp == POWERPC_EXCP_DLTLB) {
177 es = "DL";
178 } else {
179 es = "DS";
180 }
181 en = 'D';
182 miss = &env->spr[SPR_DMISS];
183 cmp = &env->spr[SPR_DCMP];
184 }
185 qemu_log("6xx %sTLB miss: %cM " TARGET_FMT_lx " %cC "
186 TARGET_FMT_lx " H1 " TARGET_FMT_lx " H2 "
187 TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp,
188 env->spr[SPR_HASH1], env->spr[SPR_HASH2],
189 env->error_code);
190 }
191
192 #ifdef TARGET_PPC64
193 static int powerpc_reset_wakeup(CPUPPCState *env, int excp, target_ulong *msr)
194 {
195 /* We no longer are in a PM state */
196 env->resume_as_sreset = false;
197
198 /* Always pretend to be returning from doze, as we don't lose state */
199 *msr |= SRR1_WS_NOLOSS;
200
201 /* Machine checks are sent normally */
202 if (excp == POWERPC_EXCP_MCHECK) {
203 return excp;
204 }
205 switch (excp) {
206 case POWERPC_EXCP_RESET:
207 *msr |= SRR1_WAKERESET;
208 break;
209 case POWERPC_EXCP_EXTERNAL:
210 *msr |= SRR1_WAKEEE;
211 break;
212 case POWERPC_EXCP_DECR:
213 *msr |= SRR1_WAKEDEC;
214 break;
215 case POWERPC_EXCP_SDOOR:
216 *msr |= SRR1_WAKEDBELL;
217 break;
218 case POWERPC_EXCP_SDOOR_HV:
219 *msr |= SRR1_WAKEHDBELL;
220 break;
221 case POWERPC_EXCP_HV_MAINT:
222 *msr |= SRR1_WAKEHMI;
223 break;
224 case POWERPC_EXCP_HVIRT:
225 *msr |= SRR1_WAKEHVI;
226 break;
227 default:
228 cpu_abort(env_cpu(env),
229 "Unsupported exception %d in Power Save mode\n", excp);
230 }
231 return POWERPC_EXCP_RESET;
232 }
233
234 /*
235 * AIL - Alternate Interrupt Location, a mode that allows interrupts to be
236 * taken with the MMU on, and which uses an alternate location (e.g., so the
237 * kernel/hv can map the vectors there with an effective address).
238 *
239 * An interrupt is considered to be taken "with AIL", or "AIL applies", if it
240 * is delivered in this way. AIL requires the LPCR to be set to enable this
241 * mode, and then a number of conditions have to be true for AIL to apply.
242 *
243 * First of all, SRESET, MCE, and HMI are always delivered without AIL, because
244 * they specifically want to be in real mode (e.g., the MCE might be signaling
245 * an SLB multi-hit which requires an SLB flush before the MMU can be enabled).
246 *
247 * After that, behaviour depends on the current MSR[IR], MSR[DR], MSR[HV],
248 * whether or not the interrupt changes MSR[HV] from 0 to 1, and the current
249 * radix mode (LPCR[HR]).
250 *
251 * POWER8, POWER9 with LPCR[HR]=0
252 * | LPCR[AIL] | MSR[IR||DR] | MSR[HV] | new MSR[HV] | AIL |
253 * +-----------+-------------+---------+-------------+-----+
254 * | a | 00/01/10 | x | x | 0 |
255 * | a | 11 | 0 | 1 | 0 |
256 * | a | 11 | 1 | 1 | a |
257 * | a | 11 | 0 | 0 | a |
258 * +-------------------------------------------------------+
259 *
260 * POWER9 with LPCR[HR]=1
261 * | LPCR[AIL] | MSR[IR||DR] | MSR[HV] | new MSR[HV] | AIL |
262 * +-----------+-------------+---------+-------------+-----+
263 * | a | 00/01/10 | x | x | 0 |
264 * | a | 11 | x | x | a |
265 * +-------------------------------------------------------+
266 *
267 * The difference with POWER9 is that MSR[HV] 0->1 interrupts can be sent to
268 * the hypervisor in AIL mode if the guest is radix. This is good for
269 * performance but allows the guest to influence the AIL of hypervisor
270 * interrupts using its MSR, and also the hypervisor must disallow guest
271 * interrupts (MSR[HV] 0->0) from using AIL if the hypervisor does not want to
272 * use AIL for its MSR[HV] 0->1 interrupts.
273 *
274 * POWER10 addresses those issues with a new LPCR[HAIL] bit that is applied to
275 * interrupts that begin execution with MSR[HV]=1 (so both MSR[HV] 0->1 and
276 * MSR[HV] 1->1).
277 *
278 * HAIL=1 is equivalent to AIL=3, for interrupts delivered with MSR[HV]=1.
279 *
280 * POWER10 behaviour is
281 * | LPCR[AIL] | LPCR[HAIL] | MSR[IR||DR] | MSR[HV] | new MSR[HV] | AIL |
282 * +-----------+------------+-------------+---------+-------------+-----+
283 * | a | h | 00/01/10 | 0 | 0 | 0 |
284 * | a | h | 11 | 0 | 0 | a |
285 * | a | h | x | 0 | 1 | h |
286 * | a | h | 00/01/10 | 1 | 1 | 0 |
287 * | a | h | 11 | 1 | 1 | h |
288 * +--------------------------------------------------------------------+
289 */
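/*
 * Worked example (illustrative, not part of the original source): on a POWER9
 * radix guest with LPCR[AIL]=3 and MSR[IR]=MSR[DR]=1, a decrementer interrupt
 * whose architected vector is 0x900 is delivered at effective address
 * 0xc000000000004900 with MSR[IR] and MSR[DR] set, per the offsets applied in
 * ppc_excp_apply_ail() below.
 */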
290 static void ppc_excp_apply_ail(PowerPCCPU *cpu, int excp, target_ulong msr,
291 target_ulong *new_msr, target_ulong *vector)
292 {
293 PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
294 CPUPPCState *env = &cpu->env;
295 bool mmu_all_on = ((msr >> MSR_IR) & 1) && ((msr >> MSR_DR) & 1);
296 bool hv_escalation = !(msr & MSR_HVB) && (*new_msr & MSR_HVB);
297 int ail = 0;
298
299 if (excp == POWERPC_EXCP_MCHECK ||
300 excp == POWERPC_EXCP_RESET ||
301 excp == POWERPC_EXCP_HV_MAINT) {
302 /* SRESET, MCE, HMI never apply AIL */
303 return;
304 }
305
306 if (!(pcc->lpcr_mask & LPCR_AIL)) {
307 /* This CPU does not have AIL */
308 return;
309 }
310
311 /* P8 & P9 */
312 if (!(pcc->lpcr_mask & LPCR_HAIL)) {
313 if (!mmu_all_on) {
314 /* AIL only works if MSR[IR] and MSR[DR] are both enabled. */
315 return;
316 }
317 if (hv_escalation && !(env->spr[SPR_LPCR] & LPCR_HR)) {
318 /*
319 * AIL does not work if there is a MSR[HV] 0->1 transition and the
320 * partition is in HPT mode. For radix guests, such interrupts are
321 * allowed to be delivered to the hypervisor in AIL mode.
322 */
323 return;
324 }
325
326 ail = (env->spr[SPR_LPCR] & LPCR_AIL) >> LPCR_AIL_SHIFT;
327 if (ail == 0) {
328 return;
329 }
330 if (ail == 1) {
331 /* AIL=1 is reserved, treat it like AIL=0 */
332 return;
333 }
334
335 /* P10 and up */
336 } else {
337 if (!mmu_all_on && !hv_escalation) {
338 /*
339 * AIL works for HV interrupts even with guest MSR[IR/DR] disabled.
340 * Guest->guest and HV->HV interrupts do require MMU on.
341 */
342 return;
343 }
344
345 if (*new_msr & MSR_HVB) {
346 if (!(env->spr[SPR_LPCR] & LPCR_HAIL)) {
347 /* HV interrupts depend on LPCR[HAIL] */
348 return;
349 }
350 ail = 3; /* HAIL=1 gives AIL=3 behaviour for HV interrupts */
351 } else {
352 ail = (env->spr[SPR_LPCR] & LPCR_AIL) >> LPCR_AIL_SHIFT;
353 }
354 if (ail == 0) {
355 return;
356 }
357 if (ail == 1 || ail == 2) {
358 /* AIL=1 and AIL=2 are reserved, treat them like AIL=0 */
359 return;
360 }
361 }
362
363 /*
364 * AIL applies, so the new MSR gets IR and DR set, and an offset applied
365 * to the new IP.
366 */
367 *new_msr |= (1 << MSR_IR) | (1 << MSR_DR);
368
369 if (excp != POWERPC_EXCP_SYSCALL_VECTORED) {
370 if (ail == 2) {
371 *vector |= 0x0000000000018000ull;
372 } else if (ail == 3) {
373 *vector |= 0xc000000000004000ull;
374 }
375 } else {
376 /*
377 * scv AIL is a little different. AIL=2 does not change the address,
378 * only the MSR. AIL=3 replaces the 0x17000 base with 0xc...3000.
379 */
380 if (ail == 3) {
381 *vector &= ~0x0000000000017000ull; /* Un-apply the base offset */
382 *vector |= 0xc000000000003000ull; /* Apply scv's AIL=3 offset */
383 }
384 }
385 }
386 #endif /* TARGET_PPC64 */
387
388 static void powerpc_reset_excp_state(PowerPCCPU *cpu)
389 {
390 CPUState *cs = CPU(cpu);
391 CPUPPCState *env = &cpu->env;
392
393 /* Reset exception state */
394 cs->exception_index = POWERPC_EXCP_NONE;
395 env->error_code = 0;
396 }
397
398 static void powerpc_set_excp_state(PowerPCCPU *cpu, target_ulong vector,
399 target_ulong msr)
400 {
401 CPUPPCState *env = &cpu->env;
402
403 assert((msr & env->msr_mask) == msr);
404
405 /*
406 * We don't use hreg_store_msr here as we have already handled any
407 * special case that could occur. Just store MSR and update hflags.
408 *
409 * Note: We *MUST* not use hreg_store_msr() as-is anyway because it will
410 * prevent setting of the HV bit which some exceptions might need to do.
411 */
412 env->nip = vector;
413 env->msr = msr;
414 hreg_compute_hflags(env);
415 ppc_maybe_interrupt(env);
416
417 powerpc_reset_excp_state(cpu);
418
419 /*
420 * Any interrupt is context synchronizing, check if TCG TLB needs
421 * a delayed flush on ppc64
422 */
423 check_tlb_flush(env, false);
424
425 /* Reset the reservation */
426 env->reserve_addr = -1;
427 }
428
429 #ifdef CONFIG_TCG
430 /*
431 * This stops the machine and logs CPU state without killing QEMU (like
432 * cpu_abort()) because it is often a guest error as opposed to a QEMU error,
433 * so the machine can still be debugged.
434 */
435 static G_NORETURN void powerpc_checkstop(CPUPPCState *env, const char *reason)
436 {
437 CPUState *cs = env_cpu(env);
438 FILE *f;
439
440 f = qemu_log_trylock();
441 if (f) {
442 fprintf(f, "Entering checkstop state: %s\n", reason);
443 cpu_dump_state(cs, f, CPU_DUMP_FPU | CPU_DUMP_CCOP);
444 qemu_log_unlock(f);
445 }
446
447 /*
448 * This stops the machine and logs CPU state without killing QEMU
449 * (like cpu_abort()) so the machine can still be debugged (because
450 * it is often a guest error).
451 */
452 qemu_system_guest_panicked(NULL);
453 cpu_loop_exit_noexc(cs);
454 }
455
456 #if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
457 void helper_attn(CPUPPCState *env)
458 {
459 /* POWER attn is unprivileged when enabled by HID, otherwise illegal */
460 if ((*env->check_attn)(env)) {
461 powerpc_checkstop(env, "host executed attn");
462 } else {
463 raise_exception_err(env, POWERPC_EXCP_HV_EMU,
464 POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
465 }
466 }
467 #endif
468 #endif /* CONFIG_TCG */
469
470 static void powerpc_mcheck_checkstop(CPUPPCState *env)
471 {
472 /* KVM guests always have MSR[ME] enabled */
473 #ifdef CONFIG_TCG
474 if (FIELD_EX64(env->msr, MSR, ME)) {
475 return;
476 }
477
478 powerpc_checkstop(env, "machine check with MSR[ME]=0");
479 #endif
480 }
481
482 static void powerpc_excp_40x(PowerPCCPU *cpu, int excp)
483 {
484 CPUPPCState *env = &cpu->env;
485 target_ulong msr, new_msr, vector;
486 int srr0 = SPR_SRR0, srr1 = SPR_SRR1;
487
488 /* new srr1 value excluding must-be-zero bits */
489 msr = env->msr & ~0x783f0000ULL;
490
491 /* new interrupt handler msr preserves ME unless explicitly overridden */
492 new_msr = env->msr & ((target_ulong)1 << MSR_ME);
493
494 /* HV emu assistance interrupt only exists on server arch 2.05 or later */
495 if (excp == POWERPC_EXCP_HV_EMU) {
496 excp = POWERPC_EXCP_PROGRAM;
497 }
498
499 vector = env->excp_vectors[excp];
500 if (vector == (target_ulong)-1ULL) {
501 cpu_abort(env_cpu(env),
502 "Raised an exception without defined vector %d\n", excp);
503 }
504 vector |= env->excp_prefix;
505
506 switch (excp) {
507 case POWERPC_EXCP_CRITICAL: /* Critical input */
508 srr0 = SPR_40x_SRR2;
509 srr1 = SPR_40x_SRR3;
510 break;
511 case POWERPC_EXCP_MCHECK: /* Machine check exception */
512 powerpc_mcheck_checkstop(env);
513 /* machine check exceptions don't have ME set */
514 new_msr &= ~((target_ulong)1 << MSR_ME);
515 srr0 = SPR_40x_SRR2;
516 srr1 = SPR_40x_SRR3;
517 break;
518 case POWERPC_EXCP_DSI: /* Data storage exception */
519 trace_ppc_excp_dsi(env->spr[SPR_40x_ESR], env->spr[SPR_40x_DEAR]);
520 break;
521 case POWERPC_EXCP_ISI: /* Instruction storage exception */
522 trace_ppc_excp_isi(msr, env->nip);
523 break;
524 case POWERPC_EXCP_EXTERNAL: /* External input */
525 break;
526 case POWERPC_EXCP_ALIGN: /* Alignment exception */
527 break;
528 case POWERPC_EXCP_PROGRAM: /* Program exception */
529 switch (env->error_code & ~0xF) {
530 case POWERPC_EXCP_FP:
531 if (!FIELD_EX64_FE(env->msr) || !FIELD_EX64(env->msr, MSR, FP)) {
532 trace_ppc_excp_fp_ignore();
533 powerpc_reset_excp_state(cpu);
534 return;
535 }
536 env->spr[SPR_40x_ESR] = ESR_FP;
537 break;
538 case POWERPC_EXCP_INVAL:
539 trace_ppc_excp_inval(env->nip);
540 env->spr[SPR_40x_ESR] = ESR_PIL;
541 break;
542 case POWERPC_EXCP_PRIV:
543 env->spr[SPR_40x_ESR] = ESR_PPR;
544 break;
545 case POWERPC_EXCP_TRAP:
546 env->spr[SPR_40x_ESR] = ESR_PTR;
547 break;
548 default:
549 cpu_abort(env_cpu(env), "Invalid program exception %d. Aborting\n",
550 env->error_code);
551 break;
552 }
553 break;
554 case POWERPC_EXCP_SYSCALL: /* System call exception */
555 dump_syscall(env);
556
557 /*
558 * We need to correct the NIP which in this case is supposed
559 * to point to the next instruction
560 */
561 env->nip += 4;
562 break;
563 case POWERPC_EXCP_FIT: /* Fixed-interval timer interrupt */
564 trace_ppc_excp_print("FIT");
565 break;
566 case POWERPC_EXCP_WDT: /* Watchdog timer interrupt */
567 trace_ppc_excp_print("WDT");
568 break;
569 case POWERPC_EXCP_DTLB: /* Data TLB error */
570 case POWERPC_EXCP_ITLB: /* Instruction TLB error */
571 break;
572 case POWERPC_EXCP_PIT: /* Programmable interval timer interrupt */
573 trace_ppc_excp_print("PIT");
574 break;
575 case POWERPC_EXCP_DEBUG: /* Debug interrupt */
576 cpu_abort(env_cpu(env), "%s exception not implemented\n",
577 powerpc_excp_name(excp));
578 break;
579 default:
580 cpu_abort(env_cpu(env), "Invalid PowerPC exception %d. Aborting\n",
581 excp);
582 break;
583 }
584
585 env->spr[srr0] = env->nip;
586 env->spr[srr1] = msr;
587 powerpc_set_excp_state(cpu, vector, new_msr);
588 }
589
590 static void powerpc_excp_6xx(PowerPCCPU *cpu, int excp)
591 {
592 CPUPPCState *env = &cpu->env;
593 target_ulong msr, new_msr, vector;
594
595 /* new srr1 value excluding must-be-zero bits */
596 msr = env->msr & ~0x783f0000ULL;
597
598 /* new interrupt handler msr preserves ME unless explicitly overridden */
599 new_msr = env->msr & ((target_ulong)1 << MSR_ME);
600
601 /* HV emu assistance interrupt only exists on server arch 2.05 or later */
602 if (excp == POWERPC_EXCP_HV_EMU) {
603 excp = POWERPC_EXCP_PROGRAM;
604 }
605
606 vector = env->excp_vectors[excp];
607 if (vector == (target_ulong)-1ULL) {
608 cpu_abort(env_cpu(env),
609 "Raised an exception without defined vector %d\n", excp);
610 }
611 vector |= env->excp_prefix;
612
613 switch (excp) {
614 case POWERPC_EXCP_CRITICAL: /* Critical input */
615 break;
616 case POWERPC_EXCP_MCHECK: /* Machine check exception */
617 powerpc_mcheck_checkstop(env);
618 /* machine check exceptions don't have ME set */
619 new_msr &= ~((target_ulong)1 << MSR_ME);
620 break;
621 case POWERPC_EXCP_DSI: /* Data storage exception */
622 trace_ppc_excp_dsi(env->spr[SPR_DSISR], env->spr[SPR_DAR]);
623 break;
624 case POWERPC_EXCP_ISI: /* Instruction storage exception */
625 trace_ppc_excp_isi(msr, env->nip);
626 msr |= env->error_code;
627 break;
628 case POWERPC_EXCP_EXTERNAL: /* External input */
629 break;
630 case POWERPC_EXCP_ALIGN: /* Alignment exception */
631 /* Get rS/rD and rA from faulting opcode */
632 /*
633 * Note: the opcode fields will not be set properly for a
634 * direct store load/store, but nobody cares as nobody
635 * actually uses direct store segments.
636 */
637 env->spr[SPR_DSISR] |= (env->error_code & 0x03FF0000) >> 16;
638 break;
639 case POWERPC_EXCP_PROGRAM: /* Program exception */
640 switch (env->error_code & ~0xF) {
641 case POWERPC_EXCP_FP:
642 if (!FIELD_EX64_FE(env->msr) || !FIELD_EX64(env->msr, MSR, FP)) {
643 trace_ppc_excp_fp_ignore();
644 powerpc_reset_excp_state(cpu);
645 return;
646 }
647 /*
648 * NIP always points to the faulting instruction for FP exceptions,
649 * so always use store_next and claim we are precise in the MSR.
650 */
651 msr |= 0x00100000;
652 break;
653 case POWERPC_EXCP_INVAL:
654 trace_ppc_excp_inval(env->nip);
655 msr |= 0x00080000;
656 break;
657 case POWERPC_EXCP_PRIV:
658 msr |= 0x00040000;
659 break;
660 case POWERPC_EXCP_TRAP:
661 msr |= 0x00020000;
662 break;
663 default:
664 /* Should never occur */
665 cpu_abort(env_cpu(env), "Invalid program exception %d. Aborting\n",
666 env->error_code);
667 break;
668 }
669 break;
670 case POWERPC_EXCP_SYSCALL: /* System call exception */
671 dump_syscall(env);
672
673 /*
674 * We need to correct the NIP which in this case is supposed
675 * to point to the next instruction
676 */
677 env->nip += 4;
678 break;
679 case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */
680 case POWERPC_EXCP_DECR: /* Decrementer exception */
681 break;
682 case POWERPC_EXCP_DTLB: /* Data TLB error */
683 case POWERPC_EXCP_ITLB: /* Instruction TLB error */
684 break;
685 case POWERPC_EXCP_RESET: /* System reset exception */
686 if (FIELD_EX64(env->msr, MSR, POW)) {
687 cpu_abort(env_cpu(env),
688 "Trying to deliver power-saving system reset exception "
689 "%d with no HV support\n", excp);
690 }
691 break;
692 case POWERPC_EXCP_TRACE: /* Trace exception */
693 break;
694 case POWERPC_EXCP_IFTLB: /* Instruction fetch TLB error */
695 case POWERPC_EXCP_DLTLB: /* Data load TLB miss */
696 case POWERPC_EXCP_DSTLB: /* Data store TLB miss */
697 /* Swap temporary saved registers with GPRs */
698 if (!(new_msr & ((target_ulong)1 << MSR_TGPR))) {
699 new_msr |= (target_ulong)1 << MSR_TGPR;
700 hreg_swap_gpr_tgpr(env);
701 }
702
703 ppc_excp_debug_sw_tlb(env, excp);
704
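/* SRR1 also carries a copy of CR0 here so the software TLB miss handler can restore it */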
705 msr |= env->crf[0] << 28;
706 msr |= env->error_code; /* key, D/I, S/L bits */
707 /* Set way using a LRU mechanism */
708 msr |= ((env->last_way + 1) & (env->nb_ways - 1)) << 17;
709 break;
710 case POWERPC_EXCP_FPA: /* Floating-point assist exception */
711 case POWERPC_EXCP_DABR: /* Data address breakpoint */
712 case POWERPC_EXCP_IABR: /* Instruction address breakpoint */
713 case POWERPC_EXCP_SMI: /* System management interrupt */
714 case POWERPC_EXCP_MEXTBR: /* Maskable external breakpoint */
715 case POWERPC_EXCP_NMEXTBR: /* Non maskable external breakpoint */
716 cpu_abort(env_cpu(env), "%s exception not implemented\n",
717 powerpc_excp_name(excp));
718 break;
719 default:
720 cpu_abort(env_cpu(env), "Invalid PowerPC exception %d. Aborting\n",
721 excp);
722 break;
723 }
724
725 if (ppc_interrupts_little_endian(cpu, !!(new_msr & MSR_HVB))) {
726 new_msr |= (target_ulong)1 << MSR_LE;
727 }
728 env->spr[SPR_SRR0] = env->nip;
729 env->spr[SPR_SRR1] = msr;
730 powerpc_set_excp_state(cpu, vector, new_msr);
731 }
732
733 static void powerpc_excp_7xx(PowerPCCPU *cpu, int excp)
734 {
735 CPUPPCState *env = &cpu->env;
736 target_ulong msr, new_msr, vector;
737
738 /* new srr1 value excluding must-be-zero bits */
739 msr = env->msr & ~0x783f0000ULL;
740
741 /* new interrupt handler msr preserves ME unless explicitly overridden */
742 new_msr = env->msr & ((target_ulong)1 << MSR_ME);
743
744 /* HV emu assistance interrupt only exists on server arch 2.05 or later */
745 if (excp == POWERPC_EXCP_HV_EMU) {
746 excp = POWERPC_EXCP_PROGRAM;
747 }
748
749 vector = env->excp_vectors[excp];
750 if (vector == (target_ulong)-1ULL) {
751 cpu_abort(env_cpu(env),
752 "Raised an exception without defined vector %d\n", excp);
753 }
754 vector |= env->excp_prefix;
755
756 switch (excp) {
757 case POWERPC_EXCP_MCHECK: /* Machine check exception */
758 powerpc_mcheck_checkstop(env);
759 /* machine check exceptions don't have ME set */
760 new_msr &= ~((target_ulong)1 << MSR_ME);
761 break;
762 case POWERPC_EXCP_DSI: /* Data storage exception */
763 trace_ppc_excp_dsi(env->spr[SPR_DSISR], env->spr[SPR_DAR]);
764 break;
765 case POWERPC_EXCP_ISI: /* Instruction storage exception */
766 trace_ppc_excp_isi(msr, env->nip);
767 msr |= env->error_code;
768 break;
769 case POWERPC_EXCP_EXTERNAL: /* External input */
770 break;
771 case POWERPC_EXCP_ALIGN: /* Alignment exception */
772 /* Get rS/rD and rA from faulting opcode */
773 /*
774 * Note: the opcode fields will not be set properly for a
775 * direct store load/store, but nobody cares as nobody
776 * actually uses direct store segments.
777 */
778 env->spr[SPR_DSISR] |= (env->error_code & 0x03FF0000) >> 16;
779 break;
780 case POWERPC_EXCP_PROGRAM: /* Program exception */
781 switch (env->error_code & ~0xF) {
782 case POWERPC_EXCP_FP:
783 if (!FIELD_EX64_FE(env->msr) || !FIELD_EX64(env->msr, MSR, FP)) {
784 trace_ppc_excp_fp_ignore();
785 powerpc_reset_excp_state(cpu);
786 return;
787 }
788 /*
789 * NIP always points to the faulting instruction for FP exceptions,
790 * so always use store_next and claim we are precise in the MSR.
791 */
792 msr |= 0x00100000;
793 break;
794 case POWERPC_EXCP_INVAL:
795 trace_ppc_excp_inval(env->nip);
796 msr |= 0x00080000;
797 break;
798 case POWERPC_EXCP_PRIV:
799 msr |= 0x00040000;
800 break;
801 case POWERPC_EXCP_TRAP:
802 msr |= 0x00020000;
803 break;
804 default:
805 /* Should never occur */
806 cpu_abort(env_cpu(env), "Invalid program exception %d. Aborting\n",
807 env->error_code);
808 break;
809 }
810 break;
811 case POWERPC_EXCP_SYSCALL: /* System call exception */
812 {
813 int lev = env->error_code;
814
815 if (lev == 1 && cpu->vhyp) {
816 dump_hcall(env);
817 } else {
818 dump_syscall(env);
819 }
820
821 /*
822 * We need to correct the NIP which in this case is supposed
823 * to point to the next instruction
824 */
825 env->nip += 4;
826
827 /*
828 * The Virtual Open Firmware (VOF) relies on the 'sc 1'
829 * instruction to communicate with QEMU. The pegasos2 machine
830 * uses VOF and the 7xx CPUs, so although the 7xx don't have
831 * HV mode, we need to keep hypercall support.
832 */
833 if (lev == 1 && cpu->vhyp) {
834 cpu->vhyp_class->hypercall(cpu->vhyp, cpu);
835 powerpc_reset_excp_state(cpu);
836 return;
837 }
838
839 break;
840 }
841 case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */
842 case POWERPC_EXCP_DECR: /* Decrementer exception */
843 break;
844 case POWERPC_EXCP_RESET: /* System reset exception */
845 if (FIELD_EX64(env->msr, MSR, POW)) {
846 cpu_abort(env_cpu(env),
847 "Trying to deliver power-saving system reset exception "
848 "%d with no HV support\n", excp);
849 }
850 break;
851 case POWERPC_EXCP_TRACE: /* Trace exception */
852 break;
853 case POWERPC_EXCP_IFTLB: /* Instruction fetch TLB error */
854 case POWERPC_EXCP_DLTLB: /* Data load TLB miss */
855 case POWERPC_EXCP_DSTLB: /* Data store TLB miss */
856 ppc_excp_debug_sw_tlb(env, excp);
857 msr |= env->crf[0] << 28;
858 msr |= env->error_code; /* key, D/I, S/L bits */
859 /* Set way using a LRU mechanism */
860 msr |= ((env->last_way + 1) & (env->nb_ways - 1)) << 17;
861 break;
862 case POWERPC_EXCP_IABR: /* Instruction address breakpoint */
863 case POWERPC_EXCP_SMI: /* System management interrupt */
864 case POWERPC_EXCP_THERM: /* Thermal interrupt */
865 case POWERPC_EXCP_PERFM: /* Embedded performance monitor interrupt */
866 cpu_abort(env_cpu(env), "%s exception not implemented\n",
867 powerpc_excp_name(excp));
868 break;
869 default:
870 cpu_abort(env_cpu(env), "Invalid PowerPC exception %d. Aborting\n",
871 excp);
872 break;
873 }
874
875 if (ppc_interrupts_little_endian(cpu, !!(new_msr & MSR_HVB))) {
876 new_msr |= (target_ulong)1 << MSR_LE;
877 }
878 env->spr[SPR_SRR0] = env->nip;
879 env->spr[SPR_SRR1] = msr;
880 powerpc_set_excp_state(cpu, vector, new_msr);
881 }
882
883 static void powerpc_excp_74xx(PowerPCCPU *cpu, int excp)
884 {
885 CPUPPCState *env = &cpu->env;
886 target_ulong msr, new_msr, vector;
887
888 /* new srr1 value excluding must-be-zero bits */
889 msr = env->msr & ~0x783f0000ULL;
890
891 /* new interrupt handler msr preserves ME unless explicitly overridden */
892 new_msr = env->msr & ((target_ulong)1 << MSR_ME);
893
894 /* HV emu assistance interrupt only exists on server arch 2.05 or later */
895 if (excp == POWERPC_EXCP_HV_EMU) {
896 excp = POWERPC_EXCP_PROGRAM;
897 }
898
899 vector = env->excp_vectors[excp];
900 if (vector == (target_ulong)-1ULL) {
901 cpu_abort(env_cpu(env),
902 "Raised an exception without defined vector %d\n", excp);
903 }
904 vector |= env->excp_prefix;
905
906 switch (excp) {
907 case POWERPC_EXCP_MCHECK: /* Machine check exception */
908 powerpc_mcheck_checkstop(env);
909 /* machine check exceptions don't have ME set */
910 new_msr &= ~((target_ulong)1 << MSR_ME);
911 break;
912 case POWERPC_EXCP_DSI: /* Data storage exception */
913 trace_ppc_excp_dsi(env->spr[SPR_DSISR], env->spr[SPR_DAR]);
914 break;
915 case POWERPC_EXCP_ISI: /* Instruction storage exception */
916 trace_ppc_excp_isi(msr, env->nip);
917 msr |= env->error_code;
918 break;
919 case POWERPC_EXCP_EXTERNAL: /* External input */
920 break;
921 case POWERPC_EXCP_ALIGN: /* Alignment exception */
922 /* Get rS/rD and rA from faulting opcode */
923 /*
924 * Note: the opcode fields will not be set properly for a
925 * direct store load/store, but nobody cares as nobody
926 * actually uses direct store segments.
927 */
928 env->spr[SPR_DSISR] |= (env->error_code & 0x03FF0000) >> 16;
929 break;
930 case POWERPC_EXCP_PROGRAM: /* Program exception */
931 switch (env->error_code & ~0xF) {
932 case POWERPC_EXCP_FP:
933 if (!FIELD_EX64_FE(env->msr) || !FIELD_EX64(env->msr, MSR, FP)) {
934 trace_ppc_excp_fp_ignore();
935 powerpc_reset_excp_state(cpu);
936 return;
937 }
938 /*
939 * NIP always points to the faulting instruction for FP exceptions,
940 * so always use store_next and claim we are precise in the MSR.
941 */
942 msr |= 0x00100000;
943 break;
944 case POWERPC_EXCP_INVAL:
945 trace_ppc_excp_inval(env->nip);
946 msr |= 0x00080000;
947 break;
948 case POWERPC_EXCP_PRIV:
949 msr |= 0x00040000;
950 break;
951 case POWERPC_EXCP_TRAP:
952 msr |= 0x00020000;
953 break;
954 default:
955 /* Should never occur */
956 cpu_abort(env_cpu(env), "Invalid program exception %d. Aborting\n",
957 env->error_code);
958 break;
959 }
960 break;
961 case POWERPC_EXCP_SYSCALL: /* System call exception */
962 {
963 int lev = env->error_code;
964
965 if (lev == 1 && cpu->vhyp) {
966 dump_hcall(env);
967 } else {
968 dump_syscall(env);
969 }
970
971 /*
972 * We need to correct the NIP which in this case is supposed
973 * to point to the next instruction
974 */
975 env->nip += 4;
976
977 /*
978 * The Virtual Open Firmware (VOF) relies on the 'sc 1'
979 * instruction to communicate with QEMU. The pegasos2 machine
980 * uses VOF and the 74xx CPUs, so although the 74xx don't have
981 * HV mode, we need to keep hypercall support.
982 */
983 if (lev == 1 && cpu->vhyp) {
984 cpu->vhyp_class->hypercall(cpu->vhyp, cpu);
985 powerpc_reset_excp_state(cpu);
986 return;
987 }
988
989 break;
990 }
991 case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */
992 case POWERPC_EXCP_DECR: /* Decrementer exception */
993 break;
994 case POWERPC_EXCP_RESET: /* System reset exception */
995 if (FIELD_EX64(env->msr, MSR, POW)) {
996 cpu_abort(env_cpu(env),
997 "Trying to deliver power-saving system reset "
998 "exception %d with no HV support\n", excp);
999 }
1000 break;
1001 case POWERPC_EXCP_TRACE: /* Trace exception */
1002 break;
1003 case POWERPC_EXCP_VPU: /* Vector unavailable exception */
1004 break;
1005 case POWERPC_EXCP_IABR: /* Instruction address breakpoint */
1006 case POWERPC_EXCP_SMI: /* System management interrupt */
1007 case POWERPC_EXCP_THERM: /* Thermal interrupt */
1008 case POWERPC_EXCP_PERFM: /* Embedded performance monitor interrupt */
1009 case POWERPC_EXCP_VPUA: /* Vector assist exception */
1010 cpu_abort(env_cpu(env), "%s exception not implemented\n",
1011 powerpc_excp_name(excp));
1012 break;
1013 default:
1014 cpu_abort(env_cpu(env), "Invalid PowerPC exception %d. Aborting\n",
1015 excp);
1016 break;
1017 }
1018
1019 if (ppc_interrupts_little_endian(cpu, !!(new_msr & MSR_HVB))) {
1020 new_msr |= (target_ulong)1 << MSR_LE;
1021 }
1022 env->spr[SPR_SRR0] = env->nip;
1023 env->spr[SPR_SRR1] = msr;
1024 powerpc_set_excp_state(cpu, vector, new_msr);
1025 }
1026
1027 static void powerpc_excp_booke(PowerPCCPU *cpu, int excp)
1028 {
1029 CPUPPCState *env = &cpu->env;
1030 target_ulong msr, new_msr, vector;
1031 int srr0 = SPR_SRR0, srr1 = SPR_SRR1;
1032
1033 /*
1034 * Book E does not play games with certain bits of xSRR1 being MSR save
1035 * bits and others being error status. xSRR1 is the old MSR, period.
1036 */
1037 msr = env->msr;
1038
1039 /* new interrupt handler msr preserves ME unless explicitly overridden */
1040 new_msr = env->msr & ((target_ulong)1 << MSR_ME);
1041
1042 /* HV emu assistance interrupt only exists on server arch 2.05 or later */
1043 if (excp == POWERPC_EXCP_HV_EMU) {
1044 excp = POWERPC_EXCP_PROGRAM;
1045 }
1046
1047 #ifdef TARGET_PPC64
1048 /*
1049 * SPEU and VPU share the same IVOR but they exist in different
1050 * processors. SPEU is e500v1/2 only and VPU is e6500 only.
1051 */
1052 if (excp == POWERPC_EXCP_VPU) {
1053 excp = POWERPC_EXCP_SPEU;
1054 }
1055 #endif
1056
1057 vector = env->excp_vectors[excp];
1058 if (vector == (target_ulong)-1ULL) {
1059 cpu_abort(env_cpu(env),
1060 "Raised an exception without defined vector %d\n", excp);
1061 }
1062 vector |= env->excp_prefix;
1063
1064 switch (excp) {
1065 case POWERPC_EXCP_CRITICAL: /* Critical input */
1066 srr0 = SPR_BOOKE_CSRR0;
1067 srr1 = SPR_BOOKE_CSRR1;
1068 break;
1069 case POWERPC_EXCP_MCHECK: /* Machine check exception */
1070 powerpc_mcheck_checkstop(env);
1071 /* machine check exceptions don't have ME set */
1072 new_msr &= ~((target_ulong)1 << MSR_ME);
1073
1074 /* FIXME: choose one or the other based on CPU type */
1075 srr0 = SPR_BOOKE_MCSRR0;
1076 srr1 = SPR_BOOKE_MCSRR1;
1077
1078 env->spr[SPR_BOOKE_CSRR0] = env->nip;
1079 env->spr[SPR_BOOKE_CSRR1] = msr;
1080
1081 break;
1082 case POWERPC_EXCP_DSI: /* Data storage exception */
1083 trace_ppc_excp_dsi(env->spr[SPR_BOOKE_ESR], env->spr[SPR_BOOKE_DEAR]);
1084 break;
1085 case POWERPC_EXCP_ISI: /* Instruction storage exception */
1086 trace_ppc_excp_isi(msr, env->nip);
1087 break;
1088 case POWERPC_EXCP_EXTERNAL: /* External input */
1089 if (env->mpic_proxy) {
1090 CPUState *cs = env_cpu(env);
1091 /* IACK the IRQ on delivery */
1092 env->spr[SPR_BOOKE_EPR] = ldl_phys(cs->as, env->mpic_iack);
1093 }
1094 break;
1095 case POWERPC_EXCP_ALIGN: /* Alignment exception */
1096 break;
1097 case POWERPC_EXCP_PROGRAM: /* Program exception */
1098 switch (env->error_code & ~0xF) {
1099 case POWERPC_EXCP_FP:
1100 if (!FIELD_EX64_FE(env->msr) || !FIELD_EX64(env->msr, MSR, FP)) {
1101 trace_ppc_excp_fp_ignore();
1102 powerpc_reset_excp_state(cpu);
1103 return;
1104 }
1105 /*
1106 * NIP always points to the faulting instruction for FP exceptions,
1107 * so always use store_next and claim we are precise in the MSR.
1108 */
1109 msr |= 0x00100000;
1110 env->spr[SPR_BOOKE_ESR] = ESR_FP;
1111 break;
1112 case POWERPC_EXCP_INVAL:
1113 trace_ppc_excp_inval(env->nip);
1114 msr |= 0x00080000;
1115 env->spr[SPR_BOOKE_ESR] = ESR_PIL;
1116 break;
1117 case POWERPC_EXCP_PRIV:
1118 msr |= 0x00040000;
1119 env->spr[SPR_BOOKE_ESR] = ESR_PPR;
1120 break;
1121 case POWERPC_EXCP_TRAP:
1122 msr |= 0x00020000;
1123 env->spr[SPR_BOOKE_ESR] = ESR_PTR;
1124 break;
1125 default:
1126 /* Should never occur */
1127 cpu_abort(env_cpu(env), "Invalid program exception %d. Aborting\n",
1128 env->error_code);
1129 break;
1130 }
1131 break;
1132 case POWERPC_EXCP_SYSCALL: /* System call exception */
1133 dump_syscall(env);
1134
1135 /*
1136 * We need to correct the NIP which in this case is supposed
1137 * to point to the next instruction
1138 */
1139 env->nip += 4;
1140 break;
1141 case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */
1142 case POWERPC_EXCP_APU: /* Auxiliary processor unavailable */
1143 case POWERPC_EXCP_DECR: /* Decrementer exception */
1144 break;
1145 case POWERPC_EXCP_FIT: /* Fixed-interval timer interrupt */
1146 /* FIT on 4xx */
1147 trace_ppc_excp_print("FIT");
1148 break;
1149 case POWERPC_EXCP_WDT: /* Watchdog timer interrupt */
1150 trace_ppc_excp_print("WDT");
1151 srr0 = SPR_BOOKE_CSRR0;
1152 srr1 = SPR_BOOKE_CSRR1;
1153 break;
1154 case POWERPC_EXCP_DTLB: /* Data TLB error */
1155 case POWERPC_EXCP_ITLB: /* Instruction TLB error */
1156 break;
1157 case POWERPC_EXCP_DEBUG: /* Debug interrupt */
1158 if (env->flags & POWERPC_FLAG_DE) {
1159 /* FIXME: choose one or the other based on CPU type */
1160 srr0 = SPR_BOOKE_DSRR0;
1161 srr1 = SPR_BOOKE_DSRR1;
1162
1163 env->spr[SPR_BOOKE_CSRR0] = env->nip;
1164 env->spr[SPR_BOOKE_CSRR1] = msr;
1165
1166 /* DBSR already modified by caller */
1167 } else {
1168 cpu_abort(env_cpu(env),
1169 "Debug exception triggered on unsupported model\n");
1170 }
1171 break;
1172 case POWERPC_EXCP_SPEU: /* SPE/embedded floating-point unavailable/VPU */
1173 env->spr[SPR_BOOKE_ESR] = ESR_SPV;
1174 break;
1175 case POWERPC_EXCP_DOORI: /* Embedded doorbell interrupt */
1176 break;
1177 case POWERPC_EXCP_DOORCI: /* Embedded doorbell critical interrupt */
1178 srr0 = SPR_BOOKE_CSRR0;
1179 srr1 = SPR_BOOKE_CSRR1;
1180 break;
1181 case POWERPC_EXCP_RESET: /* System reset exception */
1182 if (FIELD_EX64(env->msr, MSR, POW)) {
1183 cpu_abort(env_cpu(env),
1184 "Trying to deliver power-saving system reset "
1185 "exception %d with no HV support\n", excp);
1186 }
1187 break;
1188 case POWERPC_EXCP_EFPDI: /* Embedded floating-point data interrupt */
1189 case POWERPC_EXCP_EFPRI: /* Embedded floating-point round interrupt */
1190 cpu_abort(env_cpu(env), "%s exception not implemented\n",
1191 powerpc_excp_name(excp));
1192 break;
1193 default:
1194 cpu_abort(env_cpu(env), "Invalid PowerPC exception %d. Aborting\n",
1195 excp);
1196 break;
1197 }
1198
1199 #ifdef TARGET_PPC64
1200 if (env->spr[SPR_BOOKE_EPCR] & EPCR_ICM) {
1201 /* Cat.64-bit: EPCR.ICM is copied to MSR.CM */
1202 new_msr |= (target_ulong)1 << MSR_CM;
1203 } else {
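/* EPCR.ICM clear: the handler runs in 32-bit mode, so truncate the vector to 32 bits */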
1204 vector = (uint32_t)vector;
1205 }
1206 #endif
1207
1208 env->spr[srr0] = env->nip;
1209 env->spr[srr1] = msr;
1210 powerpc_set_excp_state(cpu, vector, new_msr);
1211 }
1212
1213 /*
1214 * When running a nested HV guest under vhyp, external interrupts are
1215 * delivered as HVIRT.
1216 */
1217 static bool books_vhyp_promotes_external_to_hvirt(PowerPCCPU *cpu)
1218 {
1219 if (cpu->vhyp) {
1220 return vhyp_cpu_in_nested(cpu);
1221 }
1222 return false;
1223 }
1224
1225 #ifdef TARGET_PPC64
1226 /*
1227 * When running under vhyp, hcalls are always intercepted and sent to the
1228 * vhc->hypercall handler.
1229 */
1230 static bool books_vhyp_handles_hcall(PowerPCCPU *cpu)
1231 {
1232 if (cpu->vhyp) {
1233 return !vhyp_cpu_in_nested(cpu);
1234 }
1235 return false;
1236 }
1237
1238 /*
1239 * When running a nested KVM HV guest under vhyp, HV exceptions are not
1240 * delivered to the guest (because there is no concept of HV support), but
1241 * rather they are sent to the vhyp to exit from the L2 back to the L1 and
1242 * return from the H_ENTER_NESTED hypercall.
1243 */
1244 static bool books_vhyp_handles_hv_excp(PowerPCCPU *cpu)
1245 {
1246 if (cpu->vhyp) {
1247 return vhyp_cpu_in_nested(cpu);
1248 }
1249 return false;
1250 }
1251
1252 #ifdef CONFIG_TCG
1253 static bool is_prefix_insn(CPUPPCState *env, uint32_t insn)
1254 {
1255 if (!(env->insns_flags2 & PPC2_ISA310)) {
1256 return false;
1257 }
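/* ISA v3.1 prefixed instructions are identified by primary opcode 1 */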
1258 return ((insn & 0xfc000000) == 0x04000000);
1259 }
1260
1261 static bool is_prefix_insn_excp(PowerPCCPU *cpu, int excp)
1262 {
1263 CPUPPCState *env = &cpu->env;
1264
1265 if (!(env->insns_flags2 & PPC2_ISA310)) {
1266 return false;
1267 }
1268
1269 if (!tcg_enabled()) {
1270 /*
1271 * This does not load instructions and set the prefix bit correctly
1272 * for injected interrupts with KVM. That may have to be discovered
1273 * and set by the KVM layer before injecting.
1274 */
1275 return false;
1276 }
1277
1278 switch (excp) {
1279 case POWERPC_EXCP_MCHECK:
1280 if (!(env->error_code & PPC_BIT(42))) {
1281 /*
1282 * Fetch attempt caused a machine check, so attempting to fetch
1283 * again would cause a recursive machine check.
1284 */
1285 return false;
1286 }
1287 break;
1288 case POWERPC_EXCP_HDSI:
1289 /* HDSI PRTABLE_FAULT has the originating access type in error_code */
1290 if ((env->spr[SPR_HDSISR] & DSISR_PRTABLE_FAULT) &&
1291 (env->error_code == MMU_INST_FETCH)) {
1292 /*
1293 * Fetch failed due to partition scope translation, so prefix
1294 * indication is not relevant (and attempting to load the
1295 * instruction at NIP would cause recursive faults with the same
1296 * translation).
1297 */
1298 return false;
1299 }
1300 break;
1301
1302 case POWERPC_EXCP_DSI:
1303 case POWERPC_EXCP_DSEG:
1304 case POWERPC_EXCP_ALIGN:
1305 case POWERPC_EXCP_PROGRAM:
1306 case POWERPC_EXCP_FPU:
1307 case POWERPC_EXCP_TRACE:
1308 case POWERPC_EXCP_HV_EMU:
1309 case POWERPC_EXCP_VPU:
1310 case POWERPC_EXCP_VSXU:
1311 case POWERPC_EXCP_FU:
1312 case POWERPC_EXCP_HV_FU:
1313 break;
1314 default:
1315 return false;
1316 }
1317
1318 return is_prefix_insn(env, ppc_ldl_code(env, env->nip));
1319 }
1320 #else
1321 static bool is_prefix_insn_excp(PowerPCCPU *cpu, int excp)
1322 {
1323 return false;
1324 }
1325 #endif
1326
1327 static void powerpc_excp_books(PowerPCCPU *cpu, int excp)
1328 {
1329 CPUPPCState *env = &cpu->env;
1330 target_ulong msr, new_msr, vector;
1331 int srr0 = SPR_SRR0, srr1 = SPR_SRR1, lev = -1;
1332
1333 /* new srr1 value excluding must-be-zero bits */
1334 msr = env->msr & ~0x783f0000ULL;
1335
1336 /*
1337 * new interrupt handler msr preserves HV and ME unless explicitly
1338 * overridden
1339 */
1340 new_msr = env->msr & (((target_ulong)1 << MSR_ME) | MSR_HVB);
1341
1342 /*
1343 * check for special resume at 0x100 from doze/nap/sleep/winkle on
1344 * P7/P8/P9
1345 */
1346 if (env->resume_as_sreset) {
1347 excp = powerpc_reset_wakeup(env, excp, &msr);
1348 }
1349
1350 /*
1351 * We don't want to generate a Hypervisor Emulation Assistance
1352 * Interrupt if we don't have HVB in msr_mask (PAPR mode),
1353 * unless running a nested-hv guest, in which case the L1
1354 * kernel wants the interrupt.
1355 */
1356 if (excp == POWERPC_EXCP_HV_EMU && !(env->msr_mask & MSR_HVB) &&
1357 !books_vhyp_handles_hv_excp(cpu)) {
1358 excp = POWERPC_EXCP_PROGRAM;
1359 }
1360
1361 vector = env->excp_vectors[excp];
1362 if (vector == (target_ulong)-1ULL) {
1363 cpu_abort(env_cpu(env),
1364 "Raised an exception without defined vector %d\n", excp);
1365 }
1366 vector |= env->excp_prefix;
1367
1368 if (is_prefix_insn_excp(cpu, excp)) {
1369 msr |= PPC_BIT(34);
1370 }
1371
1372 switch (excp) {
1373 case POWERPC_EXCP_MCHECK: /* Machine check exception */
1374 powerpc_mcheck_checkstop(env);
1375 if (env->msr_mask & MSR_HVB) {
1376 /*
1377 * ISA specifies HV, but can be delivered to guest with HV
1378 * clear (e.g., see FWNMI in PAPR).
1379 */
1380 new_msr |= (target_ulong)MSR_HVB;
1381
1382 /* HV machine check exceptions don't have ME set */
1383 new_msr &= ~((target_ulong)1 << MSR_ME);
1384 }
1385
1386 msr |= env->error_code;
1387 break;
1388
1389 case POWERPC_EXCP_DSI: /* Data storage exception */
1390 trace_ppc_excp_dsi(env->spr[SPR_DSISR], env->spr[SPR_DAR]);
1391 break;
1392 case POWERPC_EXCP_ISI: /* Instruction storage exception */
1393 trace_ppc_excp_isi(msr, env->nip);
1394 msr |= env->error_code;
1395 break;
1396 case POWERPC_EXCP_EXTERNAL: /* External input */
1397 {
1398 bool lpes0;
1399
1400 /* LPES0 is only taken into consideration if we support HV mode */
1401 if (!env->has_hv_mode) {
1402 break;
1403 }
1404 lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
1405 if (!lpes0) {
1406 new_msr |= (target_ulong)MSR_HVB;
1407 new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
1408 srr0 = SPR_HSRR0;
1409 srr1 = SPR_HSRR1;
1410 }
1411 break;
1412 }
1413 case POWERPC_EXCP_ALIGN: /* Alignment exception */
1414 /* Optional DSISR update was removed from ISA v3.0 */
1415 if (!(env->insns_flags2 & PPC2_ISA300)) {
1416 /* Get rS/rD and rA from faulting opcode */
1417 /*
1418 * Note: the opcode fields will not be set properly for a
1419 * direct store load/store, but nobody cares as nobody
1420 * actually uses direct store segments.
1421 */
1422 env->spr[SPR_DSISR] |= (env->error_code & 0x03FF0000) >> 16;
1423 }
1424 break;
1425 case POWERPC_EXCP_PROGRAM: /* Program exception */
1426 switch (env->error_code & ~0xF) {
1427 case POWERPC_EXCP_FP:
1428 if (!FIELD_EX64_FE(env->msr) || !FIELD_EX64(env->msr, MSR, FP)) {
1429 trace_ppc_excp_fp_ignore();
1430 powerpc_reset_excp_state(cpu);
1431 return;
1432 }
1433 /*
1434 * NIP always points to the faulting instruction for FP exceptions,
1435 * so always use store_next and claim we are precise in the MSR.
1436 */
1437 msr |= 0x00100000;
1438 break;
1439 case POWERPC_EXCP_INVAL:
1440 trace_ppc_excp_inval(env->nip);
1441 msr |= 0x00080000;
1442 break;
1443 case POWERPC_EXCP_PRIV:
1444 msr |= 0x00040000;
1445 break;
1446 case POWERPC_EXCP_TRAP:
1447 msr |= 0x00020000;
1448 break;
1449 default:
1450 /* Should never occur */
1451 cpu_abort(env_cpu(env), "Invalid program exception %d. Aborting\n",
1452 env->error_code);
1453 break;
1454 }
1455 break;
1456 case POWERPC_EXCP_SYSCALL: /* System call exception */
1457 lev = env->error_code;
1458
1459 if (lev == 1 && cpu->vhyp) {
1460 dump_hcall(env);
1461 } else {
1462 dump_syscall(env);
1463 }
1464
1465 /*
1466 * We need to correct the NIP which in this case is supposed
1467 * to point to the next instruction
1468 */
1469 env->nip += 4;
1470
1471 /* "PAPR mode" built-in hypercall emulation */
1472 if (lev == 1 && books_vhyp_handles_hcall(cpu)) {
1473 cpu->vhyp_class->hypercall(cpu->vhyp, cpu);
1474 powerpc_reset_excp_state(cpu);
1475 return;
1476 }
1477 if (env->insns_flags2 & PPC2_ISA310) {
1478 /* ISAv3.1 puts LEV into SRR1 */
1479 msr |= lev << 20;
1480 }
1481 if (lev == 1) {
1482 new_msr |= (target_ulong)MSR_HVB;
1483 }
1484 break;
1485 case POWERPC_EXCP_SYSCALL_VECTORED: /* scv exception */
1486 lev = env->error_code;
1487 dump_syscall(env);
1488 env->nip += 4;
1489 new_msr |= env->msr & ((target_ulong)1 << MSR_EE);
1490 new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
1491
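/* scv handlers are spaced 0x20 apart within the 0x17000 vector area, indexed by LEV */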
1492 vector += lev * 0x20;
1493
1494 env->lr = env->nip;
1495 env->ctr = msr;
1496 break;
1497 case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */
1498 case POWERPC_EXCP_DECR: /* Decrementer exception */
1499 break;
1500 case POWERPC_EXCP_RESET: /* System reset exception */
1501 /* A power-saving exception sets ME, otherwise it is unchanged */
1502 if (FIELD_EX64(env->msr, MSR, POW)) {
1503 /* indicate that we resumed from power save mode */
1504 msr |= 0x10000;
1505 new_msr |= ((target_ulong)1 << MSR_ME);
1506 }
1507 if (env->msr_mask & MSR_HVB) {
1508 /*
1509 * ISA specifies HV, but can be delivered to guest with HV
1510 * clear (e.g., see FWNMI in PAPR, NMI injection in QEMU).
1511 */
1512 new_msr |= (target_ulong)MSR_HVB;
1513 } else {
1514 if (FIELD_EX64(env->msr, MSR, POW)) {
1515 cpu_abort(env_cpu(env),
1516 "Trying to deliver power-saving system reset "
1517 "exception %d with no HV support\n", excp);
1518 }
1519 }
1520 break;
1521 case POWERPC_EXCP_TRACE: /* Trace exception */
1522 msr |= env->error_code;
1523 /* fall through */
1524 case POWERPC_EXCP_DSEG: /* Data segment exception */
1525 case POWERPC_EXCP_ISEG: /* Instruction segment exception */
1526 case POWERPC_EXCP_SDOOR: /* Doorbell interrupt */
1527 case POWERPC_EXCP_PERFM: /* Performance monitor interrupt */
1528 break;
1529 case POWERPC_EXCP_HISI: /* Hypervisor instruction storage exception */
1530 msr |= env->error_code;
1531 /* fall through */
1532 case POWERPC_EXCP_HDECR: /* Hypervisor decrementer exception */
1533 case POWERPC_EXCP_HDSI: /* Hypervisor data storage exception */
1534 case POWERPC_EXCP_SDOOR_HV: /* Hypervisor Doorbell interrupt */
1535 case POWERPC_EXCP_HVIRT: /* Hypervisor virtualization */
1536 srr0 = SPR_HSRR0;
1537 srr1 = SPR_HSRR1;
1538 new_msr |= (target_ulong)MSR_HVB;
1539 new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
1540 break;
1541 #ifdef CONFIG_TCG
1542 case POWERPC_EXCP_HV_EMU: {
1543 uint32_t insn = ppc_ldl_code(env, env->nip);
1544 env->spr[SPR_HEIR] = insn;
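/* For a prefixed instruction, HEIR holds prefix:suffix, with the prefix in the upper 32 bits */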
1545 if (is_prefix_insn(env, insn)) {
1546 uint32_t insn2 = ppc_ldl_code(env, env->nip + 4);
1547 env->spr[SPR_HEIR] <<= 32;
1548 env->spr[SPR_HEIR] |= insn2;
1549 }
1550 srr0 = SPR_HSRR0;
1551 srr1 = SPR_HSRR1;
1552 new_msr |= (target_ulong)MSR_HVB;
1553 new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
1554 break;
1555 }
1556 #endif
1557 case POWERPC_EXCP_VPU: /* Vector unavailable exception */
1558 case POWERPC_EXCP_VSXU: /* VSX unavailable exception */
1559 case POWERPC_EXCP_FU: /* Facility unavailable exception */
1560 env->spr[SPR_FSCR] |= ((target_ulong)env->error_code << 56);
1561 break;
1562 case POWERPC_EXCP_HV_FU: /* Hypervisor Facility Unavailable Exception */
1563 env->spr[SPR_HFSCR] |= ((target_ulong)env->error_code << FSCR_IC_POS);
1564 srr0 = SPR_HSRR0;
1565 srr1 = SPR_HSRR1;
1566 new_msr |= (target_ulong)MSR_HVB;
1567 new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
1568 break;
1569 case POWERPC_EXCP_PERFM_EBB: /* Performance Monitor EBB Exception */
1570 case POWERPC_EXCP_EXTERNAL_EBB: /* External EBB Exception */
1571 env->spr[SPR_BESCR] &= ~BESCR_GE;
1572
1573 /*
1574 * Save NIP for rfebb insn in SPR_EBBRR. Next nip is
1575 * stored in the EBB Handler SPR_EBBHR.
1576 */
1577 env->spr[SPR_EBBRR] = env->nip;
1578 powerpc_set_excp_state(cpu, env->spr[SPR_EBBHR], env->msr);
1579
1580 /*
1581 * This exception is handled in userspace. No need to proceed.
1582 */
1583 return;
1584 case POWERPC_EXCP_THERM: /* Thermal interrupt */
1585 case POWERPC_EXCP_VPUA: /* Vector assist exception */
1586 case POWERPC_EXCP_MAINT: /* Maintenance exception */
1587 case POWERPC_EXCP_HV_MAINT: /* Hypervisor Maintenance exception */
1588 cpu_abort(env_cpu(env), "%s exception not implemented\n",
1589 powerpc_excp_name(excp));
1590 break;
1591 default:
1592 cpu_abort(env_cpu(env), "Invalid PowerPC exception %d. Aborting\n",
1593 excp);
1594 break;
1595 }
1596
1597 if (ppc_interrupts_little_endian(cpu, !!(new_msr & MSR_HVB))) {
1598 new_msr |= (target_ulong)1 << MSR_LE;
1599 }
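/* Book3S interrupts are always taken in 64-bit mode */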
1600 new_msr |= (target_ulong)1 << MSR_SF;
1601
1602 if (excp != POWERPC_EXCP_SYSCALL_VECTORED) {
1603 env->spr[srr0] = env->nip;
1604 env->spr[srr1] = msr;
1605 }
1606
1607 if ((new_msr & MSR_HVB) && books_vhyp_handles_hv_excp(cpu)) {
1608 /* Deliver interrupt to L1 by returning from the H_ENTER_NESTED call */
1609 cpu->vhyp_class->deliver_hv_excp(cpu, excp);
1610 powerpc_reset_excp_state(cpu);
1611 } else {
1612 /* Sanity check */
1613 if (!(env->msr_mask & MSR_HVB) && srr0 == SPR_HSRR0) {
1614 cpu_abort(env_cpu(env), "Trying to deliver HV exception (HSRR) %d "
1615 "with no HV support\n", excp);
1616 }
1617 /* This can update new_msr and vector if AIL applies */
1618 ppc_excp_apply_ail(cpu, excp, msr, &new_msr, &vector);
1619 powerpc_set_excp_state(cpu, vector, new_msr);
1620 }
1621 }
1622 #else
1623 static inline void powerpc_excp_books(PowerPCCPU *cpu, int excp)
1624 {
1625 g_assert_not_reached();
1626 }
1627 #endif /* TARGET_PPC64 */
1628
1629 static void powerpc_excp(PowerPCCPU *cpu, int excp)
1630 {
1631 CPUPPCState *env = &cpu->env;
1632
1633 if (excp <= POWERPC_EXCP_NONE || excp >= POWERPC_EXCP_NB) {
1634 cpu_abort(env_cpu(env), "Invalid PowerPC exception %d. Aborting\n",
1635 excp);
1636 }
1637
1638 qemu_log_mask(CPU_LOG_INT, "Raise exception at " TARGET_FMT_lx
1639 " => %s (%d) error=%02x\n", env->nip, powerpc_excp_name(excp),
1640 excp, env->error_code);
1641 env->excp_stats[excp]++;
1642
1643 switch (env->excp_model) {
1644 case POWERPC_EXCP_40x:
1645 powerpc_excp_40x(cpu, excp);
1646 break;
1647 case POWERPC_EXCP_6xx:
1648 powerpc_excp_6xx(cpu, excp);
1649 break;
1650 case POWERPC_EXCP_7xx:
1651 powerpc_excp_7xx(cpu, excp);
1652 break;
1653 case POWERPC_EXCP_74xx:
1654 powerpc_excp_74xx(cpu, excp);
1655 break;
1656 case POWERPC_EXCP_BOOKE:
1657 powerpc_excp_booke(cpu, excp);
1658 break;
1659 case POWERPC_EXCP_970:
1660 case POWERPC_EXCP_POWER7:
1661 case POWERPC_EXCP_POWER8:
1662 case POWERPC_EXCP_POWER9:
1663 case POWERPC_EXCP_POWER10:
1664 powerpc_excp_books(cpu, excp);
1665 break;
1666 default:
1667 g_assert_not_reached();
1668 }
1669 }
1670
1671 void ppc_cpu_do_interrupt(CPUState *cs)
1672 {
1673 PowerPCCPU *cpu = POWERPC_CPU(cs);
1674
1675 powerpc_excp(cpu, cs->exception_index);
1676 }
1677
1678 #ifdef TARGET_PPC64
1679 #define P7_UNUSED_INTERRUPTS \
1680 (PPC_INTERRUPT_RESET | PPC_INTERRUPT_HVIRT | PPC_INTERRUPT_CEXT | \
1681 PPC_INTERRUPT_WDT | PPC_INTERRUPT_CDOORBELL | PPC_INTERRUPT_FIT | \
1682 PPC_INTERRUPT_PIT | PPC_INTERRUPT_DOORBELL | PPC_INTERRUPT_HDOORBELL | \
1683 PPC_INTERRUPT_THERM | PPC_INTERRUPT_EBB)
1684
1685 static int p7_interrupt_powersave(CPUPPCState *env)
1686 {
1687 if ((env->pending_interrupts & PPC_INTERRUPT_EXT) &&
1688 (env->spr[SPR_LPCR] & LPCR_P7_PECE0)) {
1689 return PPC_INTERRUPT_EXT;
1690 }
1691 if ((env->pending_interrupts & PPC_INTERRUPT_DECR) &&
1692 (env->spr[SPR_LPCR] & LPCR_P7_PECE1)) {
1693 return PPC_INTERRUPT_DECR;
1694 }
1695 if ((env->pending_interrupts & PPC_INTERRUPT_MCK) &&
1696 (env->spr[SPR_LPCR] & LPCR_P7_PECE2)) {
1697 return PPC_INTERRUPT_MCK;
1698 }
1699 if ((env->pending_interrupts & PPC_INTERRUPT_HMI) &&
1700 (env->spr[SPR_LPCR] & LPCR_P7_PECE2)) {
1701 return PPC_INTERRUPT_HMI;
1702 }
1703 if (env->pending_interrupts & PPC_INTERRUPT_RESET) {
1704 return PPC_INTERRUPT_RESET;
1705 }
1706 return 0;
1707 }
1708
1709 static int p7_next_unmasked_interrupt(CPUPPCState *env)
1710 {
1711 CPUState *cs = env_cpu(env);
1712
1713 /* Ignore MSR[EE] when coming out of some power management states */
1714 bool msr_ee = FIELD_EX64(env->msr, MSR, EE) || env->resume_as_sreset;
1715
1716 assert((env->pending_interrupts & P7_UNUSED_INTERRUPTS) == 0);
1717
1718 if (cs->halted) {
1719 /* LPCR[PECE] controls which interrupts can exit power-saving mode */
1720 return p7_interrupt_powersave(env);
1721 }
1722
1723 /* Machine check exception */
1724 if (env->pending_interrupts & PPC_INTERRUPT_MCK) {
1725 return PPC_INTERRUPT_MCK;
1726 }
1727
1728 /* Hypervisor decrementer exception */
1729 if (env->pending_interrupts & PPC_INTERRUPT_HDECR) {
1730 /* LPCR will be clear when not supported so this will work */
1731 bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE);
1732 if ((msr_ee || !FIELD_EX64_HV(env->msr)) && hdice) {
1733 /* HDEC clears on delivery */
1734 return PPC_INTERRUPT_HDECR;
1735 }
1736 }
1737
1738 /* External interrupt can ignore MSR:EE under some circumstances */
1739 if (env->pending_interrupts & PPC_INTERRUPT_EXT) {
1740 bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
1741 bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC);
1742 /* HEIC blocks delivery to the hypervisor */
1743 if ((msr_ee && !(heic && FIELD_EX64_HV(env->msr) &&
1744 !FIELD_EX64(env->msr, MSR, PR))) ||
1745 (env->has_hv_mode && !FIELD_EX64_HV(env->msr) && !lpes0)) {
1746 return PPC_INTERRUPT_EXT;
1747 }
1748 }
1749 if (msr_ee != 0) {
1750 /* Decrementer exception */
1751 if (env->pending_interrupts & PPC_INTERRUPT_DECR) {
1752 return PPC_INTERRUPT_DECR;
1753 }
1754 if (env->pending_interrupts & PPC_INTERRUPT_PERFM) {
1755 return PPC_INTERRUPT_PERFM;
1756 }
1757 }
1758
1759 return 0;
1760 }
1761
1762 #define P8_UNUSED_INTERRUPTS \
1763 (PPC_INTERRUPT_RESET | PPC_INTERRUPT_DEBUG | PPC_INTERRUPT_HVIRT | \
1764 PPC_INTERRUPT_CEXT | PPC_INTERRUPT_WDT | PPC_INTERRUPT_CDOORBELL | \
1765 PPC_INTERRUPT_FIT | PPC_INTERRUPT_PIT | PPC_INTERRUPT_THERM)
1766
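/*
 * Same as above for POWER8: LPCR[PECE0..4] additionally gates the doorbell
 * and hypervisor doorbell wakeup sources.
 */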
static int p8_interrupt_powersave(CPUPPCState *env)
1768 {
1769 if ((env->pending_interrupts & PPC_INTERRUPT_EXT) &&
1770 (env->spr[SPR_LPCR] & LPCR_P8_PECE2)) {
1771 return PPC_INTERRUPT_EXT;
1772 }
1773 if ((env->pending_interrupts & PPC_INTERRUPT_DECR) &&
1774 (env->spr[SPR_LPCR] & LPCR_P8_PECE3)) {
1775 return PPC_INTERRUPT_DECR;
1776 }
1777 if ((env->pending_interrupts & PPC_INTERRUPT_MCK) &&
1778 (env->spr[SPR_LPCR] & LPCR_P8_PECE4)) {
1779 return PPC_INTERRUPT_MCK;
1780 }
1781 if ((env->pending_interrupts & PPC_INTERRUPT_HMI) &&
1782 (env->spr[SPR_LPCR] & LPCR_P8_PECE4)) {
1783 return PPC_INTERRUPT_HMI;
1784 }
1785 if ((env->pending_interrupts & PPC_INTERRUPT_DOORBELL) &&
1786 (env->spr[SPR_LPCR] & LPCR_P8_PECE0)) {
1787 return PPC_INTERRUPT_DOORBELL;
1788 }
1789 if ((env->pending_interrupts & PPC_INTERRUPT_HDOORBELL) &&
1790 (env->spr[SPR_LPCR] & LPCR_P8_PECE1)) {
1791 return PPC_INTERRUPT_HDOORBELL;
1792 }
1793 if (env->pending_interrupts & PPC_INTERRUPT_RESET) {
1794 return PPC_INTERRUPT_RESET;
1795 }
1796 return 0;
1797 }
1798
static int p8_next_unmasked_interrupt(CPUPPCState *env)
1800 {
1801 CPUState *cs = env_cpu(env);
1802
1803 /* Ignore MSR[EE] when coming out of some power management states */
1804 bool msr_ee = FIELD_EX64(env->msr, MSR, EE) || env->resume_as_sreset;
1805
1806 assert((env->pending_interrupts & P8_UNUSED_INTERRUPTS) == 0);
1807
1808 if (cs->halted) {
1809 /* LPCR[PECE] controls which interrupts can exit power-saving mode */
1810 return p8_interrupt_powersave(env);
1811 }
1812
1813 /* Machine check exception */
1814 if (env->pending_interrupts & PPC_INTERRUPT_MCK) {
1815 return PPC_INTERRUPT_MCK;
1816 }
1817
1818 /* Hypervisor decrementer exception */
1819 if (env->pending_interrupts & PPC_INTERRUPT_HDECR) {
1820 /* LPCR will be clear when not supported so this will work */
1821 bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE);
1822 if ((msr_ee || !FIELD_EX64_HV(env->msr)) && hdice) {
1823 /* HDEC clears on delivery */
1824 return PPC_INTERRUPT_HDECR;
1825 }
1826 }
1827
1828 /* External interrupt can ignore MSR:EE under some circumstances */
1829 if (env->pending_interrupts & PPC_INTERRUPT_EXT) {
1830 bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
1831 bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC);
1832 /* HEIC blocks delivery to the hypervisor */
1833 if ((msr_ee && !(heic && FIELD_EX64_HV(env->msr) &&
1834 !FIELD_EX64(env->msr, MSR, PR))) ||
1835 (env->has_hv_mode && !FIELD_EX64_HV(env->msr) && !lpes0)) {
1836 return PPC_INTERRUPT_EXT;
1837 }
1838 }
1839 if (msr_ee != 0) {
1840 /* Decrementer exception */
1841 if (env->pending_interrupts & PPC_INTERRUPT_DECR) {
1842 return PPC_INTERRUPT_DECR;
1843 }
1844 if (env->pending_interrupts & PPC_INTERRUPT_DOORBELL) {
1845 return PPC_INTERRUPT_DOORBELL;
1846 }
1847 if (env->pending_interrupts & PPC_INTERRUPT_HDOORBELL) {
1848 return PPC_INTERRUPT_HDOORBELL;
1849 }
1850 if (env->pending_interrupts & PPC_INTERRUPT_PERFM) {
1851 return PPC_INTERRUPT_PERFM;
1852 }
1853 /* EBB exception */
1854 if (env->pending_interrupts & PPC_INTERRUPT_EBB) {
1855 /*
1856 * EBB exception must be taken in problem state and
1857 * with BESCR_GE set.
1858 */
1859 if (FIELD_EX64(env->msr, MSR, PR) &&
1860 (env->spr[SPR_BESCR] & BESCR_GE)) {
1861 return PPC_INTERRUPT_EBB;
1862 }
1863 }
1864 }
1865
1866 return 0;
1867 }
1868
1869 #define P9_UNUSED_INTERRUPTS \
1870 (PPC_INTERRUPT_RESET | PPC_INTERRUPT_DEBUG | PPC_INTERRUPT_CEXT | \
1871 PPC_INTERRUPT_WDT | PPC_INTERRUPT_CDOORBELL | PPC_INTERRUPT_FIT | \
1872 PPC_INTERRUPT_PIT | PPC_INTERRUPT_THERM)
1873
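/*
 * POWER9/POWER10 power-saving wakeup causes, gated by the individual LPCR
 * exit-enable bits (EEE, DEE, OEE, PDEE, HDEE, HVEE).
 */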
static int p9_interrupt_powersave(CPUPPCState *env)
1875 {
1876 /* External Exception */
1877 if ((env->pending_interrupts & PPC_INTERRUPT_EXT) &&
1878 (env->spr[SPR_LPCR] & LPCR_EEE)) {
1879 bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC);
1880 if (!heic || !FIELD_EX64_HV(env->msr) ||
1881 FIELD_EX64(env->msr, MSR, PR)) {
1882 return PPC_INTERRUPT_EXT;
1883 }
1884 }
1885 /* Decrementer Exception */
1886 if ((env->pending_interrupts & PPC_INTERRUPT_DECR) &&
1887 (env->spr[SPR_LPCR] & LPCR_DEE)) {
1888 return PPC_INTERRUPT_DECR;
1889 }
1890 /* Machine Check or Hypervisor Maintenance Exception */
1891 if (env->spr[SPR_LPCR] & LPCR_OEE) {
1892 if (env->pending_interrupts & PPC_INTERRUPT_MCK) {
1893 return PPC_INTERRUPT_MCK;
1894 }
1895 if (env->pending_interrupts & PPC_INTERRUPT_HMI) {
1896 return PPC_INTERRUPT_HMI;
1897 }
1898 }
1899 /* Privileged Doorbell Exception */
1900 if ((env->pending_interrupts & PPC_INTERRUPT_DOORBELL) &&
1901 (env->spr[SPR_LPCR] & LPCR_PDEE)) {
1902 return PPC_INTERRUPT_DOORBELL;
1903 }
1904 /* Hypervisor Doorbell Exception */
1905 if ((env->pending_interrupts & PPC_INTERRUPT_HDOORBELL) &&
1906 (env->spr[SPR_LPCR] & LPCR_HDEE)) {
1907 return PPC_INTERRUPT_HDOORBELL;
1908 }
1909 /* Hypervisor virtualization exception */
1910 if ((env->pending_interrupts & PPC_INTERRUPT_HVIRT) &&
1911 (env->spr[SPR_LPCR] & LPCR_HVEE)) {
1912 return PPC_INTERRUPT_HVIRT;
1913 }
1914 if (env->pending_interrupts & PPC_INTERRUPT_RESET) {
1915 return PPC_INTERRUPT_RESET;
1916 }
1917 return 0;
1918 }
1919
static int p9_next_unmasked_interrupt(CPUPPCState *env)
1921 {
1922 CPUState *cs = env_cpu(env);
1923
1924 /* Ignore MSR[EE] when coming out of some power management states */
1925 bool msr_ee = FIELD_EX64(env->msr, MSR, EE) || env->resume_as_sreset;
1926
1927 assert((env->pending_interrupts & P9_UNUSED_INTERRUPTS) == 0);
1928
1929 if (cs->halted) {
1930 if (env->spr[SPR_PSSCR] & PSSCR_EC) {
1931 /*
1932 * When PSSCR[EC] is set, LPCR[PECE] controls which interrupts can
1933 * wakeup the processor
1934 */
1935 return p9_interrupt_powersave(env);
1936 } else {
1937 /*
1938 * When it's clear, any system-caused exception exits power-saving
1939 * mode, even the ones that gate on MSR[EE].
1940 */
1941 msr_ee = true;
1942 }
1943 }
1944
1945 /* Machine check exception */
1946 if (env->pending_interrupts & PPC_INTERRUPT_MCK) {
1947 return PPC_INTERRUPT_MCK;
1948 }
1949
1950 /* Hypervisor decrementer exception */
1951 if (env->pending_interrupts & PPC_INTERRUPT_HDECR) {
1952 /* LPCR will be clear when not supported so this will work */
1953 bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE);
1954 if ((msr_ee || !FIELD_EX64_HV(env->msr)) && hdice) {
1955 /* HDEC clears on delivery */
1956 return PPC_INTERRUPT_HDECR;
1957 }
1958 }
1959
1960 /* Hypervisor virtualization interrupt */
1961 if (env->pending_interrupts & PPC_INTERRUPT_HVIRT) {
1962 /* LPCR will be clear when not supported so this will work */
1963 bool hvice = !!(env->spr[SPR_LPCR] & LPCR_HVICE);
1964 if ((msr_ee || !FIELD_EX64_HV(env->msr)) && hvice) {
1965 return PPC_INTERRUPT_HVIRT;
1966 }
1967 }
1968
1969 /* External interrupt can ignore MSR:EE under some circumstances */
1970 if (env->pending_interrupts & PPC_INTERRUPT_EXT) {
1971 bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
1972 bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC);
1973 /* HEIC blocks delivery to the hypervisor */
1974 if ((msr_ee && !(heic && FIELD_EX64_HV(env->msr) &&
1975 !FIELD_EX64(env->msr, MSR, PR))) ||
1976 (env->has_hv_mode && !FIELD_EX64_HV(env->msr) && !lpes0)) {
1977 return PPC_INTERRUPT_EXT;
1978 }
1979 }
1980 if (msr_ee != 0) {
1981 /* Decrementer exception */
1982 if (env->pending_interrupts & PPC_INTERRUPT_DECR) {
1983 return PPC_INTERRUPT_DECR;
1984 }
1985 if (env->pending_interrupts & PPC_INTERRUPT_DOORBELL) {
1986 return PPC_INTERRUPT_DOORBELL;
1987 }
1988 if (env->pending_interrupts & PPC_INTERRUPT_HDOORBELL) {
1989 return PPC_INTERRUPT_HDOORBELL;
1990 }
1991 if (env->pending_interrupts & PPC_INTERRUPT_PERFM) {
1992 return PPC_INTERRUPT_PERFM;
1993 }
1994 /* EBB exception */
1995 if (env->pending_interrupts & PPC_INTERRUPT_EBB) {
1996 /*
1997 * EBB exception must be taken in problem state and
1998 * with BESCR_GE set.
1999 */
2000 if (FIELD_EX64(env->msr, MSR, PR) &&
2001 (env->spr[SPR_BESCR] & BESCR_GE)) {
2002 return PPC_INTERRUPT_EBB;
2003 }
2004 }
2005 }
2006
2007 return 0;
2008 }
2009 #endif /* TARGET_PPC64 */
2010
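/*
 * Return the highest-priority pending interrupt that is not masked by the
 * current MSR/LPCR state, as a PPC_INTERRUPT_* value, or 0 if there is
 * nothing to deliver. Book3s POWER7 and later CPUs use their own versions.
 */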
static int ppc_next_unmasked_interrupt(CPUPPCState *env)
2012 {
2013 #ifdef TARGET_PPC64
2014 switch (env->excp_model) {
2015 case POWERPC_EXCP_POWER7:
2016 return p7_next_unmasked_interrupt(env);
2017 case POWERPC_EXCP_POWER8:
2018 return p8_next_unmasked_interrupt(env);
2019 case POWERPC_EXCP_POWER9:
2020 case POWERPC_EXCP_POWER10:
2021 return p9_next_unmasked_interrupt(env);
2022 default:
2023 break;
2024 }
2025 #endif
2026 bool async_deliver;
2027
2028 /* External reset */
2029 if (env->pending_interrupts & PPC_INTERRUPT_RESET) {
2030 return PPC_INTERRUPT_RESET;
2031 }
2032 /* Machine check exception */
2033 if (env->pending_interrupts & PPC_INTERRUPT_MCK) {
2034 return PPC_INTERRUPT_MCK;
2035 }
2036 #if 0 /* TODO */
2037 /* External debug exception */
2038 if (env->pending_interrupts & PPC_INTERRUPT_DEBUG) {
2039 return PPC_INTERRUPT_DEBUG;
2040 }
2041 #endif
2042
2043 /*
2044 * For interrupts that gate on MSR:EE, we need to do something a
2045 * bit more subtle, as we need to let them through even when EE is
2046 * clear when coming out of some power management states (in order
2047 * for them to become a 0x100).
2048 */
2049 async_deliver = FIELD_EX64(env->msr, MSR, EE) || env->resume_as_sreset;
2050
2051 /* Hypervisor decrementer exception */
2052 if (env->pending_interrupts & PPC_INTERRUPT_HDECR) {
2053 /* LPCR will be clear when not supported so this will work */
2054 bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE);
2055 if ((async_deliver || !FIELD_EX64_HV(env->msr)) && hdice) {
2056 /* HDEC clears on delivery */
2057 return PPC_INTERRUPT_HDECR;
2058 }
2059 }
2060
2061 /* Hypervisor virtualization interrupt */
2062 if (env->pending_interrupts & PPC_INTERRUPT_HVIRT) {
2063 /* LPCR will be clear when not supported so this will work */
2064 bool hvice = !!(env->spr[SPR_LPCR] & LPCR_HVICE);
2065 if ((async_deliver || !FIELD_EX64_HV(env->msr)) && hvice) {
2066 return PPC_INTERRUPT_HVIRT;
2067 }
2068 }
2069
2070 /* External interrupt can ignore MSR:EE under some circumstances */
2071 if (env->pending_interrupts & PPC_INTERRUPT_EXT) {
2072 bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
2073 bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC);
2074 /* HEIC blocks delivery to the hypervisor */
2075 if ((async_deliver && !(heic && FIELD_EX64_HV(env->msr) &&
2076 !FIELD_EX64(env->msr, MSR, PR))) ||
2077 (env->has_hv_mode && !FIELD_EX64_HV(env->msr) && !lpes0)) {
2078 return PPC_INTERRUPT_EXT;
2079 }
2080 }
2081 if (FIELD_EX64(env->msr, MSR, CE)) {
2082 /* External critical interrupt */
2083 if (env->pending_interrupts & PPC_INTERRUPT_CEXT) {
2084 return PPC_INTERRUPT_CEXT;
2085 }
2086 }
2087 if (async_deliver != 0) {
2088 /* Watchdog timer on embedded PowerPC */
2089 if (env->pending_interrupts & PPC_INTERRUPT_WDT) {
2090 return PPC_INTERRUPT_WDT;
2091 }
2092 if (env->pending_interrupts & PPC_INTERRUPT_CDOORBELL) {
2093 return PPC_INTERRUPT_CDOORBELL;
2094 }
2095 /* Fixed interval timer on embedded PowerPC */
2096 if (env->pending_interrupts & PPC_INTERRUPT_FIT) {
2097 return PPC_INTERRUPT_FIT;
2098 }
2099 /* Programmable interval timer on embedded PowerPC */
2100 if (env->pending_interrupts & PPC_INTERRUPT_PIT) {
2101 return PPC_INTERRUPT_PIT;
2102 }
2103 /* Decrementer exception */
2104 if (env->pending_interrupts & PPC_INTERRUPT_DECR) {
2105 return PPC_INTERRUPT_DECR;
2106 }
2107 if (env->pending_interrupts & PPC_INTERRUPT_DOORBELL) {
2108 return PPC_INTERRUPT_DOORBELL;
2109 }
2110 if (env->pending_interrupts & PPC_INTERRUPT_HDOORBELL) {
2111 return PPC_INTERRUPT_HDOORBELL;
2112 }
2113 if (env->pending_interrupts & PPC_INTERRUPT_PERFM) {
2114 return PPC_INTERRUPT_PERFM;
2115 }
2116 /* Thermal interrupt */
2117 if (env->pending_interrupts & PPC_INTERRUPT_THERM) {
2118 return PPC_INTERRUPT_THERM;
2119 }
2120 /* EBB exception */
2121 if (env->pending_interrupts & PPC_INTERRUPT_EBB) {
2122 /*
2123 * EBB exception must be taken in problem state and
2124 * with BESCR_GE set.
2125 */
2126 if (FIELD_EX64(env->msr, MSR, PR) &&
2127 (env->spr[SPR_BESCR] & BESCR_GE)) {
2128 return PPC_INTERRUPT_EBB;
2129 }
2130 }
2131 }
2132
2133 return 0;
2134 }
2135
2136 /*
2137 * Sets CPU_INTERRUPT_HARD if there is at least one unmasked interrupt to be
2138 * delivered and clears CPU_INTERRUPT_HARD otherwise.
2139 *
2140 * This method is called by ppc_set_interrupt when an interrupt is raised or
2141 * lowered, and should also be called whenever an interrupt masking condition
2142 * is changed, e.g.:
2143 * - When relevant bits of MSR are altered, like EE, HV, PR, etc.;
2144 * - When relevant bits of LPCR are altered, like PECE, HDICE, HVICE, etc.;
2145 * - When PSSCR[EC] or env->resume_as_sreset are changed;
2146 * - When cs->halted is changed and the CPU has a different interrupt masking
2147 * logic in power-saving mode (e.g., POWER7/8/9/10);
2148 */
void ppc_maybe_interrupt(CPUPPCState *env)
2150 {
2151 CPUState *cs = env_cpu(env);
2152 BQL_LOCK_GUARD();
2153
2154 if (ppc_next_unmasked_interrupt(env)) {
2155 cpu_interrupt(cs, CPU_INTERRUPT_HARD);
2156 } else {
2157 cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
2158 }
2159 }
2160
2161 #ifdef TARGET_PPC64
static void p7_deliver_interrupt(CPUPPCState *env, int interrupt)
2163 {
2164 PowerPCCPU *cpu = env_archcpu(env);
2165
2166 switch (interrupt) {
2167 case PPC_INTERRUPT_MCK: /* Machine check exception */
2168 env->pending_interrupts &= ~PPC_INTERRUPT_MCK;
2169 powerpc_excp(cpu, POWERPC_EXCP_MCHECK);
2170 break;
2171
2172 case PPC_INTERRUPT_HDECR: /* Hypervisor decrementer exception */
2173 /* HDEC clears on delivery */
2174 env->pending_interrupts &= ~PPC_INTERRUPT_HDECR;
2175 powerpc_excp(cpu, POWERPC_EXCP_HDECR);
2176 break;
2177
2178 case PPC_INTERRUPT_EXT:
2179 if (books_vhyp_promotes_external_to_hvirt(cpu)) {
2180 powerpc_excp(cpu, POWERPC_EXCP_HVIRT);
2181 } else {
2182 powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL);
2183 }
2184 break;
2185
2186 case PPC_INTERRUPT_DECR: /* Decrementer exception */
2187 powerpc_excp(cpu, POWERPC_EXCP_DECR);
2188 break;
2189 case PPC_INTERRUPT_PERFM:
2190 env->pending_interrupts &= ~PPC_INTERRUPT_PERFM;
2191 powerpc_excp(cpu, POWERPC_EXCP_PERFM);
2192 break;
2193 case 0:
2194 /*
2195 * This is a bug ! It means that has_work took us out of halt without
2196 * anything to deliver while in a PM state that requires getting
2197 * out via a 0x100
2198 *
2199 * This means we will incorrectly execute past the power management
2200 * instruction instead of triggering a reset.
2201 *
2202 * It generally means a discrepancy between the wakeup conditions in the
2203 * processor has_work implementation and the logic in this function.
2204 */
2205 assert(!env->resume_as_sreset);
2206 break;
2207 default:
2208 cpu_abort(env_cpu(env), "Invalid PowerPC interrupt %d. Aborting\n",
2209 interrupt);
2210 }
2211 }
2212
static void p8_deliver_interrupt(CPUPPCState *env, int interrupt)
2214 {
2215 PowerPCCPU *cpu = env_archcpu(env);
2216
2217 switch (interrupt) {
2218 case PPC_INTERRUPT_MCK: /* Machine check exception */
2219 env->pending_interrupts &= ~PPC_INTERRUPT_MCK;
2220 powerpc_excp(cpu, POWERPC_EXCP_MCHECK);
2221 break;
2222
2223 case PPC_INTERRUPT_HDECR: /* Hypervisor decrementer exception */
2224 /* HDEC clears on delivery */
2225 env->pending_interrupts &= ~PPC_INTERRUPT_HDECR;
2226 powerpc_excp(cpu, POWERPC_EXCP_HDECR);
2227 break;
2228
2229 case PPC_INTERRUPT_EXT:
2230 if (books_vhyp_promotes_external_to_hvirt(cpu)) {
2231 powerpc_excp(cpu, POWERPC_EXCP_HVIRT);
2232 } else {
2233 powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL);
2234 }
2235 break;
2236
2237 case PPC_INTERRUPT_DECR: /* Decrementer exception */
2238 powerpc_excp(cpu, POWERPC_EXCP_DECR);
2239 break;
2240 case PPC_INTERRUPT_DOORBELL:
2241 env->pending_interrupts &= ~PPC_INTERRUPT_DOORBELL;
2242 if (is_book3s_arch2x(env)) {
2243 powerpc_excp(cpu, POWERPC_EXCP_SDOOR);
2244 } else {
2245 powerpc_excp(cpu, POWERPC_EXCP_DOORI);
2246 }
2247 break;
2248 case PPC_INTERRUPT_HDOORBELL:
2249 env->pending_interrupts &= ~PPC_INTERRUPT_HDOORBELL;
2250 powerpc_excp(cpu, POWERPC_EXCP_SDOOR_HV);
2251 break;
2252 case PPC_INTERRUPT_PERFM:
2253 env->pending_interrupts &= ~PPC_INTERRUPT_PERFM;
2254 powerpc_excp(cpu, POWERPC_EXCP_PERFM);
2255 break;
2256 case PPC_INTERRUPT_EBB: /* EBB exception */
2257 env->pending_interrupts &= ~PPC_INTERRUPT_EBB;
2258 if (env->spr[SPR_BESCR] & BESCR_PMEO) {
2259 powerpc_excp(cpu, POWERPC_EXCP_PERFM_EBB);
2260 } else if (env->spr[SPR_BESCR] & BESCR_EEO) {
2261 powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL_EBB);
2262 }
2263 break;
2264 case 0:
2265 /*
2266 * This is a bug ! It means that has_work took us out of halt without
2267 * anything to deliver while in a PM state that requires getting
2268 * out via a 0x100
2269 *
2270 * This means we will incorrectly execute past the power management
2271 * instruction instead of triggering a reset.
2272 *
2273 * It generally means a discrepancy between the wakeup conditions in the
2274 * processor has_work implementation and the logic in this function.
2275 */
2276 assert(!env->resume_as_sreset);
2277 break;
2278 default:
2279 cpu_abort(env_cpu(env), "Invalid PowerPC interrupt %d. Aborting\n",
2280 interrupt);
2281 }
2282 }
2283
static void p9_deliver_interrupt(CPUPPCState *env, int interrupt)
2285 {
2286 PowerPCCPU *cpu = env_archcpu(env);
2287 CPUState *cs = env_cpu(env);
2288
2289 if (cs->halted && !(env->spr[SPR_PSSCR] & PSSCR_EC) &&
2290 !FIELD_EX64(env->msr, MSR, EE)) {
2291 /*
2292 * A pending interrupt took us out of power-saving, but MSR[EE] says
2293 * that we should return to NIP+4 instead of delivering it.
2294 */
2295 return;
2296 }
2297
2298 switch (interrupt) {
2299 case PPC_INTERRUPT_MCK: /* Machine check exception */
2300 env->pending_interrupts &= ~PPC_INTERRUPT_MCK;
2301 powerpc_excp(cpu, POWERPC_EXCP_MCHECK);
2302 break;
2303
2304 case PPC_INTERRUPT_HDECR: /* Hypervisor decrementer exception */
2305 /* HDEC clears on delivery */
2306 env->pending_interrupts &= ~PPC_INTERRUPT_HDECR;
2307 powerpc_excp(cpu, POWERPC_EXCP_HDECR);
2308 break;
2309 case PPC_INTERRUPT_HVIRT: /* Hypervisor virtualization interrupt */
2310 powerpc_excp(cpu, POWERPC_EXCP_HVIRT);
2311 break;
2312
2313 case PPC_INTERRUPT_EXT:
2314 if (books_vhyp_promotes_external_to_hvirt(cpu)) {
2315 powerpc_excp(cpu, POWERPC_EXCP_HVIRT);
2316 } else {
2317 powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL);
2318 }
2319 break;
2320
2321 case PPC_INTERRUPT_DECR: /* Decrementer exception */
2322 powerpc_excp(cpu, POWERPC_EXCP_DECR);
2323 break;
2324 case PPC_INTERRUPT_DOORBELL:
2325 env->pending_interrupts &= ~PPC_INTERRUPT_DOORBELL;
2326 powerpc_excp(cpu, POWERPC_EXCP_SDOOR);
2327 break;
2328 case PPC_INTERRUPT_HDOORBELL:
2329 env->pending_interrupts &= ~PPC_INTERRUPT_HDOORBELL;
2330 powerpc_excp(cpu, POWERPC_EXCP_SDOOR_HV);
2331 break;
2332 case PPC_INTERRUPT_PERFM:
2333 env->pending_interrupts &= ~PPC_INTERRUPT_PERFM;
2334 powerpc_excp(cpu, POWERPC_EXCP_PERFM);
2335 break;
2336 case PPC_INTERRUPT_EBB: /* EBB exception */
2337 env->pending_interrupts &= ~PPC_INTERRUPT_EBB;
2338 if (env->spr[SPR_BESCR] & BESCR_PMEO) {
2339 powerpc_excp(cpu, POWERPC_EXCP_PERFM_EBB);
2340 } else if (env->spr[SPR_BESCR] & BESCR_EEO) {
2341 powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL_EBB);
2342 }
2343 break;
2344 case 0:
2345 /*
2346 * This is a bug ! It means that has_work took us out of halt without
2347 * anything to deliver while in a PM state that requires getting
2348 * out via a 0x100
2349 *
2350 * This means we will incorrectly execute past the power management
2351 * instruction instead of triggering a reset.
2352 *
2353 * It generally means a discrepancy between the wakeup conditions in the
2354 * processor has_work implementation and the logic in this function.
2355 */
2356 assert(!env->resume_as_sreset);
2357 break;
2358 default:
2359 cpu_abort(env_cpu(env), "Invalid PowerPC interrupt %d. Aborting\n",
2360 interrupt);
2361 }
2362 }
2363 #endif /* TARGET_PPC64 */
2364
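/*
 * Deliver one interrupt previously selected by ppc_next_unmasked_interrupt():
 * clear its pending bit where the architecture requires it and raise the
 * corresponding exception through powerpc_excp().
 */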
static void ppc_deliver_interrupt(CPUPPCState *env, int interrupt)
2366 {
2367 #ifdef TARGET_PPC64
2368 switch (env->excp_model) {
2369 case POWERPC_EXCP_POWER7:
2370 return p7_deliver_interrupt(env, interrupt);
2371 case POWERPC_EXCP_POWER8:
2372 return p8_deliver_interrupt(env, interrupt);
2373 case POWERPC_EXCP_POWER9:
2374 case POWERPC_EXCP_POWER10:
2375 return p9_deliver_interrupt(env, interrupt);
2376 default:
2377 break;
2378 }
2379 #endif
2380 PowerPCCPU *cpu = env_archcpu(env);
2381
2382 switch (interrupt) {
2383 case PPC_INTERRUPT_RESET: /* External reset */
2384 env->pending_interrupts &= ~PPC_INTERRUPT_RESET;
2385 powerpc_excp(cpu, POWERPC_EXCP_RESET);
2386 break;
2387 case PPC_INTERRUPT_MCK: /* Machine check exception */
2388 env->pending_interrupts &= ~PPC_INTERRUPT_MCK;
2389 powerpc_excp(cpu, POWERPC_EXCP_MCHECK);
2390 break;
2391
2392 case PPC_INTERRUPT_HDECR: /* Hypervisor decrementer exception */
2393 /* HDEC clears on delivery */
2394 env->pending_interrupts &= ~PPC_INTERRUPT_HDECR;
2395 powerpc_excp(cpu, POWERPC_EXCP_HDECR);
2396 break;
2397 case PPC_INTERRUPT_HVIRT: /* Hypervisor virtualization interrupt */
2398 powerpc_excp(cpu, POWERPC_EXCP_HVIRT);
2399 break;
2400
2401 case PPC_INTERRUPT_EXT:
2402 if (books_vhyp_promotes_external_to_hvirt(cpu)) {
2403 powerpc_excp(cpu, POWERPC_EXCP_HVIRT);
2404 } else {
2405 powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL);
2406 }
2407 break;
2408 case PPC_INTERRUPT_CEXT: /* External critical interrupt */
2409 powerpc_excp(cpu, POWERPC_EXCP_CRITICAL);
2410 break;
2411
2412 case PPC_INTERRUPT_WDT: /* Watchdog timer on embedded PowerPC */
2413 env->pending_interrupts &= ~PPC_INTERRUPT_WDT;
2414 powerpc_excp(cpu, POWERPC_EXCP_WDT);
2415 break;
2416 case PPC_INTERRUPT_CDOORBELL:
2417 env->pending_interrupts &= ~PPC_INTERRUPT_CDOORBELL;
2418 powerpc_excp(cpu, POWERPC_EXCP_DOORCI);
2419 break;
2420 case PPC_INTERRUPT_FIT: /* Fixed interval timer on embedded PowerPC */
2421 env->pending_interrupts &= ~PPC_INTERRUPT_FIT;
2422 powerpc_excp(cpu, POWERPC_EXCP_FIT);
2423 break;
2424 case PPC_INTERRUPT_PIT: /* Programmable interval timer on embedded ppc */
2425 env->pending_interrupts &= ~PPC_INTERRUPT_PIT;
2426 powerpc_excp(cpu, POWERPC_EXCP_PIT);
2427 break;
2428 case PPC_INTERRUPT_DECR: /* Decrementer exception */
2429 if (ppc_decr_clear_on_delivery(env)) {
2430 env->pending_interrupts &= ~PPC_INTERRUPT_DECR;
2431 }
2432 powerpc_excp(cpu, POWERPC_EXCP_DECR);
2433 break;
2434 case PPC_INTERRUPT_DOORBELL:
2435 env->pending_interrupts &= ~PPC_INTERRUPT_DOORBELL;
2436 if (is_book3s_arch2x(env)) {
2437 powerpc_excp(cpu, POWERPC_EXCP_SDOOR);
2438 } else {
2439 powerpc_excp(cpu, POWERPC_EXCP_DOORI);
2440 }
2441 break;
2442 case PPC_INTERRUPT_HDOORBELL:
2443 env->pending_interrupts &= ~PPC_INTERRUPT_HDOORBELL;
2444 powerpc_excp(cpu, POWERPC_EXCP_SDOOR_HV);
2445 break;
2446 case PPC_INTERRUPT_PERFM:
2447 env->pending_interrupts &= ~PPC_INTERRUPT_PERFM;
2448 powerpc_excp(cpu, POWERPC_EXCP_PERFM);
2449 break;
2450 case PPC_INTERRUPT_THERM: /* Thermal interrupt */
2451 env->pending_interrupts &= ~PPC_INTERRUPT_THERM;
2452 powerpc_excp(cpu, POWERPC_EXCP_THERM);
2453 break;
2454 case PPC_INTERRUPT_EBB: /* EBB exception */
2455 env->pending_interrupts &= ~PPC_INTERRUPT_EBB;
2456 if (env->spr[SPR_BESCR] & BESCR_PMEO) {
2457 powerpc_excp(cpu, POWERPC_EXCP_PERFM_EBB);
2458 } else if (env->spr[SPR_BESCR] & BESCR_EEO) {
2459 powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL_EBB);
2460 }
2461 break;
2462 case 0:
2463 /*
2464 * This is a bug ! It means that has_work took us out of halt without
2465 * anything to deliver while in a PM state that requires getting
2466 * out via a 0x100
2467 *
2468 * This means we will incorrectly execute past the power management
2469 * instruction instead of triggering a reset.
2470 *
2471 * It generally means a discrepancy between the wakeup conditions in the
2472 * processor has_work implementation and the logic in this function.
2473 */
2474 assert(!env->resume_as_sreset);
2475 break;
2476 default:
2477 cpu_abort(env_cpu(env), "Invalid PowerPC interrupt %d. Aborting\n",
2478 interrupt);
2479 }
2480 }
2481
void ppc_cpu_do_system_reset(CPUState *cs)
2483 {
2484 PowerPCCPU *cpu = POWERPC_CPU(cs);
2485
2486 powerpc_excp(cpu, POWERPC_EXCP_RESET);
2487 }
2488
void ppc_cpu_do_fwnmi_machine_check(CPUState *cs, target_ulong vector)
2490 {
2491 PowerPCCPU *cpu = POWERPC_CPU(cs);
2492 CPUPPCState *env = &cpu->env;
2493 target_ulong msr = 0;
2494
2495 /*
2496 * Set MSR and NIP for the handler, SRR0/1, DAR and DSISR have already
2497 * been set by KVM.
2498 */
2499 msr = (1ULL << MSR_ME);
2500 msr |= env->msr & (1ULL << MSR_SF);
2501 if (ppc_interrupts_little_endian(cpu, false)) {
2502 msr |= (1ULL << MSR_LE);
2503 }
2504
2505 /* Anything for nested required here? MSR[HV] bit? */
2506
2507 powerpc_set_excp_state(cpu, vector, msr);
2508 }
2509
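/* TCG cpu_exec hook: deliver at most one pending unmasked interrupt. */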
bool ppc_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
2511 {
2512 CPUPPCState *env = cpu_env(cs);
2513 int interrupt;
2514
2515 if ((interrupt_request & CPU_INTERRUPT_HARD) == 0) {
2516 return false;
2517 }
2518
2519 interrupt = ppc_next_unmasked_interrupt(env);
2520 if (interrupt == 0) {
2521 return false;
2522 }
2523
2524 ppc_deliver_interrupt(env, interrupt);
2525 if (env->pending_interrupts == 0) {
2526 cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
2527 }
2528 return true;
2529 }
2530
2531 #endif /* !CONFIG_USER_ONLY */
2532
2533 /*****************************************************************************/
2534 /* Exceptions processing helpers */
2535
void raise_exception_err_ra(CPUPPCState *env, uint32_t exception,
                            uint32_t error_code, uintptr_t raddr)
2538 {
2539 CPUState *cs = env_cpu(env);
2540
2541 cs->exception_index = exception;
2542 env->error_code = error_code;
2543 cpu_loop_exit_restore(cs, raddr);
2544 }
2545
void raise_exception_err(CPUPPCState *env, uint32_t exception,
                         uint32_t error_code)
2548 {
2549 raise_exception_err_ra(env, exception, error_code, 0);
2550 }
2551
void raise_exception(CPUPPCState *env, uint32_t exception)
2553 {
2554 raise_exception_err_ra(env, exception, 0, 0);
2555 }
2556
void raise_exception_ra(CPUPPCState *env, uint32_t exception,
                        uintptr_t raddr)
2559 {
2560 raise_exception_err_ra(env, exception, 0, raddr);
2561 }
2562
2563 #ifdef CONFIG_TCG
void helper_raise_exception_err(CPUPPCState *env, uint32_t exception,
                                uint32_t error_code)
2566 {
2567 raise_exception_err_ra(env, exception, error_code, 0);
2568 }
2569
void helper_raise_exception(CPUPPCState *env, uint32_t exception)
2571 {
2572 raise_exception_err_ra(env, exception, 0, 0);
2573 }
2574
2575 #ifndef CONFIG_USER_ONLY
void helper_store_msr(CPUPPCState *env, target_ulong val)
2577 {
2578 uint32_t excp = hreg_store_msr(env, val, 0);
2579
2580 if (excp != 0) {
2581 cpu_interrupt_exittb(env_cpu(env));
2582 raise_exception(env, excp);
2583 }
2584 }
2585
void helper_ppc_maybe_interrupt(CPUPPCState *env)
2587 {
2588 ppc_maybe_interrupt(env);
2589 }
2590
2591 #ifdef TARGET_PPC64
void helper_scv(CPUPPCState *env, uint32_t lev)
2593 {
2594 if (env->spr[SPR_FSCR] & (1ull << FSCR_SCV)) {
2595 raise_exception_err(env, POWERPC_EXCP_SYSCALL_VECTORED, lev);
2596 } else {
2597 raise_exception_err(env, POWERPC_EXCP_FU, FSCR_IC_SCV);
2598 }
2599 }
2600
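/*
 * Power-management instructions (doze/nap/sleep/winkle/stop): halt the CPU
 * and record whether wakeup must be taken as a 0x100 system reset.
 */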
void helper_pminsn(CPUPPCState *env, uint32_t insn)
2602 {
2603 CPUState *cs = env_cpu(env);
2604
2605 cs->halted = 1;
2606
2607 /* Condition for waking up at 0x100 */
2608 env->resume_as_sreset = (insn != PPC_PM_STOP) ||
2609 (env->spr[SPR_PSSCR] & PSSCR_EC);
2610
2611 /* HDECR is not to wake from PM state, it may have already fired */
2612 if (env->resume_as_sreset) {
2613 PowerPCCPU *cpu = env_archcpu(env);
2614 ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 0);
2615 }
2616
2617 ppc_maybe_interrupt(env);
2618 }
2619 #endif /* TARGET_PPC64 */
2620
static void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr)
2622 {
2623 /* MSR:POW cannot be set by any form of rfi */
2624 msr &= ~(1ULL << MSR_POW);
2625
2626 /* MSR:TGPR cannot be set by any form of rfi */
    if (env->flags & POWERPC_FLAG_TGPR) {
        msr &= ~(1ULL << MSR_TGPR);
    }
2629
2630 #ifdef TARGET_PPC64
2631 /* Switching to 32-bit ? Crop the nip */
2632 if (!msr_is_64bit(env, msr)) {
2633 nip = (uint32_t)nip;
2634 }
2635 #else
2636 nip = (uint32_t)nip;
2637 #endif
2638 /* XXX: beware: this is false if VLE is supported */
2639 env->nip = nip & ~((target_ulong)0x00000003);
2640 hreg_store_msr(env, msr, 1);
2641 trace_ppc_excp_rfi(env->nip, env->msr);
2642 /*
2643 * No need to raise an exception here, as rfi is always the last
2644 * insn of a TB
2645 */
2646 cpu_interrupt_exittb(env_cpu(env));
2647 /* Reset the reservation */
2648 env->reserve_addr = -1;
2649
2650 /* Context synchronizing: check if TCG TLB needs flush */
2651 check_tlb_flush(env, false);
2652 }
2653
void helper_rfi(CPUPPCState *env)
2655 {
2656 do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1] & 0xfffffffful);
2657 }
2658
2659 #ifdef TARGET_PPC64
void helper_rfid(CPUPPCState *env)
2661 {
2662 /*
2663 * The architecture defines a number of rules for which bits can
2664 * change but in practice, we handle this in hreg_store_msr()
2665 * which will be called by do_rfi(), so there is no need to filter
2666 * here
2667 */
2668 do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1]);
2669 }
2670
void helper_rfscv(CPUPPCState *env)
2672 {
2673 do_rfi(env, env->lr, env->ctr);
2674 }
2675
void helper_hrfid(CPUPPCState *env)
2677 {
2678 do_rfi(env, env->spr[SPR_HSRR0], env->spr[SPR_HSRR1]);
2679 }
2680
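/*
 * rfebb: return from event-based branch. Restores NIP from EBBRR and sets
 * or clears BESCR[GE] according to the S field of the instruction.
 */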
void helper_rfebb(CPUPPCState *env, target_ulong s)
2682 {
2683 target_ulong msr = env->msr;
2684
2685 /*
2686 * Handling of BESCR bits 32:33 according to PowerISA v3.1:
2687 *
2688 * "If BESCR 32:33 != 0b00 the instruction is treated as if
2689 * the instruction form were invalid."
2690 */
2691 if (env->spr[SPR_BESCR] & BESCR_INVALID) {
2692 raise_exception_err(env, POWERPC_EXCP_PROGRAM,
2693 POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
2694 }
2695
2696 env->nip = env->spr[SPR_EBBRR];
2697
2698 /* Switching to 32-bit ? Crop the nip */
2699 if (!msr_is_64bit(env, msr)) {
2700 env->nip = (uint32_t)env->spr[SPR_EBBRR];
2701 }
2702
2703 if (s) {
2704 env->spr[SPR_BESCR] |= BESCR_GE;
2705 } else {
2706 env->spr[SPR_BESCR] &= ~BESCR_GE;
2707 }
2708 }
2709
2710 /*
2711 * Triggers or queues an 'ebb_excp' EBB exception. All checks
2712 * but FSCR, HFSCR and msr_pr must be done beforehand.
2713 *
2714 * PowerISA v3.1 isn't clear about whether an EBB should be
2715 * postponed or cancelled if the EBB facility is unavailable.
2716 * Our assumption here is that the EBB is cancelled if both
2717 * FSCR and HFSCR EBB facilities aren't available.
2718 */
static void do_ebb(CPUPPCState *env, int ebb_excp)
2720 {
2721 PowerPCCPU *cpu = env_archcpu(env);
2722
2723 /*
2724 * FSCR_EBB and FSCR_IC_EBB are the same bits used with
2725 * HFSCR.
2726 */
2727 helper_fscr_facility_check(env, FSCR_EBB, 0, FSCR_IC_EBB);
2728 helper_hfscr_facility_check(env, FSCR_EBB, "EBB", FSCR_IC_EBB);
2729
2730 if (ebb_excp == POWERPC_EXCP_PERFM_EBB) {
2731 env->spr[SPR_BESCR] |= BESCR_PMEO;
2732 } else if (ebb_excp == POWERPC_EXCP_EXTERNAL_EBB) {
2733 env->spr[SPR_BESCR] |= BESCR_EEO;
2734 }
2735
2736 if (FIELD_EX64(env->msr, MSR, PR)) {
2737 powerpc_excp(cpu, ebb_excp);
2738 } else {
2739 ppc_set_irq(cpu, PPC_INTERRUPT_EBB, 1);
2740 }
2741 }
2742
void raise_ebb_perfm_exception(CPUPPCState *env)
2744 {
2745 bool perfm_ebb_enabled = env->spr[SPR_POWER_MMCR0] & MMCR0_EBE &&
2746 env->spr[SPR_BESCR] & BESCR_PME &&
2747 env->spr[SPR_BESCR] & BESCR_GE;
2748
2749 if (!perfm_ebb_enabled) {
2750 return;
2751 }
2752
2753 do_ebb(env, POWERPC_EXCP_PERFM_EBB);
2754 }
2755 #endif /* TARGET_PPC64 */
2756
2757 /*****************************************************************************/
2758 /* Embedded PowerPC specific helpers */
void helper_40x_rfci(CPUPPCState *env)
2760 {
2761 do_rfi(env, env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3]);
2762 }
2763
void helper_rfci(CPUPPCState *env)
2765 {
2766 do_rfi(env, env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1]);
2767 }
2768
void helper_rfdi(CPUPPCState *env)
2770 {
2771 /* FIXME: choose CSRR1 or DSRR1 based on cpu type */
2772 do_rfi(env, env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1]);
2773 }
2774
void helper_rfmci(CPUPPCState *env)
2776 {
2777 /* FIXME: choose CSRR1 or MCSRR1 based on cpu type */
2778 do_rfi(env, env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1]);
2779 }
2780 #endif /* !CONFIG_USER_ONLY */
2781
void helper_TW(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
               uint32_t flags)
2784 {
2785 if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
2786 ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
2787 ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
2788 ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
2789 ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
2790 raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
2791 POWERPC_EXCP_TRAP, GETPC());
2792 }
2793 }
2794
2795 #ifdef TARGET_PPC64
void helper_TD(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
               uint32_t flags)
2798 {
2799 if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
2800 ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
2801 ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
2802 ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
2803 ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01))))) {
2804 raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
2805 POWERPC_EXCP_TRAP, GETPC());
2806 }
2807 }
2808 #endif /* TARGET_PPC64 */
2809
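/*
 * One lane of the SIMON-like block cipher used for the hashst/hashchk
 * digest: 32-bit block, 64-bit key, 32 rounds with a per-lane key rotation.
 */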
static uint32_t helper_SIMON_LIKE_32_64(uint32_t x, uint64_t key, uint32_t lane)
2811 {
2812 const uint16_t c = 0xfffc;
2813 const uint64_t z0 = 0xfa2561cdf44ac398ULL;
2814 uint16_t z = 0, temp;
2815 uint16_t k[32], eff_k[32], xleft[33], xright[33], fxleft[32];
2816
2817 for (int i = 3; i >= 0; i--) {
2818 k[i] = key & 0xffff;
2819 key >>= 16;
2820 }
2821 xleft[0] = x & 0xffff;
2822 xright[0] = (x >> 16) & 0xffff;
2823
2824 for (int i = 0; i < 28; i++) {
2825 z = (z0 >> (63 - i)) & 1;
2826 temp = ror16(k[i + 3], 3) ^ k[i + 1];
2827 k[i + 4] = c ^ z ^ k[i] ^ temp ^ ror16(temp, 1);
2828 }
2829
2830 for (int i = 0; i < 8; i++) {
2831 eff_k[4 * i + 0] = k[4 * i + ((0 + lane) % 4)];
2832 eff_k[4 * i + 1] = k[4 * i + ((1 + lane) % 4)];
2833 eff_k[4 * i + 2] = k[4 * i + ((2 + lane) % 4)];
2834 eff_k[4 * i + 3] = k[4 * i + ((3 + lane) % 4)];
2835 }
2836
2837 for (int i = 0; i < 32; i++) {
2838 fxleft[i] = (rol16(xleft[i], 1) &
2839 rol16(xleft[i], 8)) ^ rol16(xleft[i], 2);
2840 xleft[i + 1] = xright[i] ^ fxleft[i] ^ eff_k[i];
2841 xright[i + 1] = xleft[i];
2842 }
2843
2844 return (((uint32_t)xright[32]) << 16) | xleft[32];
2845 }
2846
static uint64_t hash_digest(uint64_t ra, uint64_t rb, uint64_t key)
2848 {
2849 uint64_t stage0_h = 0ULL, stage0_l = 0ULL;
2850 uint64_t stage1_h, stage1_l;
2851
2852 for (int i = 0; i < 4; i++) {
2853 stage0_h |= ror64(rb & 0xff, 8 * (2 * i + 1));
2854 stage0_h |= ((ra >> 32) & 0xff) << (8 * 2 * i);
2855 stage0_l |= ror64((rb >> 32) & 0xff, 8 * (2 * i + 1));
2856 stage0_l |= (ra & 0xff) << (8 * 2 * i);
2857 rb >>= 8;
2858 ra >>= 8;
2859 }
2860
2861 stage1_h = (uint64_t)helper_SIMON_LIKE_32_64(stage0_h >> 32, key, 0) << 32;
2862 stage1_h |= helper_SIMON_LIKE_32_64(stage0_h, key, 1);
2863 stage1_l = (uint64_t)helper_SIMON_LIKE_32_64(stage0_l >> 32, key, 2) << 32;
2864 stage1_l |= helper_SIMON_LIKE_32_64(stage0_l, key, 3);
2865
2866 return stage1_h ^ stage1_l;
2867 }
2868
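/*
 * Compute the hash of (ra, rb) and either store it at ea (hashst/hashstp)
 * or compare it with the value loaded from ea and raise a trap-type
 * program interrupt on mismatch (hashchk/hashchkp).
 */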
static void do_hash(CPUPPCState *env, target_ulong ea, target_ulong ra,
                    target_ulong rb, uint64_t key, bool store)
2871 {
2872 uint64_t calculated_hash = hash_digest(ra, rb, key), loaded_hash;
2873
2874 if (store) {
2875 cpu_stq_data_ra(env, ea, calculated_hash, GETPC());
2876 } else {
2877 loaded_hash = cpu_ldq_data_ra(env, ea, GETPC());
2878 if (loaded_hash != calculated_hash) {
2879 raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
2880 POWERPC_EXCP_TRAP, GETPC());
2881 }
2882 }
2883 }
2884
2885 #include "qemu/guest-random.h"
2886
2887 #ifdef TARGET_PPC64
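/*
 * The hash instructions are no-ops unless the relevant DEXCR/HDEXCR aspect
 * (NPHIE for hashst/hashchk, PHIE for hashstp/hashchkp) is enabled for the
 * current privilege level.
 */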
2888 #define HELPER_HASH(op, key, store, dexcr_aspect) \
2889 void helper_##op(CPUPPCState *env, target_ulong ea, target_ulong ra, \
2890 target_ulong rb) \
2891 { \
2892 if (env->msr & R_MSR_PR_MASK) { \
2893 if (!(env->spr[SPR_DEXCR] & R_DEXCR_PRO_##dexcr_aspect##_MASK || \
2894 env->spr[SPR_HDEXCR] & R_HDEXCR_ENF_##dexcr_aspect##_MASK)) \
2895 return; \
2896 } else if (!(env->msr & R_MSR_HV_MASK)) { \
2897 if (!(env->spr[SPR_DEXCR] & R_DEXCR_PNH_##dexcr_aspect##_MASK || \
2898 env->spr[SPR_HDEXCR] & R_HDEXCR_ENF_##dexcr_aspect##_MASK)) \
2899 return; \
2900 } else if (!(env->msr & R_MSR_S_MASK)) { \
2901 if (!(env->spr[SPR_HDEXCR] & R_HDEXCR_HNU_##dexcr_aspect##_MASK)) \
2902 return; \
2903 } \
2904 \
2905 do_hash(env, ea, ra, rb, key, store); \
2906 }
2907 #else
2908 #define HELPER_HASH(op, key, store, dexcr_aspect) \
2909 void helper_##op(CPUPPCState *env, target_ulong ea, target_ulong ra, \
2910 target_ulong rb) \
2911 { \
2912 do_hash(env, ea, ra, rb, key, store); \
2913 }
2914 #endif /* TARGET_PPC64 */
2915
2916 HELPER_HASH(HASHST, env->spr[SPR_HASHKEYR], true, NPHIE)
2917 HELPER_HASH(HASHCHK, env->spr[SPR_HASHKEYR], false, NPHIE)
2918 HELPER_HASH(HASHSTP, env->spr[SPR_HASHPKEYR], true, PHIE)
2919 HELPER_HASH(HASHCHKP, env->spr[SPR_HASHPKEYR], false, PHIE)
2920
2921 #ifndef CONFIG_USER_ONLY
2922 /* Embedded.Processor Control */
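/* Map a msgsnd/msgclr doorbell message type to its PPC_INTERRUPT_*, or -1. */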
static int dbell2irq(target_ulong rb)
2924 {
2925 int msg = rb & DBELL_TYPE_MASK;
2926 int irq = -1;
2927
2928 switch (msg) {
2929 case DBELL_TYPE_DBELL:
2930 irq = PPC_INTERRUPT_DOORBELL;
2931 break;
2932 case DBELL_TYPE_DBELL_CRIT:
2933 irq = PPC_INTERRUPT_CDOORBELL;
2934 break;
2935 case DBELL_TYPE_G_DBELL:
2936 case DBELL_TYPE_G_DBELL_CRIT:
2937 case DBELL_TYPE_G_DBELL_MC:
2938 /* XXX implement */
2939 default:
2940 break;
2941 }
2942
2943 return irq;
2944 }
2945
void helper_msgclr(CPUPPCState *env, target_ulong rb)
2947 {
2948 int irq = dbell2irq(rb);
2949
2950 if (irq < 0) {
2951 return;
2952 }
2953
2954 ppc_set_irq(env_archcpu(env), irq, 0);
2955 }
2956
void helper_msgsnd(target_ulong rb)
2958 {
2959 int irq = dbell2irq(rb);
2960 int pir = rb & DBELL_PIRTAG_MASK;
2961 CPUState *cs;
2962
2963 if (irq < 0) {
2964 return;
2965 }
2966
2967 bql_lock();
2968 CPU_FOREACH(cs) {
2969 PowerPCCPU *cpu = POWERPC_CPU(cs);
2970 CPUPPCState *cenv = &cpu->env;
2971
2972 if ((rb & DBELL_BRDCAST_MASK) || (cenv->spr[SPR_BOOKE_PIR] == pir)) {
2973 ppc_set_irq(cpu, irq, 1);
2974 }
2975 }
2976 bql_unlock();
2977 }
2978
2979 /* Server Processor Control */
2980
static bool dbell_type_server(target_ulong rb)
2982 {
2983 /*
2984 * A Directed Hypervisor Doorbell message is sent only if the
2985 * message type is 5. All other types are reserved and the
2986 * instruction is a no-op
2987 */
2988 return (rb & DBELL_TYPE_MASK) == DBELL_TYPE_DBELL_SERVER;
2989 }
2990
static inline bool dbell_bcast_core(target_ulong rb)
2992 {
2993 return (rb & DBELL_BRDCAST_MASK) == DBELL_BRDCAST_CORE;
2994 }
2995
static inline bool dbell_bcast_subproc(target_ulong rb)
2997 {
2998 return (rb & DBELL_BRDCAST_MASK) == DBELL_BRDCAST_SUBPROC;
2999 }
3000
/*
 * Send an interrupt to a thread in the same core as env.
 */
static void msgsnd_core_tir(CPUPPCState *env, uint32_t target_tir, int irq)
3005 {
3006 PowerPCCPU *cpu = env_archcpu(env);
3007 CPUState *cs = env_cpu(env);
3008
3009 if (ppc_cpu_lpar_single_threaded(cs)) {
3010 if (target_tir == 0) {
3011 ppc_set_irq(cpu, irq, 1);
3012 }
3013 } else {
3014 CPUState *ccs;
3015
3016 /* Does iothread need to be locked for walking CPU list? */
3017 bql_lock();
3018 THREAD_SIBLING_FOREACH(cs, ccs) {
3019 PowerPCCPU *ccpu = POWERPC_CPU(ccs);
3020 if (target_tir == ppc_cpu_tir(ccpu)) {
3021 ppc_set_irq(ccpu, irq, 1);
3022 break;
3023 }
3024 }
3025 bql_unlock();
3026 }
3027 }
3028
void helper_book3s_msgclr(CPUPPCState *env, target_ulong rb)
3030 {
3031 if (!dbell_type_server(rb)) {
3032 return;
3033 }
3034
3035 ppc_set_irq(env_archcpu(env), PPC_INTERRUPT_HDOORBELL, 0);
3036 }
3037
void helper_book3s_msgsnd(CPUPPCState *env, target_ulong rb)
3039 {
3040 int pir = rb & DBELL_PROCIDTAG_MASK;
3041 bool brdcast = false;
3042 CPUState *cs, *ccs;
3043 PowerPCCPU *cpu;
3044
3045 if (!dbell_type_server(rb)) {
3046 return;
3047 }
3048
3049 /* POWER8 msgsnd is like msgsndp (targets a thread within core) */
3050 if (!(env->insns_flags2 & PPC2_ISA300)) {
3051 msgsnd_core_tir(env, rb & PPC_BITMASK(57, 63), PPC_INTERRUPT_HDOORBELL);
3052 return;
3053 }
3054
3055 /* POWER9 and later msgsnd is a global (targets any thread) */
3056 cpu = ppc_get_vcpu_by_pir(pir);
3057 if (!cpu) {
3058 return;
3059 }
3060 cs = CPU(cpu);
3061
3062 if (dbell_bcast_core(rb) || (dbell_bcast_subproc(rb) &&
3063 (env->flags & POWERPC_FLAG_SMT_1LPAR))) {
3064 brdcast = true;
3065 }
3066
3067 if (ppc_cpu_core_single_threaded(cs) || !brdcast) {
3068 ppc_set_irq(cpu, PPC_INTERRUPT_HDOORBELL, 1);
3069 return;
3070 }
3071
3072 /*
3073 * Why is bql needed for walking CPU list? Answer seems to be because ppc
3074 * irq handling needs it, but ppc_set_irq takes the lock itself if needed,
3075 * so could this be removed?
3076 */
3077 bql_lock();
3078 THREAD_SIBLING_FOREACH(cs, ccs) {
3079 ppc_set_irq(POWERPC_CPU(ccs), PPC_INTERRUPT_HDOORBELL, 1);
3080 }
3081 bql_unlock();
3082 }
3083
3084 #ifdef TARGET_PPC64
void helper_book3s_msgclrp(CPUPPCState *env, target_ulong rb)
3086 {
3087 helper_hfscr_facility_check(env, HFSCR_MSGP, "msgclrp", HFSCR_IC_MSGP);
3088
3089 if (!dbell_type_server(rb)) {
3090 return;
3091 }
3092
3093 ppc_set_irq(env_archcpu(env), PPC_INTERRUPT_DOORBELL, 0);
3094 }
3095
3096 /*
3097 * sends a message to another thread on the same
3098 * multi-threaded processor
3099 */
void helper_book3s_msgsndp(CPUPPCState *env, target_ulong rb)
3101 {
3102 helper_hfscr_facility_check(env, HFSCR_MSGP, "msgsndp", HFSCR_IC_MSGP);
3103
3104 if (!dbell_type_server(rb)) {
3105 return;
3106 }
3107
3108 msgsnd_core_tir(env, rb & PPC_BITMASK(57, 63), PPC_INTERRUPT_DOORBELL);
3109 }
3110 #endif /* TARGET_PPC64 */
3111
3112 /* Single-step tracing */
void helper_book3s_trace(CPUPPCState *env, target_ulong prev_ip)
3114 {
3115 uint32_t error_code = 0;
3116 if (env->insns_flags2 & PPC2_ISA207S) {
3117 /* Load/store reporting, SRR1[35, 36] and SDAR, are not implemented. */
3118 env->spr[SPR_POWER_SIAR] = prev_ip;
3119 error_code = PPC_BIT(33);
3120 }
3121 raise_exception_err(env, POWERPC_EXCP_TRACE, error_code);
3122 }
3123
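/*
 * Alignment interrupt: record the faulting address in the MMU-specific
 * DEAR/DAR register and stash the instruction image bits needed to
 * synthesize DSISR in env->error_code.
 */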
void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr)
3127 {
3128 CPUPPCState *env = cpu_env(cs);
3129 uint32_t insn;
3130
3131 /* Restore state and reload the insn we executed, for filling in DSISR. */
3132 cpu_restore_state(cs, retaddr);
3133 insn = ppc_ldl_code(env, env->nip);
3134
3135 switch (env->mmu_model) {
3136 case POWERPC_MMU_SOFT_4xx:
3137 env->spr[SPR_40x_DEAR] = vaddr;
3138 break;
3139 case POWERPC_MMU_BOOKE:
3140 case POWERPC_MMU_BOOKE206:
3141 env->spr[SPR_BOOKE_DEAR] = vaddr;
3142 break;
3143 default:
3144 env->spr[SPR_DAR] = vaddr;
3145 break;
3146 }
3147
3148 cs->exception_index = POWERPC_EXCP_ALIGN;
3149 env->error_code = insn & 0x03FF0000;
3150 cpu_loop_exit(cs);
3151 }
3152
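/*
 * A failed memory transaction (bus error) is reported as a machine check on
 * POWER8/9/10; DAR, DSISR and error_code encode the cause for the handler.
 * Other CPU models currently ignore the failure.
 */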
void ppc_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr vaddr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr)
3158 {
3159 CPUPPCState *env = cpu_env(cs);
3160
3161 switch (env->excp_model) {
3162 #if defined(TARGET_PPC64)
3163 case POWERPC_EXCP_POWER8:
3164 case POWERPC_EXCP_POWER9:
3165 case POWERPC_EXCP_POWER10:
3166 /*
3167 * Machine check codes can be found in processor User Manual or
3168 * Linux or skiboot source.
3169 */
3170 if (access_type == MMU_DATA_LOAD) {
3171 env->spr[SPR_DAR] = vaddr;
3172 env->spr[SPR_DSISR] = PPC_BIT(57);
3173 env->error_code = PPC_BIT(42);
3174
3175 } else if (access_type == MMU_DATA_STORE) {
3176 /*
3177 * MCE for stores in POWER is asynchronous so hardware does
3178 * not set DAR, but QEMU can do better.
3179 */
3180 env->spr[SPR_DAR] = vaddr;
3181 env->error_code = PPC_BIT(36) | PPC_BIT(43) | PPC_BIT(45);
3182 env->error_code |= PPC_BIT(42);
3183
3184 } else { /* Fetch */
3185 /*
3186 * is_prefix_insn_excp() tests !PPC_BIT(42) to avoid fetching
3187 * the instruction, so that must always be clear for fetches.
3188 */
3189 env->error_code = PPC_BIT(36) | PPC_BIT(44) | PPC_BIT(45);
3190 }
3191 break;
3192 #endif
3193 default:
3194 /*
3195 * TODO: Check behaviour for other CPUs, for now do nothing.
3196 * Could add a basic MCE even if real hardware ignores.
3197 */
3198 return;
3199 }
3200
3201 cs->exception_index = POWERPC_EXCP_MCHECK;
3202 cpu_loop_exit_restore(cs, retaddr);
3203 }
3204
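/*
 * Debug exception hook: a DAWR watchpoint hit is reported as a DSI with
 * DSISR bit 41 set, a CIABR breakpoint match as a trace interrupt.
 */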
void ppc_cpu_debug_excp_handler(CPUState *cs)
3206 {
3207 #if defined(TARGET_PPC64)
3208 CPUPPCState *env = cpu_env(cs);
3209
3210 if (env->insns_flags2 & PPC2_ISA207S) {
3211 if (cs->watchpoint_hit) {
3212 if (cs->watchpoint_hit->flags & BP_CPU) {
3213 env->spr[SPR_DAR] = cs->watchpoint_hit->hitaddr;
3214 env->spr[SPR_DSISR] = PPC_BIT(41);
3215 cs->watchpoint_hit = NULL;
3216 raise_exception(env, POWERPC_EXCP_DSI);
3217 }
3218 cs->watchpoint_hit = NULL;
3219 } else if (cpu_breakpoint_test(cs, env->nip, BP_CPU)) {
3220 raise_exception_err(env, POWERPC_EXCP_TRACE,
3221 PPC_BIT(33) | PPC_BIT(43));
3222 }
3223 }
3224 #endif
3225 }
3226
bool ppc_cpu_debug_check_breakpoint(CPUState *cs)
3228 {
3229 #if defined(TARGET_PPC64)
3230 CPUPPCState *env = cpu_env(cs);
3231
3232 if (env->insns_flags2 & PPC2_ISA207S) {
3233 target_ulong priv;
3234
3235 priv = env->spr[SPR_CIABR] & PPC_BITMASK(62, 63);
3236 switch (priv) {
3237 case 0x1: /* problem */
3238 return env->msr & ((target_ulong)1 << MSR_PR);
3239 case 0x2: /* supervisor */
3240 return (!(env->msr & ((target_ulong)1 << MSR_PR)) &&
3241 !(env->msr & ((target_ulong)1 << MSR_HV)));
3242 case 0x3: /* hypervisor */
3243 return (!(env->msr & ((target_ulong)1 << MSR_PR)) &&
3244 (env->msr & ((target_ulong)1 << MSR_HV)));
3245 default:
3246 g_assert_not_reached();
3247 }
3248 }
3249 #endif
3250
3251 return false;
3252 }
3253
bool ppc_cpu_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
3255 {
3256 #if defined(TARGET_PPC64)
3257 CPUPPCState *env = cpu_env(cs);
3258
3259 if (env->insns_flags2 & PPC2_ISA207S) {
3260 if (wp == env->dawr0_watchpoint) {
3261 uint32_t dawrx = env->spr[SPR_DAWRX0];
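            /*
             * DAWRX0 controls: HV/SV/PR select which privilege states the
             * watchpoint applies to, and WT (qualified by WTI) must match
             * the current MSR[DR] translation state.
             */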
3262 bool wt = extract32(dawrx, PPC_BIT_NR(59), 1);
3263 bool wti = extract32(dawrx, PPC_BIT_NR(60), 1);
3264 bool hv = extract32(dawrx, PPC_BIT_NR(61), 1);
3265 bool sv = extract32(dawrx, PPC_BIT_NR(62), 1);
            bool pr = extract32(dawrx, PPC_BIT_NR(63), 1);
3267
3268 if ((env->msr & ((target_ulong)1 << MSR_PR)) && !pr) {
3269 return false;
3270 } else if ((env->msr & ((target_ulong)1 << MSR_HV)) && !hv) {
3271 return false;
3272 } else if (!sv) {
3273 return false;
3274 }
3275
3276 if (!wti) {
3277 if (env->msr & ((target_ulong)1 << MSR_DR)) {
3278 if (!wt) {
3279 return false;
3280 }
3281 } else {
3282 if (wt) {
3283 return false;
3284 }
3285 }
3286 }
3287
3288 return true;
3289 }
3290 }
3291 #endif
3292
3293 return false;
3294 }
3295
3296 #endif /* !CONFIG_USER_ONLY */
3297 #endif /* CONFIG_TCG */
3298