/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu-44x.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>

/* The host stack layout: */
#define HOST_R1         0 /* Implied by stwu. */
#define HOST_CALLEE_LR  4
#define HOST_RUN        8
/* r2 is special: it holds 'current', and it is made nonvolatile in the
 * kernel with the -ffixed-r2 gcc option. */
#define HOST_R2         12
#define HOST_CR         16
#define HOST_NV_GPRS    20
#define __HOST_NV_GPR(n)  (HOST_NV_GPRS + ((n - 14) * 4))
#define HOST_NV_GPR(n)  __HOST_NV_GPR(__REG_##n)
#define HOST_MIN_STACK_SIZE (HOST_NV_GPR(R31) + 4)
#define HOST_STACK_SIZE (((HOST_MIN_STACK_SIZE + 15) / 16) * 16) /* Align. */
#define HOST_STACK_LR   (HOST_STACK_SIZE + 4) /* In caller stack frame. */

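/* Exit types for which extra state must be captured before the host can
 * clobber it: the faulting instruction (for emulation), DEAR, and ESR.
 * Each mask is tested against a one-hot (1 << exit number) value in
 * kvmppc_resume_host. */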
#define NEED_INST_MASK ((1<<BOOKE_INTERRUPT_PROGRAM) | \
                        (1<<BOOKE_INTERRUPT_DTLB_MISS) | \
                        (1<<BOOKE_INTERRUPT_DEBUG))

#define NEED_DEAR_MASK ((1<<BOOKE_INTERRUPT_DATA_STORAGE) | \
                        (1<<BOOKE_INTERRUPT_DTLB_MISS) | \
                        (1<<BOOKE_INTERRUPT_ALIGNMENT))

#define NEED_ESR_MASK ((1<<BOOKE_INTERRUPT_DATA_STORAGE) | \
                       (1<<BOOKE_INTERRUPT_INST_STORAGE) | \
                       (1<<BOOKE_INTERRUPT_PROGRAM) | \
                       (1<<BOOKE_INTERRUPT_DTLB_MISS) | \
                       (1<<BOOKE_INTERRUPT_ALIGNMENT))

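/* Each handler below runs with guest register state still live. It stashes
 * r4 in a scratch SPRG, locates the vcpu via the per-thread pointer, spills
 * just enough GPRs (r3-r6) and CTR to get working registers, records the
 * guest PC from the interrupt's save/restore register, and branches to
 * kvmppc_resume_host with the exit number in r5. */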
.macro KVM_HANDLER ivor_nr scratch srr0
_GLOBAL(kvmppc_handler_\ivor_nr)
	/* Get pointer to vcpu and record exit number. */
	mtspr	\scratch , r4
	mfspr   r4, SPRN_SPRG_THREAD
	lwz     r4, THREAD_KVM_VCPU(r4)
	stw	r3, VCPU_GPR(R3)(r4)
	stw	r5, VCPU_GPR(R5)(r4)
	stw	r6, VCPU_GPR(R6)(r4)
	mfspr	r3, \scratch
	mfctr	r5
	stw	r3, VCPU_GPR(R4)(r4)
	stw	r5, VCPU_CTR(r4)
	mfspr	r3, \srr0
	lis	r6, kvmppc_resume_host@h
	stw	r3, VCPU_PC(r4)
	li	r5, \ivor_nr
	ori	r6, r6, kvmppc_resume_host@l
	mtctr	r6
	bctr
.endm

.macro KVM_HANDLER_ADDR ivor_nr
	.long	kvmppc_handler_\ivor_nr
.endm

.macro KVM_HANDLER_END
	.long	kvmppc_handlers_end
.endm

_GLOBAL(kvmppc_handlers_start)
KVM_HANDLER BOOKE_INTERRUPT_CRITICAL SPRN_SPRG_RSCRATCH_CRIT SPRN_CSRR0
KVM_HANDLER BOOKE_INTERRUPT_MACHINE_CHECK SPRN_SPRG_RSCRATCH_MC SPRN_MCSRR0
KVM_HANDLER BOOKE_INTERRUPT_DATA_STORAGE SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_INST_STORAGE SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_EXTERNAL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_ALIGNMENT SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_PROGRAM SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_FP_UNAVAIL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_SYSCALL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_AP_UNAVAIL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_DECREMENTER SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_FIT SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_WATCHDOG SPRN_SPRG_RSCRATCH_CRIT SPRN_CSRR0
KVM_HANDLER BOOKE_INTERRUPT_DTLB_MISS SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_ITLB_MISS SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_DEBUG SPRN_SPRG_RSCRATCH_CRIT SPRN_CSRR0
KVM_HANDLER BOOKE_INTERRUPT_SPE_UNAVAIL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_SPE_FP_DATA SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_SPE_FP_ROUND SPRN_SPRG_RSCRATCH0 SPRN_SRR0
_GLOBAL(kvmppc_handlers_end)

/* Registers:
 *  SPRG_SCRATCH0: guest r4
 *  r4: vcpu pointer
 *  r5: KVM exit number
 */
_GLOBAL(kvmppc_resume_host)
	mfcr	r3
	stw	r3, VCPU_CR(r4)
	stw	r7, VCPU_GPR(R7)(r4)
	stw	r8, VCPU_GPR(R8)(r4)
	stw	r9, VCPU_GPR(R9)(r4)

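	/* Build a one-hot mask for this exit type; it is tested against the
	 * NEED_* masks below to decide what extra state to capture. */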
	li	r6, 1
	slw	r6, r6, r5

#ifdef CONFIG_KVM_EXIT_TIMING
	/* save exit time */
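	/* Consistent 64-bit timebase read: retry until TBU is unchanged
	 * across the TBL read, so the two halves belong together. */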
1:
	mfspr	r7, SPRN_TBRU
	mfspr	r8, SPRN_TBRL
	mfspr	r9, SPRN_TBRU
	cmpw	r9, r7
	bne	1b
	stw	r8, VCPU_TIMING_EXIT_TBL(r4)
	stw	r9, VCPU_TIMING_EXIT_TBU(r4)
#endif

	/* Save the faulting instruction and all GPRs for emulation. */
	andi.	r7, r6, NEED_INST_MASK
	beq	..skip_inst_copy
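	/* Load the guest instruction at SRR0 as data. Setting MSR[DS] makes
	 * the load use the same address space the guest was executing in;
	 * the original MSR is restored afterwards. */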
	mfspr	r9, SPRN_SRR0
	mfmsr	r8
	ori	r7, r8, MSR_DS
	mtmsr	r7
	isync
	lwz	r9, 0(r9)
	mtmsr	r8
	isync
	stw	r9, VCPU_LAST_INST(r4)

	stw	r15, VCPU_GPR(R15)(r4)
	stw	r16, VCPU_GPR(R16)(r4)
	stw	r17, VCPU_GPR(R17)(r4)
	stw	r18, VCPU_GPR(R18)(r4)
	stw	r19, VCPU_GPR(R19)(r4)
	stw	r20, VCPU_GPR(R20)(r4)
	stw	r21, VCPU_GPR(R21)(r4)
	stw	r22, VCPU_GPR(R22)(r4)
	stw	r23, VCPU_GPR(R23)(r4)
	stw	r24, VCPU_GPR(R24)(r4)
	stw	r25, VCPU_GPR(R25)(r4)
	stw	r26, VCPU_GPR(R26)(r4)
	stw	r27, VCPU_GPR(R27)(r4)
	stw	r28, VCPU_GPR(R28)(r4)
	stw	r29, VCPU_GPR(R29)(r4)
	stw	r30, VCPU_GPR(R30)(r4)
	stw	r31, VCPU_GPR(R31)(r4)
..skip_inst_copy:

	/* Also grab DEAR and ESR before the host can clobber them. */

	andi.	r7, r6, NEED_DEAR_MASK
	beq	..skip_dear
	mfspr	r9, SPRN_DEAR
	stw	r9, VCPU_FAULT_DEAR(r4)
..skip_dear:

	andi.	r7, r6, NEED_ESR_MASK
	beq	..skip_esr
	mfspr	r9, SPRN_ESR
	stw	r9, VCPU_FAULT_ESR(r4)
..skip_esr:

	/* Save remaining volatile guest register state to vcpu. */
	stw	r0, VCPU_GPR(R0)(r4)
	stw	r1, VCPU_GPR(R1)(r4)
	stw	r2, VCPU_GPR(R2)(r4)
	stw	r10, VCPU_GPR(R10)(r4)
	stw	r11, VCPU_GPR(R11)(r4)
	stw	r12, VCPU_GPR(R12)(r4)
	stw	r13, VCPU_GPR(R13)(r4)
	stw	r14, VCPU_GPR(R14)(r4) /* We need a NV GPR below. */
	mflr	r3
	stw	r3, VCPU_LR(r4)
	mfxer	r3
	stw	r3, VCPU_XER(r4)

	/* Restore host stack pointer and PID before IVPR, since the host
	 * exception handlers use them. */
	lwz	r1, VCPU_HOST_STACK(r4)
	lwz	r3, VCPU_HOST_PID(r4)
	mtspr	SPRN_PID, r3

#ifdef CONFIG_FSL_BOOKE
	/* We cheat: Linux doesn't use PID1, so it is always 0 here. */
	lis	r3, 0
	mtspr	SPRN_PID1, r3
#endif

	/* Restore host IVPR before re-enabling interrupts. We cheat and know
	 * that Linux IVPR is always 0xc0000000. */
	lis	r3, 0xc000
	mtspr	SPRN_IVPR, r3

	/* Switch to kernel stack and jump to handler. */
	LOAD_REG_ADDR(r3, kvmppc_handle_exit)
	mtctr	r3
	lwz	r3, HOST_RUN(r1)
	lwz	r2, HOST_R2(r1)
	mr	r14, r4 /* Save vcpu pointer. */

	bctrl	/* kvmppc_handle_exit() */

	/* Restore vcpu pointer and the nonvolatiles we used. */
	mr	r4, r14
	lwz	r14, VCPU_GPR(R14)(r4)

	/* Sometimes instruction emulation must restore complete GPR state. */
	andi.	r5, r3, RESUME_FLAG_NV
	beq	..skip_nv_load
	lwz	r15, VCPU_GPR(R15)(r4)
	lwz	r16, VCPU_GPR(R16)(r4)
	lwz	r17, VCPU_GPR(R17)(r4)
	lwz	r18, VCPU_GPR(R18)(r4)
	lwz	r19, VCPU_GPR(R19)(r4)
	lwz	r20, VCPU_GPR(R20)(r4)
	lwz	r21, VCPU_GPR(R21)(r4)
	lwz	r22, VCPU_GPR(R22)(r4)
	lwz	r23, VCPU_GPR(R23)(r4)
	lwz	r24, VCPU_GPR(R24)(r4)
	lwz	r25, VCPU_GPR(R25)(r4)
	lwz	r26, VCPU_GPR(R26)(r4)
	lwz	r27, VCPU_GPR(R27)(r4)
	lwz	r28, VCPU_GPR(R28)(r4)
	lwz	r29, VCPU_GPR(R29)(r4)
	lwz	r30, VCPU_GPR(R30)(r4)
	lwz	r31, VCPU_GPR(R31)(r4)
..skip_nv_load:

	/* Should we return to the guest? */
	andi.	r5, r3, RESUME_FLAG_HOST
	beq	lightweight_exit

	srawi	r3, r3, 2 /* Shift -ERR back down. */

heavyweight_exit:
	/* Not returning to guest. */

#ifdef CONFIG_SPE
	/* save guest SPEFSCR and load host SPEFSCR */
	mfspr	r9, SPRN_SPEFSCR
	stw	r9, VCPU_SPEFSCR(r4)
	lwz	r9, VCPU_HOST_SPEFSCR(r4)
	mtspr	SPRN_SPEFSCR, r9
#endif

	/* We already saved guest volatile register state; now save the
	 * non-volatiles. */
	stw	r15, VCPU_GPR(R15)(r4)
	stw	r16, VCPU_GPR(R16)(r4)
	stw	r17, VCPU_GPR(R17)(r4)
	stw	r18, VCPU_GPR(R18)(r4)
	stw	r19, VCPU_GPR(R19)(r4)
	stw	r20, VCPU_GPR(R20)(r4)
	stw	r21, VCPU_GPR(R21)(r4)
	stw	r22, VCPU_GPR(R22)(r4)
	stw	r23, VCPU_GPR(R23)(r4)
	stw	r24, VCPU_GPR(R24)(r4)
	stw	r25, VCPU_GPR(R25)(r4)
	stw	r26, VCPU_GPR(R26)(r4)
	stw	r27, VCPU_GPR(R27)(r4)
	stw	r28, VCPU_GPR(R28)(r4)
	stw	r29, VCPU_GPR(R29)(r4)
	stw	r30, VCPU_GPR(R30)(r4)
	stw	r31, VCPU_GPR(R31)(r4)

	/* Load host non-volatile register state from host stack. */
	lwz	r14, HOST_NV_GPR(R14)(r1)
	lwz	r15, HOST_NV_GPR(R15)(r1)
	lwz	r16, HOST_NV_GPR(R16)(r1)
	lwz	r17, HOST_NV_GPR(R17)(r1)
	lwz	r18, HOST_NV_GPR(R18)(r1)
	lwz	r19, HOST_NV_GPR(R19)(r1)
	lwz	r20, HOST_NV_GPR(R20)(r1)
	lwz	r21, HOST_NV_GPR(R21)(r1)
	lwz	r22, HOST_NV_GPR(R22)(r1)
	lwz	r23, HOST_NV_GPR(R23)(r1)
	lwz	r24, HOST_NV_GPR(R24)(r1)
	lwz	r25, HOST_NV_GPR(R25)(r1)
	lwz	r26, HOST_NV_GPR(R26)(r1)
	lwz	r27, HOST_NV_GPR(R27)(r1)
	lwz	r28, HOST_NV_GPR(R28)(r1)
	lwz	r29, HOST_NV_GPR(R29)(r1)
	lwz	r30, HOST_NV_GPR(R30)(r1)
	lwz	r31, HOST_NV_GPR(R31)(r1)

	/* Return to kvm_vcpu_run(). */
	lwz	r4, HOST_STACK_LR(r1)
	lwz	r5, HOST_CR(r1)
	addi	r1, r1, HOST_STACK_SIZE
	mtlr	r4
	mtcr	r5
	/* r3 still contains the return code from kvmppc_handle_exit(). */
	blr


/* Registers:
 *  r3: kvm_run pointer
 *  r4: vcpu pointer
 */
_GLOBAL(__kvmppc_vcpu_run)
	stwu	r1, -HOST_STACK_SIZE(r1)
	stw	r1, VCPU_HOST_STACK(r4)	/* Save stack pointer to vcpu. */

	/* Save host state to stack. */
	stw	r3, HOST_RUN(r1)
	mflr	r3
	stw	r3, HOST_STACK_LR(r1)
	mfcr	r5
	stw	r5, HOST_CR(r1)

	/* Save host non-volatile register state to stack. */
	stw	r14, HOST_NV_GPR(R14)(r1)
	stw	r15, HOST_NV_GPR(R15)(r1)
	stw	r16, HOST_NV_GPR(R16)(r1)
	stw	r17, HOST_NV_GPR(R17)(r1)
	stw	r18, HOST_NV_GPR(R18)(r1)
	stw	r19, HOST_NV_GPR(R19)(r1)
	stw	r20, HOST_NV_GPR(R20)(r1)
	stw	r21, HOST_NV_GPR(R21)(r1)
	stw	r22, HOST_NV_GPR(R22)(r1)
	stw	r23, HOST_NV_GPR(R23)(r1)
	stw	r24, HOST_NV_GPR(R24)(r1)
	stw	r25, HOST_NV_GPR(R25)(r1)
	stw	r26, HOST_NV_GPR(R26)(r1)
	stw	r27, HOST_NV_GPR(R27)(r1)
	stw	r28, HOST_NV_GPR(R28)(r1)
	stw	r29, HOST_NV_GPR(R29)(r1)
	stw	r30, HOST_NV_GPR(R30)(r1)
	stw	r31, HOST_NV_GPR(R31)(r1)

	/* Load guest non-volatiles. */
	lwz	r14, VCPU_GPR(R14)(r4)
	lwz	r15, VCPU_GPR(R15)(r4)
	lwz	r16, VCPU_GPR(R16)(r4)
	lwz	r17, VCPU_GPR(R17)(r4)
	lwz	r18, VCPU_GPR(R18)(r4)
	lwz	r19, VCPU_GPR(R19)(r4)
	lwz	r20, VCPU_GPR(R20)(r4)
	lwz	r21, VCPU_GPR(R21)(r4)
	lwz	r22, VCPU_GPR(R22)(r4)
	lwz	r23, VCPU_GPR(R23)(r4)
	lwz	r24, VCPU_GPR(R24)(r4)
	lwz	r25, VCPU_GPR(R25)(r4)
	lwz	r26, VCPU_GPR(R26)(r4)
	lwz	r27, VCPU_GPR(R27)(r4)
	lwz	r28, VCPU_GPR(R28)(r4)
	lwz	r29, VCPU_GPR(R29)(r4)
	lwz	r30, VCPU_GPR(R30)(r4)
	lwz	r31, VCPU_GPR(R31)(r4)

#ifdef CONFIG_SPE
	/* save host SPEFSCR and load guest SPEFSCR */
	mfspr	r3, SPRN_SPEFSCR
	stw	r3, VCPU_HOST_SPEFSCR(r4)
	lwz	r3, VCPU_SPEFSCR(r4)
	mtspr	SPRN_SPEFSCR, r3
#endif

lightweight_exit:
	stw	r2, HOST_R2(r1)

	mfspr	r3, SPRN_PID
	stw	r3, VCPU_HOST_PID(r4)
	lwz	r3, VCPU_SHADOW_PID(r4)
	mtspr	SPRN_PID, r3

#ifdef CONFIG_FSL_BOOKE
	lwz	r3, VCPU_SHADOW_PID1(r4)
	mtspr	SPRN_PID1, r3
#endif

#ifdef CONFIG_44x
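	/* Flash-invalidate the instruction cache; on 440 cores iccci ignores
	 * its operands and invalidates the entire i-cache. */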
	iccci	0, 0 /* XXX hack */
#endif

	/* Load some guest volatiles. */
	lwz	r0, VCPU_GPR(R0)(r4)
	lwz	r2, VCPU_GPR(R2)(r4)
	lwz	r9, VCPU_GPR(R9)(r4)
	lwz	r10, VCPU_GPR(R10)(r4)
	lwz	r11, VCPU_GPR(R11)(r4)
	lwz	r12, VCPU_GPR(R12)(r4)
	lwz	r13, VCPU_GPR(R13)(r4)
	lwz	r3, VCPU_LR(r4)
	mtlr	r3
	lwz	r3, VCPU_XER(r4)
	mtxer	r3

	/* Switch the IVPR. XXX If we take a TLB miss after this we're screwed,
	 * so how do we make sure the vcpu won't fault? */
	lis	r8, kvmppc_booke_handlers@ha
	lwz	r8, kvmppc_booke_handlers@l(r8)
	mtspr	SPRN_IVPR, r8

	lwz	r5, VCPU_SHARED(r4)

	/* Can't switch the stack pointer until after IVPR is switched,
	 * because host interrupt handlers would get confused. */
	lwz	r1, VCPU_GPR(R1)(r4)

	/*
	 * Host interrupt handlers may have clobbered these
	 * guest-readable SPRGs, or the guest kernel may have
	 * written directly to the shared area, so we
	 * need to reload them here with the guest's values.
	 */
	PPC_LD(r3, VCPU_SHARED_SPRG4, r5)
	mtspr	SPRN_SPRG4W, r3
	PPC_LD(r3, VCPU_SHARED_SPRG5, r5)
	mtspr	SPRN_SPRG5W, r3
	PPC_LD(r3, VCPU_SHARED_SPRG6, r5)
	mtspr	SPRN_SPRG6W, r3
	PPC_LD(r3, VCPU_SHARED_SPRG7, r5)
	mtspr	SPRN_SPRG7W, r3

#ifdef CONFIG_KVM_EXIT_TIMING
	/* save enter time */
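	/* Same consistent TBU/TBL/TBU read sequence as on the exit path. */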
1:
	mfspr	r6, SPRN_TBRU
	mfspr	r7, SPRN_TBRL
	mfspr	r8, SPRN_TBRU
	cmpw	r8, r6
	bne	1b
	stw	r7, VCPU_TIMING_LAST_ENTER_TBL(r4)
	stw	r8, VCPU_TIMING_LAST_ENTER_TBU(r4)
#endif

	/* Finish loading guest volatiles and jump to guest. */
	lwz	r3, VCPU_CTR(r4)
	lwz	r5, VCPU_CR(r4)
	lwz	r6, VCPU_PC(r4)
	lwz	r7, VCPU_SHADOW_MSR(r4)
	mtctr	r3
	mtcr	r5
	mtsrr0	r6
	mtsrr1	r7
	lwz	r5, VCPU_GPR(R5)(r4)
	lwz	r6, VCPU_GPR(R6)(r4)
	lwz	r7, VCPU_GPR(R7)(r4)
	lwz	r8, VCPU_GPR(R8)(r4)

	/* Clear any debug events which occurred since we disabled MSR[DE].
	 * XXX This gives us a 3-instruction window in which a breakpoint
	 * intended for guest context could fire in the host instead. */
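	/* DBSR status bits are write-one-to-clear, so write all ones. */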
	lis	r3, 0xffff
	ori	r3, r3, 0xffff
	mtspr	SPRN_DBSR, r3

	lwz	r3, VCPU_GPR(R3)(r4)
	lwz	r4, VCPU_GPR(R4)(r4)
	rfi

	.data
	.align	4
	.globl	kvmppc_booke_handler_addr
kvmppc_booke_handler_addr:
KVM_HANDLER_ADDR BOOKE_INTERRUPT_CRITICAL
KVM_HANDLER_ADDR BOOKE_INTERRUPT_MACHINE_CHECK
KVM_HANDLER_ADDR BOOKE_INTERRUPT_DATA_STORAGE
KVM_HANDLER_ADDR BOOKE_INTERRUPT_INST_STORAGE
KVM_HANDLER_ADDR BOOKE_INTERRUPT_EXTERNAL
KVM_HANDLER_ADDR BOOKE_INTERRUPT_ALIGNMENT
KVM_HANDLER_ADDR BOOKE_INTERRUPT_PROGRAM
KVM_HANDLER_ADDR BOOKE_INTERRUPT_FP_UNAVAIL
KVM_HANDLER_ADDR BOOKE_INTERRUPT_SYSCALL
KVM_HANDLER_ADDR BOOKE_INTERRUPT_AP_UNAVAIL
KVM_HANDLER_ADDR BOOKE_INTERRUPT_DECREMENTER
KVM_HANDLER_ADDR BOOKE_INTERRUPT_FIT
KVM_HANDLER_ADDR BOOKE_INTERRUPT_WATCHDOG
KVM_HANDLER_ADDR BOOKE_INTERRUPT_DTLB_MISS
KVM_HANDLER_ADDR BOOKE_INTERRUPT_ITLB_MISS
KVM_HANDLER_ADDR BOOKE_INTERRUPT_DEBUG
KVM_HANDLER_ADDR BOOKE_INTERRUPT_SPE_UNAVAIL
KVM_HANDLER_ADDR BOOKE_INTERRUPT_SPE_FP_DATA
KVM_HANDLER_ADDR BOOKE_INTERRUPT_SPE_FP_ROUND
KVM_HANDLER_END /* Always keep this entry last. */

#ifdef CONFIG_SPE
_GLOBAL(kvmppc_save_guest_spe)
	cmpi	0,r3,0
	beqlr-
	SAVE_32EVRS(0, r4, r3, VCPU_EVR)
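	/* There is no direct move-from for the SPE accumulator: zero evr6,
	 * then evmwumiaa (evr6 = ACC + 0 * 0) copies ACC into evr6 so it
	 * can be stored. */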
	evxor   evr6, evr6, evr6
	evmwumiaa evr6, evr6, evr6
	li	r4,VCPU_ACC
	evstddx evr6, r4, r3		/* save acc */
	blr

_GLOBAL(kvmppc_load_guest_spe)
	cmpi	0,r3,0
	beqlr-
	li      r4,VCPU_ACC
	evlddx  evr6,r4,r3
	evmra   evr6,evr6		/* load acc */
	REST_32EVRS(0, r4, r3, VCPU_EVR)
	blr
#endif
