/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu-44x.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>

/* The host stack layout: */
#define HOST_R1         0 /* Implied by stwu. */
#define HOST_CALLEE_LR  4
#define HOST_RUN        8
/* r2 is special: it holds 'current', and it is made nonvolatile in the
 * kernel with the -ffixed-r2 gcc option. */
#define HOST_R2         12
#define HOST_CR         16
#define HOST_NV_GPRS    20
#define __HOST_NV_GPR(n)  (HOST_NV_GPRS + ((n - 14) * 4))
#define HOST_NV_GPR(n)  __HOST_NV_GPR(__REG_##n)
#define HOST_MIN_STACK_SIZE (HOST_NV_GPR(R31) + 4)
#define HOST_STACK_SIZE (((HOST_MIN_STACK_SIZE + 15) / 16) * 16) /* Align. */
#define HOST_STACK_LR   (HOST_STACK_SIZE + 4) /* In caller stack frame. */
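
/* The frame these offsets imply (a sketch, derived from the defines above):
 *    0: back chain (written by the stwu in __kvmppc_vcpu_run)
 *    4: LR save slot for callees (HOST_CALLEE_LR)
 *    8: kvm_run pointer
 *   12: host r2
 *   16: host CR
 *   20..88: host nonvolatile GPRs r14-r31
 * HOST_MIN_STACK_SIZE is thus 92, padded to a 16-byte-aligned 96, and the
 * host LR is stored at HOST_STACK_SIZE + 4, i.e. in the caller's frame. */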

#define NEED_INST_MASK ((1<<BOOKE_INTERRUPT_PROGRAM) | \
                        (1<<BOOKE_INTERRUPT_DTLB_MISS) | \
                        (1<<BOOKE_INTERRUPT_DEBUG))

#define NEED_DEAR_MASK ((1<<BOOKE_INTERRUPT_DATA_STORAGE) | \
                        (1<<BOOKE_INTERRUPT_DTLB_MISS) | \
                        (1<<BOOKE_INTERRUPT_ALIGNMENT))

#define NEED_ESR_MASK ((1<<BOOKE_INTERRUPT_DATA_STORAGE) | \
                       (1<<BOOKE_INTERRUPT_INST_STORAGE) | \
                       (1<<BOOKE_INTERRUPT_PROGRAM) | \
                       (1<<BOOKE_INTERRUPT_DTLB_MISS) | \
                       (1<<BOOKE_INTERRUPT_ALIGNMENT))

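/* Each mask is a bitmap indexed by exit number: kvmppc_resume_host computes
 * 1 << exit_nr and tests it against these to decide which fault state (the
 * last instruction, DEAR, ESR) must be captured before the host can
 * clobber it. */

/* Every IVOR stub below saves a few guest GPRs into the vcpu, records the
 * exit number in r5, and branches to kvmppc_resume_host through CTR (the
 * guest CTR is saved first, which frees CTR for the jump). */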
.macro __KVM_HANDLER ivor_nr scratch srr0
	/* Get pointer to vcpu and record exit number. */
	mtspr	\scratch , r4
	mfspr	r4, SPRN_SPRG_THREAD
	lwz	r4, THREAD_KVM_VCPU(r4)
	stw	r3, VCPU_GPR(R3)(r4)
	stw	r5, VCPU_GPR(R5)(r4)
	stw	r6, VCPU_GPR(R6)(r4)
	mfspr	r3, \scratch
	mfctr	r5
	stw	r3, VCPU_GPR(R4)(r4)
	stw	r5, VCPU_CTR(r4)
	mfspr	r3, \srr0
	lis	r6, kvmppc_resume_host@h
	stw	r3, VCPU_PC(r4)
	li	r5, \ivor_nr
	ori	r6, r6, kvmppc_resume_host@l
	mtctr	r6
	bctr
.endm

.macro KVM_HANDLER ivor_nr scratch srr0
_GLOBAL(kvmppc_handler_\ivor_nr)
	__KVM_HANDLER \ivor_nr \scratch \srr0
.endm

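/* Debug interrupts need extra care: they can also fire inside the KVM
 * enter/exit path itself.  CSRR1[PR] tells us where we came from: the guest
 * runs with MSR[PR] set, so if PR is clear the hit was in host kernel
 * context, and we mask MSR[DE] in CSRR1, wipe DBSR, and return with rfci;
 * if PR is set, the interrupt came from the guest and we fall into the
 * normal exit path. */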
.macro KVM_DBG_HANDLER ivor_nr scratch srr0
_GLOBAL(kvmppc_handler_\ivor_nr)
	mtspr	\scratch, r4
	mfspr	r4, SPRN_SPRG_THREAD
	lwz	r4, THREAD_KVM_VCPU(r4)
	stw	r3, VCPU_CRIT_SAVE(r4)
	mfcr	r3
	mfspr	r4, SPRN_CSRR1
	andi.	r4, r4, MSR_PR
	bne	1f
	/* debug interrupt happened in enter/exit path */
	mfspr	r4, SPRN_CSRR1
	rlwinm	r4, r4, 0, ~MSR_DE
	mtspr	SPRN_CSRR1, r4
	lis	r4, 0xffff
	ori	r4, r4, 0xffff
	mtspr	SPRN_DBSR, r4
	mfspr	r4, SPRN_SPRG_THREAD
	lwz	r4, THREAD_KVM_VCPU(r4)
	mtcr	r3
	lwz	r3, VCPU_CRIT_SAVE(r4)
	mfspr	r4, \scratch
	rfci
1:	/* debug interrupt happened in guest */
	mtcr	r3
	mfspr	r4, SPRN_SPRG_THREAD
	lwz	r4, THREAD_KVM_VCPU(r4)
	lwz	r3, VCPU_CRIT_SAVE(r4)
	mfspr	r4, \scratch
	__KVM_HANDLER \ivor_nr \scratch \srr0
.endm

.macro KVM_HANDLER_ADDR ivor_nr
	.long	kvmppc_handler_\ivor_nr
.endm

.macro KVM_HANDLER_END
	.long	kvmppc_handlers_end
.endm

_GLOBAL(kvmppc_handlers_start)
KVM_HANDLER BOOKE_INTERRUPT_CRITICAL SPRN_SPRG_RSCRATCH_CRIT SPRN_CSRR0
KVM_HANDLER BOOKE_INTERRUPT_MACHINE_CHECK  SPRN_SPRG_RSCRATCH_MC SPRN_MCSRR0
KVM_HANDLER BOOKE_INTERRUPT_DATA_STORAGE SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_INST_STORAGE SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_EXTERNAL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_ALIGNMENT SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_PROGRAM SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_FP_UNAVAIL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_SYSCALL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_AP_UNAVAIL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_DECREMENTER SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_FIT SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_WATCHDOG SPRN_SPRG_RSCRATCH_CRIT SPRN_CSRR0
KVM_HANDLER BOOKE_INTERRUPT_DTLB_MISS SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_ITLB_MISS SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_DBG_HANDLER BOOKE_INTERRUPT_DEBUG SPRN_SPRG_RSCRATCH_CRIT SPRN_CSRR0
KVM_HANDLER BOOKE_INTERRUPT_SPE_UNAVAIL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_SPE_FP_DATA SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_SPE_FP_ROUND SPRN_SPRG_RSCRATCH0 SPRN_SRR0
_GLOBAL(kvmppc_handlers_end)

/* Registers:
 *  SPRG_SCRATCH0: guest r4
 *  r4: vcpu pointer
 *  r5: KVM exit number
 */
_GLOBAL(kvmppc_resume_host)
	mfcr	r3
	stw	r3, VCPU_CR(r4)
	stw	r7, VCPU_GPR(R7)(r4)
	stw	r8, VCPU_GPR(R8)(r4)
	stw	r9, VCPU_GPR(R9)(r4)

	li	r6, 1
	slw	r6, r6, r5
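	/* r6 = 1 << exit number; tested against the NEED_*_MASKs below. */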

#ifdef CONFIG_KVM_EXIT_TIMING
	/* save exit time */
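	/* Reading TBU on both sides of TBL catches a carry between the two
	 * halves of the timebase; if the upper half changed, retry. */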
1:
	mfspr	r7, SPRN_TBRU
	mfspr	r8, SPRN_TBRL
	mfspr	r9, SPRN_TBRU
	cmpw	r9, r7
	bne	1b
	stw	r8, VCPU_TIMING_EXIT_TBL(r4)
	stw	r9, VCPU_TIMING_EXIT_TBU(r4)
#endif

	/* Save the faulting instruction and all GPRs for emulation. */
	andi.	r7, r6, NEED_INST_MASK
	beq	..skip_inst_copy
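	/* The guest executes translated with IS/DS set, so flip MSR[DS] on
	 * around the load: the lwz below then fetches the instruction
	 * through the guest's TLB mapping of SRR0. */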
	mfspr	r9, SPRN_SRR0
	mfmsr	r8
	ori	r7, r8, MSR_DS
	mtmsr	r7
	isync
	lwz	r9, 0(r9)
	mtmsr	r8
	isync
	stw	r9, VCPU_LAST_INST(r4)

	stw	r15, VCPU_GPR(R15)(r4)
	stw	r16, VCPU_GPR(R16)(r4)
	stw	r17, VCPU_GPR(R17)(r4)
	stw	r18, VCPU_GPR(R18)(r4)
	stw	r19, VCPU_GPR(R19)(r4)
	stw	r20, VCPU_GPR(R20)(r4)
	stw	r21, VCPU_GPR(R21)(r4)
	stw	r22, VCPU_GPR(R22)(r4)
	stw	r23, VCPU_GPR(R23)(r4)
	stw	r24, VCPU_GPR(R24)(r4)
	stw	r25, VCPU_GPR(R25)(r4)
	stw	r26, VCPU_GPR(R26)(r4)
	stw	r27, VCPU_GPR(R27)(r4)
	stw	r28, VCPU_GPR(R28)(r4)
	stw	r29, VCPU_GPR(R29)(r4)
	stw	r30, VCPU_GPR(R30)(r4)
	stw	r31, VCPU_GPR(R31)(r4)
..skip_inst_copy:

	/* Also grab DEAR and ESR before the host can clobber them. */

	andi.	r7, r6, NEED_DEAR_MASK
	beq	..skip_dear
	mfspr	r9, SPRN_DEAR
	stw	r9, VCPU_FAULT_DEAR(r4)
..skip_dear:

	andi.	r7, r6, NEED_ESR_MASK
	beq	..skip_esr
	mfspr	r9, SPRN_ESR
	stw	r9, VCPU_FAULT_ESR(r4)
..skip_esr:

	/* Save remaining volatile guest register state to vcpu. */
	stw	r0, VCPU_GPR(R0)(r4)
	stw	r1, VCPU_GPR(R1)(r4)
	stw	r2, VCPU_GPR(R2)(r4)
	stw	r10, VCPU_GPR(R10)(r4)
	stw	r11, VCPU_GPR(R11)(r4)
	stw	r12, VCPU_GPR(R12)(r4)
	stw	r13, VCPU_GPR(R13)(r4)
	stw	r14, VCPU_GPR(R14)(r4) /* We need a NV GPR below. */
	mflr	r3
	stw	r3, VCPU_LR(r4)
	mfxer	r3
	stw	r3, VCPU_XER(r4)

	/* Restore host stack pointer and PID before IVPR, since the host
	 * exception handlers use them. */
	lwz	r1, VCPU_HOST_STACK(r4)
	lwz	r3, VCPU_HOST_PID(r4)
	mtspr	SPRN_PID, r3

#ifdef CONFIG_FSL_BOOKE
	/* we cheat and know that Linux doesn't use PID1, which is always 0 */
	lis	r3, 0
	mtspr	SPRN_PID1, r3
#endif

	/* Restore host IVPR before re-enabling interrupts. We cheat and know
	 * that Linux IVPR is always 0xc0000000. */
	lis	r3, 0xc000
	mtspr	SPRN_IVPR, r3

	/* Switch to kernel stack and jump to handler. */
	LOAD_REG_ADDR(r3, kvmppc_handle_exit)
	mtctr	r3
	lwz	r3, HOST_RUN(r1)
	lwz	r2, HOST_R2(r1)
	mr	r14, r4 /* Save vcpu pointer. */

	bctrl	/* kvmppc_handle_exit() */

	/* Restore vcpu pointer and the nonvolatiles we used. */
	mr	r4, r14
	lwz	r14, VCPU_GPR(R14)(r4)

	/* Sometimes instruction emulation must restore complete GPR state. */
	andi.	r5, r3, RESUME_FLAG_NV
	beq	..skip_nv_load
	lwz	r15, VCPU_GPR(R15)(r4)
	lwz	r16, VCPU_GPR(R16)(r4)
	lwz	r17, VCPU_GPR(R17)(r4)
	lwz	r18, VCPU_GPR(R18)(r4)
	lwz	r19, VCPU_GPR(R19)(r4)
	lwz	r20, VCPU_GPR(R20)(r4)
	lwz	r21, VCPU_GPR(R21)(r4)
	lwz	r22, VCPU_GPR(R22)(r4)
	lwz	r23, VCPU_GPR(R23)(r4)
	lwz	r24, VCPU_GPR(R24)(r4)
	lwz	r25, VCPU_GPR(R25)(r4)
	lwz	r26, VCPU_GPR(R26)(r4)
	lwz	r27, VCPU_GPR(R27)(r4)
	lwz	r28, VCPU_GPR(R28)(r4)
	lwz	r29, VCPU_GPR(R29)(r4)
	lwz	r30, VCPU_GPR(R30)(r4)
	lwz	r31, VCPU_GPR(R31)(r4)
..skip_nv_load:

	/* Should we return to the guest? */
	andi.	r5, r3, RESUME_FLAG_HOST
	beq	lightweight_exit

	srawi	r3, r3, 2 /* Shift -ERR back down. */
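	/* The low two bits of the resume code hold the RESUME_FLAG_* bits;
	 * the arithmetic shift recovers the signed -ERR value stored above
	 * them, which becomes our return value. */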

heavyweight_exit:
	/* Not returning to guest. */

#ifdef CONFIG_SPE
	/* save guest SPEFSCR and load host SPEFSCR */
	mfspr	r9, SPRN_SPEFSCR
	stw	r9, VCPU_SPEFSCR(r4)
	lwz	r9, VCPU_HOST_SPEFSCR(r4)
	mtspr	SPRN_SPEFSCR, r9
#endif

	/* We already saved guest volatile register state; now save the
	 * non-volatiles. */
	stw	r15, VCPU_GPR(R15)(r4)
	stw	r16, VCPU_GPR(R16)(r4)
	stw	r17, VCPU_GPR(R17)(r4)
	stw	r18, VCPU_GPR(R18)(r4)
	stw	r19, VCPU_GPR(R19)(r4)
	stw	r20, VCPU_GPR(R20)(r4)
	stw	r21, VCPU_GPR(R21)(r4)
	stw	r22, VCPU_GPR(R22)(r4)
	stw	r23, VCPU_GPR(R23)(r4)
	stw	r24, VCPU_GPR(R24)(r4)
	stw	r25, VCPU_GPR(R25)(r4)
	stw	r26, VCPU_GPR(R26)(r4)
	stw	r27, VCPU_GPR(R27)(r4)
	stw	r28, VCPU_GPR(R28)(r4)
	stw	r29, VCPU_GPR(R29)(r4)
	stw	r30, VCPU_GPR(R30)(r4)
	stw	r31, VCPU_GPR(R31)(r4)

	/* Load host non-volatile register state from host stack. */
	lwz	r14, HOST_NV_GPR(R14)(r1)
	lwz	r15, HOST_NV_GPR(R15)(r1)
	lwz	r16, HOST_NV_GPR(R16)(r1)
	lwz	r17, HOST_NV_GPR(R17)(r1)
	lwz	r18, HOST_NV_GPR(R18)(r1)
	lwz	r19, HOST_NV_GPR(R19)(r1)
	lwz	r20, HOST_NV_GPR(R20)(r1)
	lwz	r21, HOST_NV_GPR(R21)(r1)
	lwz	r22, HOST_NV_GPR(R22)(r1)
	lwz	r23, HOST_NV_GPR(R23)(r1)
	lwz	r24, HOST_NV_GPR(R24)(r1)
	lwz	r25, HOST_NV_GPR(R25)(r1)
	lwz	r26, HOST_NV_GPR(R26)(r1)
	lwz	r27, HOST_NV_GPR(R27)(r1)
	lwz	r28, HOST_NV_GPR(R28)(r1)
	lwz	r29, HOST_NV_GPR(R29)(r1)
	lwz	r30, HOST_NV_GPR(R30)(r1)
	lwz	r31, HOST_NV_GPR(R31)(r1)

	/* Return to kvm_vcpu_run(). */
	lwz	r4, HOST_STACK_LR(r1)
	lwz	r5, HOST_CR(r1)
	addi	r1, r1, HOST_STACK_SIZE
	mtlr	r4
	mtcr	r5
	/* r3 still contains the return code from kvmppc_handle_exit(). */
	blr


/* Registers:
 *  r3: kvm_run pointer
 *  r4: vcpu pointer
 */
_GLOBAL(__kvmppc_vcpu_run)
	stwu	r1, -HOST_STACK_SIZE(r1)
	stw	r1, VCPU_HOST_STACK(r4)	/* Save stack pointer to vcpu. */

	/* Save host state to stack. */
	stw	r3, HOST_RUN(r1)
	mflr	r3
	stw	r3, HOST_STACK_LR(r1)
	mfcr	r5
	stw	r5, HOST_CR(r1)

	/* Save host non-volatile register state to stack. */
	stw	r14, HOST_NV_GPR(R14)(r1)
	stw	r15, HOST_NV_GPR(R15)(r1)
	stw	r16, HOST_NV_GPR(R16)(r1)
	stw	r17, HOST_NV_GPR(R17)(r1)
	stw	r18, HOST_NV_GPR(R18)(r1)
	stw	r19, HOST_NV_GPR(R19)(r1)
	stw	r20, HOST_NV_GPR(R20)(r1)
	stw	r21, HOST_NV_GPR(R21)(r1)
	stw	r22, HOST_NV_GPR(R22)(r1)
	stw	r23, HOST_NV_GPR(R23)(r1)
	stw	r24, HOST_NV_GPR(R24)(r1)
	stw	r25, HOST_NV_GPR(R25)(r1)
	stw	r26, HOST_NV_GPR(R26)(r1)
	stw	r27, HOST_NV_GPR(R27)(r1)
	stw	r28, HOST_NV_GPR(R28)(r1)
	stw	r29, HOST_NV_GPR(R29)(r1)
	stw	r30, HOST_NV_GPR(R30)(r1)
	stw	r31, HOST_NV_GPR(R31)(r1)

	/* Load guest non-volatiles. */
	lwz	r14, VCPU_GPR(R14)(r4)
	lwz	r15, VCPU_GPR(R15)(r4)
	lwz	r16, VCPU_GPR(R16)(r4)
	lwz	r17, VCPU_GPR(R17)(r4)
	lwz	r18, VCPU_GPR(R18)(r4)
	lwz	r19, VCPU_GPR(R19)(r4)
	lwz	r20, VCPU_GPR(R20)(r4)
	lwz	r21, VCPU_GPR(R21)(r4)
	lwz	r22, VCPU_GPR(R22)(r4)
	lwz	r23, VCPU_GPR(R23)(r4)
	lwz	r24, VCPU_GPR(R24)(r4)
	lwz	r25, VCPU_GPR(R25)(r4)
	lwz	r26, VCPU_GPR(R26)(r4)
	lwz	r27, VCPU_GPR(R27)(r4)
	lwz	r28, VCPU_GPR(R28)(r4)
	lwz	r29, VCPU_GPR(R29)(r4)
	lwz	r30, VCPU_GPR(R30)(r4)
	lwz	r31, VCPU_GPR(R31)(r4)

#ifdef CONFIG_SPE
	/* save host SPEFSCR and load guest SPEFSCR */
	mfspr	r3, SPRN_SPEFSCR
	stw	r3, VCPU_HOST_SPEFSCR(r4)
	lwz	r3, VCPU_SPEFSCR(r4)
	mtspr	SPRN_SPEFSCR, r3
#endif

lightweight_exit:
	stw	r2, HOST_R2(r1)

	mfspr	r3, SPRN_PID
	stw	r3, VCPU_HOST_PID(r4)
	lwz	r3, VCPU_SHADOW_PID(r4)
	mtspr	SPRN_PID, r3

#ifdef CONFIG_FSL_BOOKE
	lwz	r3, VCPU_SHADOW_PID1(r4)
	mtspr	SPRN_PID1, r3
#endif
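
	/* Guest translations are tagged with a per-vcpu shadow PID (plus a
	 * shadow PID1 on FSL cores) rather than the guest's own PID value,
	 * keeping guest TLB entries distinct from the host's. */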

#ifdef CONFIG_44x
	iccci	0, 0 /* XXX hack */
#endif

	/* Load some guest volatiles. */
	lwz	r0, VCPU_GPR(R0)(r4)
	lwz	r2, VCPU_GPR(R2)(r4)
	lwz	r9, VCPU_GPR(R9)(r4)
	lwz	r10, VCPU_GPR(R10)(r4)
	lwz	r11, VCPU_GPR(R11)(r4)
	lwz	r12, VCPU_GPR(R12)(r4)
	lwz	r13, VCPU_GPR(R13)(r4)
	lwz	r3, VCPU_LR(r4)
	mtlr	r3
	lwz	r3, VCPU_XER(r4)
	mtxer	r3

	/* Switch the IVPR. XXX If we take a TLB miss after this we're screwed,
	 * so how do we make sure vcpu won't fault? */
	lis	r8, kvmppc_booke_handlers@ha
	lwz	r8, kvmppc_booke_handlers@l(r8)
	mtspr	SPRN_IVPR, r8

	lwz	r5, VCPU_SHARED(r4)

	/* Can't switch the stack pointer until after IVPR is switched,
	 * because host interrupt handlers would get confused. */
	lwz	r1, VCPU_GPR(R1)(r4)

	/*
	 * Host interrupt handlers may have clobbered these
	 * guest-readable SPRGs, or the guest kernel may have
	 * written directly to the shared area, so we
	 * need to reload them here with the guest's values.
	 */
	PPC_LD(r3, VCPU_SHARED_SPRG4, r5)
	mtspr	SPRN_SPRG4W, r3
	PPC_LD(r3, VCPU_SHARED_SPRG5, r5)
	mtspr	SPRN_SPRG5W, r3
	PPC_LD(r3, VCPU_SHARED_SPRG6, r5)
	mtspr	SPRN_SPRG6W, r3
	PPC_LD(r3, VCPU_SHARED_SPRG7, r5)
	mtspr	SPRN_SPRG7W, r3

#ifdef CONFIG_KVM_EXIT_TIMING
	/* save enter time */
1:
	mfspr	r6, SPRN_TBRU
	mfspr	r7, SPRN_TBRL
	mfspr	r8, SPRN_TBRU
	cmpw	r8, r6
	bne	1b
	stw	r7, VCPU_TIMING_LAST_ENTER_TBL(r4)
	stw	r8, VCPU_TIMING_LAST_ENTER_TBU(r4)
#endif

	/* Finish loading guest volatiles and jump to guest. */
	lwz	r3, VCPU_CTR(r4)
	lwz	r5, VCPU_CR(r4)
	lwz	r6, VCPU_PC(r4)
	lwz	r7, VCPU_SHADOW_MSR(r4)
	mtctr	r3
	mtcr	r5
	mtsrr0	r6
	mtsrr1	r7
	lwz	r5, VCPU_GPR(R5)(r4)
	lwz	r6, VCPU_GPR(R6)(r4)
	lwz	r7, VCPU_GPR(R7)(r4)
	lwz	r8, VCPU_GPR(R8)(r4)

	/* Clear any debug events which occurred since we disabled MSR[DE].
	 * XXX This gives us a 3-instruction window in which a breakpoint
	 * intended for guest context could fire in the host instead. */
	lis	r3, 0xffff
	ori	r3, r3, 0xffff
	mtspr	SPRN_DBSR, r3

	lwz	r3, VCPU_GPR(R3)(r4)
	lwz	r4, VCPU_GPR(R4)(r4)
	rfi

	.data
	.align	4
	.globl	kvmppc_booke_handler_addr
kvmppc_booke_handler_addr:
KVM_HANDLER_ADDR BOOKE_INTERRUPT_CRITICAL
KVM_HANDLER_ADDR BOOKE_INTERRUPT_MACHINE_CHECK
KVM_HANDLER_ADDR BOOKE_INTERRUPT_DATA_STORAGE
KVM_HANDLER_ADDR BOOKE_INTERRUPT_INST_STORAGE
KVM_HANDLER_ADDR BOOKE_INTERRUPT_EXTERNAL
KVM_HANDLER_ADDR BOOKE_INTERRUPT_ALIGNMENT
KVM_HANDLER_ADDR BOOKE_INTERRUPT_PROGRAM
KVM_HANDLER_ADDR BOOKE_INTERRUPT_FP_UNAVAIL
KVM_HANDLER_ADDR BOOKE_INTERRUPT_SYSCALL
KVM_HANDLER_ADDR BOOKE_INTERRUPT_AP_UNAVAIL
KVM_HANDLER_ADDR BOOKE_INTERRUPT_DECREMENTER
KVM_HANDLER_ADDR BOOKE_INTERRUPT_FIT
KVM_HANDLER_ADDR BOOKE_INTERRUPT_WATCHDOG
KVM_HANDLER_ADDR BOOKE_INTERRUPT_DTLB_MISS
KVM_HANDLER_ADDR BOOKE_INTERRUPT_ITLB_MISS
KVM_HANDLER_ADDR BOOKE_INTERRUPT_DEBUG
KVM_HANDLER_ADDR BOOKE_INTERRUPT_SPE_UNAVAIL
KVM_HANDLER_ADDR BOOKE_INTERRUPT_SPE_FP_DATA
KVM_HANDLER_ADDR BOOKE_INTERRUPT_SPE_FP_ROUND
KVM_HANDLER_END /* Always keep this at the end. */
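
/* A note on the contract (an assumption from the labels above, not spelled
 * out here): the C init code can use consecutive entries of this table,
 * bounded by kvmppc_handlers_end, to size each stub when copying it into
 * the kvmppc_booke_handlers area; hence the entry order must match the
 * stubs above and the end marker must stay last. */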

#ifdef CONFIG_SPE
_GLOBAL(kvmppc_save_guest_spe)
	cmpi	0,r3,0
	beqlr-
	SAVE_32EVRS(0, r4, r3, VCPU_EVR)
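	/* SPE has no direct move-from-accumulator instruction: zero evr6,
	 * then evmwumiaa computes evr6 = ACC + 0*0, reading ACC into evr6. */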
	evxor	evr6, evr6, evr6
	evmwumiaa evr6, evr6, evr6
	li	r4,VCPU_ACC
	evstddx	evr6, r4, r3		/* save acc */
	blr

_GLOBAL(kvmppc_load_guest_spe)
	cmpi	0,r3,0
	beqlr-
	li	r4,VCPU_ACC
	evlddx	evr6,r4,r3
	evmra	evr6,evr6		/* load acc */
	REST_32EVRS(0, r4, r3, VCPU_EVR)
	blr
#endif