/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright IBM Corp. 2007
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>

/* The host stack layout: */
#define HOST_R1         0 /* Implied by stwu. */
#define HOST_CALLEE_LR  4
#define HOST_RUN        8
/* r2 is special: it holds 'current', and it is made nonvolatile in the
 * kernel with the -ffixed-r2 gcc option. */
#define HOST_R2         12
#define HOST_CR         16
#define HOST_NV_GPRS    20
#define __HOST_NV_GPR(n)  (HOST_NV_GPRS + ((n - 14) * 4))
#define HOST_NV_GPR(n)  __HOST_NV_GPR(__REG_##n)
#define HOST_MIN_STACK_SIZE (HOST_NV_GPR(R31) + 4)
#define HOST_STACK_SIZE (((HOST_MIN_STACK_SIZE + 15) / 16) * 16) /* Align. */
#define HOST_STACK_LR   (HOST_STACK_SIZE + 4) /* In caller stack frame. */
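/* Note: with r14..r31 saved starting at HOST_NV_GPRS, HOST_MIN_STACK_SIZE
 * works out to 92 bytes, which the rounding above pads to a 96-byte,
 * 16-byte aligned frame. */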

#define NEED_INST_MASK ((1<<BOOKE_INTERRUPT_PROGRAM) | \
                        (1<<BOOKE_INTERRUPT_DTLB_MISS) | \
                        (1<<BOOKE_INTERRUPT_DEBUG))

#define NEED_DEAR_MASK ((1<<BOOKE_INTERRUPT_DATA_STORAGE) | \
                        (1<<BOOKE_INTERRUPT_DTLB_MISS) | \
                        (1<<BOOKE_INTERRUPT_ALIGNMENT))

#define NEED_ESR_MASK ((1<<BOOKE_INTERRUPT_DATA_STORAGE) | \
                       (1<<BOOKE_INTERRUPT_INST_STORAGE) | \
                       (1<<BOOKE_INTERRUPT_PROGRAM) | \
                       (1<<BOOKE_INTERRUPT_DTLB_MISS) | \
                       (1<<BOOKE_INTERRUPT_ALIGNMENT))
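/* These masks are indexed by exit number: kvmppc_resume_host computes
 * (1 << exit_nr) and tests it against them to decide whether the faulting
 * instruction, DEAR and ESR need to be captured before the host can
 * clobber them. */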

.macro __KVM_HANDLER ivor_nr scratch srr0
	/* Get pointer to vcpu and record exit number. */
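	/* Note: r4 is about to be used for the vcpu pointer, so the guest's
	 * r4 is parked in the per-class scratch SPRG and written back to
	 * VCPU_GPR(R4) once the vcpu pointer is in hand. */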
	mtspr	\scratch, r4
	mfspr   r4, SPRN_SPRG_THREAD
	lwz     r4, THREAD_KVM_VCPU(r4)
	stw	r3, VCPU_GPR(R3)(r4)
	stw	r5, VCPU_GPR(R5)(r4)
	stw	r6, VCPU_GPR(R6)(r4)
	mfspr	r3, \scratch
	mfctr	r5
	stw	r3, VCPU_GPR(R4)(r4)
	stw	r5, VCPU_CTR(r4)
	mfspr	r3, \srr0
	lis	r6, kvmppc_resume_host@h
	stw	r3, VCPU_PC(r4)
	li	r5, \ivor_nr
	ori	r6, r6, kvmppc_resume_host@l
	mtctr	r6
	bctr
.endm

.macro KVM_HANDLER ivor_nr scratch srr0
_GLOBAL(kvmppc_handler_\ivor_nr)
	__KVM_HANDLER \ivor_nr \scratch \srr0
.endm
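
/* Debug interrupts need extra care: if CSRR1[PR] is clear, the interrupt
 * hit the KVM entry/exit code itself, so the stub below clears MSR_DE in
 * CSRR1 and all DBSR events and returns with rfci; if PR is set, it came
 * from the guest and is handed to the common __KVM_HANDLER path. */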
.macro KVM_DBG_HANDLER ivor_nr scratch srr0
_GLOBAL(kvmppc_handler_\ivor_nr)
	mtspr   \scratch, r4
	mfspr	r4, SPRN_SPRG_THREAD
	lwz	r4, THREAD_KVM_VCPU(r4)
	stw	r3, VCPU_CRIT_SAVE(r4)
	mfcr	r3
	mfspr	r4, SPRN_CSRR1
	andi.	r4, r4, MSR_PR
	bne	1f
	/* debug interrupt happened in enter/exit path */
	mfspr   r4, SPRN_CSRR1
	rlwinm  r4, r4, 0, ~MSR_DE
	mtspr   SPRN_CSRR1, r4
	lis	r4, 0xffff
	ori	r4, r4, 0xffff
	mtspr	SPRN_DBSR, r4
	mfspr	r4, SPRN_SPRG_THREAD
	lwz	r4, THREAD_KVM_VCPU(r4)
	mtcr	r3
	lwz     r3, VCPU_CRIT_SAVE(r4)
	mfspr   r4, \scratch
	rfci
1:	/* debug interrupt happened in guest */
	mtcr	r3
	mfspr	r4, SPRN_SPRG_THREAD
	lwz	r4, THREAD_KVM_VCPU(r4)
	lwz     r3, VCPU_CRIT_SAVE(r4)
	mfspr   r4, \scratch
	__KVM_HANDLER \ivor_nr \scratch \srr0
.endm
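
/* KVM_HANDLER_ADDR emits one entry of the kvmppc_booke_handler_addr table
 * (see the .data section at the bottom of this file); together with
 * KVM_HANDLER_END it lets the booke setup code locate each handler stub
 * and compute its size. */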
.macro KVM_HANDLER_ADDR ivor_nr
	.long	kvmppc_handler_\ivor_nr
.endm

.macro KVM_HANDLER_END
	.long	kvmppc_handlers_end
.endm
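
/* The stubs between kvmppc_handlers_start and kvmppc_handlers_end are
 * self-contained (they reach kvmppc_resume_host through an absolute
 * address), so they can be copied to the area whose address is kept in
 * kvmppc_booke_handlers, which lightweight_exit loads into IVPR before
 * entering the guest. */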
_GLOBAL(kvmppc_handlers_start)
KVM_HANDLER BOOKE_INTERRUPT_CRITICAL SPRN_SPRG_RSCRATCH_CRIT SPRN_CSRR0
KVM_HANDLER BOOKE_INTERRUPT_MACHINE_CHECK SPRN_SPRG_RSCRATCH_MC SPRN_MCSRR0
KVM_HANDLER BOOKE_INTERRUPT_DATA_STORAGE SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_INST_STORAGE SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_EXTERNAL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_ALIGNMENT SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_PROGRAM SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_FP_UNAVAIL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_SYSCALL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_AP_UNAVAIL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_DECREMENTER SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_FIT SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_WATCHDOG SPRN_SPRG_RSCRATCH_CRIT SPRN_CSRR0
KVM_HANDLER BOOKE_INTERRUPT_DTLB_MISS SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_ITLB_MISS SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_DBG_HANDLER BOOKE_INTERRUPT_DEBUG SPRN_SPRG_RSCRATCH_CRIT SPRN_CSRR0
KVM_HANDLER BOOKE_INTERRUPT_SPE_UNAVAIL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_SPE_FP_DATA SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_SPE_FP_ROUND SPRN_SPRG_RSCRATCH0 SPRN_SRR0
_GLOBAL(kvmppc_handlers_end)

/* Registers:
 *  SPRG_SCRATCH0: guest r4
 *  r4: vcpu pointer
 *  r5: KVM exit number
 */
_GLOBAL(kvmppc_resume_host)
	mfcr	r3
	stw	r3, VCPU_CR(r4)
	stw	r7, VCPU_GPR(R7)(r4)
	stw	r8, VCPU_GPR(R8)(r4)
	stw	r9, VCPU_GPR(R9)(r4)

	li	r6, 1
	slw	r6, r6, r5

#ifdef CONFIG_KVM_EXIT_TIMING
	/* save exit time */
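	/* Note: TBU is read before and after TBL and the sequence retried on
	 * mismatch, so a carry out of TBL between the two reads cannot
	 * produce a torn timestamp. */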
1:
	mfspr	r7, SPRN_TBRU
	mfspr	r8, SPRN_TBRL
	mfspr	r9, SPRN_TBRU
	cmpw	r9, r7
	bne	1b
	stw	r8, VCPU_TIMING_EXIT_TBL(r4)
	stw	r9, VCPU_TIMING_EXIT_TBU(r4)
#endif

	/* Save the faulting instruction and all GPRs for emulation. */
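	/* Note: SRR0 holds the guest PC; MSR[DS] is set around the lwz below
	 * so the access is translated in the guest's data address space
	 * rather than the host's. */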
	andi.	r7, r6, NEED_INST_MASK
	beq	..skip_inst_copy
	mfspr	r9, SPRN_SRR0
	mfmsr	r8
	ori	r7, r8, MSR_DS
	mtmsr	r7
	isync
	lwz	r9, 0(r9)
	mtmsr	r8
	isync
	stw	r9, VCPU_LAST_INST(r4)

	stw	r15, VCPU_GPR(R15)(r4)
	stw	r16, VCPU_GPR(R16)(r4)
	stw	r17, VCPU_GPR(R17)(r4)
	stw	r18, VCPU_GPR(R18)(r4)
	stw	r19, VCPU_GPR(R19)(r4)
	stw	r20, VCPU_GPR(R20)(r4)
	stw	r21, VCPU_GPR(R21)(r4)
	stw	r22, VCPU_GPR(R22)(r4)
	stw	r23, VCPU_GPR(R23)(r4)
	stw	r24, VCPU_GPR(R24)(r4)
	stw	r25, VCPU_GPR(R25)(r4)
	stw	r26, VCPU_GPR(R26)(r4)
	stw	r27, VCPU_GPR(R27)(r4)
	stw	r28, VCPU_GPR(R28)(r4)
	stw	r29, VCPU_GPR(R29)(r4)
	stw	r30, VCPU_GPR(R30)(r4)
	stw	r31, VCPU_GPR(R31)(r4)
..skip_inst_copy:

	/* Also grab DEAR and ESR before the host can clobber them. */

	andi.	r7, r6, NEED_DEAR_MASK
	beq	..skip_dear
	mfspr	r9, SPRN_DEAR
	stw	r9, VCPU_FAULT_DEAR(r4)
..skip_dear:

	andi.	r7, r6, NEED_ESR_MASK
	beq	..skip_esr
	mfspr	r9, SPRN_ESR
	stw	r9, VCPU_FAULT_ESR(r4)
..skip_esr:

	/* Save remaining volatile guest register state to vcpu. */
	stw	r0, VCPU_GPR(R0)(r4)
	stw	r1, VCPU_GPR(R1)(r4)
	stw	r2, VCPU_GPR(R2)(r4)
	stw	r10, VCPU_GPR(R10)(r4)
	stw	r11, VCPU_GPR(R11)(r4)
	stw	r12, VCPU_GPR(R12)(r4)
	stw	r13, VCPU_GPR(R13)(r4)
	stw	r14, VCPU_GPR(R14)(r4) /* We need a NV GPR below. */
	mflr	r3
	stw	r3, VCPU_LR(r4)
	mfxer	r3
	stw	r3, VCPU_XER(r4)

	/* Restore host stack pointer and PID before IVPR, since the host
	 * exception handlers use them. */
	lwz	r1, VCPU_HOST_STACK(r4)
	lwz	r3, VCPU_HOST_PID(r4)
	mtspr	SPRN_PID, r3

#ifdef CONFIG_PPC_85xx
	/* we cheat and know that Linux doesn't use PID1 which is always 0 */
	lis	r3, 0
	mtspr	SPRN_PID1, r3
#endif

	/* Restore host IVPR before re-enabling interrupts. We cheat and know
	 * that Linux IVPR is always 0xc0000000. */
	lis	r3, 0xc000
	mtspr	SPRN_IVPR, r3

	/* Switch to kernel stack and jump to handler. */
	LOAD_REG_ADDR(r3, kvmppc_handle_exit)
	mtctr	r3
	mr	r3, r4
	lwz	r2, HOST_R2(r1)
	mr	r14, r4 /* Save vcpu pointer. */

	bctrl	/* kvmppc_handle_exit() */
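
	/* Note: the return value packs the RESUME_FLAG_* bits in its low two
	 * bits; when we do go back to the host, the remaining bits hold the
	 * (negative) return code, recovered by the srawi further down. */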
	/* Restore vcpu pointer and the nonvolatiles we used. */
	mr	r4, r14
	lwz	r14, VCPU_GPR(R14)(r4)

	/* Sometimes instruction emulation must restore complete GPR state. */
	andi.	r5, r3, RESUME_FLAG_NV
	beq	..skip_nv_load
	lwz	r15, VCPU_GPR(R15)(r4)
	lwz	r16, VCPU_GPR(R16)(r4)
	lwz	r17, VCPU_GPR(R17)(r4)
	lwz	r18, VCPU_GPR(R18)(r4)
	lwz	r19, VCPU_GPR(R19)(r4)
	lwz	r20, VCPU_GPR(R20)(r4)
	lwz	r21, VCPU_GPR(R21)(r4)
	lwz	r22, VCPU_GPR(R22)(r4)
	lwz	r23, VCPU_GPR(R23)(r4)
	lwz	r24, VCPU_GPR(R24)(r4)
	lwz	r25, VCPU_GPR(R25)(r4)
	lwz	r26, VCPU_GPR(R26)(r4)
	lwz	r27, VCPU_GPR(R27)(r4)
	lwz	r28, VCPU_GPR(R28)(r4)
	lwz	r29, VCPU_GPR(R29)(r4)
	lwz	r30, VCPU_GPR(R30)(r4)
	lwz	r31, VCPU_GPR(R31)(r4)
..skip_nv_load:

	/* Should we return to the guest? */
	andi.	r5, r3, RESUME_FLAG_HOST
	beq	lightweight_exit

	srawi	r3, r3, 2 /* Shift -ERR back down. */

heavyweight_exit:
	/* Not returning to guest. */

#ifdef CONFIG_SPE
	/* save guest SPEFSCR and load host SPEFSCR */
	mfspr	r9, SPRN_SPEFSCR
	stw	r9, VCPU_SPEFSCR(r4)
	lwz	r9, VCPU_HOST_SPEFSCR(r4)
	mtspr	SPRN_SPEFSCR, r9
#endif

	/* We already saved guest volatile register state; now save the
	 * non-volatiles. */
	stw	r15, VCPU_GPR(R15)(r4)
	stw	r16, VCPU_GPR(R16)(r4)
	stw	r17, VCPU_GPR(R17)(r4)
	stw	r18, VCPU_GPR(R18)(r4)
	stw	r19, VCPU_GPR(R19)(r4)
	stw	r20, VCPU_GPR(R20)(r4)
	stw	r21, VCPU_GPR(R21)(r4)
	stw	r22, VCPU_GPR(R22)(r4)
	stw	r23, VCPU_GPR(R23)(r4)
	stw	r24, VCPU_GPR(R24)(r4)
	stw	r25, VCPU_GPR(R25)(r4)
	stw	r26, VCPU_GPR(R26)(r4)
	stw	r27, VCPU_GPR(R27)(r4)
	stw	r28, VCPU_GPR(R28)(r4)
	stw	r29, VCPU_GPR(R29)(r4)
	stw	r30, VCPU_GPR(R30)(r4)
	stw	r31, VCPU_GPR(R31)(r4)

	/* Load host non-volatile register state from host stack. */
	lwz	r14, HOST_NV_GPR(R14)(r1)
	lwz	r15, HOST_NV_GPR(R15)(r1)
	lwz	r16, HOST_NV_GPR(R16)(r1)
	lwz	r17, HOST_NV_GPR(R17)(r1)
	lwz	r18, HOST_NV_GPR(R18)(r1)
	lwz	r19, HOST_NV_GPR(R19)(r1)
	lwz	r20, HOST_NV_GPR(R20)(r1)
	lwz	r21, HOST_NV_GPR(R21)(r1)
	lwz	r22, HOST_NV_GPR(R22)(r1)
	lwz	r23, HOST_NV_GPR(R23)(r1)
	lwz	r24, HOST_NV_GPR(R24)(r1)
	lwz	r25, HOST_NV_GPR(R25)(r1)
	lwz	r26, HOST_NV_GPR(R26)(r1)
	lwz	r27, HOST_NV_GPR(R27)(r1)
	lwz	r28, HOST_NV_GPR(R28)(r1)
	lwz	r29, HOST_NV_GPR(R29)(r1)
	lwz	r30, HOST_NV_GPR(R30)(r1)
	lwz	r31, HOST_NV_GPR(R31)(r1)

	/* Return to kvm_vcpu_run(). */
	lwz	r4, HOST_STACK_LR(r1)
	lwz	r5, HOST_CR(r1)
	addi	r1, r1, HOST_STACK_SIZE
	mtlr	r4
	mtcr	r5
	/* r3 still contains the return code from kvmppc_handle_exit(). */
	blr


/* Registers:
 *  r3: vcpu pointer
 */
_GLOBAL(__kvmppc_vcpu_run)
	stwu	r1, -HOST_STACK_SIZE(r1)
	stw	r1, VCPU_HOST_STACK(r3)	/* Save stack pointer to vcpu. */

	/* Save host state to stack. */
	mr	r4, r3
	mflr	r3
	stw	r3, HOST_STACK_LR(r1)
	mfcr	r5
	stw	r5, HOST_CR(r1)

	/* Save host non-volatile register state to stack. */
	stw	r14, HOST_NV_GPR(R14)(r1)
	stw	r15, HOST_NV_GPR(R15)(r1)
	stw	r16, HOST_NV_GPR(R16)(r1)
	stw	r17, HOST_NV_GPR(R17)(r1)
	stw	r18, HOST_NV_GPR(R18)(r1)
	stw	r19, HOST_NV_GPR(R19)(r1)
	stw	r20, HOST_NV_GPR(R20)(r1)
	stw	r21, HOST_NV_GPR(R21)(r1)
	stw	r22, HOST_NV_GPR(R22)(r1)
	stw	r23, HOST_NV_GPR(R23)(r1)
	stw	r24, HOST_NV_GPR(R24)(r1)
	stw	r25, HOST_NV_GPR(R25)(r1)
	stw	r26, HOST_NV_GPR(R26)(r1)
	stw	r27, HOST_NV_GPR(R27)(r1)
	stw	r28, HOST_NV_GPR(R28)(r1)
	stw	r29, HOST_NV_GPR(R29)(r1)
	stw	r30, HOST_NV_GPR(R30)(r1)
	stw	r31, HOST_NV_GPR(R31)(r1)

	/* Load guest non-volatiles. */
	lwz	r14, VCPU_GPR(R14)(r4)
	lwz	r15, VCPU_GPR(R15)(r4)
	lwz	r16, VCPU_GPR(R16)(r4)
	lwz	r17, VCPU_GPR(R17)(r4)
	lwz	r18, VCPU_GPR(R18)(r4)
	lwz	r19, VCPU_GPR(R19)(r4)
	lwz	r20, VCPU_GPR(R20)(r4)
	lwz	r21, VCPU_GPR(R21)(r4)
	lwz	r22, VCPU_GPR(R22)(r4)
	lwz	r23, VCPU_GPR(R23)(r4)
	lwz	r24, VCPU_GPR(R24)(r4)
	lwz	r25, VCPU_GPR(R25)(r4)
	lwz	r26, VCPU_GPR(R26)(r4)
	lwz	r27, VCPU_GPR(R27)(r4)
	lwz	r28, VCPU_GPR(R28)(r4)
	lwz	r29, VCPU_GPR(R29)(r4)
	lwz	r30, VCPU_GPR(R30)(r4)
	lwz	r31, VCPU_GPR(R31)(r4)

#ifdef CONFIG_SPE
	/* save host SPEFSCR and load guest SPEFSCR */
	mfspr	r3, SPRN_SPEFSCR
	stw	r3, VCPU_HOST_SPEFSCR(r4)
	lwz	r3, VCPU_SPEFSCR(r4)
	mtspr	SPRN_SPEFSCR, r3
#endif
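
/* lightweight_exit is the common guest-entry path: __kvmppc_vcpu_run falls
 * through into it, and kvmppc_resume_host branches back here when
 * kvmppc_handle_exit() wants the guest resumed; heavyweight_exit above is
 * taken only when control really returns to the host caller. */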
lightweight_exit:
	stw	r2, HOST_R2(r1)

	mfspr	r3, SPRN_PID
	stw	r3, VCPU_HOST_PID(r4)
	lwz	r3, VCPU_SHADOW_PID(r4)
	mtspr	SPRN_PID, r3
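	/* Note: the guest runs under a shadow PID so its TLB entries are
	 * tagged separately from the host's; the host PID saved above is
	 * put back by kvmppc_resume_host. */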

#ifdef CONFIG_PPC_85xx
	lwz	r3, VCPU_SHADOW_PID1(r4)
	mtspr	SPRN_PID1, r3
#endif

	/* Load some guest volatiles. */
	lwz	r0, VCPU_GPR(R0)(r4)
	lwz	r2, VCPU_GPR(R2)(r4)
	lwz	r9, VCPU_GPR(R9)(r4)
	lwz	r10, VCPU_GPR(R10)(r4)
	lwz	r11, VCPU_GPR(R11)(r4)
	lwz	r12, VCPU_GPR(R12)(r4)
	lwz	r13, VCPU_GPR(R13)(r4)
	lwz	r3, VCPU_LR(r4)
	mtlr	r3
	lwz	r3, VCPU_XER(r4)
	mtxer	r3

	/* Switch the IVPR. XXX If we take a TLB miss after this we're screwed,
	 * so how do we make sure vcpu won't fault? */
	lis	r8, kvmppc_booke_handlers@ha
	lwz	r8, kvmppc_booke_handlers@l(r8)
	mtspr	SPRN_IVPR, r8

	lwz	r5, VCPU_SHARED(r4)

	/* Can't switch the stack pointer until after IVPR is switched,
	 * because host interrupt handlers would get confused. */
	lwz	r1, VCPU_GPR(R1)(r4)

	/*
	 * Host interrupt handlers may have clobbered these
	 * guest-readable SPRGs, or the guest kernel may have
	 * written directly to the shared area, so we
	 * need to reload them here with the guest's values.
	 */
	PPC_LD(r3, VCPU_SHARED_SPRG4, r5)
	mtspr	SPRN_SPRG4W, r3
	PPC_LD(r3, VCPU_SHARED_SPRG5, r5)
	mtspr	SPRN_SPRG5W, r3
	PPC_LD(r3, VCPU_SHARED_SPRG6, r5)
	mtspr	SPRN_SPRG6W, r3
	PPC_LD(r3, VCPU_SHARED_SPRG7, r5)
	mtspr	SPRN_SPRG7W, r3

#ifdef CONFIG_KVM_EXIT_TIMING
	/* save enter time */
1:
	mfspr	r6, SPRN_TBRU
	mfspr	r7, SPRN_TBRL
	mfspr	r8, SPRN_TBRU
	cmpw	r8, r6
	bne	1b
	stw	r7, VCPU_TIMING_LAST_ENTER_TBL(r4)
	stw	r8, VCPU_TIMING_LAST_ENTER_TBU(r4)
#endif

	/* Finish loading guest volatiles and jump to guest. */
	lwz	r3, VCPU_CTR(r4)
	lwz	r5, VCPU_CR(r4)
	lwz	r6, VCPU_PC(r4)
	lwz	r7, VCPU_SHADOW_MSR(r4)
	mtctr	r3
	mtcr	r5
	mtsrr0	r6
	mtsrr1	r7
	lwz	r5, VCPU_GPR(R5)(r4)
	lwz	r6, VCPU_GPR(R6)(r4)
	lwz	r7, VCPU_GPR(R7)(r4)
	lwz	r8, VCPU_GPR(R8)(r4)

	/* Clear any debug events which occurred since we disabled MSR[DE].
	 * XXX This gives us a 3-instruction window in which a breakpoint
	 * intended for guest context could fire in the host instead. */
	lis	r3, 0xffff
	ori	r3, r3, 0xffff
	mtspr	SPRN_DBSR, r3
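
	/* Note: r4 has held the vcpu pointer all along, so guest r3 is loaded
	 * first and guest r4 last, immediately before rfi enters the guest at
	 * the SRR0/SRR1 values set above. */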
	lwz	r3, VCPU_GPR(R3)(r4)
	lwz	r4, VCPU_GPR(R4)(r4)
	rfi

	.data
	.align	4
	.globl	kvmppc_booke_handler_addr
kvmppc_booke_handler_addr:
KVM_HANDLER_ADDR BOOKE_INTERRUPT_CRITICAL
KVM_HANDLER_ADDR BOOKE_INTERRUPT_MACHINE_CHECK
KVM_HANDLER_ADDR BOOKE_INTERRUPT_DATA_STORAGE
KVM_HANDLER_ADDR BOOKE_INTERRUPT_INST_STORAGE
KVM_HANDLER_ADDR BOOKE_INTERRUPT_EXTERNAL
KVM_HANDLER_ADDR BOOKE_INTERRUPT_ALIGNMENT
KVM_HANDLER_ADDR BOOKE_INTERRUPT_PROGRAM
KVM_HANDLER_ADDR BOOKE_INTERRUPT_FP_UNAVAIL
KVM_HANDLER_ADDR BOOKE_INTERRUPT_SYSCALL
KVM_HANDLER_ADDR BOOKE_INTERRUPT_AP_UNAVAIL
KVM_HANDLER_ADDR BOOKE_INTERRUPT_DECREMENTER
KVM_HANDLER_ADDR BOOKE_INTERRUPT_FIT
KVM_HANDLER_ADDR BOOKE_INTERRUPT_WATCHDOG
KVM_HANDLER_ADDR BOOKE_INTERRUPT_DTLB_MISS
KVM_HANDLER_ADDR BOOKE_INTERRUPT_ITLB_MISS
KVM_HANDLER_ADDR BOOKE_INTERRUPT_DEBUG
KVM_HANDLER_ADDR BOOKE_INTERRUPT_SPE_UNAVAIL
KVM_HANDLER_ADDR BOOKE_INTERRUPT_SPE_FP_DATA
KVM_HANDLER_ADDR BOOKE_INTERRUPT_SPE_FP_ROUND
KVM_HANDLER_END /* Always keep this entry at the end */

#ifdef CONFIG_SPE
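/* Both helpers below take a vcpu pointer in r3 and return immediately if
 * it is NULL. The evxor/evmwumiaa pair is the usual idiom for reading the
 * 64-bit SPE accumulator into evr6 so it can be stored; evmra does the
 * reverse on reload. */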
_GLOBAL(kvmppc_save_guest_spe)
	cmpi	0,r3,0
	beqlr-
	SAVE_32EVRS(0, r4, r3, VCPU_EVR)
	evxor   evr6, evr6, evr6
	evmwumiaa evr6, evr6, evr6
	li	r4,VCPU_ACC
	evstddx evr6, r4, r3		/* save acc */
	blr

_GLOBAL(kvmppc_load_guest_spe)
	cmpi	0,r3,0
	beqlr-
	li      r4,VCPU_ACC
	evlddx  evr6,r4,r3
	evmra   evr6,evr6		/* load acc */
	REST_32EVRS(0, r4, r3, VCPU_EVR)
	blr
#endif