/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>

#ifdef CONFIG_PPC_BOOK3S_64
#include <asm/exception-64s.h>
#endif

/*****************************************************************************
 *                                                                           *
 *        Real Mode handlers that need to be in low physical memory          *
 *                                                                           *
 ****************************************************************************/

#if defined(CONFIG_PPC_BOOK3S_64)

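/*
 * On Book3S_64 the shadow vcpu is embedded in the PACA, which r13 already
 * points to; SHADOW_VCPU_OFF selects it.  MSR_NOIRQ is the kernel MSR with
 * instruction/data relocation (IR/DR) cleared for real-mode execution, and
 * FUNC() adds the '.' prefix used for 64-bit function text entry points.
 */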
#define LOAD_SHADOW_VCPU(reg)	GET_PACA(reg)
#define SHADOW_VCPU_OFF		PACA_KVM_SVCPU
#define MSR_NOIRQ		MSR_KERNEL & ~(MSR_IR | MSR_DR)
#define FUNC(name)		GLUE(.,name)

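/*
 * Fixup handlers: advance the interrupted instruction pointer by one
 * instruction (4 bytes) and return, so that the instruction which raised
 * the interrupt is skipped.
 */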
kvmppc_skip_interrupt:
	/*
	 * Here all GPRs are unchanged from when the interrupt happened
	 * except for r13, which is saved in SPRG_SCRATCH0.
	 */
	mfspr	r13, SPRN_SRR0
	addi	r13, r13, 4
	mtspr	SPRN_SRR0, r13
	GET_SCRATCH0(r13)
	rfid
	b	.

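/* Hypervisor interrupt variant: uses HSRR0 and hrfid instead of SRR0/rfid. */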
kvmppc_skip_Hinterrupt:
	/*
	 * Here all GPRs are unchanged from when the interrupt happened
	 * except for r13, which is saved in SPRG_SCRATCH0.
	 */
	mfspr	r13, SPRN_HSRR0
	addi	r13, r13, 4
	mtspr	SPRN_HSRR0, r13
	GET_SCRATCH0(r13)
	hrfid
	b	.

#elif defined(CONFIG_PPC_BOOK3S_32)

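/*
 * On Book3S_32 there is no PACA: the shadow vcpu is reached through the
 * thread struct (THREAD_KVM_SVCPU), so svcpu accesses start at offset 0
 * and function symbols carry no '.' prefix.
 */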
#define SHADOW_VCPU_OFF		0
#define MSR_NOIRQ		MSR_KERNEL
#define FUNC(name)		name

.macro INTERRUPT_TRAMPOLINE intno

.global kvmppc_trampoline_\intno
kvmppc_trampoline_\intno:

	mtspr	SPRN_SPRG_SCRATCH0, r13		/* Save r13 */

	/*
	 * First thing to do is to find out if we're coming
	 * from a KVM guest or a Linux process.
	 *
	 * To distinguish, we check a magic byte in the PACA/current
	 */
	mfspr	r13, SPRN_SPRG_THREAD
	lwz	r13, THREAD_KVM_SVCPU(r13)
	/* PPC32 can have a NULL pointer - let's check for that */
	mtspr   SPRN_SPRG_SCRATCH1, r12		/* Save r12 */
	mfcr	r12
	cmpwi	r13, 0
	bne	1f
2:	mtcr	r12
	mfspr	r12, SPRN_SPRG_SCRATCH1
	mfspr	r13, SPRN_SPRG_SCRATCH0		/* r13 = original r13 */
	b	kvmppc_resume_\intno		/* Get back original handler */

1:	tophys(r13, r13)
	stw	r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13)
	mfspr	r12, SPRN_SPRG_SCRATCH1
	stw	r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13)
	lbz	r12, (SHADOW_VCPU_OFF + SVCPU_IN_GUEST)(r13)
	cmpwi	r12, KVM_GUEST_MODE_NONE
	bne	..kvmppc_handler_hasmagic_\intno
	/* No KVM guest? Then jump back to the Linux handler! */
	lwz	r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13)
	b	2b

	/* Now we know we're handling a KVM guest */
..kvmppc_handler_hasmagic_\intno:

	/* Should we just skip the faulting instruction? */
	cmpwi	r12, KVM_GUEST_MODE_SKIP
	beq	kvmppc_handler_skip_ins

	/* Let's store which interrupt we're handling */
	li	r12, \intno

	/* Jump into the exit code that goes to the highmem handler */
	b	kvmppc_handler_trampoline_exit

.endm

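/*
 * Instantiate one trampoline per Book3S interrupt vector.  Each entry
 * decides whether the interrupt hit a KVM guest; if not, it falls back
 * to the regular Linux handler via kvmppc_resume_<vector>.
 */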
INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_SYSTEM_RESET
INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_MACHINE_CHECK
INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_DATA_STORAGE
INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_INST_STORAGE
INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_EXTERNAL
INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_ALIGNMENT
INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_PROGRAM
INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_FP_UNAVAIL
INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_DECREMENTER
INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_SYSCALL
INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_TRACE
INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_PERFMON
INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_ALTIVEC

/*
 * Bring us back to the faulting code, but skip the
 * faulting instruction.
 *
 * This is a generic exit path from the interrupt
 * trampolines above.
 *
 * Input Registers:
 *
 * R12            = free
 * R13            = Shadow VCPU (PACA)
 * SVCPU.SCRATCH0 = guest R12
 * SVCPU.SCRATCH1 = guest CR
 * SPRG_SCRATCH0  = guest R13
 *
 */
kvmppc_handler_skip_ins:

	/* Patch the IP to the next instruction */
	mfsrr0	r12
	addi	r12, r12, 4
	mtsrr0	r12

	/* Clean up all state */
	lwz	r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13)
	mtcr	r12
	PPC_LL	r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13)
	GET_SCRATCH0(r13)

	/* And get back into the code */
	RFI
#endif

/*
 * This trampoline brings us back to a real mode handler
 *
 * Input Registers:
 *
 * R5 = SRR0
 * R6 = SRR1
 * LR = real-mode IP
 *
 */
.global kvmppc_handler_lowmem_trampoline
kvmppc_handler_lowmem_trampoline:

	mtsrr0	r5
	mtsrr1	r6
	blr
kvmppc_handler_lowmem_trampoline_end:

/*
 * Call a function in real mode
 *
 * Input Registers:
 *
 * R3 = function
 * R4 = MSR
 * R5 = scratch register
 *
 */
_GLOBAL(kvmppc_rmcall)
	LOAD_REG_IMMEDIATE(r5, MSR_NOIRQ)
	mtmsr	r5		/* Disable relocation and interrupts, so mtsrr
				   doesn't get interrupted */
	sync
	mtsrr0	r3		/* function to call */
	mtsrr1	r4		/* MSR to run it with */
	RFI			/* branch to r3 with MSR = r4 */

#if defined(CONFIG_PPC_BOOK3S_32)
#define STACK_LR	INT_FRAME_SIZE+4

/* load_up_xxx have to run with MSR_DR=0 on Book3S_32 */
#define MSR_EXT_START						\
	PPC_STL	r20, _NIP(r1);					\
	mfmsr	r20;						\
	LOAD_REG_IMMEDIATE(r3, MSR_DR|MSR_EE);			\
	andc	r3,r20,r3;		/* Disable DR,EE */	\
	mtmsr	r3;						\
	sync

#define MSR_EXT_END						\
	mtmsr	r20;			/* Enable DR,EE */	\
	sync;							\
	PPC_LL	r20, _NIP(r1)

#elif defined(CONFIG_PPC_BOOK3S_64)
#define STACK_LR	_LINK
#define MSR_EXT_START
#define MSR_EXT_END
#endif
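
/*
 * On Book3S_64 the MSR_EXT_* hooks are empty and LR is kept in the normal
 * _LINK slot of the frame.  On Book3S_32, r20 carries the caller's MSR
 * across the load_up_xxx call; its original value is parked in the frame's
 * _NIP slot for the duration.
 */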

/*
 * Activate current's external feature (FPU/Altivec/VSX)
 */
#define define_load_up(what) 					\
								\
_GLOBAL(kvmppc_load_up_ ## what);				\
	PPC_STLU r1, -INT_FRAME_SIZE(r1);			\
	mflr	r3;						\
	PPC_STL	r3, STACK_LR(r1);				\
	MSR_EXT_START;						\
								\
	bl	FUNC(load_up_ ## what);				\
								\
	MSR_EXT_END;						\
	PPC_LL	r3, STACK_LR(r1);				\
	mtlr	r3;						\
	addi	r1, r1, INT_FRAME_SIZE;				\
	blr

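/* One wrapper per facility; Altivec and VSX only when the kernel has them. */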
define_load_up(fpu)
#ifdef CONFIG_ALTIVEC
define_load_up(altivec)
#endif
#ifdef CONFIG_VSX
define_load_up(vsx)
#endif

#include "book3s_segment.S"