/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>

#if defined(CONFIG_PPC_BOOK3S_64)

#define ULONG_SIZE 		8
#define FUNC(name) 		GLUE(.,name)

#define GET_SHADOW_VCPU(reg)    \
        addi    reg, r13, PACA_KVM_SVCPU

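/* Clear MSR_EE without disturbing the other MSR bits: rotate MSR left
 * by 48 so the EE bit (0x8000) lands in the MSB, mask it off with the
 * rldicl, rotate back with the rotldi (48 + 16 = 64), and write the
 * result using the L=1 form of mtmsrd, which only alters EE and RI. */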
#define DISABLE_INTERRUPTS	\
	mfmsr   r0;		\
	rldicl  r0,r0,48,1;	\
	rotldi  r0,r0,16;	\
	mtmsrd  r0,1;		\

#elif defined(CONFIG_PPC_BOOK3S_32)

#define ULONG_SIZE              4
#define FUNC(name)		name

#define GET_SHADOW_VCPU(reg)    \
        lwz     reg, (THREAD + THREAD_KVM_SVCPU)(r2)

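/* Same idea on 32-bit: the rlwinm wrap-around mask (bits 17..15) keeps
 * everything except bit 16, which is MSR_EE (0x8000) in IBM bit
 * numbering, so only external interrupt enable is cleared. */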
#define DISABLE_INTERRUPTS	\
	mfmsr   r0;		\
	rlwinm  r0,r0,0,17,15;	\
	mtmsr   r0;		\

#endif /* CONFIG_PPC_BOOK3S_XX */


#define VCPU_GPR(n)		(VCPU_GPRS + (n * ULONG_SIZE))
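/* The rN mnemonics from <asm/ppc_asm.h> expand to plain register
 * numbers, so VCPU_GPR(r14) is simply the byte offset of guest gpr[14]
 * inside the vcpu: VCPU_GPRS + 14 * ULONG_SIZE. */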
#define VCPU_LOAD_NVGPRS(vcpu) \
	PPC_LL	r14, VCPU_GPR(r14)(vcpu); \
	PPC_LL	r15, VCPU_GPR(r15)(vcpu); \
	PPC_LL	r16, VCPU_GPR(r16)(vcpu); \
	PPC_LL	r17, VCPU_GPR(r17)(vcpu); \
	PPC_LL	r18, VCPU_GPR(r18)(vcpu); \
	PPC_LL	r19, VCPU_GPR(r19)(vcpu); \
	PPC_LL	r20, VCPU_GPR(r20)(vcpu); \
	PPC_LL	r21, VCPU_GPR(r21)(vcpu); \
	PPC_LL	r22, VCPU_GPR(r22)(vcpu); \
	PPC_LL	r23, VCPU_GPR(r23)(vcpu); \
	PPC_LL	r24, VCPU_GPR(r24)(vcpu); \
	PPC_LL	r25, VCPU_GPR(r25)(vcpu); \
	PPC_LL	r26, VCPU_GPR(r26)(vcpu); \
	PPC_LL	r27, VCPU_GPR(r27)(vcpu); \
	PPC_LL	r28, VCPU_GPR(r28)(vcpu); \
	PPC_LL	r29, VCPU_GPR(r29)(vcpu); \
	PPC_LL	r30, VCPU_GPR(r30)(vcpu); \
	PPC_LL	r31, VCPU_GPR(r31)(vcpu); \

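/* Only the non-volatile GPRs (r14-r31) are handled here; the guest's
 * volatile registers live in the shadow vcpu and are presumably
 * swapped in by the real-mode entry code just before the guest runs. */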
/*****************************************************************************
 *                                                                           *
 *     Guest entry / exit code that is in kernel module memory (highmem)     *
 *                                                                           *
 ****************************************************************************/

/* Registers:
 *  r3: kvm_run pointer
 *  r4: vcpu pointer
 */
_GLOBAL(__kvmppc_vcpu_entry)

kvm_start_entry:
	/* Save LR in the caller's stack frame */
	mflr	r0
	PPC_STL	r0,PPC_LR_STKOFF(r1)

	/* Save host state to the stack */
	PPC_STLU r1, -SWITCH_FRAME_SIZE(r1)

	/* Save r3 (kvm_run) and r4 (vcpu) */
	SAVE_2GPRS(3, r1)
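	/* SAVE_2GPRS stashes r3/r4 in this frame's GPR3/GPR4 slots; the
	 * exit path fetches them back via REST_2GPRS and the load from
	 * GPR4(r1) in kvmppc_handler_highmem. */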

	/* Save non-volatile registers (r14 - r31) */
	SAVE_NVGPRS(r1)

	/* Save LR again in the new frame's _LINK slot */
	PPC_STL	r0, _LINK(r1)

	/* Load non-volatile guest state from the vcpu */
	VCPU_LOAD_NVGPRS(r4)

	GET_SHADOW_VCPU(r5)

	/* Save host R1/R2 in the shadow vcpu (in the PACA on 64-bit) */
	PPC_STL	r1, SVCPU_HOST_R1(r5)
	PPC_STL	r2, SVCPU_HOST_R2(r5)

	/* XXX swap in/out on load? */
	PPC_LL	r3, VCPU_HIGHMEM_HANDLER(r4)
	PPC_STL	r3, SVCPU_VMHANDLER(r5)
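	/* The shadow vcpu now knows our module-space exit handler
	 * (kvmppc_handler_highmem below), so the real-mode exit code can
	 * find its way back here after a guest exit. */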

kvm_start_lightweight:

	PPC_LL	r10, VCPU_SHADOW_MSR(r4)	/* r10 = vcpu->arch.shadow_msr */

	DISABLE_INTERRUPTS
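	/* Interrupts stay hard-disabled from here until the guest runs:
	 * we are about to leave host context behind, and a host interrupt
	 * handler could not run safely on the half-switched state. */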

#ifdef CONFIG_PPC_BOOK3S_64
	/* Some guests may need to have dcbz set to 32 byte length.
	 *
	 * Usually we ensure that by patching the guest's instructions
	 * to trap on dcbz and emulate it in the hypervisor.
	 *
	 * If we can, we should tell the CPU to use 32 byte dcbz though,
	 * because that's a lot faster.
	 */

	PPC_LL	r3, VCPU_HFLAGS(r4)
	rldicl.	r3, r3, 0, 63		/* CR = ((r3 & 1) == 0) */
	beq	no_dcbz32_on

	mfspr   r3,SPRN_HID5
	ori     r3, r3, 0x80		/* XXX HID5_dcbz32 = 0x80 */
	mtspr   SPRN_HID5,r3

no_dcbz32_on:

#endif /* CONFIG_PPC_BOOK3S_64 */

	PPC_LL	r6, VCPU_RMCALL(r4)
	mtctr	r6

	PPC_LL	r3, VCPU_TRAMPOLINE_ENTER(r4)
	LOAD_REG_IMMEDIATE(r4, MSR_KERNEL & ~(MSR_IR | MSR_DR))

	/* Jump to segment patching handler and into our guest */
	bctr
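	/* CTR holds vcpu->arch.rmcall, a stub reachable in real mode; r3
	 * carries the entry trampoline's address and r4 an MSR with IR/DR
	 * cleared. The stub presumably moves r3/r4 into SRR0/SRR1 and
	 * RFIs, entering the trampoline with translation switched off. */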

/*
 * This is the handler in module memory. It gets jumped to from the
 * lowmem trampoline code, so it's basically the guest exit code.
 *
 */

.global kvmppc_handler_highmem
kvmppc_handler_highmem:

	/*
	 * Register usage at this point:
	 *
	 * R1       = host R1
	 * R2       = host R2
	 * R12      = exit handler id
	 * R13      = PACA
	 * SVCPU.*  = guest *
	 *
	 */

	/* R7 = vcpu (reload from the GPR4 stack slot saved at entry) */
	PPC_LL	r7, GPR4(r1)

#ifdef CONFIG_PPC_BOOK3S_64

	PPC_LL	r5, VCPU_HFLAGS(r7)
	rldicl.	r5, r5, 0, 63		/* CR = ((r5 & 1) == 0) */
	beq	no_dcbz32_off

	li	r4, 0
	mfspr   r5,SPRN_HID5
	rldimi  r5,r4,6,56		/* clear HID5_dcbz32 again */
	mtspr   SPRN_HID5,r5

no_dcbz32_off:

#endif /* CONFIG_PPC_BOOK3S_64 */

	PPC_STL	r14, VCPU_GPR(r14)(r7)
	PPC_STL	r15, VCPU_GPR(r15)(r7)
	PPC_STL	r16, VCPU_GPR(r16)(r7)
	PPC_STL	r17, VCPU_GPR(r17)(r7)
	PPC_STL	r18, VCPU_GPR(r18)(r7)
	PPC_STL	r19, VCPU_GPR(r19)(r7)
	PPC_STL	r20, VCPU_GPR(r20)(r7)
	PPC_STL	r21, VCPU_GPR(r21)(r7)
	PPC_STL	r22, VCPU_GPR(r22)(r7)
	PPC_STL	r23, VCPU_GPR(r23)(r7)
	PPC_STL	r24, VCPU_GPR(r24)(r7)
	PPC_STL	r25, VCPU_GPR(r25)(r7)
	PPC_STL	r26, VCPU_GPR(r26)(r7)
	PPC_STL	r27, VCPU_GPR(r27)(r7)
	PPC_STL	r28, VCPU_GPR(r28)(r7)
	PPC_STL	r29, VCPU_GPR(r29)(r7)
	PPC_STL	r30, VCPU_GPR(r30)(r7)
	PPC_STL	r31, VCPU_GPR(r31)(r7)
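	/* Only the non-volatiles need saving here; the guest's volatile
	 * GPRs were already captured in the shadow vcpu by the real-mode
	 * exit path (see the SVCPU.* note above). */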

	/* Load host MSR into r6 (moved to SRR1 or restored via mtmsr below) */
	PPC_LL	r6, VCPU_HOST_MSR(r7)

	/*
	 * For some interrupts, we need to call the real Linux
	 * handler, so it can do work for us. This has to happen
	 * as if the interrupt arrived from the kernel though,
	 * so let's fake it here where most state is restored.
	 *
	 * Call Linux for hardware interrupts/decrementer;
	 * r12 holds the exit reason.
	 */

	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	beq	call_linux_handler
	cmpwi	r12, BOOK3S_INTERRUPT_DECREMENTER
	beq	call_linux_handler
	cmpwi	r12, BOOK3S_INTERRUPT_PERFMON
	beq	call_linux_handler

	/* Back to EE=1 */
	mtmsr	r6
	sync
	b	kvm_return_point

call_linux_handler:

	/*
	 * If we land here we need to jump back to the handler we
	 * came from.
	 *
	 * We have a page that we can access from real mode, so let's
	 * jump back to that and use it as a trampoline to get back into the
	 * interrupt handler!
	 *
	 * R12 still contains the exit code,
	 * R5 VCPU_HOST_RETIP and
	 * R6 VCPU_HOST_MSR
	 */

	/* Restore host IP -> SRR0 */
	PPC_LL	r5, VCPU_HOST_RETIP(r7)

	/* XXX Better move to a safe function?
	 *     What if we get an HTAB flush in between mtsrr0 and mtsrr1? */

	mtlr	r12

	PPC_LL	r4, VCPU_TRAMPOLINE_LOWMEM(r7)
	mtsrr0	r4
	LOAD_REG_IMMEDIATE(r3, MSR_KERNEL & ~(MSR_IR | MSR_DR))
	mtsrr1	r3

	RFI
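	/* LR = r12: the exit ids equal the hardware exception vector
	 * offsets, so the lowmem trampoline can branch straight to the
	 * original vector through LR. It presumably loads SRR0/SRR1 from
	 * r5/r6 (host_retip / host MSR) first, so the Linux handler sees
	 * an interrupt that apparently arrived in the host kernel. */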

.global kvm_return_point
kvm_return_point:

	/* Jump back to lightweight entry if we're supposed to
	 * go back into the guest */

	/* Pass the exit number as 3rd argument to kvmppc_handle_exit */
	mr	r5, r12

	/* Restore r3 (kvm_run) and r4 (vcpu) */
	REST_2GPRS(3, r1)
	bl	FUNC(kvmppc_handle_exit)

	/* If RESUME_GUEST, get back in the loop */
	cmpwi	r3, RESUME_GUEST
	beq	kvm_loop_lightweight

	cmpwi	r3, RESUME_GUEST_NV
	beq	kvm_loop_heavyweight
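	/* Two re-entry flavours: kvm_loop_lightweight assumes r14-r31
	 * still hold guest values and only reloads the vcpu pointer,
	 * while kvm_loop_heavyweight (RESUME_GUEST_NV) reloads all guest
	 * non-volatiles because the exit handler may have clobbered them. */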

kvm_exit_loop:

	PPC_LL	r4, _LINK(r1)
	mtlr	r4

	/* Restore non-volatile host registers (r14 - r31) */
	REST_NVGPRS(r1)

	addi    r1, r1, SWITCH_FRAME_SIZE
	blr

kvm_loop_heavyweight:

	PPC_LL	r4, _LINK(r1)
	PPC_STL r4, (PPC_LR_STKOFF + SWITCH_FRAME_SIZE)(r1)

	/* Reload r3 (kvm_run) and r4 (vcpu) */
	REST_2GPRS(3, r1)

	/* Load non-volatile guest state from the vcpu */
	VCPU_LOAD_NVGPRS(r4)

	/* Jump back to the lightweight guest entry */
	b	kvm_start_lightweight

kvm_loop_lightweight:

	/* We'll need the vcpu pointer */
	REST_GPR(4, r1)

	/* Jump back to the lightweight guest entry */
	b	kvm_start_lightweight