/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>

#if defined(CONFIG_PPC_BOOK3S_64)
#define FUNC(name)		GLUE(.,name)
#define GET_SHADOW_VCPU(reg)	addi	reg, r13, PACA_SVCPU

#elif defined(CONFIG_PPC_BOOK3S_32)
#define FUNC(name)		name
#define GET_SHADOW_VCPU(reg)	lwz	reg, (THREAD + THREAD_KVM_SVCPU)(r2)

#endif /* CONFIG_PPC_BOOK3S_XX */

#define VCPU_LOAD_NVGPRS(vcpu) \
	PPC_LL	r14, VCPU_GPR(R14)(vcpu); \
	PPC_LL	r15, VCPU_GPR(R15)(vcpu); \
	PPC_LL	r16, VCPU_GPR(R16)(vcpu); \
	PPC_LL	r17, VCPU_GPR(R17)(vcpu); \
	PPC_LL	r18, VCPU_GPR(R18)(vcpu); \
	PPC_LL	r19, VCPU_GPR(R19)(vcpu); \
	PPC_LL	r20, VCPU_GPR(R20)(vcpu); \
	PPC_LL	r21, VCPU_GPR(R21)(vcpu); \
	PPC_LL	r22, VCPU_GPR(R22)(vcpu); \
	PPC_LL	r23, VCPU_GPR(R23)(vcpu); \
	PPC_LL	r24, VCPU_GPR(R24)(vcpu); \
	PPC_LL	r25, VCPU_GPR(R25)(vcpu); \
	PPC_LL	r26, VCPU_GPR(R26)(vcpu); \
	PPC_LL	r27, VCPU_GPR(R27)(vcpu); \
	PPC_LL	r28, VCPU_GPR(R28)(vcpu); \
	PPC_LL	r29, VCPU_GPR(R29)(vcpu); \
	PPC_LL	r30, VCPU_GPR(R30)(vcpu); \
	PPC_LL	r31, VCPU_GPR(R31)(vcpu); \

/*****************************************************************************
 *                                                                           *
 *     Guest entry / exit code that is in kernel module memory (highmem)     *
 *                                                                           *
 ****************************************************************************/

/* Registers:
 *  r3: kvm_run pointer
 *  r4: vcpu pointer
 */
_GLOBAL(__kvmppc_vcpu_run)

kvm_start_entry:
	/* Write correct stack frame */
	mflr	r0
	PPC_STL	r0,PPC_LR_STKOFF(r1)

	/* Save host state to the stack */
	PPC_STLU r1, -SWITCH_FRAME_SIZE(r1)

	/* Save r3 (kvm_run) and r4 (vcpu) */
	SAVE_2GPRS(3, r1)

	/* Save non-volatile registers (r14 - r31) */
	SAVE_NVGPRS(r1)

	/* Save CR */
	mfcr	r14
	stw	r14, _CCR(r1)

	/* Save LR */
	PPC_STL	r0, _LINK(r1)

	/* Load non-volatile guest state from the vcpu */
	VCPU_LOAD_NVGPRS(r4)

kvm_start_lightweight:
	/* Copy registers into shadow vcpu so we can access them in real mode */
	GET_SHADOW_VCPU(r3)
	bl	FUNC(kvmppc_copy_to_svcpu)
	nop
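	/*
	 * Reload the vcpu pointer (r4); it is volatile and may have been
	 * clobbered by the call above.
	 */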
	REST_GPR(4, r1)

#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * Get the dcbz32 flag and hand it to the real-mode entry code,
	 * which uses it to decide whether to enable 32-byte dcbz (HID5)
	 * for the guest.
	 */
	PPC_LL	r3, VCPU_HFLAGS(r4)
	rldicl	r3, r3, 0, 63		/* r3 &= 1 */
	stb	r3, HSTATE_RESTORE_HID5(r13)

	/* Load up guest SPRG3 value, since it's user readable */
	lwz	r3, VCPU_SHAREDBE(r4)
	cmpwi	r3, 0
	ld	r5, VCPU_SHARED(r4)
	beq	sprg3_little_endian
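	/*
	 * The shared page is kept in guest endianness: load SPRG3 with a
	 * plain ld when that matches the host, byte-reversed via ldbrx
	 * when it does not.
	 */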
sprg3_big_endian:
#ifdef __BIG_ENDIAN__
	ld	r3, VCPU_SHARED_SPRG3(r5)
#else
	addi	r5, r5, VCPU_SHARED_SPRG3
	ldbrx	r3, 0, r5
#endif
	b	after_sprg3_load
sprg3_little_endian:
#ifdef __LITTLE_ENDIAN__
	ld	r3, VCPU_SHARED_SPRG3(r5)
#else
	addi	r5, r5, VCPU_SHARED_SPRG3
	ldbrx	r3, 0, r5
#endif

after_sprg3_load:
	mtspr	SPRN_SPRG3, r3
#endif /* CONFIG_PPC_BOOK3S_64 */

	PPC_LL	r4, VCPU_SHADOW_MSR(r4)	/* get shadow_msr */

	/* Jump to segment patching handler and into our guest */
	bl	FUNC(kvmppc_entry_trampoline)
	nop

/*
 * This is the handler in module memory. It is jumped to from the
 * lowmem trampoline code, so it is effectively the guest exit code.
 */

	/*
	 * Register usage at this point:
	 *
	 * R1       = host R1
	 * R2       = host R2
	 * R12      = exit handler id
	 * R13      = PACA
	 * SVCPU.*  = guest *
	 * MSR.EE   = 1
	 *
	 */

	PPC_LL	r3, GPR4(r1)		/* vcpu pointer */

	/*
	 * kvmppc_copy_from_svcpu can clobber volatile registers, save
	 * the exit handler id to the vcpu and restore it from there later.
	 */
	stw	r12, VCPU_TRAP(r3)

	/* Transfer reg values from shadow vcpu back to vcpu struct */
	/* On 64-bit, interrupts are still off at this point */

	GET_SHADOW_VCPU(r4)
	bl	FUNC(kvmppc_copy_from_svcpu)
	nop

#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * Reload kernel SPRG3 value.
	 * No need to save guest value as usermode can't modify SPRG3.
	 */
	ld	r3, PACA_SPRG_VDSO(r13)
	mtspr	SPRN_SPRG_VDSO_WRITE, r3
#endif /* CONFIG_PPC_BOOK3S_64 */

	/* R7 = vcpu */
	PPC_LL	r7, GPR4(r1)

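	/* Save the non-volatile guest registers (r14 - r31) back into the vcpu */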
	PPC_STL	r14, VCPU_GPR(R14)(r7)
	PPC_STL	r15, VCPU_GPR(R15)(r7)
	PPC_STL	r16, VCPU_GPR(R16)(r7)
	PPC_STL	r17, VCPU_GPR(R17)(r7)
	PPC_STL	r18, VCPU_GPR(R18)(r7)
	PPC_STL	r19, VCPU_GPR(R19)(r7)
	PPC_STL	r20, VCPU_GPR(R20)(r7)
	PPC_STL	r21, VCPU_GPR(R21)(r7)
	PPC_STL	r22, VCPU_GPR(R22)(r7)
	PPC_STL	r23, VCPU_GPR(R23)(r7)
	PPC_STL	r24, VCPU_GPR(R24)(r7)
	PPC_STL	r25, VCPU_GPR(R25)(r7)
	PPC_STL	r26, VCPU_GPR(R26)(r7)
	PPC_STL	r27, VCPU_GPR(R27)(r7)
	PPC_STL	r28, VCPU_GPR(R28)(r7)
	PPC_STL	r29, VCPU_GPR(R29)(r7)
	PPC_STL	r30, VCPU_GPR(R30)(r7)
	PPC_STL	r31, VCPU_GPR(R31)(r7)

	/* Pass the exit number as 3rd argument to kvmppc_handle_exit_pr */
	lwz	r5, VCPU_TRAP(r7)

	/* Restore r3 (kvm_run) and r4 (vcpu) */
	REST_2GPRS(3, r1)
	bl	FUNC(kvmppc_handle_exit_pr)

	/* If RESUME_GUEST, get back in the loop */
	cmpwi	r3, RESUME_GUEST
	beq	kvm_loop_lightweight

	cmpwi	r3, RESUME_GUEST_NV
	beq	kvm_loop_heavyweight

kvm_exit_loop:

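	/* Restore LR */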
	PPC_LL	r4, _LINK(r1)
	mtlr	r4

	/* Restore CR */
	lwz	r14, _CCR(r1)
	mtcr	r14

	/* Restore non-volatile host registers (r14 - r31) */
	REST_NVGPRS(r1)

	addi	r1, r1, SWITCH_FRAME_SIZE
	blr

kvm_loop_heavyweight:

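	/*
	 * RESUME_GUEST_NV: the exit handler may have changed the guest's
	 * non-volatile registers, so reload them from the vcpu before
	 * re-entering the guest.
	 */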
	PPC_LL	r4, _LINK(r1)
	PPC_STL r4, (PPC_LR_STKOFF + SWITCH_FRAME_SIZE)(r1)

	/* Reload kvm_run (r3) and vcpu (r4) */
	REST_2GPRS(3, r1)

	/* Load non-volatile guest state from the vcpu */
	VCPU_LOAD_NVGPRS(r4)

	/* Jump back to the lightweight guest entry */
	b	kvm_start_lightweight

kvm_loop_lightweight:

	/* We only need the vcpu pointer; guest non-volatile registers are still loaded */
	REST_GPR(4, r1)

	/* Jump back to the lightweight guest entry */
	b	kvm_start_lightweight