/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2010
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

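/*
 * Runtime patch templates for KVM paravirtualization.  The patcher in
 * arch/powerpc/kernel/kvm.c copies these fragments over privileged
 * instructions in the guest kernel.  Each template ends in a "b ."
 * placeholder that is rewritten to branch back behind the patched
 * instruction, and the *_offs/*_len words after each template tell the
 * patcher which instruction slots to fix up (branch target, source GPR,
 * original instruction) and how long the template is.
 */
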
#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>

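/*
 * The magic (shared) page is mapped at the top of the guest's effective
 * address space, so with a base "register" of 0 (which D-form load/store
 * instructions treat as the literal value zero) the sign-extended -4096
 * displacement alone is sufficient to address it without reserving a GPR.
 */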
#define KVM_MAGIC_PAGE		(-4096)

#ifdef CONFIG_64BIT
#define LL64(reg, offs, reg2)	ld	reg, (offs)(reg2)
#define STL64(reg, offs, reg2)	std	reg, (offs)(reg2)
#else
#define LL64(reg, offs, reg2)	lwz	reg, (offs + 4)(reg2)
#define STL64(reg, offs, reg2)	stw	reg, (offs + 4)(reg2)
#endif
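
/*
 * The shared-page fields are 64 bits wide and big-endian, so 32-bit
 * builds access the low word of each field at offset + 4.
 */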

#define SCRATCH_SAVE							\
	/* Enable critical section. We are critical if			\
	   shared->critical == r1 */					\
	STL64(r1, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0);		\
									\
	/* Save state */						\
	PPC_STL	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH1)(0);		\
	PPC_STL	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH2)(0);		\
	mfcr	r31;							\
	stw	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH3)(0);

#define SCRATCH_RESTORE							\
	/* Restore state */						\
	PPC_LL	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH1)(0);		\
	lwz	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH3)(0);		\
	mtcr	r30;							\
	PPC_LL	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH2)(0);		\
									\
	/* Disable critical section. We are critical if			\
	   shared->critical == r1 and r2 is always != r1 */		\
	STL64(r2, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0);

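/*
 * While shared->critical equals the guest's r1, the host refrains from
 * injecting interrupts, so the scratch slots above cannot be clobbered
 * by a nested trap; storing r2 (which is never equal to the stack
 * pointer in r1) closes the critical window again.
 */
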
.global kvm_template_start
kvm_template_start:

.global kvm_emulate_mtmsrd
kvm_emulate_mtmsrd:

	SCRATCH_SAVE

	/* Put MSR & ~(MSR_EE|MSR_RI) in r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
	lis	r30, (~(MSR_EE | MSR_RI))@h
	ori	r30, r30, (~(MSR_EE | MSR_RI))@l
	and	r31, r31, r30

	/* OR the register's (MSR_EE|MSR_RI) bits into the MSR */
kvm_emulate_mtmsrd_reg:
	ori	r30, r0, 0
	andi.	r30, r30, (MSR_EE|MSR_RI)
	or	r31, r31, r30

	/* Put MSR back into magic page */
	STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Check if we have to fetch an interrupt */
	lwz	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
	cmpwi	r31, 0
	beq+	no_check

	/* Check if we may trigger an interrupt */
	andi.	r30, r30, MSR_EE
	beq	no_check

	SCRATCH_RESTORE

	/* Nag hypervisor */
kvm_emulate_mtmsrd_orig_ins:
	tlbsync

	b	kvm_emulate_mtmsrd_branch

no_check:

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_mtmsrd_branch:
	b	.
kvm_emulate_mtmsrd_end:

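/*
 * Word offsets into the template above, consumed by the patcher: the
 * "branch" slot is redirected back into the guest code, the "reg" slot
 * ("ori r30, r0, 0") has the r0 placeholder replaced by the actual
 * source GPR of the patched instruction, and the "orig_ins" slot (the
 * tlbsync placeholder) receives the original, trapping instruction.
 */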
.global kvm_emulate_mtmsrd_branch_offs
kvm_emulate_mtmsrd_branch_offs:
	.long (kvm_emulate_mtmsrd_branch - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_reg_offs
kvm_emulate_mtmsrd_reg_offs:
	.long (kvm_emulate_mtmsrd_reg - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_orig_ins_offs
kvm_emulate_mtmsrd_orig_ins_offs:
	.long (kvm_emulate_mtmsrd_orig_ins - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_len
kvm_emulate_mtmsrd_len:
	.long (kvm_emulate_mtmsrd_end - kvm_emulate_mtmsrd) / 4


#define MSR_SAFE_BITS (MSR_EE | MSR_RI)
#define MSR_CRITICAL_BITS ~MSR_SAFE_BITS

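/*
 * mtmsr: if only the "safe" bits (EE/RI) differ from the cached MSR,
 * update the copy in the magic page and stay in the guest; any change
 * to a critical bit falls back to the original, trapping mtmsr.
 */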
.global kvm_emulate_mtmsr
kvm_emulate_mtmsr:

	SCRATCH_SAVE

	/* Fetch old MSR into r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Find the changed bits between old and new MSR */
kvm_emulate_mtmsr_reg1:
	ori	r30, r0, 0
	xor	r31, r30, r31

	/* Check if we really need to do mtmsr */
	LOAD_REG_IMMEDIATE(r30, MSR_CRITICAL_BITS)
	and.	r31, r31, r30

	/* No critical bits changed? Maybe we can stay in the guest. */
	beq	maybe_stay_in_guest

do_mtmsr:

	SCRATCH_RESTORE

	/* Just fire off the mtmsr if it's critical */
kvm_emulate_mtmsr_orig_ins:
	mtmsr	r0

	b	kvm_emulate_mtmsr_branch

maybe_stay_in_guest:

	/* Get the target register into r30 */
kvm_emulate_mtmsr_reg2:
	ori	r30, r0, 0

	/* Put MSR into magic page because we don't call mtmsr */
	STL64(r30, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Check if we have to fetch an interrupt */
	lwz	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
	cmpwi	r31, 0
	beq+	no_mtmsr

	/* Check if we may trigger an interrupt */
	andi.	r31, r30, MSR_EE
	bne	do_mtmsr

no_mtmsr:

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_mtmsr_branch:
	b	.
kvm_emulate_mtmsr_end:

.global kvm_emulate_mtmsr_branch_offs
kvm_emulate_mtmsr_branch_offs:
	.long (kvm_emulate_mtmsr_branch - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_reg1_offs
kvm_emulate_mtmsr_reg1_offs:
	.long (kvm_emulate_mtmsr_reg1 - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_reg2_offs
kvm_emulate_mtmsr_reg2_offs:
	.long (kvm_emulate_mtmsr_reg2 - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_orig_ins_offs
kvm_emulate_mtmsr_orig_ins_offs:
	.long (kvm_emulate_mtmsr_orig_ins - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_len
kvm_emulate_mtmsr_len:
	.long (kvm_emulate_mtmsr_end - kvm_emulate_mtmsr) / 4

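/*
 * wrtee/wrteei only move MSR[EE], so the template folds the new EE bit
 * into the cached MSR and only executes the real, trapping wrtee when
 * interrupts are being enabled while one is already pending.
 */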
/* also used for wrteei 1 */
.global kvm_emulate_wrtee
kvm_emulate_wrtee:

	SCRATCH_SAVE

	/* Fetch old MSR into r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Insert new MSR[EE] */
kvm_emulate_wrtee_reg:
	ori	r30, r0, 0
	rlwimi	r31, r30, 0, MSR_EE

	/*
	 * If MSR[EE] is now set, check for a pending interrupt.
	 * We could skip this if MSR[EE] was already on, but that
	 * should be rare, so don't bother.
	 */
	andi.	r30, r30, MSR_EE

	/* Put MSR into magic page because we don't call wrtee */
	STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	beq	no_wrtee

	/* Check if we have to fetch an interrupt */
	lwz	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
	cmpwi	r30, 0
	bne	do_wrtee

no_wrtee:
	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_wrtee_branch:
	b	.

do_wrtee:
	SCRATCH_RESTORE

	/* Just fire off the wrtee if it's critical */
kvm_emulate_wrtee_orig_ins:
	wrtee	r0

	b	kvm_emulate_wrtee_branch

kvm_emulate_wrtee_end:

.global kvm_emulate_wrtee_branch_offs
kvm_emulate_wrtee_branch_offs:
	.long (kvm_emulate_wrtee_branch - kvm_emulate_wrtee) / 4

.global kvm_emulate_wrtee_reg_offs
kvm_emulate_wrtee_reg_offs:
	.long (kvm_emulate_wrtee_reg - kvm_emulate_wrtee) / 4

.global kvm_emulate_wrtee_orig_ins_offs
kvm_emulate_wrtee_orig_ins_offs:
	.long (kvm_emulate_wrtee_orig_ins - kvm_emulate_wrtee) / 4

.global kvm_emulate_wrtee_len
kvm_emulate_wrtee_len:
	.long (kvm_emulate_wrtee_end - kvm_emulate_wrtee) / 4

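/*
 * wrteei 0 can only mask interrupts, never unmask a pending one, so
 * clearing EE in the cached MSR is always sufficient; this template
 * needs no reg or orig_ins slot and never enters the hypervisor.
 */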
.global kvm_emulate_wrteei_0
kvm_emulate_wrteei_0:
	SCRATCH_SAVE

	/* Fetch old MSR into r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Remove MSR_EE from old MSR */
	rlwinm	r31, r31, 0, ~MSR_EE

	/* Write new MSR value back */
	STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_wrteei_0_branch:
	b	.
kvm_emulate_wrteei_0_end:

.global kvm_emulate_wrteei_0_branch_offs
kvm_emulate_wrteei_0_branch_offs:
	.long (kvm_emulate_wrteei_0_branch - kvm_emulate_wrteei_0) / 4

.global kvm_emulate_wrteei_0_len
kvm_emulate_wrteei_0_len:
	.long (kvm_emulate_wrteei_0_end - kvm_emulate_wrteei_0) / 4

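/*
 * mtsrin: while translation (MSR_IR/MSR_DR) is off, the segment
 * register update can simply be cached in the magic page's SR array;
 * with translation on, the original mtsrin must run so the hypervisor
 * can update its shadow state.
 */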
.global kvm_emulate_mtsrin
kvm_emulate_mtsrin:

	SCRATCH_SAVE

	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
	andi.	r31, r31, MSR_DR | MSR_IR
	beq	kvm_emulate_mtsrin_reg1

	SCRATCH_RESTORE

kvm_emulate_mtsrin_orig_ins:
	nop
	b	kvm_emulate_mtsrin_branch

kvm_emulate_mtsrin_reg1:
	/* r30 = 4 * SR number (top 4 bits of rX), i.e. (rX >> 26) & ~3,
	   the byte offset into the magic page's SR array */
	rlwinm  r30,r0,6,26,29

kvm_emulate_mtsrin_reg2:
	stw	r0, (KVM_MAGIC_PAGE + KVM_MAGIC_SR)(r30)

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_mtsrin_branch:
	b	.
kvm_emulate_mtsrin_end:

.global kvm_emulate_mtsrin_branch_offs
kvm_emulate_mtsrin_branch_offs:
	.long (kvm_emulate_mtsrin_branch - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_reg1_offs
kvm_emulate_mtsrin_reg1_offs:
	.long (kvm_emulate_mtsrin_reg1 - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_reg2_offs
kvm_emulate_mtsrin_reg2_offs:
	.long (kvm_emulate_mtsrin_reg2 - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_orig_ins_offs
kvm_emulate_mtsrin_orig_ins_offs:
	.long (kvm_emulate_mtsrin_orig_ins - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_len
kvm_emulate_mtsrin_len:
	.long (kvm_emulate_mtsrin_end - kvm_emulate_mtsrin) / 4

.global kvm_template_end
kvm_template_end: