/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#define SHADOW_SLB_ESID(num)	(SLBSHADOW_SAVEAREA + (num * 0x10))
#define SHADOW_SLB_VSID(num)	(SLBSHADOW_SAVEAREA + (num * 0x10) + 0x8)
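
/*
 * These offsets index the firmware SLB shadow buffer; each save-area
 * slot is an (esid, vsid) pair, 0x10 bytes apart. As a rough C sketch
 * (the entry layout is an assumption based on the offsets above):
 *
 *	struct shadow_slb_entry {
 *		u64 esid;	// +0x0, includes the SLB_ESID_V valid bit
 *		u64 vsid;	// +0x8, VSID plus flag bits
 *	};
 */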
#define UNBOLT_SLB_ENTRY(num) \
	ld	r9, SHADOW_SLB_ESID(num)(r12); \
	/* Invalid? Skip. */; \
	rldicl. r0, r9, 37, 63; \
	beq	slb_entry_skip_ ## num; \
	xoris	r9, r9, SLB_ESID_V@h; \
	std	r9, SHADOW_SLB_ESID(num)(r12); \
  slb_entry_skip_ ## num:
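
/*
 * UNBOLT_SLB_ENTRY in C terms, hedged ('shadow' stands for the save
 * area r12 points at; it is not a real variable in this file):
 *
 *	u64 esid = shadow[num].esid;
 *	if (esid & SLB_ESID_V)			// rldicl. extracts the V bit
 *		shadow[num].esid = esid & ~SLB_ESID_V;
 *
 * Clearing V in the shadow keeps the bolted entry from being restored
 * while the guest's SLB contents are loaded.
 */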

#define REBOLT_SLB_ENTRY(num) \
	ld	r10, SHADOW_SLB_ESID(num)(r11); \
	cmpdi	r10, 0; \
	beq	slb_exit_skip_ ## num; \
	oris	r10, r10, SLB_ESID_V@h; \
	ld	r9, SHADOW_SLB_VSID(num)(r11); \
	slbmte	r9, r10; \
	std	r10, SHADOW_SLB_ESID(num)(r11); \
slb_exit_skip_ ## num:
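
/*
 * REBOLT_SLB_ENTRY undoes the unbolt; roughly, in C ('shadow' is the
 * save area r11 points at, slbmte() stands for the instruction):
 *
 *	u64 esid = shadow[num].esid;
 *	if (esid) {
 *		esid |= SLB_ESID_V;		// mark the entry valid again
 *		slbmte(shadow[num].vsid, esid);
 *		shadow[num].esid = esid;
 *	}
 *
 * The skip label must take the macro argument; a fixed label would
 * clash when the macro is expanded more than once.
 */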

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

.global kvmppc_handler_trampoline_enter
kvmppc_handler_trampoline_enter:

	/* Required state:
	 *
	 * MSR = ~IR|DR
	 * R13 = PACA
	 * R9 = guest IP
	 * R10 = guest MSR
	 * R11 = free
	 * R12 = free
	 * PACA[PACA_EXMC + EX_R9] = guest R9
	 * PACA[PACA_EXMC + EX_R10] = guest R10
	 * PACA[PACA_EXMC + EX_R11] = guest R11
	 * PACA[PACA_EXMC + EX_R12] = guest R12
	 * PACA[PACA_EXMC + EX_R13] = guest R13
	 * PACA[PACA_EXMC + EX_CCR] = guest CR
	 * PACA[PACA_EXMC + EX_R3] = guest XER
	 */
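
	/* As a hedged C sketch of the caller's side of that contract
	 * ('exmc' stands for the PACA_EXMC save area; field names are
	 * illustrative, mirroring the EX_* offsets):
	 *
	 *	exmc->r9  = guest_r9;	exmc->r10 = guest_r10;
	 *	exmc->r11 = guest_r11;	exmc->r12 = guest_r12;
	 *	exmc->r13 = guest_r13;
	 *	exmc->ccr = guest_cr;
	 *	exmc->r3  = guest_xer;	// XER travels in the EX_R3 slot
	 *	// then branch here with r9 = guest PC, r10 = guest MSR
	 */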

	mtsrr0	r9
	mtsrr1	r10

	mtspr	SPRN_SPRG_SCRATCH0, r0

	/* Remove LPAR shadow entries */

#if SLB_NUM_BOLTED == 3

	ld	r12, PACA_SLBSHADOWPTR(r13)

	/* Save off the first entry so we can slbie it later */
	ld	r10, SHADOW_SLB_ESID(0)(r12)
	ld	r11, SHADOW_SLB_VSID(0)(r12)

	/* Remove bolted entries */
	UNBOLT_SLB_ENTRY(0)
	UNBOLT_SLB_ENTRY(1)
	UNBOLT_SLB_ENTRY(2)

#else
#error unknown number of bolted entries
#endif

	/* Flush SLB */

	slbia

	/* r10 = esid & ESID_MASK */
	rldicr  r10, r10, 0, 35
	/* r10 |= CLASS_BIT(VSID) */
	rldic   r12, r11, 56 - 36, 36
	or      r10, r10, r12
	slbie	r10

	isync
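
	/* A C sketch of the slbie operand built above (slightly
	 * simplified: the rldic moves a whole byte, but only the class
	 * bit is meaningful; slbie() stands for the instruction):
	 *
	 *	u64 rb = esid & ESID_MASK;		// keep the ESID part
	 *	rb |= (vsid & SLB_VSID_C) << 20;	// class bit into slbie's B field
	 *	slbie(rb);
	 *
	 * The class bit has to match what slbmte installed, which is why
	 * it is dug out of the saved VSID word.
	 */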

	/* Fill SLB with our shadow */

	lbz	r12, PACA_KVM_SLB_MAX(r13)
	mulli	r12, r12, 16
	addi	r12, r12, PACA_KVM_SLB
	add	r12, r12, r13

	/* for (r11 = kvm_slb; r11 < kvm_slb + kvm_slb_max * 16; r11 += 16) */
	li	r11, PACA_KVM_SLB
	add	r11, r11, r13

slb_loop_enter:

	ld	r10, 0(r11)

	rldicl. r0, r10, 37, 63
	beq	slb_loop_enter_skip

	ld	r9, 8(r11)
	slbmte	r9, r10

slb_loop_enter_skip:
	addi	r11, r11, 16
	cmpd	cr0, r11, r12
	blt	slb_loop_enter
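
/*
 * The fill loop in C terms (a sketch; 'slb' points at paca->kvm_slb,
 * entries are esid at +0 and vsid at +8 to match the ld offsets):
 *
 *	for (i = 0; i < kvm_slb_max; i++) {
 *		if (!(slb[i].esid & SLB_ESID_V))	// rldicl. tests V
 *			continue;
 *		slbmte(slb[i].vsid, slb[i].esid);
 *	}
 */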

slb_do_enter:

	/* Enter guest */

	mfspr	r0, SPRN_SPRG_SCRATCH0

	ld	r9, (PACA_EXMC+EX_R9)(r13)
	ld	r10, (PACA_EXMC+EX_R10)(r13)
	ld	r12, (PACA_EXMC+EX_R12)(r13)

	lwz	r11, (PACA_EXMC+EX_CCR)(r13)
	mtcr	r11

	ld	r11, (PACA_EXMC+EX_R3)(r13)
	mtxer	r11

	ld	r11, (PACA_EXMC+EX_R11)(r13)
	ld	r13, (PACA_EXMC+EX_R13)(r13)

	RFI
kvmppc_handler_trampoline_enter_end:



/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

.global kvmppc_handler_trampoline_exit
kvmppc_handler_trampoline_exit:

	/* Register usage at this point:
	 *
	 * SPRG_SCRATCH0 = guest R13
	 * R01           = host R1
	 * R02           = host R2
	 * R10           = guest PC
	 * R11           = guest MSR
	 * R12           = exit handler id
	 * R13           = PACA
	 * PACA.exmc.CCR = guest CR
	 * PACA.exmc.R9  = guest R1
	 * PACA.exmc.R10 = guest R10
	 * PACA.exmc.R11 = guest R11
	 * PACA.exmc.R12 = guest R12
	 * PACA.exmc.R13 = guest R2
	 *
	 */

	/* Save registers */

	std	r0, (PACA_EXMC+EX_SRR0)(r13)
	std	r9, (PACA_EXMC+EX_R3)(r13)
	std	r10, (PACA_EXMC+EX_LR)(r13)
	std	r11, (PACA_EXMC+EX_DAR)(r13)

	/*
	 * To easily get at the last instruction, the one we took the
	 * #vmexit on, we exploit the fact that the virtual memory layout
	 * is still the same here, so we can simply ld from the guest's
	 * PC address.
	 */

	/* We only load the last instruction when it's safe */
	cmpwi	r12, BOOK3S_INTERRUPT_DATA_STORAGE
	beq	ld_last_inst
	cmpwi	r12, BOOK3S_INTERRUPT_PROGRAM
	beq	ld_last_inst

	b	no_ld_last_inst

ld_last_inst:
	/* Save off the guest instruction we're at */
	/*    1) enable paging for data */
	mfmsr	r9
	ori	r11, r9, MSR_DR			/* Enable paging for data */
	mtmsr	r11
	/*    2) fetch the instruction */
	lwz	r0, 0(r10)
	/*    3) disable paging again */
	mtmsr	r9
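
	/* In effect (a C sketch of the three steps above; mfmsr()/mtmsr()
	 * are the usual accessors):
	 *
	 *	unsigned long msr = mfmsr();
	 *	mtmsr(msr | MSR_DR);		// data relocation on
	 *	last_inst = *(u32 *)guest_pc;	// guest mapping still in the SLB
	 *	mtmsr(msr);			// back to real-mode data accesses
	 *
	 * Only attempted for exits where the guest PC is known to still be
	 * mapped, hence the DATA_STORAGE/PROGRAM filter above.
	 */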

no_ld_last_inst:

	/* Restore bolted entries from the shadow and fix it along the way */

	/* We don't store anything in entry 0, so we don't need to take care of it */
	slbia
	isync

#if SLB_NUM_BOLTED == 3

	ld	r11, PACA_SLBSHADOWPTR(r13)

	REBOLT_SLB_ENTRY(0)
	REBOLT_SLB_ENTRY(1)
	REBOLT_SLB_ENTRY(2)

#else
#error unknown number of bolted entries
#endif

slb_do_exit:

	/* Restore registers */

	ld	r11, (PACA_EXMC+EX_DAR)(r13)
	ld	r10, (PACA_EXMC+EX_LR)(r13)
	ld	r9, (PACA_EXMC+EX_R3)(r13)

	/* Save last inst */
	stw	r0, (PACA_EXMC+EX_LR)(r13)

	/* Save DAR and DSISR before going to paged mode */
	mfdar	r0
	std	r0, (PACA_EXMC+EX_DAR)(r13)
	mfdsisr	r0
	stw	r0, (PACA_EXMC+EX_DSISR)(r13)

	/* RFI into the highmem handler */
	mfmsr	r0
	ori	r0, r0, MSR_IR|MSR_DR|MSR_RI	/* Enable paging */
	mtsrr1	r0
	ld	r0, PACASAVEDMSR(r13)		/* Highmem handler address */
	mtsrr0	r0
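
	/* rfi loads the PC from SRR0 and the MSR from SRR1, so the above
	 * amounts to (sketch; note PACASAVEDMSR carries the handler
	 * address here, despite its name):
	 *
	 *	srr1 = mfmsr() | MSR_IR | MSR_DR | MSR_RI;	// paging on
	 *	srr0 = paca->saved_msr;		// highmem handler entry point
	 *	rfi();				// jump there with the new MSR
	 */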

	mfspr	r0, SPRN_SPRG_SCRATCH0

	RFI
kvmppc_handler_trampoline_exit_end: