xref: /openbmc/linux/arch/powerpc/kvm/tm.S (revision d4fd6347)
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Derived from book3s_hv_rmhandlers.S, which is:
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 */

#include <asm/reg.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/export.h>
#include <asm/tm.h>
#include <asm/cputable.h>

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
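/* Byte offset of checkpointed GPR "reg" within the vcpu struct. */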
#define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)

/*
 * Save transactional state and TM-related registers.
 * Called with:
 * - r3 pointing to the vcpu struct
 * - r4 containing the MSR with the current TS bits:
 *	(for HV KVM it is VCPU_MSR; for PR KVM it is the host MSR)
 * - r5 containing a flag indicating that the non-volatile registers
 *	must be preserved
 * If r5 == 0, this can modify all checkpointed registers, but
 * restores r1 and r2 before exit.  If r5 != 0, this restores the
 * MSR TM/FP/VEC/VSX bits to their state on entry.
 */
_GLOBAL(__kvmppc_save_tm)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu    r1, -SWITCH_FRAME_SIZE(r1)

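	/* r9 = vcpu; cr7 remembers whether non-volatiles must be preserved. */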
	mr	r9, r3
	cmpdi	cr7, r5, 0

	/* Turn on TM. */
	mfmsr	r8
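	/*
	 * Keep a copy of the entry MSR in r10; if the caller asked us to
	 * preserve state, a (TS-cleared) version of it is restored on exit.
	 */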
	mr	r10, r8
	li	r0, 1
	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
	ori     r8, r8, MSR_FP
	oris    r8, r8, (MSR_VEC | MSR_VSX)@h
	mtmsrd	r8

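	/*
	 * Rotate the two MSR[TS] bits down into the low bits; the record
	 * form sets cr0 so beq catches the non-transactional case.
	 */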
	rldicl. r4, r4, 64 - MSR_TS_S_LG, 62
	beq	1f	/* TM not active in guest. */

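	/* Stash r1 and the vcpu pointer in the PACA: treclaim trashes GPRs. */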
	std	r1, HSTATE_SCRATCH2(r13)
	std	r3, HSTATE_SCRATCH1(r13)

	/* Save CR on the stack - even if r5 == 0 we need to get cr7 back. */
	mfcr	r6
	SAVE_GPR(6, r1)

	/* Save DSCR so we can restore it to avoid running with user value */
	mfspr	r7, SPRN_DSCR
	SAVE_GPR(7, r1)

	/*
	 * We are going to do treclaim., which will modify all checkpointed
	 * registers.  Save the non-volatile registers on the stack if
	 * preservation of non-volatile state has been requested.
	 */
	beq	cr7, 3f
	SAVE_NVGPRS(r1)

	/* MSR[TS] will be 0 (non-transactional) once we do treclaim. */
	li	r0, 0
	rldimi	r10, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
	SAVE_GPR(10, r1)	/* final MSR value */
3:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
BEGIN_FTR_SECTION
	/* Emulation of treclaim needs TEXASR as it was before the treclaim */
	mfspr	r6, SPRN_TEXASR
	std	r6, VCPU_ORIG_TEXASR(r3)
END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST)
#endif

	/* Clear MSR[RI], since r1 and r13 are about to be clobbered. */
	li	r5, 0
	mtmsrd	r5, 1
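	/* (The L=1 form of mtmsrd updates only MSR[EE] and MSR[RI].) */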

	li	r3, TM_CAUSE_KVM_RESCHED

	/* All GPRs are volatile at this point. */
	TRECLAIM(R3)
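	/*
	 * The transaction is now reclaimed with failure cause
	 * TM_CAUSE_KVM_RESCHED; the registers hold the guest's
	 * checkpointed values, which we save below.
	 */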

	/* Temporarily store r13 and r9 so we have some regs to play with */
	SET_SCRATCH0(r13)
	GET_PACA(r13)
	std	r9, PACATMSCRATCH(r13)
	ld	r9, HSTATE_SCRATCH1(r13)

	/*
	 * Save the checkpointed r0, then get PPR out of the way early so we
	 * don't keep running at the user's priority (HMT_MEDIUM writes PPR).
	 */
	std	r0, VCPU_GPRS_TM(0)(r9)
	mfspr	r0, SPRN_PPR
	HMT_MEDIUM

	/* Save the checkpointed r1, then reload our stack pointer. */
	std	r1, VCPU_GPRS_TM(1)(r9)
	ld	r1, HSTATE_SCRATCH2(r13)

	/* Save the checkpointed r2, then set MSR[RI] now r1 and r13 are back. */
	std	r2, VCPU_GPRS_TM(2)(r9)
	li	r2, MSR_RI
	mtmsrd	r2, 1

	/* Reload TOC pointer. */
	ld	r2, PACATOC(r13)

	/* Save all but r0-r2, r9 & r13 */
	reg = 3
	.rept	29
	.if (reg != 9) && (reg != 13)
	std	reg, VCPU_GPRS_TM(reg)(r9)
	.endif
	reg = reg + 1
	.endr
	/* ... now save r13 */
	GET_SCRATCH0(r4)
	std	r4, VCPU_GPRS_TM(13)(r9)
	/* ... and save r9 */
	ld	r4, PACATMSCRATCH(r13)
	std	r4, VCPU_GPRS_TM(9)(r9)

	/* Restore host DSCR and CR values, after saving guest values */
	mfcr	r6
	mfspr	r7, SPRN_DSCR
	stw	r6, VCPU_CR_TM(r9)
	std	r7, VCPU_DSCR_TM(r9)
	REST_GPR(6, r1)
	REST_GPR(7, r1)
	mtcr	r6
	mtspr	SPRN_DSCR, r7

	/* Save away checkpointed SPRs. */
	std	r0, VCPU_PPR_TM(r9)
	mflr	r5
	mfctr	r7
	mfspr	r8, SPRN_AMR
	mfspr	r10, SPRN_TAR
	mfxer	r11
	std	r5, VCPU_LR_TM(r9)
	std	r7, VCPU_CTR_TM(r9)
	std	r8, VCPU_AMR_TM(r9)
	std	r10, VCPU_TAR_TM(r9)
	std	r11, VCPU_XER_TM(r9)

	/* Save FP/VSX. */
	addi	r3, r9, VCPU_FPRS_TM
	bl	store_fp_state
	addi	r3, r9, VCPU_VRS_TM
	bl	store_vr_state
	mfspr	r6, SPRN_VRSAVE
	stw	r6, VCPU_VRSAVE_TM(r9)

	/* Restore non-volatile registers if requested to */
	beq	cr7, 1f
	REST_NVGPRS(r1)
	REST_GPR(10, r1)
1:

	/*
	 * We need to save these SPRs after the treclaim so that the software
	 * error code is recorded correctly in the TEXASR.  Also the user may
	 * change these outside of a transaction, so they must always be
	 * context switched.
	 */
	mfspr	r7, SPRN_TEXASR
	std	r7, VCPU_TEXASR(r9)
	mfspr	r5, SPRN_TFHAR
	mfspr	r6, SPRN_TFIAR
	std	r5, VCPU_TFHAR(r9)
	std	r6, VCPU_TFIAR(r9)

	/* Restore MSR state if requested */
	beq	cr7, 2f
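	/* Full (L=0) MSR write: back to the entry MSR, with TS cleared. */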
	mtmsrd	r10, 0
2:
	addi	r1, r1, SWITCH_FRAME_SIZE
	ld	r0, PPC_LR_STKOFF(r1)
	mtlr	r0
	blr

/*
 * _kvmppc_save_tm_pr() is a wrapper around __kvmppc_save_tm(), so that it
 * can be invoked from C functions.  It is used by PR KVM only.
 */
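/*
 * C-side view, as a sketch; the prototype below is an assumption based on
 * the declarations kept in asm/asm-prototypes.h:
 *
 *	void _kvmppc_save_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr);
 */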
_GLOBAL(_kvmppc_save_tm_pr)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu    r1, -PPC_MIN_STKFRM(r1)

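	/*
	 * treclaim in __kvmppc_save_tm replaces TAR with the guest's
	 * checkpointed value, so preserve the host TAR across the call.
	 */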
	mfspr   r8, SPRN_TAR
	std	r8, PPC_MIN_STKFRM-8(r1)

	li	r5, 1		/* preserve non-volatile registers */
	bl	__kvmppc_save_tm

	ld	r8, PPC_MIN_STKFRM-8(r1)
	mtspr   SPRN_TAR, r8

	addi    r1, r1, PPC_MIN_STKFRM
	ld	r0, PPC_LR_STKOFF(r1)
	mtlr	r0
	blr

EXPORT_SYMBOL_GPL(_kvmppc_save_tm_pr);

/*
 * Restore transactional state and TM-related registers.
 * Called with:
 * - r3 pointing to the vcpu struct
 * - r4 containing the guest MSR with the desired TS bits:
 *	for HV KVM, it is VCPU_MSR
 *	for PR KVM, it is provided by the caller
 * - r5 containing a flag indicating that the non-volatile registers
 *	must be preserved
 * If r5 == 0, this potentially modifies all checkpointed registers, but
 * restores r1 and r2 from the PACA before exit.
 * If r5 != 0, this restores the MSR TM/FP/VEC/VSX bits to their state on entry.
 */
_GLOBAL(__kvmppc_restore_tm)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)

	cmpdi	cr7, r5, 0

	/* Turn on TM/FP/VSX/VMX so we can restore them. */
	mfmsr	r5
	mr	r10, r5
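	/*
	 * r10 keeps the entry MSR for a caller-requested restore on exit.
	 * MSR_TM sits above the low 32 bits, so it cannot be OR'd in as an
	 * immediate; build the mask with a shift instead.
	 */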
	li	r6, MSR_TM >> 32
	sldi	r6, r6, 32
	or	r5, r5, r6
	ori	r5, r5, MSR_FP
	oris	r5, r5, (MSR_VEC | MSR_VSX)@h
	mtmsrd	r5

	/*
	 * The user may change these outside of a transaction, so they must
	 * always be context switched.
	 */
	ld	r5, VCPU_TFHAR(r3)
	ld	r6, VCPU_TFIAR(r3)
	ld	r7, VCPU_TEXASR(r3)
	mtspr	SPRN_TFHAR, r5
	mtspr	SPRN_TFIAR, r6
	mtspr	SPRN_TEXASR, r7

	mr	r5, r4
	rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
	beq	9f		/* TM not active in guest */

	/*
	 * Make sure the failure summary is set; otherwise we'll take a
	 * program check when we trechkpt.  It's possible that this might not
	 * have been set on a kvmppc_set_one_reg() call, but we shouldn't let
	 * that crash the host.
	 */
	oris	r7, r7, (TEXASR_FS)@h
	mtspr	SPRN_TEXASR, r7

	/*
	 * Make a stack frame and save non-volatile registers if requested.
	 */
	stdu	r1, -SWITCH_FRAME_SIZE(r1)
	std	r1, HSTATE_SCRATCH2(r13)

	mfcr	r6
	mfspr	r7, SPRN_DSCR
	SAVE_GPR(2, r1)
	SAVE_GPR(6, r1)
	SAVE_GPR(7, r1)

	beq	cr7, 4f
	SAVE_NVGPRS(r1)

	/* MSR[TS] will be 1 (suspended) once we do trechkpt */
	li	r0, 1
	rldimi	r10, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
	SAVE_GPR(10, r1)	/* final MSR value */
4:
	/*
	 * We need to load up the checkpointed state for the guest.
	 * We need to do this early as it will blow away any GPRs, VSRs and
	 * some SPRs.
	 */

	mr	r31, r3
	addi	r3, r31, VCPU_FPRS_TM
	bl	load_fp_state
	addi	r3, r31, VCPU_VRS_TM
	bl	load_vr_state
	mr	r3, r31
	lwz	r7, VCPU_VRSAVE_TM(r3)
	mtspr	SPRN_VRSAVE, r7

	ld	r5, VCPU_LR_TM(r3)
	lwz	r6, VCPU_CR_TM(r3)
	ld	r7, VCPU_CTR_TM(r3)
	ld	r8, VCPU_AMR_TM(r3)
	ld	r9, VCPU_TAR_TM(r3)
	ld	r10, VCPU_XER_TM(r3)
	mtlr	r5
	mtcr	r6
	mtctr	r7
	mtspr	SPRN_AMR, r8
	mtspr	SPRN_TAR, r9
	mtxer	r10

	/*
	 * Load up PPR and DSCR values but don't put them in the actual SPRs
	 * till the last moment to avoid running with userspace PPR and DSCR for
	 * too long.
	 */
	ld	r29, VCPU_DSCR_TM(r3)
	ld	r30, VCPU_PPR_TM(r3)

	/* Clear MSR[RI], since r1 and r13 are about to be clobbered. */
	li	r5, 0
	mtmsrd	r5, 1

	/* Load GPRs r0-r28 */
	reg = 0
	.rept	29
	ld	reg, VCPU_GPRS_TM(reg)(r31)
	reg = reg + 1
	.endr

	mtspr	SPRN_DSCR, r29
	mtspr	SPRN_PPR, r30

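	/*
	 * r29/r30 held the guest DSCR/PPR and r31 still holds the vcpu
	 * pointer, so these three are reloaded last.
	 */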
	/* Load final GPRs */
	ld	29, VCPU_GPRS_TM(29)(r31)
	ld	30, VCPU_GPRS_TM(30)(r31)
	ld	31, VCPU_GPRS_TM(31)(r31)

	/* TM checkpointed state is now set up.  All GPRs are now volatile. */
	TRECHKPT
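	/*
	 * trechkpt has rebuilt the checkpoint from the live registers and
	 * left the thread in suspended state.
	 */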

	/* Now let's get back the state we need. */
	HMT_MEDIUM
	GET_PACA(r13)
	ld	r1, HSTATE_SCRATCH2(r13)
	REST_GPR(7, r1)
	mtspr	SPRN_DSCR, r7

	/* Set the MSR RI since we have our registers back. */
	li	r5, MSR_RI
	mtmsrd	r5, 1

	/* Restore TOC pointer and CR */
	REST_GPR(2, r1)
	REST_GPR(6, r1)
	mtcr	r6

	/* Restore non-volatile registers if requested to. */
	beq	cr7, 5f
	REST_GPR(10, r1)
	REST_NVGPRS(r1)

5:	addi	r1, r1, SWITCH_FRAME_SIZE
	ld	r0, PPC_LR_STKOFF(r1)
	mtlr	r0

9:	/* Restore MSR bits if requested */
	beqlr	cr7
	mtmsrd	r10, 0
	blr

/*
 * _kvmppc_restore_tm_pr() is a wrapper around __kvmppc_restore_tm(), so that
 * it can be invoked from C functions.  It is used by PR KVM only.
 */
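/*
 * C-side view, as a sketch; the prototype below is an assumption based on
 * the declarations kept in asm/asm-prototypes.h:
 *
 *	void _kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr);
 */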
_GLOBAL(_kvmppc_restore_tm_pr)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu    r1, -PPC_MIN_STKFRM(r1)

	/* Save TAR so that it can be recovered later */
	mfspr   r8, SPRN_TAR
	std	r8, PPC_MIN_STKFRM-8(r1)

	li	r5, 1
	bl	__kvmppc_restore_tm

	ld	r8, PPC_MIN_STKFRM-8(r1)
	mtspr   SPRN_TAR, r8

	addi    r1, r1, PPC_MIN_STKFRM
	ld	r0, PPC_LR_STKOFF(r1)
	mtlr	r0
	blr

EXPORT_SYMBOL_GPL(_kvmppc_restore_tm_pr);
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */