/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/linkage.h>

#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/fpsimdmacros.h>
#include <asm/kvm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>

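// Each general purpose register is a u64, so register xN lives at byte
// offset 8*N within the saved pt_regs area of the CPU context.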
#define CPU_GP_REG_OFFSET(x)	(CPU_GP_REGS + x)
#define CPU_XREG_OFFSET(x)	CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)

	.text
	.pushsection	.hyp.text, "ax"

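// Under AAPCS64, x19-x28 are callee-saved; x29 (the frame pointer) and
// lr must also survive the world switch, so they are saved alongside.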
.macro save_callee_saved_regs ctxt
	stp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
	stp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
	stp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
	stp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
	stp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
	stp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm

.macro restore_callee_saved_regs ctxt
	ldp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
	ldp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
	ldp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
	ldp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
	ldp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
	ldp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm

/*
 * u64 __guest_enter(struct kvm_vcpu *vcpu,
 *		     struct kvm_cpu_context *host_ctxt);
 */
ENTRY(__guest_enter)
	// x0: vcpu
	// x1: host context
	// x2-x17: clobbered by macros
	// x18: guest context

	// Store the host regs
	save_callee_saved_regs x1

	// Store host_ctxt and vcpu for use at exit time
	stp	x1, x0, [sp, #-16]!

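	// Point x18 at the guest context embedded in the vcpu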
	add	x18, x0, #VCPU_CONTEXT

	// Restore guest regs x0-x17
	ldp	x0, x1,   [x18, #CPU_XREG_OFFSET(0)]
	ldp	x2, x3,   [x18, #CPU_XREG_OFFSET(2)]
	ldp	x4, x5,   [x18, #CPU_XREG_OFFSET(4)]
	ldp	x6, x7,   [x18, #CPU_XREG_OFFSET(6)]
	ldp	x8, x9,   [x18, #CPU_XREG_OFFSET(8)]
	ldp	x10, x11, [x18, #CPU_XREG_OFFSET(10)]
	ldp	x12, x13, [x18, #CPU_XREG_OFFSET(12)]
	ldp	x14, x15, [x18, #CPU_XREG_OFFSET(14)]
	ldp	x16, x17, [x18, #CPU_XREG_OFFSET(16)]

	// Restore guest regs x19-x29, lr
	restore_callee_saved_regs x18

	// Restore guest reg x18
	ldr	x18,      [x18, #CPU_XREG_OFFSET(18)]

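	// ERET branches to ELR_EL2 with PSTATE taken from SPSR_EL2; both
	// already hold the guest's values at this point.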
	// Do not touch any register after this!
	eret
ENDPROC(__guest_enter)

ENTRY(__guest_exit)
	// x0: return code
	// x1: vcpu
	// x2-x29,lr: vcpu regs
	// vcpu x0-x1 on the stack

	add	x1, x1, #VCPU_CONTEXT

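	// Set PAN while running hyp code when the CPU implements it;
	// otherwise the instruction below remains a nop.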
	ALTERNATIVE(nop, SET_PSTATE_PAN(1), ARM64_HAS_PAN, CONFIG_ARM64_PAN)

	// Store the guest regs x2 and x3
	stp	x2, x3,   [x1, #CPU_XREG_OFFSET(2)]

	// Retrieve the guest regs x0-x1 from the stack
	ldp	x2, x3, [sp], #16	// x0, x1

	// Store the guest regs x0-x1 and x4-x18
	stp	x2, x3,   [x1, #CPU_XREG_OFFSET(0)]
	stp	x4, x5,   [x1, #CPU_XREG_OFFSET(4)]
	stp	x6, x7,   [x1, #CPU_XREG_OFFSET(6)]
	stp	x8, x9,   [x1, #CPU_XREG_OFFSET(8)]
	stp	x10, x11, [x1, #CPU_XREG_OFFSET(10)]
	stp	x12, x13, [x1, #CPU_XREG_OFFSET(12)]
	stp	x14, x15, [x1, #CPU_XREG_OFFSET(14)]
	stp	x16, x17, [x1, #CPU_XREG_OFFSET(16)]
	str	x18,      [x1, #CPU_XREG_OFFSET(18)]

	// Store the guest regs x19-x29, lr
	save_callee_saved_regs x1

	// Restore the host_ctxt from the stack
	ldr	x2, [sp], #16

	// Now restore the host regs
	restore_callee_saved_regs x2

alternative_if ARM64_HAS_RAS_EXTN
	// With the RAS extensions we can consume a pending SError
	// without having to unmask it and issue an isb.
	esb
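	// The ESB deferred any pending SError and recorded its syndrome
	// in DISR_EL1 (zero if none was pending).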
	mrs_s	x2, SYS_DISR_EL1
	str	x2, [x1, #(VCPU_FAULT_DISR - VCPU_CONTEXT)]
	cbz	x2, 1f
	msr_s	SYS_DISR_EL1, xzr
	orr	x0, x0, #(1<<ARM_EXIT_WITH_SERROR_BIT)
1:	ret
alternative_else
	// If we have a pending asynchronous abort, now is the
	// time to find out. From your VAXorcist book, page 666:
	// "Threaten me not, oh Evil one!  For I speak with
	// the power of DEC, and I command thee to show thyself!"
	mrs	x2, elr_el2
	mrs	x3, esr_el2
	mrs	x4, spsr_el2
	mov	x5, x0

	dsb	sy		// Synchronize against in-flight ld/st
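	// The nop below pads this branch to the same number of
	// instructions as the RAS branch above, as required by the
	// alternatives framework.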
	nop
	msr	daifclr, #4	// Unmask aborts
alternative_endif

	// This is our single instruction exception window. A pending
	// SError is guaranteed to occur at the earliest when we unmask
	// it, and at the latest just after the ISB.
	.global	abort_guest_exit_start
abort_guest_exit_start:

	isb

	.global	abort_guest_exit_end
abort_guest_exit_end:

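	// If an SError was taken in the window above, the EL2 error
	// vector (see hyp-entry.S) set ARM_EXIT_WITH_SERROR_BIT in x0
	// before returning here.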
	// If the exception took place, restore the EL1 exception
	// context so that we can report some information.
	// Merge the exception code with the SError pending bit.
	tbz	x0, #ARM_EXIT_WITH_SERROR_BIT, 1f
	msr	elr_el2, x2
	msr	esr_el2, x3
	msr	spsr_el2, x4
	orr	x0, x0, x5
1:	ret
ENDPROC(__guest_exit)

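// Lazy FP/SIMD switch, entered from the hyp trap vectors the first time
// the guest touches FP/SIMD: swap the host's FP state for the guest's
// and return to retry the instruction that trapped.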
ENTRY(__fpsimd_guest_restore)
	// x0: esr
	// x1: vcpu
	// x2-x29,lr: vcpu regs
	// vcpu x0-x1 on the stack
	stp	x2, x3, [sp, #-16]!
	stp	x4, lr, [sp, #-16]!

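	// Re-enable FP/SIMD access: clear the CPTR_EL2.TFP trap bit on
	// non-VHE systems, or set CPACR_EL1.FPEN when running with VHE.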
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	mrs	x2, cptr_el2
	bic	x2, x2, #CPTR_EL2_TFP
	msr	cptr_el2, x2
alternative_else
	mrs	x2, cpacr_el1
	orr	x2, x2, #CPACR_EL1_FPEN
	msr	cpacr_el1, x2
alternative_endif
	isb

	mov	x3, x1

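	// Save the host's FP/SIMD state; the host context pointer is a
	// kernel VA, so convert it to a HYP VA first.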
	ldr	x0, [x3, #VCPU_HOST_CONTEXT]
	kern_hyp_va x0
	add	x0, x0, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
	bl	__fpsimd_save_state

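	// Load the guest's FP/SIMD state from the guest context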
	add	x2, x3, #VCPU_CONTEXT
	add	x0, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
	bl	__fpsimd_restore_state

	// Skip restoring fpexc32 for AArch64 guests
	mrs	x1, hcr_el2
	tbnz	x1, #HCR_RW_SHIFT, 1f
	ldr	x4, [x3, #VCPU_FPEXC32_EL2]
	msr	fpexc32_el2, x4
1:
	ldp	x4, lr, [sp], #16
	ldp	x2, x3, [sp], #16
	ldp	x0, x1, [sp], #16

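	// Return to the guest and re-execute the instruction that
	// trapped; FP/SIMD accesses no longer trap.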
	eret
ENDPROC(__fpsimd_guest_restore)

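// Spectre-v2 hardening for Qualcomm Falkor: an SMC into firmware that
// invalidates the branch predictor (BTAC) state.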
ENTRY(__qcom_hyp_sanitize_btac_predictors)
	/*
	 * Call the SMC64 Silicon Provider (SiP) service with function
	 * ID 23<<8, i.e. SMC function ID 0xC2001700.
	 * 0xC2000000-0xC200FFFF: range assigned to SiP Service Calls
	 * b15-b0: the SiP function ID
	 */
	movz    x0, #0x1700
	movk    x0, #0xc200, lsl #16
	smc     #0
	ret
ENDPROC(__qcom_hyp_sanitize_btac_predictors)