/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/linkage.h>

#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/fpsimdmacros.h>
#include <asm/kvm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_ptrauth.h>

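/*
 * CPU_XREG_OFFSET(n) resolves to the byte offset of general-purpose
 * register xn within struct kvm_cpu_context: CPU_GP_REGS and
 * CPU_USER_PT_REGS (generated by asm-offsets.c) locate the embedded
 * user_pt_regs, and each x register is 8 bytes wide.
 */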
#define CPU_GP_REG_OFFSET(x)	(CPU_GP_REGS + x)
#define CPU_XREG_OFFSET(x)	CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)

	.text
	.pushsection	.hyp.text, "ax"

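/*
 * Save/restore the AAPCS callee-saved registers (x19-x28), plus the
 * frame pointer (x29) and link register, to/from the given context.
 * These are the only general-purpose registers the compiled host code
 * expects to survive the excursion into the guest.
 */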
.macro save_callee_saved_regs ctxt
	stp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
	stp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
	stp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
	stp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
	stp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
	stp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm

.macro restore_callee_saved_regs ctxt
	ldp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
	ldp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
	ldp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
	ldp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
	ldp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
	ldp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm

/*
 * u64 __guest_enter(struct kvm_vcpu *vcpu,
 *		     struct kvm_cpu_context *host_ctxt);
 */
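// The u64 return value is the exit code that __guest_exit leaves in x0
// once the guest traps back to EL2.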
ENTRY(__guest_enter)
	// x0: vcpu
	// x1: host context
	// x2-x17: clobbered by macros
	// x18: guest context

	// Store the host regs
	save_callee_saved_regs x1

	add	x18, x0, #VCPU_CONTEXT
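	// From here x18 is the guest context base; it stays live until the
	// guest's own x18 is reloaded as the very last step below.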

	// Macro ptrauth_switch_to_guest format:
	//	ptrauth_switch_to_guest(guest ctxt, tmp1, tmp2, tmp3)
	// The macro below, which restores the guest's keys, is not
	// implemented in C, as doing so could cause Pointer Authentication
	// key signing mismatch errors when the feature is also enabled for
	// kernel code.
	ptrauth_switch_to_guest x18, x0, x1, x2

	// Restore guest regs x0-x17
	ldp	x0, x1,   [x18, #CPU_XREG_OFFSET(0)]
	ldp	x2, x3,   [x18, #CPU_XREG_OFFSET(2)]
	ldp	x4, x5,   [x18, #CPU_XREG_OFFSET(4)]
	ldp	x6, x7,   [x18, #CPU_XREG_OFFSET(6)]
	ldp	x8, x9,   [x18, #CPU_XREG_OFFSET(8)]
	ldp	x10, x11, [x18, #CPU_XREG_OFFSET(10)]
	ldp	x12, x13, [x18, #CPU_XREG_OFFSET(12)]
	ldp	x14, x15, [x18, #CPU_XREG_OFFSET(14)]
	ldp	x16, x17, [x18, #CPU_XREG_OFFSET(16)]

	// Restore guest regs x19-x29, lr
	restore_callee_saved_regs x18

	// Restore guest reg x18
	ldr	x18,      [x18, #CPU_XREG_OFFSET(18)]

	// Do not touch any register after this!
	eret
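	// Speculation barrier: prevent straight-line speculation past the
	// eret into the code below.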
	sb
ENDPROC(__guest_enter)

ENTRY(__guest_exit)
	// x0: return code
	// x1: vcpu
	// x2-x29,lr: vcpu regs
	// vcpu x0-x1 on the stack
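	//
	// The hyp exception vectors stash the guest's x0/x1 on the stack
	// before branching here, which is why they are popped back below.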

	add	x1, x1, #VCPU_CONTEXT

	ALTERNATIVE(nop, SET_PSTATE_PAN(1), ARM64_HAS_PAN, CONFIG_ARM64_PAN)
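	// The alternative above is patched to SET_PSTATE_PAN(1) on CPUs
	// with ARM64_HAS_PAN, re-enabling Privileged Access Never as soon
	// as we are out of the guest; it stays a nop everywhere else.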

	// Store the guest regs x2 and x3
	stp	x2, x3,   [x1, #CPU_XREG_OFFSET(2)]

	// Retrieve the guest regs x0-x1 from the stack
	ldp	x2, x3, [sp], #16	// x0, x1

	// Store the guest regs x0-x1 and x4-x18
	stp	x2, x3,   [x1, #CPU_XREG_OFFSET(0)]
	stp	x4, x5,   [x1, #CPU_XREG_OFFSET(4)]
	stp	x6, x7,   [x1, #CPU_XREG_OFFSET(6)]
	stp	x8, x9,   [x1, #CPU_XREG_OFFSET(8)]
	stp	x10, x11, [x1, #CPU_XREG_OFFSET(10)]
	stp	x12, x13, [x1, #CPU_XREG_OFFSET(12)]
	stp	x14, x15, [x1, #CPU_XREG_OFFSET(14)]
	stp	x16, x17, [x1, #CPU_XREG_OFFSET(16)]
	str	x18,      [x1, #CPU_XREG_OFFSET(18)]

	// Store the guest regs x19-x29, lr
	save_callee_saved_regs x1

	get_host_ctxt	x2, x3
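	// get_host_ctxt leaves a pointer to this CPU's host kvm_cpu_context
	// in x2; x3 is only a scratch register for the macro.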

	// Macro ptrauth_switch_to_host format:
	//	ptrauth_switch_to_host(guest ctxt, host ctxt, tmp1, tmp2, tmp3)
	// The macro below, which saves the guest's keys and restores the
	// host's, is not implemented in C, as doing so could cause Pointer
	// Authentication key signing mismatch errors when the feature is
	// also enabled for kernel code.
	ptrauth_switch_to_host x1, x2, x3, x4, x5

	// Now restore the host regs
	restore_callee_saved_regs x2

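	// Two strategies for consuming an SError the guest may have left
	// pending: with the RAS extensions it has already been deferred
	// into DISR_EL1 and can simply be read back; without them, SError
	// must briefly be unmasked so that any pending abort is taken now.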
alternative_if ARM64_HAS_RAS_EXTN
	// If we have the RAS extensions we can consume a pending error
	// without an unmask-SError and isb.
	esb
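	// The esb above has deferred any pending SError into DISR_EL1,
	// where it can be read and cleared without ever taking the abort.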
	mrs_s	x2, SYS_DISR_EL1
	str	x2, [x1, #(VCPU_FAULT_DISR - VCPU_CONTEXT)]
	cbz	x2, 1f
	msr_s	SYS_DISR_EL1, xzr
	orr	x0, x0, #(1<<ARM_EXIT_WITH_SERROR_BIT)
1:	ret
alternative_else
	// If we have a pending asynchronous abort, now is the
	// time to find out. From your VAXorcist book, page 666:
	// "Threaten me not, oh Evil one!  For I speak with
	// the power of DEC, and I command thee to show thyself!"
	mrs	x2, elr_el2
	mrs	x3, esr_el2
	mrs	x4, spsr_el2
	mov	x5, x0
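	// The EL2 exception context (and the exit code) is stashed in
	// x2-x5 first: an SError taken in the window below would overwrite
	// these registers.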

	dsb	sy		// Synchronize against in-flight ld/st
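	// The nop pads this sequence to the same length as the RAS
	// alternative above, as the patching framework requires.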
	nop
	msr	daifclr, #4	// Unmask aborts
alternative_endif

	// This is our single instruction exception window. A pending
	// SError is guaranteed to occur at the earliest when we unmask
	// it, and at the latest just after the ISB.
	.global	abort_guest_exit_start
abort_guest_exit_start:

	isb

	.global	abort_guest_exit_end
abort_guest_exit_end:
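	// The two labels above bound the window so that the EL2 SError
	// vector can recognise an abort taken right here and adjust the
	// return code accordingly.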

	// If the exception took place, the EL2 registers now describe the
	// SError rather than the guest exit, so restore the guest's (EL1)
	// exception context in order to report some useful information.
	// Merge the exception code with the SError pending bit.
	tbz	x0, #ARM_EXIT_WITH_SERROR_BIT, 1f
	msr	elr_el2, x2
	msr	esr_el2, x3
	msr	spsr_el2, x4
	orr	x0, x0, x5
1:	ret
ENDPROC(__guest_exit)
