xref: /openbmc/linux/arch/arm64/kvm/hyp/nvhe/host.S (revision abcda807)
1/* SPDX-License-Identifier: GPL-2.0-only */
2/*
3 * Copyright (C) 2020 - Google Inc
4 * Author: Andrew Scull <ascull@google.com>
5 */
6
7#include <linux/linkage.h>
8
9#include <asm/assembler.h>
10#include <asm/kvm_asm.h>
11#include <asm/kvm_mmu.h>
12
13	.text
14
/*
 * Save the host's GP registers into the per-CPU host context, call the
 * hyp trap handler, then restore the host state and exception-return.
 * Entered from the host EL1 sync vector with all host registers live.
 */
SYM_FUNC_START(__host_exit)
	stp	x0, x1, [sp, #-16]!	// stash x0/x1 so they can be used as scratch

	/* x0 = host context pointer (see 'mov x29, x0' below); x1 is scratch */
	get_host_ctxt	x0, x1

	/* Set PSTATE.PAN while running at hyp, on CPUs that have it */
	ALTERNATIVE(nop, SET_PSTATE_PAN(1), ARM64_HAS_PAN, CONFIG_ARM64_PAN)

	/* Store the host regs x2 and x3 */
	stp	x2, x3,   [x0, #CPU_XREG_OFFSET(2)]

	/* Retrieve the host regs x0-x1 from the stack */
	ldp	x2, x3, [sp], #16	// x2/x3 = the host's original x0/x1

	/* Store the host regs x0-x1 and x4-x17 */
	stp	x2, x3,   [x0, #CPU_XREG_OFFSET(0)]
	stp	x4, x5,   [x0, #CPU_XREG_OFFSET(4)]
	stp	x6, x7,   [x0, #CPU_XREG_OFFSET(6)]
	stp	x8, x9,   [x0, #CPU_XREG_OFFSET(8)]
	stp	x10, x11, [x0, #CPU_XREG_OFFSET(10)]
	stp	x12, x13, [x0, #CPU_XREG_OFFSET(12)]
	stp	x14, x15, [x0, #CPU_XREG_OFFSET(14)]
	stp	x16, x17, [x0, #CPU_XREG_OFFSET(16)]

	/* Store the host regs x18-x29, lr */
	save_callee_saved_regs x0

	/* Save the host context pointer in x29 across the function call */
	mov	x29, x0
	bl	handle_trap

	/* Restore host regs x0-x17 */
	ldp	x0, x1,   [x29, #CPU_XREG_OFFSET(0)]
	ldp	x2, x3,   [x29, #CPU_XREG_OFFSET(2)]
	ldp	x4, x5,   [x29, #CPU_XREG_OFFSET(4)]
	ldp	x6, x7,   [x29, #CPU_XREG_OFFSET(6)]

	/*
	 * x0-7 are used for panic arguments: __hyp_do_panic loads them
	 * before branching here, so the panic path must skip their restore.
	 */
__host_enter_for_panic:
	ldp	x8, x9,   [x29, #CPU_XREG_OFFSET(8)]
	ldp	x10, x11, [x29, #CPU_XREG_OFFSET(10)]
	ldp	x12, x13, [x29, #CPU_XREG_OFFSET(12)]
	ldp	x14, x15, [x29, #CPU_XREG_OFFSET(14)]
	ldp	x16, x17, [x29, #CPU_XREG_OFFSET(16)]

	/* Restore host regs x18-x29, lr (x29 is reloaded last by the macro) */
	restore_callee_saved_regs x29

	/* Do not touch any register after this! */
__host_enter_without_restoring:
	eret
	sb				// speculation barrier: no straight-line leak past eret
SYM_FUNC_END(__host_exit)
67
68/*
69 * void __noreturn __hyp_do_panic(bool restore_host, u64 spsr, u64 elr, u64 par);
70 */
SYM_FUNC_START(__hyp_do_panic)
	/*
	 * Load the format arguments for __hyp_panic_string into x1-7.
	 * x1 (spsr) and x2 (elr) arrive as arguments and stay in place;
	 * x6 takes par (argument x3) and x7 the loaded vcpu pointer,
	 * freeing x3-x5 for the syndrome registers read below.
	 */
	mov	x6, x3
	get_vcpu_ptr x7, x3		// x3 is scratch for the macro — NOTE(review): per get_vcpu_ptr

	mrs	x3, esr_el2
	mrs	x4, far_el2
	mrs	x5, hpfar_el2

	/* Prepare and exit to the host's panic function. */
	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, lr		// return to EL1h with D/A/I/F masked
	ldr	lr, =panic
	msr	elr_el2, lr		// eret lands in the host's panic()

	/*
	 * Set the panic format string and enter the host, conditionally
	 * restoring the host context. The ldr must not disturb the flags
	 * set by the cmp.
	 */
	cmp	x0, xzr			// restore_host == false?
	ldr	x0, =__hyp_panic_string
	b.eq	__host_enter_without_restoring
	b	__host_enter_for_panic
SYM_FUNC_END(__hyp_do_panic)
96
/*
 * Synchronous EL1 (host) vector entry: route HVC stub calls straight to
 * __kvm_handle_stub_hvc, everything else to __host_exit. Must fit in one
 * 0x80-byte vector slot (checked below).
 */
.macro host_el1_sync_vect
	.align 7
.L__vect_start\@:
	stp	x0, x1, [sp, #-16]!	// stash x0/x1 while inspecting the syndrome
	mrs	x0, esr_el2
	lsr	x0, x0, #ESR_ELx_EC_SHIFT
	cmp	x0, #ESR_ELx_EC_HVC64
	ldp	x0, x1, [sp], #16	// restore x0/x1; ldp leaves the flags intact
	b.ne	__host_exit		// not an HVC64: take the full exit path

	/* Check for a stub HVC call (unsigned compare on the function id) */
	cmp	x0, #HVC_STUB_HCALL_NR
	b.hs	__host_exit

	/*
	 * Compute the idmap address of __kvm_handle_stub_hvc and
	 * jump there. Since we use kimage_voffset, do not use the
	 * HYP VA for __kvm_handle_stub_hvc, but the kernel VA instead
	 * (by loading it from the constant pool).
	 *
	 * Preserve x0-x4, which may contain stub parameters.
	 */
	ldr	x5, =__kvm_handle_stub_hvc
	ldr_l	x6, kimage_voffset

	/* x5 = __pa(x5) */
	sub	x5, x5, x6
	br	x5
.L__vect_end\@:
.if ((.L__vect_end\@ - .L__vect_start\@) > 0x80)
	.error "host_el1_sync_vect larger than vector entry"
.endif
.endm
130
/*
 * Unexpected exception taken from EL2 itself: panic, via the guest panic
 * path if a vcpu is loaded, otherwise via hyp_panic.
 */
.macro invalid_host_el2_vect
	.align 7
	/* If a guest is loaded, panic out of it. */
	stp	x0, x1, [sp, #-16]!
	get_loaded_vcpu x0, x1
	cbnz	x0, __guest_exit_panic
	add	sp, sp, #16		// discard the stashed x0/x1; values are dead

	/*
	 * The panic may not be clean if the exception is taken before the host
	 * context has been saved by __host_exit or after the hyp context has
	 * been partially clobbered by __host_enter.
	 */
	b	hyp_panic
.endm
146
/*
 * Unexpected exception taken from the host at EL1 (IRQ/FIQ/SError or
 * 32-bit entry): gather the fault context and panic without trying to
 * restore the host registers.
 */
.macro invalid_host_el1_vect
	.align 7
	mov	x0, xzr		/* restore_host = false */
	mrs	x1, spsr_el2
	mrs	x2, elr_el2
	mrs	x3, par_el1
	b	__hyp_do_panic
.endm
155
156/*
157 * The host vector does not use an ESB instruction in order to avoid consuming
158 * SErrors that should only be consumed by the host. Guest entry is deferred by
159 * __guest_enter if there are any pending asynchronous exceptions so hyp will
 * always return to the host without having consumed host SErrors.
161 *
162 * CONFIG_KVM_INDIRECT_VECTORS is not applied to the host vectors because the
163 * host knows about the EL2 vectors already, and there is no point in hiding
164 * them.
165 */
	/* EL2 vector table: architecture requires 2KB (2^11) alignment */
	.align 11
SYM_CODE_START(__kvm_hyp_host_vector)
	invalid_host_el2_vect			// Synchronous EL2t
	invalid_host_el2_vect			// IRQ EL2t
	invalid_host_el2_vect			// FIQ EL2t
	invalid_host_el2_vect			// Error EL2t

	invalid_host_el2_vect			// Synchronous EL2h
	invalid_host_el2_vect			// IRQ EL2h
	invalid_host_el2_vect			// FIQ EL2h
	invalid_host_el2_vect			// Error EL2h

	host_el1_sync_vect			// Synchronous 64-bit EL1
	invalid_host_el1_vect			// IRQ 64-bit EL1
	invalid_host_el1_vect			// FIQ 64-bit EL1
	invalid_host_el1_vect			// Error 64-bit EL1

	invalid_host_el1_vect			// Synchronous 32-bit EL1
	invalid_host_el1_vect			// IRQ 32-bit EL1
	invalid_host_el1_vect			// FIQ 32-bit EL1
	invalid_host_el1_vect			// Error 32-bit EL1
SYM_CODE_END(__kvm_hyp_host_vector)
188