/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/arm-smccc.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/el2_setup.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/pgtable-hwdef.h>
#include <asm/sysreg.h>
#include <asm/virt.h>

	.text
	.pushsection	.idmap.text, "ax"

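/*
 * Vector table used while initializing the hypervisor. VBAR_EL2 requires
 * 2kB (2^11) alignment. Only the 64-bit EL1 synchronous entry (the host's
 * init HVC) is handled; every other vector parks the CPU in __invalid.
 */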
	.align	11

SYM_CODE_START(__kvm_hyp_init)
	ventry	__invalid		// Synchronous EL2t
	ventry	__invalid		// IRQ EL2t
	ventry	__invalid		// FIQ EL2t
	ventry	__invalid		// Error EL2t

	ventry	__invalid		// Synchronous EL2h
	ventry	__invalid		// IRQ EL2h
	ventry	__invalid		// FIQ EL2h
	ventry	__invalid		// Error EL2h

	ventry	__do_hyp_init		// Synchronous 64-bit EL1
	ventry	__invalid		// IRQ 64-bit EL1
	ventry	__invalid		// FIQ 64-bit EL1
	ventry	__invalid		// Error 64-bit EL1

	ventry	__invalid		// Synchronous 32-bit EL1
	ventry	__invalid		// IRQ 32-bit EL1
	ventry	__invalid		// FIQ 32-bit EL1
	ventry	__invalid		// Error 32-bit EL1

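/* Any unexpected exception during init simply parks the CPU here. */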
__invalid:
	b	.

	/*
	 * Only uses x0..x3 so as to not clobber callee-saved SMCCC registers.
	 *
	 * x0: SMCCC function ID
	 * x1: struct kvm_nvhe_init_params PA
	 */
__do_hyp_init:
	/* Check for a stub HVC call */
	cmp	x0, #HVC_STUB_HCALL_NR
	b.lo	__kvm_handle_stub_hvc

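	/*
	 * Not a stub call: the only other thing accepted here is the
	 * one-off __kvm_hyp_init hypercall. Clear the SMCCC hint bits
	 * before comparing the function ID.
	 */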
	bic	x0, x0, #ARM_SMCCC_CALL_HINTS
	mov	x3, #KVM_HOST_SMCCC_FUNC(__kvm_hyp_init)
	cmp	x0, x3
	b.eq	1f

	mov	x0, #SMCCC_RET_NOT_SUPPORTED
	eret

1:	mov	x0, x1
	mov	x3, lr
	bl	___kvm_hyp_init			// Clobbers x0..x2
	mov	lr, x3

	/* Initialization done, report success to the host */
	mov	x0, #SMCCC_RET_SUCCESS
	eret
SYM_CODE_END(__kvm_hyp_init)

/*
 * Initialize the hypervisor in EL2.
 *
 * Only uses x0..x2 so as to not clobber callee-saved SMCCC registers
 * and leave x3 for the caller.
 *
 * x0: struct kvm_nvhe_init_params PA
 */
SYM_CODE_START_LOCAL(___kvm_hyp_init)
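	/* Install the hyp stack provided in the init params. */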
	ldr	x1, [x0, #NVHE_INIT_STACK_HYP_VA]
	mov	sp, x1

	ldr	x1, [x0, #NVHE_INIT_MAIR_EL2]
	msr	mair_el2, x1

	ldr	x1, [x0, #NVHE_INIT_HCR_EL2]
	msr	hcr_el2, x1

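	/* Check whether HCR_EL2.E2H was requested (hVHE mode). */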
	mov	x2, #HCR_E2H
	and	x2, x1, x2
	cbz	x2, 1f

	// hVHE: Replay the EL2 setup to account for the E2H bit
	// TPIDR_EL2 is used to preserve x0 across the macro maze...
	isb
	msr	tpidr_el2, x0
	init_el2_state
	finalise_el2_state
	mrs	x0, tpidr_el2

1:
	ldr	x1, [x0, #NVHE_INIT_TPIDR_EL2]
	msr	tpidr_el2, x1

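	/* Program the stage-2 translation registers from the init params. */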
	ldr	x1, [x0, #NVHE_INIT_VTTBR]
	msr	vttbr_el2, x1

	ldr	x1, [x0, #NVHE_INIT_VTCR]
	msr	vtcr_el2, x1

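	/* Install the hyp stage-1 page-tables in TTBR0_EL2, with CnP if supported. */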
	ldr	x1, [x0, #NVHE_INIT_PGD_PA]
	phys_to_ttbr x2, x1
alternative_if ARM64_HAS_CNP
	orr	x2, x2, #TTBR_CNP_BIT
alternative_else_nop_endif
	msr	ttbr0_el2, x2

	/*
	 * Set the PS bits in TCR_EL2.
	 */
	ldr	x0, [x0, #NVHE_INIT_TCR_EL2]
	tcr_compute_pa_size x0, #TCR_EL2_PS_SHIFT, x1, x2
	msr	tcr_el2, x0

	isb

	/* Invalidate any stale TLB entries left by the bootloader */
	tlbi	alle2
	tlbi	vmalls12e1
	dsb	sy

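	/*
	 * Enable the EL2 MMU. Pointer authentication and (when built in)
	 * BTI are also enabled if the CPU supports them.
	 */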
	mov_q	x0, INIT_SCTLR_EL2_MMU_ON
alternative_if ARM64_HAS_ADDRESS_AUTH
	mov_q	x1, (SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | \
		     SCTLR_ELx_ENDA | SCTLR_ELx_ENDB)
	orr	x0, x0, x1
alternative_else_nop_endif

#ifdef CONFIG_ARM64_BTI_KERNEL
alternative_if ARM64_BTI
	orr	x0, x0, #SCTLR_EL2_BT
alternative_else_nop_endif
#endif /* CONFIG_ARM64_BTI_KERNEL */

	msr	sctlr_el2, x0
	isb

	/* Set the host vector */
	ldr	x0, =__kvm_hyp_host_vector
	msr	vbar_el2, x0

	ret
SYM_CODE_END(___kvm_hyp_init)

/*
 * PSCI CPU_ON entry point
 *
 * x0: struct kvm_nvhe_init_params PA
 */
SYM_CODE_START(kvm_hyp_cpu_entry)
	mov	x1, #1				// is_cpu_on = true
	b	__kvm_hyp_init_cpu
SYM_CODE_END(kvm_hyp_cpu_entry)

/*
 * PSCI CPU_SUSPEND / SYSTEM_SUSPEND entry point
 *
 * x0: struct kvm_nvhe_init_params PA
 */
SYM_CODE_START(kvm_hyp_cpu_resume)
	mov	x1, #0				// is_cpu_on = false
	b	__kvm_hyp_init_cpu
SYM_CODE_END(kvm_hyp_cpu_resume)

/*
 * Common code for CPU entry points. Initializes EL2 state and
 * installs the hypervisor before handing over to a C handler.
 *
 * x0: struct kvm_nvhe_init_params PA
 * x1: bool is_cpu_on
 */
SYM_CODE_START_LOCAL(__kvm_hyp_init_cpu)
	mov	x28, x0				// Stash arguments
	mov	x29, x1

	/* Check that the core was booted in EL2. */
	mrs	x0, CurrentEL
	cmp	x0, #CurrentEL_EL2
	b.eq	2f

	/* The core booted in EL1. KVM cannot be initialized on it. */
1:	wfe
	wfi
	b	1b

2:	msr	SPsel, #1			// We want to use SP_EL{1,2}

	/* Initialize EL2 CPU state to sane values. */
	init_el2_state				// Clobbers x0..x2
	finalise_el2_state
	__init_el2_nvhe_prepare_eret

	/* Enable MMU, set vectors and stack. */
	mov	x0, x28
	bl	___kvm_hyp_init			// Clobbers x0..x2

	/* Leave idmap. */
	mov	x0, x29
	ldr	x1, =kvm_host_psci_cpu_entry
	br	x1
SYM_CODE_END(__kvm_hyp_init_cpu)

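/*
 * Handle the stub hypercalls (function ID < HVC_STUB_HCALL_NR), reached
 * from __do_hyp_init or __host_hvc.
 *
 * x0: HVC_SOFT_RESTART or HVC_RESET_VECTORS
 * x1: new entry point (HVC_SOFT_RESTART only)
 * x2-x4: arguments handed to the new entry point in x0-x2 (HVC_SOFT_RESTART only)
 */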
SYM_CODE_START(__kvm_handle_stub_hvc)
	/*
	 * __kvm_handle_stub_hvc is reached from __host_hvc via a branch
	 * instruction (br), so a BTI J landing pad is needed here.
	 */
	bti j
	cmp	x0, #HVC_SOFT_RESTART
	b.ne	1f

	/* This is where we're about to jump, staying at EL2 */
	msr	elr_el2, x1
	mov	x0, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT | PSR_MODE_EL2h)
	msr	spsr_el2, x0

	/* Shuffle the arguments, and don't come back */
	mov	x0, x2
	mov	x1, x3
	mov	x2, x4
	b	reset

1:	cmp	x0, #HVC_RESET_VECTORS
	b.ne	1f

	/*
	 * Set the HVC_RESET_VECTORS return code before entering the common
	 * path so that we do not clobber x0-x2 in case we are coming via
	 * HVC_SOFT_RESTART.
	 */
	mov	x0, xzr
reset:
	/* Reset KVM back to the hyp stub. */
	mov_q	x5, INIT_SCTLR_EL2_MMU_OFF
	pre_disable_mmu_workaround
	msr	sctlr_el2, x5
	isb

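	/* In protected mode, restore the host's nVHE HCR_EL2 configuration. */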
alternative_if ARM64_KVM_PROTECTED_MODE
	mov_q	x5, HCR_HOST_NVHE_FLAGS
	msr	hcr_el2, x5
alternative_else_nop_endif

	/* Install stub vectors */
	adr_l	x5, __hyp_stub_vectors
	msr	vbar_el2, x5
	eret

1:	/* Bad stub call */
	mov_q	x0, HVC_STUB_ERR
	eret

SYM_CODE_END(__kvm_handle_stub_hvc)

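/*
 * Switch the hypervisor onto a new set of page-tables and a new stack.
 * Runs from the idmap, with the MMU turned off while the tables are
 * being swapped.
 *
 * x0: struct kvm_nvhe_init_params PA
 * x1: address to branch to once the new page-tables and stack are in use
 */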
SYM_FUNC_START(__pkvm_init_switch_pgd)
	/* Turn the MMU off */
	pre_disable_mmu_workaround
	mrs	x2, sctlr_el2
	bic	x3, x2, #SCTLR_ELx_M
	msr	sctlr_el2, x3
	isb

	tlbi	alle2

	/* Install the new pgtables */
	ldr	x3, [x0, #NVHE_INIT_PGD_PA]
	phys_to_ttbr x4, x3
alternative_if ARM64_HAS_CNP
	orr	x4, x4, #TTBR_CNP_BIT
alternative_else_nop_endif
	msr	ttbr0_el2, x4

	/* Set the new stack pointer */
	ldr	x0, [x0, #NVHE_INIT_STACK_HYP_VA]
	mov	sp, x0

	/* And turn the MMU back on! */
	set_sctlr_el2	x2
	ret	x1
SYM_FUNC_END(__pkvm_init_switch_pgd)

	.popsection