/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/arm-smccc.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/pgtable-hwdef.h>
#include <asm/sysreg.h>
#include <asm/virt.h>

	.text
	.pushsection	.hyp.idmap.text, "ax"

	.align	11
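	/*
	 * EL2 exception vectors: 16 entries of 0x80 bytes each, so the
	 * table is 2KB and needs the 2^11 alignment mandated by VBAR_EL2.
	 */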

SYM_CODE_START(__kvm_hyp_init)
	ventry	__invalid		// Synchronous EL2t
	ventry	__invalid		// IRQ EL2t
	ventry	__invalid		// FIQ EL2t
	ventry	__invalid		// Error EL2t

	ventry	__invalid		// Synchronous EL2h
	ventry	__invalid		// IRQ EL2h
	ventry	__invalid		// FIQ EL2h
	ventry	__invalid		// Error EL2h

	ventry	__do_hyp_init		// Synchronous 64-bit EL1
	ventry	__invalid		// IRQ 64-bit EL1
	ventry	__invalid		// FIQ 64-bit EL1
	ventry	__invalid		// Error 64-bit EL1

	ventry	__invalid		// Synchronous 32-bit EL1
	ventry	__invalid		// IRQ 32-bit EL1
	ventry	__invalid		// FIQ 32-bit EL1
	ventry	__invalid		// Error 32-bit EL1

__invalid:
	b	.

	/*
	 * x0: SMCCC function ID
	 * x1: HYP pgd
	 * x2: per-CPU offset
	 * x3: HYP stack
	 * x4: HYP vectors
	 */
__do_hyp_init:
	/* Check for a stub HVC call: those use IDs below HVC_STUB_HCALL_NR */
	cmp	x0, #HVC_STUB_HCALL_NR
	b.lo	__kvm_handle_stub_hvc

	/* Set tpidr_el2 for use by HYP to free a register */
	msr	tpidr_el2, x2

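	/*
	 * The only SMCCC function ID accepted here is the __kvm_hyp_init
	 * call that got us to EL2; anything else is rejected.
	 */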
	mov	x2, #KVM_HOST_SMCCC_FUNC(__kvm_hyp_init)
	cmp	x0, x2
	b.eq	1f
	mov	x0, #SMCCC_RET_NOT_SUPPORTED
	eret

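	/*
	 * Install the HYP page tables: turn the pgd in x1 into a TTBR
	 * value, setting the CNP bit when the CPU supports Common-Not-
	 * Private translations.
	 */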
1:	phys_to_ttbr x0, x1
alternative_if ARM64_HAS_CNP
	orr	x0, x0, #TTBR_CNP_BIT
alternative_else_nop_endif
	msr	ttbr0_el2, x0

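	/*
	 * Build TCR_EL2 from the fields it shares with TCR_EL1 (granule
	 * size, shareability, cacheability, T0SZ) plus its RES1 bits;
	 * T0SZ is fixed up below.
	 */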
	mrs	x0, tcr_el1
	mov_q	x1, TCR_EL2_MASK
	and	x0, x0, x1
	mov	x1, #TCR_EL2_RES1
	orr	x0, x0, x1

	/*
	 * The ID map may be configured to use an extended virtual address
	 * range. This is only the case if system RAM is out of range for the
	 * currently configured page size and VA_BITS, in which case we will
	 * also need the extended virtual range for the HYP ID map, or we won't
	 * be able to enable the EL2 MMU.
	 *
	 * However, at EL2, there is only one TTBR register, and we can't switch
	 * between translation tables *and* update TCR_EL2.T0SZ at the same
	 * time. Bottom line: we need to use the extended range with *both* our
	 * translation tables.
	 *
	 * So use the same T0SZ value we use for the ID map.
	 */
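	/*
	 * Illustrative example: with 4K pages and VA_BITS=39, the default
	 * T0SZ is 64 - 39 = 25; if RAM sits above 2^39, idmap_t0sz will
	 * have been widened (e.g. to 16 for a 48-bit range), and
	 * TCR_EL2.T0SZ has to match it.
	 */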
	ldr_l	x1, idmap_t0sz
	bfi	x0, x1, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH

	/*
	 * Set the PS bits in TCR_EL2 from the PA range advertised in
	 * ID_AA64MMFR0_EL1.
	 */
	tcr_compute_pa_size x0, #TCR_EL2_PS_SHIFT, x1, x2

	msr	tcr_el2, x0

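	/*
	 * Reuse the host's MAIR so that the memory attribute indices in the
	 * HYP page tables keep their meaning at EL2.
	 */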
	mrs	x0, mair_el1
	msr	mair_el2, x0
	isb

	/* Invalidate any stale TLB entries inherited from the bootloader */
	tlbi	alle2
	dsb	sy

	/*
	 * Preserve all the RES1 bits while setting the default flags,
	 * as well as the EE bit on BE. Drop the A flag since the compiler
	 * is allowed to generate unaligned accesses.
	 */
	mov_q	x0, (SCTLR_EL2_RES1 | (SCTLR_ELx_FLAGS & ~SCTLR_ELx_A))
CPU_BE(	orr	x0, x0, #SCTLR_ELx_EE)
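	/* With pointer authentication, also enable the IA/IB/DA/DB keys at EL2 */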
alternative_if ARM64_HAS_ADDRESS_AUTH
	mov_q	x1, (SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | \
		     SCTLR_ELx_ENDA | SCTLR_ELx_ENDB)
	orr	x0, x0, x1
alternative_else_nop_endif
	msr	sctlr_el2, x0
	isb

	/* Set the stack and new vectors */
	mov	sp, x3
	msr	vbar_el2, x4

	/* Hello, World! */
	mov	x0, #SMCCC_RET_SUCCESS
	eret
SYM_CODE_END(__kvm_hyp_init)

SYM_CODE_START(__kvm_handle_stub_hvc)
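	/*
	 * Stub hypercalls handled while KVM owns EL2:
	 *  HVC_SOFT_RESTART:  x1 = new EL2 entry point, x2-x4 become its x0-x2
	 *  HVC_RESET_VECTORS: disable the EL2 MMU, restore the hyp stub vectors
	 * Anything else returns HVC_STUB_ERR.
	 */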
	cmp	x0, #HVC_SOFT_RESTART
	b.ne	1f

	/* This is where we're about to jump, staying at EL2 */
	msr	elr_el2, x1
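	/* Return with all DAIF exceptions masked, in handler mode (EL2h) */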
	mov	x0, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT | PSR_MODE_EL2h)
	msr	spsr_el2, x0

	/* Shuffle the arguments, and don't come back */
	mov	x0, x2
	mov	x1, x3
	mov	x2, x4
	b	reset

1:	cmp	x0, #HVC_RESET_VECTORS
	b.ne	1f

	/*
	 * Set the HVC_RESET_VECTORS return code before entering the common
	 * path so that we do not clobber x0-x2 in case we are coming via
	 * HVC_SOFT_RESTART.
	 */
	mov	x0, xzr
reset:
	/* Reset kvm back to the hyp stub. */
	mrs	x5, sctlr_el2
	mov_q	x6, SCTLR_ELx_FLAGS
	bic	x5, x5, x6		// Clear SCTLR_ELx.M and related flags
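	/* Some CPU errata require an ISB right before the M bit is cleared */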
	pre_disable_mmu_workaround
	msr	sctlr_el2, x5
	isb

	/* Install stub vectors */
	adr_l	x5, __hyp_stub_vectors
	msr	vbar_el2, x5
	eret

1:	/* Bad stub call */
	mov_q	x0, HVC_STUB_ERR
	eret

SYM_CODE_END(__kvm_handle_stub_hvc)

	.popsection