/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/arm-smccc.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/el2_setup.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/pgtable-hwdef.h>
#include <asm/sysreg.h>
#include <asm/virt.h>

	.text
	.pushsection	.idmap.text, "ax"

	.align	11			// Vector tables are 2KB-aligned (VBAR_EL2[10:0] are RES0)

SYM_CODE_START(__kvm_hyp_init)
	ventry	__invalid		// Synchronous EL2t
	ventry	__invalid		// IRQ EL2t
	ventry	__invalid		// FIQ EL2t
	ventry	__invalid		// Error EL2t

	ventry	__invalid		// Synchronous EL2h
	ventry	__invalid		// IRQ EL2h
	ventry	__invalid		// FIQ EL2h
	ventry	__invalid		// Error EL2h

	ventry	__do_hyp_init		// Synchronous 64-bit EL1
	ventry	__invalid		// IRQ 64-bit EL1
	ventry	__invalid		// FIQ 64-bit EL1
	ventry	__invalid		// Error 64-bit EL1

	ventry	__invalid		// Synchronous 32-bit EL1
	ventry	__invalid		// IRQ 32-bit EL1
	ventry	__invalid		// FIQ 32-bit EL1
	ventry	__invalid		// Error 32-bit EL1

__invalid:
	b	.

	/*
	 * Only uses x0..x3 so as to not clobber callee-saved SMCCC registers.
	 *
	 * x0: SMCCC function ID
	 * x1: struct kvm_nvhe_init_params PA
	 */
__do_hyp_init:
	/* Check for a stub HVC call */
	cmp	x0, #HVC_STUB_HCALL_NR
	b.lo	__kvm_handle_stub_hvc

	mov	x3, #KVM_HOST_SMCCC_FUNC(__kvm_hyp_init)
	cmp	x0, x3
	b.eq	1f

	mov	x0, #SMCCC_RET_NOT_SUPPORTED
	eret

1:	mov	x0, x1
	mov	x3, lr
	bl	___kvm_hyp_init			// Clobbers x0..x2
	mov	lr, x3

	/* Hello, World! */
	mov	x0, #SMCCC_RET_SUCCESS
	eret
SYM_CODE_END(__kvm_hyp_init)

/*
 * Initialize the hypervisor in EL2.
 *
 * Only uses x0..x2 so as to not clobber callee-saved SMCCC registers
 * and leave x3 for the caller.
 *
 * x0: struct kvm_nvhe_init_params PA
 */
SYM_CODE_START_LOCAL(___kvm_hyp_init)
	ldr	x1, [x0, #NVHE_INIT_TPIDR_EL2]
	msr	tpidr_el2, x1

	ldr	x1, [x0, #NVHE_INIT_STACK_HYP_VA]
	mov	sp, x1

	ldr	x1, [x0, #NVHE_INIT_MAIR_EL2]
	msr	mair_el2, x1

	ldr	x1, [x0, #NVHE_INIT_HCR_EL2]
	msr	hcr_el2, x1

	ldr	x1, [x0, #NVHE_INIT_VTTBR]
	msr	vttbr_el2, x1

	ldr	x1, [x0, #NVHE_INIT_VTCR]
	msr	vtcr_el2, x1

	ldr	x1, [x0, #NVHE_INIT_PGD_PA]
	phys_to_ttbr x2, x1
alternative_if ARM64_HAS_CNP
	orr	x2, x2, #TTBR_CNP_BIT
alternative_else_nop_endif
	msr	ttbr0_el2, x2

	/*
	 * Set the PS bits in TCR_EL2.
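	 *
	 * (tcr_compute_pa_size, defined in asm/assembler.h, reads
	 * ID_AA64MMFR0_EL1.PARange, clamps it to the largest PA range the
	 * kernel supports, and inserts the result at TCR_EL2_PS_SHIFT;
	 * x1 and x2 are only used as scratch registers.)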
	 */
	ldr	x0, [x0, #NVHE_INIT_TCR_EL2]
	tcr_compute_pa_size x0, #TCR_EL2_PS_SHIFT, x1, x2
	msr	tcr_el2, x0

	isb

	/*
	 * Invalidate stale TLB entries inherited from the bootloader:
	 * all EL2 entries, plus all stage-1/stage-2 EL1&0 entries for
	 * the current VMID.
	 */
	tlbi	alle2
	tlbi	vmalls12e1
	dsb	sy

	mov_q	x0, INIT_SCTLR_EL2_MMU_ON
alternative_if ARM64_HAS_ADDRESS_AUTH
	/* Enable pointer authentication (IA/IB/DA/DB keys) at EL2 */
	mov_q	x1, (SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | \
		     SCTLR_ELx_ENDA | SCTLR_ELx_ENDB)
	orr	x0, x0, x1
alternative_else_nop_endif
	msr	sctlr_el2, x0
	isb

	/* Set the host vector */
	ldr	x0, =__kvm_hyp_host_vector
	msr	vbar_el2, x0

	ret
SYM_CODE_END(___kvm_hyp_init)

/*
 * PSCI CPU_ON entry point
 *
 * x0: struct kvm_nvhe_init_params PA
 */
SYM_CODE_START(kvm_hyp_cpu_entry)
	mov	x1, #1				// is_cpu_on = true
	b	__kvm_hyp_init_cpu
SYM_CODE_END(kvm_hyp_cpu_entry)

/*
 * PSCI CPU_SUSPEND / SYSTEM_SUSPEND entry point
 *
 * x0: struct kvm_nvhe_init_params PA
 */
SYM_CODE_START(kvm_hyp_cpu_resume)
	mov	x1, #0				// is_cpu_on = false
	b	__kvm_hyp_init_cpu
SYM_CODE_END(kvm_hyp_cpu_resume)

/*
 * Common code for CPU entry points. Initializes EL2 state and
 * installs the hypervisor before handing over to a C handler.
 *
 * x0: struct kvm_nvhe_init_params PA
 * x1: bool is_cpu_on
 */
SYM_CODE_START_LOCAL(__kvm_hyp_init_cpu)
	mov	x28, x0				// Stash arguments
	mov	x29, x1

	/* Check that the core was booted in EL2. */
	mrs	x0, CurrentEL
	cmp	x0, #CurrentEL_EL2
	b.eq	2f

	/* The core booted in EL1. KVM cannot be initialized on it. */
1:	wfe
	wfi
	b	1b

2:	msr	SPsel, #1			// We want to use SP_EL{1,2}

	/* Initialize EL2 CPU state to sane values. */
	init_el2_state				// Clobbers x0..x2
	finalise_el2_state

	/* Enable MMU, set vectors and stack. */
	mov	x0, x28
	bl	___kvm_hyp_init			// Clobbers x0..x2

	/* Leave idmap. */
	mov	x0, x29
	ldr	x1, =kvm_host_psci_cpu_entry
	br	x1
SYM_CODE_END(__kvm_hyp_init_cpu)

SYM_CODE_START(__kvm_handle_stub_hvc)
	cmp	x0, #HVC_SOFT_RESTART
	b.ne	1f

	/* This is where we're about to jump, staying at EL2 */
	msr	elr_el2, x1
	mov	x0, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT | PSR_MODE_EL2h)
	msr	spsr_el2, x0

	/* Shuffle the arguments, and don't come back */
	mov	x0, x2
	mov	x1, x3
	mov	x2, x4
	b	reset

1:	cmp	x0, #HVC_RESET_VECTORS
	b.ne	1f

	/*
	 * Set the HVC_RESET_VECTORS return code before entering the common
	 * path so that we do not clobber x0-x2 in case we are coming via
	 * HVC_SOFT_RESTART.
	 */
	mov	x0, xzr
reset:
	/*
	 * Reset kvm back to the hyp stub.
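	 *
	 * (pre_disable_mmu_workaround emits the ISB required by Falkor
	 * erratum E1041 before an MSR that clears SCTLR_ELx.M, and expands
	 * to nothing on unaffected configurations. Once the stub vectors
	 * are restored, subsequent HVC_* stub calls are handled by the
	 * hyp stub again.)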
	 */
	mov_q	x5, INIT_SCTLR_EL2_MMU_OFF
	pre_disable_mmu_workaround
	msr	sctlr_el2, x5
	isb

alternative_if ARM64_KVM_PROTECTED_MODE
	mov_q	x5, HCR_HOST_NVHE_FLAGS
	msr	hcr_el2, x5
alternative_else_nop_endif

	/* Install stub vectors */
	adr_l	x5, __hyp_stub_vectors
	msr	vbar_el2, x5
	eret

1:	/* Bad stub call */
	mov_q	x0, HVC_STUB_ERR
	eret

SYM_CODE_END(__kvm_handle_stub_hvc)

/*
 * Switch the EL2 stage-1 page-tables while running from the idmap:
 * turn the MMU off, install the new page-tables and stack, turn the
 * MMU back on, then branch to the code supplied by the caller.
 *
 * x0: struct kvm_nvhe_init_params PA
 * x1: code address to branch to once the new page-tables are in place
 */
SYM_FUNC_START(__pkvm_init_switch_pgd)
	/* Turn the MMU off */
	pre_disable_mmu_workaround
	mrs	x2, sctlr_el2
	bic	x3, x2, #SCTLR_ELx_M
	msr	sctlr_el2, x3
	isb

	tlbi	alle2

	/* Install the new pgtables */
	ldr	x3, [x0, #NVHE_INIT_PGD_PA]
	phys_to_ttbr x4, x3
alternative_if ARM64_HAS_CNP
	orr	x4, x4, #TTBR_CNP_BIT
alternative_else_nop_endif
	msr	ttbr0_el2, x4

	/* Set the new stack pointer */
	ldr	x0, [x0, #NVHE_INIT_STACK_HYP_VA]
	mov	sp, x0

	/* And turn the MMU back on! */
	set_sctlr_el2	x2
	ret	x1
SYM_FUNC_END(__pkvm_init_switch_pgd)

	.popsection