/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Hypervisor stub
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/el2_setup.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/ptrace.h>
#include <asm/virt.h>

// Warning, hardcoded register allocation
// This will clobber x1 and x2, and expect x1 to contain
// the id register value as read from the HW
//
// Branches to \fail if the (\fld, \width) field extracted from x1 is
// zero.  Otherwise, consults the <idreg>_override (value, mask) pair:
// if the field is not masked, or its overridden value is non-zero,
// branch to \pass; branch to \fail otherwise.
.macro __check_override idreg, fld, width, pass, fail
	ubfx	x1, x1, #\fld, #\width
	cbz	x1, \fail

	adr_l	x1, \idreg\()_override
	ldr	x2, [x1, FTR_OVR_VAL_OFFSET]
	ldr	x1, [x1, FTR_OVR_MASK_OFFSET]
	ubfx	x2, x2, #\fld, #\width
	ubfx	x1, x1, #\fld, #\width
	cmp	x1, xzr				// field covered by the mask?
	and	x2, x2, x1			// masked override value
	csinv	x2, x2, xzr, ne			// all-ones (pass) if unmasked
	cbnz	x2, \pass
	b	\fail
.endm

// Same as __check_override, but reads the id register itself and
// assumes a 4-bit field. Same x1/x2 clobbers apply.
.macro check_override idreg, fld, pass, fail
	mrs	x1, \idreg\()_el1
	__check_override \idreg \fld 4 \pass \fail
.endm

	.text
	.pushsection	.hyp.text, "ax"

	.align 11

SYM_CODE_START(__hyp_stub_vectors)
	ventry	el2_sync_invalid		// Synchronous EL2t
	ventry	el2_irq_invalid			// IRQ EL2t
	ventry	el2_fiq_invalid			// FIQ EL2t
	ventry	el2_error_invalid		// Error EL2t

	ventry	elx_sync			// Synchronous EL2h
	ventry	el2_irq_invalid			// IRQ EL2h
	ventry	el2_fiq_invalid			// FIQ EL2h
	ventry	el2_error_invalid		// Error EL2h

	ventry	elx_sync			// Synchronous 64-bit EL1
	ventry	el1_irq_invalid			// IRQ 64-bit EL1
	ventry	el1_fiq_invalid			// FIQ 64-bit EL1
	ventry	el1_error_invalid		// Error 64-bit EL1

	ventry	el1_sync_invalid		// Synchronous 32-bit EL1
	ventry	el1_irq_invalid			// IRQ 32-bit EL1
	ventry	el1_fiq_invalid			// FIQ 32-bit EL1
	ventry	el1_error_invalid		// Error 32-bit EL1
SYM_CODE_END(__hyp_stub_vectors)

	.align 11

// Synchronous exception handler for the stub: dispatch on the HVC
// function ID passed in x0 (HVC_SET_VECTORS / HVC_FINALISE_EL2 /
// HVC_SOFT_RESTART / HVC_RESET_VECTORS). Returns 0 in x0 on success,
// HVC_STUB_ERR on an unrecognised function ID.
SYM_CODE_START_LOCAL(elx_sync)
	cmp	x0, #HVC_SET_VECTORS
	b.ne	1f
	msr	vbar_el2, x1			// x1 = new EL2 vector base
	b	9f

1:	cmp	x0, #HVC_FINALISE_EL2
	b.eq	__finalise_el2

2:	cmp	x0, #HVC_SOFT_RESTART
	b.ne	3f
	mov	x0, x2				// shuffle args into place
	mov	x2, x4				// and jump to the restart
	mov	x4, x1				// entry point held in x1
	mov	x1, x3
	br	x4				// no return

3:	cmp	x0, #HVC_RESET_VECTORS
	beq	9f				// Nothing to reset!

	/* Someone called kvm_call_hyp() against the hyp-stub... */
	mov_q	x0, HVC_STUB_ERR
	eret

9:	mov	x0, xzr
	eret
SYM_CODE_END(elx_sync)

// Finalise EL2 setup: enable SVE/SME register access where present
// (honouring the id register overrides), then, if the CPU is VHE
// capable, mirror the EL1 MM state into the EL2 (_EL12) registers and
// transition the kernel to running at EL2.
SYM_CODE_START_LOCAL(__finalise_el2)
	check_override id_aa64pfr0 ID_AA64PFR0_SVE_SHIFT .Linit_sve .Lskip_sve

.Linit_sve:	/* SVE register access */
	mrs	x0, cptr_el2			// Disable SVE traps
	bic	x0, x0, #CPTR_EL2_TZ
	msr	cptr_el2, x0
	isb
	mov	x1, #ZCR_ELx_LEN_MASK		// SVE: Enable full vector
	msr_s	SYS_ZCR_EL2, x1			// length for EL1.

.Lskip_sve:
	check_override id_aa64pfr1 ID_AA64PFR1_SME_SHIFT .Linit_sme .Lskip_sme

.Linit_sme:	/* SME register access and priority mapping */
	mrs	x0, cptr_el2			// Disable SME traps
	bic	x0, x0, #CPTR_EL2_TSM
	msr	cptr_el2, x0
	isb

	mrs	x1, sctlr_el2
	orr	x1, x1, #SCTLR_ELx_ENTP2	// Disable TPIDR2 traps
	msr	sctlr_el2, x1
	isb

	mov	x0, #0				// SMCR controls

	// Full FP in SM?
	mrs_s	x1, SYS_ID_AA64SMFR0_EL1
	__check_override id_aa64smfr0 ID_AA64SMFR0_EL1_FA64_SHIFT 1 .Linit_sme_fa64 .Lskip_sme_fa64

.Linit_sme_fa64:
	orr	x0, x0, SMCR_ELx_FA64_MASK
.Lskip_sme_fa64:

	orr	x0, x0, #SMCR_ELx_LEN_MASK	// Enable full SME vector
	msr_s	SYS_SMCR_EL2, x0		// length for EL1.

	mrs_s	x1, SYS_SMIDR_EL1		// Priority mapping supported?
	ubfx	x1, x1, #SMIDR_EL1_SMPS_SHIFT, #1
	cbz	x1, .Lskip_sme

	msr_s	SYS_SMPRIMAP_EL2, xzr		// Make all priorities equal

	mrs	x1, id_aa64mmfr1_el1		// HCRX_EL2 present?
	ubfx	x1, x1, #ID_AA64MMFR1_HCX_SHIFT, #4
	cbz	x1, .Lskip_sme

	mrs_s	x1, SYS_HCRX_EL2
	orr	x1, x1, #HCRX_EL2_SMPME_MASK	// Enable priority mapping
	msr_s	SYS_HCRX_EL2, x1

.Lskip_sme:

	// nVHE? No way! Give me the real thing!
	// Sanity check: MMU *must* be off
	mrs	x1, sctlr_el2
	tbnz	x1, #0, 1f			// SCTLR_EL2.M set -> error out

	// Needs to be VHE capable, obviously
	check_override id_aa64mmfr1 ID_AA64MMFR1_VHE_SHIFT 2f 1f

1:	mov_q	x0, HVC_STUB_ERR
	eret
2:
	// Engage the VHE magic!
	mov_q	x0, HCR_HOST_VHE_FLAGS
	msr	hcr_el2, x0
	isb

	// Use the EL1 allocated stack, per-cpu offset
	mrs	x0, sp_el1
	mov	sp, x0
	mrs	x0, tpidr_el1
	msr	tpidr_el2, x0

	// FP configuration, vectors
	mrs_s	x0, SYS_CPACR_EL12
	msr	cpacr_el1, x0
	mrs_s	x0, SYS_VBAR_EL12
	msr	vbar_el1, x0

	// Use EL2 translations for SPE & TRBE and disable access from EL1
	mrs	x0, mdcr_el2
	bic	x0, x0, #(MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT)
	bic	x0, x0, #(MDCR_EL2_E2TB_MASK << MDCR_EL2_E2TB_SHIFT)
	msr	mdcr_el2, x0

	// Transfer the MM state from EL1 to EL2
	// (with HCR_EL2.E2H set, the _EL12 accessors reach the real EL1
	// registers, while the plain _el1 names alias the EL2 ones)
	mrs_s	x0, SYS_TCR_EL12
	msr	tcr_el1, x0
	mrs_s	x0, SYS_TTBR0_EL12
	msr	ttbr0_el1, x0
	mrs_s	x0, SYS_TTBR1_EL12
	msr	ttbr1_el1, x0
	mrs_s	x0, SYS_MAIR_EL12
	msr	mair_el1, x0
	isb

	// Hack the exception return to stay at EL2
	mrs	x0, spsr_el1
	and	x0, x0, #~PSR_MODE_MASK
	mov	x1, #PSR_MODE_EL2h
	orr	x0, x0, x1
	msr	spsr_el1, x0

	b	enter_vhe
SYM_CODE_END(__finalise_el2)

	// At the point where we reach enter_vhe(), we run with
	// the MMU off (which is enforced by __finalise_el2()).
	// We thus need to be in the idmap, or everything will
	// explode when enabling the MMU.
213 214 .pushsection .idmap.text, "ax" 215 216SYM_CODE_START_LOCAL(enter_vhe) 217 // Invalidate TLBs before enabling the MMU 218 tlbi vmalle1 219 dsb nsh 220 isb 221 222 // Enable the EL2 S1 MMU, as set up from EL1 223 mrs_s x0, SYS_SCTLR_EL12 224 set_sctlr_el1 x0 225 226 // Disable the EL1 S1 MMU for a good measure 227 mov_q x0, INIT_SCTLR_EL1_MMU_OFF 228 msr_s SYS_SCTLR_EL12, x0 229 230 mov x0, xzr 231 232 eret 233SYM_CODE_END(enter_vhe) 234 235 .popsection 236 237.macro invalid_vector label 238SYM_CODE_START_LOCAL(\label) 239 b \label 240SYM_CODE_END(\label) 241.endm 242 243 invalid_vector el2_sync_invalid 244 invalid_vector el2_irq_invalid 245 invalid_vector el2_fiq_invalid 246 invalid_vector el2_error_invalid 247 invalid_vector el1_sync_invalid 248 invalid_vector el1_irq_invalid 249 invalid_vector el1_fiq_invalid 250 invalid_vector el1_error_invalid 251 252 .popsection 253 254/* 255 * __hyp_set_vectors: Call this after boot to set the initial hypervisor 256 * vectors as part of hypervisor installation. On an SMP system, this should 257 * be called on each CPU. 258 * 259 * x0 must be the physical address of the new vector table, and must be 260 * 2KB aligned. 261 * 262 * Before calling this, you must check that the stub hypervisor is installed 263 * everywhere, by waiting for any secondary CPUs to be brought up and then 264 * checking that is_hyp_mode_available() is true. 265 * 266 * If not, there is a pre-existing hypervisor, some CPUs failed to boot, or 267 * something else went wrong... in such cases, trying to install a new 268 * hypervisor is unlikely to work as desired. 269 * 270 * When you call into your shiny new hypervisor, sp_el2 will contain junk, 271 * so you will need to set that to something sensible at the new hypervisor's 272 * initialisation entry point. 
273 */ 274 275SYM_FUNC_START(__hyp_set_vectors) 276 mov x1, x0 277 mov x0, #HVC_SET_VECTORS 278 hvc #0 279 ret 280SYM_FUNC_END(__hyp_set_vectors) 281 282SYM_FUNC_START(__hyp_reset_vectors) 283 mov x0, #HVC_RESET_VECTORS 284 hvc #0 285 ret 286SYM_FUNC_END(__hyp_reset_vectors) 287 288/* 289 * Entry point to finalise EL2 and switch to VHE if deemed capable 290 * 291 * w0: boot mode, as returned by init_kernel_el() 292 */ 293SYM_FUNC_START(finalise_el2) 294 // Need to have booted at EL2 295 cmp w0, #BOOT_CPU_MODE_EL2 296 b.ne 1f 297 298 // and still be at EL1 299 mrs x0, CurrentEL 300 cmp x0, #CurrentEL_EL1 301 b.ne 1f 302 303 mov x0, #HVC_FINALISE_EL2 304 hvc #0 3051: 306 ret 307SYM_FUNC_END(finalise_el2) 308