/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Hypervisor stub
 *
 * Minimal EL2 vector table installed at boot. It services a small set of
 * HVC "function IDs" (passed in x0) that let the kernel, running at EL1,
 * either hand EL2 over to a real hypervisor (KVM) or upgrade itself to
 * run at EL2 when VHE is available.
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/el2_setup.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/ptrace.h>
#include <asm/virt.h>

	.text
	.pushsection	.hyp.text, "ax"

	.align 11			// VBAR_EL2 requires 2KB alignment

SYM_CODE_START(__hyp_stub_vectors)
	ventry	el2_sync_invalid	// Synchronous EL2t
	ventry	el2_irq_invalid		// IRQ EL2t
	ventry	el2_fiq_invalid		// FIQ EL2t
	ventry	el2_error_invalid	// Error EL2t

	ventry	elx_sync		// Synchronous EL2h
	ventry	el2_irq_invalid		// IRQ EL2h
	ventry	el2_fiq_invalid		// FIQ EL2h
	ventry	el2_error_invalid	// Error EL2h

	ventry	elx_sync		// Synchronous 64-bit EL1
	ventry	el1_irq_invalid		// IRQ 64-bit EL1
	ventry	el1_fiq_invalid		// FIQ 64-bit EL1
	ventry	el1_error_invalid	// Error 64-bit EL1

	ventry	el1_sync_invalid	// Synchronous 32-bit EL1
	ventry	el1_irq_invalid		// IRQ 32-bit EL1
	ventry	el1_fiq_invalid		// FIQ 32-bit EL1
	ventry	el1_error_invalid	// Error 32-bit EL1
SYM_CODE_END(__hyp_stub_vectors)

	.align 11

/*
 * Sole synchronous exception handler: dispatch on the HVC function ID
 * held in x0.
 *
 *   HVC_SET_VECTORS:   x1 = physical address of the new vector table
 *   HVC_FINALISE_EL2:  tail-call into __finalise_el2 (may not return)
 *   HVC_SOFT_RESTART:  x1 = entry point, x2/x3/x4 = its arguments
 *   HVC_RESET_VECTORS: nothing to do while the stub is installed
 *
 * Returns 0 in x0 on success, HVC_STUB_ERR for any unknown ID.
 */
SYM_CODE_START_LOCAL(elx_sync)
	cmp	x0, #HVC_SET_VECTORS
	b.ne	1f
	msr	vbar_el2, x1
	b	9f

1:	cmp	x0, #HVC_FINALISE_EL2
	b.eq	__finalise_el2

2:	cmp	x0, #HVC_SOFT_RESTART
	b.ne	3f
	// Shuffle (x2, x3, x4) into (x0, x1, x2) and branch to the
	// entry point originally passed in x1.
	mov	x0, x2
	mov	x2, x4
	mov	x4, x1
	mov	x1, x3
	br	x4				// no return

3:	cmp	x0, #HVC_RESET_VECTORS
	beq	9f				// Nothing to reset!

	/* Someone called kvm_call_hyp() against the hyp-stub... */
	mov_q	x0, HVC_STUB_ERR
	eret

9:	mov	x0, xzr
	eret
SYM_CODE_END(elx_sync)

/*
 * Upgrade the kernel to run at EL2 (VHE), if possible. Entered from
 * elx_sync via HVC_FINALISE_EL2. On failure, erets back to EL1 with
 * HVC_STUB_ERR in x0; on success, never returns to EL1 - it rewrites
 * SPSR_EL1 so the final eret (in enter_vhe) stays at EL2.
 */
SYM_CODE_START_LOCAL(__finalise_el2)
	finalise_el2_state

	// nVHE? No way! Give me the real thing!
	// Sanity check: MMU *must* be off
	mrs	x1, sctlr_el2
	tbnz	x1, #0, 1f		// SCTLR_EL2.M set -> bail out

	// Needs to be VHE capable, obviously
	check_override id_aa64mmfr1 ID_AA64MMFR1_EL1_VH_SHIFT 0f 1f x1 x2

0:	// Check whether we only want the hypervisor to run VHE, not the kernel
	adr_l	x1, arm64_sw_feature_override
	ldr	x2, [x1, FTR_OVR_VAL_OFFSET]
	ldr	x1, [x1, FTR_OVR_MASK_OFFSET]
	and	x2, x2, x1
	ubfx	x2, x2, #ARM64_SW_FEATURE_OVERRIDE_HVHE, #4
	cbz	x2, 2f			// hVHE not requested -> go VHE

1:	mov_q	x0, HVC_STUB_ERR
	eret
2:
	// Engage the VHE magic!
	mov_q	x0, HCR_HOST_VHE_FLAGS
	msr	hcr_el2, x0
	isb

	// Use the EL1 allocated stack, per-cpu offset
	mrs	x0, sp_el1
	mov	sp, x0
	mrs	x0, tpidr_el1
	msr	tpidr_el2, x0

	// FP configuration, vectors
	mrs_s	x0, SYS_CPACR_EL12
	msr	cpacr_el1, x0
	mrs_s	x0, SYS_VBAR_EL12
	msr	vbar_el1, x0

	// Use EL2 translations for SPE & TRBE and disable access from EL1
	mrs	x0, mdcr_el2
	bic	x0, x0, #(MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT)
	bic	x0, x0, #(MDCR_EL2_E2TB_MASK << MDCR_EL2_E2TB_SHIFT)
	msr	mdcr_el2, x0

	// Transfer the MM state from EL1 to EL2
	mrs_s	x0, SYS_TCR_EL12
	msr	tcr_el1, x0
	mrs_s	x0, SYS_TTBR0_EL12
	msr	ttbr0_el1, x0
	mrs_s	x0, SYS_TTBR1_EL12
	msr	ttbr1_el1, x0
	mrs_s	x0, SYS_MAIR_EL12
	msr	mair_el1, x0

	// TCR2 only exists if FEAT_TCR2 is implemented
	mrs	x1, REG_ID_AA64MMFR3_EL1
	ubfx	x1, x1, #ID_AA64MMFR3_EL1_TCRX_SHIFT, #4
	cbz	x1, .Lskip_tcr2
	mrs	x0, REG_TCR2_EL12
	msr	REG_TCR2_EL1, x0

	// Transfer permission indirection state
	mrs	x1, REG_ID_AA64MMFR3_EL1
	ubfx	x1, x1, #ID_AA64MMFR3_EL1_S1PIE_SHIFT, #4
	cbz	x1, .Lskip_indirection
	mrs	x0, REG_PIRE0_EL12
	msr	REG_PIRE0_EL1, x0
	mrs	x0, REG_PIR_EL12
	msr	REG_PIR_EL1, x0

.Lskip_indirection:
.Lskip_tcr2:

	isb

	// Hack the exception return to stay at EL2
	mrs	x0, spsr_el1
	and	x0, x0, #~PSR_MODE_MASK
	mov	x1, #PSR_MODE_EL2h
	orr	x0, x0, x1
	msr	spsr_el1, x0

	b	enter_vhe
SYM_CODE_END(__finalise_el2)

	// At the point where we reach enter_vhe(), we run with
	// the MMU off (which is enforced by __finalise_el2()).
162f1b6cff7SMarc Zyngier // We thus need to be in the idmap, or everything will 163f1b6cff7SMarc Zyngier // explode when enabling the MMU. 164f1b6cff7SMarc Zyngier 165f1b6cff7SMarc Zyngier .pushsection .idmap.text, "ax" 166f1b6cff7SMarc Zyngier 167f1b6cff7SMarc ZyngierSYM_CODE_START_LOCAL(enter_vhe) 168f3591822SMarc Zyngier // Invalidate TLBs before enabling the MMU 169f3591822SMarc Zyngier tlbi vmalle1 170f3591822SMarc Zyngier dsb nsh 171430251ccSMarc Zyngier isb 172f3591822SMarc Zyngier 173f3591822SMarc Zyngier // Enable the EL2 S1 MMU, as set up from EL1 174f3591822SMarc Zyngier mrs_s x0, SYS_SCTLR_EL12 175f3591822SMarc Zyngier set_sctlr_el1 x0 176f3591822SMarc Zyngier 177f3591822SMarc Zyngier // Disable the EL1 S1 MMU for a good measure 178f3591822SMarc Zyngier mov_q x0, INIT_SCTLR_EL1_MMU_OFF 179f3591822SMarc Zyngier msr_s SYS_SCTLR_EL12, x0 180f3591822SMarc Zyngier 181f3591822SMarc Zyngier mov x0, xzr 182f3591822SMarc Zyngier 183f1b6cff7SMarc Zyngier eret 184f1b6cff7SMarc ZyngierSYM_CODE_END(enter_vhe) 185f1b6cff7SMarc Zyngier 186f1b6cff7SMarc Zyngier .popsection 187f3591822SMarc Zyngier 188712c6ff4SMarc Zyngier.macro invalid_vector label 1890343a7e4SMark BrownSYM_CODE_START_LOCAL(\label) 190712c6ff4SMarc Zyngier b \label 1910343a7e4SMark BrownSYM_CODE_END(\label) 192712c6ff4SMarc Zyngier.endm 193712c6ff4SMarc Zyngier 194712c6ff4SMarc Zyngier invalid_vector el2_sync_invalid 195712c6ff4SMarc Zyngier invalid_vector el2_irq_invalid 196712c6ff4SMarc Zyngier invalid_vector el2_fiq_invalid 197712c6ff4SMarc Zyngier invalid_vector el2_error_invalid 198712c6ff4SMarc Zyngier invalid_vector el1_sync_invalid 199712c6ff4SMarc Zyngier invalid_vector el1_irq_invalid 200712c6ff4SMarc Zyngier invalid_vector el1_fiq_invalid 201712c6ff4SMarc Zyngier invalid_vector el1_error_invalid 202712c6ff4SMarc Zyngier 203e30be145SQuentin Perret .popsection 204e30be145SQuentin Perret 205712c6ff4SMarc Zyngier/* 206712c6ff4SMarc Zyngier * __hyp_set_vectors: Call this after boot to set the 
initial hypervisor
 * vectors as part of hypervisor installation. On an SMP system, this should
 * be called on each CPU.
 *
 * x0 must be the physical address of the new vector table, and must be
 * 2KB aligned.
 *
 * Before calling this, you must check that the stub hypervisor is installed
 * everywhere, by waiting for any secondary CPUs to be brought up and then
 * checking that is_hyp_mode_available() is true.
 *
 * If not, there is a pre-existing hypervisor, some CPUs failed to boot, or
 * something else went wrong... in such cases, trying to install a new
 * hypervisor is unlikely to work as desired.
 *
 * When you call into your shiny new hypervisor, sp_el2 will contain junk,
 * so you will need to set that to something sensible at the new hypervisor's
 * initialisation entry point.
 */

SYM_FUNC_START(__hyp_set_vectors)
	mov	x1, x0			// stub ABI: x1 = new vector table
	mov	x0, #HVC_SET_VECTORS
	hvc	#0
	ret
SYM_FUNC_END(__hyp_set_vectors)

/*
 * __hyp_reset_vectors: Ask the stub to restore its own vectors.
 * While the stub itself is installed this is a no-op (the stub
 * acknowledges HVC_RESET_VECTORS without touching anything).
 */
SYM_FUNC_START(__hyp_reset_vectors)
	mov	x0, #HVC_RESET_VECTORS
	hvc	#0
	ret
SYM_FUNC_END(__hyp_reset_vectors)

/*
 * Entry point to finalise EL2 and switch to VHE if deemed capable
 *
 * w0: boot mode, as returned by init_kernel_el()
 */
SYM_FUNC_START(finalise_el2)
	// Need to have booted at EL2
	cmp	w0, #BOOT_CPU_MODE_EL2
	b.ne	1f

	// and still be at EL1
	mrs	x0, CurrentEL
	cmp	x0, #CurrentEL_EL1
	b.ne	1f

	mov	x0, #HVC_FINALISE_EL2
	hvc	#0
1:
	ret
SYM_FUNC_END(finalise_el2)