/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/fpsimdmacros.h>
#include <asm/kvm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_ptrauth.h>

/* Byte offset of general purpose register x<n> within a kvm_cpu_context. */
#define CPU_XREG_OFFSET(x)	(CPU_USER_PT_REGS + 8*x)
/* sp_el0 is stored immediately after the last GP register (x30/lr). */
#define CPU_SP_EL0_OFFSET	(CPU_XREG_OFFSET(30) + 8)

	.text

/*
 * We treat x18 as callee-saved as the host may use it as a platform
 * register (e.g. for shadow call stack).
 */
.macro save_callee_saved_regs ctxt
	str	x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
	stp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
	stp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
	stp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
	stp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
	stp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
	stp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm

.macro restore_callee_saved_regs ctxt
	// We require \ctxt is not x18-x28
	ldr	x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
	ldp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
	ldp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
	ldp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
	ldp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
	ldp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
	ldp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm

// Save the current sp_el0 into \ctxt, clobbering \tmp.
.macro save_sp_el0 ctxt, tmp
	mrs	\tmp, sp_el0
	str	\tmp, [\ctxt, #CPU_SP_EL0_OFFSET]
.endm

// Restore sp_el0 from \ctxt, clobbering \tmp.
.macro restore_sp_el0 ctxt, tmp
	ldr	\tmp, [\ctxt, #CPU_SP_EL0_OFFSET]
	msr	sp_el0, \tmp
.endm

/*
 * u64 __guest_enter(struct kvm_vcpu *vcpu,
 *		     struct kvm_cpu_context *host_ctxt);
 */
SYM_FUNC_START(__guest_enter)
	// x0: vcpu
	// x1: host context
	// x2-x17: clobbered by macros
	// x29: guest context

	// Store the host regs
	save_callee_saved_regs x1

	// Save the host's sp_el0
	save_sp_el0 x1, x2

	// Now the host state is stored, if we have a pending RAS SError it
	// must affect the host. If any asynchronous exception is pending we
	// defer the guest entry. The DSB isn't necessary before v8.2 as any
	// SError would be fatal.
alternative_if ARM64_HAS_RAS_EXTN
	dsb	nshst
	isb
alternative_else_nop_endif
	mrs	x1, isr_el1
	cbz	x1, 1f
	mov	x0, #ARM_EXCEPTION_IRQ
	ret

1:
	add	x29, x0, #VCPU_CONTEXT

	// Macro ptrauth_switch_to_guest format:
	// 	ptrauth_switch_to_guest(guest cxt, tmp1, tmp2, tmp3)
	// The below macro to restore guest keys is not implemented in C code
	// as it may cause Pointer Authentication key signing mismatch errors
	// when this feature is enabled for kernel code.
	ptrauth_switch_to_guest x29, x0, x1, x2

	// Restore the guest's sp_el0
	restore_sp_el0 x29, x0

	// Restore guest regs x0-x17
	ldp	x0, x1,   [x29, #CPU_XREG_OFFSET(0)]
	ldp	x2, x3,   [x29, #CPU_XREG_OFFSET(2)]
	ldp	x4, x5,   [x29, #CPU_XREG_OFFSET(4)]
	ldp	x6, x7,   [x29, #CPU_XREG_OFFSET(6)]
	ldp	x8, x9,   [x29, #CPU_XREG_OFFSET(8)]
	ldp	x10, x11, [x29, #CPU_XREG_OFFSET(10)]
	ldp	x12, x13, [x29, #CPU_XREG_OFFSET(12)]
	ldp	x14, x15, [x29, #CPU_XREG_OFFSET(14)]
	ldp	x16, x17, [x29, #CPU_XREG_OFFSET(16)]

	// Restore guest regs x18-x29, lr
	restore_callee_saved_regs x29

	// Do not touch any register after this!
	eret
	sb

SYM_INNER_LABEL(__guest_exit, SYM_L_GLOBAL)
	// x0: return code
	// x1: vcpu
	// x2-x29,lr: vcpu regs
	// vcpu x0-x1 on the stack

	add	x1, x1, #VCPU_CONTEXT

	ALTERNATIVE(nop, SET_PSTATE_PAN(1), ARM64_HAS_PAN, CONFIG_ARM64_PAN)

	// Store the guest regs x2 and x3
	stp	x2, x3,   [x1, #CPU_XREG_OFFSET(2)]

	// Retrieve the guest regs x0-x1 from the stack
	ldp	x2, x3, [sp], #16	// x0, x1

	// Store the guest regs x0-x1 and x4-x17
	stp	x2, x3,   [x1, #CPU_XREG_OFFSET(0)]
	stp	x4, x5,   [x1, #CPU_XREG_OFFSET(4)]
	stp	x6, x7,   [x1, #CPU_XREG_OFFSET(6)]
	stp	x8, x9,   [x1, #CPU_XREG_OFFSET(8)]
	stp	x10, x11, [x1, #CPU_XREG_OFFSET(10)]
	stp	x12, x13, [x1, #CPU_XREG_OFFSET(12)]
	stp	x14, x15, [x1, #CPU_XREG_OFFSET(14)]
	stp	x16, x17, [x1, #CPU_XREG_OFFSET(16)]

	// Store the guest regs x18-x29, lr
	save_callee_saved_regs x1

	// Store the guest's sp_el0
	save_sp_el0 x1, x2

	get_host_ctxt	x2, x3

	// Macro ptrauth_switch_to_host format:
	// 	ptrauth_switch_to_host(guest cxt, host cxt, tmp1, tmp2, tmp3)
	// The below macro to save/restore keys is not implemented in C code
	// as it may cause Pointer Authentication key signing mismatch errors
	// when this feature is enabled for kernel code.
	ptrauth_switch_to_host x1, x2, x3, x4, x5

	// Restore the host's sp_el0
	restore_sp_el0 x2, x3

	// Now restore the host regs
	restore_callee_saved_regs x2

alternative_if ARM64_HAS_RAS_EXTN
	// If we have the RAS extensions we can consume a pending error
	// without an unmask-SError and isb. The ESB-instruction consumed any
	// pending guest error when we took the exception from the guest.
	mrs_s	x2, SYS_DISR_EL1
	str	x2, [x1, #(VCPU_FAULT_DISR - VCPU_CONTEXT)]
	cbz	x2, 1f
	msr_s	SYS_DISR_EL1, xzr
	orr	x0, x0, #(1<<ARM_EXIT_WITH_SERROR_BIT)
1:	ret
alternative_else
	dsb	sy		// Synchronize against in-flight ld/st
	isb			// Prevent an early read of side-effect free ISR
	mrs	x2, isr_el1
	tbnz	x2, #8, 2f	// ISR_EL1.A
	ret
	nop
2:
alternative_endif
	// We know we have a pending asynchronous abort, now is the
	// time to flush it out. From your VAXorcist book, page 666:
	// "Threaten me not, oh Evil one! For I speak with
	// the power of DEC, and I command thee to show thyself!"
	mrs	x2, elr_el2
	mrs	x3, esr_el2
	mrs	x4, spsr_el2
	mov	x5, x0

	msr	daifclr, #4	// Unmask aborts

	// This is our single instruction exception window. A pending
	// SError is guaranteed to occur at the earliest when we unmask
	// it, and at the latest just after the ISB.
abort_guest_exit_start:

	isb

abort_guest_exit_end:

	msr	daifset, #4	// Mask aborts
	ret

	_kvm_extable	abort_guest_exit_start, 9997f
	_kvm_extable	abort_guest_exit_end, 9997f
9997:
	msr	daifset, #4	// Mask aborts
	mov	x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)

	// restore the EL1 exception context so that we can report some
	// information. Merge the exception code with the SError pending bit.
	msr	elr_el2, x2
	msr	esr_el2, x3
	msr	spsr_el2, x4
	orr	x0, x0, x5
1:	ret
SYM_FUNC_END(__guest_enter)