/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/fpsimdmacros.h>
#include <asm/kvm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_mte.h>
#include <asm/kvm_ptrauth.h>

	.text

/*
 * u64 __guest_enter(struct kvm_vcpu *vcpu);
 */
SYM_FUNC_START(__guest_enter)
	// x0: vcpu
	// x1-x17: clobbered by macros
	// x29: guest context

	adr_this_cpu x1, kvm_hyp_ctxt, x2

	// Store the hyp regs
	save_callee_saved_regs x1

	// Save hyp's sp_el0
	save_sp_el0	x1, x2

	// Now the hyp state is stored; if we have a pending RAS SError it
	// must affect the host or hyp. If any asynchronous exception is
	// pending we defer the guest entry. The DSB isn't necessary before
	// v8.2 as any SError would be fatal.
alternative_if ARM64_HAS_RAS_EXTN
	dsb	nshst
	isb
alternative_else_nop_endif
	mrs	x1, isr_el1
	cbz	x1, 1f

	// Ensure that __guest_enter() always provides a context
	// synchronization event so that callers don't need ISBs for anything
	// that would usually be synchronized by the ERET.
	isb
	mov	x0, #ARM_EXCEPTION_IRQ
	ret

1:
	set_loaded_vcpu x0, x1, x2

	add	x29, x0, #VCPU_CONTEXT

	// mte_switch_to_guest(g_ctxt, h_ctxt, tmp1)
	mte_switch_to_guest x29, x1, x2

	// Macro ptrauth_switch_to_guest format:
	//	ptrauth_switch_to_guest(guest cxt, tmp1, tmp2, tmp3)
	// The below macro to restore guest keys is not implemented in C code
	// as it may cause Pointer Authentication key signing mismatch errors
	// when this feature is enabled for kernel code.
	ptrauth_switch_to_guest x29, x0, x1, x2

	// Restore the guest's sp_el0
	restore_sp_el0 x29, x0

	// Restore guest regs x0-x17
	ldp	x0, x1,   [x29, #CPU_XREG_OFFSET(0)]
	ldp	x2, x3,   [x29, #CPU_XREG_OFFSET(2)]
	ldp	x4, x5,   [x29, #CPU_XREG_OFFSET(4)]
	ldp	x6, x7,   [x29, #CPU_XREG_OFFSET(6)]
	ldp	x8, x9,   [x29, #CPU_XREG_OFFSET(8)]
	ldp	x10, x11, [x29, #CPU_XREG_OFFSET(10)]
	ldp	x12, x13, [x29, #CPU_XREG_OFFSET(12)]
	ldp	x14, x15, [x29, #CPU_XREG_OFFSET(14)]
	ldp	x16, x17, [x29, #CPU_XREG_OFFSET(16)]

	// Restore guest regs x18-x29, lr
	restore_callee_saved_regs x29

	// Do not touch any register after this!
	eret
	sb

SYM_INNER_LABEL(__guest_exit_panic, SYM_L_GLOBAL)
	// x2-x29,lr: vcpu regs
	// vcpu x0-x1 on the stack

	// If the hyp context is loaded, go straight to hyp_panic
	get_loaded_vcpu x0, x1
	cbnz	x0, 1f
	b	hyp_panic

1:
	// The hyp context is saved so make sure it is restored to allow
	// hyp_panic to run at hyp and, subsequently, panic to run in the
	// host. This makes use of __guest_exit to avoid duplication but
	// sets the return address to tail call into hyp_panic. As a side
	// effect, the current state is saved to the guest context but it
	// will only be accurate if the guest had been completely restored.
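	//
	// In effect (illustrative pseudocode only, not real kernel C):
	//
	//	hyp_ctxt->regs[30] = (u64)hyp_panic;	// patch hyp's saved lr
	//	__guest_exit(vcpu);			// final "ret" lands in hyp_panic
	//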
	adr_this_cpu x0, kvm_hyp_ctxt, x1
	adr_l	x1, hyp_panic
	str	x1, [x0, #CPU_XREG_OFFSET(30)]

	get_vcpu_ptr	x1, x0

SYM_INNER_LABEL(__guest_exit, SYM_L_GLOBAL)
	// x0: return code
	// x1: vcpu
	// x2-x29,lr: vcpu regs
	// vcpu x0-x1 on the stack

	add	x1, x1, #VCPU_CONTEXT

	ALTERNATIVE(nop, SET_PSTATE_PAN(1), ARM64_HAS_PAN, CONFIG_ARM64_PAN)

	// Store the guest regs x2 and x3
	stp	x2, x3,   [x1, #CPU_XREG_OFFSET(2)]

	// Retrieve the guest regs x0-x1 from the stack
	ldp	x2, x3, [sp], #16	// x0, x1

	// Store the guest regs x0-x1 and x4-x17
	stp	x2, x3,   [x1, #CPU_XREG_OFFSET(0)]
	stp	x4, x5,   [x1, #CPU_XREG_OFFSET(4)]
	stp	x6, x7,   [x1, #CPU_XREG_OFFSET(6)]
	stp	x8, x9,   [x1, #CPU_XREG_OFFSET(8)]
	stp	x10, x11, [x1, #CPU_XREG_OFFSET(10)]
	stp	x12, x13, [x1, #CPU_XREG_OFFSET(12)]
	stp	x14, x15, [x1, #CPU_XREG_OFFSET(14)]
	stp	x16, x17, [x1, #CPU_XREG_OFFSET(16)]

	// Store the guest regs x18-x29, lr
	save_callee_saved_regs x1

	// Store the guest's sp_el0
	save_sp_el0	x1, x2

	adr_this_cpu x2, kvm_hyp_ctxt, x3

	// Macro ptrauth_switch_to_hyp format:
	//	ptrauth_switch_to_hyp(guest cxt, host cxt, tmp1, tmp2, tmp3)
	// The below macro to save/restore keys is not implemented in C code
	// as it may cause Pointer Authentication key signing mismatch errors
	// when this feature is enabled for kernel code.
	ptrauth_switch_to_hyp x1, x2, x3, x4, x5

	// mte_switch_to_hyp(g_ctxt, h_ctxt, reg1)
	mte_switch_to_hyp x1, x2, x3

	// Restore hyp's sp_el0
	restore_sp_el0 x2, x3

	// Now restore the hyp regs
	restore_callee_saved_regs x2

	set_loaded_vcpu xzr, x2, x3

alternative_if ARM64_HAS_RAS_EXTN
	// If we have the RAS extensions we can consume a pending error
	// without an unmask-SError and isb. The ESB-instruction consumed any
	// pending guest error when we took the exception from the guest.
	mrs_s	x2, SYS_DISR_EL1
	str	x2, [x1, #(VCPU_FAULT_DISR - VCPU_CONTEXT)]
	cbz	x2, 1f
	msr_s	SYS_DISR_EL1, xzr
	orr	x0, x0, #(1<<ARM_EXIT_WITH_SERROR_BIT)
1:	ret
alternative_else
	dsb	sy		// Synchronize against in-flight ld/st
	isb			// Prevent an early read of side-effect free ISR
	mrs	x2, isr_el1
	tbnz	x2, #ISR_EL1_A_SHIFT, 2f
	ret
	nop
2:
alternative_endif
	// We know we have a pending asynchronous abort, now is the
	// time to flush it out. From your VAXorcist book, page 666:
	// "Threaten me not, oh Evil one! For I speak with
	// the power of DEC, and I command thee to show thyself!"
	mrs	x2, elr_el2
	mrs	x3, esr_el2
	mrs	x4, spsr_el2
	mov	x5, x0

	msr	daifclr, #4	// Unmask aborts

	// This is our single instruction exception window. A pending
	// SError is guaranteed to occur at the earliest when we unmask
	// it, and at the latest just after the ISB.
abort_guest_exit_start:

	isb

abort_guest_exit_end:

	msr	daifset, #4	// Mask aborts
	ret

	_kvm_extable	abort_guest_exit_start, 9997f
	_kvm_extable	abort_guest_exit_end, 9997f
9997:
	msr	daifset, #4	// Mask aborts
	mov	x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)

	// restore the EL1 exception context so that we can report some
	// information. Merge the exception code with the SError pending bit.
	msr	elr_el2, x2
	msr	esr_el2, x3
	msr	spsr_el2, x4
	orr	x0, x0, x5
1:	ret
SYM_FUNC_END(__guest_enter)
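
/*
 * For context (illustrative sketch, not part of this file): the hyp C
 * code is what invokes __guest_enter(), roughly along these lines; see
 * __kvm_vcpu_run() and fixup_guest_exit() in the hyp switch code:
 *
 *	do {
 *		exit_code = __guest_enter(vcpu);
 *	} while (fixup_guest_exit(vcpu, &exit_code));
 */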