/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/fpsimdmacros.h>
#include <asm/kvm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_ptrauth.h>

	.text

/*
 * u64 __guest_enter(struct kvm_vcpu *vcpu);
 */
SYM_FUNC_START(__guest_enter)
	// x0: vcpu
	// x1-x17: clobbered by macros
	// x29: guest context

	adr_this_cpu x1, kvm_hyp_ctxt, x2

	// Store the hyp regs
	save_callee_saved_regs x1

	// Save hyp's sp_el0
	save_sp_el0	x1, x2

	// Now that the hyp state is stored, a pending RAS SError must
	// affect the host or hyp. If any asynchronous exception is
	// pending, we defer the guest entry. The DSB isn't necessary
	// before v8.2, as any SError would be fatal.
alternative_if ARM64_HAS_RAS_EXTN
	dsb	nshst
	isb
alternative_else_nop_endif
	mrs	x1, isr_el1
	cbz	x1, 1f
	mov	x0, #ARM_EXCEPTION_IRQ
	ret

1:
	set_loaded_vcpu x0, x1, x2

	add	x29, x0, #VCPU_CONTEXT

	// Macro ptrauth_switch_to_guest format:
	//	ptrauth_switch_to_guest(guest cxt, tmp1, tmp2, tmp3)
	// The macro below restores the guest keys. It is not implemented
	// in C code, as that could cause Pointer Authentication key signing
	// mismatch errors when this feature is enabled for kernel code.
	ptrauth_switch_to_guest x29, x0, x1, x2

	// Restore the guest's sp_el0
	restore_sp_el0 x29, x0

	// Restore guest regs x0-x17
	ldp	x0, x1,   [x29, #CPU_XREG_OFFSET(0)]
	ldp	x2, x3,   [x29, #CPU_XREG_OFFSET(2)]
	ldp	x4, x5,   [x29, #CPU_XREG_OFFSET(4)]
	ldp	x6, x7,   [x29, #CPU_XREG_OFFSET(6)]
	ldp	x8, x9,   [x29, #CPU_XREG_OFFSET(8)]
	ldp	x10, x11, [x29, #CPU_XREG_OFFSET(10)]
	ldp	x12, x13, [x29, #CPU_XREG_OFFSET(12)]
	ldp	x14, x15, [x29, #CPU_XREG_OFFSET(14)]
	ldp	x16, x17, [x29, #CPU_XREG_OFFSET(16)]

	// Restore guest regs x18-x29, lr
	restore_callee_saved_regs x29

	// Do not touch any register after this!
	eret
	sb

SYM_INNER_LABEL(__guest_exit_panic, SYM_L_GLOBAL)
	// x2-x29,lr: vcpu regs
	// vcpu x0-x1 on the stack

	// If the hyp context is loaded, go straight to hyp_panic
	get_loaded_vcpu x0, x1
	cbz	x0, hyp_panic

	// The hyp context is saved, so make sure it is restored to allow
	// hyp_panic to run at hyp and, subsequently, panic to run in the
	// host. This makes use of __guest_exit to avoid duplication but
	// sets the return address to tail call into hyp_panic. As a side
	// effect, the current state is saved to the guest context but it
	// will only be accurate if the guest had been completely restored.
	adr_this_cpu x0, kvm_hyp_ctxt, x1
	adr	x1, hyp_panic
	str	x1, [x0, #CPU_XREG_OFFSET(30)]

	get_vcpu_ptr	x1, x0

SYM_INNER_LABEL(__guest_exit, SYM_L_GLOBAL)
	// x0: return code
	// x1: vcpu
	// x2-x29,lr: vcpu regs
	// vcpu x0-x1 on the stack

	add	x1, x1, #VCPU_CONTEXT

	ALTERNATIVE(nop, SET_PSTATE_PAN(1), ARM64_HAS_PAN, CONFIG_ARM64_PAN)

	// Store the guest regs x2 and x3
	stp	x2, x3,   [x1, #CPU_XREG_OFFSET(2)]

	// Retrieve the guest regs x0-x1 from the stack
	ldp	x2, x3, [sp], #16	// x0, x1

	// Store the guest regs x0-x1 and x4-x17
	stp	x2, x3,   [x1, #CPU_XREG_OFFSET(0)]
	stp	x4, x5,   [x1, #CPU_XREG_OFFSET(4)]
	stp	x6, x7,   [x1, #CPU_XREG_OFFSET(6)]
	stp	x8, x9,   [x1, #CPU_XREG_OFFSET(8)]
	stp	x10, x11, [x1, #CPU_XREG_OFFSET(10)]
	stp	x12, x13, [x1, #CPU_XREG_OFFSET(12)]
	stp	x14, x15, [x1, #CPU_XREG_OFFSET(14)]
	stp	x16, x17, [x1, #CPU_XREG_OFFSET(16)]

	// Store the guest regs x18-x29, lr
	save_callee_saved_regs x1

	// Store the guest's sp_el0
	save_sp_el0	x1, x2

	adr_this_cpu x2, kvm_hyp_ctxt, x3

	// Macro ptrauth_switch_to_hyp format:
	//	ptrauth_switch_to_hyp(guest cxt, host cxt, tmp1, tmp2, tmp3)
	// The macro below saves/restores the keys. It is not implemented
	// in C code, as that could cause Pointer Authentication key signing
	// mismatch errors when this feature is enabled for kernel code.
	ptrauth_switch_to_hyp x1, x2, x3, x4, x5

	// Restore hyp's sp_el0
	restore_sp_el0 x2, x3

	// Now restore the hyp regs
	restore_callee_saved_regs x2

	set_loaded_vcpu xzr, x1, x2

alternative_if ARM64_HAS_RAS_EXTN
	// If we have the RAS extensions we can consume a pending error
	// without unmasking SError and issuing an ISB. The ESB instruction
	// consumed any pending guest error when we took the exception
	// from the guest.
	mrs_s	x2, SYS_DISR_EL1
	str	x2, [x1, #(VCPU_FAULT_DISR - VCPU_CONTEXT)]
	cbz	x2, 1f
	msr_s	SYS_DISR_EL1, xzr
	orr	x0, x0, #(1<<ARM_EXIT_WITH_SERROR_BIT)
1:	ret
alternative_else
	dsb	sy		// Synchronize against in-flight ld/st
	isb			// Prevent an early read of side-effect free ISR
	mrs	x2, isr_el1
	tbnz	x2, #8, 2f	// ISR_EL1.A
	ret
	nop
2:
alternative_endif
	// We know we have a pending asynchronous abort, now is the
	// time to flush it out. From your VAXorcist book, page 666:
	// "Threaten me not, oh Evil one! For I speak with
	// the power of DEC, and I command thee to show thyself!"
	mrs	x2, elr_el2
	mrs	x3, esr_el2
	mrs	x4, spsr_el2
	mov	x5, x0

	msr	daifclr, #4	// Unmask aborts

	// This is our single instruction exception window. A pending
	// SError is guaranteed to occur at the earliest when we unmask
	// it, and at the latest just after the ISB.
abort_guest_exit_start:

	isb

abort_guest_exit_end:

	msr	daifset, #4	// Mask aborts
	ret

	_kvm_extable	abort_guest_exit_start, 9997f
	_kvm_extable	abort_guest_exit_end, 9997f
9997:
	msr	daifset, #4	// Mask aborts
	mov	x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)

	// Restore the EL1 exception context so that we can report some
	// information. Merge the exception code with the SError pending bit.
	msr	elr_el2, x2
	msr	esr_el2, x3
	msr	spsr_el2, x4
	orr	x0, x0, x5
1:	ret
SYM_FUNC_END(__guest_enter)
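For context, here is a minimal C sketch of how a caller might decode the value
returned by __guest_enter(). It is illustrative only, not the upstream
__kvm_vcpu_run() loop: run_guest_once() is a hypothetical helper, while
ARM_EXCEPTION_IRQ, ARM_EXIT_WITH_SERROR_BIT and the ARM_SERROR_PENDING()
macro are the real definitions from <asm/kvm_asm.h>.

	#include <asm/kvm_asm.h>	/* ARM_EXCEPTION_*, ARM_SERROR_PENDING() */
	#include <asm/kvm_host.h>	/* struct kvm_vcpu */

	/* Hypothetical helper, for illustration only. */
	static u64 run_guest_once(struct kvm_vcpu *vcpu)
	{
		/*
		 * The low bits hold the exception class: ARM_EXCEPTION_IRQ
		 * if the entry above bailed out on a pending asynchronous
		 * exception, or whatever class __guest_exit was entered with.
		 */
		u64 exit_code = __guest_enter(vcpu);

		/*
		 * The exit path may also OR in ARM_EXIT_WITH_SERROR_BIT
		 * after consuming a deferred guest SError; note it, then
		 * strip it before acting on the exception class.
		 */
		if (ARM_SERROR_PENDING(exit_code))
			exit_code &= ~(1ULL << ARM_EXIT_WITH_SERROR_BIT);

		return exit_code;
	}

Upstream, the exit code goes through further fixup in the hyp switch code
before the host's run loop acts on it.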