/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2020 - Google Inc
 * Author: Andrew Scull <ascull@google.com>
 */

#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>

	.text

SYM_FUNC_START(__host_exit)
	get_host_ctxt	x0, x1

	/* Store the host regs x2 and x3 */
	stp	x2, x3,   [x0, #CPU_XREG_OFFSET(2)]

	/* Retrieve the host regs x0-x1 from the stack */
	ldp	x2, x3, [sp], #16	// x0, x1

	/* Store the host regs x0-x1 and x4-x17 */
	stp	x2, x3,   [x0, #CPU_XREG_OFFSET(0)]
	stp	x4, x5,   [x0, #CPU_XREG_OFFSET(4)]
	stp	x6, x7,   [x0, #CPU_XREG_OFFSET(6)]
	stp	x8, x9,   [x0, #CPU_XREG_OFFSET(8)]
	stp	x10, x11, [x0, #CPU_XREG_OFFSET(10)]
	stp	x12, x13, [x0, #CPU_XREG_OFFSET(12)]
	stp	x14, x15, [x0, #CPU_XREG_OFFSET(14)]
	stp	x16, x17, [x0, #CPU_XREG_OFFSET(16)]

	/* Store the host regs x18-x29, lr */
	save_callee_saved_regs x0

	/* Save the host context pointer in x29 across the function call */
	mov	x29, x0
	bl	handle_trap

	/* Restore host regs x0-x17 */
__host_enter_restore_full:
	ldp	x0, x1,   [x29, #CPU_XREG_OFFSET(0)]
	ldp	x2, x3,   [x29, #CPU_XREG_OFFSET(2)]
	ldp	x4, x5,   [x29, #CPU_XREG_OFFSET(4)]
	ldp	x6, x7,   [x29, #CPU_XREG_OFFSET(6)]

	/* x0-7 are used for panic arguments */
__host_enter_for_panic:
	ldp	x8, x9,   [x29, #CPU_XREG_OFFSET(8)]
	ldp	x10, x11, [x29, #CPU_XREG_OFFSET(10)]
	ldp	x12, x13, [x29, #CPU_XREG_OFFSET(12)]
	ldp	x14, x15, [x29, #CPU_XREG_OFFSET(14)]
	ldp	x16, x17, [x29, #CPU_XREG_OFFSET(16)]

	/* Restore host regs x18-x29, lr */
	restore_callee_saved_regs x29

	/* Do not touch any register after this! */
__host_enter_without_restoring:
	eret
	sb
SYM_FUNC_END(__host_exit)

/*
 * void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);
 */
SYM_FUNC_START(__host_enter)
	mov	x29, x0
	b	__host_enter_restore_full
SYM_FUNC_END(__host_enter)

/*
 * void __noreturn __hyp_do_panic(struct kvm_cpu_context *host_ctxt, u64 spsr,
 *				  u64 elr, u64 par);
 */
SYM_FUNC_START(__hyp_do_panic)
	/* Prepare and exit to the host's panic function. */
	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, lr
	ldr	lr, =nvhe_hyp_panic_handler
	hyp_kimg_va lr, x6
	msr	elr_el2, lr

	mov	x29, x0

#ifdef CONFIG_NVHE_EL2_DEBUG
	/* Ensure host stage-2 is disabled */
	mrs	x0, hcr_el2
	bic	x0, x0, #HCR_VM
	msr	hcr_el2, x0
	isb
	tlbi	vmalls12e1
	dsb	nsh
#endif

	/* Load the panic arguments into x0-7 */
	mrs	x0, esr_el2
	mov	x4, x3			// par
	mov	x3, x2			// elr, duplicated ...
	hyp_pa	x3, x6			// ... and converted to a PA
	get_vcpu_ptr x5, x6
	mrs	x6, far_el2
	mrs	x7, hpfar_el2

	/* Enter the host, conditionally restoring the host context. */
	cbz	x29, __host_enter_without_restoring
	b	__host_enter_for_panic
SYM_FUNC_END(__hyp_do_panic)
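
/*
 * For reference, the x0-x7 marshalling above is meant to line up with the
 * host's panic handler. A hedged sketch of the expected C prototype (the
 * exact signature lives in the host's handle_exit.c and may differ between
 * kernel versions):
 *
 *	void __noreturn nvhe_hyp_panic_handler(u64 esr, u64 spsr,
 *					       u64 elr_virt, u64 elr_phys,
 *					       u64 par, uintptr_t vcpu,
 *					       u64 far, u64 hpfar);
 */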

.macro host_el1_sync_vect
	.align 7
.L__vect_start\@:
	stp	x0, x1, [sp, #-16]!
	mrs	x0, esr_el2
	lsr	x0, x0, #ESR_ELx_EC_SHIFT
	cmp	x0, #ESR_ELx_EC_HVC64
	b.ne	__host_exit

	ldp	x0, x1, [sp]		// Don't fixup the stack yet

	/* Check for a stub HVC call */
	cmp	x0, #HVC_STUB_HCALL_NR
	b.hs	__host_exit

	add	sp, sp, #16
	/*
	 * Compute the idmap address of __kvm_handle_stub_hvc and
	 * jump there. Since we use kimage_voffset, do not use the
	 * HYP VA for __kvm_handle_stub_hvc, but the kernel VA instead
	 * (by loading it from the constant pool).
	 *
	 * Preserve x0-x4, which may contain stub parameters.
	 */
	ldr	x5, =__kvm_handle_stub_hvc
	hyp_pa	x5, x6
	br	x5
.L__vect_end\@:
.if ((.L__vect_end\@ - .L__vect_start\@) > 0x80)
	.error "host_el1_sync_vect larger than vector entry"
.endif
.endm

.macro invalid_host_el2_vect
	.align 7
	/* If a guest is loaded, panic out of it. */
	stp	x0, x1, [sp, #-16]!
	get_loaded_vcpu x0, x1
	cbnz	x0, __guest_exit_panic
	add	sp, sp, #16

	/*
	 * The panic may not be clean if the exception is taken before the host
	 * context has been saved by __host_exit or after the hyp context has
	 * been partially clobbered by __host_enter.
	 */
	b	hyp_panic
.endm

.macro invalid_host_el1_vect
	.align 7
	mov	x0, xzr		/* restore_host = false */
	mrs	x1, spsr_el2
	mrs	x2, elr_el2
	mrs	x3, par_el1
	b	__hyp_do_panic
.endm

/*
 * The host vector does not use an ESB instruction in order to avoid consuming
 * SErrors that should only be consumed by the host. Guest entry is deferred by
 * __guest_enter if there are any pending asynchronous exceptions so hyp will
 * always return to the host without having consumed host SErrors.
 *
 * CONFIG_KVM_INDIRECT_VECTORS is not applied to the host vectors because the
 * host knows about the EL2 vectors already, and there is no point in hiding
 * them.
 */
	.align 11
SYM_CODE_START(__kvm_hyp_host_vector)
	invalid_host_el2_vect			// Synchronous EL2t
	invalid_host_el2_vect			// IRQ EL2t
	invalid_host_el2_vect			// FIQ EL2t
	invalid_host_el2_vect			// Error EL2t

	invalid_host_el2_vect			// Synchronous EL2h
	invalid_host_el2_vect			// IRQ EL2h
	invalid_host_el2_vect			// FIQ EL2h
	invalid_host_el2_vect			// Error EL2h

	host_el1_sync_vect			// Synchronous 64-bit EL1
	invalid_host_el1_vect			// IRQ 64-bit EL1
	invalid_host_el1_vect			// FIQ 64-bit EL1
	invalid_host_el1_vect			// Error 64-bit EL1

	invalid_host_el1_vect			// Synchronous 32-bit EL1
	invalid_host_el1_vect			// IRQ 32-bit EL1
	invalid_host_el1_vect			// FIQ 32-bit EL1
	invalid_host_el1_vect			// Error 32-bit EL1
SYM_CODE_END(__kvm_hyp_host_vector)
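
/*
 * Layout note: an AArch64 vector table is sixteen 128-byte entries on a
 * 2KB-aligned base, which is why the table carries .align 11 and each
 * entry macro carries .align 7 plus the 0x80 size check. The CPU selects
 * an entry as, roughly:
 *
 *	entry = vbar_el2 + 0x200 * group	// EL2t, EL2h, AArch64, AArch32
 *			 + 0x80 * type;		// Sync, IRQ, FIQ, SError
 *
 * This is architectural background, not code taken from this file.
 */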

/*
 * Forward SMC with arguments in struct kvm_cpu_context, and
 * store the result into the same struct. Assumes SMCCC 1.2 or older.
 *
 * x0: struct kvm_cpu_context*
 */
SYM_CODE_START(__kvm_hyp_host_forward_smc)
	/*
	 * Use x18 to keep the pointer to the host context because
	 * x18 is callee-saved in SMCCC but not in AAPCS64.
	 */
	mov	x18, x0

	ldp	x0, x1,   [x18, #CPU_XREG_OFFSET(0)]
	ldp	x2, x3,   [x18, #CPU_XREG_OFFSET(2)]
	ldp	x4, x5,   [x18, #CPU_XREG_OFFSET(4)]
	ldp	x6, x7,   [x18, #CPU_XREG_OFFSET(6)]
	ldp	x8, x9,   [x18, #CPU_XREG_OFFSET(8)]
	ldp	x10, x11, [x18, #CPU_XREG_OFFSET(10)]
	ldp	x12, x13, [x18, #CPU_XREG_OFFSET(12)]
	ldp	x14, x15, [x18, #CPU_XREG_OFFSET(14)]
	ldp	x16, x17, [x18, #CPU_XREG_OFFSET(16)]

	smc	#0

	stp	x0, x1,   [x18, #CPU_XREG_OFFSET(0)]
	stp	x2, x3,   [x18, #CPU_XREG_OFFSET(2)]
	stp	x4, x5,   [x18, #CPU_XREG_OFFSET(4)]
	stp	x6, x7,   [x18, #CPU_XREG_OFFSET(6)]
	stp	x8, x9,   [x18, #CPU_XREG_OFFSET(8)]
	stp	x10, x11, [x18, #CPU_XREG_OFFSET(10)]
	stp	x12, x13, [x18, #CPU_XREG_OFFSET(12)]
	stp	x14, x15, [x18, #CPU_XREG_OFFSET(14)]
	stp	x16, x17, [x18, #CPU_XREG_OFFSET(16)]

	ret
SYM_CODE_END(__kvm_hyp_host_forward_smc)
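
/*
 * Usage sketch (hedged; the handle_host_smc() helper below is illustrative,
 * not necessarily the actual hyp dispatch code):
 *
 *	void __kvm_hyp_host_forward_smc(struct kvm_cpu_context *host_ctxt);
 *
 *	static void handle_host_smc(struct kvm_cpu_context *host_ctxt)
 *	{
 *		// The host's x0-x17 were saved by __host_exit, so the SMC
 *		// arguments are already in host_ctxt.
 *		__kvm_hyp_host_forward_smc(host_ctxt);
 *		// The SMCCC 1.2 results (x0-x17) are now back in host_ctxt
 *		// and will be restored to the host by __host_enter.
 *	}
 */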