/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/arm-smccc.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>

	.text
	.pushsection	.hyp.text, "ax"

.macro do_el2_call
	/*
	 * Shuffle the parameters before calling the function
	 * pointed to in x0. Assumes parameters in x[1,2,3].
	 */
	str	lr, [sp, #-16]!
	mov	lr, x0
	mov	x0, x1
	mov	x1, x2
	mov	x2, x3
	blr	lr
	ldr	lr, [sp], #16
.endm

ENTRY(__vhe_hyp_call)
	do_el2_call
	/*
	 * We used to rely on having an exception return to get
	 * an implicit isb. In the E2H case, we don't have it anymore.
	 * Rather than changing all the leaf functions, just do it here
	 * before returning to the rest of the kernel.
	 */
	isb
	ret
ENDPROC(__vhe_hyp_call)

el1_sync:				// Guest trapped into EL2

	mrs	x0, esr_el2
	lsr	x0, x0, #ESR_ELx_EC_SHIFT
	cmp	x0, #ESR_ELx_EC_HVC64
	ccmp	x0, #ESR_ELx_EC_HVC32, #4, ne
	b.ne	el1_trap

	mrs	x1, vttbr_el2		// If vttbr is valid, the guest
	cbnz	x1, el1_hvc_guest	// called HVC

	/* Here, we're pretty sure the host called HVC. */
	ldp	x0, x1, [sp], #16

	/* Check for a stub HVC call */
	cmp	x0, #HVC_STUB_HCALL_NR
	b.hs	1f

	/*
	 * Compute the idmap address of __kvm_handle_stub_hvc and
	 * jump there. Since we use kimage_voffset, do not use the
	 * HYP VA for __kvm_handle_stub_hvc, but the kernel VA instead
	 * (by loading it from the constant pool).
	 *
	 * Preserve x0-x4, which may contain stub parameters.
	 */
	ldr	x5, =__kvm_handle_stub_hvc
	ldr_l	x6, kimage_voffset

	/* x5 = __pa(x5) */
	sub	x5, x5, x6
	br	x5

1:
	/*
	 * Perform the EL2 call
	 */
	kern_hyp_va	x0
	do_el2_call

	eret

el1_hvc_guest:
	/*
	 * Fastest possible path for ARM_SMCCC_ARCH_WORKAROUND_1.
	 * The workaround has already been applied on the host,
	 * so let's quickly get back to the guest. We don't bother
	 * restoring x1, as it can be clobbered anyway.
	 */
	ldr	x1, [sp]				// Guest's x0
	eor	w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1
	cbnz	w1, el1_trap
	mov	x0, xzr
	add	sp, sp, #16
	eret

el1_trap:
	get_vcpu_ptr	x1, x0

	mrs	x0, esr_el2
	lsr	x0, x0, #ESR_ELx_EC_SHIFT
	/*
	 * x0: ESR_EC
	 * x1: vcpu pointer
	 */

	/*
	 * We trap the first access to the FP/SIMD to save the host context
	 * and restore the guest context lazily.
	 * If FP/SIMD is not implemented, handle the trap and inject an
	 * undefined instruction exception to the guest.
	 */
alternative_if_not ARM64_HAS_NO_FPSIMD
	cmp	x0, #ESR_ELx_EC_FP_ASIMD
	b.eq	__fpsimd_guest_restore
alternative_else_nop_endif

	mov	x0, #ARM_EXCEPTION_TRAP
	b	__guest_exit

el1_irq:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_IRQ
	b	__guest_exit

el1_error:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_EL1_SERROR
	b	__guest_exit

el2_error:
	ldp	x0, x1, [sp], #16

	/*
	 * Only two possibilities:
	 * 1) Either we come from the exit path, having just unmasked
	 *    PSTATE.A: change the return code to an EL2 fault, and
	 *    carry on, as we're already in a sane state to handle it.
	 * 2) Or we come from anywhere else, and that's a bug: we panic.
	 *
	 * For (1), x0 contains the original return code and x1 doesn't
	 * contain anything meaningful at that stage. We can reuse them
	 * as temp registers.
	 * For (2), who cares?
	 */
	mrs	x0, elr_el2
	adr	x1, abort_guest_exit_start
	cmp	x0, x1
	adr	x1, abort_guest_exit_end
	ccmp	x0, x1, #4, ne
	b.ne	__hyp_panic
	mov	x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)
	eret

ENTRY(__hyp_do_panic)
	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, lr
	ldr	lr, =panic
	msr	elr_el2, lr
	eret
ENDPROC(__hyp_do_panic)

ENTRY(__hyp_panic)
	get_host_ctxt x0, x1
	b	hyp_panic
ENDPROC(__hyp_panic)

.macro invalid_vector	label, target = __hyp_panic
	.align	2
\label:
	b \target
ENDPROC(\label)
.endm

	/* None of these should ever happen */
	invalid_vector	el2t_sync_invalid
	invalid_vector	el2t_irq_invalid
	invalid_vector	el2t_fiq_invalid
	invalid_vector	el2t_error_invalid
	invalid_vector	el2h_sync_invalid
	invalid_vector	el2h_irq_invalid
	invalid_vector	el2h_fiq_invalid
	invalid_vector	el1_fiq_invalid

	.ltorg

	.align 11

.macro valid_vect target
	.align 7
	stp	x0, x1, [sp, #-16]!
	b	\target
.endm

.macro invalid_vect target
	.align 7
	b	\target
	ldp	x0, x1, [sp], #16
	b	\target
.endm

ENTRY(__kvm_hyp_vector)
	invalid_vect	el2t_sync_invalid	// Synchronous EL2t
	invalid_vect	el2t_irq_invalid	// IRQ EL2t
	invalid_vect	el2t_fiq_invalid	// FIQ EL2t
	invalid_vect	el2t_error_invalid	// Error EL2t

	invalid_vect	el2h_sync_invalid	// Synchronous EL2h
	invalid_vect	el2h_irq_invalid	// IRQ EL2h
	invalid_vect	el2h_fiq_invalid	// FIQ EL2h
	valid_vect	el2_error		// Error EL2h

	valid_vect	el1_sync		// Synchronous 64-bit EL1
	valid_vect	el1_irq			// IRQ 64-bit EL1
	invalid_vect	el1_fiq_invalid		// FIQ 64-bit EL1
	valid_vect	el1_error		// Error 64-bit EL1

	valid_vect	el1_sync		// Synchronous 32-bit EL1
	valid_vect	el1_irq			// IRQ 32-bit EL1
	invalid_vect	el1_fiq_invalid		// FIQ 32-bit EL1
	valid_vect	el1_error		// Error 32-bit EL1
ENDPROC(__kvm_hyp_vector)