/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/handle_exit.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>

#include <asm/esr.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_psci.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);

static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int ret;

	trace_kvm_hvc_arm64(*vcpu_pc(vcpu), vcpu_get_reg(vcpu, 0),
			    kvm_vcpu_hvc_get_imm(vcpu));
	vcpu->stat.hvc_exit_stat++;

	/*
	 * A negative return means the HVC was not a valid PSCI call;
	 * make the guest see an Undefined Instruction exception and
	 * resume it. Otherwise propagate kvm_psci_call()'s result
	 * (> 0: back to the guest, 0: exit to userspace).
	 */
	ret = kvm_psci_call(vcpu);
	if (ret < 0) {
		kvm_inject_undefined(vcpu);
		return 1;
	}

	return ret;
}

/*
 * Guest SMC calls are not forwarded to EL3; make the guest see an
 * Undefined Instruction exception instead.
 */
static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

/*
 * Guest access to FP/ASIMD registers is routed to this handler only
 * when the system doesn't support FP/ASIMD.
 */
static int handle_no_fpsimd(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

/**
 * kvm_handle_wfx - handle a wait-for-interrupts or wait-for-event
 *		    instruction executed by a guest
 *
 * @vcpu:	the vcpu pointer
 *
 * WFE: Yield the CPU and come back to this vcpu when the scheduler
 * decides to.
 * WFI: Simply call kvm_vcpu_block(), which will halt execution of
 * world-switches and schedule other host processes until there is an
 * incoming IRQ or FIQ to the VM.
 */
static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	if (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WFx_ISS_WFE) {
		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), true);
		vcpu->stat.wfe_exit_stat++;
		kvm_vcpu_on_spin(vcpu);
	} else {
		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), false);
		vcpu->stat.wfi_exit_stat++;
		kvm_vcpu_block(vcpu);
	}

	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));

	return 1;
}

/**
 * kvm_handle_guest_debug - handle a debug exception instruction
 *
 * @vcpu:	the vcpu pointer
 * @run:	access to the kvm_run structure for results
 *
 * We route all debug exceptions through the same handler. If both the
 * guest and host are using the same debug facilities it will be up to
 * userspace to re-inject the correct exception for guest delivery.
 *
 * @return: 0 (while setting run->exit_reason), -1 for error
 */
static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	u32 hsr = kvm_vcpu_get_hsr(vcpu);
	int ret = 0;

	run->exit_reason = KVM_EXIT_DEBUG;
	run->debug.arch.hsr = hsr;

	switch (ESR_ELx_EC(hsr)) {
	case ESR_ELx_EC_WATCHPT_LOW:
		run->debug.arch.far = vcpu->arch.fault.far_el2;
		/* fall through */
	case ESR_ELx_EC_SOFTSTP_LOW:
	case ESR_ELx_EC_BREAKPT_LOW:
	case ESR_ELx_EC_BKPT32:
	case ESR_ELx_EC_BRK64:
		break;
	default:
		kvm_err("%s: un-handled case hsr: %#08x\n",
			__func__, (unsigned int) hsr);
		ret = -1;
		break;
	}

	return ret;
}

static exit_handle_fn arm_exit_handlers[] = {
	[ESR_ELx_EC_WFx]	= kvm_handle_wfx,
	[ESR_ELx_EC_CP15_32]	= kvm_handle_cp15_32,
	[ESR_ELx_EC_CP15_64]	= kvm_handle_cp15_64,
	[ESR_ELx_EC_CP14_MR]	= kvm_handle_cp14_32,
	[ESR_ELx_EC_CP14_LS]	= kvm_handle_cp14_load_store,
	[ESR_ELx_EC_CP14_64]	= kvm_handle_cp14_64,
	[ESR_ELx_EC_HVC32]	= handle_hvc,
	[ESR_ELx_EC_SMC32]	= handle_smc,
	[ESR_ELx_EC_HVC64]	= handle_hvc,
	[ESR_ELx_EC_SMC64]	= handle_smc,
	[ESR_ELx_EC_SYS64]	= kvm_handle_sys_reg,
	[ESR_ELx_EC_IABT_LOW]	= kvm_handle_guest_abort,
	[ESR_ELx_EC_DABT_LOW]	= kvm_handle_guest_abort,
	[ESR_ELx_EC_SOFTSTP_LOW]= kvm_handle_guest_debug,
	[ESR_ELx_EC_WATCHPT_LOW]= kvm_handle_guest_debug,
	[ESR_ELx_EC_BREAKPT_LOW]= kvm_handle_guest_debug,
	[ESR_ELx_EC_BKPT32]	= kvm_handle_guest_debug,
	[ESR_ELx_EC_BRK64]	= kvm_handle_guest_debug,
	[ESR_ELx_EC_FP_ASIMD]	= handle_no_fpsimd,
};

static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
{
	u32 hsr = kvm_vcpu_get_hsr(vcpu);
	u8 hsr_ec = ESR_ELx_EC(hsr);

	if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) ||
	    !arm_exit_handlers[hsr_ec]) {
		kvm_err("Unknown exception class: hsr: %#08x -- %s\n",
			hsr, esr_get_class_string(hsr));
		BUG();
	}

	return arm_exit_handlers[hsr_ec];
}

/*
 * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
 * proper exit to userspace.
 */
int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
		int exception_index)
{
	exit_handle_fn exit_handler;

	if (ARM_SERROR_PENDING(exception_index)) {
		u8 hsr_ec = ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));

		/*
		 * HVC/SMC already have an adjusted PC, which we need
		 * to correct so that the guest returns to the right
		 * instruction once the SError has been injected.
		 */
		if (hsr_ec == ESR_ELx_EC_HVC32 || hsr_ec == ESR_ELx_EC_HVC64 ||
		    hsr_ec == ESR_ELx_EC_SMC32 || hsr_ec == ESR_ELx_EC_SMC64) {
			u32 adj = kvm_vcpu_trap_il_is32bit(vcpu) ? 4 : 2;
			*vcpu_pc(vcpu) -= adj;
		}

		kvm_inject_vabt(vcpu);
		return 1;
	}

	exception_index = ARM_EXCEPTION_CODE(exception_index);

	switch (exception_index) {
	case ARM_EXCEPTION_IRQ:
		return 1;
	case ARM_EXCEPTION_EL1_SERROR:
		kvm_inject_vabt(vcpu);
		return 1;
	case ARM_EXCEPTION_TRAP:
		/*
		 * See ARM ARM B1.14.1: "Hyp traps on instructions
		 * that fail their condition code check"
		 */
		if (!kvm_condition_valid(vcpu)) {
			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
			return 1;
		}

		exit_handler = kvm_get_exit_handler(vcpu);

		return exit_handler(vcpu, run);
	case ARM_EXCEPTION_HYP_GONE:
		/*
		 * EL2 has been reset to the hyp-stub. This happens when a guest
		 * is pre-empted by kvm_reboot()'s shutdown call.
		 */
		run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		return 0;
	default:
		kvm_pr_unimpl("Unsupported exception type: %d",
			      exception_index);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return 0;
	}
}
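The routing above is a plain bounded dispatch table: a sparse array of function pointers indexed by the ESR exception class, checked for range and NULL before the call. Below is a minimal, self-contained userspace sketch of that pattern only; the exception-class values and every name in it (example_ec, example_handlers, handle_wfx_example, handle_hvc_example, get_handler) are invented for illustration and are not part of the kernel or KVM API.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical exception-class values, for illustration only. */
enum example_ec {
	EC_WFX   = 0x01,
	EC_HVC64 = 0x16,
	EC_MAX   = 0x3f,
};

typedef int (*ec_handler_fn)(unsigned int ec);

static int handle_wfx_example(unsigned int ec)
{
	printf("WFx trap (EC %#x): yield or block the vcpu\n", ec);
	return 1;	/* > 0: go back to the guest */
}

static int handle_hvc_example(unsigned int ec)
{
	printf("HVC trap (EC %#x): hypercall\n", ec);
	return 1;
}

/* Sparse, designated-initializer table; unlisted classes stay NULL. */
static const ec_handler_fn example_handlers[EC_MAX + 1] = {
	[EC_WFX]   = handle_wfx_example,
	[EC_HVC64] = handle_hvc_example,
};

static ec_handler_fn get_handler(unsigned int ec)
{
	/*
	 * Bounds and NULL check before dispatch, mirroring
	 * kvm_get_exit_handler(); an unknown class is fatal here too.
	 */
	if (ec > EC_MAX || !example_handlers[ec]) {
		fprintf(stderr, "unknown exception class %#x\n", ec);
		abort();
	}
	return example_handlers[ec];
}

int main(void)
{
	return get_handler(EC_HVC64)(EC_HVC64) > 0 ? 0 : 1;
}

Built with any C99 compiler, this dispatches the EC_HVC64 entry and exits 0; the kernel's table works the same way, only with real ESR_ELx_EC_* indices and a BUG() on unknown classes.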