/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/handle_exit.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>

#include <asm/esr.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_psci.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);

static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int ret;

	trace_kvm_hvc_arm64(*vcpu_pc(vcpu), vcpu_get_reg(vcpu, 0),
			    kvm_vcpu_hvc_get_imm(vcpu));
	vcpu->stat.hvc_exit_stat++;

	ret = kvm_psci_call(vcpu);
	if (ret < 0) {
		kvm_inject_undefined(vcpu);
		return 1;
	}

	return ret;
}

/*
 * SMC calls from the guest are not supported: make the guest see an
 * undefined instruction exception instead.
 */
static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

/*
 * Guest access to FP/ASIMD registers is routed to this handler only
 * when the system doesn't support FP/ASIMD.
 */
static int handle_no_fpsimd(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

/**
 * kvm_handle_wfx - handle a wait-for-interrupts or wait-for-event
 *		    instruction executed by a guest
 *
 * @vcpu:	the vcpu pointer
 *
 * WFE: Yield the CPU and come back to this vcpu when the scheduler
 * decides to.
 * WFI: Simply call kvm_vcpu_block(), which will halt execution of
 * world-switches and schedule other host processes until there is an
 * incoming IRQ or FIQ to the VM.
 */
static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	if (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WFx_ISS_WFE) {
		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), true);
		vcpu->stat.wfe_exit_stat++;
		kvm_vcpu_on_spin(vcpu, vcpu_mode_priv(vcpu));
	} else {
		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), false);
		vcpu->stat.wfi_exit_stat++;
		kvm_vcpu_block(vcpu);
		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
	}

	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));

	return 1;
}

/**
 * kvm_handle_guest_debug - handle a debug exception instruction
 *
 * @vcpu:	the vcpu pointer
 * @run:	access to the kvm_run structure for results
 *
 * We route all debug exceptions through the same handler. If both the
 * guest and host are using the same debug facilities it will be up to
 * userspace to re-inject the correct exception for guest delivery.
 *
 * @return: 0 (while setting run->exit_reason), -1 for error
 */
static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	u32 hsr = kvm_vcpu_get_hsr(vcpu);
	int ret = 0;

	run->exit_reason = KVM_EXIT_DEBUG;
	run->debug.arch.hsr = hsr;

	switch (ESR_ELx_EC(hsr)) {
	case ESR_ELx_EC_WATCHPT_LOW:
		run->debug.arch.far = vcpu->arch.fault.far_el2;
		/* fall through */
	case ESR_ELx_EC_SOFTSTP_LOW:
	case ESR_ELx_EC_BREAKPT_LOW:
	case ESR_ELx_EC_BKPT32:
	case ESR_ELx_EC_BRK64:
		break;
	default:
		kvm_err("%s: un-handled case hsr: %#08x\n",
			__func__, (unsigned int) hsr);
		ret = -1;
		break;
	}

	return ret;
}

static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	u32 hsr = kvm_vcpu_get_hsr(vcpu);

	kvm_pr_unimpl("Unknown exception class: hsr: %#08x -- %s\n",
		      hsr, esr_get_class_string(hsr));

	kvm_inject_undefined(vcpu);
	return 1;
}

static int handle_sve(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	/* Until SVE is supported for guests: */
	kvm_inject_undefined(vcpu);
	return 1;
}

/*
 * Exit handlers, indexed by exception class (EC). Any class without a
 * dedicated handler falls back to kvm_handle_unknown_ec.
 */
static exit_handle_fn arm_exit_handlers[] = {
	[0 ... ESR_ELx_EC_MAX]	= kvm_handle_unknown_ec,
	[ESR_ELx_EC_WFx]	= kvm_handle_wfx,
	[ESR_ELx_EC_CP15_32]	= kvm_handle_cp15_32,
	[ESR_ELx_EC_CP15_64]	= kvm_handle_cp15_64,
	[ESR_ELx_EC_CP14_MR]	= kvm_handle_cp14_32,
	[ESR_ELx_EC_CP14_LS]	= kvm_handle_cp14_load_store,
	[ESR_ELx_EC_CP14_64]	= kvm_handle_cp14_64,
	[ESR_ELx_EC_HVC32]	= handle_hvc,
	[ESR_ELx_EC_SMC32]	= handle_smc,
	[ESR_ELx_EC_HVC64]	= handle_hvc,
	[ESR_ELx_EC_SMC64]	= handle_smc,
	[ESR_ELx_EC_SYS64]	= kvm_handle_sys_reg,
	[ESR_ELx_EC_SVE]	= handle_sve,
	[ESR_ELx_EC_IABT_LOW]	= kvm_handle_guest_abort,
	[ESR_ELx_EC_DABT_LOW]	= kvm_handle_guest_abort,
	[ESR_ELx_EC_SOFTSTP_LOW]= kvm_handle_guest_debug,
	[ESR_ELx_EC_WATCHPT_LOW]= kvm_handle_guest_debug,
	[ESR_ELx_EC_BREAKPT_LOW]= kvm_handle_guest_debug,
	[ESR_ELx_EC_BKPT32]	= kvm_handle_guest_debug,
	[ESR_ELx_EC_BRK64]	= kvm_handle_guest_debug,
	[ESR_ELx_EC_FP_ASIMD]	= handle_no_fpsimd,
};

static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
{
	u32 hsr = kvm_vcpu_get_hsr(vcpu);
	u8 hsr_ec = ESR_ELx_EC(hsr);

	return arm_exit_handlers[hsr_ec];
}

/*
 * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
 * proper exit to userspace.
 */
int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
		int exception_index)
{
	exit_handle_fn exit_handler;

	if (ARM_SERROR_PENDING(exception_index)) {
		u8 hsr_ec = ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));

		/*
		 * HVC/SMC already have an adjusted PC, which we need
		 * to correct in order to return to the right instruction
		 * after having injected the SError.
		 */
		if (hsr_ec == ESR_ELx_EC_HVC32 || hsr_ec == ESR_ELx_EC_HVC64 ||
		    hsr_ec == ESR_ELx_EC_SMC32 || hsr_ec == ESR_ELx_EC_SMC64) {
			u32 adj = kvm_vcpu_trap_il_is32bit(vcpu) ? 4 : 2;
			*vcpu_pc(vcpu) -= adj;
		}

		kvm_inject_vabt(vcpu);
		return 1;
	}

	exception_index = ARM_EXCEPTION_CODE(exception_index);

	switch (exception_index) {
	case ARM_EXCEPTION_IRQ:
		return 1;
	case ARM_EXCEPTION_EL1_SERROR:
		kvm_inject_vabt(vcpu);
		return 1;
	case ARM_EXCEPTION_TRAP:
		/*
		 * See ARM ARM B1.14.1: "Hyp traps on instructions
		 * that fail their condition code check"
		 */
		if (!kvm_condition_valid(vcpu)) {
			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
			return 1;
		}

		exit_handler = kvm_get_exit_handler(vcpu);

		return exit_handler(vcpu, run);
	case ARM_EXCEPTION_HYP_GONE:
		/*
		 * EL2 has been reset to the hyp-stub. This happens when a guest
		 * is pre-empted by kvm_reboot()'s shutdown call.
		 */
		run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		return 0;
	default:
		kvm_pr_unimpl("Unsupported exception type: %d",
			      exception_index);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return 0;
	}
}