/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/kvm_emulate.h
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM64_KVM_EMULATE_H__
#define __ARM64_KVM_EMULATE_H__

#include <linux/kvm_host.h>

#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmio.h>
#include <asm/ptrace.h>
#include <asm/cputype.h>
#include <asm/virt.h>

unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num);
unsigned long vcpu_read_spsr32(const struct kvm_vcpu *vcpu);
void vcpu_write_spsr32(struct kvm_vcpu *vcpu, unsigned long v);

bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);

void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_vabt(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_undef32(struct kvm_vcpu *vcpu);
void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr);

static inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.hcr_el2 & HCR_RW);
}
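
/*
 * Compute the initial HCR_EL2 value for this vcpu: the baseline set of
 * guest traps, adjusted for the host's capabilities and for whether the
 * guest's EL1 is AArch32 or AArch64.
 */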
static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
	if (is_kernel_in_hyp_mode())
		vcpu->arch.hcr_el2 |= HCR_E2H;
	if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN)) {
		/* route synchronous external abort exceptions to EL2 */
		vcpu->arch.hcr_el2 |= HCR_TEA;
		/* trap error record accesses */
		vcpu->arch.hcr_el2 |= HCR_TERR;
	}
	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
		vcpu->arch.hcr_el2 |= HCR_FWB;

	if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
		vcpu->arch.hcr_el2 &= ~HCR_RW;

	/*
	 * TID3: trap feature register accesses that we virtualise.
	 * For now this is conditional, since no AArch32 feature regs
	 * are currently virtualised.
	 */
	if (!vcpu_el1_is_32bit(vcpu))
		vcpu->arch.hcr_el2 |= HCR_TID3;

	if (cpus_have_const_cap(ARM64_MISMATCHED_CACHE_TYPE) ||
	    vcpu_el1_is_32bit(vcpu))
		vcpu->arch.hcr_el2 |= HCR_TID2;
}

static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu->arch.hcr_el2;
}

static inline void vcpu_clear_wfe_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 &= ~HCR_TWE;
}

static inline void vcpu_set_wfe_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 |= HCR_TWE;
}

static inline void vcpu_ptrauth_enable(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
}

static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
}

static inline void vcpu_ptrauth_setup_lazy(struct kvm_vcpu *vcpu)
{
	if (vcpu_has_ptrauth(vcpu))
		vcpu_ptrauth_disable(vcpu);
}

static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.vsesr_el2;
}

static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
{
	vcpu->arch.vsesr_el2 = vsesr;
}

static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
}

static inline unsigned long *__vcpu_elr_el1(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->elr_el1;
}

static inline unsigned long vcpu_read_elr_el1(const struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.sysregs_loaded_on_cpu)
		return read_sysreg_el1(elr);
	else
		return *__vcpu_elr_el1(vcpu);
}

static inline void vcpu_write_elr_el1(const struct kvm_vcpu *vcpu, unsigned long v)
{
	if (vcpu->arch.sysregs_loaded_on_cpu)
		write_sysreg_el1(v, elr);
	else
		*__vcpu_elr_el1(vcpu) = v;
}

static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pstate;
}

static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
	return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
}

static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return kvm_condition_valid32(vcpu);

	return true;
}

static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
	*vcpu_cpsr(vcpu) |= PSR_AA32_T_BIT;
}

/*
 * vcpu_get_reg and vcpu_set_reg should always be passed a register number
 * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
 * AArch32 with banked registers.
 */
static inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
					 u8 reg_num)
{
	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs.regs[reg_num];
}

static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
				unsigned long val)
{
	if (reg_num != 31)
		vcpu_gp_regs(vcpu)->regs.regs[reg_num] = val;
}
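
/*
 * SPSR accessors: an AArch32 guest uses the SPSR banked by its current
 * mode; otherwise SPSR_EL1 is accessed, either via the live sysreg or
 * the saved context depending on whether the sysregs are loaded on the
 * CPU.
 */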
static inline unsigned long vcpu_read_spsr(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return vcpu_read_spsr32(vcpu);

	if (vcpu->arch.sysregs_loaded_on_cpu)
		return read_sysreg_el1(spsr);
	else
		return vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1];
}

static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
{
	if (vcpu_mode_is_32bit(vcpu)) {
		vcpu_write_spsr32(vcpu, v);
		return;
	}

	if (vcpu->arch.sysregs_loaded_on_cpu)
		write_sysreg_el1(v, spsr);
	else
		vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1] = v;
}

static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
{
	u32 mode;

	if (vcpu_mode_is_32bit(vcpu)) {
		mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
		return mode > PSR_AA32_MODE_USR;
	}

	mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;

	return mode != PSR_MODE_EL0t;
}

static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.esr_el2;
}

static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_hsr(vcpu);

	if (esr & ESR_ELx_CV)
		return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;

	return -1;
}

static inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.far_el2;
}
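
/*
 * HPFAR_EL2 holds bits [47:12] of the faulting IPA in its FIPA field
 * (bits [43:4]); shifting left by 8 recovers the page-aligned IPA.
 */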
static inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
{
	return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
}

static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.disr_el1;
}

static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_xVC_IMM_MASK;
}

static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
}

static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE);
}

static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
{
	return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}

static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
}

static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) ||
		kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
}

static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM);
}

static inline int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
{
	return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
}

/* This one is not specific to Data Abort */
static inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_IL);
}

static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
{
	return ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));
}

static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}

static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC;
}

static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
}

static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
{
	switch (kvm_vcpu_trap_get_fault(vcpu)) {
	case FSC_SEA:
	case FSC_SEA_TTW0:
	case FSC_SEA_TTW1:
	case FSC_SEA_TTW2:
	case FSC_SEA_TTW3:
	case FSC_SECC:
	case FSC_SECC_TTW0:
	case FSC_SECC_TTW1:
	case FSC_SECC_TTW2:
	case FSC_SECC_TTW3:
		return true;
	default:
		return false;
	}
}

static inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_hsr(vcpu);
	return ESR_ELx_SYS64_ISS_RT(esr);
}

static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{
	if (kvm_vcpu_trap_is_iabt(vcpu))
		return false;

	return kvm_vcpu_dabt_iswrite(vcpu);
}

static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
	return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
}

static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu)) {
		*vcpu_cpsr(vcpu) |= PSR_AA32_E_BIT;
	} else {
		u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
		sctlr |= (1 << 25);
		vcpu_write_sys_reg(vcpu, sctlr, SCTLR_EL1);
	}
}

static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return !!(*vcpu_cpsr(vcpu) & PSR_AA32_E_BIT);

	return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & (1 << 25));
}
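
/*
 * MMIO accesses: convert the data between the guest's current data
 * endianness and the host's native byte order, for the given access
 * width.
 */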
static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return be16_to_cpu(data & 0xffff);
		case 4:
			return be32_to_cpu(data & 0xffffffff);
		default:
			return be64_to_cpu(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return le16_to_cpu(data & 0xffff);
		case 4:
			return le32_to_cpu(data & 0xffffffff);
		default:
			return le64_to_cpu(data);
		}
	}

	return data;	/* Leave LE untouched */
}

static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_be16(data & 0xffff);
		case 4:
			return cpu_to_be32(data & 0xffffffff);
		default:
			return cpu_to_be64(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_le16(data & 0xffff);
		case 4:
			return cpu_to_le32(data & 0xffffffff);
		default:
			return cpu_to_le64(data);
		}
	}

	return data;	/* Leave LE untouched */
}

static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
{
	if (vcpu_mode_is_32bit(vcpu))
		kvm_skip_instr32(vcpu, is_wide_instr);
	else
		*vcpu_pc(vcpu) += 4;

	/* advance the singlestep state machine */
	*vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
}

/*
 * Skip an instruction which has been emulated at hyp while most guest sysregs
 * are live.
 */
static inline void __hyp_text __kvm_skip_instr(struct kvm_vcpu *vcpu)
{
	*vcpu_pc(vcpu) = read_sysreg_el2(elr);
	vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(spsr);

	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));

	write_sysreg_el2(vcpu->arch.ctxt.gp_regs.regs.pstate, spsr);
	write_sysreg_el2(*vcpu_pc(vcpu), elr);
}

#endif /* __ARM64_KVM_EMULATE_H__ */