/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/kvm_emulate.h
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM64_KVM_EMULATE_H__
#define __ARM64_KVM_EMULATE_H__

#include <linux/kvm_host.h>

#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmio.h>
#include <asm/ptrace.h>
#include <asm/cputype.h>
#include <asm/virt.h>

unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num);
unsigned long vcpu_read_spsr32(const struct kvm_vcpu *vcpu);
void vcpu_write_spsr32(struct kvm_vcpu *vcpu, unsigned long v);

bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);

void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_vabt(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_undef32(struct kvm_vcpu *vcpu);
void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr);

static inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.hcr_el2 & HCR_RW);
}
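
/*
 * Set up the initial, reset value of HCR_EL2 for this vcpu:
 *
 * - start from HCR_GUEST_FLAGS, adding HCR_E2H when the host kernel
 *   itself runs at EL2 (VHE);
 * - with the RAS extension, route synchronous external aborts to EL2
 *   (HCR_TEA) and trap error record accesses (HCR_TERR);
 * - with FEAT_S2FWB, force write-back stage-2 attribute combining
 *   (HCR_FWB), which removes the need for some cache maintenance;
 * - clear HCR_RW when userspace asked for an AArch32 EL1;
 * - trap feature ID and cache ID register accesses (HCR_TID3, HCR_TID2)
 *   where KVM needs to virtualise what the guest reads.
 */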
static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
	if (is_kernel_in_hyp_mode())
		vcpu->arch.hcr_el2 |= HCR_E2H;
	if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN)) {
		/* route synchronous external abort exceptions to EL2 */
		vcpu->arch.hcr_el2 |= HCR_TEA;
		/* trap error record accesses */
		vcpu->arch.hcr_el2 |= HCR_TERR;
	}
	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
		vcpu->arch.hcr_el2 |= HCR_FWB;

	if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
		vcpu->arch.hcr_el2 &= ~HCR_RW;

	/*
	 * TID3: trap feature register accesses that we virtualise.
	 * For now this is conditional, since no AArch32 feature regs
	 * are currently virtualised.
	 */
	if (!vcpu_el1_is_32bit(vcpu))
		vcpu->arch.hcr_el2 |= HCR_TID3;

	if (cpus_have_const_cap(ARM64_MISMATCHED_CACHE_TYPE) ||
	    vcpu_el1_is_32bit(vcpu))
		vcpu->arch.hcr_el2 |= HCR_TID2;
}

static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu->arch.hcr_el2;
}

static inline void vcpu_clear_wfe_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 &= ~HCR_TWE;
}

static inline void vcpu_set_wfe_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 |= HCR_TWE;
}

static inline void vcpu_ptrauth_enable(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
}

static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
}

static inline void vcpu_ptrauth_setup_lazy(struct kvm_vcpu *vcpu)
{
	if (vcpu_has_ptrauth(vcpu))
		vcpu_ptrauth_disable(vcpu);
}

static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.vsesr_el2;
}

static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
{
	vcpu->arch.vsesr_el2 = vsesr;
}

static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
}

static inline unsigned long *__vcpu_elr_el1(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->elr_el1;
}

static inline unsigned long vcpu_read_elr_el1(const struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.sysregs_loaded_on_cpu)
		return read_sysreg_el1(SYS_ELR);
	else
		return *__vcpu_elr_el1(vcpu);
}

static inline void vcpu_write_elr_el1(const struct kvm_vcpu *vcpu, unsigned long v)
{
	if (vcpu->arch.sysregs_loaded_on_cpu)
		write_sysreg_el1(v, SYS_ELR);
	else
		*__vcpu_elr_el1(vcpu) = v;
}

static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pstate;
}

static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
	return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
}

static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return kvm_condition_valid32(vcpu);

	return true;
}

static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
	*vcpu_cpsr(vcpu) |= PSR_AA32_T_BIT;
}

/*
 * vcpu_get_reg and vcpu_set_reg should always be passed a register number
 * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
 * AArch32 with banked registers.
 */
static inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
					 u8 reg_num)
{
	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs.regs[reg_num];
}

static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
				unsigned long val)
{
	if (reg_num != 31)
		vcpu_gp_regs(vcpu)->regs.regs[reg_num] = val;
}

static inline unsigned long vcpu_read_spsr(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return vcpu_read_spsr32(vcpu);

	if (vcpu->arch.sysregs_loaded_on_cpu)
		return read_sysreg_el1(SYS_SPSR);
	else
		return vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1];
}

static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
{
	if (vcpu_mode_is_32bit(vcpu)) {
		vcpu_write_spsr32(vcpu, v);
		return;
	}

	if (vcpu->arch.sysregs_loaded_on_cpu)
		write_sysreg_el1(v, SYS_SPSR);
	else
		vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1] = v;
}

static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
{
	u32 mode;

	if (vcpu_mode_is_32bit(vcpu)) {
		mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
		return mode > PSR_AA32_MODE_USR;
	}

	mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;

	return mode != PSR_MODE_EL0t;
}
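
/*
 * On a guest exit, the hyp code snapshots the fault syndrome (ESR_EL2,
 * plus FAR_EL2/HPFAR_EL2/DISR_EL1 where relevant) into vcpu->arch.fault.
 * The accessors below decode the saved ESR_ELx fields (EC, ISV, SAS,
 * SRT, WnR, S1PTW, CM, FSC, ...) and never touch the live registers.
 */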
static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.esr_el2;
}

static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_hsr(vcpu);

	if (esr & ESR_ELx_CV)
		return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;

	return -1;
}

static inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.far_el2;
}

static inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
{
	return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
}

static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.disr_el1;
}

static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_xVC_IMM_MASK;
}

static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
}

static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE);
}

static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
{
	return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}

static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
}

static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) ||
		kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
}

static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM);
}

static inline int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
{
	return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
}

/* This one is not specific to Data Abort */
static inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_IL);
}

static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
{
	return ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));
}

static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}

static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC;
}

static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
}

static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
{
	switch (kvm_vcpu_trap_get_fault(vcpu)) {
	case FSC_SEA:
	case FSC_SEA_TTW0:
	case FSC_SEA_TTW1:
	case FSC_SEA_TTW2:
	case FSC_SEA_TTW3:
	case FSC_SECC:
	case FSC_SECC_TTW0:
	case FSC_SECC_TTW1:
	case FSC_SECC_TTW2:
	case FSC_SECC_TTW3:
		return true;
	default:
		return false;
	}
}

static inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_hsr(vcpu);
	return ESR_ELx_SYS64_ISS_RT(esr);
}

static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{
	if (kvm_vcpu_trap_is_iabt(vcpu))
		return false;

	return kvm_vcpu_dabt_iswrite(vcpu);
}

static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
	return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
}

static inline bool kvm_arm_get_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG;
}

static inline void kvm_arm_set_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu,
						      bool flag)
{
	if (flag)
		vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
	else
		vcpu->arch.workaround_flags &= ~VCPU_WORKAROUND_2_FLAG;
}

static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu)) {
		*vcpu_cpsr(vcpu) |= PSR_AA32_E_BIT;
	} else {
		u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
		sctlr |= (1 << 25);	/* SCTLR_EL1.EE: big-endian EL1 data */
		vcpu_write_sys_reg(vcpu, sctlr, SCTLR_EL1);
	}
}

static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return !!(*vcpu_cpsr(vcpu) & PSR_AA32_E_BIT);

	return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & (1 << 25)); /* SCTLR_EL1.EE */
}
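
/*
 * MMIO data is passed to and from userspace in host (little-endian)
 * order, while the guest may be running big-endian (CPSR.E for AArch32,
 * SCTLR_EL1.EE for AArch64 data accesses). The two helpers below
 * byteswap the value being transferred whenever the vcpu is currently
 * big-endian. A data abort handler combines them with the syndrome
 * accessors above roughly as follows (illustrative sketch only; the
 * real decoding lives in the MMIO emulation code):
 *
 *	int rt = kvm_vcpu_dabt_get_rd(vcpu);
 *	int len = kvm_vcpu_dabt_get_as(vcpu);	// 1, 2, 4 or 8 bytes
 *	unsigned long data;
 *
 *	if (kvm_vcpu_dabt_iswrite(vcpu))
 *		data = vcpu_data_guest_to_host(vcpu,
 *					       vcpu_get_reg(vcpu, rt), len);
 */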
static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return be16_to_cpu(data & 0xffff);
		case 4:
			return be32_to_cpu(data & 0xffffffff);
		default:
			return be64_to_cpu(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return le16_to_cpu(data & 0xffff);
		case 4:
			return le32_to_cpu(data & 0xffffffff);
		default:
			return le64_to_cpu(data);
		}
	}

	return data;	/* Leave LE untouched */
}

static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_be16(data & 0xffff);
		case 4:
			return cpu_to_be32(data & 0xffffffff);
		default:
			return cpu_to_be64(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_le16(data & 0xffff);
		case 4:
			return cpu_to_le32(data & 0xffffffff);
		default:
			return cpu_to_le64(data);
		}
	}

	return data;	/* Leave LE untouched */
}
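
/*
 * Advance the guest PC past the instruction that just trapped. For an
 * AArch32 guest, kvm_skip_instr32() also advances the IT state and steps
 * by 2 or 4 bytes depending on whether the instruction was 16-bit Thumb;
 * AArch64 instructions are always 4 bytes. Clearing PSTATE.SS
 * (DBG_SPSR_SS) tells the debug single-step state machine that the
 * emulated instruction has completed.
 */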
static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
{
	if (vcpu_mode_is_32bit(vcpu))
		kvm_skip_instr32(vcpu, is_wide_instr);
	else
		*vcpu_pc(vcpu) += 4;

	/* advance the singlestep state machine */
	*vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
}

/*
 * Skip an instruction which has been emulated at hyp while most guest sysregs
 * are live.
 */
static inline void __hyp_text __kvm_skip_instr(struct kvm_vcpu *vcpu)
{
	*vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
	vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(SYS_SPSR);

	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));

	write_sysreg_el2(vcpu->arch.ctxt.gp_regs.regs.pstate, SYS_SPSR);
	write_sysreg_el2(*vcpu_pc(vcpu), SYS_ELR);
}

#endif /* __ARM64_KVM_EMULATE_H__ */