/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/kvm_emulate.h
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM64_KVM_EMULATE_H__
#define __ARM64_KVM_EMULATE_H__

#include <linux/kvm_host.h>

#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmio.h>
#include <asm/ptrace.h>
#include <asm/cputype.h>
#include <asm/virt.h>

unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num);
unsigned long *vcpu_spsr32(const struct kvm_vcpu *vcpu);

bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);

void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_vabt(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_undef32(struct kvm_vcpu *vcpu);
void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr);

static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
	if (is_kernel_in_hyp_mode())
		vcpu->arch.hcr_el2 |= HCR_E2H;
	if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
		vcpu->arch.hcr_el2 &= ~HCR_RW;
}

static inline unsigned long vcpu_get_hcr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hcr_el2;
}

static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr)
{
	vcpu->arch.hcr_el2 = hcr;
}

static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
}

static inline unsigned long *vcpu_elr_el1(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->elr_el1;
}

static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pstate;
}

static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
	return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
}

static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return kvm_condition_valid32(vcpu);

	return true;
}

static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
{
	if (vcpu_mode_is_32bit(vcpu))
		kvm_skip_instr32(vcpu, is_wide_instr);
	else
		*vcpu_pc(vcpu) += 4;
}

static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
	*vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT;
}
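
/*
 * Illustration only, not part of the kernel API: a hypothetical sketch of
 * how kvm_condition_valid() and kvm_skip_instr() above are meant to be
 * combined by a trap handler. A trapped AArch32 instruction whose condition
 * code failed must not be emulated, but the guest PC must still be advanced
 * past it.
 */
static inline bool kvm_handle_trap_example(struct kvm_vcpu *vcpu,
					   bool is_wide_instr)
{
	if (!kvm_condition_valid(vcpu)) {
		/* Condition code failed: treat the instruction as a NOP */
		kvm_skip_instr(vcpu, is_wide_instr);
		return false;
	}

	/* ... emulate the trapped instruction here ... */

	kvm_skip_instr(vcpu, is_wide_instr);
	return true;
}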

/*
 * vcpu_get_reg and vcpu_set_reg should always be passed a register number
 * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
 * AArch32 with banked registers.
 */
static inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
					 u8 reg_num)
{
	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs.regs[reg_num];
}

static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
				unsigned long val)
{
	if (reg_num != 31)
		vcpu_gp_regs(vcpu)->regs.regs[reg_num] = val;
}

/* Get vcpu SPSR for current mode */
static inline unsigned long *vcpu_spsr(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return vcpu_spsr32(vcpu);

	return (unsigned long *)&vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1];
}

static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
{
	u32 mode;

	if (vcpu_mode_is_32bit(vcpu)) {
		mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK;
		return mode > COMPAT_PSR_MODE_USR;
	}

	mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;

	return mode != PSR_MODE_EL0t;
}

static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.esr_el2;
}

static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_hsr(vcpu);

	if (esr & ESR_ELx_CV)
		return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;

	return -1;
}

static inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.far_el2;
}

static inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
{
	return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
}

static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_xVC_IMM_MASK;
}

static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
}

static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE);
}

static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
{
	return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}

static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
}

static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) ||
		kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
}

static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM);
}

static inline int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
{
	return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
}

/* This one is not specific to Data Abort */
static inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_IL);
}

static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
{
	return ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));
}

static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}

static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC;
}
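
/*
 * Illustration only, not part of the kernel API: a hypothetical helper
 * showing how the ISS accessors above decode a data abort into the
 * parameters of an MMIO access. Callers are expected to have checked
 * kvm_vcpu_dabt_isvalid() first; without ESR_ELx.ISV set, none of these
 * fields are meaningful.
 */
static inline void kvm_vcpu_dabt_decode_example(const struct kvm_vcpu *vcpu,
						int *rt, int *len,
						bool *is_write,
						bool *sign_extend)
{
	*rt = kvm_vcpu_dabt_get_rd(vcpu);	/* register transferred */
	*len = kvm_vcpu_dabt_get_as(vcpu);	/* access size: 1/2/4/8 bytes */
	*is_write = kvm_vcpu_dabt_iswrite(vcpu);
	*sign_extend = kvm_vcpu_dabt_issext(vcpu); /* sign-extend a load? */
}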

static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
}

static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
{
	switch (kvm_vcpu_trap_get_fault(vcpu)) {
	case FSC_SEA:
	case FSC_SEA_TTW0:
	case FSC_SEA_TTW1:
	case FSC_SEA_TTW2:
	case FSC_SEA_TTW3:
	case FSC_SECC:
	case FSC_SECC_TTW0:
	case FSC_SECC_TTW1:
	case FSC_SECC_TTW2:
	case FSC_SECC_TTW3:
		return true;
	default:
		return false;
	}
}

static inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_hsr(vcpu);

	return (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
}

static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
	return vcpu_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
}

static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		*vcpu_cpsr(vcpu) |= COMPAT_PSR_E_BIT;
	else
		vcpu_sys_reg(vcpu, SCTLR_EL1) |= (1 << 25);
}

static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return !!(*vcpu_cpsr(vcpu) & COMPAT_PSR_E_BIT);

	return !!(vcpu_sys_reg(vcpu, SCTLR_EL1) & (1 << 25));
}

static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return be16_to_cpu(data & 0xffff);
		case 4:
			return be32_to_cpu(data & 0xffffffff);
		default:
			return be64_to_cpu(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return le16_to_cpu(data & 0xffff);
		case 4:
			return le32_to_cpu(data & 0xffffffff);
		default:
			return le64_to_cpu(data);
		}
	}

	return data;		/* Leave LE untouched */
}

static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_be16(data & 0xffff);
		case 4:
			return cpu_to_be32(data & 0xffffffff);
		default:
			return cpu_to_be64(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_le16(data & 0xffff);
		case 4:
			return cpu_to_le32(data & 0xffffffff);
		default:
			return cpu_to_le64(data);
		}
	}

	return data;		/* Leave LE untouched */
}

#endif /* __ARM64_KVM_EMULATE_H__ */