/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/kvm_emulate.h
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM64_KVM_EMULATE_H__
#define __ARM64_KVM_EMULATE_H__

#include <linux/kvm_host.h>

#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_hyp.h>
#include <asm/ptrace.h>
#include <asm/cputype.h>
#include <asm/virt.h>

#define CURRENT_EL_SP_EL0_VECTOR	0x0
#define CURRENT_EL_SP_ELx_VECTOR	0x200
#define LOWER_EL_AArch64_VECTOR		0x400
#define LOWER_EL_AArch32_VECTOR		0x600

enum exception_type {
	except_type_sync	= 0,
	except_type_irq		= 0x80,
	except_type_fiq		= 0x100,
	except_type_serror	= 0x180,
};
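
/*
 * Illustrative note: the vector offsets above combine as
 * VBAR_ELx + <source bank> + <exception type>. For example, an IRQ
 * taken from a lower EL running AArch64 vectors to
 *
 *	VBAR_EL1 + LOWER_EL_AArch64_VECTOR + except_type_irq
 *
 * i.e. VBAR_EL1 + 0x480, matching the architectural vector table layout.
 */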

bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu);

void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_vabt(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);

void kvm_vcpu_wfi(struct kvm_vcpu *vcpu);

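/*
 * The two variants below differ in where the EL1 register width comes from:
 * EL2 code reads it straight from the vCPU's HCR_EL2 image (RW clear means a
 * 32-bit EL1), while host code relies on the VM-wide flag and warns if it is
 * consulted before the width has been configured.
 */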
#if defined(__KVM_VHE_HYPERVISOR__) || defined(__KVM_NVHE_HYPERVISOR__)
static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.hcr_el2 & HCR_RW);
}
#else
static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;

	WARN_ON_ONCE(!test_bit(KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED,
			       &kvm->arch.flags));

	return test_bit(KVM_ARCH_FLAG_EL1_32BIT, &kvm->arch.flags);
}
#endif

static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
	if (is_kernel_in_hyp_mode())
		vcpu->arch.hcr_el2 |= HCR_E2H;
	if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN)) {
		/* route synchronous external abort exceptions to EL2 */
		vcpu->arch.hcr_el2 |= HCR_TEA;
		/* trap error record accesses */
		vcpu->arch.hcr_el2 |= HCR_TERR;
	}

	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) {
		vcpu->arch.hcr_el2 |= HCR_FWB;
	} else {
		/*
		 * For non-FWB CPUs, we trap VM ops (HCR_EL2.TVM) until M+C
		 * get set in SCTLR_EL1 such that we can detect when the guest
		 * MMU gets turned on and do the necessary cache maintenance
		 * then.
		 */
		vcpu->arch.hcr_el2 |= HCR_TVM;
	}

	if (vcpu_el1_is_32bit(vcpu))
		vcpu->arch.hcr_el2 &= ~HCR_RW;
	else
		/*
		 * TID3: trap feature register accesses that we virtualise.
		 * For now this is conditional, since no AArch32 feature regs
		 * are currently virtualised.
		 */
		vcpu->arch.hcr_el2 |= HCR_TID3;

	if (cpus_have_const_cap(ARM64_MISMATCHED_CACHE_TYPE) ||
	    vcpu_el1_is_32bit(vcpu))
		vcpu->arch.hcr_el2 |= HCR_TID2;

	if (kvm_has_mte(vcpu->kvm))
		vcpu->arch.hcr_el2 |= HCR_ATA;
}

static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu->arch.hcr_el2;
}

static inline void vcpu_clear_wfx_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 &= ~HCR_TWE;
	if (atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count) ||
	    vcpu->kvm->arch.vgic.nassgireq)
		vcpu->arch.hcr_el2 &= ~HCR_TWI;
	else
		vcpu->arch.hcr_el2 |= HCR_TWI;
}
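
/*
 * Descriptive note: the helper above always stops trapping WFE, but keeps
 * trapping WFI unless the vcpu has GICv4 vLPIs mapped or the VM uses GICv4.1
 * vSGIs (nassgireq), presumably so that a guest waiting on directly injected
 * interrupts can sit in a native WFI instead of bouncing through the
 * hypervisor.
 */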

static inline void vcpu_set_wfx_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 |= HCR_TWE;
	vcpu->arch.hcr_el2 |= HCR_TWI;
}

static inline void vcpu_ptrauth_enable(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
}

static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
}

static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.vsesr_el2;
}

static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
{
	vcpu->arch.vsesr_el2 = vsesr;
}

static __always_inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->pc;
}

static __always_inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->pstate;
}

static __always_inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
	return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
}

static __always_inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return kvm_condition_valid32(vcpu);

	return true;
}

static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
	*vcpu_cpsr(vcpu) |= PSR_AA32_T_BIT;
}

/*
 * vcpu_get_reg and vcpu_set_reg should always be passed a register number
 * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
 * AArch32 with banked registers.
 */
static __always_inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
					 u8 reg_num)
{
	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs[reg_num];
}

static __always_inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
				unsigned long val)
{
	if (reg_num != 31)
		vcpu_gp_regs(vcpu)->regs[reg_num] = val;
}
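
/*
 * Sketch of typical use (illustrative): when completing a trapped MMIO read,
 * the destination register number decoded from ESR_EL2 is fed back here, and
 * writes to register 31 (XZR) are silently discarded:
 *
 *	vcpu_set_reg(vcpu, kvm_vcpu_dabt_get_rd(vcpu), val);
 */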

/*
 * The layout of SPSR for an AArch32 state is different when observed from an
 * AArch64 SPSR_ELx or an AArch32 SPSR_*. This function generates the AArch32
 * view given an AArch64 view.
 *
 * In ARM DDI 0487E.a see:
 *
 * - The AArch64 view (SPSR_EL2) in section C5.2.18, page C5-426
 * - The AArch32 view (SPSR_abt) in section G8.2.126, page G8-6256
 * - The AArch32 view (SPSR_und) in section G8.2.132, page G8-6280
 *
 * Which show the following differences:
 *
 * | Bit | AA64 | AA32 | Notes                       |
 * +-----+------+------+-----------------------------+
 * | 24  | DIT  | J    | J is RES0 in ARMv8          |
 * | 21  | SS   | DIT  | SS doesn't exist in AArch32 |
 *
 * ... and all other bits are (currently) common.
 */
static inline unsigned long host_spsr_to_spsr32(unsigned long spsr)
{
	const unsigned long overlap = BIT(24) | BIT(21);
	unsigned long dit = !!(spsr & PSR_AA32_DIT_BIT);

	spsr &= ~overlap;

	spsr |= dit << 21;

	return spsr;
}
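
/*
 * Worked example (illustrative): for an AArch64 SPSR value with DIT set,
 * e.g. 0x01000000 (bit 24), host_spsr_to_spsr32() clears bits 24 and 21 and
 * re-inserts DIT at its AArch32 position, returning 0x00200000 (bit 21).
 */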

static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
{
	u32 mode;

	if (vcpu_mode_is_32bit(vcpu)) {
		mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
		return mode > PSR_AA32_MODE_USR;
	}

	mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;

	return mode != PSR_MODE_EL0t;
}

static __always_inline u32 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.esr_el2;
}

static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_esr(vcpu);

	if (esr & ESR_ELx_CV)
		return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;

	return -1;
}

static __always_inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.far_el2;
}

static __always_inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
{
	return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
}
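
/*
 * Illustrative note: HPFAR_EL2 only reports the faulting IPA at page
 * granularity, so callers that need a byte-accurate address typically merge
 * in the page offset from FAR_EL2, along the lines of:
 *
 *	ipa = kvm_vcpu_get_fault_ipa(vcpu);
 *	ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
 */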

static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.disr_el1;
}

static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_xVC_IMM_MASK;
}

static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_ISV);
}

static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
}

static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SSE);
}

static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SF);
}

static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
{
	return (kvm_vcpu_get_esr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}

static __always_inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_S1PTW);
}

/* Always check for S1PTW *before* using this. */
static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR;
}

static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_CM);
}

static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
{
	return 1 << ((kvm_vcpu_get_esr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
}
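
/*
 * Worked example (illustrative): the SAS field encodes log2 of the access
 * size, so SAS == 0b00 yields a 1-byte access, 0b01 a halfword, 0b10 a word
 * and 0b11 a doubleword (1 << SAS bytes in each case).
 */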

/* This one is not specific to Data Abort */
static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_IL);
}

static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
{
	return ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
}

static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}

static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu);
}

static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC;
}

static __always_inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_TYPE;
}

static __always_inline u8 kvm_vcpu_trap_get_fault_level(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_LEVEL;
}

static __always_inline bool kvm_vcpu_abt_issea(const struct kvm_vcpu *vcpu)
{
	switch (kvm_vcpu_trap_get_fault(vcpu)) {
	case FSC_SEA:
	case FSC_SEA_TTW0:
	case FSC_SEA_TTW1:
	case FSC_SEA_TTW2:
	case FSC_SEA_TTW3:
	case FSC_SECC:
	case FSC_SECC_TTW0:
	case FSC_SECC_TTW1:
	case FSC_SECC_TTW2:
	case FSC_SECC_TTW3:
		return true;
	default:
		return false;
	}
}

static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_esr(vcpu);
	return ESR_ELx_SYS64_ISS_RT(esr);
}

static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{
	if (kvm_vcpu_abt_iss1tw(vcpu))
		return true;

	if (kvm_vcpu_trap_is_iabt(vcpu))
		return false;

	return kvm_vcpu_dabt_iswrite(vcpu);
}

static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
	return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
}

static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu)) {
		*vcpu_cpsr(vcpu) |= PSR_AA32_E_BIT;
	} else {
		u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
		sctlr |= SCTLR_ELx_EE;
		vcpu_write_sys_reg(vcpu, sctlr, SCTLR_EL1);
	}
}

static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return !!(*vcpu_cpsr(vcpu) & PSR_AA32_E_BIT);

	if (vcpu_mode_priv(vcpu))
		return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & SCTLR_ELx_EE);
	else
		return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & SCTLR_EL1_E0E);
}

static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return be16_to_cpu(data & 0xffff);
		case 4:
			return be32_to_cpu(data & 0xffffffff);
		default:
			return be64_to_cpu(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return le16_to_cpu(data & 0xffff);
		case 4:
			return le32_to_cpu(data & 0xffffffff);
		default:
			return le64_to_cpu(data);
		}
	}

	return data;		/* Leave LE untouched */
}
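
/*
 * Worked example (illustrative): on a little-endian host, a big-endian
 * guest storing the halfword 0x1234 is presented to the emulation code as
 * be16_to_cpu(0x1234) == 0x3412, i.e. the byte image the guest expects to
 * land in memory.
 */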

static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_be16(data & 0xffff);
		case 4:
			return cpu_to_be32(data & 0xffffffff);
		default:
			return cpu_to_be64(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_le16(data & 0xffff);
		case 4:
			return cpu_to_le32(data & 0xffffffff);
		default:
			return cpu_to_le64(data);
		}
	}

	return data;		/* Leave LE untouched */
}
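
/*
 * Sketch of typical use (illustrative): completing an MMIO read by loading
 * the host-provided bytes into the destination register decoded from the
 * data abort:
 *
 *	val = vcpu_data_host_to_guest(vcpu, data, len);
 *	vcpu_set_reg(vcpu, kvm_vcpu_dabt_get_rd(vcpu), val);
 */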

static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
{
	vcpu->arch.flags |= KVM_ARM64_INCREMENT_PC;
}
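
/*
 * Descriptive note: kvm_incr_pc() does not bump PC immediately; it only
 * records a flag that is consumed before the next guest entry, which keeps
 * the PC update consistent with any pending exception injection.
 */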

static inline bool vcpu_has_feature(struct kvm_vcpu *vcpu, int feature)
{
	return test_bit(feature, vcpu->arch.features);
}

#endif /* __ARM64_KVM_EMULATE_H__ */