/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/kvm_emulate.h
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM64_KVM_EMULATE_H__
#define __ARM64_KVM_EMULATE_H__

#include <linux/kvm_host.h>

#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_hyp.h>
#include <asm/ptrace.h>
#include <asm/cputype.h>
#include <asm/virt.h>

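/*
 * Offsets from VBAR_ELx to the four exception vector groups, and the
 * per-type offsets (sync/IRQ/FIQ/SError) within a group, as used when
 * emulating exception entry into the guest.
 */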
#define CURRENT_EL_SP_EL0_VECTOR	0x0
#define CURRENT_EL_SP_ELx_VECTOR	0x200
#define LOWER_EL_AArch64_VECTOR		0x400
#define LOWER_EL_AArch32_VECTOR		0x600

enum exception_type {
	except_type_sync	= 0,
	except_type_irq		= 0x80,
	except_type_fiq		= 0x100,
	except_type_serror	= 0x180,
};

bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu);

void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_vabt(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);

void kvm_vcpu_wfi(struct kvm_vcpu *vcpu);

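/* HCR_EL2.RW clear means EL1 (and thus EL0) runs in AArch32 state. */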
static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.hcr_el2 & HCR_RW);
}

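/* Compute the initial HCR_EL2 trap configuration for this vcpu. */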
static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
	if (is_kernel_in_hyp_mode())
		vcpu->arch.hcr_el2 |= HCR_E2H;
	if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN)) {
		/* route synchronous external abort exceptions to EL2 */
		vcpu->arch.hcr_el2 |= HCR_TEA;
		/* trap error record accesses */
		vcpu->arch.hcr_el2 |= HCR_TERR;
	}

	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) {
		vcpu->arch.hcr_el2 |= HCR_FWB;
	} else {
		/*
		 * For non-FWB CPUs, we trap VM ops (HCR_EL2.TVM) until M+C
		 * get set in SCTLR_EL1 such that we can detect when the guest
		 * MMU gets turned on and do the necessary cache maintenance
		 * then.
		 */
		vcpu->arch.hcr_el2 |= HCR_TVM;
	}

	if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
		vcpu->arch.hcr_el2 &= ~HCR_RW;

	/*
	 * TID3: trap feature register accesses that we virtualise.
	 * For now this is conditional, since no AArch32 feature regs
	 * are currently virtualised.
	 */
	if (!vcpu_el1_is_32bit(vcpu))
		vcpu->arch.hcr_el2 |= HCR_TID3;

	if (cpus_have_const_cap(ARM64_MISMATCHED_CACHE_TYPE) ||
	    vcpu_el1_is_32bit(vcpu))
		vcpu->arch.hcr_el2 |= HCR_TID2;

	if (kvm_has_mte(vcpu->kvm))
		vcpu->arch.hcr_el2 |= HCR_ATA;
}

static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu->arch.hcr_el2;
}

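/*
 * Stop trapping WFE, and only keep trapping WFI when the host can
 * usefully intervene: with directly-injected vLPIs or GICv4.1 direct
 * SGIs, interrupts reach the guest without the host's help, so WFI is
 * left untrapped as well. vcpu_set_wfx_traps() re-enables both traps.
 */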
static inline void vcpu_clear_wfx_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 &= ~HCR_TWE;
	if (atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count) ||
	    vcpu->kvm->arch.vgic.nassgireq)
		vcpu->arch.hcr_el2 &= ~HCR_TWI;
	else
		vcpu->arch.hcr_el2 |= HCR_TWI;
}

static inline void vcpu_set_wfx_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 |= HCR_TWE;
	vcpu->arch.hcr_el2 |= HCR_TWI;
}

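/*
 * HCR_EL2.API/APK disable the traps on pointer authentication
 * instructions and key registers: setting them lets the guest use
 * ptrauth natively, clearing them makes such accesses trap to EL2.
 */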
static inline void vcpu_ptrauth_enable(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
}

static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
}

static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.vsesr_el2;
}

static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
{
	vcpu->arch.vsesr_el2 = vsesr;
}

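/*
 * Accessors for the saved guest context. The const is cast away so the
 * same helper can be used for both reading and writing the register.
 */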
static __always_inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->pc;
}

static __always_inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->pstate;
}

static __always_inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
	return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
}

static __always_inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return kvm_condition_valid32(vcpu);

	return true;
}

static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
	*vcpu_cpsr(vcpu) |= PSR_AA32_T_BIT;
}

/*
 * vcpu_get_reg and vcpu_set_reg should always be passed a register number
 * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
 * AArch32 with banked registers.
 */
static __always_inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
					 u8 reg_num)
{
	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs[reg_num];
}

static __always_inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
				unsigned long val)
{
	if (reg_num != 31)
		vcpu_gp_regs(vcpu)->regs[reg_num] = val;
}

/*
 * The layout of SPSR for an AArch32 state is different when observed from an
 * AArch64 SPSR_ELx or an AArch32 SPSR_*. This function generates the AArch32
 * view given an AArch64 view.
 *
 * In ARM DDI 0487E.a see:
 *
 * - The AArch64 view (SPSR_EL2) in section C5.2.18, page C5-426
 * - The AArch32 view (SPSR_abt) in section G8.2.126, page G8-6256
 * - The AArch32 view (SPSR_und) in section G8.2.132, page G8-6280
 *
 * Which show the following differences:
 *
 * | Bit | AA64 | AA32 | Notes                       |
 * +-----+------+------+-----------------------------|
 * | 24  | DIT  | J    | J is RES0 in ARMv8          |
 * | 21  | SS   | DIT  | SS doesn't exist in AArch32 |
 *
 * ... and all other bits are (currently) common.
 */
static inline unsigned long host_spsr_to_spsr32(unsigned long spsr)
{
	const unsigned long overlap = BIT(24) | BIT(21);
	unsigned long dit = !!(spsr & PSR_AA32_DIT_BIT);

	spsr &= ~overlap;

	spsr |= dit << 21;

	return spsr;
}

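/* True if the guest was executing at a privileged (non-EL0/USR) level. */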
static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
{
	u32 mode;

	if (vcpu_mode_is_32bit(vcpu)) {
		mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
		return mode > PSR_AA32_MODE_USR;
	}

	mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;

	return mode != PSR_MODE_EL0t;
}

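/*
 * ESR_EL2 is saved on exit and cached in vcpu->arch.fault; all the
 * syndrome decoding helpers below operate on that cached copy.
 */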
static __always_inline u32 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.esr_el2;
}

static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_esr(vcpu);

	if (esr & ESR_ELx_CV)
		return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;

	return -1;
}

static __always_inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.far_el2;
}

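/*
 * HPFAR_EL2.FIPA holds the faulting IPA right-shifted by 12, starting
 * at register bit 4, so masking and shifting left by 8 reconstructs
 * the page-aligned intermediate physical address.
 */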
static __always_inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
{
	return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
}

static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.disr_el1;
}

static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_xVC_IMM_MASK;
}

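/*
 * ESR_ELx.ISV set means the remaining ISS fields (SAS, SSE, SRT, SF)
 * hold a valid instruction syndrome, i.e. the access can be emulated
 * as MMIO without decoding the faulting instruction.
 */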
static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_ISV);
}

static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
}

static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SSE);
}

static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SF);
}

static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
{
	return (kvm_vcpu_get_esr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}

static __always_inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_S1PTW);
}

/* Always check for S1PTW *before* using this. */
static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR;
}

static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_CM);
}

static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
{
	return 1 << ((kvm_vcpu_get_esr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
}

/* This one is not specific to Data Abort */
static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_IL);
}

static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
{
	return ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
}

static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}

static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu);
}

static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC;
}

static __always_inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_TYPE;
}

static __always_inline u8 kvm_vcpu_trap_get_fault_level(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_LEVEL;
}

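/*
 * True for any synchronous external abort: on the access itself, on a
 * translation table walk, or a parity/ECC error on a memory access.
 */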
static __always_inline bool kvm_vcpu_abt_issea(const struct kvm_vcpu *vcpu)
{
	switch (kvm_vcpu_trap_get_fault(vcpu)) {
	case FSC_SEA:
	case FSC_SEA_TTW0:
	case FSC_SEA_TTW1:
	case FSC_SEA_TTW2:
	case FSC_SEA_TTW3:
	case FSC_SECC:
	case FSC_SECC_TTW0:
	case FSC_SECC_TTW1:
	case FSC_SECC_TTW2:
	case FSC_SECC_TTW3:
		return true;
	default:
		return false;
	}
}

static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_esr(vcpu);
	return ESR_ELx_SYS64_ISS_RT(esr);
}

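/*
 * A fault on a stage 1 page table walk counts as a write, as the
 * walker may need to update hardware-managed access/dirty flags.
 */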
static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{
	if (kvm_vcpu_abt_iss1tw(vcpu))
		return true;

	if (kvm_vcpu_trap_is_iabt(vcpu))
		return false;

	return kvm_vcpu_dabt_iswrite(vcpu);
}

static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
	return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
}

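/*
 * Force big-endian data accesses for the guest: CPSR.E for AArch32,
 * SCTLR_EL1.EE for AArch64 EL1.
 */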
static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu)) {
		*vcpu_cpsr(vcpu) |= PSR_AA32_E_BIT;
	} else {
		u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
		sctlr |= SCTLR_ELx_EE;
		vcpu_write_sys_reg(vcpu, sctlr, SCTLR_EL1);
	}
}

static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return !!(*vcpu_cpsr(vcpu) & PSR_AA32_E_BIT);

	if (vcpu_mode_priv(vcpu))
		return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & SCTLR_ELx_EE);
	else
		return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & SCTLR_EL1_E0E);
}

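/*
 * Convert MMIO register data between the vcpu's current data
 * endianness and the host's native layout, truncating the value to
 * the access width first.
 */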
static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return be16_to_cpu(data & 0xffff);
		case 4:
			return be32_to_cpu(data & 0xffffffff);
		default:
			return be64_to_cpu(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return le16_to_cpu(data & 0xffff);
		case 4:
			return le32_to_cpu(data & 0xffffffff);
		default:
			return le64_to_cpu(data);
		}
	}

	return data;		/* Leave LE untouched */
}

static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_be16(data & 0xffff);
		case 4:
			return cpu_to_be32(data & 0xffffffff);
		default:
			return cpu_to_be64(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_le16(data & 0xffff);
		case 4:
			return cpu_to_le32(data & 0xffffffff);
		default:
			return cpu_to_le64(data);
		}
	}

	return data;		/* Leave LE untouched */
}

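/*
 * Rather than bumping PC directly, set a flag; the increment is
 * applied once the guest context is committed back, after any pending
 * exception has been resolved.
 */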
static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
{
	vcpu->arch.flags |= KVM_ARM64_INCREMENT_PC;
}

static inline bool vcpu_has_feature(struct kvm_vcpu *vcpu, int feature)
{
	return test_bit(feature, vcpu->arch.features);
}

#endif /* __ARM64_KVM_EMULATE_H__ */