// SPDX-License-Identifier: GPL-2.0-only
/*
 * Fault injection for both 32 and 64bit guests.
 *
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Based on arch/arm/kvm/emulate.c
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_nested.h>
#include <asm/esr.h>

static void pend_sync_exception(struct kvm_vcpu *vcpu)
{
	/* If not nesting, EL1 is the only possible exception target */
	if (likely(!vcpu_has_nv(vcpu))) {
		kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC);
		return;
	}

	/*
	 * With NV, we need to pick between EL1 and EL2. Note that we
	 * never deal with a nesting exception here, hence never
	 * changing context, and the exception itself can be delayed
	 * until the next entry.
	 */
	switch (*vcpu_cpsr(vcpu) & PSR_MODE_MASK) {
	case PSR_MODE_EL2h:
	case PSR_MODE_EL2t:
		kvm_pend_exception(vcpu, EXCEPT_AA64_EL2_SYNC);
		break;
	case PSR_MODE_EL1h:
	case PSR_MODE_EL1t:
		kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC);
		break;
	case PSR_MODE_EL0t:
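		/* Exceptions from EL0 route to EL2 when HCR_EL2.TGE is set */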
		if (vcpu_el2_tge_is_set(vcpu))
			kvm_pend_exception(vcpu, EXCEPT_AA64_EL2_SYNC);
		else
			kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC);
		break;
	default:
		BUG();
	}
}

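/*
 * kvm_pend_exception() records the target vector in the vcpu's
 * EXCEPT_MASK flags; check whether it matches the given target.
 */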
static bool match_target_el(struct kvm_vcpu *vcpu, unsigned long target)
{
	return (vcpu_get_flag(vcpu, EXCEPT_MASK) == target);
}

static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
{
	unsigned long cpsr = *vcpu_cpsr(vcpu);
	bool is_aarch32 = vcpu_mode_is_32bit(vcpu);
	u64 esr = 0;

	pend_sync_exception(vcpu);

	/*
	 * Build an {i,d}abort, depending on the level and the
	 * instruction set. Report an external synchronous abort.
	 */
	if (kvm_vcpu_trap_il_is32bit(vcpu))
		esr |= ESR_ELx_IL;

	/*
	 * Here, the guest runs in AArch64 mode when in EL1. If we get
	 * an AArch32 fault, it means we managed to trap an EL0 fault.
	 */
	if (is_aarch32 || (cpsr & PSR_MODE_MASK) == PSR_MODE_EL0t)
		esr |= (ESR_ELx_EC_IABT_LOW << ESR_ELx_EC_SHIFT);
	else
		esr |= (ESR_ELx_EC_IABT_CUR << ESR_ELx_EC_SHIFT);

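	/*
	 * ESR_ELx_EC_DABT_LOW (0x24) is ESR_ELx_EC_IABT_LOW (0x20) with
	 * one extra bit set, so ORing it in converts either IABT
	 * encoding into the matching DABT one (0x21 becomes 0x25 for
	 * the same-level case).
	 */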
	if (!is_iabt)
		esr |= ESR_ELx_EC_DABT_LOW << ESR_ELx_EC_SHIFT;

	esr |= ESR_ELx_FSC_EXTABT;

	if (match_target_el(vcpu, unpack_vcpu_flag(EXCEPT_AA64_EL1_SYNC))) {
		vcpu_write_sys_reg(vcpu, addr, FAR_EL1);
		vcpu_write_sys_reg(vcpu, esr, ESR_EL1);
	} else {
		vcpu_write_sys_reg(vcpu, addr, FAR_EL2);
		vcpu_write_sys_reg(vcpu, esr, ESR_EL2);
	}
}

static void inject_undef64(struct kvm_vcpu *vcpu)
{
	u64 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);

	pend_sync_exception(vcpu);

	/*
	 * Build an unknown exception, depending on the instruction
	 * set.
	 */
	if (kvm_vcpu_trap_il_is32bit(vcpu))
		esr |= ESR_ELx_IL;

	if (match_target_el(vcpu, unpack_vcpu_flag(EXCEPT_AA64_EL1_SYNC)))
		vcpu_write_sys_reg(vcpu, esr, ESR_EL1);
	else
		vcpu_write_sys_reg(vcpu, esr, ESR_EL2);
}

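/* AArch32 DFSR/TTBCR bits, used when injecting into a 32bit EL1 */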
#define DFSR_FSC_EXTABT_LPAE	0x10
#define DFSR_FSC_EXTABT_nLPAE	0x08
#define DFSR_LPAE		BIT(9)
#define TTBCR_EAE		BIT(31)

static void inject_undef32(struct kvm_vcpu *vcpu)
{
	kvm_pend_exception(vcpu, EXCEPT_AA32_UND);
}

/*
 * Modelled after the TakeDataAbortException() and
 * TakePrefetchAbortException() pseudocode.
 */
static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt, u32 addr)
{
	u64 far;
	u32 fsr;

	/* Give the guest an IMPLEMENTATION DEFINED exception */
	if (vcpu_read_sys_reg(vcpu, TCR_EL1) & TTBCR_EAE) {
		fsr = DFSR_LPAE | DFSR_FSC_EXTABT_LPAE;
	} else {
		/* no need to shuffle FS[4] into DFSR[10] as it's 0 */
		fsr = DFSR_FSC_EXTABT_nLPAE;
	}

	far = vcpu_read_sys_reg(vcpu, FAR_EL1);

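	/*
	 * The AArch32 DFAR maps onto FAR_EL1[31:0] and the IFAR onto
	 * FAR_EL1[63:32], so only the half matching the abort type is
	 * updated. Likewise, DFSR maps onto ESR_EL1 and IFSR onto
	 * IFSR32_EL2.
	 */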
	if (is_pabt) {
		kvm_pend_exception(vcpu, EXCEPT_AA32_IABT);
		far &= GENMASK(31, 0);
		far |= (u64)addr << 32;
		vcpu_write_sys_reg(vcpu, fsr, IFSR32_EL2);
	} else { /* !is_pabt */
		kvm_pend_exception(vcpu, EXCEPT_AA32_DABT);
		far &= GENMASK(63, 32);
		far |= addr;
		vcpu_write_sys_reg(vcpu, fsr, ESR_EL1);
	}

	vcpu_write_sys_reg(vcpu, far, FAR_EL1);
}

/**
 * kvm_inject_dabt - inject a data abort into the guest
 * @vcpu: The VCPU to receive the data abort
 * @addr: The address to report in the DFAR
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
	if (vcpu_el1_is_32bit(vcpu))
		inject_abt32(vcpu, false, addr);
	else
		inject_abt64(vcpu, false, addr);
}

/**
 * kvm_inject_pabt - inject a prefetch abort into the guest
 * @vcpu: The VCPU to receive the prefetch abort
 * @addr: The address to report in the IFAR
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
	if (vcpu_el1_is_32bit(vcpu))
		inject_abt32(vcpu, true, addr);
	else
		inject_abt64(vcpu, true, addr);
}

void kvm_inject_size_fault(struct kvm_vcpu *vcpu)
{
	unsigned long addr, esr;

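	/*
	 * HPFAR_EL2 only provides the page-aligned IPA; recover the
	 * offset within the page from the faulting VA.
	 */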
	addr  = kvm_vcpu_get_fault_ipa(vcpu);
	addr |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0);

	if (kvm_vcpu_trap_is_iabt(vcpu))
		kvm_inject_pabt(vcpu, addr);
	else
		kvm_inject_dabt(vcpu, addr);

	/*
	 * If AArch64 or LPAE, set FSC to 0 to indicate an Address
	 * Size Fault at level 0, as if exceeding PARange.
	 *
	 * Non-LPAE guests will only get the external abort, as there
	 * is no way to describe the ASF.
	 */
	if (vcpu_el1_is_32bit(vcpu) &&
	    !(vcpu_read_sys_reg(vcpu, TCR_EL1) & TTBCR_EAE))
		return;

	esr = vcpu_read_sys_reg(vcpu, ESR_EL1);
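	/* FSC lives in ESR_ELx[5:0]; 0b000000 is an ASF at level 0 */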
	esr &= ~GENMASK_ULL(5, 0);
	vcpu_write_sys_reg(vcpu, esr, ESR_EL1);
}

/**
 * kvm_inject_undefined - inject an undefined instruction into the guest
 * @vcpu: The vCPU in which to inject the exception
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_undefined(struct kvm_vcpu *vcpu)
{
	if (vcpu_el1_is_32bit(vcpu))
		inject_undef32(vcpu);
	else
		inject_undef64(vcpu);
}

void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 esr)
{
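	/* VSESR_EL2 gives the syndrome of the vSError pended by HCR_EL2.VSE */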
	vcpu_set_vsesr(vcpu, esr & ESR_ELx_ISS_MASK);
	*vcpu_hcr(vcpu) |= HCR_VSE;
}

/**
 * kvm_inject_vabt - inject an async abort / SError into the guest
 * @vcpu: The VCPU to receive the exception
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 *
 * Systems with the RAS Extensions specify an imp-def ESR (ISV/IDS = 1) with
 * the remaining ISS all-zeros so that this error is not interpreted as an
 * uncategorized RAS error. Without the RAS Extensions we can't specify an ESR
 * value, so the CPU generates an imp-def value.
 */
void kvm_inject_vabt(struct kvm_vcpu *vcpu)
{
	kvm_set_sei_esr(vcpu, ESR_ELx_ISV);
}
255