/*
 * Fault injection for both 32 and 64bit guests.
 *
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Based on arch/arm/kvm/emulate.c
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/esr.h>

#define PSTATE_FAULT_BITS_64	(PSR_MODE_EL1h | PSR_A_BIT | PSR_F_BIT | \
				 PSR_I_BIT | PSR_D_BIT)
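
/*
 * PSTATE_FAULT_BITS_64 is the PSTATE set on 64bit exception entry:
 * EL1 using SP_EL1, with the D, A, I and F exception masks all set,
 * mirroring what the CPU itself does when it takes an exception to EL1.
 */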

#define CURRENT_EL_SP_EL0_VECTOR	0x0
#define CURRENT_EL_SP_ELx_VECTOR	0x200
#define LOWER_EL_AArch64_VECTOR		0x400
#define LOWER_EL_AArch32_VECTOR		0x600
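
/*
 * These are the base offsets of the four vector groups in an AArch64
 * vector table: exception taken from the current EL while on SP_EL0,
 * from the current EL while on SP_ELx, from a lower EL in AArch64, and
 * from a lower EL in AArch32. Within each group, the synchronous, IRQ,
 * FIQ and SError vectors are 0x80 bytes apart (see enum exception_type
 * below).
 */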

static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
{
	unsigned long cpsr;
	unsigned long new_spsr_value = *vcpu_cpsr(vcpu);
	bool is_thumb = (new_spsr_value & COMPAT_PSR_T_BIT);
	u32 return_offset = (is_thumb) ? 4 : 0;
	u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);

	cpsr = mode | COMPAT_PSR_I_BIT;

	if (sctlr & (1 << 30))		/* SCTLR.TE: take exceptions in Thumb state */
		cpsr |= COMPAT_PSR_T_BIT;
	if (sctlr & (1 << 25))		/* SCTLR.EE: big-endian on exception entry */
		cpsr |= COMPAT_PSR_E_BIT;

	*vcpu_cpsr(vcpu) = cpsr;

	/* Note: These now point to the banked copies */
	*vcpu_spsr(vcpu) = new_spsr_value;
	*vcpu_reg32(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;

	/* Branch to exception vector */
	if (sctlr & (1 << 13))		/* SCTLR.V: high vectors at 0xffff0000 */
		vect_offset += 0xffff0000;
	else /* always have security exceptions */
		vect_offset += vcpu_cp15(vcpu, c12_VBAR);

	*vcpu_pc(vcpu) = vect_offset;
}

static void inject_undef32(struct kvm_vcpu *vcpu)
{
	/* Offset 4 is the Undefined Instruction entry in the 32-bit vector table */
	prepare_fault32(vcpu, COMPAT_PSR_MODE_UND, 4);
}
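
/*
 * Worked example: injecting an undef into a guest with SCTLR.V set
 * switches it to UND mode and branches to 0xffff0004, the high-vectors
 * Undefined Instruction entry; with SCTLR.V clear, the guest lands at
 * VBAR + 0x4 instead.
 */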

/*
 * Modelled after the TakeDataAbortException() and
 * TakePrefetchAbortException() pseudocode.
 */
static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
			 unsigned long addr)
{
	u32 vect_offset;
	u32 *far, *fsr;
	bool is_lpae;

	if (is_pabt) {
		vect_offset = 12;
		far = &vcpu_cp15(vcpu, c6_IFAR);
		fsr = &vcpu_cp15(vcpu, c5_IFSR);
	} else { /* !pabt, i.e. a data abort */
		vect_offset = 16;
		far = &vcpu_cp15(vcpu, c6_DFAR);
		fsr = &vcpu_cp15(vcpu, c5_DFSR);
	}

	prepare_fault32(vcpu, COMPAT_PSR_MODE_ABT | COMPAT_PSR_A_BIT, vect_offset);

	*far = addr;

	/* Give the guest an IMPLEMENTATION DEFINED exception */
	is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31);	/* TTBCR.EAE selects the LPAE format */
	if (is_lpae)
		*fsr = 1 << 9 | 0x34;	/* bit 9 flags the LPAE format; 0x34 is an IMP DEF status */
	else
		*fsr = 0x14;		/* IMP DEF status in the short-descriptor format */
}
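
/*
 * Worked example: injecting a data abort at address 0x1000 into a
 * non-LPAE guest with high vectors disabled leaves the guest in ABT
 * mode with DFAR = 0x1000, DFSR = 0x14 and the PC at the data abort
 * vector, VBAR + 0x10.
 */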

enum exception_type {
	except_type_sync	= 0,
	except_type_irq		= 0x80,
	except_type_fiq		= 0x100,
	except_type_serror	= 0x180,
};

static u64 get_except_vector(struct kvm_vcpu *vcpu, enum exception_type type)
{
	u64 exc_offset;

	switch (*vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT)) {
	case PSR_MODE_EL1t:
		exc_offset = CURRENT_EL_SP_EL0_VECTOR;
		break;
	case PSR_MODE_EL1h:
		exc_offset = CURRENT_EL_SP_ELx_VECTOR;
		break;
	case PSR_MODE_EL0t:
		exc_offset = LOWER_EL_AArch64_VECTOR;
		break;
	default:
		exc_offset = LOWER_EL_AArch32_VECTOR;
	}

	return vcpu_sys_reg(vcpu, VBAR_EL1) + exc_offset + type;
}
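
/*
 * Worked example: a 64bit guest running at EL1 on SP_EL1
 * (PSR_MODE_EL1h) takes a synchronous exception through
 * VBAR_EL1 + 0x200 + 0x0, the "current EL with SP_ELx" synchronous
 * vector.
 */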

static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
{
	unsigned long cpsr = *vcpu_cpsr(vcpu);
	bool is_aarch32 = vcpu_mode_is_32bit(vcpu);
	u32 esr = 0;

	*vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
	*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);

	*vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
	*vcpu_spsr(vcpu) = cpsr;

	vcpu_sys_reg(vcpu, FAR_EL1) = addr;

	/*
	 * Build an {i,d}abort, depending on the level and the
	 * instruction set. Report an external synchronous abort.
	 */
	if (kvm_vcpu_trap_il_is32bit(vcpu))
		esr |= ESR_ELx_IL;

	/*
	 * Here, the guest runs in AArch64 mode when in EL1. If we get
	 * an AArch32 fault, it means we managed to trap an EL0 fault.
	 */
	if (is_aarch32 || (cpsr & PSR_MODE_MASK) == PSR_MODE_EL0t)
		esr |= (ESR_ELx_EC_IABT_LOW << ESR_ELx_EC_SHIFT);
	else
		esr |= (ESR_ELx_EC_IABT_CUR << ESR_ELx_EC_SHIFT);

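	/*
	 * ESR_ELx_EC_DABT_LOW (0x24) is ESR_ELx_EC_IABT_LOW (0x20) with an
	 * extra EC bit set, so OR-ing it in converts either IABT EC value
	 * (0x20/0x21) into the matching DABT one (0x24/0x25).
	 */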
	if (!is_iabt)
		esr |= ESR_ELx_EC_DABT_LOW << ESR_ELx_EC_SHIFT;

	vcpu_sys_reg(vcpu, ESR_EL1) = esr | ESR_ELx_FSC_EXTABT;
}

static void inject_undef64(struct kvm_vcpu *vcpu)
{
	unsigned long cpsr = *vcpu_cpsr(vcpu);
	u32 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);

	*vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
	*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);

	*vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
	*vcpu_spsr(vcpu) = cpsr;

	/*
	 * Build an unknown exception, depending on the instruction
	 * set.
	 */
	if (kvm_vcpu_trap_il_is32bit(vcpu))
		esr |= ESR_ELx_IL;

	vcpu_sys_reg(vcpu, ESR_EL1) = esr;
}

/**
 * kvm_inject_dabt - inject a data abort into the guest
 * @vcpu: The VCPU to receive the data abort
 * @addr: The address to report in the DFAR
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
	if (!(vcpu->arch.hcr_el2 & HCR_RW))	/* HCR_EL2.RW clear: the guest's EL1 is AArch32 */
		inject_abt32(vcpu, false, addr);
	else
		inject_abt64(vcpu, false, addr);
}

/**
 * kvm_inject_pabt - inject a prefetch abort into the guest
 * @vcpu: The VCPU to receive the prefetch abort
 * @addr: The address to report in the IFAR
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
	if (!(vcpu->arch.hcr_el2 & HCR_RW))
		inject_abt32(vcpu, true, addr);
	else
		inject_abt64(vcpu, true, addr);
}

/**
 * kvm_inject_undefined - inject an undefined instruction into the guest
 * @vcpu: The VCPU to receive the undefined exception
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_undefined(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.hcr_el2 & HCR_RW))
		inject_undef32(vcpu);
	else
		inject_undef64(vcpu);
}

/**
 * kvm_inject_vabt - inject an async abort / SError into the guest
 * @vcpu: The VCPU to receive the exception
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_vabt(struct kvm_vcpu *vcpu)
{
	/* Setting HCR_EL2.VSE pends a virtual SError for the guest */
	vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) | HCR_VSE);
}
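
/*
 * Illustrative sketch (hypothetical, not part of this file): a typical
 * caller is an exit handler that cannot emulate a trapped operation,
 * which injects an UNDEF and resumes the guest:
 *
 *	static int handle_unknown_trap(struct kvm_vcpu *vcpu)
 *	{
 *		kvm_inject_undefined(vcpu);
 *		return 1;	// non-zero: re-enter the guest
 *	}
 */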