xref: /openbmc/linux/arch/arm64/kvm/inject_fault.c (revision f87deada)
/*
 * Fault injection for both 32 and 64bit guests.
 *
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Based on arch/arm/kvm/emulate.c
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/esr.h>

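/*
 * PSTATE for exception entry into an AArch64 guest's EL1: EL1 with
 * SP_EL1 selected (EL1h) and the D, A, I and F exception masks set.
 */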
#define PSTATE_FAULT_BITS_64	(PSR_MODE_EL1h | PSR_A_BIT | PSR_F_BIT | \
				 PSR_I_BIT | PSR_D_BIT)

#define CURRENT_EL_SP_EL0_VECTOR	0x0
#define CURRENT_EL_SP_ELx_VECTOR	0x200
#define LOWER_EL_AArch64_VECTOR		0x400
#define LOWER_EL_AArch32_VECTOR		0x600

enum exception_type {
	except_type_sync	= 0,
	except_type_irq		= 0x80,
	except_type_fiq		= 0x100,
	except_type_serror	= 0x180,
};

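/*
 * The AArch64 EL1 vector table is four groups of four 0x80-byte
 * entries. The *_VECTOR defines above select the group based on
 * where the exception came from, and exception_type selects the
 * entry within the group, so the vector address is:
 *
 *	VBAR_EL1 + group offset + type offset
 *
 * e.g. a synchronous exception taken from the guest's own EL1h
 * mode vectors to VBAR_EL1 + 0x200 + 0x0.
 */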
static u64 get_except_vector(struct kvm_vcpu *vcpu, enum exception_type type)
{
	u64 exc_offset;

	switch (*vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT)) {
	case PSR_MODE_EL1t:
		exc_offset = CURRENT_EL_SP_EL0_VECTOR;
		break;
	case PSR_MODE_EL1h:
		exc_offset = CURRENT_EL_SP_ELx_VECTOR;
		break;
	case PSR_MODE_EL0t:
		exc_offset = LOWER_EL_AArch64_VECTOR;
		break;
	default:
		exc_offset = LOWER_EL_AArch32_VECTOR;
	}

	return vcpu_sys_reg(vcpu, VBAR_EL1) + exc_offset + type;
}

static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
{
	unsigned long cpsr = *vcpu_cpsr(vcpu);
	bool is_aarch32 = vcpu_mode_is_32bit(vcpu);
	u32 esr = 0;

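	/*
	 * Emulate exception entry: save the return address in ELR_EL1
	 * and the old PSTATE in SPSR_EL1, mask exceptions, and resume
	 * the guest at its EL1 synchronous exception vector.
	 */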
	*vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
	*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);

	*vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
	*vcpu_spsr(vcpu) = cpsr;

	vcpu_sys_reg(vcpu, FAR_EL1) = addr;

	/*
	 * Build an {i,d}abort, depending on the level and the
	 * instruction set. Report an external synchronous abort.
	 */
	if (kvm_vcpu_trap_il_is32bit(vcpu))
		esr |= ESR_ELx_IL;

	/*
	 * Here, the guest runs in AArch64 mode when in EL1. If we get
	 * an AArch32 fault, it means we managed to trap an EL0 fault.
	 */
	if (is_aarch32 || (cpsr & PSR_MODE_MASK) == PSR_MODE_EL0t)
		esr |= (ESR_ELx_EC_IABT_LOW << ESR_ELx_EC_SHIFT);
	else
		esr |= (ESR_ELx_EC_IABT_CUR << ESR_ELx_EC_SHIFT);

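	/*
	 * ESR_ELx_EC_DABT_LOW is ESR_ELx_EC_IABT_LOW with one extra
	 * bit set, so ORing it in turns either instruction-abort EC
	 * value into the matching data-abort one while preserving the
	 * LOW/CUR distinction chosen above.
	 */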
	if (!is_iabt)
		esr |= ESR_ELx_EC_DABT_LOW << ESR_ELx_EC_SHIFT;

	vcpu_sys_reg(vcpu, ESR_EL1) = esr | ESR_ELx_FSC_EXTABT;
}

static void inject_undef64(struct kvm_vcpu *vcpu)
{
	unsigned long cpsr = *vcpu_cpsr(vcpu);
	u32 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);

	*vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
	*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);

	*vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
	*vcpu_spsr(vcpu) = cpsr;

	/*
	 * Build an unknown exception, depending on the instruction
	 * set.
	 */
	if (kvm_vcpu_trap_il_is32bit(vcpu))
		esr |= ESR_ELx_IL;

	vcpu_sys_reg(vcpu, ESR_EL1) = esr;
}

/**
 * kvm_inject_dabt - inject a data abort into the guest
 * @vcpu: The VCPU to receive the data abort
 * @addr: The address to report in the DFAR
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
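	/* HCR_EL2.RW clear means the guest's EL1 is AArch32 */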
	if (!(vcpu->arch.hcr_el2 & HCR_RW))
		kvm_inject_dabt32(vcpu, addr);
	else
		inject_abt64(vcpu, false, addr);
}

/**
 * kvm_inject_pabt - inject a prefetch abort into the guest
 * @vcpu: The VCPU to receive the prefetch abort
 * @addr: The address to report in the IFAR
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
	if (!(vcpu->arch.hcr_el2 & HCR_RW))
		kvm_inject_pabt32(vcpu, addr);
	else
		inject_abt64(vcpu, true, addr);
}

/**
 * kvm_inject_undefined - inject an undefined instruction into the guest
 * @vcpu: The VCPU to receive the undefined exception
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_undefined(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.hcr_el2 & HCR_RW))
		kvm_inject_undef32(vcpu);
	else
		inject_undef64(vcpu);
}

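/*
 * Pend a virtual SError for the guest: on CPUs with the RAS
 * Extensions, VSESR_EL2 provides the syndrome reported when the
 * vSError is taken, and setting HCR_EL2.VSE leaves it pending until
 * the guest next unmasks PSTATE.A.
 */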
static void pend_guest_serror(struct kvm_vcpu *vcpu, u64 esr)
{
	vcpu_set_vsesr(vcpu, esr);
	vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) | HCR_VSE);
}

/**
 * kvm_inject_vabt - inject an async abort / SError into the guest
 * @vcpu: The VCPU to receive the exception
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 *
 * Systems with the RAS Extensions specify an imp-def ESR (ISV/IDS = 1) with
 * the remaining ISS all-zeros so that this error is not interpreted as an
 * uncategorized RAS error. Without the RAS Extensions we can't specify an ESR
 * value, so the CPU generates an imp-def value.
 */
void kvm_inject_vabt(struct kvm_vcpu *vcpu)
{
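	/*
	 * ESR_ELx_ISV is bit 24, which in an SError syndrome is the
	 * IDS bit; with the rest of the ISS zero, the syndrome is
	 * flagged as IMPLEMENTATION DEFINED rather than an
	 * architected RAS error.
	 */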
	pend_guest_serror(vcpu, ESR_ELx_ISV);
}