/*
 * QEMU Hypervisor.framework support for Apple Silicon
 *
 * Copyright 2020 Alexander Graf <agraf@csgraf.de>
 * Copyright 2020 Google LLC
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/error-report.h"

#include "sysemu/runstate.h"
#include "sysemu/hvf.h"
#include "sysemu/hvf_int.h"
#include "sysemu/hw_accel.h"
#include "hvf_arm.h"
#include "cpregs.h"

#include <mach/mach_time.h>

#include "exec/address-spaces.h"
#include "hw/boards.h"
#include "hw/irq.h"
#include "qemu/main-loop.h"
#include "sysemu/cpus.h"
#include "arm-powerctl.h"
#include "target/arm/cpu.h"
#include "target/arm/internals.h"
#include "target/arm/multiprocessing.h"
#include "target/arm/gtimer.h"
#include "trace.h"
#include "migration/vmstate.h"

#include "gdbstub/enums.h"

#define MDSCR_EL1_SS_SHIFT 0
#define MDSCR_EL1_MDE_SHIFT 15

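/*
 * Tables mapping QEMU's hardware breakpoint/watchpoint slot numbers to the
 * HVF system register enumerants for the corresponding debug control and
 * value registers, used when syncing guest debug state.
 */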
static const uint16_t dbgbcr_regs[] = {
    HV_SYS_REG_DBGBCR0_EL1,
    HV_SYS_REG_DBGBCR1_EL1,
    HV_SYS_REG_DBGBCR2_EL1,
    HV_SYS_REG_DBGBCR3_EL1,
    HV_SYS_REG_DBGBCR4_EL1,
    HV_SYS_REG_DBGBCR5_EL1,
    HV_SYS_REG_DBGBCR6_EL1,
    HV_SYS_REG_DBGBCR7_EL1,
    HV_SYS_REG_DBGBCR8_EL1,
    HV_SYS_REG_DBGBCR9_EL1,
    HV_SYS_REG_DBGBCR10_EL1,
    HV_SYS_REG_DBGBCR11_EL1,
    HV_SYS_REG_DBGBCR12_EL1,
    HV_SYS_REG_DBGBCR13_EL1,
    HV_SYS_REG_DBGBCR14_EL1,
    HV_SYS_REG_DBGBCR15_EL1,
};

static const uint16_t dbgbvr_regs[] = {
    HV_SYS_REG_DBGBVR0_EL1,
    HV_SYS_REG_DBGBVR1_EL1,
    HV_SYS_REG_DBGBVR2_EL1,
    HV_SYS_REG_DBGBVR3_EL1,
    HV_SYS_REG_DBGBVR4_EL1,
    HV_SYS_REG_DBGBVR5_EL1,
    HV_SYS_REG_DBGBVR6_EL1,
    HV_SYS_REG_DBGBVR7_EL1,
    HV_SYS_REG_DBGBVR8_EL1,
    HV_SYS_REG_DBGBVR9_EL1,
    HV_SYS_REG_DBGBVR10_EL1,
    HV_SYS_REG_DBGBVR11_EL1,
    HV_SYS_REG_DBGBVR12_EL1,
    HV_SYS_REG_DBGBVR13_EL1,
    HV_SYS_REG_DBGBVR14_EL1,
    HV_SYS_REG_DBGBVR15_EL1,
};

static const uint16_t dbgwcr_regs[] = {
    HV_SYS_REG_DBGWCR0_EL1,
    HV_SYS_REG_DBGWCR1_EL1,
    HV_SYS_REG_DBGWCR2_EL1,
    HV_SYS_REG_DBGWCR3_EL1,
    HV_SYS_REG_DBGWCR4_EL1,
    HV_SYS_REG_DBGWCR5_EL1,
    HV_SYS_REG_DBGWCR6_EL1,
    HV_SYS_REG_DBGWCR7_EL1,
    HV_SYS_REG_DBGWCR8_EL1,
    HV_SYS_REG_DBGWCR9_EL1,
    HV_SYS_REG_DBGWCR10_EL1,
    HV_SYS_REG_DBGWCR11_EL1,
    HV_SYS_REG_DBGWCR12_EL1,
    HV_SYS_REG_DBGWCR13_EL1,
    HV_SYS_REG_DBGWCR14_EL1,
    HV_SYS_REG_DBGWCR15_EL1,
};

static const uint16_t dbgwvr_regs[] = {
    HV_SYS_REG_DBGWVR0_EL1,
    HV_SYS_REG_DBGWVR1_EL1,
    HV_SYS_REG_DBGWVR2_EL1,
    HV_SYS_REG_DBGWVR3_EL1,
    HV_SYS_REG_DBGWVR4_EL1,
    HV_SYS_REG_DBGWVR5_EL1,
    HV_SYS_REG_DBGWVR6_EL1,
    HV_SYS_REG_DBGWVR7_EL1,
    HV_SYS_REG_DBGWVR8_EL1,
    HV_SYS_REG_DBGWVR9_EL1,
    HV_SYS_REG_DBGWVR10_EL1,
    HV_SYS_REG_DBGWVR11_EL1,
    HV_SYS_REG_DBGWVR12_EL1,
    HV_SYS_REG_DBGWVR13_EL1,
    HV_SYS_REG_DBGWVR14_EL1,
    HV_SYS_REG_DBGWVR15_EL1,
};

static inline int hvf_arm_num_brps(hv_vcpu_config_t config)
{
    uint64_t val;
    hv_return_t ret;
    ret = hv_vcpu_config_get_feature_reg(config, HV_FEATURE_REG_ID_AA64DFR0_EL1,
                                         &val);
    assert_hvf_ok(ret);
    return FIELD_EX64(val, ID_AA64DFR0, BRPS) + 1;
}

static inline int hvf_arm_num_wrps(hv_vcpu_config_t config)
{
    uint64_t val;
    hv_return_t ret;
    ret = hv_vcpu_config_get_feature_reg(config, HV_FEATURE_REG_ID_AA64DFR0_EL1,
                                         &val);
    assert_hvf_ok(ret);
    return FIELD_EX64(val, ID_AA64DFR0, WRPS) + 1;
}

void hvf_arm_init_debug(void)
{
    hv_vcpu_config_t config;
    config = hv_vcpu_config_create();

    max_hw_bps = hvf_arm_num_brps(config);
    hw_breakpoints =
        g_array_sized_new(true, true, sizeof(HWBreakpoint), max_hw_bps);

    max_hw_wps = hvf_arm_num_wrps(config);
    hw_watchpoints =
        g_array_sized_new(true, true, sizeof(HWWatchpoint), max_hw_wps);
}

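/*
 * System register accesses trap to EL2 with the register number encoded in
 * the ESR ISS field. SYSREG() packs op0/op1/crn/crm/op2 using that same bit
 * layout, so a trapped MRS/MSR syndrome can be compared directly against the
 * SYSREG_* constants below.
 */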
#define HVF_SYSREG(crn, crm, op0, op1, op2) \
    ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, crn, crm, op0, op1, op2)

#define SYSREG_OP0_SHIFT 20
#define SYSREG_OP0_MASK 0x3
#define SYSREG_OP0(sysreg) ((sysreg >> SYSREG_OP0_SHIFT) & SYSREG_OP0_MASK)
#define SYSREG_OP1_SHIFT 14
#define SYSREG_OP1_MASK 0x7
#define SYSREG_OP1(sysreg) ((sysreg >> SYSREG_OP1_SHIFT) & SYSREG_OP1_MASK)
#define SYSREG_CRN_SHIFT 10
#define SYSREG_CRN_MASK 0xf
#define SYSREG_CRN(sysreg) ((sysreg >> SYSREG_CRN_SHIFT) & SYSREG_CRN_MASK)
#define SYSREG_CRM_SHIFT 1
#define SYSREG_CRM_MASK 0xf
#define SYSREG_CRM(sysreg) ((sysreg >> SYSREG_CRM_SHIFT) & SYSREG_CRM_MASK)
#define SYSREG_OP2_SHIFT 17
#define SYSREG_OP2_MASK 0x7
#define SYSREG_OP2(sysreg) ((sysreg >> SYSREG_OP2_SHIFT) & SYSREG_OP2_MASK)

#define SYSREG(op0, op1, crn, crm, op2) \
    ((op0 << SYSREG_OP0_SHIFT) | \
     (op1 << SYSREG_OP1_SHIFT) | \
     (crn << SYSREG_CRN_SHIFT) | \
     (crm << SYSREG_CRM_SHIFT) | \
     (op2 << SYSREG_OP2_SHIFT))
#define SYSREG_MASK \
    SYSREG(SYSREG_OP0_MASK, \
           SYSREG_OP1_MASK, \
           SYSREG_CRN_MASK, \
           SYSREG_CRM_MASK, \
           SYSREG_OP2_MASK)
#define SYSREG_OSLAR_EL1 SYSREG(2, 0, 1, 0, 4)
#define SYSREG_OSLSR_EL1 SYSREG(2, 0, 1, 1, 4)
#define SYSREG_OSDLR_EL1 SYSREG(2, 0, 1, 3, 4)
#define SYSREG_CNTPCT_EL0 SYSREG(3, 3, 14, 0, 1)
#define SYSREG_PMCR_EL0 SYSREG(3, 3, 9, 12, 0)
#define SYSREG_PMUSERENR_EL0 SYSREG(3, 3, 9, 14, 0)
#define SYSREG_PMCNTENSET_EL0 SYSREG(3, 3, 9, 12, 1)
#define SYSREG_PMCNTENCLR_EL0 SYSREG(3, 3, 9, 12, 2)
#define SYSREG_PMINTENCLR_EL1 SYSREG(3, 0, 9, 14, 2)
#define SYSREG_PMOVSCLR_EL0 SYSREG(3, 3, 9, 12, 3)
#define SYSREG_PMSWINC_EL0 SYSREG(3, 3, 9, 12, 4)
#define SYSREG_PMSELR_EL0 SYSREG(3, 3, 9, 12, 5)
#define SYSREG_PMCEID0_EL0 SYSREG(3, 3, 9, 12, 6)
#define SYSREG_PMCEID1_EL0 SYSREG(3, 3, 9, 12, 7)
#define SYSREG_PMCCNTR_EL0 SYSREG(3, 3, 9, 13, 0)
#define SYSREG_PMCCFILTR_EL0 SYSREG(3, 3, 14, 15, 7)

#define SYSREG_ICC_AP0R0_EL1 SYSREG(3, 0, 12, 8, 4)
#define SYSREG_ICC_AP0R1_EL1 SYSREG(3, 0, 12, 8, 5)
#define SYSREG_ICC_AP0R2_EL1 SYSREG(3, 0, 12, 8, 6)
#define SYSREG_ICC_AP0R3_EL1 SYSREG(3, 0, 12, 8, 7)
#define SYSREG_ICC_AP1R0_EL1 SYSREG(3, 0, 12, 9, 0)
#define SYSREG_ICC_AP1R1_EL1 SYSREG(3, 0, 12, 9, 1)
#define SYSREG_ICC_AP1R2_EL1 SYSREG(3, 0, 12, 9, 2)
#define SYSREG_ICC_AP1R3_EL1 SYSREG(3, 0, 12, 9, 3)
#define SYSREG_ICC_ASGI1R_EL1 SYSREG(3, 0, 12, 11, 6)
#define SYSREG_ICC_BPR0_EL1 SYSREG(3, 0, 12, 8, 3)
#define SYSREG_ICC_BPR1_EL1 SYSREG(3, 0, 12, 12, 3)
#define SYSREG_ICC_CTLR_EL1 SYSREG(3, 0, 12, 12, 4)
#define SYSREG_ICC_DIR_EL1 SYSREG(3, 0, 12, 11, 1)
#define SYSREG_ICC_EOIR0_EL1 SYSREG(3, 0, 12, 8, 1)
#define SYSREG_ICC_EOIR1_EL1 SYSREG(3, 0, 12, 12, 1)
#define SYSREG_ICC_HPPIR0_EL1 SYSREG(3, 0, 12, 8, 2)
#define SYSREG_ICC_HPPIR1_EL1 SYSREG(3, 0, 12, 12, 2)
#define SYSREG_ICC_IAR0_EL1 SYSREG(3, 0, 12, 8, 0)
#define SYSREG_ICC_IAR1_EL1 SYSREG(3, 0, 12, 12, 0)
#define SYSREG_ICC_IGRPEN0_EL1 SYSREG(3, 0, 12, 12, 6)
#define SYSREG_ICC_IGRPEN1_EL1 SYSREG(3, 0, 12, 12, 7)
#define SYSREG_ICC_PMR_EL1 SYSREG(3, 0, 4, 6, 0)
#define SYSREG_ICC_RPR_EL1 SYSREG(3, 0, 12, 11, 3)
#define SYSREG_ICC_SGI0R_EL1 SYSREG(3, 0, 12, 11, 7)
#define SYSREG_ICC_SGI1R_EL1 SYSREG(3, 0, 12, 11, 5)
#define SYSREG_ICC_SRE_EL1 SYSREG(3, 0, 12, 12, 5)

#define SYSREG_MDSCR_EL1 SYSREG(2, 0, 0, 2, 2)
#define SYSREG_DBGBVR0_EL1 SYSREG(2, 0, 0, 0, 4)
#define SYSREG_DBGBCR0_EL1 SYSREG(2, 0, 0, 0, 5)
#define SYSREG_DBGWVR0_EL1 SYSREG(2, 0, 0, 0, 6)
#define SYSREG_DBGWCR0_EL1 SYSREG(2, 0, 0, 0, 7)
#define SYSREG_DBGBVR1_EL1 SYSREG(2, 0, 0, 1, 4)
#define SYSREG_DBGBCR1_EL1 SYSREG(2, 0, 0, 1, 5)
#define SYSREG_DBGWVR1_EL1 SYSREG(2, 0, 0, 1, 6)
#define SYSREG_DBGWCR1_EL1 SYSREG(2, 0, 0, 1, 7)
#define SYSREG_DBGBVR2_EL1 SYSREG(2, 0, 0, 2, 4)
#define SYSREG_DBGBCR2_EL1 SYSREG(2, 0, 0, 2, 5)
#define SYSREG_DBGWVR2_EL1 SYSREG(2, 0, 0, 2, 6)
#define SYSREG_DBGWCR2_EL1 SYSREG(2, 0, 0, 2, 7)
#define SYSREG_DBGBVR3_EL1 SYSREG(2, 0, 0, 3, 4)
#define SYSREG_DBGBCR3_EL1 SYSREG(2, 0, 0, 3, 5)
#define SYSREG_DBGWVR3_EL1 SYSREG(2, 0, 0, 3, 6)
#define SYSREG_DBGWCR3_EL1 SYSREG(2, 0, 0, 3, 7)
#define SYSREG_DBGBVR4_EL1 SYSREG(2, 0, 0, 4, 4)
#define SYSREG_DBGBCR4_EL1 SYSREG(2, 0, 0, 4, 5)
#define SYSREG_DBGWVR4_EL1 SYSREG(2, 0, 0, 4, 6)
#define SYSREG_DBGWCR4_EL1 SYSREG(2, 0, 0, 4, 7)
#define SYSREG_DBGBVR5_EL1 SYSREG(2, 0, 0, 5, 4)
#define SYSREG_DBGBCR5_EL1 SYSREG(2, 0, 0, 5, 5)
#define SYSREG_DBGWVR5_EL1 SYSREG(2, 0, 0, 5, 6)
#define SYSREG_DBGWCR5_EL1 SYSREG(2, 0, 0, 5, 7)
#define SYSREG_DBGBVR6_EL1 SYSREG(2, 0, 0, 6, 4)
#define SYSREG_DBGBCR6_EL1 SYSREG(2, 0, 0, 6, 5)
#define SYSREG_DBGWVR6_EL1 SYSREG(2, 0, 0, 6, 6)
#define SYSREG_DBGWCR6_EL1 SYSREG(2, 0, 0, 6, 7)
#define SYSREG_DBGBVR7_EL1 SYSREG(2, 0, 0, 7, 4)
#define SYSREG_DBGBCR7_EL1 SYSREG(2, 0, 0, 7, 5)
#define SYSREG_DBGWVR7_EL1 SYSREG(2, 0, 0, 7, 6)
#define SYSREG_DBGWCR7_EL1 SYSREG(2, 0, 0, 7, 7)
#define SYSREG_DBGBVR8_EL1 SYSREG(2, 0, 0, 8, 4)
#define SYSREG_DBGBCR8_EL1 SYSREG(2, 0, 0, 8, 5)
#define SYSREG_DBGWVR8_EL1 SYSREG(2, 0, 0, 8, 6)
#define SYSREG_DBGWCR8_EL1 SYSREG(2, 0, 0, 8, 7)
#define SYSREG_DBGBVR9_EL1 SYSREG(2, 0, 0, 9, 4)
#define SYSREG_DBGBCR9_EL1 SYSREG(2, 0, 0, 9, 5)
#define SYSREG_DBGWVR9_EL1 SYSREG(2, 0, 0, 9, 6)
#define SYSREG_DBGWCR9_EL1 SYSREG(2, 0, 0, 9, 7)
#define SYSREG_DBGBVR10_EL1 SYSREG(2, 0, 0, 10, 4)
#define SYSREG_DBGBCR10_EL1 SYSREG(2, 0, 0, 10, 5)
#define SYSREG_DBGWVR10_EL1 SYSREG(2, 0, 0, 10, 6)
#define SYSREG_DBGWCR10_EL1 SYSREG(2, 0, 0, 10, 7)
#define SYSREG_DBGBVR11_EL1 SYSREG(2, 0, 0, 11, 4)
#define SYSREG_DBGBCR11_EL1 SYSREG(2, 0, 0, 11, 5)
#define SYSREG_DBGWVR11_EL1 SYSREG(2, 0, 0, 11, 6)
#define SYSREG_DBGWCR11_EL1 SYSREG(2, 0, 0, 11, 7)
#define SYSREG_DBGBVR12_EL1 SYSREG(2, 0, 0, 12, 4)
#define SYSREG_DBGBCR12_EL1 SYSREG(2, 0, 0, 12, 5)
#define SYSREG_DBGWVR12_EL1 SYSREG(2, 0, 0, 12, 6)
#define SYSREG_DBGWCR12_EL1 SYSREG(2, 0, 0, 12, 7)
#define SYSREG_DBGBVR13_EL1 SYSREG(2, 0, 0, 13, 4)
#define SYSREG_DBGBCR13_EL1 SYSREG(2, 0, 0, 13, 5)
#define SYSREG_DBGWVR13_EL1 SYSREG(2, 0, 0, 13, 6)
#define SYSREG_DBGWCR13_EL1 SYSREG(2, 0, 0, 13, 7)
#define SYSREG_DBGBVR14_EL1 SYSREG(2, 0, 0, 14, 4)
#define SYSREG_DBGBCR14_EL1 SYSREG(2, 0, 0, 14, 5)
#define SYSREG_DBGWVR14_EL1 SYSREG(2, 0, 0, 14, 6)
#define SYSREG_DBGWCR14_EL1 SYSREG(2, 0, 0, 14, 7)
#define SYSREG_DBGBVR15_EL1 SYSREG(2, 0, 0, 15, 4)
#define SYSREG_DBGBCR15_EL1 SYSREG(2, 0, 0, 15, 5)
#define SYSREG_DBGWVR15_EL1 SYSREG(2, 0, 0, 15, 6)
#define SYSREG_DBGWCR15_EL1 SYSREG(2, 0, 0, 15, 7)

#define WFX_IS_WFE (1 << 0)

#define TMR_CTL_ENABLE  (1 << 0)
#define TMR_CTL_IMASK   (1 << 1)
#define TMR_CTL_ISTATUS (1 << 2)

static void hvf_wfi(CPUState *cpu);

static uint32_t chosen_ipa_bit_size;

typedef struct HVFVTimer {
    /* Vtimer value during migration and paused state */
    uint64_t vtimer_val;
} HVFVTimer;

static HVFVTimer vtimer;

typedef struct ARMHostCPUFeatures {
    ARMISARegisters isar;
    uint64_t features;
    uint64_t midr;
    uint32_t reset_sctlr;
    const char *dtb_compatible;
} ARMHostCPUFeatures;

static ARMHostCPUFeatures arm_host_cpu_features;

struct hvf_reg_match {
    int reg;
    uint64_t offset;
};

static const struct hvf_reg_match hvf_reg_match[] = {
    { HV_REG_X0,   offsetof(CPUARMState, xregs[0]) },
    { HV_REG_X1,   offsetof(CPUARMState, xregs[1]) },
    { HV_REG_X2,   offsetof(CPUARMState, xregs[2]) },
    { HV_REG_X3,   offsetof(CPUARMState, xregs[3]) },
    { HV_REG_X4,   offsetof(CPUARMState, xregs[4]) },
    { HV_REG_X5,   offsetof(CPUARMState, xregs[5]) },
    { HV_REG_X6,   offsetof(CPUARMState, xregs[6]) },
    { HV_REG_X7,   offsetof(CPUARMState, xregs[7]) },
    { HV_REG_X8,   offsetof(CPUARMState, xregs[8]) },
    { HV_REG_X9,   offsetof(CPUARMState, xregs[9]) },
    { HV_REG_X10,  offsetof(CPUARMState, xregs[10]) },
    { HV_REG_X11,  offsetof(CPUARMState, xregs[11]) },
    { HV_REG_X12,  offsetof(CPUARMState, xregs[12]) },
    { HV_REG_X13,  offsetof(CPUARMState, xregs[13]) },
    { HV_REG_X14,  offsetof(CPUARMState, xregs[14]) },
    { HV_REG_X15,  offsetof(CPUARMState, xregs[15]) },
    { HV_REG_X16,  offsetof(CPUARMState, xregs[16]) },
    { HV_REG_X17,  offsetof(CPUARMState, xregs[17]) },
    { HV_REG_X18,  offsetof(CPUARMState, xregs[18]) },
    { HV_REG_X19,  offsetof(CPUARMState, xregs[19]) },
    { HV_REG_X20,  offsetof(CPUARMState, xregs[20]) },
    { HV_REG_X21,  offsetof(CPUARMState, xregs[21]) },
    { HV_REG_X22,  offsetof(CPUARMState, xregs[22]) },
    { HV_REG_X23,  offsetof(CPUARMState, xregs[23]) },
    { HV_REG_X24,  offsetof(CPUARMState, xregs[24]) },
    { HV_REG_X25,  offsetof(CPUARMState, xregs[25]) },
    { HV_REG_X26,  offsetof(CPUARMState, xregs[26]) },
    { HV_REG_X27,  offsetof(CPUARMState, xregs[27]) },
    { HV_REG_X28,  offsetof(CPUARMState, xregs[28]) },
    { HV_REG_X29,  offsetof(CPUARMState, xregs[29]) },
    { HV_REG_X30,  offsetof(CPUARMState, xregs[30]) },
    { HV_REG_PC,   offsetof(CPUARMState, pc) },
};

static const struct hvf_reg_match hvf_fpreg_match[] = {
    { HV_SIMD_FP_REG_Q0,  offsetof(CPUARMState, vfp.zregs[0]) },
    { HV_SIMD_FP_REG_Q1,  offsetof(CPUARMState, vfp.zregs[1]) },
    { HV_SIMD_FP_REG_Q2,  offsetof(CPUARMState, vfp.zregs[2]) },
    { HV_SIMD_FP_REG_Q3,  offsetof(CPUARMState, vfp.zregs[3]) },
    { HV_SIMD_FP_REG_Q4,  offsetof(CPUARMState, vfp.zregs[4]) },
    { HV_SIMD_FP_REG_Q5,  offsetof(CPUARMState, vfp.zregs[5]) },
    { HV_SIMD_FP_REG_Q6,  offsetof(CPUARMState, vfp.zregs[6]) },
    { HV_SIMD_FP_REG_Q7,  offsetof(CPUARMState, vfp.zregs[7]) },
    { HV_SIMD_FP_REG_Q8,  offsetof(CPUARMState, vfp.zregs[8]) },
    { HV_SIMD_FP_REG_Q9,  offsetof(CPUARMState, vfp.zregs[9]) },
    { HV_SIMD_FP_REG_Q10, offsetof(CPUARMState, vfp.zregs[10]) },
    { HV_SIMD_FP_REG_Q11, offsetof(CPUARMState, vfp.zregs[11]) },
    { HV_SIMD_FP_REG_Q12, offsetof(CPUARMState, vfp.zregs[12]) },
    { HV_SIMD_FP_REG_Q13, offsetof(CPUARMState, vfp.zregs[13]) },
    { HV_SIMD_FP_REG_Q14, offsetof(CPUARMState, vfp.zregs[14]) },
    { HV_SIMD_FP_REG_Q15, offsetof(CPUARMState, vfp.zregs[15]) },
    { HV_SIMD_FP_REG_Q16, offsetof(CPUARMState, vfp.zregs[16]) },
    { HV_SIMD_FP_REG_Q17, offsetof(CPUARMState, vfp.zregs[17]) },
    { HV_SIMD_FP_REG_Q18, offsetof(CPUARMState, vfp.zregs[18]) },
    { HV_SIMD_FP_REG_Q19, offsetof(CPUARMState, vfp.zregs[19]) },
    { HV_SIMD_FP_REG_Q20, offsetof(CPUARMState, vfp.zregs[20]) },
    { HV_SIMD_FP_REG_Q21, offsetof(CPUARMState, vfp.zregs[21]) },
    { HV_SIMD_FP_REG_Q22, offsetof(CPUARMState, vfp.zregs[22]) },
    { HV_SIMD_FP_REG_Q23, offsetof(CPUARMState, vfp.zregs[23]) },
    { HV_SIMD_FP_REG_Q24, offsetof(CPUARMState, vfp.zregs[24]) },
    { HV_SIMD_FP_REG_Q25, offsetof(CPUARMState, vfp.zregs[25]) },
    { HV_SIMD_FP_REG_Q26, offsetof(CPUARMState, vfp.zregs[26]) },
    { HV_SIMD_FP_REG_Q27, offsetof(CPUARMState, vfp.zregs[27]) },
    { HV_SIMD_FP_REG_Q28, offsetof(CPUARMState, vfp.zregs[28]) },
    { HV_SIMD_FP_REG_Q29, offsetof(CPUARMState, vfp.zregs[29]) },
    { HV_SIMD_FP_REG_Q30, offsetof(CPUARMState, vfp.zregs[30]) },
    { HV_SIMD_FP_REG_Q31, offsetof(CPUARMState, vfp.zregs[31]) },
};

struct hvf_sreg_match {
    int reg;
    uint32_t key;
    uint32_t cp_idx;
};

static struct hvf_sreg_match hvf_sreg_match[] = {
    { HV_SYS_REG_DBGBVR0_EL1, HVF_SYSREG(0, 0, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR0_EL1, HVF_SYSREG(0, 0, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR0_EL1, HVF_SYSREG(0, 0, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR0_EL1, HVF_SYSREG(0, 0, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR1_EL1, HVF_SYSREG(0, 1, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR1_EL1, HVF_SYSREG(0, 1, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR1_EL1, HVF_SYSREG(0, 1, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR1_EL1, HVF_SYSREG(0, 1, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR2_EL1, HVF_SYSREG(0, 2, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR2_EL1, HVF_SYSREG(0, 2, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR2_EL1, HVF_SYSREG(0, 2, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR2_EL1, HVF_SYSREG(0, 2, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR3_EL1, HVF_SYSREG(0, 3, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR3_EL1, HVF_SYSREG(0, 3, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR3_EL1, HVF_SYSREG(0, 3, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR3_EL1, HVF_SYSREG(0, 3, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR4_EL1, HVF_SYSREG(0, 4, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR4_EL1, HVF_SYSREG(0, 4, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR4_EL1, HVF_SYSREG(0, 4, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR4_EL1, HVF_SYSREG(0, 4, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR5_EL1, HVF_SYSREG(0, 5, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR5_EL1, HVF_SYSREG(0, 5, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR5_EL1, HVF_SYSREG(0, 5, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR5_EL1, HVF_SYSREG(0, 5, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR6_EL1, HVF_SYSREG(0, 6, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR6_EL1, HVF_SYSREG(0, 6, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR6_EL1, HVF_SYSREG(0, 6, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR6_EL1, HVF_SYSREG(0, 6, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR7_EL1, HVF_SYSREG(0, 7, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR7_EL1, HVF_SYSREG(0, 7, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR7_EL1, HVF_SYSREG(0, 7, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR7_EL1, HVF_SYSREG(0, 7, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR8_EL1, HVF_SYSREG(0, 8, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR8_EL1, HVF_SYSREG(0, 8, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR8_EL1, HVF_SYSREG(0, 8, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR8_EL1, HVF_SYSREG(0, 8, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR9_EL1, HVF_SYSREG(0, 9, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR9_EL1, HVF_SYSREG(0, 9, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR9_EL1, HVF_SYSREG(0, 9, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR9_EL1, HVF_SYSREG(0, 9, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR10_EL1, HVF_SYSREG(0, 10, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR10_EL1, HVF_SYSREG(0, 10, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR10_EL1, HVF_SYSREG(0, 10, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR10_EL1, HVF_SYSREG(0, 10, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR11_EL1, HVF_SYSREG(0, 11, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR11_EL1, HVF_SYSREG(0, 11, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR11_EL1, HVF_SYSREG(0, 11, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR11_EL1, HVF_SYSREG(0, 11, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR12_EL1, HVF_SYSREG(0, 12, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR12_EL1, HVF_SYSREG(0, 12, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR12_EL1, HVF_SYSREG(0, 12, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR12_EL1, HVF_SYSREG(0, 12, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR13_EL1, HVF_SYSREG(0, 13, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR13_EL1, HVF_SYSREG(0, 13, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR13_EL1, HVF_SYSREG(0, 13, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR13_EL1, HVF_SYSREG(0, 13, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR14_EL1, HVF_SYSREG(0, 14, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR14_EL1, HVF_SYSREG(0, 14, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR14_EL1, HVF_SYSREG(0, 14, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR14_EL1, HVF_SYSREG(0, 14, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR15_EL1, HVF_SYSREG(0, 15, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR15_EL1, HVF_SYSREG(0, 15, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR15_EL1, HVF_SYSREG(0, 15, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR15_EL1, HVF_SYSREG(0, 15, 2, 0, 7) },

#ifdef SYNC_NO_RAW_REGS
    /*
     * The registers below are manually synced on init because they are
     * marked as NO_RAW. We still list them to make number space sync easier.
     */
    { HV_SYS_REG_MDCCINT_EL1, HVF_SYSREG(0, 2, 2, 0, 0) },
    { HV_SYS_REG_MIDR_EL1, HVF_SYSREG(0, 0, 3, 0, 0) },
    { HV_SYS_REG_MPIDR_EL1, HVF_SYSREG(0, 0, 3, 0, 5) },
    { HV_SYS_REG_ID_AA64PFR0_EL1, HVF_SYSREG(0, 4, 3, 0, 0) },
#endif
    { HV_SYS_REG_ID_AA64PFR1_EL1, HVF_SYSREG(0, 4, 3, 0, 1) },
    { HV_SYS_REG_ID_AA64DFR0_EL1, HVF_SYSREG(0, 5, 3, 0, 0) },
    { HV_SYS_REG_ID_AA64DFR1_EL1, HVF_SYSREG(0, 5, 3, 0, 1) },
    { HV_SYS_REG_ID_AA64ISAR0_EL1, HVF_SYSREG(0, 6, 3, 0, 0) },
    { HV_SYS_REG_ID_AA64ISAR1_EL1, HVF_SYSREG(0, 6, 3, 0, 1) },
#ifdef SYNC_NO_MMFR0
    /* We keep the hardware MMFR0 around. HW limits are there anyway */
    { HV_SYS_REG_ID_AA64MMFR0_EL1, HVF_SYSREG(0, 7, 3, 0, 0) },
#endif
    { HV_SYS_REG_ID_AA64MMFR1_EL1, HVF_SYSREG(0, 7, 3, 0, 1) },
    { HV_SYS_REG_ID_AA64MMFR2_EL1, HVF_SYSREG(0, 7, 3, 0, 2) },
    /* Add ID_AA64MMFR3_EL1 here when HVF supports it */

    { HV_SYS_REG_MDSCR_EL1, HVF_SYSREG(0, 2, 2, 0, 2) },
    { HV_SYS_REG_SCTLR_EL1, HVF_SYSREG(1, 0, 3, 0, 0) },
    { HV_SYS_REG_CPACR_EL1, HVF_SYSREG(1, 0, 3, 0, 2) },
    { HV_SYS_REG_TTBR0_EL1, HVF_SYSREG(2, 0, 3, 0, 0) },
    { HV_SYS_REG_TTBR1_EL1, HVF_SYSREG(2, 0, 3, 0, 1) },
    { HV_SYS_REG_TCR_EL1, HVF_SYSREG(2, 0, 3, 0, 2) },

    { HV_SYS_REG_APIAKEYLO_EL1, HVF_SYSREG(2, 1, 3, 0, 0) },
    { HV_SYS_REG_APIAKEYHI_EL1, HVF_SYSREG(2, 1, 3, 0, 1) },
    { HV_SYS_REG_APIBKEYLO_EL1, HVF_SYSREG(2, 1, 3, 0, 2) },
    { HV_SYS_REG_APIBKEYHI_EL1, HVF_SYSREG(2, 1, 3, 0, 3) },
    { HV_SYS_REG_APDAKEYLO_EL1, HVF_SYSREG(2, 2, 3, 0, 0) },
    { HV_SYS_REG_APDAKEYHI_EL1, HVF_SYSREG(2, 2, 3, 0, 1) },
    { HV_SYS_REG_APDBKEYLO_EL1, HVF_SYSREG(2, 2, 3, 0, 2) },
    { HV_SYS_REG_APDBKEYHI_EL1, HVF_SYSREG(2, 2, 3, 0, 3) },
    { HV_SYS_REG_APGAKEYLO_EL1, HVF_SYSREG(2, 3, 3, 0, 0) },
    { HV_SYS_REG_APGAKEYHI_EL1, HVF_SYSREG(2, 3, 3, 0, 1) },

    { HV_SYS_REG_SPSR_EL1, HVF_SYSREG(4, 0, 3, 0, 0) },
    { HV_SYS_REG_ELR_EL1, HVF_SYSREG(4, 0, 3, 0, 1) },
    { HV_SYS_REG_SP_EL0, HVF_SYSREG(4, 1, 3, 0, 0) },
    { HV_SYS_REG_AFSR0_EL1, HVF_SYSREG(5, 1, 3, 0, 0) },
    { HV_SYS_REG_AFSR1_EL1, HVF_SYSREG(5, 1, 3, 0, 1) },
    { HV_SYS_REG_ESR_EL1, HVF_SYSREG(5, 2, 3, 0, 0) },
    { HV_SYS_REG_FAR_EL1, HVF_SYSREG(6, 0, 3, 0, 0) },
    { HV_SYS_REG_PAR_EL1, HVF_SYSREG(7, 4, 3, 0, 0) },
    { HV_SYS_REG_MAIR_EL1, HVF_SYSREG(10, 2, 3, 0, 0) },
    { HV_SYS_REG_AMAIR_EL1, HVF_SYSREG(10, 3, 3, 0, 0) },
    { HV_SYS_REG_VBAR_EL1, HVF_SYSREG(12, 0, 3, 0, 0) },
    { HV_SYS_REG_CONTEXTIDR_EL1, HVF_SYSREG(13, 0, 3, 0, 1) },
    { HV_SYS_REG_TPIDR_EL1, HVF_SYSREG(13, 0, 3, 0, 4) },
    { HV_SYS_REG_CNTKCTL_EL1, HVF_SYSREG(14, 1, 3, 0, 0) },
    { HV_SYS_REG_CSSELR_EL1, HVF_SYSREG(0, 0, 3, 2, 0) },
    { HV_SYS_REG_TPIDR_EL0, HVF_SYSREG(13, 0, 3, 3, 2) },
    { HV_SYS_REG_TPIDRRO_EL0, HVF_SYSREG(13, 0, 3, 3, 3) },
    { HV_SYS_REG_CNTV_CTL_EL0, HVF_SYSREG(14, 3, 3, 3, 1) },
    { HV_SYS_REG_CNTV_CVAL_EL0, HVF_SYSREG(14, 3, 3, 3, 2) },
    { HV_SYS_REG_SP_EL1, HVF_SYSREG(4, 1, 3, 4, 0) },
};

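/*
 * Copy the vCPU state out of Hypervisor.framework into QEMU's CPUARMState:
 * general purpose registers, FP/SIMD registers, FPCR/FPSR, PSTATE and the
 * system registers listed in hvf_sreg_match.
 */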
int hvf_get_registers(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;
    hv_return_t ret;
    uint64_t val;
    hv_simd_fp_uchar16_t fpval;
    int i;

    for (i = 0; i < ARRAY_SIZE(hvf_reg_match); i++) {
        ret = hv_vcpu_get_reg(cpu->accel->fd, hvf_reg_match[i].reg, &val);
        *(uint64_t *)((void *)env + hvf_reg_match[i].offset) = val;
        assert_hvf_ok(ret);
    }

    for (i = 0; i < ARRAY_SIZE(hvf_fpreg_match); i++) {
        ret = hv_vcpu_get_simd_fp_reg(cpu->accel->fd, hvf_fpreg_match[i].reg,
                                      &fpval);
        memcpy((void *)env + hvf_fpreg_match[i].offset, &fpval, sizeof(fpval));
        assert_hvf_ok(ret);
    }

    val = 0;
    ret = hv_vcpu_get_reg(cpu->accel->fd, HV_REG_FPCR, &val);
    assert_hvf_ok(ret);
    vfp_set_fpcr(env, val);

    val = 0;
    ret = hv_vcpu_get_reg(cpu->accel->fd, HV_REG_FPSR, &val);
    assert_hvf_ok(ret);
    vfp_set_fpsr(env, val);

    ret = hv_vcpu_get_reg(cpu->accel->fd, HV_REG_CPSR, &val);
    assert_hvf_ok(ret);
    pstate_write(env, val);

    for (i = 0; i < ARRAY_SIZE(hvf_sreg_match); i++) {
        if (hvf_sreg_match[i].cp_idx == -1) {
            continue;
        }

        if (cpu->accel->guest_debug_enabled) {
            /* Handle debug registers */
            switch (hvf_sreg_match[i].reg) {
            case HV_SYS_REG_DBGBVR0_EL1:
            case HV_SYS_REG_DBGBCR0_EL1:
            case HV_SYS_REG_DBGWVR0_EL1:
            case HV_SYS_REG_DBGWCR0_EL1:
            case HV_SYS_REG_DBGBVR1_EL1:
            case HV_SYS_REG_DBGBCR1_EL1:
            case HV_SYS_REG_DBGWVR1_EL1:
            case HV_SYS_REG_DBGWCR1_EL1:
            case HV_SYS_REG_DBGBVR2_EL1:
            case HV_SYS_REG_DBGBCR2_EL1:
            case HV_SYS_REG_DBGWVR2_EL1:
            case HV_SYS_REG_DBGWCR2_EL1:
            case HV_SYS_REG_DBGBVR3_EL1:
            case HV_SYS_REG_DBGBCR3_EL1:
            case HV_SYS_REG_DBGWVR3_EL1:
            case HV_SYS_REG_DBGWCR3_EL1:
            case HV_SYS_REG_DBGBVR4_EL1:
            case HV_SYS_REG_DBGBCR4_EL1:
            case HV_SYS_REG_DBGWVR4_EL1:
            case HV_SYS_REG_DBGWCR4_EL1:
            case HV_SYS_REG_DBGBVR5_EL1:
            case HV_SYS_REG_DBGBCR5_EL1:
            case HV_SYS_REG_DBGWVR5_EL1:
            case HV_SYS_REG_DBGWCR5_EL1:
            case HV_SYS_REG_DBGBVR6_EL1:
            case HV_SYS_REG_DBGBCR6_EL1:
            case HV_SYS_REG_DBGWVR6_EL1:
            case HV_SYS_REG_DBGWCR6_EL1:
            case HV_SYS_REG_DBGBVR7_EL1:
            case HV_SYS_REG_DBGBCR7_EL1:
            case HV_SYS_REG_DBGWVR7_EL1:
            case HV_SYS_REG_DBGWCR7_EL1:
            case HV_SYS_REG_DBGBVR8_EL1:
            case HV_SYS_REG_DBGBCR8_EL1:
            case HV_SYS_REG_DBGWVR8_EL1:
            case HV_SYS_REG_DBGWCR8_EL1:
            case HV_SYS_REG_DBGBVR9_EL1:
            case HV_SYS_REG_DBGBCR9_EL1:
            case HV_SYS_REG_DBGWVR9_EL1:
            case HV_SYS_REG_DBGWCR9_EL1:
            case HV_SYS_REG_DBGBVR10_EL1:
            case HV_SYS_REG_DBGBCR10_EL1:
            case HV_SYS_REG_DBGWVR10_EL1:
            case HV_SYS_REG_DBGWCR10_EL1:
            case HV_SYS_REG_DBGBVR11_EL1:
            case HV_SYS_REG_DBGBCR11_EL1:
            case HV_SYS_REG_DBGWVR11_EL1:
            case HV_SYS_REG_DBGWCR11_EL1:
            case HV_SYS_REG_DBGBVR12_EL1:
            case HV_SYS_REG_DBGBCR12_EL1:
            case HV_SYS_REG_DBGWVR12_EL1:
            case HV_SYS_REG_DBGWCR12_EL1:
            case HV_SYS_REG_DBGBVR13_EL1:
            case HV_SYS_REG_DBGBCR13_EL1:
            case HV_SYS_REG_DBGWVR13_EL1:
            case HV_SYS_REG_DBGWCR13_EL1:
            case HV_SYS_REG_DBGBVR14_EL1:
            case HV_SYS_REG_DBGBCR14_EL1:
            case HV_SYS_REG_DBGWVR14_EL1:
            case HV_SYS_REG_DBGWCR14_EL1:
            case HV_SYS_REG_DBGBVR15_EL1:
            case HV_SYS_REG_DBGBCR15_EL1:
            case HV_SYS_REG_DBGWVR15_EL1:
            case HV_SYS_REG_DBGWCR15_EL1: {
                /*
                 * If the guest is being debugged, the vCPU's debug registers
                 * are holding the gdbstub's view of the registers (set in
                 * hvf_arch_update_guest_debug()).
                 * Since the environment is used to store only the guest's view
                 * of the registers, don't update it with the values from the
                 * vCPU but simply keep the values from the previous
                 * environment.
                 */
                const ARMCPRegInfo *ri;
                ri = get_arm_cp_reginfo(arm_cpu->cp_regs, hvf_sreg_match[i].key);
                val = read_raw_cp_reg(env, ri);

                arm_cpu->cpreg_values[hvf_sreg_match[i].cp_idx] = val;
                continue;
            }
            }
        }

        ret = hv_vcpu_get_sys_reg(cpu->accel->fd, hvf_sreg_match[i].reg, &val);
        assert_hvf_ok(ret);

        arm_cpu->cpreg_values[hvf_sreg_match[i].cp_idx] = val;
    }
    assert(write_list_to_cpustate(arm_cpu));

    aarch64_restore_sp(env, arm_current_el(env));

    return 0;
}

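/*
 * Mirror of hvf_get_registers(): push QEMU's CPUARMState back into the
 * Hypervisor.framework vCPU, along with the vtimer offset.
 */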
int hvf_put_registers(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;
    hv_return_t ret;
    uint64_t val;
    hv_simd_fp_uchar16_t fpval;
    int i;

    for (i = 0; i < ARRAY_SIZE(hvf_reg_match); i++) {
        val = *(uint64_t *)((void *)env + hvf_reg_match[i].offset);
        ret = hv_vcpu_set_reg(cpu->accel->fd, hvf_reg_match[i].reg, val);
        assert_hvf_ok(ret);
    }

    for (i = 0; i < ARRAY_SIZE(hvf_fpreg_match); i++) {
        memcpy(&fpval, (void *)env + hvf_fpreg_match[i].offset, sizeof(fpval));
        ret = hv_vcpu_set_simd_fp_reg(cpu->accel->fd, hvf_fpreg_match[i].reg,
                                      fpval);
        assert_hvf_ok(ret);
    }

    ret = hv_vcpu_set_reg(cpu->accel->fd, HV_REG_FPCR, vfp_get_fpcr(env));
    assert_hvf_ok(ret);

    ret = hv_vcpu_set_reg(cpu->accel->fd, HV_REG_FPSR, vfp_get_fpsr(env));
    assert_hvf_ok(ret);

    ret = hv_vcpu_set_reg(cpu->accel->fd, HV_REG_CPSR, pstate_read(env));
    assert_hvf_ok(ret);

    aarch64_save_sp(env, arm_current_el(env));

    assert(write_cpustate_to_list(arm_cpu, false));
    for (i = 0; i < ARRAY_SIZE(hvf_sreg_match); i++) {
        if (hvf_sreg_match[i].cp_idx == -1) {
            continue;
        }

        if (cpu->accel->guest_debug_enabled) {
            /* Handle debug registers */
            switch (hvf_sreg_match[i].reg) {
            case HV_SYS_REG_DBGBVR0_EL1:
            case HV_SYS_REG_DBGBCR0_EL1:
            case HV_SYS_REG_DBGWVR0_EL1:
            case HV_SYS_REG_DBGWCR0_EL1:
            case HV_SYS_REG_DBGBVR1_EL1:
            case HV_SYS_REG_DBGBCR1_EL1:
            case HV_SYS_REG_DBGWVR1_EL1:
            case HV_SYS_REG_DBGWCR1_EL1:
            case HV_SYS_REG_DBGBVR2_EL1:
            case HV_SYS_REG_DBGBCR2_EL1:
            case HV_SYS_REG_DBGWVR2_EL1:
            case HV_SYS_REG_DBGWCR2_EL1:
            case HV_SYS_REG_DBGBVR3_EL1:
            case HV_SYS_REG_DBGBCR3_EL1:
            case HV_SYS_REG_DBGWVR3_EL1:
            case HV_SYS_REG_DBGWCR3_EL1:
            case HV_SYS_REG_DBGBVR4_EL1:
            case HV_SYS_REG_DBGBCR4_EL1:
            case HV_SYS_REG_DBGWVR4_EL1:
            case HV_SYS_REG_DBGWCR4_EL1:
            case HV_SYS_REG_DBGBVR5_EL1:
            case HV_SYS_REG_DBGBCR5_EL1:
            case HV_SYS_REG_DBGWVR5_EL1:
            case HV_SYS_REG_DBGWCR5_EL1:
            case HV_SYS_REG_DBGBVR6_EL1:
            case HV_SYS_REG_DBGBCR6_EL1:
            case HV_SYS_REG_DBGWVR6_EL1:
            case HV_SYS_REG_DBGWCR6_EL1:
            case HV_SYS_REG_DBGBVR7_EL1:
            case HV_SYS_REG_DBGBCR7_EL1:
            case HV_SYS_REG_DBGWVR7_EL1:
            case HV_SYS_REG_DBGWCR7_EL1:
            case HV_SYS_REG_DBGBVR8_EL1:
            case HV_SYS_REG_DBGBCR8_EL1:
            case HV_SYS_REG_DBGWVR8_EL1:
            case HV_SYS_REG_DBGWCR8_EL1:
            case HV_SYS_REG_DBGBVR9_EL1:
            case HV_SYS_REG_DBGBCR9_EL1:
            case HV_SYS_REG_DBGWVR9_EL1:
            case HV_SYS_REG_DBGWCR9_EL1:
            case HV_SYS_REG_DBGBVR10_EL1:
            case HV_SYS_REG_DBGBCR10_EL1:
            case HV_SYS_REG_DBGWVR10_EL1:
            case HV_SYS_REG_DBGWCR10_EL1:
            case HV_SYS_REG_DBGBVR11_EL1:
            case HV_SYS_REG_DBGBCR11_EL1:
            case HV_SYS_REG_DBGWVR11_EL1:
            case HV_SYS_REG_DBGWCR11_EL1:
            case HV_SYS_REG_DBGBVR12_EL1:
            case HV_SYS_REG_DBGBCR12_EL1:
            case HV_SYS_REG_DBGWVR12_EL1:
            case HV_SYS_REG_DBGWCR12_EL1:
            case HV_SYS_REG_DBGBVR13_EL1:
            case HV_SYS_REG_DBGBCR13_EL1:
            case HV_SYS_REG_DBGWVR13_EL1:
            case HV_SYS_REG_DBGWCR13_EL1:
            case HV_SYS_REG_DBGBVR14_EL1:
            case HV_SYS_REG_DBGBCR14_EL1:
            case HV_SYS_REG_DBGWVR14_EL1:
            case HV_SYS_REG_DBGWCR14_EL1:
            case HV_SYS_REG_DBGBVR15_EL1:
            case HV_SYS_REG_DBGBCR15_EL1:
            case HV_SYS_REG_DBGWVR15_EL1:
            case HV_SYS_REG_DBGWCR15_EL1:
                /*
                 * If the guest is being debugged, the vCPU's debug registers
                 * are already holding the gdbstub's view of the registers (set
                 * in hvf_arch_update_guest_debug()).
                 */
                continue;
            }
        }

        val = arm_cpu->cpreg_values[hvf_sreg_match[i].cp_idx];
        ret = hv_vcpu_set_sys_reg(cpu->accel->fd, hvf_sreg_match[i].reg, val);
        assert_hvf_ok(ret);
    }

    ret = hv_vcpu_set_vtimer_offset(cpu->accel->fd, hvf_state->vtimer_offset);
    assert_hvf_ok(ret);

    return 0;
}

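/* Write back any dirty QEMU register state before touching the vCPU. */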
static void flush_cpu_state(CPUState *cpu)
{
    if (cpu->accel->dirty) {
        hvf_put_registers(cpu);
        cpu->accel->dirty = false;
    }
}

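/*
 * Set/get an Xn register by index. rt == 31 encodes XZR/WZR, so writes are
 * discarded and reads return 0 without touching the vCPU.
 */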
static void hvf_set_reg(CPUState *cpu, int rt, uint64_t val)
{
    hv_return_t r;

    flush_cpu_state(cpu);

    if (rt < 31) {
        r = hv_vcpu_set_reg(cpu->accel->fd, HV_REG_X0 + rt, val);
        assert_hvf_ok(r);
    }
}

static uint64_t hvf_get_reg(CPUState *cpu, int rt)
{
    uint64_t val = 0;
    hv_return_t r;

    flush_cpu_state(cpu);

    if (rt < 31) {
        r = hv_vcpu_get_reg(cpu->accel->fd, HV_REG_X0 + rt, &val);
        assert_hvf_ok(r);
    }

    return val;
}

static void clamp_id_aa64mmfr0_parange_to_ipa_size(uint64_t *id_aa64mmfr0)
{
    uint32_t ipa_size = chosen_ipa_bit_size ?
            chosen_ipa_bit_size : hvf_arm_get_max_ipa_bit_size();

    /* Clamp down the PARange to the IPA size the kernel supports. */
    uint8_t index = round_down_to_parange_index(ipa_size);
    *id_aa64mmfr0 = (*id_aa64mmfr0 & ~R_ID_AA64MMFR0_PARANGE_MASK) | index;
}

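/*
 * Probe the host's CPU features by creating a scratch HVF vCPU and reading
 * its ID registers, then adjust the result into the view we want to expose
 * to guests (PARange clamped to the usable IPA size, SME hidden).
 */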
static bool hvf_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
{
    ARMISARegisters host_isar = {};
    const struct isar_regs {
        int reg;
        uint64_t *val;
    } regs[] = {
        { HV_SYS_REG_ID_AA64PFR0_EL1, &host_isar.id_aa64pfr0 },
        { HV_SYS_REG_ID_AA64PFR1_EL1, &host_isar.id_aa64pfr1 },
        { HV_SYS_REG_ID_AA64DFR0_EL1, &host_isar.id_aa64dfr0 },
        { HV_SYS_REG_ID_AA64DFR1_EL1, &host_isar.id_aa64dfr1 },
        { HV_SYS_REG_ID_AA64ISAR0_EL1, &host_isar.id_aa64isar0 },
        { HV_SYS_REG_ID_AA64ISAR1_EL1, &host_isar.id_aa64isar1 },
        /* Add ID_AA64ISAR2_EL1 here when HVF supports it */
        { HV_SYS_REG_ID_AA64MMFR0_EL1, &host_isar.id_aa64mmfr0 },
        { HV_SYS_REG_ID_AA64MMFR1_EL1, &host_isar.id_aa64mmfr1 },
        { HV_SYS_REG_ID_AA64MMFR2_EL1, &host_isar.id_aa64mmfr2 },
        /* Add ID_AA64MMFR3_EL1 here when HVF supports it */
    };
    hv_vcpu_t fd;
    hv_return_t r = HV_SUCCESS;
    hv_vcpu_exit_t *exit;
    int i;

    ahcf->dtb_compatible = "arm,arm-v8";
    ahcf->features = (1ULL << ARM_FEATURE_V8) |
                     (1ULL << ARM_FEATURE_NEON) |
                     (1ULL << ARM_FEATURE_AARCH64) |
                     (1ULL << ARM_FEATURE_PMU) |
                     (1ULL << ARM_FEATURE_GENERIC_TIMER);

    /* We set up a small vcpu to extract host registers */

    if (hv_vcpu_create(&fd, &exit, NULL) != HV_SUCCESS) {
        return false;
    }

    for (i = 0; i < ARRAY_SIZE(regs); i++) {
        r |= hv_vcpu_get_sys_reg(fd, regs[i].reg, regs[i].val);
    }
    r |= hv_vcpu_get_sys_reg(fd, HV_SYS_REG_MIDR_EL1, &ahcf->midr);
    r |= hv_vcpu_destroy(fd);

    clamp_id_aa64mmfr0_parange_to_ipa_size(&host_isar.id_aa64mmfr0);

    /*
     * Disable SME, which is not properly handled by QEMU hvf yet.
     * To allow this through we would need to:
     * - make sure that the SME state is correctly handled in the
     *   get_registers/put_registers functions
     * - get the SME-specific CPU properties to work with accelerators
     *   other than TCG
     * - fix any assumptions we made that SME implies SVE (since
     *   on the M4 there is SME but not SVE)
     */
    host_isar.id_aa64pfr1 &= ~R_ID_AA64PFR1_SME_MASK;

    ahcf->isar = host_isar;

    /*
     * A scratch vCPU returns SCTLR 0, so let's fill our default with the M1
     * boot SCTLR from https://github.com/AsahiLinux/m1n1/issues/97
     */
    ahcf->reset_sctlr = 0x30100180;
    /*
     * SPAN is disabled by default when SCTLR.SPAN=1. To improve compatibility,
     * let's disable it on boot and then allow guest software to turn it on by
     * setting it to 0.
     */
    ahcf->reset_sctlr |= 0x00800000;

    /* Make sure we don't advertise AArch32 support for EL0/EL1 */
    if ((host_isar.id_aa64pfr0 & 0xff) != 0x11) {
        return false;
    }

    return r == HV_SUCCESS;
}

uint32_t hvf_arm_get_default_ipa_bit_size(void)
{
    uint32_t default_ipa_size;
    hv_return_t ret = hv_vm_config_get_default_ipa_size(&default_ipa_size);
    assert_hvf_ok(ret);

    return default_ipa_size;
}

uint32_t hvf_arm_get_max_ipa_bit_size(void)
{
    uint32_t max_ipa_size;
    hv_return_t ret = hv_vm_config_get_max_ipa_size(&max_ipa_size);
    assert_hvf_ok(ret);

    /*
     * We clamp any IPA size we want to back the VM with to a valid PARange
     * value so the guest doesn't try and map memory outside of the valid
     * range. This logic just clamps the passed in IPA bit size to the first
     * valid PARange value <= to it.
     */
    return round_down_to_parange_bit_size(max_ipa_size);
}

void hvf_arm_set_cpu_features_from_host(ARMCPU *cpu)
{
    if (!arm_host_cpu_features.dtb_compatible) {
        if (!hvf_enabled() ||
            !hvf_arm_get_host_cpu_features(&arm_host_cpu_features)) {
            /*
             * We can't report this error yet, so flag that we need to
             * in arm_cpu_realizefn().
             */
            cpu->host_cpu_probe_failed = true;
            return;
        }
    }

    cpu->dtb_compatible = arm_host_cpu_features.dtb_compatible;
    cpu->isar = arm_host_cpu_features.isar;
    cpu->env.features = arm_host_cpu_features.features;
    cpu->midr = arm_host_cpu_features.midr;
    cpu->reset_sctlr = arm_host_cpu_features.reset_sctlr;
}

void hvf_arch_vcpu_destroy(CPUState *cpu)
{
}

hv_return_t hvf_arch_vm_create(MachineState *ms, uint32_t pa_range)
{
    hv_return_t ret;
    hv_vm_config_t config = hv_vm_config_create();

    ret = hv_vm_config_set_ipa_size(config, pa_range);
    if (ret != HV_SUCCESS) {
        goto cleanup;
    }
    chosen_ipa_bit_size = pa_range;

    ret = hv_vm_create(config);

cleanup:
    os_release(config);

    return ret;
}

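/*
 * Build the cpreg list used for sysreg sync and migration, then apply the
 * NO_RAW and feature register overrides for this vCPU.
 */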
int hvf_arch_init_vcpu(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;
    uint32_t sregs_match_len = ARRAY_SIZE(hvf_sreg_match);
    uint32_t sregs_cnt = 0;
    uint64_t pfr;
    hv_return_t ret;
    int i;

    env->aarch64 = true;
    asm volatile("mrs %0, cntfrq_el0" : "=r"(arm_cpu->gt_cntfrq_hz));

    /* Allocate enough space for our sysreg sync */
    arm_cpu->cpreg_indexes = g_renew(uint64_t, arm_cpu->cpreg_indexes,
                                     sregs_match_len);
    arm_cpu->cpreg_values = g_renew(uint64_t, arm_cpu->cpreg_values,
                                    sregs_match_len);
    arm_cpu->cpreg_vmstate_indexes = g_renew(uint64_t,
                                             arm_cpu->cpreg_vmstate_indexes,
                                             sregs_match_len);
    arm_cpu->cpreg_vmstate_values = g_renew(uint64_t,
                                            arm_cpu->cpreg_vmstate_values,
                                            sregs_match_len);

    memset(arm_cpu->cpreg_values, 0, sregs_match_len * sizeof(uint64_t));

    /* Populate cp list for all known sysregs */
    for (i = 0; i < sregs_match_len; i++) {
        const ARMCPRegInfo *ri;
        uint32_t key = hvf_sreg_match[i].key;

        ri = get_arm_cp_reginfo(arm_cpu->cp_regs, key);
        if (ri) {
            assert(!(ri->type & ARM_CP_NO_RAW));
            hvf_sreg_match[i].cp_idx = sregs_cnt;
            arm_cpu->cpreg_indexes[sregs_cnt++] = cpreg_to_kvm_id(key);
        } else {
            hvf_sreg_match[i].cp_idx = -1;
        }
    }
    arm_cpu->cpreg_array_len = sregs_cnt;
    arm_cpu->cpreg_vmstate_array_len = sregs_cnt;

    assert(write_cpustate_to_list(arm_cpu, false));

    /* Set CP_NO_RAW system registers on init */
    ret = hv_vcpu_set_sys_reg(cpu->accel->fd, HV_SYS_REG_MIDR_EL1,
                              arm_cpu->midr);
    assert_hvf_ok(ret);

    ret = hv_vcpu_set_sys_reg(cpu->accel->fd, HV_SYS_REG_MPIDR_EL1,
                              arm_cpu->mp_affinity);
    assert_hvf_ok(ret);

    ret = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_ID_AA64PFR0_EL1, &pfr);
    assert_hvf_ok(ret);
    pfr |= env->gicv3state ? (1 << 24) : 0;
    ret = hv_vcpu_set_sys_reg(cpu->accel->fd, HV_SYS_REG_ID_AA64PFR0_EL1, pfr);
    assert_hvf_ok(ret);

    /* We're limited to underlying hardware caps, override internal versions */
    ret = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_ID_AA64MMFR0_EL1,
                              &arm_cpu->isar.id_aa64mmfr0);
    assert_hvf_ok(ret);

    clamp_id_aa64mmfr0_parange_to_ipa_size(&arm_cpu->isar.id_aa64mmfr0);
    ret = hv_vcpu_set_sys_reg(cpu->accel->fd, HV_SYS_REG_ID_AA64MMFR0_EL1,
                              arm_cpu->isar.id_aa64mmfr0);
    assert_hvf_ok(ret);

    return 0;
}

void hvf_kick_vcpu_thread(CPUState *cpu)
{
    cpus_kick_thread(cpu);
    hv_vcpus_exit(&cpu->accel->fd, 1);
}

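/* Inject a synchronous exception into the guest, to be taken at EL1. */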
static void hvf_raise_exception(CPUState *cpu, uint32_t excp,
                                uint32_t syndrome)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;

    cpu->exception_index = excp;
    env->exception.target_el = 1;
    env->exception.syndrome = syndrome;

    arm_cpu_do_interrupt(cpu);
}

static void hvf_psci_cpu_off(ARMCPU *arm_cpu)
{
    int32_t ret = arm_set_cpu_off(arm_cpu_mp_affinity(arm_cpu));
    assert(ret == QEMU_ARM_POWERCTL_RET_SUCCESS);
}

/*
 * Handle a PSCI call.
 *
 * Returns true on success,
 *         false when the PSCI call is unknown.
 */
static bool hvf_handle_psci_call(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;
    uint64_t param[4] = {
        env->xregs[0],
        env->xregs[1],
        env->xregs[2],
        env->xregs[3]
    };
    uint64_t context_id, mpidr;
    bool target_aarch64 = true;
    CPUState *target_cpu_state;
    ARMCPU *target_cpu;
    target_ulong entry;
    int target_el = 1;
    int32_t ret = 0;

    trace_hvf_psci_call(param[0], param[1], param[2], param[3],
                        arm_cpu_mp_affinity(arm_cpu));

    switch (param[0]) {
    case QEMU_PSCI_0_2_FN_PSCI_VERSION:
        ret = QEMU_PSCI_VERSION_1_1;
        break;
    case QEMU_PSCI_0_2_FN_MIGRATE_INFO_TYPE:
        ret = QEMU_PSCI_0_2_RET_TOS_MIGRATION_NOT_REQUIRED; /* No trusted OS */
        break;
    case QEMU_PSCI_0_2_FN_AFFINITY_INFO:
    case QEMU_PSCI_0_2_FN64_AFFINITY_INFO:
        mpidr = param[1];

        switch (param[2]) {
        case 0:
            target_cpu_state = arm_get_cpu_by_id(mpidr);
            if (!target_cpu_state) {
                ret = QEMU_PSCI_RET_INVALID_PARAMS;
                break;
            }
            target_cpu = ARM_CPU(target_cpu_state);

            ret = target_cpu->power_state;
            break;
        default:
            /* Everything above affinity level 0 is always on. */
            ret = 0;
        }
        break;
    case QEMU_PSCI_0_2_FN_SYSTEM_RESET:
        qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
        /*
         * QEMU reset and shutdown are async requests, but PSCI
         * mandates that we never return from the reset/shutdown
         * call, so power the CPU off now so it doesn't execute
         * anything further.
         */
        hvf_psci_cpu_off(arm_cpu);
        break;
    case QEMU_PSCI_0_2_FN_SYSTEM_OFF:
        qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
        hvf_psci_cpu_off(arm_cpu);
        break;
    case QEMU_PSCI_0_1_FN_CPU_ON:
    case QEMU_PSCI_0_2_FN_CPU_ON:
    case QEMU_PSCI_0_2_FN64_CPU_ON:
        mpidr = param[1];
        entry = param[2];
        context_id = param[3];
        ret = arm_set_cpu_on(mpidr, entry, context_id,
                             target_el, target_aarch64);
        break;
    case QEMU_PSCI_0_1_FN_CPU_OFF:
    case QEMU_PSCI_0_2_FN_CPU_OFF:
        hvf_psci_cpu_off(arm_cpu);
        break;
    case QEMU_PSCI_0_1_FN_CPU_SUSPEND:
    case QEMU_PSCI_0_2_FN_CPU_SUSPEND:
    case QEMU_PSCI_0_2_FN64_CPU_SUSPEND:
        /* Affinity levels are not supported in QEMU */
        if (param[1] & 0xfffe0000) {
            ret = QEMU_PSCI_RET_INVALID_PARAMS;
            break;
        }
        /* Powerdown is not supported, we always go into WFI */
        env->xregs[0] = 0;
        hvf_wfi(cpu);
        break;
    case QEMU_PSCI_0_1_FN_MIGRATE:
    case QEMU_PSCI_0_2_FN_MIGRATE:
        ret = QEMU_PSCI_RET_NOT_SUPPORTED;
        break;
    case QEMU_PSCI_1_0_FN_PSCI_FEATURES:
        switch (param[1]) {
        case QEMU_PSCI_0_2_FN_PSCI_VERSION:
        case QEMU_PSCI_0_2_FN_MIGRATE_INFO_TYPE:
        case QEMU_PSCI_0_2_FN_AFFINITY_INFO:
        case QEMU_PSCI_0_2_FN64_AFFINITY_INFO:
        case QEMU_PSCI_0_2_FN_SYSTEM_RESET:
        case QEMU_PSCI_0_2_FN_SYSTEM_OFF:
        case QEMU_PSCI_0_1_FN_CPU_ON:
        case QEMU_PSCI_0_2_FN_CPU_ON:
        case QEMU_PSCI_0_2_FN64_CPU_ON:
        case QEMU_PSCI_0_1_FN_CPU_OFF:
        case QEMU_PSCI_0_2_FN_CPU_OFF:
        case QEMU_PSCI_0_1_FN_CPU_SUSPEND:
        case QEMU_PSCI_0_2_FN_CPU_SUSPEND:
        case QEMU_PSCI_0_2_FN64_CPU_SUSPEND:
        case QEMU_PSCI_1_0_FN_PSCI_FEATURES:
            ret = 0;
            break;
        case QEMU_PSCI_0_1_FN_MIGRATE:
        case QEMU_PSCI_0_2_FN_MIGRATE:
        default:
            ret = QEMU_PSCI_RET_NOT_SUPPORTED;
        }
        break;
    default:
        return false;
    }

    env->xregs[0] = ret;
    return true;
}

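/* ID registers occupy op0 == 3, op1 == 0, CRn == 0, CRm == 1..7. */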
static bool is_id_sysreg(uint32_t reg)
{
    return SYSREG_OP0(reg) == 3 &&
           SYSREG_OP1(reg) == 0 &&
           SYSREG_CRN(reg) == 0 &&
           SYSREG_CRM(reg) >= 1 &&
           SYSREG_CRM(reg) < 8;
}

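/* Convert an ESR ISS style encoding into the key used by QEMU's cpreg hash. */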
static uint32_t hvf_reg2cp_reg(uint32_t reg)
{
    return ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP,
                              (reg >> SYSREG_CRN_SHIFT) & SYSREG_CRN_MASK,
                              (reg >> SYSREG_CRM_SHIFT) & SYSREG_CRM_MASK,
                              (reg >> SYSREG_OP0_SHIFT) & SYSREG_OP0_MASK,
                              (reg >> SYSREG_OP1_SHIFT) & SYSREG_OP1_MASK,
                              (reg >> SYSREG_OP2_SHIFT) & SYSREG_OP2_MASK);
}

static bool hvf_sysreg_read_cp(CPUState *cpu, uint32_t reg, uint64_t *val)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;
    const ARMCPRegInfo *ri;

    ri = get_arm_cp_reginfo(arm_cpu->cp_regs, hvf_reg2cp_reg(reg));
    if (ri) {
        if (ri->accessfn) {
            if (ri->accessfn(env, ri, true) != CP_ACCESS_OK) {
                return false;
            }
        }
        if (ri->type & ARM_CP_CONST) {
            *val = ri->resetvalue;
        } else if (ri->readfn) {
            *val = ri->readfn(env, ri);
        } else {
            *val = CPREG_FIELD64(env, ri);
        }
        trace_hvf_vgic_read(ri->name, *val);
        return true;
    }

    return false;
}

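/*
 * Emulate a sysreg read. Returns 0 when handled; otherwise injects an
 * Unknown exception into the guest and returns 1.
 */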
static int hvf_sysreg_read(CPUState *cpu, uint32_t reg, uint64_t *val)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;

    if (arm_feature(env, ARM_FEATURE_PMU)) {
        switch (reg) {
        case SYSREG_PMCR_EL0:
            *val = env->cp15.c9_pmcr;
            return 0;
        case SYSREG_PMCCNTR_EL0:
            pmu_op_start(env);
            *val = env->cp15.c15_ccnt;
            pmu_op_finish(env);
            return 0;
        case SYSREG_PMCNTENCLR_EL0:
            *val = env->cp15.c9_pmcnten;
            return 0;
        case SYSREG_PMOVSCLR_EL0:
            *val = env->cp15.c9_pmovsr;
            return 0;
        case SYSREG_PMSELR_EL0:
            *val = env->cp15.c9_pmselr;
            return 0;
        case SYSREG_PMINTENCLR_EL1:
            *val = env->cp15.c9_pminten;
            return 0;
        case SYSREG_PMCCFILTR_EL0:
            *val = env->cp15.pmccfiltr_el0;
            return 0;
        case SYSREG_PMCNTENSET_EL0:
            *val = env->cp15.c9_pmcnten;
            return 0;
        case SYSREG_PMUSERENR_EL0:
            *val = env->cp15.c9_pmuserenr;
            return 0;
        case SYSREG_PMCEID0_EL0:
        case SYSREG_PMCEID1_EL0:
            /* We can't really count anything yet, declare all events invalid */
            *val = 0;
            return 0;
        }
    }

    switch (reg) {
    case SYSREG_CNTPCT_EL0:
        *val = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) /
               gt_cntfrq_period_ns(arm_cpu);
        return 0;
    case SYSREG_OSLSR_EL1:
        *val = env->cp15.oslsr_el1;
        return 0;
    case SYSREG_OSDLR_EL1:
        /* Dummy register */
        return 0;
    case SYSREG_ICC_AP0R0_EL1:
    case SYSREG_ICC_AP0R1_EL1:
    case SYSREG_ICC_AP0R2_EL1:
    case SYSREG_ICC_AP0R3_EL1:
    case SYSREG_ICC_AP1R0_EL1:
    case SYSREG_ICC_AP1R1_EL1:
    case SYSREG_ICC_AP1R2_EL1:
    case SYSREG_ICC_AP1R3_EL1:
    case SYSREG_ICC_ASGI1R_EL1:
    case SYSREG_ICC_BPR0_EL1:
    case SYSREG_ICC_BPR1_EL1:
    case SYSREG_ICC_DIR_EL1:
    case SYSREG_ICC_EOIR0_EL1:
    case SYSREG_ICC_EOIR1_EL1:
    case SYSREG_ICC_HPPIR0_EL1:
    case SYSREG_ICC_HPPIR1_EL1:
    case SYSREG_ICC_IAR0_EL1:
    case SYSREG_ICC_IAR1_EL1:
    case SYSREG_ICC_IGRPEN0_EL1:
    case SYSREG_ICC_IGRPEN1_EL1:
    case SYSREG_ICC_PMR_EL1:
    case SYSREG_ICC_SGI0R_EL1:
    case SYSREG_ICC_SGI1R_EL1:
    case SYSREG_ICC_SRE_EL1:
    case SYSREG_ICC_CTLR_EL1:
        /* Call the TCG sysreg handler. This is only safe for GICv3 regs. */
        if (hvf_sysreg_read_cp(cpu, reg, val)) {
            return 0;
        }
        break;
    case SYSREG_DBGBVR0_EL1:
    case SYSREG_DBGBVR1_EL1:
    case SYSREG_DBGBVR2_EL1:
    case SYSREG_DBGBVR3_EL1:
    case SYSREG_DBGBVR4_EL1:
    case SYSREG_DBGBVR5_EL1:
    case SYSREG_DBGBVR6_EL1:
    case SYSREG_DBGBVR7_EL1:
    case SYSREG_DBGBVR8_EL1:
    case SYSREG_DBGBVR9_EL1:
    case SYSREG_DBGBVR10_EL1:
    case SYSREG_DBGBVR11_EL1:
    case SYSREG_DBGBVR12_EL1:
    case SYSREG_DBGBVR13_EL1:
    case SYSREG_DBGBVR14_EL1:
    case SYSREG_DBGBVR15_EL1:
        *val = env->cp15.dbgbvr[SYSREG_CRM(reg)];
        return 0;
    case SYSREG_DBGBCR0_EL1:
    case SYSREG_DBGBCR1_EL1:
    case SYSREG_DBGBCR2_EL1:
    case SYSREG_DBGBCR3_EL1:
    case SYSREG_DBGBCR4_EL1:
    case SYSREG_DBGBCR5_EL1:
    case SYSREG_DBGBCR6_EL1:
    case SYSREG_DBGBCR7_EL1:
    case SYSREG_DBGBCR8_EL1:
    case SYSREG_DBGBCR9_EL1:
    case SYSREG_DBGBCR10_EL1:
    case SYSREG_DBGBCR11_EL1:
    case SYSREG_DBGBCR12_EL1:
    case SYSREG_DBGBCR13_EL1:
    case SYSREG_DBGBCR14_EL1:
    case SYSREG_DBGBCR15_EL1:
        *val = env->cp15.dbgbcr[SYSREG_CRM(reg)];
        return 0;
    case SYSREG_DBGWVR0_EL1:
    case SYSREG_DBGWVR1_EL1:
    case SYSREG_DBGWVR2_EL1:
    case SYSREG_DBGWVR3_EL1:
    case SYSREG_DBGWVR4_EL1:
    case SYSREG_DBGWVR5_EL1:
    case SYSREG_DBGWVR6_EL1:
    case SYSREG_DBGWVR7_EL1:
    case SYSREG_DBGWVR8_EL1:
    case SYSREG_DBGWVR9_EL1:
    case SYSREG_DBGWVR10_EL1:
    case SYSREG_DBGWVR11_EL1:
    case SYSREG_DBGWVR12_EL1:
    case SYSREG_DBGWVR13_EL1:
    case SYSREG_DBGWVR14_EL1:
    case SYSREG_DBGWVR15_EL1:
        *val = env->cp15.dbgwvr[SYSREG_CRM(reg)];
        return 0;
    case SYSREG_DBGWCR0_EL1:
    case SYSREG_DBGWCR1_EL1:
    case SYSREG_DBGWCR2_EL1:
    case SYSREG_DBGWCR3_EL1:
    case SYSREG_DBGWCR4_EL1:
    case SYSREG_DBGWCR5_EL1:
    case SYSREG_DBGWCR6_EL1:
    case SYSREG_DBGWCR7_EL1:
    case SYSREG_DBGWCR8_EL1:
    case SYSREG_DBGWCR9_EL1:
    case SYSREG_DBGWCR10_EL1:
    case SYSREG_DBGWCR11_EL1:
    case SYSREG_DBGWCR12_EL1:
    case SYSREG_DBGWCR13_EL1:
    case SYSREG_DBGWCR14_EL1:
    case SYSREG_DBGWCR15_EL1:
        *val = env->cp15.dbgwcr[SYSREG_CRM(reg)];
        return 0;
    default:
        if (is_id_sysreg(reg)) {
            /* ID system registers read as RES0 */
            *val = 0;
            return 0;
        }
    }

    cpu_synchronize_state(cpu);
    trace_hvf_unhandled_sysreg_read(env->pc, reg,
                                    SYSREG_OP0(reg),
                                    SYSREG_OP1(reg),
                                    SYSREG_CRN(reg),
                                    SYSREG_CRM(reg),
                                    SYSREG_OP2(reg));
    hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
    return 1;
}

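/*
 * Update the PMU interrupt line: it is asserted while the PMU is enabled
 * and an interrupt-enabled counter has its overflow flag set.
 */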
static void pmu_update_irq(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
            (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
}

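/* No PMU events are implemented under hvf yet, so none are supported. */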
static bool pmu_event_supported(uint16_t number)
{
    return false;
}

/*
 * Returns true if the counter (pass 31 for PMCCNTR) should count events using
 * the current EL, security state, and register configuration.
 */
static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
{
    uint64_t filter;
    bool enabled, filtered = true;
    int el = arm_current_el(env);

    enabled = (env->cp15.c9_pmcr & PMCRE) &&
              (env->cp15.c9_pmcnten & (1 << counter));

    if (counter == 31) {
        filter = env->cp15.pmccfiltr_el0;
    } else {
        filter = env->cp15.c14_pmevtyper[counter];
    }

    if (el == 0) {
        filtered = filter & PMXEVTYPER_U;
    } else if (el == 1) {
        filtered = filter & PMXEVTYPER_P;
    }

    if (counter != 31) {
        /*
         * If not checking PMCCNTR, ensure the counter is setup to an event we
         * support
         */
        uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
        if (!pmu_event_supported(event)) {
            return false;
        }
    }

    return enabled && !filtered;
}

static void pmswinc_write(CPUARMState *env, uint64_t value)
{
    unsigned int i;
    for (i = 0; i < pmu_num_counters(env); i++) {
        /* Increment a counter's count iff: */
        if ((value & (1 << i)) && /* counter's bit is set */
                /* counter is enabled and not filtered */
                pmu_counter_enabled(env, i) &&
                /* counter is SW_INCR */
                (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
            /*
             * Detect if this write causes an overflow since we can't predict
             * PMSWINC overflows like we can for other events
             */
            uint32_t new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;

            if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & INT32_MIN) {
                env->cp15.c9_pmovsr |= (1 << i);
                pmu_update_irq(env);
            }

            env->cp15.c14_pmevcntr[i] = new_pmswinc;
        }
    }
}

static bool hvf_sysreg_write_cp(CPUState *cpu, uint32_t reg, uint64_t val)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;
    const ARMCPRegInfo *ri;

    ri = get_arm_cp_reginfo(arm_cpu->cp_regs, hvf_reg2cp_reg(reg));

    if (ri) {
        if (ri->accessfn) {
            if (ri->accessfn(env, ri, false) != CP_ACCESS_OK) {
                return false;
            }
        }
        if (ri->writefn) {
            ri->writefn(env, ri, val);
        } else {
            CPREG_FIELD64(env, ri) = val;
        }

        trace_hvf_vgic_write(ri->name, val);
        return true;
    }

    return false;
}

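/*
 * Emulate a sysreg write. Returns 0 when handled; otherwise injects an
 * Unknown exception into the guest and returns 1.
 */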
hvf_sysreg_write(CPUState * cpu,uint32_t reg,uint64_t val)1556 static int hvf_sysreg_write(CPUState *cpu, uint32_t reg, uint64_t val)
1557 {
1558 ARMCPU *arm_cpu = ARM_CPU(cpu);
1559 CPUARMState *env = &arm_cpu->env;
1560
1561 trace_hvf_sysreg_write(reg,
1562 SYSREG_OP0(reg),
1563 SYSREG_OP1(reg),
1564 SYSREG_CRN(reg),
1565 SYSREG_CRM(reg),
1566 SYSREG_OP2(reg),
1567 val);
1568
1569 if (arm_feature(env, ARM_FEATURE_PMU)) {
1570 switch (reg) {
1571 case SYSREG_PMCCNTR_EL0:
1572 pmu_op_start(env);
1573 env->cp15.c15_ccnt = val;
1574 pmu_op_finish(env);
1575 return 0;
1576 case SYSREG_PMCR_EL0:
1577 pmu_op_start(env);
1578
1579 if (val & PMCRC) {
1580 /* The counter has been reset */
1581 env->cp15.c15_ccnt = 0;
1582 }
1583
1584 if (val & PMCRP) {
1585 unsigned int i;
1586 for (i = 0; i < pmu_num_counters(env); i++) {
1587 env->cp15.c14_pmevcntr[i] = 0;
1588 }
1589 }
1590
1591 env->cp15.c9_pmcr &= ~PMCR_WRITABLE_MASK;
1592 env->cp15.c9_pmcr |= (val & PMCR_WRITABLE_MASK);
1593
1594 pmu_op_finish(env);
1595 return 0;
1596 case SYSREG_PMUSERENR_EL0:
1597 env->cp15.c9_pmuserenr = val & 0xf;
1598 return 0;
1599 case SYSREG_PMCNTENSET_EL0:
1600 env->cp15.c9_pmcnten |= (val & pmu_counter_mask(env));
1601 return 0;
1602 case SYSREG_PMCNTENCLR_EL0:
1603 env->cp15.c9_pmcnten &= ~(val & pmu_counter_mask(env));
1604 return 0;
1605 case SYSREG_PMINTENCLR_EL1:
1606 pmu_op_start(env);
1607 env->cp15.c9_pminten |= val;
1608 pmu_op_finish(env);
1609 return 0;
1610 case SYSREG_PMOVSCLR_EL0:
1611 pmu_op_start(env);
1612 env->cp15.c9_pmovsr &= ~val;
1613 pmu_op_finish(env);
1614 return 0;
1615 case SYSREG_PMSWINC_EL0:
1616 pmu_op_start(env);
1617 pmswinc_write(env, val);
1618 pmu_op_finish(env);
1619 return 0;
1620 case SYSREG_PMSELR_EL0:
1621 env->cp15.c9_pmselr = val & 0x1f;
1622 return 0;
1623 case SYSREG_PMCCFILTR_EL0:
1624 pmu_op_start(env);
1625 env->cp15.pmccfiltr_el0 = val & PMCCFILTR_EL0;
1626 pmu_op_finish(env);
1627 return 0;
1628 }
1629 }
1630
    switch (reg) {
    case SYSREG_OSLAR_EL1:
        env->cp15.oslsr_el1 = val & 1;
        return 0;
    case SYSREG_OSDLR_EL1:
        /* Dummy register */
        return 0;
    case SYSREG_ICC_AP0R0_EL1:
    case SYSREG_ICC_AP0R1_EL1:
    case SYSREG_ICC_AP0R2_EL1:
    case SYSREG_ICC_AP0R3_EL1:
    case SYSREG_ICC_AP1R0_EL1:
    case SYSREG_ICC_AP1R1_EL1:
    case SYSREG_ICC_AP1R2_EL1:
    case SYSREG_ICC_AP1R3_EL1:
    case SYSREG_ICC_ASGI1R_EL1:
    case SYSREG_ICC_BPR0_EL1:
    case SYSREG_ICC_BPR1_EL1:
    case SYSREG_ICC_CTLR_EL1:
    case SYSREG_ICC_DIR_EL1:
    case SYSREG_ICC_EOIR0_EL1:
    case SYSREG_ICC_EOIR1_EL1:
    case SYSREG_ICC_HPPIR0_EL1:
    case SYSREG_ICC_HPPIR1_EL1:
    case SYSREG_ICC_IAR0_EL1:
    case SYSREG_ICC_IAR1_EL1:
    case SYSREG_ICC_IGRPEN0_EL1:
    case SYSREG_ICC_IGRPEN1_EL1:
    case SYSREG_ICC_PMR_EL1:
    case SYSREG_ICC_SGI0R_EL1:
    case SYSREG_ICC_SGI1R_EL1:
    case SYSREG_ICC_SRE_EL1:
        /* Call the TCG sysreg handler. This is only safe for GICv3 regs. */
        if (hvf_sysreg_write_cp(cpu, reg, val)) {
            return 0;
        }
        break;
    case SYSREG_MDSCR_EL1:
        env->cp15.mdscr_el1 = val;
        return 0;
    case SYSREG_DBGBVR0_EL1:
    case SYSREG_DBGBVR1_EL1:
    case SYSREG_DBGBVR2_EL1:
    case SYSREG_DBGBVR3_EL1:
    case SYSREG_DBGBVR4_EL1:
    case SYSREG_DBGBVR5_EL1:
    case SYSREG_DBGBVR6_EL1:
    case SYSREG_DBGBVR7_EL1:
    case SYSREG_DBGBVR8_EL1:
    case SYSREG_DBGBVR9_EL1:
    case SYSREG_DBGBVR10_EL1:
    case SYSREG_DBGBVR11_EL1:
    case SYSREG_DBGBVR12_EL1:
    case SYSREG_DBGBVR13_EL1:
    case SYSREG_DBGBVR14_EL1:
    case SYSREG_DBGBVR15_EL1:
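        /* For the DBGB/DBGW <n> registers, CRm encodes the index n */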
        env->cp15.dbgbvr[SYSREG_CRM(reg)] = val;
        return 0;
    case SYSREG_DBGBCR0_EL1:
    case SYSREG_DBGBCR1_EL1:
    case SYSREG_DBGBCR2_EL1:
    case SYSREG_DBGBCR3_EL1:
    case SYSREG_DBGBCR4_EL1:
    case SYSREG_DBGBCR5_EL1:
    case SYSREG_DBGBCR6_EL1:
    case SYSREG_DBGBCR7_EL1:
    case SYSREG_DBGBCR8_EL1:
    case SYSREG_DBGBCR9_EL1:
    case SYSREG_DBGBCR10_EL1:
    case SYSREG_DBGBCR11_EL1:
    case SYSREG_DBGBCR12_EL1:
    case SYSREG_DBGBCR13_EL1:
    case SYSREG_DBGBCR14_EL1:
    case SYSREG_DBGBCR15_EL1:
        env->cp15.dbgbcr[SYSREG_CRM(reg)] = val;
        return 0;
    case SYSREG_DBGWVR0_EL1:
    case SYSREG_DBGWVR1_EL1:
    case SYSREG_DBGWVR2_EL1:
    case SYSREG_DBGWVR3_EL1:
    case SYSREG_DBGWVR4_EL1:
    case SYSREG_DBGWVR5_EL1:
    case SYSREG_DBGWVR6_EL1:
    case SYSREG_DBGWVR7_EL1:
    case SYSREG_DBGWVR8_EL1:
    case SYSREG_DBGWVR9_EL1:
    case SYSREG_DBGWVR10_EL1:
    case SYSREG_DBGWVR11_EL1:
    case SYSREG_DBGWVR12_EL1:
    case SYSREG_DBGWVR13_EL1:
    case SYSREG_DBGWVR14_EL1:
    case SYSREG_DBGWVR15_EL1:
        env->cp15.dbgwvr[SYSREG_CRM(reg)] = val;
        return 0;
    case SYSREG_DBGWCR0_EL1:
    case SYSREG_DBGWCR1_EL1:
    case SYSREG_DBGWCR2_EL1:
    case SYSREG_DBGWCR3_EL1:
    case SYSREG_DBGWCR4_EL1:
    case SYSREG_DBGWCR5_EL1:
    case SYSREG_DBGWCR6_EL1:
    case SYSREG_DBGWCR7_EL1:
    case SYSREG_DBGWCR8_EL1:
    case SYSREG_DBGWCR9_EL1:
    case SYSREG_DBGWCR10_EL1:
    case SYSREG_DBGWCR11_EL1:
    case SYSREG_DBGWCR12_EL1:
    case SYSREG_DBGWCR13_EL1:
    case SYSREG_DBGWCR14_EL1:
    case SYSREG_DBGWCR15_EL1:
        env->cp15.dbgwcr[SYSREG_CRM(reg)] = val;
        return 0;
    }

    cpu_synchronize_state(cpu);
    trace_hvf_unhandled_sysreg_write(env->pc, reg,
                                     SYSREG_OP0(reg),
                                     SYSREG_OP1(reg),
                                     SYSREG_CRN(reg),
                                     SYSREG_CRM(reg),
                                     SYSREG_OP2(reg));
    hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
    return 1;
}

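/*
 * Mirror QEMU's asserted IRQ/FIQ lines into hvf's per-vCPU pending
 * interrupt state before entering the guest.
 */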
static int hvf_inject_interrupts(CPUState *cpu)
{
    if (cpu->interrupt_request & CPU_INTERRUPT_FIQ) {
        trace_hvf_inject_fiq();
        hv_vcpu_set_pending_interrupt(cpu->accel->fd, HV_INTERRUPT_TYPE_FIQ,
                                      true);
    }

    if (cpu->interrupt_request & CPU_INTERRUPT_HARD) {
        trace_hvf_inject_irq();
        hv_vcpu_set_pending_interrupt(cpu->accel->fd, HV_INTERRUPT_TYPE_IRQ,
                                      true);
    }

    return 0;
}

static uint64_t hvf_vtimer_val_raw(void)
{
    /*
     * mach_absolute_time() returns the host's view of the counter and
     * does not account for the VM offset that we define. Subtract our
     * offset to get the guest's view.
     */
    return mach_absolute_time() - hvf_state->vtimer_offset;
}

static uint64_t hvf_vtimer_val(void)
{
    if (!runstate_is_running()) {
        /* VM is paused, the vtimer value is in vtimer.vtimer_val */
        return vtimer.vtimer_val;
    }

    return hvf_vtimer_val_raw();
}

static void hvf_wait_for_ipi(CPUState *cpu, struct timespec *ts)
{
    /*
     * Use pselect to sleep so that other threads can IPI us while we're
     * sleeping.
     */
    qatomic_set_mb(&cpu->thread_kicked, false);
    bql_unlock();
    pselect(0, 0, 0, 0, ts, &cpu->accel->unblock_ipi_mask);
    bql_lock();
}

static void hvf_wfi(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    struct timespec ts;
    hv_return_t r;
    uint64_t ctl;
    uint64_t cval;
    int64_t ticks_to_sleep;
    uint64_t seconds;
    uint64_t nanos;
    uint32_t cntfrq;

    if (cpu->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_FIQ)) {
        /* Interrupt pending, no need to wait */
        return;
    }

    r = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_CNTV_CTL_EL0, &ctl);
    assert_hvf_ok(r);

    if (!(ctl & TMR_CTL_ENABLE) || (ctl & TMR_CTL_IMASK)) {
        /* Timer disabled or masked, just wait for an IPI. */
        hvf_wait_for_ipi(cpu, NULL);
        return;
    }

    r = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_CNTV_CVAL_EL0, &cval);
    assert_hvf_ok(r);

    ticks_to_sleep = cval - hvf_vtimer_val();
    if (ticks_to_sleep < 0) {
        return;
    }

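    /*
     * gt_cntfrq_period_ns() returns the counter period in nanoseconds,
     * so ticks * period yields nanoseconds. Split off whole seconds
     * first so the remainder fits struct timespec's tv_nsec.
     */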
    cntfrq = gt_cntfrq_period_ns(arm_cpu);
    seconds = muldiv64(ticks_to_sleep, cntfrq, NANOSECONDS_PER_SECOND);
    ticks_to_sleep -= muldiv64(seconds, NANOSECONDS_PER_SECOND, cntfrq);
    nanos = ticks_to_sleep * cntfrq;

    /*
     * Don't sleep for less than the time a context switch would take,
     * so that we can satisfy fast timer requests on the same CPU.
     * Measurements on M1 show the sweet spot to be ~2ms.
     */
    if (!seconds && nanos < (2 * SCALE_MS)) {
        return;
    }

    ts = (struct timespec) { seconds, nanos };
    hvf_wait_for_ipi(cpu, &ts);
}

static void hvf_sync_vtimer(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    hv_return_t r;
    uint64_t ctl;
    bool irq_state;

    if (!cpu->accel->vtimer_masked) {
        /* We will get notified on vtimer changes by hvf, nothing to do */
        return;
    }

    r = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_CNTV_CTL_EL0, &ctl);
    assert_hvf_ok(r);

    irq_state = (ctl & (TMR_CTL_ENABLE | TMR_CTL_IMASK | TMR_CTL_ISTATUS)) ==
                (TMR_CTL_ENABLE | TMR_CTL_ISTATUS);
    qemu_set_irq(arm_cpu->gt_timer_outputs[GTIMER_VIRT], irq_state);

    if (!irq_state) {
        /* Timer no longer asserting, we can unmask it */
        hv_vcpu_set_vtimer_mask(cpu->accel->fd, false);
        cpu->accel->vtimer_masked = false;
    }
}

int hvf_vcpu_exec(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;
    int ret;
    hv_vcpu_exit_t *hvf_exit = cpu->accel->exit;
    hv_return_t r;
    bool advance_pc = false;

    if (!(cpu->singlestep_enabled & SSTEP_NOIRQ) &&
        hvf_inject_interrupts(cpu)) {
        return EXCP_INTERRUPT;
    }

    if (cpu->halted) {
        return EXCP_HLT;
    }

    flush_cpu_state(cpu);

    bql_unlock();
    assert_hvf_ok(hv_vcpu_run(cpu->accel->fd));

    /* handle VMEXIT */
    uint64_t exit_reason = hvf_exit->reason;
    uint64_t syndrome = hvf_exit->exception.syndrome;
    uint32_t ec = syn_get_ec(syndrome);

    ret = 0;
    bql_lock();
    switch (exit_reason) {
    case HV_EXIT_REASON_EXCEPTION:
        /* This is the main one, handle below. */
        break;
    case HV_EXIT_REASON_VTIMER_ACTIVATED:
        qemu_set_irq(arm_cpu->gt_timer_outputs[GTIMER_VIRT], 1);
        cpu->accel->vtimer_masked = true;
        return 0;
    case HV_EXIT_REASON_CANCELED:
        /* we got kicked, no exit to process */
        return 0;
    default:
        g_assert_not_reached();
    }

    hvf_sync_vtimer(cpu);

    switch (ec) {
    case EC_SOFTWARESTEP: {
        ret = EXCP_DEBUG;

        if (!cpu->singlestep_enabled) {
            error_report("EC_SOFTWARESTEP but single-stepping not enabled");
        }
        break;
    }
    case EC_AA64_BKPT: {
        ret = EXCP_DEBUG;

        cpu_synchronize_state(cpu);

        if (!hvf_find_sw_breakpoint(cpu, env->pc)) {
            /* Re-inject into the guest */
            ret = 0;
            hvf_raise_exception(cpu, EXCP_BKPT, syn_aa64_bkpt(0));
        }
        break;
    }
    case EC_BREAKPOINT: {
        ret = EXCP_DEBUG;

        cpu_synchronize_state(cpu);

        if (!find_hw_breakpoint(cpu, env->pc)) {
            error_report("EC_BREAKPOINT but unknown hw breakpoint");
        }
        break;
    }
    case EC_WATCHPOINT: {
        ret = EXCP_DEBUG;

        cpu_synchronize_state(cpu);

        CPUWatchpoint *wp =
            find_hw_watchpoint(cpu, hvf_exit->exception.virtual_address);
        if (!wp) {
            error_report("EC_WATCHPOINT but unknown hw watchpoint");
        }
        cpu->watchpoint_hit = wp;
        break;
    }
    case EC_DATAABORT: {
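        /*
         * Decode the data-abort ISS: ISV says the fields below are
         * valid, WnR distinguishes writes from reads, SSE requests
         * sign extension, SAS is log2 of the access size, SRT is the
         * transfer register, and CM flags cache-maintenance ops.
         */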
        bool isv = syndrome & ARM_EL_ISV;
        bool iswrite = (syndrome >> 6) & 1;
        bool s1ptw = (syndrome >> 7) & 1;
        bool sse = (syndrome >> 21) & 1;
        uint32_t sas = (syndrome >> 22) & 3;
        uint32_t len = 1 << sas;
        uint32_t srt = (syndrome >> 16) & 0x1f;
        uint32_t cm = (syndrome >> 8) & 0x1;
        uint64_t val = 0;

        trace_hvf_data_abort(env->pc, hvf_exit->exception.virtual_address,
                             hvf_exit->exception.physical_address, isv,
                             iswrite, s1ptw, len, srt);

        if (cm) {
            /* We don't cache MMIO regions */
            advance_pc = true;
            break;
        }

        assert(isv);

        if (iswrite) {
            val = hvf_get_reg(cpu, srt);
            address_space_write(&address_space_memory,
                                hvf_exit->exception.physical_address,
                                MEMTXATTRS_UNSPECIFIED, &val, len);
        } else {
            address_space_read(&address_space_memory,
                               hvf_exit->exception.physical_address,
                               MEMTXATTRS_UNSPECIFIED, &val, len);
            if (sse) {
                val = sextract64(val, 0, len * 8);
            }
            hvf_set_reg(cpu, srt, val);
        }

        advance_pc = true;
        break;
    }
    case EC_SYSTEMREGISTERTRAP: {
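        /*
         * Bit 0 of the ISS gives the direction (1 = read/MRS), bits
         * 9:5 hold Rt; the remaining fields identify the register.
         */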
        bool isread = (syndrome >> 0) & 1;
        uint32_t rt = (syndrome >> 5) & 0x1f;
        uint32_t reg = syndrome & SYSREG_MASK;
        uint64_t val;
        int sysreg_ret = 0;

        if (isread) {
            sysreg_ret = hvf_sysreg_read(cpu, reg, &val);
            if (!sysreg_ret) {
                trace_hvf_sysreg_read(reg,
                                      SYSREG_OP0(reg),
                                      SYSREG_OP1(reg),
                                      SYSREG_CRN(reg),
                                      SYSREG_CRM(reg),
                                      SYSREG_OP2(reg),
                                      val);
                hvf_set_reg(cpu, rt, val);
            }
        } else {
            val = hvf_get_reg(cpu, rt);
            sysreg_ret = hvf_sysreg_write(cpu, reg, val);
        }

        advance_pc = !sysreg_ret;
        break;
    }
    case EC_WFX_TRAP:
        advance_pc = true;
        if (!(syndrome & WFX_IS_WFE)) {
            hvf_wfi(cpu);
        }
        break;
    case EC_AA64_HVC:
        cpu_synchronize_state(cpu);
        if (arm_cpu->psci_conduit == QEMU_PSCI_CONDUIT_HVC) {
            if (!hvf_handle_psci_call(cpu)) {
                trace_hvf_unknown_hvc(env->xregs[0]);
                /*
                 * SMCCC 1.3 section 5.2 says every unknown SMCCC call
                 * returns -1
                 */
                env->xregs[0] = -1;
            }
        } else {
            trace_hvf_unknown_hvc(env->xregs[0]);
            hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
        }
        break;
    case EC_AA64_SMC:
        cpu_synchronize_state(cpu);
        if (arm_cpu->psci_conduit == QEMU_PSCI_CONDUIT_SMC) {
            advance_pc = true;

            if (!hvf_handle_psci_call(cpu)) {
                trace_hvf_unknown_smc(env->xregs[0]);
                /*
                 * SMCCC 1.3 section 5.2 says every unknown SMCCC call
                 * returns -1
                 */
                env->xregs[0] = -1;
            }
        } else {
            trace_hvf_unknown_smc(env->xregs[0]);
            hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
        }
        break;
    default:
        cpu_synchronize_state(cpu);
        trace_hvf_exit(syndrome, ec, env->pc);
        error_report("0x%llx: unhandled exception ec=0x%x", env->pc, ec);
    }

    if (advance_pc) {
        uint64_t pc;

        flush_cpu_state(cpu);

        r = hv_vcpu_get_reg(cpu->accel->fd, HV_REG_PC, &pc);
        assert_hvf_ok(r);
        pc += 4;
        r = hv_vcpu_set_reg(cpu->accel->fd, HV_REG_PC, pc);
        assert_hvf_ok(r);

        /* Handle single-stepping over instructions which trigger a VM exit */
        if (cpu->singlestep_enabled) {
            ret = EXCP_DEBUG;
        }
    }

    return ret;
}

static const VMStateDescription vmstate_hvf_vtimer = {
    .name = "hvf-vtimer",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64(vtimer_val, HVFVTimer),
        VMSTATE_END_OF_LIST()
    },
};

static void hvf_vm_state_change(void *opaque, bool running, RunState state)
{
    HVFVTimer *s = opaque;

    if (running) {
        /* Update vtimer offset on all CPUs */
        hvf_state->vtimer_offset = mach_absolute_time() - s->vtimer_val;
        cpu_synchronize_all_states();
    } else {
        /* Remember vtimer value on every pause */
        s->vtimer_val = hvf_vtimer_val_raw();
    }
}

int hvf_arch_init(void)
{
    hvf_state->vtimer_offset = mach_absolute_time();
    vmstate_register(NULL, 0, &vmstate_hvf_vtimer, &vtimer);
    qemu_add_vm_change_state_handler(hvf_vm_state_change, &vtimer);

    hvf_arm_init_debug();

    return 0;
}

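/* AArch64 "brk #0"; BRK #imm16 encodes as 0xd4200000 | (imm16 << 5) */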
static const uint32_t brk_insn = 0xd4200000;

int hvf_arch_insert_sw_breakpoint(CPUState *cpu, struct hvf_sw_breakpoint *bp)
{
    if (cpu_memory_rw_debug(cpu, bp->pc, (uint8_t *)&bp->saved_insn, 4, 0) ||
        cpu_memory_rw_debug(cpu, bp->pc, (uint8_t *)&brk_insn, 4, 1)) {
        return -EINVAL;
    }
    return 0;
}

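/*
 * Only restore the saved instruction if the breakpoint is still in
 * place, i.e. guest memory still contains our brk_insn at bp->pc.
 */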
int hvf_arch_remove_sw_breakpoint(CPUState *cpu, struct hvf_sw_breakpoint *bp)
{
    static uint32_t brk;

    if (cpu_memory_rw_debug(cpu, bp->pc, (uint8_t *)&brk, 4, 0) ||
        brk != brk_insn ||
        cpu_memory_rw_debug(cpu, bp->pc, (uint8_t *)&bp->saved_insn, 4, 1)) {
        return -EINVAL;
    }
    return 0;
}

int hvf_arch_insert_hw_breakpoint(vaddr addr, vaddr len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        return insert_hw_breakpoint(addr);
    case GDB_WATCHPOINT_READ:
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        return insert_hw_watchpoint(addr, len, type);
    default:
        return -ENOSYS;
    }
}

int hvf_arch_remove_hw_breakpoint(vaddr addr, vaddr len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        return delete_hw_breakpoint(addr);
    case GDB_WATCHPOINT_READ:
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        return delete_hw_watchpoint(addr, len, type);
    default:
        return -ENOSYS;
    }
}

void hvf_arch_remove_all_hw_breakpoints(void)
{
    if (cur_hw_wps > 0) {
        g_array_remove_range(hw_watchpoints, 0, cur_hw_wps);
    }
    if (cur_hw_bps > 0) {
        g_array_remove_range(hw_breakpoints, 0, cur_hw_bps);
    }
}

/*
 * Update the vCPU with the gdbstub's view of debug registers. This view
 * consists of all hardware breakpoints and watchpoints inserted so far while
 * debugging the guest.
 */
static void hvf_put_gdbstub_debug_registers(CPUState *cpu)
{
    hv_return_t r = HV_SUCCESS;
    int i;

    for (i = 0; i < cur_hw_bps; i++) {
        HWBreakpoint *bp = get_hw_bp(i);
        r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgbcr_regs[i], bp->bcr);
        assert_hvf_ok(r);
        r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgbvr_regs[i], bp->bvr);
        assert_hvf_ok(r);
    }
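    /* Zero the unused slots so stale guest values cannot fire */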
    for (i = cur_hw_bps; i < max_hw_bps; i++) {
        r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgbcr_regs[i], 0);
        assert_hvf_ok(r);
        r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgbvr_regs[i], 0);
        assert_hvf_ok(r);
    }

    for (i = 0; i < cur_hw_wps; i++) {
        HWWatchpoint *wp = get_hw_wp(i);
        r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgwcr_regs[i], wp->wcr);
        assert_hvf_ok(r);
        r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgwvr_regs[i], wp->wvr);
        assert_hvf_ok(r);
    }
    for (i = cur_hw_wps; i < max_hw_wps; i++) {
        r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgwcr_regs[i], 0);
        assert_hvf_ok(r);
        r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgwvr_regs[i], 0);
        assert_hvf_ok(r);
    }
}

/*
 * Update the vCPU with the guest's view of debug registers. This view is kept
 * in the environment at all times.
 */
static void hvf_put_guest_debug_registers(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;
    hv_return_t r = HV_SUCCESS;
    int i;

    for (i = 0; i < max_hw_bps; i++) {
        r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgbcr_regs[i],
                                env->cp15.dbgbcr[i]);
        assert_hvf_ok(r);
        r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgbvr_regs[i],
                                env->cp15.dbgbvr[i]);
        assert_hvf_ok(r);
    }

    for (i = 0; i < max_hw_wps; i++) {
        r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgwcr_regs[i],
                                env->cp15.dbgwcr[i]);
        assert_hvf_ok(r);
        r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgwvr_regs[i],
                                env->cp15.dbgwvr[i]);
        assert_hvf_ok(r);
    }
}

static inline bool hvf_arm_hw_debug_active(CPUState *cpu)
{
    return ((cur_hw_wps > 0) || (cur_hw_bps > 0));
}

static void hvf_arch_set_traps(void)
{
    CPUState *cpu;
    bool should_enable_traps = false;
    hv_return_t r = HV_SUCCESS;

    /*
     * Check whether guest debugging is enabled for at least one vCPU;
     * if it is, enable exiting the guest on all vCPUs.
     */
    CPU_FOREACH(cpu) {
        should_enable_traps |= cpu->accel->guest_debug_enabled;
    }
    CPU_FOREACH(cpu) {
        /* Set whether debug exceptions exit the guest */
        r = hv_vcpu_set_trap_debug_exceptions(cpu->accel->fd,
                                              should_enable_traps);
        assert_hvf_ok(r);

        /* Set whether accesses to debug registers exit the guest */
        r = hv_vcpu_set_trap_debug_reg_accesses(cpu->accel->fd,
                                                should_enable_traps);
        assert_hvf_ok(r);
    }
}

void hvf_arch_update_guest_debug(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;

    /* Check whether guest debugging is enabled */
    cpu->accel->guest_debug_enabled = cpu->singlestep_enabled ||
                                      hvf_sw_breakpoints_active(cpu) ||
                                      hvf_arm_hw_debug_active(cpu);

    /* Update debug registers */
    if (cpu->accel->guest_debug_enabled) {
        hvf_put_gdbstub_debug_registers(cpu);
    } else {
        hvf_put_guest_debug_registers(cpu);
    }

    cpu_synchronize_state(cpu);

    /* Enable/disable single-stepping */
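    /*
     * MDSCR_EL1.SS arms software step; PSTATE.SS = 1 lets exactly one
     * instruction retire before the step exception is taken.
     */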
    if (cpu->singlestep_enabled) {
        env->cp15.mdscr_el1 =
            deposit64(env->cp15.mdscr_el1, MDSCR_EL1_SS_SHIFT, 1, 1);
        pstate_write(env, pstate_read(env) | PSTATE_SS);
    } else {
        env->cp15.mdscr_el1 =
            deposit64(env->cp15.mdscr_el1, MDSCR_EL1_SS_SHIFT, 1, 0);
    }

    /* Enable/disable Breakpoint exceptions */
    if (hvf_arm_hw_debug_active(cpu)) {
        env->cp15.mdscr_el1 =
            deposit64(env->cp15.mdscr_el1, MDSCR_EL1_MDE_SHIFT, 1, 1);
    } else {
        env->cp15.mdscr_el1 =
            deposit64(env->cp15.mdscr_el1, MDSCR_EL1_MDE_SHIFT, 1, 0);
    }

    hvf_arch_set_traps();
}

bool hvf_arch_supports_guest_debug(void)
{
    return true;
}