/*
 * QEMU Hypervisor.framework support for Apple Silicon
 *
 * Copyright 2020 Alexander Graf <agraf@csgraf.de>
 * Copyright 2020 Google LLC
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/error-report.h"

#include "sysemu/runstate.h"
#include "sysemu/hvf.h"
#include "sysemu/hvf_int.h"
#include "sysemu/hw_accel.h"
#include "hvf_arm.h"
#include "cpregs.h"

#include <mach/mach_time.h>

#include "exec/address-spaces.h"
#include "hw/irq.h"
#include "qemu/main-loop.h"
#include "sysemu/cpus.h"
#include "arm-powerctl.h"
#include "target/arm/cpu.h"
#include "target/arm/internals.h"
#include "trace/trace-target_arm_hvf.h"
#include "migration/vmstate.h"

#include "exec/gdbstub.h"

#define MDSCR_EL1_SS_SHIFT  0
#define MDSCR_EL1_MDE_SHIFT 15

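/*
 * HVF register IDs for the hardware breakpoint/watchpoint control and value
 * registers, indexed by breakpoint/watchpoint number.
 */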
static uint16_t dbgbcr_regs[] = {
    HV_SYS_REG_DBGBCR0_EL1,
    HV_SYS_REG_DBGBCR1_EL1,
    HV_SYS_REG_DBGBCR2_EL1,
    HV_SYS_REG_DBGBCR3_EL1,
    HV_SYS_REG_DBGBCR4_EL1,
    HV_SYS_REG_DBGBCR5_EL1,
    HV_SYS_REG_DBGBCR6_EL1,
    HV_SYS_REG_DBGBCR7_EL1,
    HV_SYS_REG_DBGBCR8_EL1,
    HV_SYS_REG_DBGBCR9_EL1,
    HV_SYS_REG_DBGBCR10_EL1,
    HV_SYS_REG_DBGBCR11_EL1,
    HV_SYS_REG_DBGBCR12_EL1,
    HV_SYS_REG_DBGBCR13_EL1,
    HV_SYS_REG_DBGBCR14_EL1,
    HV_SYS_REG_DBGBCR15_EL1,
};
static uint16_t dbgbvr_regs[] = {
    HV_SYS_REG_DBGBVR0_EL1,
    HV_SYS_REG_DBGBVR1_EL1,
    HV_SYS_REG_DBGBVR2_EL1,
    HV_SYS_REG_DBGBVR3_EL1,
    HV_SYS_REG_DBGBVR4_EL1,
    HV_SYS_REG_DBGBVR5_EL1,
    HV_SYS_REG_DBGBVR6_EL1,
    HV_SYS_REG_DBGBVR7_EL1,
    HV_SYS_REG_DBGBVR8_EL1,
    HV_SYS_REG_DBGBVR9_EL1,
    HV_SYS_REG_DBGBVR10_EL1,
    HV_SYS_REG_DBGBVR11_EL1,
    HV_SYS_REG_DBGBVR12_EL1,
    HV_SYS_REG_DBGBVR13_EL1,
    HV_SYS_REG_DBGBVR14_EL1,
    HV_SYS_REG_DBGBVR15_EL1,
};
static uint16_t dbgwcr_regs[] = {
    HV_SYS_REG_DBGWCR0_EL1,
    HV_SYS_REG_DBGWCR1_EL1,
    HV_SYS_REG_DBGWCR2_EL1,
    HV_SYS_REG_DBGWCR3_EL1,
    HV_SYS_REG_DBGWCR4_EL1,
    HV_SYS_REG_DBGWCR5_EL1,
    HV_SYS_REG_DBGWCR6_EL1,
    HV_SYS_REG_DBGWCR7_EL1,
    HV_SYS_REG_DBGWCR8_EL1,
    HV_SYS_REG_DBGWCR9_EL1,
    HV_SYS_REG_DBGWCR10_EL1,
    HV_SYS_REG_DBGWCR11_EL1,
    HV_SYS_REG_DBGWCR12_EL1,
    HV_SYS_REG_DBGWCR13_EL1,
    HV_SYS_REG_DBGWCR14_EL1,
    HV_SYS_REG_DBGWCR15_EL1,
};
static uint16_t dbgwvr_regs[] = {
    HV_SYS_REG_DBGWVR0_EL1,
    HV_SYS_REG_DBGWVR1_EL1,
    HV_SYS_REG_DBGWVR2_EL1,
    HV_SYS_REG_DBGWVR3_EL1,
    HV_SYS_REG_DBGWVR4_EL1,
    HV_SYS_REG_DBGWVR5_EL1,
    HV_SYS_REG_DBGWVR6_EL1,
    HV_SYS_REG_DBGWVR7_EL1,
    HV_SYS_REG_DBGWVR8_EL1,
    HV_SYS_REG_DBGWVR9_EL1,
    HV_SYS_REG_DBGWVR10_EL1,
    HV_SYS_REG_DBGWVR11_EL1,
    HV_SYS_REG_DBGWVR12_EL1,
    HV_SYS_REG_DBGWVR13_EL1,
    HV_SYS_REG_DBGWVR14_EL1,
    HV_SYS_REG_DBGWVR15_EL1,
};

static inline int hvf_arm_num_brps(hv_vcpu_config_t config)
{
    uint64_t val;
    hv_return_t ret;
    ret = hv_vcpu_config_get_feature_reg(config, HV_FEATURE_REG_ID_AA64DFR0_EL1,
                                         &val);
    assert_hvf_ok(ret);
    return FIELD_EX64(val, ID_AA64DFR0, BRPS) + 1;
}

static inline int hvf_arm_num_wrps(hv_vcpu_config_t config)
{
    uint64_t val;
    hv_return_t ret;
    ret = hv_vcpu_config_get_feature_reg(config, HV_FEATURE_REG_ID_AA64DFR0_EL1,
                                         &val);
    assert_hvf_ok(ret);
    return FIELD_EX64(val, ID_AA64DFR0, WRPS) + 1;
}

void hvf_arm_init_debug(void)
{
    hv_vcpu_config_t config;
    config = hv_vcpu_config_create();

    max_hw_bps = hvf_arm_num_brps(config);
    hw_breakpoints =
        g_array_sized_new(true, true, sizeof(HWBreakpoint), max_hw_bps);

    max_hw_wps = hvf_arm_num_wrps(config);
    hw_watchpoints =
        g_array_sized_new(true, true, sizeof(HWWatchpoint), max_hw_wps);
}

#define HVF_SYSREG(crn, crm, op0, op1, op2) \
        ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, crn, crm, op0, op1, op2)
#define PL1_WRITE_MASK 0x4

#define SYSREG_OP0_SHIFT      20
#define SYSREG_OP0_MASK       0x3
#define SYSREG_OP0(sysreg)    ((sysreg >> SYSREG_OP0_SHIFT) & SYSREG_OP0_MASK)
#define SYSREG_OP1_SHIFT      14
#define SYSREG_OP1_MASK       0x7
#define SYSREG_OP1(sysreg)    ((sysreg >> SYSREG_OP1_SHIFT) & SYSREG_OP1_MASK)
#define SYSREG_CRN_SHIFT      10
#define SYSREG_CRN_MASK       0xf
#define SYSREG_CRN(sysreg)    ((sysreg >> SYSREG_CRN_SHIFT) & SYSREG_CRN_MASK)
#define SYSREG_CRM_SHIFT      1
#define SYSREG_CRM_MASK       0xf
#define SYSREG_CRM(sysreg)    ((sysreg >> SYSREG_CRM_SHIFT) & SYSREG_CRM_MASK)
#define SYSREG_OP2_SHIFT      17
#define SYSREG_OP2_MASK       0x7
#define SYSREG_OP2(sysreg)    ((sysreg >> SYSREG_OP2_SHIFT) & SYSREG_OP2_MASK)

#define SYSREG(op0, op1, crn, crm, op2) \
    ((op0 << SYSREG_OP0_SHIFT) | \
     (op1 << SYSREG_OP1_SHIFT) | \
     (crn << SYSREG_CRN_SHIFT) | \
     (crm << SYSREG_CRM_SHIFT) | \
     (op2 << SYSREG_OP2_SHIFT))
#define SYSREG_MASK \
    SYSREG(SYSREG_OP0_MASK, \
           SYSREG_OP1_MASK, \
           SYSREG_CRN_MASK, \
           SYSREG_CRM_MASK, \
           SYSREG_OP2_MASK)
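/*
 * The SYSREG() field layout mirrors the ISS encoding of a trapped MRS/MSR
 * access in ESR_EL2 (op0 at bit 20, op2 at bit 17, op1 at bit 14, CRn at
 * bit 10, CRm at bit 1), which is the form the hypervisor exit handler
 * receives. Worked example: SYSREG(3, 3, 14, 0, 1) evaluates to
 * (3 << 20) | (1 << 17) | (3 << 14) | (14 << 10) | (0 << 1) == 0x32f800,
 * i.e. SYSREG_CNTPCT_EL0 below.
 */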
#define SYSREG_OSLAR_EL1      SYSREG(2, 0, 1, 0, 4)
#define SYSREG_OSLSR_EL1      SYSREG(2, 0, 1, 1, 4)
#define SYSREG_OSDLR_EL1      SYSREG(2, 0, 1, 3, 4)
#define SYSREG_CNTPCT_EL0     SYSREG(3, 3, 14, 0, 1)
#define SYSREG_PMCR_EL0       SYSREG(3, 3, 9, 12, 0)
#define SYSREG_PMUSERENR_EL0  SYSREG(3, 3, 9, 14, 0)
#define SYSREG_PMCNTENSET_EL0 SYSREG(3, 3, 9, 12, 1)
#define SYSREG_PMCNTENCLR_EL0 SYSREG(3, 3, 9, 12, 2)
#define SYSREG_PMINTENCLR_EL1 SYSREG(3, 0, 9, 14, 2)
#define SYSREG_PMOVSCLR_EL0   SYSREG(3, 3, 9, 12, 3)
#define SYSREG_PMSWINC_EL0    SYSREG(3, 3, 9, 12, 4)
#define SYSREG_PMSELR_EL0     SYSREG(3, 3, 9, 12, 5)
#define SYSREG_PMCEID0_EL0    SYSREG(3, 3, 9, 12, 6)
#define SYSREG_PMCEID1_EL0    SYSREG(3, 3, 9, 12, 7)
#define SYSREG_PMCCNTR_EL0    SYSREG(3, 3, 9, 13, 0)
#define SYSREG_PMCCFILTR_EL0  SYSREG(3, 3, 14, 15, 7)

#define SYSREG_ICC_AP0R0_EL1     SYSREG(3, 0, 12, 8, 4)
#define SYSREG_ICC_AP0R1_EL1     SYSREG(3, 0, 12, 8, 5)
#define SYSREG_ICC_AP0R2_EL1     SYSREG(3, 0, 12, 8, 6)
#define SYSREG_ICC_AP0R3_EL1     SYSREG(3, 0, 12, 8, 7)
#define SYSREG_ICC_AP1R0_EL1     SYSREG(3, 0, 12, 9, 0)
#define SYSREG_ICC_AP1R1_EL1     SYSREG(3, 0, 12, 9, 1)
#define SYSREG_ICC_AP1R2_EL1     SYSREG(3, 0, 12, 9, 2)
#define SYSREG_ICC_AP1R3_EL1     SYSREG(3, 0, 12, 9, 3)
#define SYSREG_ICC_ASGI1R_EL1    SYSREG(3, 0, 12, 11, 6)
#define SYSREG_ICC_BPR0_EL1      SYSREG(3, 0, 12, 8, 3)
#define SYSREG_ICC_BPR1_EL1      SYSREG(3, 0, 12, 12, 3)
#define SYSREG_ICC_CTLR_EL1      SYSREG(3, 0, 12, 12, 4)
#define SYSREG_ICC_DIR_EL1       SYSREG(3, 0, 12, 11, 1)
#define SYSREG_ICC_EOIR0_EL1     SYSREG(3, 0, 12, 8, 1)
#define SYSREG_ICC_EOIR1_EL1     SYSREG(3, 0, 12, 12, 1)
#define SYSREG_ICC_HPPIR0_EL1    SYSREG(3, 0, 12, 8, 2)
#define SYSREG_ICC_HPPIR1_EL1    SYSREG(3, 0, 12, 12, 2)
#define SYSREG_ICC_IAR0_EL1      SYSREG(3, 0, 12, 8, 0)
#define SYSREG_ICC_IAR1_EL1      SYSREG(3, 0, 12, 12, 0)
#define SYSREG_ICC_IGRPEN0_EL1   SYSREG(3, 0, 12, 12, 6)
#define SYSREG_ICC_IGRPEN1_EL1   SYSREG(3, 0, 12, 12, 7)
#define SYSREG_ICC_PMR_EL1       SYSREG(3, 0, 4, 6, 0)
#define SYSREG_ICC_RPR_EL1       SYSREG(3, 0, 12, 11, 3)
#define SYSREG_ICC_SGI0R_EL1     SYSREG(3, 0, 12, 11, 7)
#define SYSREG_ICC_SGI1R_EL1     SYSREG(3, 0, 12, 11, 5)
#define SYSREG_ICC_SRE_EL1       SYSREG(3, 0, 12, 12, 5)

#define SYSREG_MDSCR_EL1      SYSREG(2, 0, 0, 2, 2)
#define SYSREG_DBGBVR0_EL1    SYSREG(2, 0, 0, 0, 4)
#define SYSREG_DBGBCR0_EL1    SYSREG(2, 0, 0, 0, 5)
#define SYSREG_DBGWVR0_EL1    SYSREG(2, 0, 0, 0, 6)
#define SYSREG_DBGWCR0_EL1    SYSREG(2, 0, 0, 0, 7)
#define SYSREG_DBGBVR1_EL1    SYSREG(2, 0, 0, 1, 4)
#define SYSREG_DBGBCR1_EL1    SYSREG(2, 0, 0, 1, 5)
#define SYSREG_DBGWVR1_EL1    SYSREG(2, 0, 0, 1, 6)
#define SYSREG_DBGWCR1_EL1    SYSREG(2, 0, 0, 1, 7)
#define SYSREG_DBGBVR2_EL1    SYSREG(2, 0, 0, 2, 4)
#define SYSREG_DBGBCR2_EL1    SYSREG(2, 0, 0, 2, 5)
#define SYSREG_DBGWVR2_EL1    SYSREG(2, 0, 0, 2, 6)
#define SYSREG_DBGWCR2_EL1    SYSREG(2, 0, 0, 2, 7)
#define SYSREG_DBGBVR3_EL1    SYSREG(2, 0, 0, 3, 4)
#define SYSREG_DBGBCR3_EL1    SYSREG(2, 0, 0, 3, 5)
#define SYSREG_DBGWVR3_EL1    SYSREG(2, 0, 0, 3, 6)
#define SYSREG_DBGWCR3_EL1    SYSREG(2, 0, 0, 3, 7)
#define SYSREG_DBGBVR4_EL1    SYSREG(2, 0, 0, 4, 4)
#define SYSREG_DBGBCR4_EL1    SYSREG(2, 0, 0, 4, 5)
#define SYSREG_DBGWVR4_EL1    SYSREG(2, 0, 0, 4, 6)
#define SYSREG_DBGWCR4_EL1    SYSREG(2, 0, 0, 4, 7)
#define SYSREG_DBGBVR5_EL1    SYSREG(2, 0, 0, 5, 4)
#define SYSREG_DBGBCR5_EL1    SYSREG(2, 0, 0, 5, 5)
#define SYSREG_DBGWVR5_EL1    SYSREG(2, 0, 0, 5, 6)
#define SYSREG_DBGWCR5_EL1    SYSREG(2, 0, 0, 5, 7)
#define SYSREG_DBGBVR6_EL1    SYSREG(2, 0, 0, 6, 4)
#define SYSREG_DBGBCR6_EL1    SYSREG(2, 0, 0, 6, 5)
#define SYSREG_DBGWVR6_EL1    SYSREG(2, 0, 0, 6, 6)
#define SYSREG_DBGWCR6_EL1    SYSREG(2, 0, 0, 6, 7)
#define SYSREG_DBGBVR7_EL1    SYSREG(2, 0, 0, 7, 4)
#define SYSREG_DBGBCR7_EL1    SYSREG(2, 0, 0, 7, 5)
#define SYSREG_DBGWVR7_EL1    SYSREG(2, 0, 0, 7, 6)
#define SYSREG_DBGWCR7_EL1    SYSREG(2, 0, 0, 7, 7)
#define SYSREG_DBGBVR8_EL1    SYSREG(2, 0, 0, 8, 4)
#define SYSREG_DBGBCR8_EL1    SYSREG(2, 0, 0, 8, 5)
#define SYSREG_DBGWVR8_EL1    SYSREG(2, 0, 0, 8, 6)
#define SYSREG_DBGWCR8_EL1    SYSREG(2, 0, 0, 8, 7)
#define SYSREG_DBGBVR9_EL1    SYSREG(2, 0, 0, 9, 4)
#define SYSREG_DBGBCR9_EL1    SYSREG(2, 0, 0, 9, 5)
#define SYSREG_DBGWVR9_EL1    SYSREG(2, 0, 0, 9, 6)
#define SYSREG_DBGWCR9_EL1    SYSREG(2, 0, 0, 9, 7)
#define SYSREG_DBGBVR10_EL1   SYSREG(2, 0, 0, 10, 4)
#define SYSREG_DBGBCR10_EL1   SYSREG(2, 0, 0, 10, 5)
#define SYSREG_DBGWVR10_EL1   SYSREG(2, 0, 0, 10, 6)
#define SYSREG_DBGWCR10_EL1   SYSREG(2, 0, 0, 10, 7)
#define SYSREG_DBGBVR11_EL1   SYSREG(2, 0, 0, 11, 4)
#define SYSREG_DBGBCR11_EL1   SYSREG(2, 0, 0, 11, 5)
#define SYSREG_DBGWVR11_EL1   SYSREG(2, 0, 0, 11, 6)
#define SYSREG_DBGWCR11_EL1   SYSREG(2, 0, 0, 11, 7)
#define SYSREG_DBGBVR12_EL1   SYSREG(2, 0, 0, 12, 4)
#define SYSREG_DBGBCR12_EL1   SYSREG(2, 0, 0, 12, 5)
#define SYSREG_DBGWVR12_EL1   SYSREG(2, 0, 0, 12, 6)
#define SYSREG_DBGWCR12_EL1   SYSREG(2, 0, 0, 12, 7)
#define SYSREG_DBGBVR13_EL1   SYSREG(2, 0, 0, 13, 4)
#define SYSREG_DBGBCR13_EL1   SYSREG(2, 0, 0, 13, 5)
#define SYSREG_DBGWVR13_EL1   SYSREG(2, 0, 0, 13, 6)
#define SYSREG_DBGWCR13_EL1   SYSREG(2, 0, 0, 13, 7)
#define SYSREG_DBGBVR14_EL1   SYSREG(2, 0, 0, 14, 4)
#define SYSREG_DBGBCR14_EL1   SYSREG(2, 0, 0, 14, 5)
#define SYSREG_DBGWVR14_EL1   SYSREG(2, 0, 0, 14, 6)
#define SYSREG_DBGWCR14_EL1   SYSREG(2, 0, 0, 14, 7)
#define SYSREG_DBGBVR15_EL1   SYSREG(2, 0, 0, 15, 4)
#define SYSREG_DBGBCR15_EL1   SYSREG(2, 0, 0, 15, 5)
#define SYSREG_DBGWVR15_EL1   SYSREG(2, 0, 0, 15, 6)
#define SYSREG_DBGWCR15_EL1   SYSREG(2, 0, 0, 15, 7)

#define WFX_IS_WFE (1 << 0)

#define TMR_CTL_ENABLE  (1 << 0)
#define TMR_CTL_IMASK   (1 << 1)
#define TMR_CTL_ISTATUS (1 << 2)

static void hvf_wfi(CPUState *cpu);

typedef struct HVFVTimer {
    /* Vtimer value during migration and paused state */
    uint64_t vtimer_val;
} HVFVTimer;

static HVFVTimer vtimer;

typedef struct ARMHostCPUFeatures {
    ARMISARegisters isar;
    uint64_t features;
    uint64_t midr;
    uint32_t reset_sctlr;
    const char *dtb_compatible;
} ARMHostCPUFeatures;

static ARMHostCPUFeatures arm_host_cpu_features;

struct hvf_reg_match {
    int reg;
    uint64_t offset;
};

static const struct hvf_reg_match hvf_reg_match[] = {
    { HV_REG_X0,   offsetof(CPUARMState, xregs[0]) },
    { HV_REG_X1,   offsetof(CPUARMState, xregs[1]) },
    { HV_REG_X2,   offsetof(CPUARMState, xregs[2]) },
    { HV_REG_X3,   offsetof(CPUARMState, xregs[3]) },
    { HV_REG_X4,   offsetof(CPUARMState, xregs[4]) },
    { HV_REG_X5,   offsetof(CPUARMState, xregs[5]) },
    { HV_REG_X6,   offsetof(CPUARMState, xregs[6]) },
    { HV_REG_X7,   offsetof(CPUARMState, xregs[7]) },
    { HV_REG_X8,   offsetof(CPUARMState, xregs[8]) },
    { HV_REG_X9,   offsetof(CPUARMState, xregs[9]) },
    { HV_REG_X10,  offsetof(CPUARMState, xregs[10]) },
    { HV_REG_X11,  offsetof(CPUARMState, xregs[11]) },
    { HV_REG_X12,  offsetof(CPUARMState, xregs[12]) },
    { HV_REG_X13,  offsetof(CPUARMState, xregs[13]) },
    { HV_REG_X14,  offsetof(CPUARMState, xregs[14]) },
    { HV_REG_X15,  offsetof(CPUARMState, xregs[15]) },
    { HV_REG_X16,  offsetof(CPUARMState, xregs[16]) },
    { HV_REG_X17,  offsetof(CPUARMState, xregs[17]) },
    { HV_REG_X18,  offsetof(CPUARMState, xregs[18]) },
    { HV_REG_X19,  offsetof(CPUARMState, xregs[19]) },
    { HV_REG_X20,  offsetof(CPUARMState, xregs[20]) },
    { HV_REG_X21,  offsetof(CPUARMState, xregs[21]) },
    { HV_REG_X22,  offsetof(CPUARMState, xregs[22]) },
    { HV_REG_X23,  offsetof(CPUARMState, xregs[23]) },
    { HV_REG_X24,  offsetof(CPUARMState, xregs[24]) },
    { HV_REG_X25,  offsetof(CPUARMState, xregs[25]) },
    { HV_REG_X26,  offsetof(CPUARMState, xregs[26]) },
    { HV_REG_X27,  offsetof(CPUARMState, xregs[27]) },
    { HV_REG_X28,  offsetof(CPUARMState, xregs[28]) },
    { HV_REG_X29,  offsetof(CPUARMState, xregs[29]) },
    { HV_REG_X30,  offsetof(CPUARMState, xregs[30]) },
    { HV_REG_PC,   offsetof(CPUARMState, pc) },
};

static const struct hvf_reg_match hvf_fpreg_match[] = {
    { HV_SIMD_FP_REG_Q0,  offsetof(CPUARMState, vfp.zregs[0]) },
    { HV_SIMD_FP_REG_Q1,  offsetof(CPUARMState, vfp.zregs[1]) },
    { HV_SIMD_FP_REG_Q2,  offsetof(CPUARMState, vfp.zregs[2]) },
    { HV_SIMD_FP_REG_Q3,  offsetof(CPUARMState, vfp.zregs[3]) },
    { HV_SIMD_FP_REG_Q4,  offsetof(CPUARMState, vfp.zregs[4]) },
    { HV_SIMD_FP_REG_Q5,  offsetof(CPUARMState, vfp.zregs[5]) },
    { HV_SIMD_FP_REG_Q6,  offsetof(CPUARMState, vfp.zregs[6]) },
    { HV_SIMD_FP_REG_Q7,  offsetof(CPUARMState, vfp.zregs[7]) },
    { HV_SIMD_FP_REG_Q8,  offsetof(CPUARMState, vfp.zregs[8]) },
    { HV_SIMD_FP_REG_Q9,  offsetof(CPUARMState, vfp.zregs[9]) },
    { HV_SIMD_FP_REG_Q10, offsetof(CPUARMState, vfp.zregs[10]) },
    { HV_SIMD_FP_REG_Q11, offsetof(CPUARMState, vfp.zregs[11]) },
    { HV_SIMD_FP_REG_Q12, offsetof(CPUARMState, vfp.zregs[12]) },
    { HV_SIMD_FP_REG_Q13, offsetof(CPUARMState, vfp.zregs[13]) },
    { HV_SIMD_FP_REG_Q14, offsetof(CPUARMState, vfp.zregs[14]) },
    { HV_SIMD_FP_REG_Q15, offsetof(CPUARMState, vfp.zregs[15]) },
    { HV_SIMD_FP_REG_Q16, offsetof(CPUARMState, vfp.zregs[16]) },
    { HV_SIMD_FP_REG_Q17, offsetof(CPUARMState, vfp.zregs[17]) },
    { HV_SIMD_FP_REG_Q18, offsetof(CPUARMState, vfp.zregs[18]) },
    { HV_SIMD_FP_REG_Q19, offsetof(CPUARMState, vfp.zregs[19]) },
    { HV_SIMD_FP_REG_Q20, offsetof(CPUARMState, vfp.zregs[20]) },
    { HV_SIMD_FP_REG_Q21, offsetof(CPUARMState, vfp.zregs[21]) },
    { HV_SIMD_FP_REG_Q22, offsetof(CPUARMState, vfp.zregs[22]) },
    { HV_SIMD_FP_REG_Q23, offsetof(CPUARMState, vfp.zregs[23]) },
    { HV_SIMD_FP_REG_Q24, offsetof(CPUARMState, vfp.zregs[24]) },
    { HV_SIMD_FP_REG_Q25, offsetof(CPUARMState, vfp.zregs[25]) },
    { HV_SIMD_FP_REG_Q26, offsetof(CPUARMState, vfp.zregs[26]) },
    { HV_SIMD_FP_REG_Q27, offsetof(CPUARMState, vfp.zregs[27]) },
    { HV_SIMD_FP_REG_Q28, offsetof(CPUARMState, vfp.zregs[28]) },
    { HV_SIMD_FP_REG_Q29, offsetof(CPUARMState, vfp.zregs[29]) },
    { HV_SIMD_FP_REG_Q30, offsetof(CPUARMState, vfp.zregs[30]) },
    { HV_SIMD_FP_REG_Q31, offsetof(CPUARMState, vfp.zregs[31]) },
};

struct hvf_sreg_match {
    int reg;
    uint32_t key;
    uint32_t cp_idx;
};

static struct hvf_sreg_match hvf_sreg_match[] = {
    { HV_SYS_REG_DBGBVR0_EL1, HVF_SYSREG(0, 0, 14, 0, 4) },
    { HV_SYS_REG_DBGBCR0_EL1, HVF_SYSREG(0, 0, 14, 0, 5) },
    { HV_SYS_REG_DBGWVR0_EL1, HVF_SYSREG(0, 0, 14, 0, 6) },
    { HV_SYS_REG_DBGWCR0_EL1, HVF_SYSREG(0, 0, 14, 0, 7) },

    { HV_SYS_REG_DBGBVR1_EL1, HVF_SYSREG(0, 1, 14, 0, 4) },
    { HV_SYS_REG_DBGBCR1_EL1, HVF_SYSREG(0, 1, 14, 0, 5) },
    { HV_SYS_REG_DBGWVR1_EL1, HVF_SYSREG(0, 1, 14, 0, 6) },
    { HV_SYS_REG_DBGWCR1_EL1, HVF_SYSREG(0, 1, 14, 0, 7) },

    { HV_SYS_REG_DBGBVR2_EL1, HVF_SYSREG(0, 2, 14, 0, 4) },
    { HV_SYS_REG_DBGBCR2_EL1, HVF_SYSREG(0, 2, 14, 0, 5) },
    { HV_SYS_REG_DBGWVR2_EL1, HVF_SYSREG(0, 2, 14, 0, 6) },
    { HV_SYS_REG_DBGWCR2_EL1, HVF_SYSREG(0, 2, 14, 0, 7) },

    { HV_SYS_REG_DBGBVR3_EL1, HVF_SYSREG(0, 3, 14, 0, 4) },
    { HV_SYS_REG_DBGBCR3_EL1, HVF_SYSREG(0, 3, 14, 0, 5) },
    { HV_SYS_REG_DBGWVR3_EL1, HVF_SYSREG(0, 3, 14, 0, 6) },
    { HV_SYS_REG_DBGWCR3_EL1, HVF_SYSREG(0, 3, 14, 0, 7) },

    { HV_SYS_REG_DBGBVR4_EL1, HVF_SYSREG(0, 4, 14, 0, 4) },
    { HV_SYS_REG_DBGBCR4_EL1, HVF_SYSREG(0, 4, 14, 0, 5) },
    { HV_SYS_REG_DBGWVR4_EL1, HVF_SYSREG(0, 4, 14, 0, 6) },
    { HV_SYS_REG_DBGWCR4_EL1, HVF_SYSREG(0, 4, 14, 0, 7) },

    { HV_SYS_REG_DBGBVR5_EL1, HVF_SYSREG(0, 5, 14, 0, 4) },
    { HV_SYS_REG_DBGBCR5_EL1, HVF_SYSREG(0, 5, 14, 0, 5) },
    { HV_SYS_REG_DBGWVR5_EL1, HVF_SYSREG(0, 5, 14, 0, 6) },
    { HV_SYS_REG_DBGWCR5_EL1, HVF_SYSREG(0, 5, 14, 0, 7) },

    { HV_SYS_REG_DBGBVR6_EL1, HVF_SYSREG(0, 6, 14, 0, 4) },
    { HV_SYS_REG_DBGBCR6_EL1, HVF_SYSREG(0, 6, 14, 0, 5) },
    { HV_SYS_REG_DBGWVR6_EL1, HVF_SYSREG(0, 6, 14, 0, 6) },
    { HV_SYS_REG_DBGWCR6_EL1, HVF_SYSREG(0, 6, 14, 0, 7) },

    { HV_SYS_REG_DBGBVR7_EL1, HVF_SYSREG(0, 7, 14, 0, 4) },
    { HV_SYS_REG_DBGBCR7_EL1, HVF_SYSREG(0, 7, 14, 0, 5) },
    { HV_SYS_REG_DBGWVR7_EL1, HVF_SYSREG(0, 7, 14, 0, 6) },
    { HV_SYS_REG_DBGWCR7_EL1, HVF_SYSREG(0, 7, 14, 0, 7) },

    { HV_SYS_REG_DBGBVR8_EL1, HVF_SYSREG(0, 8, 14, 0, 4) },
    { HV_SYS_REG_DBGBCR8_EL1, HVF_SYSREG(0, 8, 14, 0, 5) },
    { HV_SYS_REG_DBGWVR8_EL1, HVF_SYSREG(0, 8, 14, 0, 6) },
    { HV_SYS_REG_DBGWCR8_EL1, HVF_SYSREG(0, 8, 14, 0, 7) },

    { HV_SYS_REG_DBGBVR9_EL1, HVF_SYSREG(0, 9, 14, 0, 4) },
    { HV_SYS_REG_DBGBCR9_EL1, HVF_SYSREG(0, 9, 14, 0, 5) },
    { HV_SYS_REG_DBGWVR9_EL1, HVF_SYSREG(0, 9, 14, 0, 6) },
    { HV_SYS_REG_DBGWCR9_EL1, HVF_SYSREG(0, 9, 14, 0, 7) },

    { HV_SYS_REG_DBGBVR10_EL1, HVF_SYSREG(0, 10, 14, 0, 4) },
    { HV_SYS_REG_DBGBCR10_EL1, HVF_SYSREG(0, 10, 14, 0, 5) },
    { HV_SYS_REG_DBGWVR10_EL1, HVF_SYSREG(0, 10, 14, 0, 6) },
    { HV_SYS_REG_DBGWCR10_EL1, HVF_SYSREG(0, 10, 14, 0, 7) },

    { HV_SYS_REG_DBGBVR11_EL1, HVF_SYSREG(0, 11, 14, 0, 4) },
    { HV_SYS_REG_DBGBCR11_EL1, HVF_SYSREG(0, 11, 14, 0, 5) },
    { HV_SYS_REG_DBGWVR11_EL1, HVF_SYSREG(0, 11, 14, 0, 6) },
    { HV_SYS_REG_DBGWCR11_EL1, HVF_SYSREG(0, 11, 14, 0, 7) },

    { HV_SYS_REG_DBGBVR12_EL1, HVF_SYSREG(0, 12, 14, 0, 4) },
    { HV_SYS_REG_DBGBCR12_EL1, HVF_SYSREG(0, 12, 14, 0, 5) },
    { HV_SYS_REG_DBGWVR12_EL1, HVF_SYSREG(0, 12, 14, 0, 6) },
    { HV_SYS_REG_DBGWCR12_EL1, HVF_SYSREG(0, 12, 14, 0, 7) },

    { HV_SYS_REG_DBGBVR13_EL1, HVF_SYSREG(0, 13, 14, 0, 4) },
    { HV_SYS_REG_DBGBCR13_EL1, HVF_SYSREG(0, 13, 14, 0, 5) },
    { HV_SYS_REG_DBGWVR13_EL1, HVF_SYSREG(0, 13, 14, 0, 6) },
    { HV_SYS_REG_DBGWCR13_EL1, HVF_SYSREG(0, 13, 14, 0, 7) },

    { HV_SYS_REG_DBGBVR14_EL1, HVF_SYSREG(0, 14, 14, 0, 4) },
    { HV_SYS_REG_DBGBCR14_EL1, HVF_SYSREG(0, 14, 14, 0, 5) },
    { HV_SYS_REG_DBGWVR14_EL1, HVF_SYSREG(0, 14, 14, 0, 6) },
    { HV_SYS_REG_DBGWCR14_EL1, HVF_SYSREG(0, 14, 14, 0, 7) },

    { HV_SYS_REG_DBGBVR15_EL1, HVF_SYSREG(0, 15, 14, 0, 4) },
    { HV_SYS_REG_DBGBCR15_EL1, HVF_SYSREG(0, 15, 14, 0, 5) },
    { HV_SYS_REG_DBGWVR15_EL1, HVF_SYSREG(0, 15, 14, 0, 6) },
    { HV_SYS_REG_DBGWCR15_EL1, HVF_SYSREG(0, 15, 14, 0, 7) },

#ifdef SYNC_NO_RAW_REGS
    /*
     * The registers below are manually synced on init because they are
     * marked as NO_RAW. We still list them to make number space sync easier.
     */
    { HV_SYS_REG_MDCCINT_EL1, HVF_SYSREG(0, 2, 2, 0, 0) },
    { HV_SYS_REG_MIDR_EL1, HVF_SYSREG(0, 0, 3, 0, 0) },
    { HV_SYS_REG_MPIDR_EL1, HVF_SYSREG(0, 0, 3, 0, 5) },
    { HV_SYS_REG_ID_AA64PFR0_EL1, HVF_SYSREG(0, 4, 3, 0, 0) },
#endif
    { HV_SYS_REG_ID_AA64PFR1_EL1, HVF_SYSREG(0, 4, 3, 0, 2) },
    { HV_SYS_REG_ID_AA64DFR0_EL1, HVF_SYSREG(0, 5, 3, 0, 0) },
    { HV_SYS_REG_ID_AA64DFR1_EL1, HVF_SYSREG(0, 5, 3, 0, 1) },
    { HV_SYS_REG_ID_AA64ISAR0_EL1, HVF_SYSREG(0, 6, 3, 0, 0) },
    { HV_SYS_REG_ID_AA64ISAR1_EL1, HVF_SYSREG(0, 6, 3, 0, 1) },
#ifdef SYNC_NO_MMFR0
    /* We keep the hardware MMFR0 around. HW limits are there anyway */
    { HV_SYS_REG_ID_AA64MMFR0_EL1, HVF_SYSREG(0, 7, 3, 0, 0) },
#endif
    { HV_SYS_REG_ID_AA64MMFR1_EL1, HVF_SYSREG(0, 7, 3, 0, 1) },
    { HV_SYS_REG_ID_AA64MMFR2_EL1, HVF_SYSREG(0, 7, 3, 0, 2) },

    { HV_SYS_REG_MDSCR_EL1, HVF_SYSREG(0, 2, 2, 0, 2) },
    { HV_SYS_REG_SCTLR_EL1, HVF_SYSREG(1, 0, 3, 0, 0) },
    { HV_SYS_REG_CPACR_EL1, HVF_SYSREG(1, 0, 3, 0, 2) },
    { HV_SYS_REG_TTBR0_EL1, HVF_SYSREG(2, 0, 3, 0, 0) },
    { HV_SYS_REG_TTBR1_EL1, HVF_SYSREG(2, 0, 3, 0, 1) },
    { HV_SYS_REG_TCR_EL1, HVF_SYSREG(2, 0, 3, 0, 2) },

    { HV_SYS_REG_APIAKEYLO_EL1, HVF_SYSREG(2, 1, 3, 0, 0) },
    { HV_SYS_REG_APIAKEYHI_EL1, HVF_SYSREG(2, 1, 3, 0, 1) },
    { HV_SYS_REG_APIBKEYLO_EL1, HVF_SYSREG(2, 1, 3, 0, 2) },
    { HV_SYS_REG_APIBKEYHI_EL1, HVF_SYSREG(2, 1, 3, 0, 3) },
    { HV_SYS_REG_APDAKEYLO_EL1, HVF_SYSREG(2, 2, 3, 0, 0) },
    { HV_SYS_REG_APDAKEYHI_EL1, HVF_SYSREG(2, 2, 3, 0, 1) },
    { HV_SYS_REG_APDBKEYLO_EL1, HVF_SYSREG(2, 2, 3, 0, 2) },
    { HV_SYS_REG_APDBKEYHI_EL1, HVF_SYSREG(2, 2, 3, 0, 3) },
    { HV_SYS_REG_APGAKEYLO_EL1, HVF_SYSREG(2, 3, 3, 0, 0) },
    { HV_SYS_REG_APGAKEYHI_EL1, HVF_SYSREG(2, 3, 3, 0, 1) },

    { HV_SYS_REG_SPSR_EL1, HVF_SYSREG(4, 0, 3, 0, 0) },
    { HV_SYS_REG_ELR_EL1, HVF_SYSREG(4, 0, 3, 0, 1) },
    { HV_SYS_REG_SP_EL0, HVF_SYSREG(4, 1, 3, 0, 0) },
    { HV_SYS_REG_AFSR0_EL1, HVF_SYSREG(5, 1, 3, 0, 0) },
    { HV_SYS_REG_AFSR1_EL1, HVF_SYSREG(5, 1, 3, 0, 1) },
    { HV_SYS_REG_ESR_EL1, HVF_SYSREG(5, 2, 3, 0, 0) },
    { HV_SYS_REG_FAR_EL1, HVF_SYSREG(6, 0, 3, 0, 0) },
    { HV_SYS_REG_PAR_EL1, HVF_SYSREG(7, 4, 3, 0, 0) },
    { HV_SYS_REG_MAIR_EL1, HVF_SYSREG(10, 2, 3, 0, 0) },
    { HV_SYS_REG_AMAIR_EL1, HVF_SYSREG(10, 3, 3, 0, 0) },
    { HV_SYS_REG_VBAR_EL1, HVF_SYSREG(12, 0, 3, 0, 0) },
    { HV_SYS_REG_CONTEXTIDR_EL1, HVF_SYSREG(13, 0, 3, 0, 1) },
    { HV_SYS_REG_TPIDR_EL1, HVF_SYSREG(13, 0, 3, 0, 4) },
    { HV_SYS_REG_CNTKCTL_EL1, HVF_SYSREG(14, 1, 3, 0, 0) },
    { HV_SYS_REG_CSSELR_EL1, HVF_SYSREG(0, 0, 3, 2, 0) },
    { HV_SYS_REG_TPIDR_EL0, HVF_SYSREG(13, 0, 3, 3, 2) },
    { HV_SYS_REG_TPIDRRO_EL0, HVF_SYSREG(13, 0, 3, 3, 3) },
    { HV_SYS_REG_CNTV_CTL_EL0, HVF_SYSREG(14, 3, 3, 3, 1) },
    { HV_SYS_REG_CNTV_CVAL_EL0, HVF_SYSREG(14, 3, 3, 3, 2) },
    { HV_SYS_REG_SP_EL1, HVF_SYSREG(4, 1, 3, 4, 0) },
};

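/*
 * Read the vCPU state (GPRs and PC, FP/SIMD registers, FPCR/FPSR, CPSR and
 * the synced system registers) out of Hypervisor.framework into CPUARMState.
 */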
int hvf_get_registers(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;
    hv_return_t ret;
    uint64_t val;
    hv_simd_fp_uchar16_t fpval;
    int i;

    for (i = 0; i < ARRAY_SIZE(hvf_reg_match); i++) {
        ret = hv_vcpu_get_reg(cpu->hvf->fd, hvf_reg_match[i].reg, &val);
        *(uint64_t *)((void *)env + hvf_reg_match[i].offset) = val;
        assert_hvf_ok(ret);
    }

    for (i = 0; i < ARRAY_SIZE(hvf_fpreg_match); i++) {
        ret = hv_vcpu_get_simd_fp_reg(cpu->hvf->fd, hvf_fpreg_match[i].reg,
                                      &fpval);
        memcpy((void *)env + hvf_fpreg_match[i].offset, &fpval, sizeof(fpval));
        assert_hvf_ok(ret);
    }

    val = 0;
    ret = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_FPCR, &val);
    assert_hvf_ok(ret);
    vfp_set_fpcr(env, val);

    val = 0;
    ret = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_FPSR, &val);
    assert_hvf_ok(ret);
    vfp_set_fpsr(env, val);

    ret = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_CPSR, &val);
    assert_hvf_ok(ret);
    pstate_write(env, val);

    for (i = 0; i < ARRAY_SIZE(hvf_sreg_match); i++) {
        if (hvf_sreg_match[i].cp_idx == -1) {
            continue;
        }

        if (cpu->hvf->guest_debug_enabled) {
            /* Handle debug registers */
            switch (hvf_sreg_match[i].reg) {
            case HV_SYS_REG_DBGBVR0_EL1:
            case HV_SYS_REG_DBGBCR0_EL1:
            case HV_SYS_REG_DBGWVR0_EL1:
            case HV_SYS_REG_DBGWCR0_EL1:
            case HV_SYS_REG_DBGBVR1_EL1:
            case HV_SYS_REG_DBGBCR1_EL1:
            case HV_SYS_REG_DBGWVR1_EL1:
            case HV_SYS_REG_DBGWCR1_EL1:
            case HV_SYS_REG_DBGBVR2_EL1:
            case HV_SYS_REG_DBGBCR2_EL1:
            case HV_SYS_REG_DBGWVR2_EL1:
            case HV_SYS_REG_DBGWCR2_EL1:
            case HV_SYS_REG_DBGBVR3_EL1:
            case HV_SYS_REG_DBGBCR3_EL1:
            case HV_SYS_REG_DBGWVR3_EL1:
            case HV_SYS_REG_DBGWCR3_EL1:
            case HV_SYS_REG_DBGBVR4_EL1:
            case HV_SYS_REG_DBGBCR4_EL1:
            case HV_SYS_REG_DBGWVR4_EL1:
            case HV_SYS_REG_DBGWCR4_EL1:
            case HV_SYS_REG_DBGBVR5_EL1:
            case HV_SYS_REG_DBGBCR5_EL1:
            case HV_SYS_REG_DBGWVR5_EL1:
            case HV_SYS_REG_DBGWCR5_EL1:
            case HV_SYS_REG_DBGBVR6_EL1:
            case HV_SYS_REG_DBGBCR6_EL1:
            case HV_SYS_REG_DBGWVR6_EL1:
            case HV_SYS_REG_DBGWCR6_EL1:
            case HV_SYS_REG_DBGBVR7_EL1:
            case HV_SYS_REG_DBGBCR7_EL1:
            case HV_SYS_REG_DBGWVR7_EL1:
            case HV_SYS_REG_DBGWCR7_EL1:
            case HV_SYS_REG_DBGBVR8_EL1:
            case HV_SYS_REG_DBGBCR8_EL1:
            case HV_SYS_REG_DBGWVR8_EL1:
            case HV_SYS_REG_DBGWCR8_EL1:
            case HV_SYS_REG_DBGBVR9_EL1:
            case HV_SYS_REG_DBGBCR9_EL1:
            case HV_SYS_REG_DBGWVR9_EL1:
            case HV_SYS_REG_DBGWCR9_EL1:
            case HV_SYS_REG_DBGBVR10_EL1:
            case HV_SYS_REG_DBGBCR10_EL1:
            case HV_SYS_REG_DBGWVR10_EL1:
            case HV_SYS_REG_DBGWCR10_EL1:
            case HV_SYS_REG_DBGBVR11_EL1:
            case HV_SYS_REG_DBGBCR11_EL1:
            case HV_SYS_REG_DBGWVR11_EL1:
            case HV_SYS_REG_DBGWCR11_EL1:
            case HV_SYS_REG_DBGBVR12_EL1:
            case HV_SYS_REG_DBGBCR12_EL1:
            case HV_SYS_REG_DBGWVR12_EL1:
            case HV_SYS_REG_DBGWCR12_EL1:
            case HV_SYS_REG_DBGBVR13_EL1:
            case HV_SYS_REG_DBGBCR13_EL1:
            case HV_SYS_REG_DBGWVR13_EL1:
            case HV_SYS_REG_DBGWCR13_EL1:
            case HV_SYS_REG_DBGBVR14_EL1:
            case HV_SYS_REG_DBGBCR14_EL1:
            case HV_SYS_REG_DBGWVR14_EL1:
            case HV_SYS_REG_DBGWCR14_EL1:
            case HV_SYS_REG_DBGBVR15_EL1:
            case HV_SYS_REG_DBGBCR15_EL1:
            case HV_SYS_REG_DBGWVR15_EL1:
            case HV_SYS_REG_DBGWCR15_EL1: {
                /*
                 * If the guest is being debugged, the vCPU's debug registers
                 * are holding the gdbstub's view of the registers (set in
                 * hvf_arch_update_guest_debug()).
                 * Since the environment is used to store only the guest's view
                 * of the registers, don't update it with the values from the
                 * vCPU but simply keep the values from the previous
                 * environment.
                 */
                const ARMCPRegInfo *ri;
                ri = get_arm_cp_reginfo(arm_cpu->cp_regs, hvf_sreg_match[i].key);
                val = read_raw_cp_reg(env, ri);

                arm_cpu->cpreg_values[hvf_sreg_match[i].cp_idx] = val;
                continue;
            }
            }
        }

        ret = hv_vcpu_get_sys_reg(cpu->hvf->fd, hvf_sreg_match[i].reg, &val);
        assert_hvf_ok(ret);

        arm_cpu->cpreg_values[hvf_sreg_match[i].cp_idx] = val;
    }
    assert(write_list_to_cpustate(arm_cpu));

    aarch64_restore_sp(env, arm_current_el(env));

    return 0;
}

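/*
 * Counterpart of hvf_get_registers: push CPUARMState back into the vCPU,
 * including the vtimer offset.
 */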
int hvf_put_registers(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;
    hv_return_t ret;
    uint64_t val;
    hv_simd_fp_uchar16_t fpval;
    int i;

    for (i = 0; i < ARRAY_SIZE(hvf_reg_match); i++) {
        val = *(uint64_t *)((void *)env + hvf_reg_match[i].offset);
        ret = hv_vcpu_set_reg(cpu->hvf->fd, hvf_reg_match[i].reg, val);
        assert_hvf_ok(ret);
    }

    for (i = 0; i < ARRAY_SIZE(hvf_fpreg_match); i++) {
        memcpy(&fpval, (void *)env + hvf_fpreg_match[i].offset, sizeof(fpval));
        ret = hv_vcpu_set_simd_fp_reg(cpu->hvf->fd, hvf_fpreg_match[i].reg,
                                      fpval);
        assert_hvf_ok(ret);
    }

    ret = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_FPCR, vfp_get_fpcr(env));
    assert_hvf_ok(ret);

    ret = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_FPSR, vfp_get_fpsr(env));
    assert_hvf_ok(ret);

    ret = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_CPSR, pstate_read(env));
    assert_hvf_ok(ret);

    aarch64_save_sp(env, arm_current_el(env));

    assert(write_cpustate_to_list(arm_cpu, false));
    for (i = 0; i < ARRAY_SIZE(hvf_sreg_match); i++) {
        if (hvf_sreg_match[i].cp_idx == -1) {
            continue;
        }

        if (cpu->hvf->guest_debug_enabled) {
            /* Handle debug registers */
            switch (hvf_sreg_match[i].reg) {
            case HV_SYS_REG_DBGBVR0_EL1:
            case HV_SYS_REG_DBGBCR0_EL1:
            case HV_SYS_REG_DBGWVR0_EL1:
            case HV_SYS_REG_DBGWCR0_EL1:
            case HV_SYS_REG_DBGBVR1_EL1:
            case HV_SYS_REG_DBGBCR1_EL1:
            case HV_SYS_REG_DBGWVR1_EL1:
            case HV_SYS_REG_DBGWCR1_EL1:
            case HV_SYS_REG_DBGBVR2_EL1:
            case HV_SYS_REG_DBGBCR2_EL1:
            case HV_SYS_REG_DBGWVR2_EL1:
            case HV_SYS_REG_DBGWCR2_EL1:
            case HV_SYS_REG_DBGBVR3_EL1:
            case HV_SYS_REG_DBGBCR3_EL1:
            case HV_SYS_REG_DBGWVR3_EL1:
            case HV_SYS_REG_DBGWCR3_EL1:
            case HV_SYS_REG_DBGBVR4_EL1:
            case HV_SYS_REG_DBGBCR4_EL1:
            case HV_SYS_REG_DBGWVR4_EL1:
            case HV_SYS_REG_DBGWCR4_EL1:
            case HV_SYS_REG_DBGBVR5_EL1:
            case HV_SYS_REG_DBGBCR5_EL1:
            case HV_SYS_REG_DBGWVR5_EL1:
            case HV_SYS_REG_DBGWCR5_EL1:
            case HV_SYS_REG_DBGBVR6_EL1:
            case HV_SYS_REG_DBGBCR6_EL1:
            case HV_SYS_REG_DBGWVR6_EL1:
            case HV_SYS_REG_DBGWCR6_EL1:
            case HV_SYS_REG_DBGBVR7_EL1:
            case HV_SYS_REG_DBGBCR7_EL1:
            case HV_SYS_REG_DBGWVR7_EL1:
            case HV_SYS_REG_DBGWCR7_EL1:
            case HV_SYS_REG_DBGBVR8_EL1:
            case HV_SYS_REG_DBGBCR8_EL1:
            case HV_SYS_REG_DBGWVR8_EL1:
            case HV_SYS_REG_DBGWCR8_EL1:
            case HV_SYS_REG_DBGBVR9_EL1:
            case HV_SYS_REG_DBGBCR9_EL1:
            case HV_SYS_REG_DBGWVR9_EL1:
            case HV_SYS_REG_DBGWCR9_EL1:
            case HV_SYS_REG_DBGBVR10_EL1:
            case HV_SYS_REG_DBGBCR10_EL1:
            case HV_SYS_REG_DBGWVR10_EL1:
            case HV_SYS_REG_DBGWCR10_EL1:
            case HV_SYS_REG_DBGBVR11_EL1:
            case HV_SYS_REG_DBGBCR11_EL1:
            case HV_SYS_REG_DBGWVR11_EL1:
            case HV_SYS_REG_DBGWCR11_EL1:
            case HV_SYS_REG_DBGBVR12_EL1:
            case HV_SYS_REG_DBGBCR12_EL1:
            case HV_SYS_REG_DBGWVR12_EL1:
            case HV_SYS_REG_DBGWCR12_EL1:
            case HV_SYS_REG_DBGBVR13_EL1:
            case HV_SYS_REG_DBGBCR13_EL1:
            case HV_SYS_REG_DBGWVR13_EL1:
            case HV_SYS_REG_DBGWCR13_EL1:
            case HV_SYS_REG_DBGBVR14_EL1:
            case HV_SYS_REG_DBGBCR14_EL1:
            case HV_SYS_REG_DBGWVR14_EL1:
            case HV_SYS_REG_DBGWCR14_EL1:
            case HV_SYS_REG_DBGBVR15_EL1:
            case HV_SYS_REG_DBGBCR15_EL1:
            case HV_SYS_REG_DBGWVR15_EL1:
            case HV_SYS_REG_DBGWCR15_EL1:
                /*
                 * If the guest is being debugged, the vCPU's debug registers
                 * are already holding the gdbstub's view of the registers (set
                 * in hvf_arch_update_guest_debug()).
                 */
                continue;
            }
        }

        val = arm_cpu->cpreg_values[hvf_sreg_match[i].cp_idx];
        ret = hv_vcpu_set_sys_reg(cpu->hvf->fd, hvf_sreg_match[i].reg, val);
        assert_hvf_ok(ret);
    }

    ret = hv_vcpu_set_vtimer_offset(cpu->hvf->fd, hvf_state->vtimer_offset);
    assert_hvf_ok(ret);

    return 0;
}

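/*
 * Write any dirty QEMU-side register state back into the vCPU so that the
 * direct hv_vcpu_{get,set}_reg calls below operate on current values.
 */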
static void flush_cpu_state(CPUState *cpu)
{
    if (cpu->vcpu_dirty) {
        hvf_put_registers(cpu);
        cpu->vcpu_dirty = false;
    }
}

static void hvf_set_reg(CPUState *cpu, int rt, uint64_t val)
{
    hv_return_t r;

    flush_cpu_state(cpu);

    /* rt == 31 encodes XZR; writes to it are simply discarded */
    if (rt < 31) {
        r = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_X0 + rt, val);
        assert_hvf_ok(r);
    }
}

static uint64_t hvf_get_reg(CPUState *cpu, int rt)
{
    uint64_t val = 0;
    hv_return_t r;

    flush_cpu_state(cpu);

    /* rt == 31 encodes XZR, which always reads as zero */
    if (rt < 31) {
        r = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_X0 + rt, &val);
        assert_hvf_ok(r);
    }

    return val;
}

static bool hvf_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
{
    ARMISARegisters host_isar = {};
    const struct isar_regs {
        int reg;
        uint64_t *val;
    } regs[] = {
        { HV_SYS_REG_ID_AA64PFR0_EL1, &host_isar.id_aa64pfr0 },
        { HV_SYS_REG_ID_AA64PFR1_EL1, &host_isar.id_aa64pfr1 },
        { HV_SYS_REG_ID_AA64DFR0_EL1, &host_isar.id_aa64dfr0 },
        { HV_SYS_REG_ID_AA64DFR1_EL1, &host_isar.id_aa64dfr1 },
        { HV_SYS_REG_ID_AA64ISAR0_EL1, &host_isar.id_aa64isar0 },
        { HV_SYS_REG_ID_AA64ISAR1_EL1, &host_isar.id_aa64isar1 },
        { HV_SYS_REG_ID_AA64MMFR0_EL1, &host_isar.id_aa64mmfr0 },
        { HV_SYS_REG_ID_AA64MMFR1_EL1, &host_isar.id_aa64mmfr1 },
        { HV_SYS_REG_ID_AA64MMFR2_EL1, &host_isar.id_aa64mmfr2 },
    };
    hv_vcpu_t fd;
    hv_return_t r = HV_SUCCESS;
    hv_vcpu_exit_t *exit;
    int i;

    ahcf->dtb_compatible = "arm,arm-v8";
    ahcf->features = (1ULL << ARM_FEATURE_V8) |
                     (1ULL << ARM_FEATURE_NEON) |
                     (1ULL << ARM_FEATURE_AARCH64) |
                     (1ULL << ARM_FEATURE_PMU) |
                     (1ULL << ARM_FEATURE_GENERIC_TIMER);

    /* We set up a scratch vCPU to extract the host's feature ID registers */

    if (hv_vcpu_create(&fd, &exit, NULL) != HV_SUCCESS) {
        return false;
    }

    for (i = 0; i < ARRAY_SIZE(regs); i++) {
        r |= hv_vcpu_get_sys_reg(fd, regs[i].reg, regs[i].val);
    }
    r |= hv_vcpu_get_sys_reg(fd, HV_SYS_REG_MIDR_EL1, &ahcf->midr);
    r |= hv_vcpu_destroy(fd);

    ahcf->isar = host_isar;

    /*
     * A scratch vCPU returns SCTLR 0, so let's fill our default with the M1
     * boot SCTLR from https://github.com/AsahiLinux/m1n1/issues/97
     */
    ahcf->reset_sctlr = 0x30100180;
    /*
     * SCTLR.SPAN == 1 disables the Set-PAN-on-exception behaviour. To improve
     * compatibility, start with it disabled on boot; guest software can
     * re-enable the behaviour by writing the bit back to 0.
     */
    ahcf->reset_sctlr |= 0x00800000;

    /* Make sure we don't advertise AArch32 support for EL0/EL1 */
    if ((host_isar.id_aa64pfr0 & 0xff) != 0x11) {
        return false;
    }

    return r == HV_SUCCESS;
}

void hvf_arm_set_cpu_features_from_host(ARMCPU *cpu)
{
    if (!arm_host_cpu_features.dtb_compatible) {
        if (!hvf_enabled() ||
            !hvf_arm_get_host_cpu_features(&arm_host_cpu_features)) {
            /*
             * We can't report this error yet, so flag it for
             * arm_cpu_realizefn() to report.
             */
            cpu->host_cpu_probe_failed = true;
            return;
        }
    }

    cpu->dtb_compatible = arm_host_cpu_features.dtb_compatible;
    cpu->isar = arm_host_cpu_features.isar;
    cpu->env.features = arm_host_cpu_features.features;
    cpu->midr = arm_host_cpu_features.midr;
    cpu->reset_sctlr = arm_host_cpu_features.reset_sctlr;
}

void hvf_arch_vcpu_destroy(CPUState *cpu)
{
}

int hvf_arch_init_vcpu(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;
    uint32_t sregs_match_len = ARRAY_SIZE(hvf_sreg_match);
    uint32_t sregs_cnt = 0;
    uint64_t pfr;
    hv_return_t ret;
    int i;

    env->aarch64 = true;
    asm volatile("mrs %0, cntfrq_el0" : "=r"(arm_cpu->gt_cntfrq_hz));

    /* Allocate enough space for our sysreg sync */
    arm_cpu->cpreg_indexes = g_renew(uint64_t, arm_cpu->cpreg_indexes,
                                     sregs_match_len);
    arm_cpu->cpreg_values = g_renew(uint64_t, arm_cpu->cpreg_values,
                                    sregs_match_len);
    arm_cpu->cpreg_vmstate_indexes = g_renew(uint64_t,
                                             arm_cpu->cpreg_vmstate_indexes,
                                             sregs_match_len);
    arm_cpu->cpreg_vmstate_values = g_renew(uint64_t,
                                            arm_cpu->cpreg_vmstate_values,
                                            sregs_match_len);

    memset(arm_cpu->cpreg_values, 0, sregs_match_len * sizeof(uint64_t));

    /* Populate cp list for all known sysregs */
    for (i = 0; i < sregs_match_len; i++) {
        const ARMCPRegInfo *ri;
        uint32_t key = hvf_sreg_match[i].key;

        ri = get_arm_cp_reginfo(arm_cpu->cp_regs, key);
        if (ri) {
            assert(!(ri->type & ARM_CP_NO_RAW));
            hvf_sreg_match[i].cp_idx = sregs_cnt;
            arm_cpu->cpreg_indexes[sregs_cnt++] = cpreg_to_kvm_id(key);
        } else {
            hvf_sreg_match[i].cp_idx = -1;
        }
    }
    arm_cpu->cpreg_array_len = sregs_cnt;
    arm_cpu->cpreg_vmstate_array_len = sregs_cnt;

    assert(write_cpustate_to_list(arm_cpu, false));

    /* Set CP_NO_RAW system registers on init */
    ret = hv_vcpu_set_sys_reg(cpu->hvf->fd, HV_SYS_REG_MIDR_EL1,
                              arm_cpu->midr);
    assert_hvf_ok(ret);

    ret = hv_vcpu_set_sys_reg(cpu->hvf->fd, HV_SYS_REG_MPIDR_EL1,
                              arm_cpu->mp_affinity);
    assert_hvf_ok(ret);

    ret = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_ID_AA64PFR0_EL1, &pfr);
    assert_hvf_ok(ret);
    pfr |= env->gicv3state ? (1 << 24) : 0;
    ret = hv_vcpu_set_sys_reg(cpu->hvf->fd, HV_SYS_REG_ID_AA64PFR0_EL1, pfr);
    assert_hvf_ok(ret);

    /* We're limited to underlying hardware caps, override internal versions */
    ret = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_ID_AA64MMFR0_EL1,
                              &arm_cpu->isar.id_aa64mmfr0);
    assert_hvf_ok(ret);

    return 0;
}

void hvf_kick_vcpu_thread(CPUState *cpu)
{
    cpus_kick_thread(cpu);
    hv_vcpus_exit(&cpu->hvf->fd, 1);
}

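/*
 * Inject an exception into the guest at EL1; arm_cpu_do_interrupt() performs
 * the actual PC/PSTATE update.
 */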
static void hvf_raise_exception(CPUState *cpu, uint32_t excp,
                                uint32_t syndrome)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;

    cpu->exception_index = excp;
    env->exception.target_el = 1;
    env->exception.syndrome = syndrome;

    arm_cpu_do_interrupt(cpu);
}

static void hvf_psci_cpu_off(ARMCPU *arm_cpu)
{
    int32_t ret = arm_set_cpu_off(arm_cpu->mp_affinity);
    assert(ret == QEMU_ARM_POWERCTL_RET_SUCCESS);
}

/*
 * Handle a PSCI call.
 *
 * Returns true on success,
 *         false when the PSCI function is unknown.
 */
static bool hvf_handle_psci_call(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;
    uint64_t param[4] = {
        env->xregs[0],
        env->xregs[1],
        env->xregs[2],
        env->xregs[3]
    };
    uint64_t context_id, mpidr;
    bool target_aarch64 = true;
    CPUState *target_cpu_state;
    ARMCPU *target_cpu;
    target_ulong entry;
    int target_el = 1;
    int32_t ret = 0;

    trace_hvf_psci_call(param[0], param[1], param[2], param[3],
                        arm_cpu->mp_affinity);

    switch (param[0]) {
    case QEMU_PSCI_0_2_FN_PSCI_VERSION:
        ret = QEMU_PSCI_VERSION_1_1;
        break;
    case QEMU_PSCI_0_2_FN_MIGRATE_INFO_TYPE:
        ret = QEMU_PSCI_0_2_RET_TOS_MIGRATION_NOT_REQUIRED; /* No trusted OS */
        break;
    case QEMU_PSCI_0_2_FN_AFFINITY_INFO:
    case QEMU_PSCI_0_2_FN64_AFFINITY_INFO:
        mpidr = param[1];

        switch (param[2]) {
        case 0:
            target_cpu_state = arm_get_cpu_by_id(mpidr);
            if (!target_cpu_state) {
                ret = QEMU_PSCI_RET_INVALID_PARAMS;
                break;
            }
            target_cpu = ARM_CPU(target_cpu_state);

            ret = target_cpu->power_state;
            break;
        default:
            /* Everything above affinity level 0 is always on. */
            ret = 0;
        }
        break;
    case QEMU_PSCI_0_2_FN_SYSTEM_RESET:
        qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
        /*
         * QEMU reset and shutdown are async requests, but PSCI
         * mandates that we never return from the reset/shutdown
         * call, so power the CPU off now so it doesn't execute
         * anything further.
         */
        hvf_psci_cpu_off(arm_cpu);
        break;
    case QEMU_PSCI_0_2_FN_SYSTEM_OFF:
        qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
        hvf_psci_cpu_off(arm_cpu);
        break;
    case QEMU_PSCI_0_1_FN_CPU_ON:
    case QEMU_PSCI_0_2_FN_CPU_ON:
    case QEMU_PSCI_0_2_FN64_CPU_ON:
        mpidr = param[1];
        entry = param[2];
        context_id = param[3];
        ret = arm_set_cpu_on(mpidr, entry, context_id,
                             target_el, target_aarch64);
        break;
    case QEMU_PSCI_0_1_FN_CPU_OFF:
    case QEMU_PSCI_0_2_FN_CPU_OFF:
        hvf_psci_cpu_off(arm_cpu);
        break;
    case QEMU_PSCI_0_1_FN_CPU_SUSPEND:
    case QEMU_PSCI_0_2_FN_CPU_SUSPEND:
    case QEMU_PSCI_0_2_FN64_CPU_SUSPEND:
        /* Affinity levels are not supported in QEMU */
        if (param[1] & 0xfffe0000) {
            ret = QEMU_PSCI_RET_INVALID_PARAMS;
            break;
        }
        /* Powerdown is not supported, we always go into WFI */
        env->xregs[0] = 0;
        hvf_wfi(cpu);
        break;
    case QEMU_PSCI_0_1_FN_MIGRATE:
    case QEMU_PSCI_0_2_FN_MIGRATE:
        ret = QEMU_PSCI_RET_NOT_SUPPORTED;
        break;
    case QEMU_PSCI_1_0_FN_PSCI_FEATURES:
        switch (param[1]) {
        case QEMU_PSCI_0_2_FN_PSCI_VERSION:
        case QEMU_PSCI_0_2_FN_MIGRATE_INFO_TYPE:
        case QEMU_PSCI_0_2_FN_AFFINITY_INFO:
        case QEMU_PSCI_0_2_FN64_AFFINITY_INFO:
        case QEMU_PSCI_0_2_FN_SYSTEM_RESET:
        case QEMU_PSCI_0_2_FN_SYSTEM_OFF:
        case QEMU_PSCI_0_1_FN_CPU_ON:
        case QEMU_PSCI_0_2_FN_CPU_ON:
        case QEMU_PSCI_0_2_FN64_CPU_ON:
        case QEMU_PSCI_0_1_FN_CPU_OFF:
        case QEMU_PSCI_0_2_FN_CPU_OFF:
        case QEMU_PSCI_0_1_FN_CPU_SUSPEND:
        case QEMU_PSCI_0_2_FN_CPU_SUSPEND:
        case QEMU_PSCI_0_2_FN64_CPU_SUSPEND:
        case QEMU_PSCI_1_0_FN_PSCI_FEATURES:
            ret = 0;
            break;
        case QEMU_PSCI_0_1_FN_MIGRATE:
        case QEMU_PSCI_0_2_FN_MIGRATE:
        default:
            ret = QEMU_PSCI_RET_NOT_SUPPORTED;
        }
        break;
    default:
        return false;
    }

    env->xregs[0] = ret;
    return true;
}

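/* ID registers occupy the space op0 == 3, op1 == 0, CRn == 0, CRm == 1..7. */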
static bool is_id_sysreg(uint32_t reg)
{
    return SYSREG_OP0(reg) == 3 &&
           SYSREG_OP1(reg) == 0 &&
           SYSREG_CRN(reg) == 0 &&
           SYSREG_CRM(reg) >= 1 &&
           SYSREG_CRM(reg) < 8;
}

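/*
 * Re-encode a trapped sysreg access from the SYSREG() layout into the key
 * format used by QEMU's cp_regs hashtable.
 */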
static uint32_t hvf_reg2cp_reg(uint32_t reg)
{
    return ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP,
                              (reg >> SYSREG_CRN_SHIFT) & SYSREG_CRN_MASK,
                              (reg >> SYSREG_CRM_SHIFT) & SYSREG_CRM_MASK,
                              (reg >> SYSREG_OP0_SHIFT) & SYSREG_OP0_MASK,
                              (reg >> SYSREG_OP1_SHIFT) & SYSREG_OP1_MASK,
                              (reg >> SYSREG_OP2_SHIFT) & SYSREG_OP2_MASK);
}

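/*
 * Try to satisfy a sysreg read through QEMU's cpreg table (used here for the
 * GICv3 CPU interface registers). Returns false if there is no matching
 * entry or the access check fails.
 */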
static bool hvf_sysreg_read_cp(CPUState *cpu, uint32_t reg, uint64_t *val)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;
    const ARMCPRegInfo *ri;

    ri = get_arm_cp_reginfo(arm_cpu->cp_regs, hvf_reg2cp_reg(reg));
    if (ri) {
        if (ri->accessfn) {
            if (ri->accessfn(env, ri, true) != CP_ACCESS_OK) {
                return false;
            }
        }
        if (ri->type & ARM_CP_CONST) {
            *val = ri->resetvalue;
        } else if (ri->readfn) {
            *val = ri->readfn(env, ri);
        } else {
            *val = CPREG_FIELD64(env, ri);
        }
        trace_hvf_vgic_read(ri->name, *val);
        return true;
    }

    return false;
}

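/*
 * Emulate a trapped sysreg read. Returns 0 when the value has been written
 * to rt, or 1 when an UNDEFINED exception was injected instead.
 */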
static int hvf_sysreg_read(CPUState *cpu, uint32_t reg, uint32_t rt)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;
    uint64_t val = 0;

    switch (reg) {
    case SYSREG_CNTPCT_EL0:
        val = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) /
              gt_cntfrq_period_ns(arm_cpu);
        break;
    case SYSREG_PMCR_EL0:
        val = env->cp15.c9_pmcr;
        break;
    case SYSREG_PMCCNTR_EL0:
        pmu_op_start(env);
        val = env->cp15.c15_ccnt;
        pmu_op_finish(env);
        break;
    case SYSREG_PMCNTENCLR_EL0:
        val = env->cp15.c9_pmcnten;
        break;
    case SYSREG_PMOVSCLR_EL0:
        val = env->cp15.c9_pmovsr;
        break;
    case SYSREG_PMSELR_EL0:
        val = env->cp15.c9_pmselr;
        break;
    case SYSREG_PMINTENCLR_EL1:
        val = env->cp15.c9_pminten;
        break;
    case SYSREG_PMCCFILTR_EL0:
        val = env->cp15.pmccfiltr_el0;
        break;
    case SYSREG_PMCNTENSET_EL0:
        val = env->cp15.c9_pmcnten;
        break;
    case SYSREG_PMUSERENR_EL0:
        val = env->cp15.c9_pmuserenr;
        break;
    case SYSREG_PMCEID0_EL0:
    case SYSREG_PMCEID1_EL0:
        /* We can't really count anything yet, declare all events invalid */
        val = 0;
        break;
    case SYSREG_OSLSR_EL1:
        val = env->cp15.oslsr_el1;
        break;
    case SYSREG_OSDLR_EL1:
        /* Dummy register */
        break;
    case SYSREG_ICC_AP0R0_EL1:
    case SYSREG_ICC_AP0R1_EL1:
    case SYSREG_ICC_AP0R2_EL1:
    case SYSREG_ICC_AP0R3_EL1:
    case SYSREG_ICC_AP1R0_EL1:
    case SYSREG_ICC_AP1R1_EL1:
    case SYSREG_ICC_AP1R2_EL1:
    case SYSREG_ICC_AP1R3_EL1:
    case SYSREG_ICC_ASGI1R_EL1:
    case SYSREG_ICC_BPR0_EL1:
    case SYSREG_ICC_BPR1_EL1:
    case SYSREG_ICC_DIR_EL1:
    case SYSREG_ICC_EOIR0_EL1:
    case SYSREG_ICC_EOIR1_EL1:
    case SYSREG_ICC_HPPIR0_EL1:
    case SYSREG_ICC_HPPIR1_EL1:
    case SYSREG_ICC_IAR0_EL1:
    case SYSREG_ICC_IAR1_EL1:
    case SYSREG_ICC_IGRPEN0_EL1:
    case SYSREG_ICC_IGRPEN1_EL1:
    case SYSREG_ICC_PMR_EL1:
    case SYSREG_ICC_SGI0R_EL1:
    case SYSREG_ICC_SGI1R_EL1:
    case SYSREG_ICC_SRE_EL1:
    case SYSREG_ICC_CTLR_EL1:
        /* Call the TCG sysreg handler. This is only safe for GICv3 regs. */
        if (!hvf_sysreg_read_cp(cpu, reg, &val)) {
            hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
        }
        break;
    case SYSREG_DBGBVR0_EL1:
    case SYSREG_DBGBVR1_EL1:
    case SYSREG_DBGBVR2_EL1:
    case SYSREG_DBGBVR3_EL1:
    case SYSREG_DBGBVR4_EL1:
    case SYSREG_DBGBVR5_EL1:
    case SYSREG_DBGBVR6_EL1:
    case SYSREG_DBGBVR7_EL1:
    case SYSREG_DBGBVR8_EL1:
    case SYSREG_DBGBVR9_EL1:
    case SYSREG_DBGBVR10_EL1:
    case SYSREG_DBGBVR11_EL1:
    case SYSREG_DBGBVR12_EL1:
    case SYSREG_DBGBVR13_EL1:
    case SYSREG_DBGBVR14_EL1:
    case SYSREG_DBGBVR15_EL1:
        val = env->cp15.dbgbvr[SYSREG_CRM(reg)];
        break;
    case SYSREG_DBGBCR0_EL1:
    case SYSREG_DBGBCR1_EL1:
    case SYSREG_DBGBCR2_EL1:
    case SYSREG_DBGBCR3_EL1:
    case SYSREG_DBGBCR4_EL1:
    case SYSREG_DBGBCR5_EL1:
    case SYSREG_DBGBCR6_EL1:
    case SYSREG_DBGBCR7_EL1:
    case SYSREG_DBGBCR8_EL1:
    case SYSREG_DBGBCR9_EL1:
    case SYSREG_DBGBCR10_EL1:
    case SYSREG_DBGBCR11_EL1:
    case SYSREG_DBGBCR12_EL1:
    case SYSREG_DBGBCR13_EL1:
    case SYSREG_DBGBCR14_EL1:
    case SYSREG_DBGBCR15_EL1:
        val = env->cp15.dbgbcr[SYSREG_CRM(reg)];
        break;
    case SYSREG_DBGWVR0_EL1:
    case SYSREG_DBGWVR1_EL1:
    case SYSREG_DBGWVR2_EL1:
    case SYSREG_DBGWVR3_EL1:
    case SYSREG_DBGWVR4_EL1:
    case SYSREG_DBGWVR5_EL1:
    case SYSREG_DBGWVR6_EL1:
    case SYSREG_DBGWVR7_EL1:
    case SYSREG_DBGWVR8_EL1:
    case SYSREG_DBGWVR9_EL1:
    case SYSREG_DBGWVR10_EL1:
    case SYSREG_DBGWVR11_EL1:
    case SYSREG_DBGWVR12_EL1:
    case SYSREG_DBGWVR13_EL1:
    case SYSREG_DBGWVR14_EL1:
    case SYSREG_DBGWVR15_EL1:
        val = env->cp15.dbgwvr[SYSREG_CRM(reg)];
        break;
    case SYSREG_DBGWCR0_EL1:
    case SYSREG_DBGWCR1_EL1:
    case SYSREG_DBGWCR2_EL1:
    case SYSREG_DBGWCR3_EL1:
    case SYSREG_DBGWCR4_EL1:
    case SYSREG_DBGWCR5_EL1:
    case SYSREG_DBGWCR6_EL1:
    case SYSREG_DBGWCR7_EL1:
    case SYSREG_DBGWCR8_EL1:
    case SYSREG_DBGWCR9_EL1:
    case SYSREG_DBGWCR10_EL1:
    case SYSREG_DBGWCR11_EL1:
    case SYSREG_DBGWCR12_EL1:
    case SYSREG_DBGWCR13_EL1:
    case SYSREG_DBGWCR14_EL1:
    case SYSREG_DBGWCR15_EL1:
        val = env->cp15.dbgwcr[SYSREG_CRM(reg)];
        break;
    default:
        if (is_id_sysreg(reg)) {
            /* ID system registers read as RES0 */
            val = 0;
            break;
        }
        cpu_synchronize_state(cpu);
        trace_hvf_unhandled_sysreg_read(env->pc, reg,
                                        SYSREG_OP0(reg),
                                        SYSREG_OP1(reg),
                                        SYSREG_CRN(reg),
                                        SYSREG_CRM(reg),
                                        SYSREG_OP2(reg));
        hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
        return 1;
    }

    trace_hvf_sysreg_read(reg,
                          SYSREG_OP0(reg),
                          SYSREG_OP1(reg),
                          SYSREG_CRN(reg),
                          SYSREG_CRM(reg),
                          SYSREG_OP2(reg),
                          val);
    hvf_set_reg(cpu, rt, val);

    return 0;
}

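/*
 * The PMU interrupt is level-triggered: keep it asserted while the PMU is
 * enabled and any enabled counter has its overflow flag set.
 */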
static void pmu_update_irq(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
            (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
}

static bool pmu_event_supported(uint16_t number)
{
    /* No PMU events are countable yet (cf. the PMCEID0/1 reads above) */
    return false;
}

1389 /* Returns true if the counter (pass 31 for PMCCNTR) should count events using
1390  * the current EL, security state, and register configuration.
1391  */
1392 static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
1393 {
1394     uint64_t filter;
1395     bool enabled, filtered = true;
1396     int el = arm_current_el(env);
1397 
1398     enabled = (env->cp15.c9_pmcr & PMCRE) &&
1399               (env->cp15.c9_pmcnten & (1 << counter));
1400 
1401     if (counter == 31) {
1402         filter = env->cp15.pmccfiltr_el0;
1403     } else {
1404         filter = env->cp15.c14_pmevtyper[counter];
1405     }
1406 
1407     if (el == 0) {
1408         filtered = filter & PMXEVTYPER_U;
1409     } else if (el == 1) {
1410         filtered = filter & PMXEVTYPER_P;
1411     }
1412 
    if (counter != 31) {
        /*
         * If not checking PMCCNTR, ensure the counter is set up to count an
         * event we support.
         */
        uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
        if (!pmu_event_supported(event)) {
            return false;
        }
    }

    return enabled && !filtered;
}

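/*
 * Handle a write to PMSWINC_EL0: for every selected counter that is enabled,
 * unfiltered and configured to count the SW_INCR event, bump the counter and
 * raise the overflow flag when it wraps.
 */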
static void pmswinc_write(CPUARMState *env, uint64_t value)
{
    unsigned int i;
    for (i = 0; i < pmu_num_counters(env); i++) {
        /* Increment a counter's count iff: */
        if ((value & (1 << i)) && /* counter's bit is set */
                /* counter is enabled and not filtered */
                pmu_counter_enabled(env, i) &&
                /* counter is SW_INCR */
                (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
            /*
             * Detect if this write causes an overflow since we can't predict
             * PMSWINC overflows like we can for other events
             */
            uint32_t new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;

            if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & INT32_MIN) {
                env->cp15.c9_pmovsr |= (1 << i);
                pmu_update_irq(env);
            }

            env->cp15.c14_pmevcntr[i] = new_pmswinc;
        }
    }
}

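/*
 * Forward a trapped system register write to the TCG cpreg machinery so its
 * writefn (or raw field update) runs; returns false if the register is
 * unknown or its access check fails.
 */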
static bool hvf_sysreg_write_cp(CPUState *cpu, uint32_t reg, uint64_t val)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;
    const ARMCPRegInfo *ri;

    ri = get_arm_cp_reginfo(arm_cpu->cp_regs, hvf_reg2cp_reg(reg));

    if (ri) {
        if (ri->accessfn) {
            if (ri->accessfn(env, ri, false) != CP_ACCESS_OK) {
                return false;
            }
        }
        if (ri->writefn) {
            ri->writefn(env, ri, val);
        } else {
            CPREG_FIELD64(env, ri) = val;
        }

        trace_hvf_vgic_write(ri->name, val);
        return true;
    }

    return false;
}

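/*
 * Emulate a write to a system register that trapped to hvf. Returns 0 if the
 * write was handled, or 1 after injecting an UNDEF for an unhandled register,
 * in which case the caller must not advance the PC.
 */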
static int hvf_sysreg_write(CPUState *cpu, uint32_t reg, uint64_t val)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;

    trace_hvf_sysreg_write(reg,
                           SYSREG_OP0(reg),
                           SYSREG_OP1(reg),
                           SYSREG_CRN(reg),
                           SYSREG_CRM(reg),
                           SYSREG_OP2(reg),
                           val);

    switch (reg) {
    case SYSREG_PMCCNTR_EL0:
        pmu_op_start(env);
        env->cp15.c15_ccnt = val;
        pmu_op_finish(env);
        break;
    case SYSREG_PMCR_EL0:
        pmu_op_start(env);

        if (val & PMCRC) {
            /* The counter has been reset */
            env->cp15.c15_ccnt = 0;
        }

        if (val & PMCRP) {
            unsigned int i;
            for (i = 0; i < pmu_num_counters(env); i++) {
                env->cp15.c14_pmevcntr[i] = 0;
            }
        }

        env->cp15.c9_pmcr &= ~PMCR_WRITABLE_MASK;
        env->cp15.c9_pmcr |= (val & PMCR_WRITABLE_MASK);

        pmu_op_finish(env);
        break;
    case SYSREG_PMUSERENR_EL0:
        env->cp15.c9_pmuserenr = val & 0xf;
        break;
    case SYSREG_PMCNTENSET_EL0:
        env->cp15.c9_pmcnten |= (val & pmu_counter_mask(env));
        break;
    case SYSREG_PMCNTENCLR_EL0:
        env->cp15.c9_pmcnten &= ~(val & pmu_counter_mask(env));
        break;
    case SYSREG_PMINTENCLR_EL1:
        pmu_op_start(env);
        /* PMINTENCLR_EL1 is write-one-to-clear */
        env->cp15.c9_pminten &= ~val;
        pmu_op_finish(env);
        break;
    case SYSREG_PMOVSCLR_EL0:
        pmu_op_start(env);
        env->cp15.c9_pmovsr &= ~val;
        pmu_op_finish(env);
        break;
    case SYSREG_PMSWINC_EL0:
        pmu_op_start(env);
        pmswinc_write(env, val);
        pmu_op_finish(env);
        break;
    case SYSREG_PMSELR_EL0:
        env->cp15.c9_pmselr = val & 0x1f;
        break;
    case SYSREG_PMCCFILTR_EL0:
        pmu_op_start(env);
        env->cp15.pmccfiltr_el0 = val & PMCCFILTR_EL0;
        pmu_op_finish(env);
        break;
    case SYSREG_OSLAR_EL1:
        env->cp15.oslsr_el1 = val & 1;
        break;
    case SYSREG_OSDLR_EL1:
        /* Dummy register */
        break;
    case SYSREG_ICC_AP0R0_EL1:
    case SYSREG_ICC_AP0R1_EL1:
    case SYSREG_ICC_AP0R2_EL1:
    case SYSREG_ICC_AP0R3_EL1:
    case SYSREG_ICC_AP1R0_EL1:
    case SYSREG_ICC_AP1R1_EL1:
    case SYSREG_ICC_AP1R2_EL1:
    case SYSREG_ICC_AP1R3_EL1:
    case SYSREG_ICC_ASGI1R_EL1:
    case SYSREG_ICC_BPR0_EL1:
    case SYSREG_ICC_BPR1_EL1:
    case SYSREG_ICC_CTLR_EL1:
    case SYSREG_ICC_DIR_EL1:
    case SYSREG_ICC_EOIR0_EL1:
    case SYSREG_ICC_EOIR1_EL1:
    case SYSREG_ICC_HPPIR0_EL1:
    case SYSREG_ICC_HPPIR1_EL1:
    case SYSREG_ICC_IAR0_EL1:
    case SYSREG_ICC_IAR1_EL1:
    case SYSREG_ICC_IGRPEN0_EL1:
    case SYSREG_ICC_IGRPEN1_EL1:
    case SYSREG_ICC_PMR_EL1:
    case SYSREG_ICC_SGI0R_EL1:
    case SYSREG_ICC_SGI1R_EL1:
    case SYSREG_ICC_SRE_EL1:
        /* Call the TCG sysreg handler. This is only safe for GICv3 regs. */
        if (!hvf_sysreg_write_cp(cpu, reg, val)) {
            hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
        }
        break;
    case SYSREG_MDSCR_EL1:
        env->cp15.mdscr_el1 = val;
        break;
    case SYSREG_DBGBVR0_EL1:
    case SYSREG_DBGBVR1_EL1:
    case SYSREG_DBGBVR2_EL1:
    case SYSREG_DBGBVR3_EL1:
    case SYSREG_DBGBVR4_EL1:
    case SYSREG_DBGBVR5_EL1:
    case SYSREG_DBGBVR6_EL1:
    case SYSREG_DBGBVR7_EL1:
    case SYSREG_DBGBVR8_EL1:
    case SYSREG_DBGBVR9_EL1:
    case SYSREG_DBGBVR10_EL1:
    case SYSREG_DBGBVR11_EL1:
    case SYSREG_DBGBVR12_EL1:
    case SYSREG_DBGBVR13_EL1:
    case SYSREG_DBGBVR14_EL1:
    case SYSREG_DBGBVR15_EL1:
        env->cp15.dbgbvr[SYSREG_CRM(reg)] = val;
        break;
    case SYSREG_DBGBCR0_EL1:
    case SYSREG_DBGBCR1_EL1:
    case SYSREG_DBGBCR2_EL1:
    case SYSREG_DBGBCR3_EL1:
    case SYSREG_DBGBCR4_EL1:
    case SYSREG_DBGBCR5_EL1:
    case SYSREG_DBGBCR6_EL1:
    case SYSREG_DBGBCR7_EL1:
    case SYSREG_DBGBCR8_EL1:
    case SYSREG_DBGBCR9_EL1:
    case SYSREG_DBGBCR10_EL1:
    case SYSREG_DBGBCR11_EL1:
    case SYSREG_DBGBCR12_EL1:
    case SYSREG_DBGBCR13_EL1:
    case SYSREG_DBGBCR14_EL1:
    case SYSREG_DBGBCR15_EL1:
        env->cp15.dbgbcr[SYSREG_CRM(reg)] = val;
        break;
    case SYSREG_DBGWVR0_EL1:
    case SYSREG_DBGWVR1_EL1:
    case SYSREG_DBGWVR2_EL1:
    case SYSREG_DBGWVR3_EL1:
    case SYSREG_DBGWVR4_EL1:
    case SYSREG_DBGWVR5_EL1:
    case SYSREG_DBGWVR6_EL1:
    case SYSREG_DBGWVR7_EL1:
    case SYSREG_DBGWVR8_EL1:
    case SYSREG_DBGWVR9_EL1:
    case SYSREG_DBGWVR10_EL1:
    case SYSREG_DBGWVR11_EL1:
    case SYSREG_DBGWVR12_EL1:
    case SYSREG_DBGWVR13_EL1:
    case SYSREG_DBGWVR14_EL1:
    case SYSREG_DBGWVR15_EL1:
        env->cp15.dbgwvr[SYSREG_CRM(reg)] = val;
        break;
    case SYSREG_DBGWCR0_EL1:
    case SYSREG_DBGWCR1_EL1:
    case SYSREG_DBGWCR2_EL1:
    case SYSREG_DBGWCR3_EL1:
    case SYSREG_DBGWCR4_EL1:
    case SYSREG_DBGWCR5_EL1:
    case SYSREG_DBGWCR6_EL1:
    case SYSREG_DBGWCR7_EL1:
    case SYSREG_DBGWCR8_EL1:
    case SYSREG_DBGWCR9_EL1:
    case SYSREG_DBGWCR10_EL1:
    case SYSREG_DBGWCR11_EL1:
    case SYSREG_DBGWCR12_EL1:
    case SYSREG_DBGWCR13_EL1:
    case SYSREG_DBGWCR14_EL1:
    case SYSREG_DBGWCR15_EL1:
        env->cp15.dbgwcr[SYSREG_CRM(reg)] = val;
        break;
    default:
        cpu_synchronize_state(cpu);
        trace_hvf_unhandled_sysreg_write(env->pc, reg,
                                         SYSREG_OP0(reg),
                                         SYSREG_OP1(reg),
                                         SYSREG_CRN(reg),
                                         SYSREG_CRM(reg),
                                         SYSREG_OP2(reg));
        hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
        return 1;
    }

    return 0;
}

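/*
 * Forward QEMU's pending IRQ/FIQ lines to the vCPU as hvf pending
 * interrupts before entering the guest.
 */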
static int hvf_inject_interrupts(CPUState *cpu)
{
    if (cpu->interrupt_request & CPU_INTERRUPT_FIQ) {
        trace_hvf_inject_fiq();
        hv_vcpu_set_pending_interrupt(cpu->hvf->fd, HV_INTERRUPT_TYPE_FIQ,
                                      true);
    }

    if (cpu->interrupt_request & CPU_INTERRUPT_HARD) {
        trace_hvf_inject_irq();
        hv_vcpu_set_pending_interrupt(cpu->hvf->fd, HV_INTERRUPT_TYPE_IRQ,
                                      true);
    }

    return 0;
}

static uint64_t hvf_vtimer_val_raw(void)
{
    /*
     * mach_absolute_time() returns the host's raw timer value; subtract the
     * VM-wide vtimer offset we maintain to get the guest's view of the
     * vtimer.
     */
    return mach_absolute_time() - hvf_state->vtimer_offset;
}

static uint64_t hvf_vtimer_val(void)
{
    if (!runstate_is_running()) {
        /* VM is paused, the vtimer value is in vtimer.vtimer_val */
        return vtimer.vtimer_val;
    }

    return hvf_vtimer_val_raw();
}

static void hvf_wait_for_ipi(CPUState *cpu, struct timespec *ts)
{
    /*
     * Use pselect to sleep so that other threads can IPI us while we're
     * sleeping.
     */
    qatomic_set_mb(&cpu->thread_kicked, false);
    qemu_mutex_unlock_iothread();
    pselect(0, NULL, NULL, NULL, ts, &cpu->hvf->unblock_ipi_mask);
    qemu_mutex_lock_iothread();
}

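/*
 * Emulate WFI in user space: sleep until either an IPI arrives or, if the
 * virtual timer is enabled and unmasked, until its next deadline expires.
 */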
static void hvf_wfi(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    struct timespec ts;
    hv_return_t r;
    uint64_t ctl;
    uint64_t cval;
    int64_t ticks_to_sleep;
    uint64_t seconds;
    uint64_t nanos;
    uint32_t cntfrq;

    if (cpu->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_FIQ)) {
        /* Interrupt pending, no need to wait */
        return;
    }

    r = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_CNTV_CTL_EL0, &ctl);
    assert_hvf_ok(r);

    if (!(ctl & TMR_CTL_ENABLE) || (ctl & TMR_CTL_IMASK)) {
        /* Timer disabled or masked, just wait for an IPI. */
        hvf_wait_for_ipi(cpu, NULL);
        return;
    }

    r = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_CNTV_CVAL_EL0, &cval);
    assert_hvf_ok(r);

    ticks_to_sleep = cval - hvf_vtimer_val();
    if (ticks_to_sleep < 0) {
        return;
    }

    /* gt_cntfrq_period_ns() returns the counter period in ns, not a rate */
    cntfrq = gt_cntfrq_period_ns(arm_cpu);
    seconds = muldiv64(ticks_to_sleep, cntfrq, NANOSECONDS_PER_SECOND);
    ticks_to_sleep -= muldiv64(seconds, NANOSECONDS_PER_SECOND, cntfrq);
    nanos = ticks_to_sleep * cntfrq;

    /*
     * Don't sleep for less than the time a context switch would take,
     * so that we can satisfy fast timer requests on the same CPU.
     * Measurements on M1 show the sweet spot to be ~2ms.
     */
    if (!seconds && nanos < (2 * SCALE_MS)) {
        return;
    }

    ts = (struct timespec) { seconds, nanos };
    hvf_wait_for_ipi(cpu, &ts);
}

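/*
 * Resynchronize the vtimer interrupt line after a vtimer-triggered exit:
 * reflect the current CNTV_CTL state on the GIC input and unmask the hvf
 * vtimer again once the timer has stopped asserting.
 */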
static void hvf_sync_vtimer(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    hv_return_t r;
    uint64_t ctl;
    bool irq_state;

    if (!cpu->hvf->vtimer_masked) {
        /* We will get notified on vtimer changes by hvf, nothing to do */
        return;
    }

    r = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_CNTV_CTL_EL0, &ctl);
    assert_hvf_ok(r);

    irq_state = (ctl & (TMR_CTL_ENABLE | TMR_CTL_IMASK | TMR_CTL_ISTATUS)) ==
                (TMR_CTL_ENABLE | TMR_CTL_ISTATUS);
    qemu_set_irq(arm_cpu->gt_timer_outputs[GTIMER_VIRT], irq_state);

    if (!irq_state) {
        /* Timer no longer asserting, we can unmask it */
        hv_vcpu_set_vtimer_mask(cpu->hvf->fd, false);
        cpu->hvf->vtimer_masked = false;
    }
}

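/*
 * Enter the vCPU and handle the resulting VM exit. Returns EXCP_DEBUG for
 * guest debug events, EXCP_INTERRUPT/EXCP_HLT when the main loop should
 * take over, or 0 to run the vCPU again.
 */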
int hvf_vcpu_exec(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;
    int ret;
    hv_vcpu_exit_t *hvf_exit = cpu->hvf->exit;
    hv_return_t r;
    bool advance_pc = false;

    if (!(cpu->singlestep_enabled & SSTEP_NOIRQ) &&
        hvf_inject_interrupts(cpu)) {
        return EXCP_INTERRUPT;
    }

    if (cpu->halted) {
        return EXCP_HLT;
    }

    flush_cpu_state(cpu);

    qemu_mutex_unlock_iothread();
    assert_hvf_ok(hv_vcpu_run(cpu->hvf->fd));

    /* handle VMEXIT */
    uint64_t exit_reason = hvf_exit->reason;
    uint64_t syndrome = hvf_exit->exception.syndrome;
    uint32_t ec = syn_get_ec(syndrome);

    ret = 0;
    qemu_mutex_lock_iothread();
    switch (exit_reason) {
    case HV_EXIT_REASON_EXCEPTION:
        /* This is the main one, handle below. */
        break;
    case HV_EXIT_REASON_VTIMER_ACTIVATED:
        qemu_set_irq(arm_cpu->gt_timer_outputs[GTIMER_VIRT], 1);
        cpu->hvf->vtimer_masked = true;
        return 0;
    case HV_EXIT_REASON_CANCELED:
        /* we got kicked, no exit to process */
        return 0;
    default:
        g_assert_not_reached();
    }

    hvf_sync_vtimer(cpu);

    switch (ec) {
    case EC_SOFTWARESTEP: {
        ret = EXCP_DEBUG;

        if (!cpu->singlestep_enabled) {
            error_report("EC_SOFTWARESTEP but single-stepping not enabled");
        }
        break;
    }
    case EC_AA64_BKPT: {
        ret = EXCP_DEBUG;

        cpu_synchronize_state(cpu);

        if (!hvf_find_sw_breakpoint(cpu, env->pc)) {
            /* Re-inject into the guest */
            ret = 0;
            hvf_raise_exception(cpu, EXCP_BKPT, syn_aa64_bkpt(0));
        }
        break;
    }
    case EC_BREAKPOINT: {
        ret = EXCP_DEBUG;

        cpu_synchronize_state(cpu);

        if (!find_hw_breakpoint(cpu, env->pc)) {
            error_report("EC_BREAKPOINT but unknown hw breakpoint");
        }
        break;
    }
    case EC_WATCHPOINT: {
        ret = EXCP_DEBUG;

        cpu_synchronize_state(cpu);

        CPUWatchpoint *wp =
            find_hw_watchpoint(cpu, hvf_exit->exception.virtual_address);
        if (!wp) {
            error_report("EC_WATCHPOINT but unknown hw watchpoint");
        }
        cpu->watchpoint_hit = wp;
        break;
    }
    case EC_DATAABORT: {
        bool isv = syndrome & ARM_EL_ISV;
        bool iswrite = (syndrome >> 6) & 1;     /* ISS.WnR */
        bool s1ptw = (syndrome >> 7) & 1;       /* ISS.S1PTW */
        uint32_t sas = (syndrome >> 22) & 3;    /* ISS.SAS, log2(access size) */
        uint32_t len = 1 << sas;
        uint32_t srt = (syndrome >> 16) & 0x1f; /* ISS.SRT, transfer register */
        uint32_t cm = (syndrome >> 8) & 0x1;    /* ISS.CM, cache maintenance */
        uint64_t val = 0;

        trace_hvf_data_abort(env->pc, hvf_exit->exception.virtual_address,
                             hvf_exit->exception.physical_address, isv,
                             iswrite, s1ptw, len, srt);

        if (cm) {
            /* We don't cache MMIO regions */
            advance_pc = true;
            break;
        }

        /* We can only handle aborts with a valid instruction syndrome */
        assert(isv);

        if (iswrite) {
            val = hvf_get_reg(cpu, srt);
            address_space_write(&address_space_memory,
                                hvf_exit->exception.physical_address,
                                MEMTXATTRS_UNSPECIFIED, &val, len);
        } else {
            address_space_read(&address_space_memory,
                               hvf_exit->exception.physical_address,
                               MEMTXATTRS_UNSPECIFIED, &val, len);
            hvf_set_reg(cpu, srt, val);
        }

        advance_pc = true;
        break;
    }
    case EC_SYSTEMREGISTERTRAP: {
        bool isread = (syndrome >> 0) & 1;
        uint32_t rt = (syndrome >> 5) & 0x1f;
        uint32_t reg = syndrome & SYSREG_MASK;
        uint64_t val;
        int ret = 0;

        if (isread) {
            ret = hvf_sysreg_read(cpu, reg, rt);
        } else {
            val = hvf_get_reg(cpu, rt);
            ret = hvf_sysreg_write(cpu, reg, val);
        }

        advance_pc = !ret;
        break;
    }
    case EC_WFX_TRAP:
        advance_pc = true;
        if (!(syndrome & WFX_IS_WFE)) {
            hvf_wfi(cpu);
        }
        break;
    case EC_AA64_HVC:
        cpu_synchronize_state(cpu);
        if (arm_cpu->psci_conduit == QEMU_PSCI_CONDUIT_HVC) {
            if (!hvf_handle_psci_call(cpu)) {
                trace_hvf_unknown_hvc(env->xregs[0]);
                /*
                 * SMCCC 1.3 section 5.2 says every unknown SMCCC call
                 * returns -1
                 */
                env->xregs[0] = -1;
            }
        } else {
            trace_hvf_unknown_hvc(env->xregs[0]);
            hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
        }
        break;
    case EC_AA64_SMC:
        cpu_synchronize_state(cpu);
        if (arm_cpu->psci_conduit == QEMU_PSCI_CONDUIT_SMC) {
            advance_pc = true;

            if (!hvf_handle_psci_call(cpu)) {
                trace_hvf_unknown_smc(env->xregs[0]);
                /*
                 * SMCCC 1.3 section 5.2 says every unknown SMCCC call
                 * returns -1
                 */
                env->xregs[0] = -1;
            }
        } else {
            trace_hvf_unknown_smc(env->xregs[0]);
            hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
        }
        break;
    default:
        cpu_synchronize_state(cpu);
        trace_hvf_exit(syndrome, ec, env->pc);
        error_report("0x%llx: unhandled exception ec=0x%x", env->pc, ec);
    }

    if (advance_pc) {
        uint64_t pc;

        flush_cpu_state(cpu);

        r = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_PC, &pc);
        assert_hvf_ok(r);
        pc += 4;
        r = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_PC, pc);
        assert_hvf_ok(r);

        /* Handle single-stepping over instructions which trigger a VM exit */
        if (cpu->singlestep_enabled) {
            ret = EXCP_DEBUG;
        }
    }

    return ret;
}

static const VMStateDescription vmstate_hvf_vtimer = {
    .name = "hvf-vtimer",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(vtimer_val, HVFVTimer),
        VMSTATE_END_OF_LIST()
    },
};

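/*
 * Freeze the vtimer value on VM stop and rebase its offset on resume, so
 * that the guest does not observe time passing while it was paused.
 */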
static void hvf_vm_state_change(void *opaque, bool running, RunState state)
{
    HVFVTimer *s = opaque;

    if (running) {
        /* Update vtimer offset on all CPUs */
        hvf_state->vtimer_offset = mach_absolute_time() - s->vtimer_val;
        cpu_synchronize_all_states();
    } else {
        /* Remember vtimer value on every pause */
        s->vtimer_val = hvf_vtimer_val_raw();
    }
}

int hvf_arch_init(void)
{
    hvf_state->vtimer_offset = mach_absolute_time();
    vmstate_register(NULL, 0, &vmstate_hvf_vtimer, &vtimer);
    qemu_add_vm_change_state_handler(hvf_vm_state_change, &vtimer);

    hvf_arm_init_debug();

    return 0;
}

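/* AArch64 "BRK #0", used as the software breakpoint instruction */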
static const uint32_t brk_insn = 0xd4200000;

int hvf_arch_insert_sw_breakpoint(CPUState *cpu, struct hvf_sw_breakpoint *bp)
{
    if (cpu_memory_rw_debug(cpu, bp->pc, (uint8_t *)&bp->saved_insn, 4, 0) ||
        cpu_memory_rw_debug(cpu, bp->pc, (uint8_t *)&brk_insn, 4, 1)) {
        return -EINVAL;
    }
    return 0;
}

int hvf_arch_remove_sw_breakpoint(CPUState *cpu, struct hvf_sw_breakpoint *bp)
{
    static uint32_t brk;

    if (cpu_memory_rw_debug(cpu, bp->pc, (uint8_t *)&brk, 4, 0) ||
        brk != brk_insn ||
        cpu_memory_rw_debug(cpu, bp->pc, (uint8_t *)&bp->saved_insn, 4, 1)) {
        return -EINVAL;
    }
    return 0;
}

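/*
 * Map gdbstub breakpoint/watchpoint types onto the hardware debug register
 * helpers; unsupported types are rejected with -ENOSYS.
 */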
int hvf_arch_insert_hw_breakpoint(target_ulong addr, target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        return insert_hw_breakpoint(addr);
    case GDB_WATCHPOINT_READ:
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        return insert_hw_watchpoint(addr, len, type);
    default:
        return -ENOSYS;
    }
}

int hvf_arch_remove_hw_breakpoint(target_ulong addr, target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        return delete_hw_breakpoint(addr);
    case GDB_WATCHPOINT_READ:
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        return delete_hw_watchpoint(addr, len, type);
    default:
        return -ENOSYS;
    }
}

void hvf_arch_remove_all_hw_breakpoints(void)
{
    if (cur_hw_wps > 0) {
        g_array_remove_range(hw_watchpoints, 0, cur_hw_wps);
    }
    if (cur_hw_bps > 0) {
        g_array_remove_range(hw_breakpoints, 0, cur_hw_bps);
    }
}

/*
 * Update the vCPU with the gdbstub's view of debug registers. This view
 * consists of all hardware breakpoints and watchpoints inserted so far while
 * debugging the guest.
 */
static void hvf_put_gdbstub_debug_registers(CPUState *cpu)
{
    hv_return_t r = HV_SUCCESS;
    int i;

    for (i = 0; i < cur_hw_bps; i++) {
        HWBreakpoint *bp = get_hw_bp(i);
        r = hv_vcpu_set_sys_reg(cpu->hvf->fd, dbgbcr_regs[i], bp->bcr);
        assert_hvf_ok(r);
        r = hv_vcpu_set_sys_reg(cpu->hvf->fd, dbgbvr_regs[i], bp->bvr);
        assert_hvf_ok(r);
    }
    for (i = cur_hw_bps; i < max_hw_bps; i++) {
        r = hv_vcpu_set_sys_reg(cpu->hvf->fd, dbgbcr_regs[i], 0);
        assert_hvf_ok(r);
        r = hv_vcpu_set_sys_reg(cpu->hvf->fd, dbgbvr_regs[i], 0);
        assert_hvf_ok(r);
    }

    for (i = 0; i < cur_hw_wps; i++) {
        HWWatchpoint *wp = get_hw_wp(i);
        r = hv_vcpu_set_sys_reg(cpu->hvf->fd, dbgwcr_regs[i], wp->wcr);
        assert_hvf_ok(r);
        r = hv_vcpu_set_sys_reg(cpu->hvf->fd, dbgwvr_regs[i], wp->wvr);
        assert_hvf_ok(r);
    }
    for (i = cur_hw_wps; i < max_hw_wps; i++) {
        r = hv_vcpu_set_sys_reg(cpu->hvf->fd, dbgwcr_regs[i], 0);
        assert_hvf_ok(r);
        r = hv_vcpu_set_sys_reg(cpu->hvf->fd, dbgwvr_regs[i], 0);
        assert_hvf_ok(r);
    }
}

/*
 * Update the vCPU with the guest's view of debug registers. This view is kept
 * in the environment at all times.
 */
static void hvf_put_guest_debug_registers(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;
    hv_return_t r = HV_SUCCESS;
    int i;

    for (i = 0; i < max_hw_bps; i++) {
        r = hv_vcpu_set_sys_reg(cpu->hvf->fd, dbgbcr_regs[i],
                                env->cp15.dbgbcr[i]);
        assert_hvf_ok(r);
        r = hv_vcpu_set_sys_reg(cpu->hvf->fd, dbgbvr_regs[i],
                                env->cp15.dbgbvr[i]);
        assert_hvf_ok(r);
    }

    for (i = 0; i < max_hw_wps; i++) {
        r = hv_vcpu_set_sys_reg(cpu->hvf->fd, dbgwcr_regs[i],
                                env->cp15.dbgwcr[i]);
        assert_hvf_ok(r);
        r = hv_vcpu_set_sys_reg(cpu->hvf->fd, dbgwvr_regs[i],
                                env->cp15.dbgwvr[i]);
        assert_hvf_ok(r);
    }
}

static inline bool hvf_arm_hw_debug_active(CPUState *cpu)
{
    return ((cur_hw_wps > 0) || (cur_hw_bps > 0));
}

static void hvf_arch_set_traps(void)
{
    CPUState *cpu;
    bool should_enable_traps = false;
    hv_return_t r = HV_SUCCESS;

    /*
     * Check whether guest debugging is enabled for at least one vCPU; if it
     * is, enable exiting the guest on all vCPUs.
     */
    CPU_FOREACH(cpu) {
        should_enable_traps |= cpu->hvf->guest_debug_enabled;
    }
    CPU_FOREACH(cpu) {
        /* Set whether debug exceptions exit the guest */
        r = hv_vcpu_set_trap_debug_exceptions(cpu->hvf->fd,
                                              should_enable_traps);
        assert_hvf_ok(r);

        /* Set whether accesses to debug registers exit the guest */
        r = hv_vcpu_set_trap_debug_reg_accesses(cpu->hvf->fd,
                                                should_enable_traps);
        assert_hvf_ok(r);
    }
}

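/*
 * Reconfigure the vCPU for the current guest debug state: load either the
 * gdbstub's or the guest's debug registers, toggle single-step and
 * breakpoint exceptions in MDSCR_EL1, and update the debug traps.
 */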
void hvf_arch_update_guest_debug(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;

    /* Check whether guest debugging is enabled */
    cpu->hvf->guest_debug_enabled = cpu->singlestep_enabled ||
                                    hvf_sw_breakpoints_active(cpu) ||
                                    hvf_arm_hw_debug_active(cpu);

    /* Update debug registers */
    if (cpu->hvf->guest_debug_enabled) {
        hvf_put_gdbstub_debug_registers(cpu);
    } else {
        hvf_put_guest_debug_registers(cpu);
    }

    cpu_synchronize_state(cpu);

    /* Enable/disable single-stepping */
    if (cpu->singlestep_enabled) {
        env->cp15.mdscr_el1 =
            deposit64(env->cp15.mdscr_el1, MDSCR_EL1_SS_SHIFT, 1, 1);
        pstate_write(env, pstate_read(env) | PSTATE_SS);
    } else {
        env->cp15.mdscr_el1 =
            deposit64(env->cp15.mdscr_el1, MDSCR_EL1_SS_SHIFT, 1, 0);
    }

    /* Enable/disable Breakpoint exceptions */
    if (hvf_arm_hw_debug_active(cpu)) {
        env->cp15.mdscr_el1 =
            deposit64(env->cp15.mdscr_el1, MDSCR_EL1_MDE_SHIFT, 1, 1);
    } else {
        env->cp15.mdscr_el1 =
            deposit64(env->cp15.mdscr_el1, MDSCR_EL1_MDE_SHIFT, 1, 0);
    }

    hvf_arch_set_traps();
}

inline bool hvf_arch_supports_guest_debug(void)
{
    return true;
}