/*
 * QEMU Hypervisor.framework support for Apple Silicon
 *
 * Copyright 2020 Alexander Graf <agraf@csgraf.de>
 * Copyright 2020 Google LLC
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qemu/log.h"

#include "system/runstate.h"
#include "system/hvf.h"
#include "system/hvf_int.h"
#include "system/hw_accel.h"
#include "hvf_arm.h"
#include "cpregs.h"
#include "cpu-sysregs.h"

#include <mach/mach_time.h>

#include "system/address-spaces.h"
#include "system/memory.h"
#include "hw/boards.h"
#include "hw/irq.h"
#include "qemu/main-loop.h"
#include "system/cpus.h"
#include "arm-powerctl.h"
#include "target/arm/cpu.h"
#include "target/arm/internals.h"
#include "target/arm/multiprocessing.h"
#include "target/arm/gtimer.h"
#include "trace.h"
#include "migration/vmstate.h"

#include "gdbstub/enums.h"

#define MDSCR_EL1_SS_SHIFT  0
#define MDSCR_EL1_MDE_SHIFT 15

static const uint16_t dbgbcr_regs[] = {
    HV_SYS_REG_DBGBCR0_EL1,
    HV_SYS_REG_DBGBCR1_EL1,
    HV_SYS_REG_DBGBCR2_EL1,
    HV_SYS_REG_DBGBCR3_EL1,
    HV_SYS_REG_DBGBCR4_EL1,
    HV_SYS_REG_DBGBCR5_EL1,
    HV_SYS_REG_DBGBCR6_EL1,
    HV_SYS_REG_DBGBCR7_EL1,
    HV_SYS_REG_DBGBCR8_EL1,
    HV_SYS_REG_DBGBCR9_EL1,
    HV_SYS_REG_DBGBCR10_EL1,
    HV_SYS_REG_DBGBCR11_EL1,
    HV_SYS_REG_DBGBCR12_EL1,
    HV_SYS_REG_DBGBCR13_EL1,
    HV_SYS_REG_DBGBCR14_EL1,
    HV_SYS_REG_DBGBCR15_EL1,
};

static const uint16_t dbgbvr_regs[] = {
    HV_SYS_REG_DBGBVR0_EL1,
    HV_SYS_REG_DBGBVR1_EL1,
    HV_SYS_REG_DBGBVR2_EL1,
    HV_SYS_REG_DBGBVR3_EL1,
    HV_SYS_REG_DBGBVR4_EL1,
    HV_SYS_REG_DBGBVR5_EL1,
    HV_SYS_REG_DBGBVR6_EL1,
    HV_SYS_REG_DBGBVR7_EL1,
    HV_SYS_REG_DBGBVR8_EL1,
    HV_SYS_REG_DBGBVR9_EL1,
    HV_SYS_REG_DBGBVR10_EL1,
    HV_SYS_REG_DBGBVR11_EL1,
    HV_SYS_REG_DBGBVR12_EL1,
    HV_SYS_REG_DBGBVR13_EL1,
    HV_SYS_REG_DBGBVR14_EL1,
    HV_SYS_REG_DBGBVR15_EL1,
};

static const uint16_t dbgwcr_regs[] = {
    HV_SYS_REG_DBGWCR0_EL1,
    HV_SYS_REG_DBGWCR1_EL1,
    HV_SYS_REG_DBGWCR2_EL1,
    HV_SYS_REG_DBGWCR3_EL1,
    HV_SYS_REG_DBGWCR4_EL1,
    HV_SYS_REG_DBGWCR5_EL1,
    HV_SYS_REG_DBGWCR6_EL1,
    HV_SYS_REG_DBGWCR7_EL1,
    HV_SYS_REG_DBGWCR8_EL1,
    HV_SYS_REG_DBGWCR9_EL1,
    HV_SYS_REG_DBGWCR10_EL1,
    HV_SYS_REG_DBGWCR11_EL1,
    HV_SYS_REG_DBGWCR12_EL1,
    HV_SYS_REG_DBGWCR13_EL1,
    HV_SYS_REG_DBGWCR14_EL1,
    HV_SYS_REG_DBGWCR15_EL1,
};

static const uint16_t dbgwvr_regs[] = {
    HV_SYS_REG_DBGWVR0_EL1,
    HV_SYS_REG_DBGWVR1_EL1,
    HV_SYS_REG_DBGWVR2_EL1,
    HV_SYS_REG_DBGWVR3_EL1,
    HV_SYS_REG_DBGWVR4_EL1,
    HV_SYS_REG_DBGWVR5_EL1,
    HV_SYS_REG_DBGWVR6_EL1,
    HV_SYS_REG_DBGWVR7_EL1,
    HV_SYS_REG_DBGWVR8_EL1,
    HV_SYS_REG_DBGWVR9_EL1,
    HV_SYS_REG_DBGWVR10_EL1,
    HV_SYS_REG_DBGWVR11_EL1,
    HV_SYS_REG_DBGWVR12_EL1,
    HV_SYS_REG_DBGWVR13_EL1,
    HV_SYS_REG_DBGWVR14_EL1,
    HV_SYS_REG_DBGWVR15_EL1,
};

static inline int hvf_arm_num_brps(hv_vcpu_config_t config)
{
    uint64_t val;
    hv_return_t ret;
    ret = hv_vcpu_config_get_feature_reg(config, HV_FEATURE_REG_ID_AA64DFR0_EL1,
                                         &val);
    assert_hvf_ok(ret);
    /* ID_AA64DFR0_EL1.BRPS holds the number of breakpoints minus 1 */
    return FIELD_EX64(val, ID_AA64DFR0, BRPS) + 1;
}

static inline int hvf_arm_num_wrps(hv_vcpu_config_t config)
{
    uint64_t val;
    hv_return_t ret;
    ret = hv_vcpu_config_get_feature_reg(config, HV_FEATURE_REG_ID_AA64DFR0_EL1,
                                         &val);
    assert_hvf_ok(ret);
    /* ID_AA64DFR0_EL1.WRPS holds the number of watchpoints minus 1 */
    return FIELD_EX64(val, ID_AA64DFR0, WRPS) + 1;
}

void hvf_arm_init_debug(void)
{
    hv_vcpu_config_t config;
    config = hv_vcpu_config_create();

    max_hw_bps = hvf_arm_num_brps(config);
    hw_breakpoints =
        g_array_sized_new(true, true, sizeof(HWBreakpoint), max_hw_bps);

    max_hw_wps = hvf_arm_num_wrps(config);
    hw_watchpoints =
        g_array_sized_new(true, true, sizeof(HWWatchpoint), max_hw_wps);
}

#define HVF_SYSREG(crn, crm, op0, op1, op2) \
        ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, crn, crm, op0, op1, op2)

#define SYSREG_OP0_SHIFT      20
#define SYSREG_OP0_MASK       0x3
#define SYSREG_OP0(sysreg)    ((sysreg >> SYSREG_OP0_SHIFT) & SYSREG_OP0_MASK)
#define SYSREG_OP1_SHIFT      14
#define SYSREG_OP1_MASK       0x7
#define SYSREG_OP1(sysreg)    ((sysreg >> SYSREG_OP1_SHIFT) & SYSREG_OP1_MASK)
#define SYSREG_CRN_SHIFT      10
#define SYSREG_CRN_MASK       0xf
#define SYSREG_CRN(sysreg)    ((sysreg >> SYSREG_CRN_SHIFT) & SYSREG_CRN_MASK)
#define SYSREG_CRM_SHIFT      1
#define SYSREG_CRM_MASK       0xf
#define SYSREG_CRM(sysreg)    ((sysreg >> SYSREG_CRM_SHIFT) & SYSREG_CRM_MASK)
#define SYSREG_OP2_SHIFT      17
#define SYSREG_OP2_MASK       0x7
#define SYSREG_OP2(sysreg)    ((sysreg >> SYSREG_OP2_SHIFT) & SYSREG_OP2_MASK)

#define SYSREG(op0, op1, crn, crm, op2) \
    ((op0 << SYSREG_OP0_SHIFT) | \
     (op1 << SYSREG_OP1_SHIFT) | \
     (crn << SYSREG_CRN_SHIFT) | \
     (crm << SYSREG_CRM_SHIFT) | \
     (op2 << SYSREG_OP2_SHIFT))
#define SYSREG_MASK \
    SYSREG(SYSREG_OP0_MASK, \
           SYSREG_OP1_MASK, \
           SYSREG_CRN_MASK, \
           SYSREG_CRM_MASK, \
           SYSREG_OP2_MASK)
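
/*
 * This SYSREG() layout deliberately matches the ISS encoding of ESR_EL2 for
 * a trapped MSR/MRS/system instruction (op0 at bits [21:20], op2 at [19:17],
 * op1 at [16:14], CRn at [13:10], CRm at [4:1]), so a trapped access can be
 * compared against the SYSREG_* constants below once the Rt and direction
 * bits are masked out. For example:
 *   SYSREG_OSLAR_EL1 == SYSREG(2, 0, 1, 0, 4)
 *                    == (2 << 20) | (4 << 17) | (1 << 10) == 0x280400
 */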
#define SYSREG_OSLAR_EL1      SYSREG(2, 0, 1, 0, 4)
#define SYSREG_OSLSR_EL1      SYSREG(2, 0, 1, 1, 4)
#define SYSREG_OSDLR_EL1      SYSREG(2, 0, 1, 3, 4)
#define SYSREG_LORC_EL1       SYSREG(3, 0, 10, 4, 3)
#define SYSREG_CNTPCT_EL0     SYSREG(3, 3, 14, 0, 1)
#define SYSREG_CNTP_CTL_EL0   SYSREG(3, 3, 14, 2, 1)
#define SYSREG_PMCR_EL0       SYSREG(3, 3, 9, 12, 0)
#define SYSREG_PMUSERENR_EL0  SYSREG(3, 3, 9, 14, 0)
#define SYSREG_PMCNTENSET_EL0 SYSREG(3, 3, 9, 12, 1)
#define SYSREG_PMCNTENCLR_EL0 SYSREG(3, 3, 9, 12, 2)
#define SYSREG_PMINTENCLR_EL1 SYSREG(3, 0, 9, 14, 2)
#define SYSREG_PMOVSCLR_EL0   SYSREG(3, 3, 9, 12, 3)
#define SYSREG_PMSWINC_EL0    SYSREG(3, 3, 9, 12, 4)
#define SYSREG_PMSELR_EL0     SYSREG(3, 3, 9, 12, 5)
#define SYSREG_PMCEID0_EL0    SYSREG(3, 3, 9, 12, 6)
#define SYSREG_PMCEID1_EL0    SYSREG(3, 3, 9, 12, 7)
#define SYSREG_PMCCNTR_EL0    SYSREG(3, 3, 9, 13, 0)
#define SYSREG_PMCCFILTR_EL0  SYSREG(3, 3, 14, 15, 7)

#define SYSREG_ICC_AP0R0_EL1     SYSREG(3, 0, 12, 8, 4)
#define SYSREG_ICC_AP0R1_EL1     SYSREG(3, 0, 12, 8, 5)
#define SYSREG_ICC_AP0R2_EL1     SYSREG(3, 0, 12, 8, 6)
#define SYSREG_ICC_AP0R3_EL1     SYSREG(3, 0, 12, 8, 7)
#define SYSREG_ICC_AP1R0_EL1     SYSREG(3, 0, 12, 9, 0)
#define SYSREG_ICC_AP1R1_EL1     SYSREG(3, 0, 12, 9, 1)
#define SYSREG_ICC_AP1R2_EL1     SYSREG(3, 0, 12, 9, 2)
#define SYSREG_ICC_AP1R3_EL1     SYSREG(3, 0, 12, 9, 3)
#define SYSREG_ICC_ASGI1R_EL1    SYSREG(3, 0, 12, 11, 6)
#define SYSREG_ICC_BPR0_EL1      SYSREG(3, 0, 12, 8, 3)
#define SYSREG_ICC_BPR1_EL1      SYSREG(3, 0, 12, 12, 3)
#define SYSREG_ICC_CTLR_EL1      SYSREG(3, 0, 12, 12, 4)
#define SYSREG_ICC_DIR_EL1       SYSREG(3, 0, 12, 11, 1)
#define SYSREG_ICC_EOIR0_EL1     SYSREG(3, 0, 12, 8, 1)
#define SYSREG_ICC_EOIR1_EL1     SYSREG(3, 0, 12, 12, 1)
#define SYSREG_ICC_HPPIR0_EL1    SYSREG(3, 0, 12, 8, 2)
#define SYSREG_ICC_HPPIR1_EL1    SYSREG(3, 0, 12, 12, 2)
#define SYSREG_ICC_IAR0_EL1      SYSREG(3, 0, 12, 8, 0)
#define SYSREG_ICC_IAR1_EL1      SYSREG(3, 0, 12, 12, 0)
#define SYSREG_ICC_IGRPEN0_EL1   SYSREG(3, 0, 12, 12, 6)
#define SYSREG_ICC_IGRPEN1_EL1   SYSREG(3, 0, 12, 12, 7)
#define SYSREG_ICC_PMR_EL1       SYSREG(3, 0, 4, 6, 0)
#define SYSREG_ICC_RPR_EL1       SYSREG(3, 0, 12, 11, 3)
#define SYSREG_ICC_SGI0R_EL1     SYSREG(3, 0, 12, 11, 7)
#define SYSREG_ICC_SGI1R_EL1     SYSREG(3, 0, 12, 11, 5)
#define SYSREG_ICC_SRE_EL1       SYSREG(3, 0, 12, 12, 5)

#define SYSREG_MDSCR_EL1      SYSREG(2, 0, 0, 2, 2)
#define SYSREG_DBGBVR0_EL1    SYSREG(2, 0, 0, 0, 4)
#define SYSREG_DBGBCR0_EL1    SYSREG(2, 0, 0, 0, 5)
#define SYSREG_DBGWVR0_EL1    SYSREG(2, 0, 0, 0, 6)
#define SYSREG_DBGWCR0_EL1    SYSREG(2, 0, 0, 0, 7)
#define SYSREG_DBGBVR1_EL1    SYSREG(2, 0, 0, 1, 4)
#define SYSREG_DBGBCR1_EL1    SYSREG(2, 0, 0, 1, 5)
#define SYSREG_DBGWVR1_EL1    SYSREG(2, 0, 0, 1, 6)
#define SYSREG_DBGWCR1_EL1    SYSREG(2, 0, 0, 1, 7)
#define SYSREG_DBGBVR2_EL1    SYSREG(2, 0, 0, 2, 4)
#define SYSREG_DBGBCR2_EL1    SYSREG(2, 0, 0, 2, 5)
#define SYSREG_DBGWVR2_EL1    SYSREG(2, 0, 0, 2, 6)
#define SYSREG_DBGWCR2_EL1    SYSREG(2, 0, 0, 2, 7)
#define SYSREG_DBGBVR3_EL1    SYSREG(2, 0, 0, 3, 4)
#define SYSREG_DBGBCR3_EL1    SYSREG(2, 0, 0, 3, 5)
#define SYSREG_DBGWVR3_EL1    SYSREG(2, 0, 0, 3, 6)
#define SYSREG_DBGWCR3_EL1    SYSREG(2, 0, 0, 3, 7)
#define SYSREG_DBGBVR4_EL1    SYSREG(2, 0, 0, 4, 4)
#define SYSREG_DBGBCR4_EL1    SYSREG(2, 0, 0, 4, 5)
#define SYSREG_DBGWVR4_EL1    SYSREG(2, 0, 0, 4, 6)
#define SYSREG_DBGWCR4_EL1    SYSREG(2, 0, 0, 4, 7)
#define SYSREG_DBGBVR5_EL1    SYSREG(2, 0, 0, 5, 4)
#define SYSREG_DBGBCR5_EL1    SYSREG(2, 0, 0, 5, 5)
#define SYSREG_DBGWVR5_EL1    SYSREG(2, 0, 0, 5, 6)
#define SYSREG_DBGWCR5_EL1    SYSREG(2, 0, 0, 5, 7)
#define SYSREG_DBGBVR6_EL1    SYSREG(2, 0, 0, 6, 4)
#define SYSREG_DBGBCR6_EL1    SYSREG(2, 0, 0, 6, 5)
#define SYSREG_DBGWVR6_EL1    SYSREG(2, 0, 0, 6, 6)
#define SYSREG_DBGWCR6_EL1    SYSREG(2, 0, 0, 6, 7)
#define SYSREG_DBGBVR7_EL1    SYSREG(2, 0, 0, 7, 4)
#define SYSREG_DBGBCR7_EL1    SYSREG(2, 0, 0, 7, 5)
#define SYSREG_DBGWVR7_EL1    SYSREG(2, 0, 0, 7, 6)
#define SYSREG_DBGWCR7_EL1    SYSREG(2, 0, 0, 7, 7)
#define SYSREG_DBGBVR8_EL1    SYSREG(2, 0, 0, 8, 4)
#define SYSREG_DBGBCR8_EL1    SYSREG(2, 0, 0, 8, 5)
#define SYSREG_DBGWVR8_EL1    SYSREG(2, 0, 0, 8, 6)
#define SYSREG_DBGWCR8_EL1    SYSREG(2, 0, 0, 8, 7)
#define SYSREG_DBGBVR9_EL1    SYSREG(2, 0, 0, 9, 4)
#define SYSREG_DBGBCR9_EL1    SYSREG(2, 0, 0, 9, 5)
#define SYSREG_DBGWVR9_EL1    SYSREG(2, 0, 0, 9, 6)
#define SYSREG_DBGWCR9_EL1    SYSREG(2, 0, 0, 9, 7)
#define SYSREG_DBGBVR10_EL1   SYSREG(2, 0, 0, 10, 4)
#define SYSREG_DBGBCR10_EL1   SYSREG(2, 0, 0, 10, 5)
#define SYSREG_DBGWVR10_EL1   SYSREG(2, 0, 0, 10, 6)
#define SYSREG_DBGWCR10_EL1   SYSREG(2, 0, 0, 10, 7)
#define SYSREG_DBGBVR11_EL1   SYSREG(2, 0, 0, 11, 4)
#define SYSREG_DBGBCR11_EL1   SYSREG(2, 0, 0, 11, 5)
#define SYSREG_DBGWVR11_EL1   SYSREG(2, 0, 0, 11, 6)
#define SYSREG_DBGWCR11_EL1   SYSREG(2, 0, 0, 11, 7)
#define SYSREG_DBGBVR12_EL1   SYSREG(2, 0, 0, 12, 4)
#define SYSREG_DBGBCR12_EL1   SYSREG(2, 0, 0, 12, 5)
#define SYSREG_DBGWVR12_EL1   SYSREG(2, 0, 0, 12, 6)
#define SYSREG_DBGWCR12_EL1   SYSREG(2, 0, 0, 12, 7)
#define SYSREG_DBGBVR13_EL1   SYSREG(2, 0, 0, 13, 4)
#define SYSREG_DBGBCR13_EL1   SYSREG(2, 0, 0, 13, 5)
#define SYSREG_DBGWVR13_EL1   SYSREG(2, 0, 0, 13, 6)
#define SYSREG_DBGWCR13_EL1   SYSREG(2, 0, 0, 13, 7)
#define SYSREG_DBGBVR14_EL1   SYSREG(2, 0, 0, 14, 4)
#define SYSREG_DBGBCR14_EL1   SYSREG(2, 0, 0, 14, 5)
#define SYSREG_DBGWVR14_EL1   SYSREG(2, 0, 0, 14, 6)
#define SYSREG_DBGWCR14_EL1   SYSREG(2, 0, 0, 14, 7)
#define SYSREG_DBGBVR15_EL1   SYSREG(2, 0, 0, 15, 4)
#define SYSREG_DBGBCR15_EL1   SYSREG(2, 0, 0, 15, 5)
#define SYSREG_DBGWVR15_EL1   SYSREG(2, 0, 0, 15, 6)
#define SYSREG_DBGWCR15_EL1   SYSREG(2, 0, 0, 15, 7)

#define WFX_IS_WFE (1 << 0)

#define TMR_CTL_ENABLE  (1 << 0)
#define TMR_CTL_IMASK   (1 << 1)
#define TMR_CTL_ISTATUS (1 << 2)
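
/*
 * The TMR_CTL_* bits follow the architected CNT{P,V}_CTL_EL0 layout:
 * bit 0 enables the timer, bit 1 masks its interrupt, and bit 2 is the
 * read-only timer condition status.
 */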

static void hvf_wfi(CPUState *cpu);

static uint32_t chosen_ipa_bit_size;

typedef struct HVFVTimer {
    /* Vtimer value during migration and paused state */
    uint64_t vtimer_val;
} HVFVTimer;

static HVFVTimer vtimer;
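
/*
 * Note: the host's virtual counter keeps counting while the VM is stopped,
 * so the current vtimer value is saved in vtimer.vtimer_val when the VM
 * pauses and is used on resume to recompute hvf_state->vtimer_offset
 * (applied in hvf_put_registers()), keeping the pause invisible to the
 * guest.
 */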

typedef struct ARMHostCPUFeatures {
    ARMISARegisters isar;
    uint64_t features;
    uint64_t midr;
    uint32_t reset_sctlr;
    const char *dtb_compatible;
} ARMHostCPUFeatures;

static ARMHostCPUFeatures arm_host_cpu_features;

struct hvf_reg_match {
    int reg;
    uint64_t offset;
};

static const struct hvf_reg_match hvf_reg_match[] = {
    { HV_REG_X0,   offsetof(CPUARMState, xregs[0]) },
    { HV_REG_X1,   offsetof(CPUARMState, xregs[1]) },
    { HV_REG_X2,   offsetof(CPUARMState, xregs[2]) },
    { HV_REG_X3,   offsetof(CPUARMState, xregs[3]) },
    { HV_REG_X4,   offsetof(CPUARMState, xregs[4]) },
    { HV_REG_X5,   offsetof(CPUARMState, xregs[5]) },
    { HV_REG_X6,   offsetof(CPUARMState, xregs[6]) },
    { HV_REG_X7,   offsetof(CPUARMState, xregs[7]) },
    { HV_REG_X8,   offsetof(CPUARMState, xregs[8]) },
    { HV_REG_X9,   offsetof(CPUARMState, xregs[9]) },
    { HV_REG_X10,  offsetof(CPUARMState, xregs[10]) },
    { HV_REG_X11,  offsetof(CPUARMState, xregs[11]) },
    { HV_REG_X12,  offsetof(CPUARMState, xregs[12]) },
    { HV_REG_X13,  offsetof(CPUARMState, xregs[13]) },
    { HV_REG_X14,  offsetof(CPUARMState, xregs[14]) },
    { HV_REG_X15,  offsetof(CPUARMState, xregs[15]) },
    { HV_REG_X16,  offsetof(CPUARMState, xregs[16]) },
    { HV_REG_X17,  offsetof(CPUARMState, xregs[17]) },
    { HV_REG_X18,  offsetof(CPUARMState, xregs[18]) },
    { HV_REG_X19,  offsetof(CPUARMState, xregs[19]) },
    { HV_REG_X20,  offsetof(CPUARMState, xregs[20]) },
    { HV_REG_X21,  offsetof(CPUARMState, xregs[21]) },
    { HV_REG_X22,  offsetof(CPUARMState, xregs[22]) },
    { HV_REG_X23,  offsetof(CPUARMState, xregs[23]) },
    { HV_REG_X24,  offsetof(CPUARMState, xregs[24]) },
    { HV_REG_X25,  offsetof(CPUARMState, xregs[25]) },
    { HV_REG_X26,  offsetof(CPUARMState, xregs[26]) },
    { HV_REG_X27,  offsetof(CPUARMState, xregs[27]) },
    { HV_REG_X28,  offsetof(CPUARMState, xregs[28]) },
    { HV_REG_X29,  offsetof(CPUARMState, xregs[29]) },
    { HV_REG_X30,  offsetof(CPUARMState, xregs[30]) },
    { HV_REG_PC,   offsetof(CPUARMState, pc) },
};

static const struct hvf_reg_match hvf_fpreg_match[] = {
    { HV_SIMD_FP_REG_Q0,  offsetof(CPUARMState, vfp.zregs[0]) },
    { HV_SIMD_FP_REG_Q1,  offsetof(CPUARMState, vfp.zregs[1]) },
    { HV_SIMD_FP_REG_Q2,  offsetof(CPUARMState, vfp.zregs[2]) },
    { HV_SIMD_FP_REG_Q3,  offsetof(CPUARMState, vfp.zregs[3]) },
    { HV_SIMD_FP_REG_Q4,  offsetof(CPUARMState, vfp.zregs[4]) },
    { HV_SIMD_FP_REG_Q5,  offsetof(CPUARMState, vfp.zregs[5]) },
    { HV_SIMD_FP_REG_Q6,  offsetof(CPUARMState, vfp.zregs[6]) },
    { HV_SIMD_FP_REG_Q7,  offsetof(CPUARMState, vfp.zregs[7]) },
    { HV_SIMD_FP_REG_Q8,  offsetof(CPUARMState, vfp.zregs[8]) },
    { HV_SIMD_FP_REG_Q9,  offsetof(CPUARMState, vfp.zregs[9]) },
    { HV_SIMD_FP_REG_Q10, offsetof(CPUARMState, vfp.zregs[10]) },
    { HV_SIMD_FP_REG_Q11, offsetof(CPUARMState, vfp.zregs[11]) },
    { HV_SIMD_FP_REG_Q12, offsetof(CPUARMState, vfp.zregs[12]) },
    { HV_SIMD_FP_REG_Q13, offsetof(CPUARMState, vfp.zregs[13]) },
    { HV_SIMD_FP_REG_Q14, offsetof(CPUARMState, vfp.zregs[14]) },
    { HV_SIMD_FP_REG_Q15, offsetof(CPUARMState, vfp.zregs[15]) },
    { HV_SIMD_FP_REG_Q16, offsetof(CPUARMState, vfp.zregs[16]) },
    { HV_SIMD_FP_REG_Q17, offsetof(CPUARMState, vfp.zregs[17]) },
    { HV_SIMD_FP_REG_Q18, offsetof(CPUARMState, vfp.zregs[18]) },
    { HV_SIMD_FP_REG_Q19, offsetof(CPUARMState, vfp.zregs[19]) },
    { HV_SIMD_FP_REG_Q20, offsetof(CPUARMState, vfp.zregs[20]) },
    { HV_SIMD_FP_REG_Q21, offsetof(CPUARMState, vfp.zregs[21]) },
    { HV_SIMD_FP_REG_Q22, offsetof(CPUARMState, vfp.zregs[22]) },
    { HV_SIMD_FP_REG_Q23, offsetof(CPUARMState, vfp.zregs[23]) },
    { HV_SIMD_FP_REG_Q24, offsetof(CPUARMState, vfp.zregs[24]) },
    { HV_SIMD_FP_REG_Q25, offsetof(CPUARMState, vfp.zregs[25]) },
    { HV_SIMD_FP_REG_Q26, offsetof(CPUARMState, vfp.zregs[26]) },
    { HV_SIMD_FP_REG_Q27, offsetof(CPUARMState, vfp.zregs[27]) },
    { HV_SIMD_FP_REG_Q28, offsetof(CPUARMState, vfp.zregs[28]) },
    { HV_SIMD_FP_REG_Q29, offsetof(CPUARMState, vfp.zregs[29]) },
    { HV_SIMD_FP_REG_Q30, offsetof(CPUARMState, vfp.zregs[30]) },
    { HV_SIMD_FP_REG_Q31, offsetof(CPUARMState, vfp.zregs[31]) },
};
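
/*
 * Each HV_SIMD_FP_REG_Qn is 128 bits wide and maps onto the low 128 bits of
 * the corresponding vfp.zregs[n] entry, which is why the 16-byte memcpy in
 * hvf_get_registers()/hvf_put_registers() is sufficient.
 */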

struct hvf_sreg_match {
    int reg;
    uint32_t key;
    uint32_t cp_idx;
};
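
/*
 * cp_idx is filled in by hvf_arch_init_vcpu(): it is the index of the
 * register in the cpreg_values/cpreg_indexes arrays, or -1 when the register
 * is absent from the CPU's cpreg hashtable and must be skipped while syncing.
 */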

static struct hvf_sreg_match hvf_sreg_match[] = {
    { HV_SYS_REG_DBGBVR0_EL1, HVF_SYSREG(0, 0, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR0_EL1, HVF_SYSREG(0, 0, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR0_EL1, HVF_SYSREG(0, 0, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR0_EL1, HVF_SYSREG(0, 0, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR1_EL1, HVF_SYSREG(0, 1, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR1_EL1, HVF_SYSREG(0, 1, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR1_EL1, HVF_SYSREG(0, 1, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR1_EL1, HVF_SYSREG(0, 1, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR2_EL1, HVF_SYSREG(0, 2, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR2_EL1, HVF_SYSREG(0, 2, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR2_EL1, HVF_SYSREG(0, 2, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR2_EL1, HVF_SYSREG(0, 2, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR3_EL1, HVF_SYSREG(0, 3, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR3_EL1, HVF_SYSREG(0, 3, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR3_EL1, HVF_SYSREG(0, 3, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR3_EL1, HVF_SYSREG(0, 3, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR4_EL1, HVF_SYSREG(0, 4, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR4_EL1, HVF_SYSREG(0, 4, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR4_EL1, HVF_SYSREG(0, 4, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR4_EL1, HVF_SYSREG(0, 4, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR5_EL1, HVF_SYSREG(0, 5, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR5_EL1, HVF_SYSREG(0, 5, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR5_EL1, HVF_SYSREG(0, 5, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR5_EL1, HVF_SYSREG(0, 5, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR6_EL1, HVF_SYSREG(0, 6, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR6_EL1, HVF_SYSREG(0, 6, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR6_EL1, HVF_SYSREG(0, 6, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR6_EL1, HVF_SYSREG(0, 6, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR7_EL1, HVF_SYSREG(0, 7, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR7_EL1, HVF_SYSREG(0, 7, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR7_EL1, HVF_SYSREG(0, 7, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR7_EL1, HVF_SYSREG(0, 7, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR8_EL1, HVF_SYSREG(0, 8, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR8_EL1, HVF_SYSREG(0, 8, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR8_EL1, HVF_SYSREG(0, 8, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR8_EL1, HVF_SYSREG(0, 8, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR9_EL1, HVF_SYSREG(0, 9, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR9_EL1, HVF_SYSREG(0, 9, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR9_EL1, HVF_SYSREG(0, 9, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR9_EL1, HVF_SYSREG(0, 9, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR10_EL1, HVF_SYSREG(0, 10, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR10_EL1, HVF_SYSREG(0, 10, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR10_EL1, HVF_SYSREG(0, 10, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR10_EL1, HVF_SYSREG(0, 10, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR11_EL1, HVF_SYSREG(0, 11, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR11_EL1, HVF_SYSREG(0, 11, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR11_EL1, HVF_SYSREG(0, 11, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR11_EL1, HVF_SYSREG(0, 11, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR12_EL1, HVF_SYSREG(0, 12, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR12_EL1, HVF_SYSREG(0, 12, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR12_EL1, HVF_SYSREG(0, 12, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR12_EL1, HVF_SYSREG(0, 12, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR13_EL1, HVF_SYSREG(0, 13, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR13_EL1, HVF_SYSREG(0, 13, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR13_EL1, HVF_SYSREG(0, 13, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR13_EL1, HVF_SYSREG(0, 13, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR14_EL1, HVF_SYSREG(0, 14, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR14_EL1, HVF_SYSREG(0, 14, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR14_EL1, HVF_SYSREG(0, 14, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR14_EL1, HVF_SYSREG(0, 14, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR15_EL1, HVF_SYSREG(0, 15, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR15_EL1, HVF_SYSREG(0, 15, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR15_EL1, HVF_SYSREG(0, 15, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR15_EL1, HVF_SYSREG(0, 15, 2, 0, 7) },

#ifdef SYNC_NO_RAW_REGS
    /*
     * The registers below are manually synced on init because they are
     * marked as NO_RAW. We still list them to make number space sync easier.
     */
    { HV_SYS_REG_MDCCINT_EL1, HVF_SYSREG(0, 2, 2, 0, 0) },
    { HV_SYS_REG_MIDR_EL1, HVF_SYSREG(0, 0, 3, 0, 0) },
    { HV_SYS_REG_MPIDR_EL1, HVF_SYSREG(0, 0, 3, 0, 5) },
    { HV_SYS_REG_ID_AA64PFR0_EL1, HVF_SYSREG(0, 4, 3, 0, 0) },
#endif
    { HV_SYS_REG_ID_AA64PFR1_EL1, HVF_SYSREG(0, 4, 3, 0, 1) },
    { HV_SYS_REG_ID_AA64DFR0_EL1, HVF_SYSREG(0, 5, 3, 0, 0) },
    { HV_SYS_REG_ID_AA64DFR1_EL1, HVF_SYSREG(0, 5, 3, 0, 1) },
    { HV_SYS_REG_ID_AA64ISAR0_EL1, HVF_SYSREG(0, 6, 3, 0, 0) },
    { HV_SYS_REG_ID_AA64ISAR1_EL1, HVF_SYSREG(0, 6, 3, 0, 1) },
#ifdef SYNC_NO_MMFR0
    /* We keep the hardware MMFR0 around. HW limits are there anyway */
    { HV_SYS_REG_ID_AA64MMFR0_EL1, HVF_SYSREG(0, 7, 3, 0, 0) },
#endif
    { HV_SYS_REG_ID_AA64MMFR1_EL1, HVF_SYSREG(0, 7, 3, 0, 1) },
    { HV_SYS_REG_ID_AA64MMFR2_EL1, HVF_SYSREG(0, 7, 3, 0, 2) },
    /* Add ID_AA64MMFR3_EL1 here when HVF supports it */

    { HV_SYS_REG_MDSCR_EL1, HVF_SYSREG(0, 2, 2, 0, 2) },
    { HV_SYS_REG_SCTLR_EL1, HVF_SYSREG(1, 0, 3, 0, 0) },
    { HV_SYS_REG_CPACR_EL1, HVF_SYSREG(1, 0, 3, 0, 2) },
    { HV_SYS_REG_TTBR0_EL1, HVF_SYSREG(2, 0, 3, 0, 0) },
    { HV_SYS_REG_TTBR1_EL1, HVF_SYSREG(2, 0, 3, 0, 1) },
    { HV_SYS_REG_TCR_EL1, HVF_SYSREG(2, 0, 3, 0, 2) },

    { HV_SYS_REG_APIAKEYLO_EL1, HVF_SYSREG(2, 1, 3, 0, 0) },
    { HV_SYS_REG_APIAKEYHI_EL1, HVF_SYSREG(2, 1, 3, 0, 1) },
    { HV_SYS_REG_APIBKEYLO_EL1, HVF_SYSREG(2, 1, 3, 0, 2) },
    { HV_SYS_REG_APIBKEYHI_EL1, HVF_SYSREG(2, 1, 3, 0, 3) },
    { HV_SYS_REG_APDAKEYLO_EL1, HVF_SYSREG(2, 2, 3, 0, 0) },
    { HV_SYS_REG_APDAKEYHI_EL1, HVF_SYSREG(2, 2, 3, 0, 1) },
    { HV_SYS_REG_APDBKEYLO_EL1, HVF_SYSREG(2, 2, 3, 0, 2) },
    { HV_SYS_REG_APDBKEYHI_EL1, HVF_SYSREG(2, 2, 3, 0, 3) },
    { HV_SYS_REG_APGAKEYLO_EL1, HVF_SYSREG(2, 3, 3, 0, 0) },
    { HV_SYS_REG_APGAKEYHI_EL1, HVF_SYSREG(2, 3, 3, 0, 1) },

    { HV_SYS_REG_SPSR_EL1, HVF_SYSREG(4, 0, 3, 0, 0) },
    { HV_SYS_REG_ELR_EL1, HVF_SYSREG(4, 0, 3, 0, 1) },
    { HV_SYS_REG_SP_EL0, HVF_SYSREG(4, 1, 3, 0, 0) },
    { HV_SYS_REG_AFSR0_EL1, HVF_SYSREG(5, 1, 3, 0, 0) },
    { HV_SYS_REG_AFSR1_EL1, HVF_SYSREG(5, 1, 3, 0, 1) },
    { HV_SYS_REG_ESR_EL1, HVF_SYSREG(5, 2, 3, 0, 0) },
    { HV_SYS_REG_FAR_EL1, HVF_SYSREG(6, 0, 3, 0, 0) },
    { HV_SYS_REG_PAR_EL1, HVF_SYSREG(7, 4, 3, 0, 0) },
    { HV_SYS_REG_MAIR_EL1, HVF_SYSREG(10, 2, 3, 0, 0) },
    { HV_SYS_REG_AMAIR_EL1, HVF_SYSREG(10, 3, 3, 0, 0) },
    { HV_SYS_REG_VBAR_EL1, HVF_SYSREG(12, 0, 3, 0, 0) },
    { HV_SYS_REG_CONTEXTIDR_EL1, HVF_SYSREG(13, 0, 3, 0, 1) },
    { HV_SYS_REG_TPIDR_EL1, HVF_SYSREG(13, 0, 3, 0, 4) },
    { HV_SYS_REG_CNTKCTL_EL1, HVF_SYSREG(14, 1, 3, 0, 0) },
    { HV_SYS_REG_CSSELR_EL1, HVF_SYSREG(0, 0, 3, 2, 0) },
    { HV_SYS_REG_TPIDR_EL0, HVF_SYSREG(13, 0, 3, 3, 2) },
    { HV_SYS_REG_TPIDRRO_EL0, HVF_SYSREG(13, 0, 3, 3, 3) },
    { HV_SYS_REG_CNTV_CTL_EL0, HVF_SYSREG(14, 3, 3, 3, 1) },
    { HV_SYS_REG_CNTV_CVAL_EL0, HVF_SYSREG(14, 3, 3, 3, 2) },
    { HV_SYS_REG_SP_EL1, HVF_SYSREG(4, 1, 3, 4, 0) },
};

int hvf_get_registers(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;
    hv_return_t ret;
    uint64_t val;
    hv_simd_fp_uchar16_t fpval;
    int i;

    for (i = 0; i < ARRAY_SIZE(hvf_reg_match); i++) {
        ret = hv_vcpu_get_reg(cpu->accel->fd, hvf_reg_match[i].reg, &val);
        *(uint64_t *)((void *)env + hvf_reg_match[i].offset) = val;
        assert_hvf_ok(ret);
    }

    for (i = 0; i < ARRAY_SIZE(hvf_fpreg_match); i++) {
        ret = hv_vcpu_get_simd_fp_reg(cpu->accel->fd, hvf_fpreg_match[i].reg,
                                      &fpval);
        memcpy((void *)env + hvf_fpreg_match[i].offset, &fpval, sizeof(fpval));
        assert_hvf_ok(ret);
    }

    val = 0;
    ret = hv_vcpu_get_reg(cpu->accel->fd, HV_REG_FPCR, &val);
    assert_hvf_ok(ret);
    vfp_set_fpcr(env, val);

    val = 0;
    ret = hv_vcpu_get_reg(cpu->accel->fd, HV_REG_FPSR, &val);
    assert_hvf_ok(ret);
    vfp_set_fpsr(env, val);

    ret = hv_vcpu_get_reg(cpu->accel->fd, HV_REG_CPSR, &val);
    assert_hvf_ok(ret);
    pstate_write(env, val);

    for (i = 0; i < ARRAY_SIZE(hvf_sreg_match); i++) {
        if (hvf_sreg_match[i].cp_idx == -1) {
            continue;
        }

        if (cpu->accel->guest_debug_enabled) {
            /* Handle debug registers */
            switch (hvf_sreg_match[i].reg) {
            case HV_SYS_REG_DBGBVR0_EL1:
            case HV_SYS_REG_DBGBCR0_EL1:
            case HV_SYS_REG_DBGWVR0_EL1:
            case HV_SYS_REG_DBGWCR0_EL1:
            case HV_SYS_REG_DBGBVR1_EL1:
            case HV_SYS_REG_DBGBCR1_EL1:
            case HV_SYS_REG_DBGWVR1_EL1:
            case HV_SYS_REG_DBGWCR1_EL1:
            case HV_SYS_REG_DBGBVR2_EL1:
            case HV_SYS_REG_DBGBCR2_EL1:
            case HV_SYS_REG_DBGWVR2_EL1:
            case HV_SYS_REG_DBGWCR2_EL1:
            case HV_SYS_REG_DBGBVR3_EL1:
            case HV_SYS_REG_DBGBCR3_EL1:
            case HV_SYS_REG_DBGWVR3_EL1:
            case HV_SYS_REG_DBGWCR3_EL1:
            case HV_SYS_REG_DBGBVR4_EL1:
            case HV_SYS_REG_DBGBCR4_EL1:
            case HV_SYS_REG_DBGWVR4_EL1:
            case HV_SYS_REG_DBGWCR4_EL1:
            case HV_SYS_REG_DBGBVR5_EL1:
            case HV_SYS_REG_DBGBCR5_EL1:
            case HV_SYS_REG_DBGWVR5_EL1:
            case HV_SYS_REG_DBGWCR5_EL1:
            case HV_SYS_REG_DBGBVR6_EL1:
            case HV_SYS_REG_DBGBCR6_EL1:
            case HV_SYS_REG_DBGWVR6_EL1:
            case HV_SYS_REG_DBGWCR6_EL1:
            case HV_SYS_REG_DBGBVR7_EL1:
            case HV_SYS_REG_DBGBCR7_EL1:
            case HV_SYS_REG_DBGWVR7_EL1:
            case HV_SYS_REG_DBGWCR7_EL1:
            case HV_SYS_REG_DBGBVR8_EL1:
            case HV_SYS_REG_DBGBCR8_EL1:
            case HV_SYS_REG_DBGWVR8_EL1:
            case HV_SYS_REG_DBGWCR8_EL1:
            case HV_SYS_REG_DBGBVR9_EL1:
            case HV_SYS_REG_DBGBCR9_EL1:
            case HV_SYS_REG_DBGWVR9_EL1:
            case HV_SYS_REG_DBGWCR9_EL1:
            case HV_SYS_REG_DBGBVR10_EL1:
            case HV_SYS_REG_DBGBCR10_EL1:
            case HV_SYS_REG_DBGWVR10_EL1:
            case HV_SYS_REG_DBGWCR10_EL1:
            case HV_SYS_REG_DBGBVR11_EL1:
            case HV_SYS_REG_DBGBCR11_EL1:
            case HV_SYS_REG_DBGWVR11_EL1:
            case HV_SYS_REG_DBGWCR11_EL1:
            case HV_SYS_REG_DBGBVR12_EL1:
            case HV_SYS_REG_DBGBCR12_EL1:
            case HV_SYS_REG_DBGWVR12_EL1:
            case HV_SYS_REG_DBGWCR12_EL1:
            case HV_SYS_REG_DBGBVR13_EL1:
            case HV_SYS_REG_DBGBCR13_EL1:
            case HV_SYS_REG_DBGWVR13_EL1:
            case HV_SYS_REG_DBGWCR13_EL1:
            case HV_SYS_REG_DBGBVR14_EL1:
            case HV_SYS_REG_DBGBCR14_EL1:
            case HV_SYS_REG_DBGWVR14_EL1:
            case HV_SYS_REG_DBGWCR14_EL1:
            case HV_SYS_REG_DBGBVR15_EL1:
            case HV_SYS_REG_DBGBCR15_EL1:
            case HV_SYS_REG_DBGWVR15_EL1:
            case HV_SYS_REG_DBGWCR15_EL1: {
                /*
                 * If the guest is being debugged, the vCPU's debug registers
                 * are holding the gdbstub's view of the registers (set in
                 * hvf_arch_update_guest_debug()).
                 * Since the environment is used to store only the guest's view
                 * of the registers, don't update it with the values from the
                 * vCPU but simply keep the values from the previous
                 * environment.
                 */
                const ARMCPRegInfo *ri;
                ri = get_arm_cp_reginfo(arm_cpu->cp_regs, hvf_sreg_match[i].key);
                val = read_raw_cp_reg(env, ri);

                arm_cpu->cpreg_values[hvf_sreg_match[i].cp_idx] = val;
                continue;
            }
            }
        }

        ret = hv_vcpu_get_sys_reg(cpu->accel->fd, hvf_sreg_match[i].reg, &val);
        assert_hvf_ok(ret);

        arm_cpu->cpreg_values[hvf_sreg_match[i].cp_idx] = val;
    }
    assert(write_list_to_cpustate(arm_cpu));

    aarch64_restore_sp(env, arm_current_el(env));

    return 0;
}

int hvf_put_registers(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;
    hv_return_t ret;
    uint64_t val;
    hv_simd_fp_uchar16_t fpval;
    int i;

    for (i = 0; i < ARRAY_SIZE(hvf_reg_match); i++) {
        val = *(uint64_t *)((void *)env + hvf_reg_match[i].offset);
        ret = hv_vcpu_set_reg(cpu->accel->fd, hvf_reg_match[i].reg, val);
        assert_hvf_ok(ret);
    }

    for (i = 0; i < ARRAY_SIZE(hvf_fpreg_match); i++) {
        memcpy(&fpval, (void *)env + hvf_fpreg_match[i].offset, sizeof(fpval));
        ret = hv_vcpu_set_simd_fp_reg(cpu->accel->fd, hvf_fpreg_match[i].reg,
                                      fpval);
        assert_hvf_ok(ret);
    }

    ret = hv_vcpu_set_reg(cpu->accel->fd, HV_REG_FPCR, vfp_get_fpcr(env));
    assert_hvf_ok(ret);

    ret = hv_vcpu_set_reg(cpu->accel->fd, HV_REG_FPSR, vfp_get_fpsr(env));
    assert_hvf_ok(ret);

    ret = hv_vcpu_set_reg(cpu->accel->fd, HV_REG_CPSR, pstate_read(env));
    assert_hvf_ok(ret);

    aarch64_save_sp(env, arm_current_el(env));

    assert(write_cpustate_to_list(arm_cpu, false));
    for (i = 0; i < ARRAY_SIZE(hvf_sreg_match); i++) {
        if (hvf_sreg_match[i].cp_idx == -1) {
            continue;
        }

        if (cpu->accel->guest_debug_enabled) {
            /* Handle debug registers */
            switch (hvf_sreg_match[i].reg) {
            case HV_SYS_REG_DBGBVR0_EL1:
            case HV_SYS_REG_DBGBCR0_EL1:
            case HV_SYS_REG_DBGWVR0_EL1:
            case HV_SYS_REG_DBGWCR0_EL1:
            case HV_SYS_REG_DBGBVR1_EL1:
            case HV_SYS_REG_DBGBCR1_EL1:
            case HV_SYS_REG_DBGWVR1_EL1:
            case HV_SYS_REG_DBGWCR1_EL1:
            case HV_SYS_REG_DBGBVR2_EL1:
            case HV_SYS_REG_DBGBCR2_EL1:
            case HV_SYS_REG_DBGWVR2_EL1:
            case HV_SYS_REG_DBGWCR2_EL1:
            case HV_SYS_REG_DBGBVR3_EL1:
            case HV_SYS_REG_DBGBCR3_EL1:
            case HV_SYS_REG_DBGWVR3_EL1:
            case HV_SYS_REG_DBGWCR3_EL1:
            case HV_SYS_REG_DBGBVR4_EL1:
            case HV_SYS_REG_DBGBCR4_EL1:
            case HV_SYS_REG_DBGWVR4_EL1:
            case HV_SYS_REG_DBGWCR4_EL1:
            case HV_SYS_REG_DBGBVR5_EL1:
            case HV_SYS_REG_DBGBCR5_EL1:
            case HV_SYS_REG_DBGWVR5_EL1:
            case HV_SYS_REG_DBGWCR5_EL1:
            case HV_SYS_REG_DBGBVR6_EL1:
            case HV_SYS_REG_DBGBCR6_EL1:
            case HV_SYS_REG_DBGWVR6_EL1:
            case HV_SYS_REG_DBGWCR6_EL1:
            case HV_SYS_REG_DBGBVR7_EL1:
            case HV_SYS_REG_DBGBCR7_EL1:
            case HV_SYS_REG_DBGWVR7_EL1:
            case HV_SYS_REG_DBGWCR7_EL1:
            case HV_SYS_REG_DBGBVR8_EL1:
            case HV_SYS_REG_DBGBCR8_EL1:
            case HV_SYS_REG_DBGWVR8_EL1:
            case HV_SYS_REG_DBGWCR8_EL1:
            case HV_SYS_REG_DBGBVR9_EL1:
            case HV_SYS_REG_DBGBCR9_EL1:
            case HV_SYS_REG_DBGWVR9_EL1:
            case HV_SYS_REG_DBGWCR9_EL1:
            case HV_SYS_REG_DBGBVR10_EL1:
            case HV_SYS_REG_DBGBCR10_EL1:
            case HV_SYS_REG_DBGWVR10_EL1:
            case HV_SYS_REG_DBGWCR10_EL1:
            case HV_SYS_REG_DBGBVR11_EL1:
            case HV_SYS_REG_DBGBCR11_EL1:
            case HV_SYS_REG_DBGWVR11_EL1:
            case HV_SYS_REG_DBGWCR11_EL1:
            case HV_SYS_REG_DBGBVR12_EL1:
            case HV_SYS_REG_DBGBCR12_EL1:
            case HV_SYS_REG_DBGWVR12_EL1:
            case HV_SYS_REG_DBGWCR12_EL1:
            case HV_SYS_REG_DBGBVR13_EL1:
            case HV_SYS_REG_DBGBCR13_EL1:
            case HV_SYS_REG_DBGWVR13_EL1:
            case HV_SYS_REG_DBGWCR13_EL1:
            case HV_SYS_REG_DBGBVR14_EL1:
            case HV_SYS_REG_DBGBCR14_EL1:
            case HV_SYS_REG_DBGWVR14_EL1:
            case HV_SYS_REG_DBGWCR14_EL1:
            case HV_SYS_REG_DBGBVR15_EL1:
            case HV_SYS_REG_DBGBCR15_EL1:
            case HV_SYS_REG_DBGWVR15_EL1:
            case HV_SYS_REG_DBGWCR15_EL1:
                /*
                 * If the guest is being debugged, the vCPU's debug registers
                 * are already holding the gdbstub's view of the registers (set
                 * in hvf_arch_update_guest_debug()).
                 */
                continue;
            }
        }

        val = arm_cpu->cpreg_values[hvf_sreg_match[i].cp_idx];
        ret = hv_vcpu_set_sys_reg(cpu->accel->fd, hvf_sreg_match[i].reg, val);
        assert_hvf_ok(ret);
    }

    ret = hv_vcpu_set_vtimer_offset(cpu->accel->fd, hvf_state->vtimer_offset);
    assert_hvf_ok(ret);

    return 0;
}

static void flush_cpu_state(CPUState *cpu)
{
    if (cpu->vcpu_dirty) {
        hvf_put_registers(cpu);
        cpu->vcpu_dirty = false;
    }
}

static void hvf_set_reg(CPUState *cpu, int rt, uint64_t val)
{
    hv_return_t r;

    flush_cpu_state(cpu);

    /* Rt == 31 encodes XZR, so writes to it are simply discarded */
    if (rt < 31) {
        r = hv_vcpu_set_reg(cpu->accel->fd, HV_REG_X0 + rt, val);
        assert_hvf_ok(r);
    }
}

static uint64_t hvf_get_reg(CPUState *cpu, int rt)
{
    uint64_t val = 0;
    hv_return_t r;

    flush_cpu_state(cpu);

    /* Rt == 31 encodes XZR, which always reads as 0 */
    if (rt < 31) {
        r = hv_vcpu_get_reg(cpu->accel->fd, HV_REG_X0 + rt, &val);
        assert_hvf_ok(r);
    }

    return val;
}

static void clamp_id_aa64mmfr0_parange_to_ipa_size(ARMISARegisters *isar)
{
    uint32_t ipa_size = chosen_ipa_bit_size ?
            chosen_ipa_bit_size : hvf_arm_get_max_ipa_bit_size();
    uint64_t id_aa64mmfr0;

    /* Clamp down the PARange to the IPA size the kernel supports. */
    uint8_t index = round_down_to_parange_index(ipa_size);
    id_aa64mmfr0 = GET_IDREG(isar, ID_AA64MMFR0);
    id_aa64mmfr0 = (id_aa64mmfr0 & ~R_ID_AA64MMFR0_PARANGE_MASK) | index;
    SET_IDREG(isar, ID_AA64MMFR0, id_aa64mmfr0);
}

static bool hvf_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
{
    ARMISARegisters host_isar = {};
    const struct isar_regs {
        int reg;
        uint64_t *val;
    } regs[] = {
        { HV_SYS_REG_ID_AA64PFR0_EL1, &host_isar.idregs[ID_AA64PFR0_EL1_IDX] },
        { HV_SYS_REG_ID_AA64PFR1_EL1, &host_isar.idregs[ID_AA64PFR1_EL1_IDX] },
        { HV_SYS_REG_ID_AA64DFR0_EL1, &host_isar.idregs[ID_AA64DFR0_EL1_IDX] },
        { HV_SYS_REG_ID_AA64DFR1_EL1, &host_isar.idregs[ID_AA64DFR1_EL1_IDX] },
        { HV_SYS_REG_ID_AA64ISAR0_EL1, &host_isar.idregs[ID_AA64ISAR0_EL1_IDX] },
        { HV_SYS_REG_ID_AA64ISAR1_EL1, &host_isar.idregs[ID_AA64ISAR1_EL1_IDX] },
        /* Add ID_AA64ISAR2_EL1 here when HVF supports it */
        { HV_SYS_REG_ID_AA64MMFR0_EL1, &host_isar.idregs[ID_AA64MMFR0_EL1_IDX] },
        { HV_SYS_REG_ID_AA64MMFR1_EL1, &host_isar.idregs[ID_AA64MMFR1_EL1_IDX] },
        { HV_SYS_REG_ID_AA64MMFR2_EL1, &host_isar.idregs[ID_AA64MMFR2_EL1_IDX] },
        /* Add ID_AA64MMFR3_EL1 here when HVF supports it */
    };
    hv_vcpu_t fd;
    hv_return_t r = HV_SUCCESS;
    hv_vcpu_exit_t *exit;
    int i;

    ahcf->dtb_compatible = "arm,armv8";
    ahcf->features = (1ULL << ARM_FEATURE_V8) |
                     (1ULL << ARM_FEATURE_NEON) |
                     (1ULL << ARM_FEATURE_AARCH64) |
                     (1ULL << ARM_FEATURE_PMU) |
                     (1ULL << ARM_FEATURE_GENERIC_TIMER);

    /* We set up a small vcpu to extract host registers */

    if (hv_vcpu_create(&fd, &exit, NULL) != HV_SUCCESS) {
        return false;
    }

    for (i = 0; i < ARRAY_SIZE(regs); i++) {
        r |= hv_vcpu_get_sys_reg(fd, regs[i].reg, regs[i].val);
    }
    r |= hv_vcpu_get_sys_reg(fd, HV_SYS_REG_MIDR_EL1, &ahcf->midr);
    r |= hv_vcpu_destroy(fd);

    clamp_id_aa64mmfr0_parange_to_ipa_size(&host_isar);

    /*
     * Disable SME, which is not properly handled by QEMU hvf yet.
     * To allow this through we would need to:
     * - make sure that the SME state is correctly handled in the
     *   get_registers/put_registers functions
     * - get the SME-specific CPU properties to work with accelerators
     *   other than TCG
     * - fix any assumptions we made that SME implies SVE (since
     *   on the M4 there is SME but not SVE)
     */
    SET_IDREG(&host_isar, ID_AA64PFR1,
              GET_IDREG(&host_isar, ID_AA64PFR1) & ~R_ID_AA64PFR1_SME_MASK);

    ahcf->isar = host_isar;

    /*
     * A scratch vCPU returns SCTLR 0, so let's fill our default with the M1
     * boot SCTLR from https://github.com/AsahiLinux/m1n1/issues/97
     */
    ahcf->reset_sctlr = 0x30100180;
    /*
     * SPAN is disabled by default when SCTLR.SPAN=1. To improve compatibility,
     * let's disable it on boot and then allow guest software to turn it on by
     * setting it to 0.
     */
    ahcf->reset_sctlr |= 0x00800000;

    /* Make sure we don't advertise AArch32 support for EL0/EL1 */
    if ((GET_IDREG(&host_isar, ID_AA64PFR0) & 0xff) != 0x11) {
        return false;
    }

    return r == HV_SUCCESS;
}

uint32_t hvf_arm_get_default_ipa_bit_size(void)
{
    uint32_t default_ipa_size;
    hv_return_t ret = hv_vm_config_get_default_ipa_size(&default_ipa_size);
    assert_hvf_ok(ret);

    return default_ipa_size;
}

uint32_t hvf_arm_get_max_ipa_bit_size(void)
{
    uint32_t max_ipa_size;
    hv_return_t ret = hv_vm_config_get_max_ipa_size(&max_ipa_size);
    assert_hvf_ok(ret);

    /*
     * We clamp any IPA size we want to back the VM with to a valid PARange
     * value so the guest doesn't try and map memory outside of the valid range.
     * This logic just clamps the passed in IPA bit size to the first valid
     * PARange value <= to it.
     */
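    /*
     * E.g. a reported maximum of 40 bits is itself a valid PARange size and
     * is returned unchanged, while 39 bits would be rounded down to 36; the
     * architected PARange sizes are 32, 36, 40, 42, 44, 48 and 52 bits.
     */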
    return round_down_to_parange_bit_size(max_ipa_size);
}

void hvf_arm_set_cpu_features_from_host(ARMCPU *cpu)
{
    if (!arm_host_cpu_features.dtb_compatible) {
        if (!hvf_enabled() ||
            !hvf_arm_get_host_cpu_features(&arm_host_cpu_features)) {
            /*
             * We can't report this error yet, so flag that we need to
             * in arm_cpu_realizefn().
             */
            cpu->host_cpu_probe_failed = true;
            return;
        }
    }

    cpu->dtb_compatible = arm_host_cpu_features.dtb_compatible;
    cpu->isar = arm_host_cpu_features.isar;
    cpu->env.features = arm_host_cpu_features.features;
    cpu->midr = arm_host_cpu_features.midr;
    cpu->reset_sctlr = arm_host_cpu_features.reset_sctlr;
}

void hvf_arch_vcpu_destroy(CPUState *cpu)
{
}

hv_return_t hvf_arch_vm_create(MachineState *ms, uint32_t pa_range)
{
    hv_return_t ret;
    hv_vm_config_t config = hv_vm_config_create();

    ret = hv_vm_config_set_ipa_size(config, pa_range);
    if (ret != HV_SUCCESS) {
        goto cleanup;
    }
    chosen_ipa_bit_size = pa_range;

    ret = hv_vm_create(config);

cleanup:
    os_release(config);

    return ret;
}

int hvf_arch_init_vcpu(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;
    uint32_t sregs_match_len = ARRAY_SIZE(hvf_sreg_match);
    uint32_t sregs_cnt = 0;
    uint64_t pfr;
    hv_return_t ret;
    int i;

    env->aarch64 = true;
    /* The guest counter runs at the host's CNTFRQ_EL0 frequency */
    asm volatile("mrs %0, cntfrq_el0" : "=r"(arm_cpu->gt_cntfrq_hz));

    /* Allocate enough space for our sysreg sync */
    arm_cpu->cpreg_indexes = g_renew(uint64_t, arm_cpu->cpreg_indexes,
                                     sregs_match_len);
    arm_cpu->cpreg_values = g_renew(uint64_t, arm_cpu->cpreg_values,
                                    sregs_match_len);
    arm_cpu->cpreg_vmstate_indexes = g_renew(uint64_t,
                                             arm_cpu->cpreg_vmstate_indexes,
                                             sregs_match_len);
    arm_cpu->cpreg_vmstate_values = g_renew(uint64_t,
                                            arm_cpu->cpreg_vmstate_values,
                                            sregs_match_len);

    memset(arm_cpu->cpreg_values, 0, sregs_match_len * sizeof(uint64_t));

    /* Populate cp list for all known sysregs */
    for (i = 0; i < sregs_match_len; i++) {
        const ARMCPRegInfo *ri;
        uint32_t key = hvf_sreg_match[i].key;

        ri = get_arm_cp_reginfo(arm_cpu->cp_regs, key);
        if (ri) {
            assert(!(ri->type & ARM_CP_NO_RAW));
            hvf_sreg_match[i].cp_idx = sregs_cnt;
            arm_cpu->cpreg_indexes[sregs_cnt++] = cpreg_to_kvm_id(key);
        } else {
            hvf_sreg_match[i].cp_idx = -1;
        }
    }
    arm_cpu->cpreg_array_len = sregs_cnt;
    arm_cpu->cpreg_vmstate_array_len = sregs_cnt;

    assert(write_cpustate_to_list(arm_cpu, false));

    /* Set CP_NO_RAW system registers on init */
    ret = hv_vcpu_set_sys_reg(cpu->accel->fd, HV_SYS_REG_MIDR_EL1,
                              arm_cpu->midr);
    assert_hvf_ok(ret);

    ret = hv_vcpu_set_sys_reg(cpu->accel->fd, HV_SYS_REG_MPIDR_EL1,
                              arm_cpu->mp_affinity);
    assert_hvf_ok(ret);

    ret = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_ID_AA64PFR0_EL1, &pfr);
    assert_hvf_ok(ret);
    /* ID_AA64PFR0_EL1.GIC (bits [27:24]) = 1: GICv3 sysregs are implemented */
    pfr |= env->gicv3state ? (1 << 24) : 0;
    ret = hv_vcpu_set_sys_reg(cpu->accel->fd, HV_SYS_REG_ID_AA64PFR0_EL1, pfr);
    assert_hvf_ok(ret);

    /* We're limited to underlying hardware caps, override internal versions */
    ret = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_ID_AA64MMFR0_EL1,
                              &arm_cpu->isar.idregs[ID_AA64MMFR0_EL1_IDX]);
    assert_hvf_ok(ret);

    clamp_id_aa64mmfr0_parange_to_ipa_size(&arm_cpu->isar);
    ret = hv_vcpu_set_sys_reg(cpu->accel->fd, HV_SYS_REG_ID_AA64MMFR0_EL1,
                              arm_cpu->isar.idregs[ID_AA64MMFR0_EL1_IDX]);
    assert_hvf_ok(ret);

    return 0;
}

void hvf_kick_vcpu_thread(CPUState *cpu)
{
    cpus_kick_thread(cpu);
    /* Force the vCPU out of hv_vcpu_run() so it notices the kick promptly */
    hv_vcpus_exit(&cpu->accel->fd, 1);
}

static void hvf_raise_exception(CPUState *cpu, uint32_t excp,
                                uint32_t syndrome, int target_el)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;

    cpu->exception_index = excp;
    env->exception.target_el = target_el;
    env->exception.syndrome = syndrome;

    arm_cpu_do_interrupt(cpu);
}

static void hvf_psci_cpu_off(ARMCPU *arm_cpu)
{
    int32_t ret = arm_set_cpu_off(arm_cpu_mp_affinity(arm_cpu));
    assert(ret == QEMU_ARM_POWERCTL_RET_SUCCESS);
}

/*
 * Handle a PSCI call.
 *
 * Returns true on success,
 *         false when the PSCI call is unknown.
 */
static bool hvf_handle_psci_call(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;
    uint64_t param[4] = {
        env->xregs[0],
        env->xregs[1],
        env->xregs[2],
        env->xregs[3]
    };
    uint64_t context_id, mpidr;
    bool target_aarch64 = true;
    CPUState *target_cpu_state;
    ARMCPU *target_cpu;
    target_ulong entry;
    int target_el = 1;
    int32_t ret = 0;

    trace_hvf_psci_call(param[0], param[1], param[2], param[3],
                        arm_cpu_mp_affinity(arm_cpu));

    switch (param[0]) {
    case QEMU_PSCI_0_2_FN_PSCI_VERSION:
        ret = QEMU_PSCI_VERSION_1_1;
        break;
    case QEMU_PSCI_0_2_FN_MIGRATE_INFO_TYPE:
        ret = QEMU_PSCI_0_2_RET_TOS_MIGRATION_NOT_REQUIRED; /* No trusted OS */
        break;
    case QEMU_PSCI_0_2_FN_AFFINITY_INFO:
    case QEMU_PSCI_0_2_FN64_AFFINITY_INFO:
        mpidr = param[1];

        switch (param[2]) {
        case 0:
            target_cpu_state = arm_get_cpu_by_id(mpidr);
            if (!target_cpu_state) {
                ret = QEMU_PSCI_RET_INVALID_PARAMS;
                break;
            }
            target_cpu = ARM_CPU(target_cpu_state);

            ret = target_cpu->power_state;
            break;
        default:
            /* Everything above affinity level 0 is always on. */
            ret = 0;
        }
        break;
    case QEMU_PSCI_0_2_FN_SYSTEM_RESET:
        qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
        /*
         * QEMU reset and shutdown are async requests, but PSCI
         * mandates that we never return from the reset/shutdown
         * call, so power the CPU off now so it doesn't execute
         * anything further.
         */
        hvf_psci_cpu_off(arm_cpu);
        break;
    case QEMU_PSCI_0_2_FN_SYSTEM_OFF:
        qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
        hvf_psci_cpu_off(arm_cpu);
        break;
    case QEMU_PSCI_0_1_FN_CPU_ON:
    case QEMU_PSCI_0_2_FN_CPU_ON:
    case QEMU_PSCI_0_2_FN64_CPU_ON:
        mpidr = param[1];
        entry = param[2];
        context_id = param[3];
        ret = arm_set_cpu_on(mpidr, entry, context_id,
                             target_el, target_aarch64);
        break;
    case QEMU_PSCI_0_1_FN_CPU_OFF:
    case QEMU_PSCI_0_2_FN_CPU_OFF:
        hvf_psci_cpu_off(arm_cpu);
        break;
    case QEMU_PSCI_0_1_FN_CPU_SUSPEND:
    case QEMU_PSCI_0_2_FN_CPU_SUSPEND:
    case QEMU_PSCI_0_2_FN64_CPU_SUSPEND:
        /* Affinity levels are not supported in QEMU */
        if (param[1] & 0xfffe0000) {
            ret = QEMU_PSCI_RET_INVALID_PARAMS;
            break;
        }
        /* Powerdown is not supported, we always go into WFI */
        env->xregs[0] = 0;
        hvf_wfi(cpu);
        break;
    case QEMU_PSCI_0_1_FN_MIGRATE:
    case QEMU_PSCI_0_2_FN_MIGRATE:
        ret = QEMU_PSCI_RET_NOT_SUPPORTED;
        break;
    case QEMU_PSCI_1_0_FN_PSCI_FEATURES:
        switch (param[1]) {
        case QEMU_PSCI_0_2_FN_PSCI_VERSION:
        case QEMU_PSCI_0_2_FN_MIGRATE_INFO_TYPE:
        case QEMU_PSCI_0_2_FN_AFFINITY_INFO:
        case QEMU_PSCI_0_2_FN64_AFFINITY_INFO:
        case QEMU_PSCI_0_2_FN_SYSTEM_RESET:
        case QEMU_PSCI_0_2_FN_SYSTEM_OFF:
        case QEMU_PSCI_0_1_FN_CPU_ON:
        case QEMU_PSCI_0_2_FN_CPU_ON:
        case QEMU_PSCI_0_2_FN64_CPU_ON:
        case QEMU_PSCI_0_1_FN_CPU_OFF:
        case QEMU_PSCI_0_2_FN_CPU_OFF:
        case QEMU_PSCI_0_1_FN_CPU_SUSPEND:
        case QEMU_PSCI_0_2_FN_CPU_SUSPEND:
        case QEMU_PSCI_0_2_FN64_CPU_SUSPEND:
        case QEMU_PSCI_1_0_FN_PSCI_FEATURES:
            ret = 0;
            break;
        case QEMU_PSCI_0_1_FN_MIGRATE:
        case QEMU_PSCI_0_2_FN_MIGRATE:
        default:
            ret = QEMU_PSCI_RET_NOT_SUPPORTED;
        }
        break;
    default:
        return false;
    }

    env->xregs[0] = ret;
    return true;
}

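/*
 * The op0 == 3, op1 == 0, CRn == 0, CRm == 1..7 encoding space is reserved
 * by the architecture for feature ID registers, and unallocated registers
 * in it read as zero; hvf_sysreg_read() relies on this for any ID register
 * it does not handle explicitly.
 */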
static bool is_id_sysreg(uint32_t reg)
{
    return SYSREG_OP0(reg) == 3 &&
           SYSREG_OP1(reg) == 0 &&
           SYSREG_CRN(reg) == 0 &&
           SYSREG_CRM(reg) >= 1 &&
           SYSREG_CRM(reg) < 8;
}

static uint32_t hvf_reg2cp_reg(uint32_t reg)
{
    return ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP,
                              (reg >> SYSREG_CRN_SHIFT) & SYSREG_CRN_MASK,
                              (reg >> SYSREG_CRM_SHIFT) & SYSREG_CRM_MASK,
                              (reg >> SYSREG_OP0_SHIFT) & SYSREG_OP0_MASK,
                              (reg >> SYSREG_OP1_SHIFT) & SYSREG_OP1_MASK,
                              (reg >> SYSREG_OP2_SHIFT) & SYSREG_OP2_MASK);
}

static bool hvf_sysreg_read_cp(CPUState *cpu, uint32_t reg, uint64_t *val)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;
    const ARMCPRegInfo *ri;

    ri = get_arm_cp_reginfo(arm_cpu->cp_regs, hvf_reg2cp_reg(reg));
    if (ri) {
        if (!cp_access_ok(1, ri, true)) {
            return false;
        }
        if (ri->accessfn) {
            if (ri->accessfn(env, ri, true) != CP_ACCESS_OK) {
                return false;
            }
        }
        if (ri->type & ARM_CP_CONST) {
            *val = ri->resetvalue;
        } else if (ri->readfn) {
            *val = ri->readfn(env, ri);
        } else {
            *val = CPREG_FIELD64(env, ri);
        }
        trace_hvf_vgic_read(ri->name, *val);
        return true;
    }

    return false;
}
1288 
hvf_sysreg_read(CPUState * cpu,uint32_t reg,uint64_t * val)1289 static int hvf_sysreg_read(CPUState *cpu, uint32_t reg, uint64_t *val)
1290 {
1291     ARMCPU *arm_cpu = ARM_CPU(cpu);
1292     CPUARMState *env = &arm_cpu->env;
1293 
1294     if (arm_feature(env, ARM_FEATURE_PMU)) {
1295         switch (reg) {
1296         case SYSREG_PMCR_EL0:
1297             *val = env->cp15.c9_pmcr;
1298             return 0;
1299         case SYSREG_PMCCNTR_EL0:
1300             pmu_op_start(env);
1301             *val = env->cp15.c15_ccnt;
1302             pmu_op_finish(env);
1303             return 0;
1304         case SYSREG_PMCNTENCLR_EL0:
1305             *val = env->cp15.c9_pmcnten;
1306             return 0;
1307         case SYSREG_PMOVSCLR_EL0:
1308             *val = env->cp15.c9_pmovsr;
1309             return 0;
1310         case SYSREG_PMSELR_EL0:
1311             *val = env->cp15.c9_pmselr;
1312             return 0;
1313         case SYSREG_PMINTENCLR_EL1:
1314             *val = env->cp15.c9_pminten;
1315             return 0;
1316         case SYSREG_PMCCFILTR_EL0:
1317             *val = env->cp15.pmccfiltr_el0;
1318             return 0;
1319         case SYSREG_PMCNTENSET_EL0:
1320             *val = env->cp15.c9_pmcnten;
1321             return 0;
1322         case SYSREG_PMUSERENR_EL0:
1323             *val = env->cp15.c9_pmuserenr;
1324             return 0;
1325         case SYSREG_PMCEID0_EL0:
1326         case SYSREG_PMCEID1_EL0:
1327             /* We can't really count anything yet, declare all events invalid */
1328             *val = 0;
1329             return 0;
1330         }
1331     }
1332 
1333     switch (reg) {
    case SYSREG_CNTPCT_EL0:
        *val = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) /
              gt_cntfrq_period_ns(arm_cpu);
        return 0;
    case SYSREG_OSLSR_EL1:
        *val = env->cp15.oslsr_el1;
        return 0;
    case SYSREG_OSDLR_EL1:
        /* Dummy register */
        return 0;
    case SYSREG_ICC_AP0R0_EL1:
    case SYSREG_ICC_AP0R1_EL1:
    case SYSREG_ICC_AP0R2_EL1:
    case SYSREG_ICC_AP0R3_EL1:
    case SYSREG_ICC_AP1R0_EL1:
    case SYSREG_ICC_AP1R1_EL1:
    case SYSREG_ICC_AP1R2_EL1:
    case SYSREG_ICC_AP1R3_EL1:
    case SYSREG_ICC_ASGI1R_EL1:
    case SYSREG_ICC_BPR0_EL1:
    case SYSREG_ICC_BPR1_EL1:
    case SYSREG_ICC_DIR_EL1:
    case SYSREG_ICC_EOIR0_EL1:
    case SYSREG_ICC_EOIR1_EL1:
    case SYSREG_ICC_HPPIR0_EL1:
    case SYSREG_ICC_HPPIR1_EL1:
    case SYSREG_ICC_IAR0_EL1:
    case SYSREG_ICC_IAR1_EL1:
    case SYSREG_ICC_IGRPEN0_EL1:
    case SYSREG_ICC_IGRPEN1_EL1:
    case SYSREG_ICC_PMR_EL1:
    case SYSREG_ICC_RPR_EL1:
    case SYSREG_ICC_SGI0R_EL1:
    case SYSREG_ICC_SGI1R_EL1:
    case SYSREG_ICC_SRE_EL1:
    case SYSREG_ICC_CTLR_EL1:
        /* Call the TCG sysreg handler. This is only safe for GICv3 regs. */
        if (hvf_sysreg_read_cp(cpu, reg, val)) {
            return 0;
        }
        break;
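    /*
     * The hardware debug registers encode their index in CRm, so
     * SYSREG_CRM(reg) recovers the breakpoint/watchpoint number directly.
     */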
    case SYSREG_DBGBVR0_EL1:
    case SYSREG_DBGBVR1_EL1:
    case SYSREG_DBGBVR2_EL1:
    case SYSREG_DBGBVR3_EL1:
    case SYSREG_DBGBVR4_EL1:
    case SYSREG_DBGBVR5_EL1:
    case SYSREG_DBGBVR6_EL1:
    case SYSREG_DBGBVR7_EL1:
    case SYSREG_DBGBVR8_EL1:
    case SYSREG_DBGBVR9_EL1:
    case SYSREG_DBGBVR10_EL1:
    case SYSREG_DBGBVR11_EL1:
    case SYSREG_DBGBVR12_EL1:
    case SYSREG_DBGBVR13_EL1:
    case SYSREG_DBGBVR14_EL1:
    case SYSREG_DBGBVR15_EL1:
        *val = env->cp15.dbgbvr[SYSREG_CRM(reg)];
        return 0;
    case SYSREG_DBGBCR0_EL1:
    case SYSREG_DBGBCR1_EL1:
    case SYSREG_DBGBCR2_EL1:
    case SYSREG_DBGBCR3_EL1:
    case SYSREG_DBGBCR4_EL1:
    case SYSREG_DBGBCR5_EL1:
    case SYSREG_DBGBCR6_EL1:
    case SYSREG_DBGBCR7_EL1:
    case SYSREG_DBGBCR8_EL1:
    case SYSREG_DBGBCR9_EL1:
    case SYSREG_DBGBCR10_EL1:
    case SYSREG_DBGBCR11_EL1:
    case SYSREG_DBGBCR12_EL1:
    case SYSREG_DBGBCR13_EL1:
    case SYSREG_DBGBCR14_EL1:
    case SYSREG_DBGBCR15_EL1:
        *val = env->cp15.dbgbcr[SYSREG_CRM(reg)];
        return 0;
    case SYSREG_DBGWVR0_EL1:
    case SYSREG_DBGWVR1_EL1:
    case SYSREG_DBGWVR2_EL1:
    case SYSREG_DBGWVR3_EL1:
    case SYSREG_DBGWVR4_EL1:
    case SYSREG_DBGWVR5_EL1:
    case SYSREG_DBGWVR6_EL1:
    case SYSREG_DBGWVR7_EL1:
    case SYSREG_DBGWVR8_EL1:
    case SYSREG_DBGWVR9_EL1:
    case SYSREG_DBGWVR10_EL1:
    case SYSREG_DBGWVR11_EL1:
    case SYSREG_DBGWVR12_EL1:
    case SYSREG_DBGWVR13_EL1:
    case SYSREG_DBGWVR14_EL1:
    case SYSREG_DBGWVR15_EL1:
        *val = env->cp15.dbgwvr[SYSREG_CRM(reg)];
        return 0;
    case SYSREG_DBGWCR0_EL1:
    case SYSREG_DBGWCR1_EL1:
    case SYSREG_DBGWCR2_EL1:
    case SYSREG_DBGWCR3_EL1:
    case SYSREG_DBGWCR4_EL1:
    case SYSREG_DBGWCR5_EL1:
    case SYSREG_DBGWCR6_EL1:
    case SYSREG_DBGWCR7_EL1:
    case SYSREG_DBGWCR8_EL1:
    case SYSREG_DBGWCR9_EL1:
    case SYSREG_DBGWCR10_EL1:
    case SYSREG_DBGWCR11_EL1:
    case SYSREG_DBGWCR12_EL1:
    case SYSREG_DBGWCR13_EL1:
    case SYSREG_DBGWCR14_EL1:
    case SYSREG_DBGWCR15_EL1:
        *val = env->cp15.dbgwcr[SYSREG_CRM(reg)];
        return 0;
    default:
        if (is_id_sysreg(reg)) {
            /* ID system registers read as RES0 */
            *val = 0;
            return 0;
        }
    }

    cpu_synchronize_state(cpu);
    trace_hvf_unhandled_sysreg_read(env->pc, reg,
                                    SYSREG_OP0(reg),
                                    SYSREG_OP1(reg),
                                    SYSREG_CRN(reg),
                                    SYSREG_CRM(reg),
                                    SYSREG_OP2(reg));
    hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized(), 1);
    return 1;
}

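/*
 * The PMU interrupt line is asserted while the PMU is enabled (PMCR.E)
 * and some counter has both its overflow flag and its interrupt enable
 * bit set.
 */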
static void pmu_update_irq(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
            (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
}

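/*
 * Event counting is not implemented under hvf, so every event number is
 * reported as unsupported; as a consequence only the cycle counter
 * (counter 31) can ever be enabled.
 */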
static bool pmu_event_supported(uint16_t number)
{
    return false;
}

/*
 * Returns true if the counter (pass 31 for PMCCNTR) should count events
 * using the current EL, security state, and register configuration.
 */
static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
{
    uint64_t filter;
    bool enabled, filtered = true;
    int el = arm_current_el(env);

    enabled = (env->cp15.c9_pmcr & PMCRE) &&
              (env->cp15.c9_pmcnten & (1 << counter));

    if (counter == 31) {
        filter = env->cp15.pmccfiltr_el0;
    } else {
        filter = env->cp15.c14_pmevtyper[counter];
    }

    if (el == 0) {
        filtered = filter & PMXEVTYPER_U;
    } else if (el == 1) {
        filtered = filter & PMXEVTYPER_P;
    }

    if (counter != 31) {
        /*
         * If not checking PMCCNTR, ensure the counter is setup to an event we
         * support
         */
        uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
        if (!pmu_event_supported(event)) {
            return false;
        }
    }

    return enabled && !filtered;
}

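/*
 * PMSWINC_EL0: software increment. Bump every enabled SW_INCR event
 * counter whose bit is set in the written value, raising the overflow
 * flag (and possibly the PMU interrupt) when a counter wraps its 32-bit
 * range.
 */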
static void pmswinc_write(CPUARMState *env, uint64_t value)
{
    unsigned int i;
    for (i = 0; i < pmu_num_counters(env); i++) {
        /* Increment a counter's count iff: */
        if ((value & (1 << i)) && /* counter's bit is set */
                /* counter is enabled and not filtered */
                pmu_counter_enabled(env, i) &&
                /* counter is SW_INCR */
                (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
            /*
             * Detect if this write causes an overflow since we can't predict
             * PMSWINC overflows like we can for other events
             */
            uint32_t new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;

            if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & INT32_MIN) {
                env->cp15.c9_pmovsr |= (1 << i);
                pmu_update_irq(env);
            }

            env->cp15.c14_pmevcntr[i] = new_pmswinc;
        }
    }
}

static bool hvf_sysreg_write_cp(CPUState *cpu, uint32_t reg, uint64_t val)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;
    const ARMCPRegInfo *ri;

    ri = get_arm_cp_reginfo(arm_cpu->cp_regs, hvf_reg2cp_reg(reg));

    if (ri) {
        if (!cp_access_ok(1, ri, false)) {
            return false;
        }
        if (ri->accessfn) {
            if (ri->accessfn(env, ri, false) != CP_ACCESS_OK) {
                return false;
            }
        }
        if (ri->writefn) {
            ri->writefn(env, ri, val);
        } else {
            CPREG_FIELD64(env, ri) = val;
        }

        trace_hvf_vgic_write(ri->name, val);
        return true;
    }

    return false;
}

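/*
 * Handle a sysreg write that trapped to the hypervisor. Returns 0 when the
 * access was emulated and 1 when it was rejected and an exception was
 * injected into the guest instead.
 */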
static int hvf_sysreg_write(CPUState *cpu, uint32_t reg, uint64_t val)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;

    trace_hvf_sysreg_write(reg,
                           SYSREG_OP0(reg),
                           SYSREG_OP1(reg),
                           SYSREG_CRN(reg),
                           SYSREG_CRM(reg),
                           SYSREG_OP2(reg),
                           val);

    if (arm_feature(env, ARM_FEATURE_PMU)) {
        switch (reg) {
        case SYSREG_PMCCNTR_EL0:
            pmu_op_start(env);
            env->cp15.c15_ccnt = val;
            pmu_op_finish(env);
            return 0;
        case SYSREG_PMCR_EL0:
            pmu_op_start(env);

            if (val & PMCRC) {
                /* The counter has been reset */
                env->cp15.c15_ccnt = 0;
            }

            if (val & PMCRP) {
                unsigned int i;
                for (i = 0; i < pmu_num_counters(env); i++) {
                    env->cp15.c14_pmevcntr[i] = 0;
                }
            }

            env->cp15.c9_pmcr &= ~PMCR_WRITABLE_MASK;
            env->cp15.c9_pmcr |= (val & PMCR_WRITABLE_MASK);

            pmu_op_finish(env);
            return 0;
        case SYSREG_PMUSERENR_EL0:
            env->cp15.c9_pmuserenr = val & 0xf;
            return 0;
        case SYSREG_PMCNTENSET_EL0:
            env->cp15.c9_pmcnten |= (val & pmu_counter_mask(env));
            return 0;
        case SYSREG_PMCNTENCLR_EL0:
            env->cp15.c9_pmcnten &= ~(val & pmu_counter_mask(env));
            return 0;
        case SYSREG_PMINTENCLR_EL1:
            pmu_op_start(env);
            /* Writing 1 to a bit clears that counter's interrupt enable */
            env->cp15.c9_pminten &= ~val;
            pmu_op_finish(env);
            return 0;
        case SYSREG_PMOVSCLR_EL0:
            pmu_op_start(env);
            env->cp15.c9_pmovsr &= ~val;
            pmu_op_finish(env);
            return 0;
        case SYSREG_PMSWINC_EL0:
            pmu_op_start(env);
            pmswinc_write(env, val);
            pmu_op_finish(env);
            return 0;
        case SYSREG_PMSELR_EL0:
            env->cp15.c9_pmselr = val & 0x1f;
            return 0;
        case SYSREG_PMCCFILTR_EL0:
            pmu_op_start(env);
            env->cp15.pmccfiltr_el0 = val & PMCCFILTR_EL0;
            pmu_op_finish(env);
            return 0;
        }
    }

    switch (reg) {
    case SYSREG_OSLAR_EL1:
        env->cp15.oslsr_el1 = val & 1;
        return 0;
    case SYSREG_CNTP_CTL_EL0:
        /*
         * Guests should not rely on the physical counter, but macOS emits
         * disable writes to it. Let it do so, but ignore the requests.
         */
        qemu_log_mask(LOG_UNIMP, "Unsupported write to CNTP_CTL_EL0\n");
        return 0;
    case SYSREG_OSDLR_EL1:
        /* Dummy register */
        return 0;
    case SYSREG_LORC_EL1:
        /* Dummy register */
        return 0;
    case SYSREG_ICC_AP0R0_EL1:
    case SYSREG_ICC_AP0R1_EL1:
    case SYSREG_ICC_AP0R2_EL1:
    case SYSREG_ICC_AP0R3_EL1:
    case SYSREG_ICC_AP1R0_EL1:
    case SYSREG_ICC_AP1R1_EL1:
    case SYSREG_ICC_AP1R2_EL1:
    case SYSREG_ICC_AP1R3_EL1:
    case SYSREG_ICC_ASGI1R_EL1:
    case SYSREG_ICC_BPR0_EL1:
    case SYSREG_ICC_BPR1_EL1:
    case SYSREG_ICC_CTLR_EL1:
    case SYSREG_ICC_DIR_EL1:
    case SYSREG_ICC_EOIR0_EL1:
    case SYSREG_ICC_EOIR1_EL1:
    case SYSREG_ICC_HPPIR0_EL1:
    case SYSREG_ICC_HPPIR1_EL1:
    case SYSREG_ICC_IAR0_EL1:
    case SYSREG_ICC_IAR1_EL1:
    case SYSREG_ICC_IGRPEN0_EL1:
    case SYSREG_ICC_IGRPEN1_EL1:
    case SYSREG_ICC_PMR_EL1:
    case SYSREG_ICC_RPR_EL1:
    case SYSREG_ICC_SGI0R_EL1:
    case SYSREG_ICC_SGI1R_EL1:
    case SYSREG_ICC_SRE_EL1:
        /* Call the TCG sysreg handler. This is only safe for GICv3 regs. */
        if (hvf_sysreg_write_cp(cpu, reg, val)) {
            return 0;
        }
        break;
    case SYSREG_MDSCR_EL1:
        env->cp15.mdscr_el1 = val;
        return 0;
    case SYSREG_DBGBVR0_EL1:
    case SYSREG_DBGBVR1_EL1:
    case SYSREG_DBGBVR2_EL1:
    case SYSREG_DBGBVR3_EL1:
    case SYSREG_DBGBVR4_EL1:
    case SYSREG_DBGBVR5_EL1:
    case SYSREG_DBGBVR6_EL1:
    case SYSREG_DBGBVR7_EL1:
    case SYSREG_DBGBVR8_EL1:
    case SYSREG_DBGBVR9_EL1:
    case SYSREG_DBGBVR10_EL1:
    case SYSREG_DBGBVR11_EL1:
    case SYSREG_DBGBVR12_EL1:
    case SYSREG_DBGBVR13_EL1:
    case SYSREG_DBGBVR14_EL1:
    case SYSREG_DBGBVR15_EL1:
        env->cp15.dbgbvr[SYSREG_CRM(reg)] = val;
        return 0;
    case SYSREG_DBGBCR0_EL1:
    case SYSREG_DBGBCR1_EL1:
    case SYSREG_DBGBCR2_EL1:
    case SYSREG_DBGBCR3_EL1:
    case SYSREG_DBGBCR4_EL1:
    case SYSREG_DBGBCR5_EL1:
    case SYSREG_DBGBCR6_EL1:
    case SYSREG_DBGBCR7_EL1:
    case SYSREG_DBGBCR8_EL1:
    case SYSREG_DBGBCR9_EL1:
    case SYSREG_DBGBCR10_EL1:
    case SYSREG_DBGBCR11_EL1:
    case SYSREG_DBGBCR12_EL1:
    case SYSREG_DBGBCR13_EL1:
    case SYSREG_DBGBCR14_EL1:
    case SYSREG_DBGBCR15_EL1:
        env->cp15.dbgbcr[SYSREG_CRM(reg)] = val;
        return 0;
    case SYSREG_DBGWVR0_EL1:
    case SYSREG_DBGWVR1_EL1:
    case SYSREG_DBGWVR2_EL1:
    case SYSREG_DBGWVR3_EL1:
    case SYSREG_DBGWVR4_EL1:
    case SYSREG_DBGWVR5_EL1:
    case SYSREG_DBGWVR6_EL1:
    case SYSREG_DBGWVR7_EL1:
    case SYSREG_DBGWVR8_EL1:
    case SYSREG_DBGWVR9_EL1:
    case SYSREG_DBGWVR10_EL1:
    case SYSREG_DBGWVR11_EL1:
    case SYSREG_DBGWVR12_EL1:
    case SYSREG_DBGWVR13_EL1:
    case SYSREG_DBGWVR14_EL1:
    case SYSREG_DBGWVR15_EL1:
        env->cp15.dbgwvr[SYSREG_CRM(reg)] = val;
        return 0;
    case SYSREG_DBGWCR0_EL1:
    case SYSREG_DBGWCR1_EL1:
    case SYSREG_DBGWCR2_EL1:
    case SYSREG_DBGWCR3_EL1:
    case SYSREG_DBGWCR4_EL1:
    case SYSREG_DBGWCR5_EL1:
    case SYSREG_DBGWCR6_EL1:
    case SYSREG_DBGWCR7_EL1:
    case SYSREG_DBGWCR8_EL1:
    case SYSREG_DBGWCR9_EL1:
    case SYSREG_DBGWCR10_EL1:
    case SYSREG_DBGWCR11_EL1:
    case SYSREG_DBGWCR12_EL1:
    case SYSREG_DBGWCR13_EL1:
    case SYSREG_DBGWCR14_EL1:
    case SYSREG_DBGWCR15_EL1:
        env->cp15.dbgwcr[SYSREG_CRM(reg)] = val;
        return 0;
    }

    cpu_synchronize_state(cpu);
    trace_hvf_unhandled_sysreg_write(env->pc, reg,
                                     SYSREG_OP0(reg),
                                     SYSREG_OP1(reg),
                                     SYSREG_CRN(reg),
                                     SYSREG_CRM(reg),
                                     SYSREG_OP2(reg));
    hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized(), 1);
    return 1;
}

static int hvf_inject_interrupts(CPUState *cpu)
{
    if (cpu->interrupt_request & CPU_INTERRUPT_FIQ) {
        trace_hvf_inject_fiq();
        hv_vcpu_set_pending_interrupt(cpu->accel->fd, HV_INTERRUPT_TYPE_FIQ,
                                      true);
    }

    if (cpu->interrupt_request & CPU_INTERRUPT_HARD) {
        trace_hvf_inject_irq();
        hv_vcpu_set_pending_interrupt(cpu->accel->fd, HV_INTERRUPT_TYPE_IRQ,
                                      true);
    }

    return 0;
}

static uint64_t hvf_vtimer_val_raw(void)
{
    /*
     * mach_absolute_time() returns the raw host counter. Subtract the
     * VM's vtimer offset to get the guest-visible vtimer value.
     */
    return mach_absolute_time() - hvf_state->vtimer_offset;
}

static uint64_t hvf_vtimer_val(void)
{
    if (!runstate_is_running()) {
        /* VM is paused, the vtimer value is in vtimer.vtimer_val */
        return vtimer.vtimer_val;
    }

    return hvf_vtimer_val_raw();
}

static void hvf_wait_for_ipi(CPUState *cpu, struct timespec *ts)
{
    /*
     * Use pselect to sleep so that other threads can IPI us while we're
     * sleeping.
     */
    qatomic_set_mb(&cpu->thread_kicked, false);
    bql_unlock();
    pselect(0, 0, 0, 0, ts, &cpu->accel->unblock_ipi_mask);
    bql_lock();
}

static void hvf_wfi(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    struct timespec ts;
    hv_return_t r;
    uint64_t ctl;
    uint64_t cval;
    int64_t ticks_to_sleep;
    uint64_t seconds;
    uint64_t nanos;
    uint32_t cntfrq;

    if (cpu->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_FIQ)) {
        /* Interrupt pending, no need to wait */
        return;
    }

    r = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_CNTV_CTL_EL0, &ctl);
    assert_hvf_ok(r);

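    /* CNTV_CTL_EL0: bit 0 is ENABLE, bit 1 is IMASK. */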
    if (!(ctl & 1) || (ctl & 2)) {
        /* Timer disabled or masked, just wait for an IPI. */
        hvf_wait_for_ipi(cpu, NULL);
        return;
    }

    r = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_CNTV_CVAL_EL0, &cval);
    assert_hvf_ok(r);

    ticks_to_sleep = cval - hvf_vtimer_val();
    if (ticks_to_sleep < 0) {
        return;
    }

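    /*
     * Convert timer ticks into a timespec. Despite its name, cntfrq holds
     * the counter period in nanoseconds, so the whole seconds are
     * ticks * period_ns / 1e9 and the remaining ticks become nanoseconds.
     */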
    cntfrq = gt_cntfrq_period_ns(arm_cpu);
    seconds = muldiv64(ticks_to_sleep, cntfrq, NANOSECONDS_PER_SECOND);
    ticks_to_sleep -= muldiv64(seconds, NANOSECONDS_PER_SECOND, cntfrq);
    nanos = ticks_to_sleep * cntfrq;

    /*
     * Don't sleep for less than the time a context switch would take,
     * so that we can satisfy fast timer requests on the same CPU.
     * Measurements on M1 show the sweet spot to be ~2ms.
     */
    if (!seconds && nanos < (2 * SCALE_MS)) {
        return;
    }

    ts = (struct timespec) { seconds, nanos };
    hvf_wait_for_ipi(cpu, &ts);
}

static void hvf_sync_vtimer(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    hv_return_t r;
    uint64_t ctl;
    bool irq_state;

    if (!cpu->accel->vtimer_masked) {
        /* We will get notified on vtimer changes by hvf, nothing to do */
        return;
    }

    r = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_CNTV_CTL_EL0, &ctl);
    assert_hvf_ok(r);

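    /* The line is asserted iff the timer is enabled, unmasked and has fired. */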
    irq_state = (ctl & (TMR_CTL_ENABLE | TMR_CTL_IMASK | TMR_CTL_ISTATUS)) ==
                (TMR_CTL_ENABLE | TMR_CTL_ISTATUS);
    qemu_set_irq(arm_cpu->gt_timer_outputs[GTIMER_VIRT], irq_state);

    if (!irq_state) {
        /* Timer no longer asserting, we can unmask it */
        hv_vcpu_set_vtimer_mask(cpu->accel->fd, false);
        cpu->accel->vtimer_masked = false;
    }
}

int hvf_vcpu_exec(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;
    int ret;
    hv_vcpu_exit_t *hvf_exit = cpu->accel->exit;
    hv_return_t r;
    bool advance_pc = false;

    if (!(cpu->singlestep_enabled & SSTEP_NOIRQ) &&
        hvf_inject_interrupts(cpu)) {
        return EXCP_INTERRUPT;
    }

    if (cpu->halted) {
        return EXCP_HLT;
    }

    flush_cpu_state(cpu);

    bql_unlock();
    r = hv_vcpu_run(cpu->accel->fd);
    bql_lock();
    switch (r) {
    case HV_SUCCESS:
        break;
    case HV_ILLEGAL_GUEST_STATE:
        trace_hvf_illegal_guest_state();
        /* fall through */
    default:
        g_assert_not_reached();
    }

    /* handle VMEXIT */
    uint64_t exit_reason = hvf_exit->reason;
    uint64_t syndrome = hvf_exit->exception.syndrome;
    uint32_t ec = syn_get_ec(syndrome);

    ret = 0;
    switch (exit_reason) {
    case HV_EXIT_REASON_EXCEPTION:
        /* This is the main one, handle below. */
        break;
    case HV_EXIT_REASON_VTIMER_ACTIVATED:
        qemu_set_irq(arm_cpu->gt_timer_outputs[GTIMER_VIRT], 1);
        cpu->accel->vtimer_masked = true;
        return 0;
    case HV_EXIT_REASON_CANCELED:
        /* we got kicked, no exit to process */
        return 0;
    default:
        g_assert_not_reached();
    }

    hvf_sync_vtimer(cpu);

    switch (ec) {
    case EC_SOFTWARESTEP: {
        ret = EXCP_DEBUG;

        if (!cpu->singlestep_enabled) {
            error_report("EC_SOFTWARESTEP but single-stepping not enabled");
        }
        break;
    }
    case EC_AA64_BKPT: {
        ret = EXCP_DEBUG;

        cpu_synchronize_state(cpu);

        if (!hvf_find_sw_breakpoint(cpu, env->pc)) {
            /* Re-inject into the guest */
            ret = 0;
            hvf_raise_exception(cpu, EXCP_BKPT, syn_aa64_bkpt(0), 1);
        }
        break;
    }
    case EC_BREAKPOINT: {
        ret = EXCP_DEBUG;

        cpu_synchronize_state(cpu);

        if (!find_hw_breakpoint(cpu, env->pc)) {
            error_report("EC_BREAKPOINT but unknown hw breakpoint");
        }
        break;
    }
    case EC_WATCHPOINT: {
        ret = EXCP_DEBUG;

        cpu_synchronize_state(cpu);

        CPUWatchpoint *wp =
            find_hw_watchpoint(cpu, hvf_exit->exception.virtual_address);
        if (!wp) {
            error_report("EC_WATCHPOINT but unknown hw watchpoint");
        }
        cpu->watchpoint_hit = wp;
        break;
    }
    case EC_DATAABORT: {
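        /*
         * Decode the ISS fields of the data abort syndrome (ESR_EL2):
         * ISV (bit 24) - syndrome is valid for a single-register access,
         * SAS (bits 23:22) - access size as log2(bytes),
         * SSE (bit 21) - sign-extend the loaded value,
         * SRT (bits 20:16) - register number to transfer,
         * CM (bit 8) - abort came from a cache maintenance instruction,
         * S1PTW (bit 7) - fault on a stage 1 page table walk,
         * WnR (bit 6) - write (1) or read (0).
         */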
        bool isv = syndrome & ARM_EL_ISV;
        bool iswrite = (syndrome >> 6) & 1;
        bool s1ptw = (syndrome >> 7) & 1;
        bool sse = (syndrome >> 21) & 1;
        uint32_t sas = (syndrome >> 22) & 3;
        uint32_t len = 1 << sas;
        uint32_t srt = (syndrome >> 16) & 0x1f;
        uint32_t cm = (syndrome >> 8) & 0x1;
        uint64_t val = 0;

        trace_hvf_data_abort(hvf_exit->exception.virtual_address,
                             hvf_exit->exception.physical_address, isv,
                             iswrite, s1ptw, len, srt);

        if (cm) {
            /* We don't cache MMIO regions */
            advance_pc = true;
            break;
        }

        assert(isv);

        if (iswrite) {
            val = hvf_get_reg(cpu, srt);
            address_space_write(&address_space_memory,
                                hvf_exit->exception.physical_address,
                                MEMTXATTRS_UNSPECIFIED, &val, len);
        } else {
            address_space_read(&address_space_memory,
                               hvf_exit->exception.physical_address,
                               MEMTXATTRS_UNSPECIFIED, &val, len);
            if (sse) {
                val = sextract64(val, 0, len * 8);
            }
            hvf_set_reg(cpu, srt, val);
        }

        advance_pc = true;
        break;
    }
    case EC_SYSTEMREGISTERTRAP: {
        bool isread = (syndrome >> 0) & 1;
        uint32_t rt = (syndrome >> 5) & 0x1f;
        uint32_t reg = syndrome & SYSREG_MASK;
        uint64_t val;
        int sysreg_ret = 0;

        if (isread) {
            sysreg_ret = hvf_sysreg_read(cpu, reg, &val);
            if (!sysreg_ret) {
                trace_hvf_sysreg_read(reg,
                                      SYSREG_OP0(reg),
                                      SYSREG_OP1(reg),
                                      SYSREG_CRN(reg),
                                      SYSREG_CRM(reg),
                                      SYSREG_OP2(reg),
                                      val);
                hvf_set_reg(cpu, rt, val);
            }
        } else {
            val = hvf_get_reg(cpu, rt);
            sysreg_ret = hvf_sysreg_write(cpu, reg, val);
        }

        advance_pc = !sysreg_ret;
        break;
    }
    case EC_WFX_TRAP:
        advance_pc = true;
        if (!(syndrome & WFX_IS_WFE)) {
            hvf_wfi(cpu);
        }
        break;
    case EC_AA64_HVC:
        cpu_synchronize_state(cpu);
        if (arm_cpu->psci_conduit == QEMU_PSCI_CONDUIT_HVC) {
            if (!hvf_handle_psci_call(cpu)) {
                trace_hvf_unknown_hvc(env->pc, env->xregs[0]);
                /* SMCCC 1.3 section 5.2 says every unknown SMCCC call returns -1 */
                env->xregs[0] = -1;
            }
        } else {
            trace_hvf_unknown_hvc(env->pc, env->xregs[0]);
            hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized(), 1);
        }
        break;
    case EC_AA64_SMC:
        cpu_synchronize_state(cpu);
        if (arm_cpu->psci_conduit == QEMU_PSCI_CONDUIT_SMC) {
            advance_pc = true;

            if (!hvf_handle_psci_call(cpu)) {
                trace_hvf_unknown_smc(env->xregs[0]);
                /* SMCCC 1.3 section 5.2 says every unknown SMCCC call returns -1 */
                env->xregs[0] = -1;
            }
        } else {
            trace_hvf_unknown_smc(env->xregs[0]);
            hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized(), 1);
        }
        break;
    default:
        cpu_synchronize_state(cpu);
        trace_hvf_exit(syndrome, ec, env->pc);
        error_report("0x%llx: unhandled exception ec=0x%x", env->pc, ec);
    }

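    /*
     * hvf only runs AArch64 guests, so every instruction that traps here
     * is 4 bytes long and skipping it is a fixed PC += 4.
     */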
    if (advance_pc) {
        uint64_t pc;

        flush_cpu_state(cpu);

        r = hv_vcpu_get_reg(cpu->accel->fd, HV_REG_PC, &pc);
        assert_hvf_ok(r);
        pc += 4;
        r = hv_vcpu_set_reg(cpu->accel->fd, HV_REG_PC, pc);
        assert_hvf_ok(r);

        /* Handle single-stepping over instructions which trigger a VM exit */
        if (cpu->singlestep_enabled) {
            ret = EXCP_DEBUG;
        }
    }

    return ret;
}

static const VMStateDescription vmstate_hvf_vtimer = {
    .name = "hvf-vtimer",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64(vtimer_val, HVFVTimer),
        VMSTATE_END_OF_LIST()
    },
};

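/*
 * Keep the guest vtimer consistent across pause/resume: on pause we
 * snapshot the current vtimer value, and on resume we recompute the
 * offset so the guest does not observe the time spent paused.
 */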
static void hvf_vm_state_change(void *opaque, bool running, RunState state)
{
    HVFVTimer *s = opaque;

    if (running) {
        /* Update vtimer offset on all CPUs */
        hvf_state->vtimer_offset = mach_absolute_time() - s->vtimer_val;
        cpu_synchronize_all_states();
    } else {
        /* Remember vtimer value on every pause */
        s->vtimer_val = hvf_vtimer_val_raw();
    }
}

int hvf_arch_init(void)
{
    hvf_state->vtimer_offset = mach_absolute_time();
    vmstate_register(NULL, 0, &vmstate_hvf_vtimer, &vtimer);
    qemu_add_vm_change_state_handler(hvf_vm_state_change, &vtimer);

    hvf_arm_init_debug();

    return 0;
}

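/* AArch64 "BRK #0", the instruction planted for software breakpoints. */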
static const uint32_t brk_insn = 0xd4200000;

int hvf_arch_insert_sw_breakpoint(CPUState *cpu, struct hvf_sw_breakpoint *bp)
{
    if (cpu_memory_rw_debug(cpu, bp->pc, (uint8_t *)&bp->saved_insn, 4, 0) ||
        cpu_memory_rw_debug(cpu, bp->pc, (uint8_t *)&brk_insn, 4, 1)) {
        return -EINVAL;
    }
    return 0;
}

int hvf_arch_remove_sw_breakpoint(CPUState *cpu, struct hvf_sw_breakpoint *bp)
{
    static uint32_t brk;

    if (cpu_memory_rw_debug(cpu, bp->pc, (uint8_t *)&brk, 4, 0) ||
        brk != brk_insn ||
        cpu_memory_rw_debug(cpu, bp->pc, (uint8_t *)&bp->saved_insn, 4, 1)) {
        return -EINVAL;
    }
    return 0;
}

int hvf_arch_insert_hw_breakpoint(vaddr addr, vaddr len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        return insert_hw_breakpoint(addr);
    case GDB_WATCHPOINT_READ:
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        return insert_hw_watchpoint(addr, len, type);
    default:
        return -ENOSYS;
    }
}

int hvf_arch_remove_hw_breakpoint(vaddr addr, vaddr len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        return delete_hw_breakpoint(addr);
    case GDB_WATCHPOINT_READ:
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        return delete_hw_watchpoint(addr, len, type);
    default:
        return -ENOSYS;
    }
}

void hvf_arch_remove_all_hw_breakpoints(void)
{
    if (cur_hw_wps > 0) {
        g_array_remove_range(hw_watchpoints, 0, cur_hw_wps);
    }
    if (cur_hw_bps > 0) {
        g_array_remove_range(hw_breakpoints, 0, cur_hw_bps);
    }
}

/*
 * Update the vCPU with the gdbstub's view of debug registers. This view
 * consists of all hardware breakpoints and watchpoints inserted so far while
 * debugging the guest.
 */
static void hvf_put_gdbstub_debug_registers(CPUState *cpu)
{
    hv_return_t r = HV_SUCCESS;
    int i;

    for (i = 0; i < cur_hw_bps; i++) {
        HWBreakpoint *bp = get_hw_bp(i);
        r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgbcr_regs[i], bp->bcr);
        assert_hvf_ok(r);
        r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgbvr_regs[i], bp->bvr);
        assert_hvf_ok(r);
    }
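    /* Zero the slots the gdbstub does not use so stale values cannot fire. */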
    for (i = cur_hw_bps; i < max_hw_bps; i++) {
        r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgbcr_regs[i], 0);
        assert_hvf_ok(r);
        r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgbvr_regs[i], 0);
        assert_hvf_ok(r);
    }

    for (i = 0; i < cur_hw_wps; i++) {
        HWWatchpoint *wp = get_hw_wp(i);
        r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgwcr_regs[i], wp->wcr);
        assert_hvf_ok(r);
        r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgwvr_regs[i], wp->wvr);
        assert_hvf_ok(r);
    }
    for (i = cur_hw_wps; i < max_hw_wps; i++) {
        r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgwcr_regs[i], 0);
        assert_hvf_ok(r);
        r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgwvr_regs[i], 0);
        assert_hvf_ok(r);
    }
}

/*
 * Update the vCPU with the guest's view of debug registers. This view is kept
 * in the environment at all times.
 */
static void hvf_put_guest_debug_registers(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;
    hv_return_t r = HV_SUCCESS;
    int i;

    for (i = 0; i < max_hw_bps; i++) {
        r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgbcr_regs[i],
                                env->cp15.dbgbcr[i]);
        assert_hvf_ok(r);
        r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgbvr_regs[i],
                                env->cp15.dbgbvr[i]);
        assert_hvf_ok(r);
    }

    for (i = 0; i < max_hw_wps; i++) {
        r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgwcr_regs[i],
                                env->cp15.dbgwcr[i]);
        assert_hvf_ok(r);
        r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgwvr_regs[i],
                                env->cp15.dbgwvr[i]);
        assert_hvf_ok(r);
    }
}

static inline bool hvf_arm_hw_debug_active(CPUState *cpu)
{
    return ((cur_hw_wps > 0) || (cur_hw_bps > 0));
}

static void hvf_arch_set_traps(CPUState *cpu)
{
    bool should_enable_traps = false;
    hv_return_t r = HV_SUCCESS;

    /*
     * Check whether guest debugging is enabled for at least one vCPU; if it
     * is, enable exiting the guest on all vCPUs.
     */
    should_enable_traps |= cpu->accel->guest_debug_enabled;
    /* Set whether debug exceptions exit the guest */
    r = hv_vcpu_set_trap_debug_exceptions(cpu->accel->fd,
                                            should_enable_traps);
    assert_hvf_ok(r);

    /* Set whether accesses to debug registers exit the guest */
    r = hv_vcpu_set_trap_debug_reg_accesses(cpu->accel->fd,
                                            should_enable_traps);
    assert_hvf_ok(r);
}

void hvf_arch_update_guest_debug(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;

    /* Check whether guest debugging is enabled */
    cpu->accel->guest_debug_enabled = cpu->singlestep_enabled ||
                                    hvf_sw_breakpoints_active(cpu) ||
                                    hvf_arm_hw_debug_active(cpu);

    /* Update debug registers */
    if (cpu->accel->guest_debug_enabled) {
        hvf_put_gdbstub_debug_registers(cpu);
    } else {
        hvf_put_guest_debug_registers(cpu);
    }

    cpu_synchronize_state(cpu);

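    /*
     * Single-stepping uses MDSCR_EL1.SS (bit 0) together with PSTATE.SS;
     * hardware break-/watchpoints additionally require MDSCR_EL1.MDE
     * (bit 15).
     */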
    /* Enable/disable single-stepping */
    if (cpu->singlestep_enabled) {
        env->cp15.mdscr_el1 =
            deposit64(env->cp15.mdscr_el1, MDSCR_EL1_SS_SHIFT, 1, 1);
        pstate_write(env, pstate_read(env) | PSTATE_SS);
    } else {
        env->cp15.mdscr_el1 =
            deposit64(env->cp15.mdscr_el1, MDSCR_EL1_SS_SHIFT, 1, 0);
    }

    /* Enable/disable Breakpoint exceptions */
    if (hvf_arm_hw_debug_active(cpu)) {
        env->cp15.mdscr_el1 =
            deposit64(env->cp15.mdscr_el1, MDSCR_EL1_MDE_SHIFT, 1, 1);
    } else {
        env->cp15.mdscr_el1 =
            deposit64(env->cp15.mdscr_el1, MDSCR_EL1_MDE_SHIFT, 1, 0);
    }

    hvf_arch_set_traps(cpu);
}

bool hvf_arch_supports_guest_debug(void)
{
    return true;
}