xref: /openbmc/qemu/target/arm/hvf/hvf.c (revision 9c4888c9)
1 /*
2  * QEMU Hypervisor.framework support for Apple Silicon
3  *
4  * Copyright 2020 Alexander Graf <agraf@csgraf.de>
5  * Copyright 2020 Google LLC
6  *
7  * This work is licensed under the terms of the GNU GPL, version 2 or later.
8  * See the COPYING file in the top-level directory.
9  *
10  */
11 
12 #include "qemu/osdep.h"
13 #include "qemu/error-report.h"
14 
15 #include "sysemu/runstate.h"
16 #include "sysemu/hvf.h"
17 #include "sysemu/hvf_int.h"
18 #include "sysemu/hw_accel.h"
19 #include "hvf_arm.h"
20 
21 #include <mach/mach_time.h>
22 
23 #include "exec/address-spaces.h"
24 #include "hw/irq.h"
25 #include "qemu/main-loop.h"
26 #include "sysemu/cpus.h"
27 #include "arm-powerctl.h"
28 #include "target/arm/cpu.h"
29 #include "target/arm/internals.h"
30 #include "trace/trace-target_arm_hvf.h"
31 #include "migration/vmstate.h"
32 
33 #define HVF_SYSREG(crn, crm, op0, op1, op2) \
34         ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, crn, crm, op0, op1, op2)
35 #define PL1_WRITE_MASK 0x4
36 
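/*
 * The SYSREG_* field accessors below decode the ISS of an ESR_EL2
 * EC_SYSTEMREGISTERTRAP syndrome, which is why the bit positions look odd:
 * Op0 sits at [21:20], Op2 at [19:17], Op1 at [16:14], CRn at [13:10],
 * Rt at [9:5], CRm at [4:1] and the read/write direction bit at [0].
 * SYSREG() builds the same encoding, e.g. SYSREG(3, 3, 14, 0, 1) below is
 * the trap encoding of CNTPCT_EL0.
 */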
37 #define SYSREG_OP0_SHIFT      20
38 #define SYSREG_OP0_MASK       0x3
39 #define SYSREG_OP0(sysreg)    ((sysreg >> SYSREG_OP0_SHIFT) & SYSREG_OP0_MASK)
40 #define SYSREG_OP1_SHIFT      14
41 #define SYSREG_OP1_MASK       0x7
42 #define SYSREG_OP1(sysreg)    ((sysreg >> SYSREG_OP1_SHIFT) & SYSREG_OP1_MASK)
43 #define SYSREG_CRN_SHIFT      10
44 #define SYSREG_CRN_MASK       0xf
45 #define SYSREG_CRN(sysreg)    ((sysreg >> SYSREG_CRN_SHIFT) & SYSREG_CRN_MASK)
46 #define SYSREG_CRM_SHIFT      1
47 #define SYSREG_CRM_MASK       0xf
48 #define SYSREG_CRM(sysreg)    ((sysreg >> SYSREG_CRM_SHIFT) & SYSREG_CRM_MASK)
49 #define SYSREG_OP2_SHIFT      17
50 #define SYSREG_OP2_MASK       0x7
51 #define SYSREG_OP2(sysreg)    ((sysreg >> SYSREG_OP2_SHIFT) & SYSREG_OP2_MASK)
52 
53 #define SYSREG(op0, op1, crn, crm, op2) \
54     ((op0 << SYSREG_OP0_SHIFT) | \
55      (op1 << SYSREG_OP1_SHIFT) | \
56      (crn << SYSREG_CRN_SHIFT) | \
57      (crm << SYSREG_CRM_SHIFT) | \
58      (op2 << SYSREG_OP2_SHIFT))
59 #define SYSREG_MASK \
60     SYSREG(SYSREG_OP0_MASK, \
61            SYSREG_OP1_MASK, \
62            SYSREG_CRN_MASK, \
63            SYSREG_CRM_MASK, \
64            SYSREG_OP2_MASK)
65 #define SYSREG_OSLAR_EL1      SYSREG(2, 0, 1, 0, 4)
66 #define SYSREG_OSLSR_EL1      SYSREG(2, 0, 1, 1, 4)
67 #define SYSREG_OSDLR_EL1      SYSREG(2, 0, 1, 3, 4)
68 #define SYSREG_CNTPCT_EL0     SYSREG(3, 3, 14, 0, 1)
69 #define SYSREG_PMCR_EL0       SYSREG(3, 3, 9, 12, 0)
70 #define SYSREG_PMUSERENR_EL0  SYSREG(3, 3, 9, 14, 0)
71 #define SYSREG_PMCNTENSET_EL0 SYSREG(3, 3, 9, 12, 1)
72 #define SYSREG_PMCNTENCLR_EL0 SYSREG(3, 3, 9, 12, 2)
73 #define SYSREG_PMINTENCLR_EL1 SYSREG(3, 0, 9, 14, 2)
74 #define SYSREG_PMOVSCLR_EL0   SYSREG(3, 3, 9, 12, 3)
75 #define SYSREG_PMSWINC_EL0    SYSREG(3, 3, 9, 12, 4)
76 #define SYSREG_PMSELR_EL0     SYSREG(3, 3, 9, 12, 5)
77 #define SYSREG_PMCEID0_EL0    SYSREG(3, 3, 9, 12, 6)
78 #define SYSREG_PMCEID1_EL0    SYSREG(3, 3, 9, 12, 7)
79 #define SYSREG_PMCCNTR_EL0    SYSREG(3, 3, 9, 13, 0)
80 #define SYSREG_PMCCFILTR_EL0  SYSREG(3, 3, 14, 15, 7)
81 
82 #define WFX_IS_WFE (1 << 0)
83 
84 #define TMR_CTL_ENABLE  (1 << 0)
85 #define TMR_CTL_IMASK   (1 << 1)
86 #define TMR_CTL_ISTATUS (1 << 2)
87 
88 static void hvf_wfi(CPUState *cpu);
89 
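/*
 * Hypervisor.framework drives the guest vtimer from the host counter plus a
 * per-VM offset. While the VM is stopped we remember the last counter value
 * here so that guest time does not jump forward across stop/cont or
 * migration (see hvf_vm_state_change() and vmstate_hvf_vtimer below).
 */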
90 typedef struct HVFVTimer {
91     /* Vtimer value during migration and paused state */
92     uint64_t vtimer_val;
93 } HVFVTimer;
94 
95 static HVFVTimer vtimer;
96 
97 typedef struct ARMHostCPUFeatures {
98     ARMISARegisters isar;
99     uint64_t features;
100     uint64_t midr;
101     uint32_t reset_sctlr;
102     const char *dtb_compatible;
103 } ARMHostCPUFeatures;
104 
105 static ARMHostCPUFeatures arm_host_cpu_features;
106 
107 struct hvf_reg_match {
108     int reg;
109     uint64_t offset;
110 };
111 
112 static const struct hvf_reg_match hvf_reg_match[] = {
113     { HV_REG_X0,   offsetof(CPUARMState, xregs[0]) },
114     { HV_REG_X1,   offsetof(CPUARMState, xregs[1]) },
115     { HV_REG_X2,   offsetof(CPUARMState, xregs[2]) },
116     { HV_REG_X3,   offsetof(CPUARMState, xregs[3]) },
117     { HV_REG_X4,   offsetof(CPUARMState, xregs[4]) },
118     { HV_REG_X5,   offsetof(CPUARMState, xregs[5]) },
119     { HV_REG_X6,   offsetof(CPUARMState, xregs[6]) },
120     { HV_REG_X7,   offsetof(CPUARMState, xregs[7]) },
121     { HV_REG_X8,   offsetof(CPUARMState, xregs[8]) },
122     { HV_REG_X9,   offsetof(CPUARMState, xregs[9]) },
123     { HV_REG_X10,  offsetof(CPUARMState, xregs[10]) },
124     { HV_REG_X11,  offsetof(CPUARMState, xregs[11]) },
125     { HV_REG_X12,  offsetof(CPUARMState, xregs[12]) },
126     { HV_REG_X13,  offsetof(CPUARMState, xregs[13]) },
127     { HV_REG_X14,  offsetof(CPUARMState, xregs[14]) },
128     { HV_REG_X15,  offsetof(CPUARMState, xregs[15]) },
129     { HV_REG_X16,  offsetof(CPUARMState, xregs[16]) },
130     { HV_REG_X17,  offsetof(CPUARMState, xregs[17]) },
131     { HV_REG_X18,  offsetof(CPUARMState, xregs[18]) },
132     { HV_REG_X19,  offsetof(CPUARMState, xregs[19]) },
133     { HV_REG_X20,  offsetof(CPUARMState, xregs[20]) },
134     { HV_REG_X21,  offsetof(CPUARMState, xregs[21]) },
135     { HV_REG_X22,  offsetof(CPUARMState, xregs[22]) },
136     { HV_REG_X23,  offsetof(CPUARMState, xregs[23]) },
137     { HV_REG_X24,  offsetof(CPUARMState, xregs[24]) },
138     { HV_REG_X25,  offsetof(CPUARMState, xregs[25]) },
139     { HV_REG_X26,  offsetof(CPUARMState, xregs[26]) },
140     { HV_REG_X27,  offsetof(CPUARMState, xregs[27]) },
141     { HV_REG_X28,  offsetof(CPUARMState, xregs[28]) },
142     { HV_REG_X29,  offsetof(CPUARMState, xregs[29]) },
143     { HV_REG_X30,  offsetof(CPUARMState, xregs[30]) },
144     { HV_REG_PC,   offsetof(CPUARMState, pc) },
145 };
146 
147 static const struct hvf_reg_match hvf_fpreg_match[] = {
148     { HV_SIMD_FP_REG_Q0,  offsetof(CPUARMState, vfp.zregs[0]) },
149     { HV_SIMD_FP_REG_Q1,  offsetof(CPUARMState, vfp.zregs[1]) },
150     { HV_SIMD_FP_REG_Q2,  offsetof(CPUARMState, vfp.zregs[2]) },
151     { HV_SIMD_FP_REG_Q3,  offsetof(CPUARMState, vfp.zregs[3]) },
152     { HV_SIMD_FP_REG_Q4,  offsetof(CPUARMState, vfp.zregs[4]) },
153     { HV_SIMD_FP_REG_Q5,  offsetof(CPUARMState, vfp.zregs[5]) },
154     { HV_SIMD_FP_REG_Q6,  offsetof(CPUARMState, vfp.zregs[6]) },
155     { HV_SIMD_FP_REG_Q7,  offsetof(CPUARMState, vfp.zregs[7]) },
156     { HV_SIMD_FP_REG_Q8,  offsetof(CPUARMState, vfp.zregs[8]) },
157     { HV_SIMD_FP_REG_Q9,  offsetof(CPUARMState, vfp.zregs[9]) },
158     { HV_SIMD_FP_REG_Q10, offsetof(CPUARMState, vfp.zregs[10]) },
159     { HV_SIMD_FP_REG_Q11, offsetof(CPUARMState, vfp.zregs[11]) },
160     { HV_SIMD_FP_REG_Q12, offsetof(CPUARMState, vfp.zregs[12]) },
161     { HV_SIMD_FP_REG_Q13, offsetof(CPUARMState, vfp.zregs[13]) },
162     { HV_SIMD_FP_REG_Q14, offsetof(CPUARMState, vfp.zregs[14]) },
163     { HV_SIMD_FP_REG_Q15, offsetof(CPUARMState, vfp.zregs[15]) },
164     { HV_SIMD_FP_REG_Q16, offsetof(CPUARMState, vfp.zregs[16]) },
165     { HV_SIMD_FP_REG_Q17, offsetof(CPUARMState, vfp.zregs[17]) },
166     { HV_SIMD_FP_REG_Q18, offsetof(CPUARMState, vfp.zregs[18]) },
167     { HV_SIMD_FP_REG_Q19, offsetof(CPUARMState, vfp.zregs[19]) },
168     { HV_SIMD_FP_REG_Q20, offsetof(CPUARMState, vfp.zregs[20]) },
169     { HV_SIMD_FP_REG_Q21, offsetof(CPUARMState, vfp.zregs[21]) },
170     { HV_SIMD_FP_REG_Q22, offsetof(CPUARMState, vfp.zregs[22]) },
171     { HV_SIMD_FP_REG_Q23, offsetof(CPUARMState, vfp.zregs[23]) },
172     { HV_SIMD_FP_REG_Q24, offsetof(CPUARMState, vfp.zregs[24]) },
173     { HV_SIMD_FP_REG_Q25, offsetof(CPUARMState, vfp.zregs[25]) },
174     { HV_SIMD_FP_REG_Q26, offsetof(CPUARMState, vfp.zregs[26]) },
175     { HV_SIMD_FP_REG_Q27, offsetof(CPUARMState, vfp.zregs[27]) },
176     { HV_SIMD_FP_REG_Q28, offsetof(CPUARMState, vfp.zregs[28]) },
177     { HV_SIMD_FP_REG_Q29, offsetof(CPUARMState, vfp.zregs[29]) },
178     { HV_SIMD_FP_REG_Q30, offsetof(CPUARMState, vfp.zregs[30]) },
179     { HV_SIMD_FP_REG_Q31, offsetof(CPUARMState, vfp.zregs[31]) },
180 };
181 
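/*
 * Pairs a Hypervisor.framework system register ID with the corresponding
 * QEMU cpreg key. cp_idx is filled in by hvf_arch_init_vcpu(): it is the
 * register's index into cpreg_values[], or -1 when QEMU's cpreg list does
 * not contain the register and it must be skipped during sync.
 */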
182 struct hvf_sreg_match {
183     int reg;
184     uint32_t key;
185     uint32_t cp_idx;
186 };
187 
188 static struct hvf_sreg_match hvf_sreg_match[] = {
189     { HV_SYS_REG_DBGBVR0_EL1, HVF_SYSREG(0, 0, 14, 0, 4) },
190     { HV_SYS_REG_DBGBCR0_EL1, HVF_SYSREG(0, 0, 14, 0, 5) },
191     { HV_SYS_REG_DBGWVR0_EL1, HVF_SYSREG(0, 0, 14, 0, 6) },
192     { HV_SYS_REG_DBGWCR0_EL1, HVF_SYSREG(0, 0, 14, 0, 7) },
193 
194     { HV_SYS_REG_DBGBVR1_EL1, HVF_SYSREG(0, 1, 14, 0, 4) },
195     { HV_SYS_REG_DBGBCR1_EL1, HVF_SYSREG(0, 1, 14, 0, 5) },
196     { HV_SYS_REG_DBGWVR1_EL1, HVF_SYSREG(0, 1, 14, 0, 6) },
197     { HV_SYS_REG_DBGWCR1_EL1, HVF_SYSREG(0, 1, 14, 0, 7) },
198 
199     { HV_SYS_REG_DBGBVR2_EL1, HVF_SYSREG(0, 2, 14, 0, 4) },
200     { HV_SYS_REG_DBGBCR2_EL1, HVF_SYSREG(0, 2, 14, 0, 5) },
201     { HV_SYS_REG_DBGWVR2_EL1, HVF_SYSREG(0, 2, 14, 0, 6) },
202     { HV_SYS_REG_DBGWCR2_EL1, HVF_SYSREG(0, 2, 14, 0, 7) },
203 
204     { HV_SYS_REG_DBGBVR3_EL1, HVF_SYSREG(0, 3, 14, 0, 4) },
205     { HV_SYS_REG_DBGBCR3_EL1, HVF_SYSREG(0, 3, 14, 0, 5) },
206     { HV_SYS_REG_DBGWVR3_EL1, HVF_SYSREG(0, 3, 14, 0, 6) },
207     { HV_SYS_REG_DBGWCR3_EL1, HVF_SYSREG(0, 3, 14, 0, 7) },
208 
209     { HV_SYS_REG_DBGBVR4_EL1, HVF_SYSREG(0, 4, 14, 0, 4) },
210     { HV_SYS_REG_DBGBCR4_EL1, HVF_SYSREG(0, 4, 14, 0, 5) },
211     { HV_SYS_REG_DBGWVR4_EL1, HVF_SYSREG(0, 4, 14, 0, 6) },
212     { HV_SYS_REG_DBGWCR4_EL1, HVF_SYSREG(0, 4, 14, 0, 7) },
213 
214     { HV_SYS_REG_DBGBVR5_EL1, HVF_SYSREG(0, 5, 14, 0, 4) },
215     { HV_SYS_REG_DBGBCR5_EL1, HVF_SYSREG(0, 5, 14, 0, 5) },
216     { HV_SYS_REG_DBGWVR5_EL1, HVF_SYSREG(0, 5, 14, 0, 6) },
217     { HV_SYS_REG_DBGWCR5_EL1, HVF_SYSREG(0, 5, 14, 0, 7) },
218 
219     { HV_SYS_REG_DBGBVR6_EL1, HVF_SYSREG(0, 6, 14, 0, 4) },
220     { HV_SYS_REG_DBGBCR6_EL1, HVF_SYSREG(0, 6, 14, 0, 5) },
221     { HV_SYS_REG_DBGWVR6_EL1, HVF_SYSREG(0, 6, 14, 0, 6) },
222     { HV_SYS_REG_DBGWCR6_EL1, HVF_SYSREG(0, 6, 14, 0, 7) },
223 
224     { HV_SYS_REG_DBGBVR7_EL1, HVF_SYSREG(0, 7, 14, 0, 4) },
225     { HV_SYS_REG_DBGBCR7_EL1, HVF_SYSREG(0, 7, 14, 0, 5) },
226     { HV_SYS_REG_DBGWVR7_EL1, HVF_SYSREG(0, 7, 14, 0, 6) },
227     { HV_SYS_REG_DBGWCR7_EL1, HVF_SYSREG(0, 7, 14, 0, 7) },
228 
229     { HV_SYS_REG_DBGBVR8_EL1, HVF_SYSREG(0, 8, 14, 0, 4) },
230     { HV_SYS_REG_DBGBCR8_EL1, HVF_SYSREG(0, 8, 14, 0, 5) },
231     { HV_SYS_REG_DBGWVR8_EL1, HVF_SYSREG(0, 8, 14, 0, 6) },
232     { HV_SYS_REG_DBGWCR8_EL1, HVF_SYSREG(0, 8, 14, 0, 7) },
233 
234     { HV_SYS_REG_DBGBVR9_EL1, HVF_SYSREG(0, 9, 14, 0, 4) },
235     { HV_SYS_REG_DBGBCR9_EL1, HVF_SYSREG(0, 9, 14, 0, 5) },
236     { HV_SYS_REG_DBGWVR9_EL1, HVF_SYSREG(0, 9, 14, 0, 6) },
237     { HV_SYS_REG_DBGWCR9_EL1, HVF_SYSREG(0, 9, 14, 0, 7) },
238 
239     { HV_SYS_REG_DBGBVR10_EL1, HVF_SYSREG(0, 10, 14, 0, 4) },
240     { HV_SYS_REG_DBGBCR10_EL1, HVF_SYSREG(0, 10, 14, 0, 5) },
241     { HV_SYS_REG_DBGWVR10_EL1, HVF_SYSREG(0, 10, 14, 0, 6) },
242     { HV_SYS_REG_DBGWCR10_EL1, HVF_SYSREG(0, 10, 14, 0, 7) },
243 
244     { HV_SYS_REG_DBGBVR11_EL1, HVF_SYSREG(0, 11, 14, 0, 4) },
245     { HV_SYS_REG_DBGBCR11_EL1, HVF_SYSREG(0, 11, 14, 0, 5) },
246     { HV_SYS_REG_DBGWVR11_EL1, HVF_SYSREG(0, 11, 14, 0, 6) },
247     { HV_SYS_REG_DBGWCR11_EL1, HVF_SYSREG(0, 11, 14, 0, 7) },
248 
249     { HV_SYS_REG_DBGBVR12_EL1, HVF_SYSREG(0, 12, 14, 0, 4) },
250     { HV_SYS_REG_DBGBCR12_EL1, HVF_SYSREG(0, 12, 14, 0, 5) },
251     { HV_SYS_REG_DBGWVR12_EL1, HVF_SYSREG(0, 12, 14, 0, 6) },
252     { HV_SYS_REG_DBGWCR12_EL1, HVF_SYSREG(0, 12, 14, 0, 7) },
253 
254     { HV_SYS_REG_DBGBVR13_EL1, HVF_SYSREG(0, 13, 14, 0, 4) },
255     { HV_SYS_REG_DBGBCR13_EL1, HVF_SYSREG(0, 13, 14, 0, 5) },
256     { HV_SYS_REG_DBGWVR13_EL1, HVF_SYSREG(0, 13, 14, 0, 6) },
257     { HV_SYS_REG_DBGWCR13_EL1, HVF_SYSREG(0, 13, 14, 0, 7) },
258 
259     { HV_SYS_REG_DBGBVR14_EL1, HVF_SYSREG(0, 14, 14, 0, 4) },
260     { HV_SYS_REG_DBGBCR14_EL1, HVF_SYSREG(0, 14, 14, 0, 5) },
261     { HV_SYS_REG_DBGWVR14_EL1, HVF_SYSREG(0, 14, 14, 0, 6) },
262     { HV_SYS_REG_DBGWCR14_EL1, HVF_SYSREG(0, 14, 14, 0, 7) },
263 
264     { HV_SYS_REG_DBGBVR15_EL1, HVF_SYSREG(0, 15, 14, 0, 4) },
265     { HV_SYS_REG_DBGBCR15_EL1, HVF_SYSREG(0, 15, 14, 0, 5) },
266     { HV_SYS_REG_DBGWVR15_EL1, HVF_SYSREG(0, 15, 14, 0, 6) },
267     { HV_SYS_REG_DBGWCR15_EL1, HVF_SYSREG(0, 15, 14, 0, 7) },
268 
269 #ifdef SYNC_NO_RAW_REGS
270     /*
271      * The registers below are manually synced on init because they are
272      * marked as NO_RAW. We still list them to make number space sync easier.
273      */
274     { HV_SYS_REG_MDCCINT_EL1, HVF_SYSREG(0, 2, 2, 0, 0) },
275     { HV_SYS_REG_MIDR_EL1, HVF_SYSREG(0, 0, 3, 0, 0) },
276     { HV_SYS_REG_MPIDR_EL1, HVF_SYSREG(0, 0, 3, 0, 5) },
277     { HV_SYS_REG_ID_AA64PFR0_EL1, HVF_SYSREG(0, 4, 3, 0, 0) },
278 #endif
279     { HV_SYS_REG_ID_AA64PFR1_EL1, HVF_SYSREG(0, 4, 3, 0, 2) },
280     { HV_SYS_REG_ID_AA64DFR0_EL1, HVF_SYSREG(0, 5, 3, 0, 0) },
281     { HV_SYS_REG_ID_AA64DFR1_EL1, HVF_SYSREG(0, 5, 3, 0, 1) },
282     { HV_SYS_REG_ID_AA64ISAR0_EL1, HVF_SYSREG(0, 6, 3, 0, 0) },
283     { HV_SYS_REG_ID_AA64ISAR1_EL1, HVF_SYSREG(0, 6, 3, 0, 1) },
284 #ifdef SYNC_NO_MMFR0
285     /* Keep the hardware MMFR0; the host's limits apply to the guest anyway */
286     { HV_SYS_REG_ID_AA64MMFR0_EL1, HVF_SYSREG(0, 7, 3, 0, 0) },
287 #endif
288     { HV_SYS_REG_ID_AA64MMFR1_EL1, HVF_SYSREG(0, 7, 3, 0, 1) },
289     { HV_SYS_REG_ID_AA64MMFR2_EL1, HVF_SYSREG(0, 7, 3, 0, 2) },
290 
291     { HV_SYS_REG_MDSCR_EL1, HVF_SYSREG(0, 2, 2, 0, 2) },
292     { HV_SYS_REG_SCTLR_EL1, HVF_SYSREG(1, 0, 3, 0, 0) },
293     { HV_SYS_REG_CPACR_EL1, HVF_SYSREG(1, 0, 3, 0, 2) },
294     { HV_SYS_REG_TTBR0_EL1, HVF_SYSREG(2, 0, 3, 0, 0) },
295     { HV_SYS_REG_TTBR1_EL1, HVF_SYSREG(2, 0, 3, 0, 1) },
296     { HV_SYS_REG_TCR_EL1, HVF_SYSREG(2, 0, 3, 0, 2) },
297 
298     { HV_SYS_REG_APIAKEYLO_EL1, HVF_SYSREG(2, 1, 3, 0, 0) },
299     { HV_SYS_REG_APIAKEYHI_EL1, HVF_SYSREG(2, 1, 3, 0, 1) },
300     { HV_SYS_REG_APIBKEYLO_EL1, HVF_SYSREG(2, 1, 3, 0, 2) },
301     { HV_SYS_REG_APIBKEYHI_EL1, HVF_SYSREG(2, 1, 3, 0, 3) },
302     { HV_SYS_REG_APDAKEYLO_EL1, HVF_SYSREG(2, 2, 3, 0, 0) },
303     { HV_SYS_REG_APDAKEYHI_EL1, HVF_SYSREG(2, 2, 3, 0, 1) },
304     { HV_SYS_REG_APDBKEYLO_EL1, HVF_SYSREG(2, 2, 3, 0, 2) },
305     { HV_SYS_REG_APDBKEYHI_EL1, HVF_SYSREG(2, 2, 3, 0, 3) },
306     { HV_SYS_REG_APGAKEYLO_EL1, HVF_SYSREG(2, 3, 3, 0, 0) },
307     { HV_SYS_REG_APGAKEYHI_EL1, HVF_SYSREG(2, 3, 3, 0, 1) },
308 
309     { HV_SYS_REG_SPSR_EL1, HVF_SYSREG(4, 0, 3, 0, 0) },
310     { HV_SYS_REG_ELR_EL1, HVF_SYSREG(4, 0, 3, 0, 1) },
311     { HV_SYS_REG_SP_EL0, HVF_SYSREG(4, 1, 3, 0, 0) },
312     { HV_SYS_REG_AFSR0_EL1, HVF_SYSREG(5, 1, 3, 0, 0) },
313     { HV_SYS_REG_AFSR1_EL1, HVF_SYSREG(5, 1, 3, 0, 1) },
314     { HV_SYS_REG_ESR_EL1, HVF_SYSREG(5, 2, 3, 0, 0) },
315     { HV_SYS_REG_FAR_EL1, HVF_SYSREG(6, 0, 3, 0, 0) },
316     { HV_SYS_REG_PAR_EL1, HVF_SYSREG(7, 4, 3, 0, 0) },
317     { HV_SYS_REG_MAIR_EL1, HVF_SYSREG(10, 2, 3, 0, 0) },
318     { HV_SYS_REG_AMAIR_EL1, HVF_SYSREG(10, 3, 3, 0, 0) },
319     { HV_SYS_REG_VBAR_EL1, HVF_SYSREG(12, 0, 3, 0, 0) },
320     { HV_SYS_REG_CONTEXTIDR_EL1, HVF_SYSREG(13, 0, 3, 0, 1) },
321     { HV_SYS_REG_TPIDR_EL1, HVF_SYSREG(13, 0, 3, 0, 4) },
322     { HV_SYS_REG_CNTKCTL_EL1, HVF_SYSREG(14, 1, 3, 0, 0) },
323     { HV_SYS_REG_CSSELR_EL1, HVF_SYSREG(0, 0, 3, 2, 0) },
324     { HV_SYS_REG_TPIDR_EL0, HVF_SYSREG(13, 0, 3, 3, 2) },
325     { HV_SYS_REG_TPIDRRO_EL0, HVF_SYSREG(13, 0, 3, 3, 3) },
326     { HV_SYS_REG_CNTV_CTL_EL0, HVF_SYSREG(14, 3, 3, 3, 1) },
327     { HV_SYS_REG_CNTV_CVAL_EL0, HVF_SYSREG(14, 3, 3, 3, 2) },
328     { HV_SYS_REG_SP_EL1, HVF_SYSREG(4, 1, 3, 4, 0) },
329 };
330 
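/*
 * Read the vCPU state (general purpose, FP/SIMD, PSTATE, FPCR/FPSR and the
 * synced system registers) back from Hypervisor.framework into CPUARMState.
 */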
331 int hvf_get_registers(CPUState *cpu)
332 {
333     ARMCPU *arm_cpu = ARM_CPU(cpu);
334     CPUARMState *env = &arm_cpu->env;
335     hv_return_t ret;
336     uint64_t val;
337     hv_simd_fp_uchar16_t fpval;
338     int i;
339 
340     for (i = 0; i < ARRAY_SIZE(hvf_reg_match); i++) {
341         ret = hv_vcpu_get_reg(cpu->hvf->fd, hvf_reg_match[i].reg, &val);
342         *(uint64_t *)((void *)env + hvf_reg_match[i].offset) = val;
343         assert_hvf_ok(ret);
344     }
345 
346     for (i = 0; i < ARRAY_SIZE(hvf_fpreg_match); i++) {
347         ret = hv_vcpu_get_simd_fp_reg(cpu->hvf->fd, hvf_fpreg_match[i].reg,
348                                       &fpval);
349         memcpy((void *)env + hvf_fpreg_match[i].offset, &fpval, sizeof(fpval));
350         assert_hvf_ok(ret);
351     }
352 
353     val = 0;
354     ret = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_FPCR, &val);
355     assert_hvf_ok(ret);
356     vfp_set_fpcr(env, val);
357 
358     val = 0;
359     ret = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_FPSR, &val);
360     assert_hvf_ok(ret);
361     vfp_set_fpsr(env, val);
362 
363     ret = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_CPSR, &val);
364     assert_hvf_ok(ret);
365     pstate_write(env, val);
366 
367     for (i = 0; i < ARRAY_SIZE(hvf_sreg_match); i++) {
368         if (hvf_sreg_match[i].cp_idx == -1) {
369             continue;
370         }
371 
372         ret = hv_vcpu_get_sys_reg(cpu->hvf->fd, hvf_sreg_match[i].reg, &val);
373         assert_hvf_ok(ret);
374 
375         arm_cpu->cpreg_values[hvf_sreg_match[i].cp_idx] = val;
376     }
377     assert(write_list_to_cpustate(arm_cpu));
378 
379     aarch64_restore_sp(env, arm_current_el(env));
380 
381     return 0;
382 }
383 
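/*
 * Write CPUARMState back into the Hypervisor.framework vCPU, including the
 * per-VM vtimer offset.
 */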
384 int hvf_put_registers(CPUState *cpu)
385 {
386     ARMCPU *arm_cpu = ARM_CPU(cpu);
387     CPUARMState *env = &arm_cpu->env;
388     hv_return_t ret;
389     uint64_t val;
390     hv_simd_fp_uchar16_t fpval;
391     int i;
392 
393     for (i = 0; i < ARRAY_SIZE(hvf_reg_match); i++) {
394         val = *(uint64_t *)((void *)env + hvf_reg_match[i].offset);
395         ret = hv_vcpu_set_reg(cpu->hvf->fd, hvf_reg_match[i].reg, val);
396         assert_hvf_ok(ret);
397     }
398 
399     for (i = 0; i < ARRAY_SIZE(hvf_fpreg_match); i++) {
400         memcpy(&fpval, (void *)env + hvf_fpreg_match[i].offset, sizeof(fpval));
401         ret = hv_vcpu_set_simd_fp_reg(cpu->hvf->fd, hvf_fpreg_match[i].reg,
402                                       fpval);
403         assert_hvf_ok(ret);
404     }
405 
406     ret = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_FPCR, vfp_get_fpcr(env));
407     assert_hvf_ok(ret);
408 
409     ret = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_FPSR, vfp_get_fpsr(env));
410     assert_hvf_ok(ret);
411 
412     ret = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_CPSR, pstate_read(env));
413     assert_hvf_ok(ret);
414 
415     aarch64_save_sp(env, arm_current_el(env));
416 
417     assert(write_cpustate_to_list(arm_cpu, false));
418     for (i = 0; i < ARRAY_SIZE(hvf_sreg_match); i++) {
419         if (hvf_sreg_match[i].cp_idx == -1) {
420             continue;
421         }
422 
423         val = arm_cpu->cpreg_values[hvf_sreg_match[i].cp_idx];
424         ret = hv_vcpu_set_sys_reg(cpu->hvf->fd, hvf_sreg_match[i].reg, val);
425         assert_hvf_ok(ret);
426     }
427 
428     ret = hv_vcpu_set_vtimer_offset(cpu->hvf->fd, hvf_state->vtimer_offset);
429     assert_hvf_ok(ret);
430 
431     return 0;
432 }
433 
434 static void flush_cpu_state(CPUState *cpu)
435 {
436     if (cpu->vcpu_dirty) {
437         hvf_put_registers(cpu);
438         cpu->vcpu_dirty = false;
439     }
440 }
441 
442 static void hvf_set_reg(CPUState *cpu, int rt, uint64_t val)
443 {
444     hv_return_t r;
445 
446     flush_cpu_state(cpu);
447 
448     if (rt < 31) {
449         r = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_X0 + rt, val);
450         assert_hvf_ok(r);
451     }
452 }
453 
454 static uint64_t hvf_get_reg(CPUState *cpu, int rt)
455 {
456     uint64_t val = 0;
457     hv_return_t r;
458 
459     flush_cpu_state(cpu);
460 
461     if (rt < 31) {
462         r = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_X0 + rt, &val);
463         assert_hvf_ok(r);
464     }
465 
466     return val;
467 }
468 
469 static bool hvf_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
470 {
471     ARMISARegisters host_isar = {};
472     const struct isar_regs {
473         int reg;
474         uint64_t *val;
475     } regs[] = {
476         { HV_SYS_REG_ID_AA64PFR0_EL1, &host_isar.id_aa64pfr0 },
477         { HV_SYS_REG_ID_AA64PFR1_EL1, &host_isar.id_aa64pfr1 },
478         { HV_SYS_REG_ID_AA64DFR0_EL1, &host_isar.id_aa64dfr0 },
479         { HV_SYS_REG_ID_AA64DFR1_EL1, &host_isar.id_aa64dfr1 },
480         { HV_SYS_REG_ID_AA64ISAR0_EL1, &host_isar.id_aa64isar0 },
481         { HV_SYS_REG_ID_AA64ISAR1_EL1, &host_isar.id_aa64isar1 },
482         { HV_SYS_REG_ID_AA64MMFR0_EL1, &host_isar.id_aa64mmfr0 },
483         { HV_SYS_REG_ID_AA64MMFR1_EL1, &host_isar.id_aa64mmfr1 },
484         { HV_SYS_REG_ID_AA64MMFR2_EL1, &host_isar.id_aa64mmfr2 },
485     };
486     hv_vcpu_t fd;
487     hv_return_t r = HV_SUCCESS;
488     hv_vcpu_exit_t *exit;
489     int i;
490 
491     ahcf->dtb_compatible = "arm,arm-v8";
492     ahcf->features = (1ULL << ARM_FEATURE_V8) |
493                      (1ULL << ARM_FEATURE_NEON) |
494                      (1ULL << ARM_FEATURE_AARCH64) |
495                      (1ULL << ARM_FEATURE_PMU) |
496                      (1ULL << ARM_FEATURE_GENERIC_TIMER);
497 
498     /* Set up a scratch vCPU purely to read back the host's ID registers */
499 
500     if (hv_vcpu_create(&fd, &exit, NULL) != HV_SUCCESS) {
501         return false;
502     }
503 
504     for (i = 0; i < ARRAY_SIZE(regs); i++) {
505         r |= hv_vcpu_get_sys_reg(fd, regs[i].reg, regs[i].val);
506     }
507     r |= hv_vcpu_get_sys_reg(fd, HV_SYS_REG_MIDR_EL1, &ahcf->midr);
508     r |= hv_vcpu_destroy(fd);
509 
510     ahcf->isar = host_isar;
511 
512     /*
513      * A scratch vCPU returns SCTLR 0, so let's fill our default with the M1
514      * boot SCTLR from https://github.com/AsahiLinux/m1n1/issues/97
515      */
516     ahcf->reset_sctlr = 0x30100180;
517     /*
518      * SCTLR.SPAN (bit 23) = 1 leaves PSTATE.PAN unchanged on exception entry,
519      * i.e. the Set-PAN behaviour is disabled. Disable it at reset for better
520      * compatibility; guest software can re-enable it by clearing SPAN to 0.
521      */
522     ahcf->reset_sctlr |= 0x00800000;
523 
524     /* Make sure we don't advertise AArch32 support for EL0/EL1 */
525     if ((host_isar.id_aa64pfr0 & 0xff) != 0x11) {
526         return false;
527     }
528 
529     return r == HV_SUCCESS;
530 }
531 
532 void hvf_arm_set_cpu_features_from_host(ARMCPU *cpu)
533 {
534     if (!arm_host_cpu_features.dtb_compatible) {
535         if (!hvf_enabled() ||
536             !hvf_arm_get_host_cpu_features(&arm_host_cpu_features)) {
537             /*
538              * We can't report this error yet, so flag that we need to
539              * in arm_cpu_realizefn().
540              */
541             cpu->host_cpu_probe_failed = true;
542             return;
543         }
544     }
545 
546     cpu->dtb_compatible = arm_host_cpu_features.dtb_compatible;
547     cpu->isar = arm_host_cpu_features.isar;
548     cpu->env.features = arm_host_cpu_features.features;
549     cpu->midr = arm_host_cpu_features.midr;
550     cpu->reset_sctlr = arm_host_cpu_features.reset_sctlr;
551 }
552 
553 void hvf_arch_vcpu_destroy(CPUState *cpu)
554 {
555 }
556 
557 int hvf_arch_init_vcpu(CPUState *cpu)
558 {
559     ARMCPU *arm_cpu = ARM_CPU(cpu);
560     CPUARMState *env = &arm_cpu->env;
561     uint32_t sregs_match_len = ARRAY_SIZE(hvf_sreg_match);
562     uint32_t sregs_cnt = 0;
563     uint64_t pfr;
564     hv_return_t ret;
565     int i;
566 
567     env->aarch64 = 1;
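    /*
     * The guest's generic timer is backed by the host counter, so report the
     * host's CNTFRQ_EL0 as the guest timer frequency.
     */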
568     asm volatile("mrs %0, cntfrq_el0" : "=r"(arm_cpu->gt_cntfrq_hz));
569 
570     /* Allocate enough space for our sysreg sync */
571     arm_cpu->cpreg_indexes = g_renew(uint64_t, arm_cpu->cpreg_indexes,
572                                      sregs_match_len);
573     arm_cpu->cpreg_values = g_renew(uint64_t, arm_cpu->cpreg_values,
574                                     sregs_match_len);
575     arm_cpu->cpreg_vmstate_indexes = g_renew(uint64_t,
576                                              arm_cpu->cpreg_vmstate_indexes,
577                                              sregs_match_len);
578     arm_cpu->cpreg_vmstate_values = g_renew(uint64_t,
579                                             arm_cpu->cpreg_vmstate_values,
580                                             sregs_match_len);
581 
582     memset(arm_cpu->cpreg_values, 0, sregs_match_len * sizeof(uint64_t));
583 
584     /* Populate cp list for all known sysregs */
585     for (i = 0; i < sregs_match_len; i++) {
586         const ARMCPRegInfo *ri;
587         uint32_t key = hvf_sreg_match[i].key;
588 
589         ri = get_arm_cp_reginfo(arm_cpu->cp_regs, key);
590         if (ri) {
591             assert(!(ri->type & ARM_CP_NO_RAW));
592             hvf_sreg_match[i].cp_idx = sregs_cnt;
593             arm_cpu->cpreg_indexes[sregs_cnt++] = cpreg_to_kvm_id(key);
594         } else {
595             hvf_sreg_match[i].cp_idx = -1;
596         }
597     }
598     arm_cpu->cpreg_array_len = sregs_cnt;
599     arm_cpu->cpreg_vmstate_array_len = sregs_cnt;
600 
601     assert(write_cpustate_to_list(arm_cpu, false));
602 
603     /* Set CP_NO_RAW system registers on init */
604     ret = hv_vcpu_set_sys_reg(cpu->hvf->fd, HV_SYS_REG_MIDR_EL1,
605                               arm_cpu->midr);
606     assert_hvf_ok(ret);
607 
608     ret = hv_vcpu_set_sys_reg(cpu->hvf->fd, HV_SYS_REG_MPIDR_EL1,
609                               arm_cpu->mp_affinity);
610     assert_hvf_ok(ret);
611 
612     ret = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_ID_AA64PFR0_EL1, &pfr);
613     assert_hvf_ok(ret);
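    /*
     * Advertise a system-register GIC CPU interface (ID_AA64PFR0_EL1.GIC = 1)
     * when the machine provides an emulated GICv3.
     */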
614     pfr |= env->gicv3state ? (1 << 24) : 0;
615     ret = hv_vcpu_set_sys_reg(cpu->hvf->fd, HV_SYS_REG_ID_AA64PFR0_EL1, pfr);
616     assert_hvf_ok(ret);
617 
618     /* We're limited by the underlying hardware caps: take MMFR0 from the host */
619     ret = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_ID_AA64MMFR0_EL1,
620                               &arm_cpu->isar.id_aa64mmfr0);
621     assert_hvf_ok(ret);
622 
623     return 0;
624 }
625 
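/*
 * Kick a vCPU out of hv_vcpu_run(): cpus_kick_thread() covers the case where
 * the thread is blocked in QEMU code, hv_vcpus_exit() the case where it is
 * running guest code.
 */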
626 void hvf_kick_vcpu_thread(CPUState *cpu)
627 {
628     cpus_kick_thread(cpu);
629     hv_vcpus_exit(&cpu->hvf->fd, 1);
630 }
631 
632 static void hvf_raise_exception(CPUState *cpu, uint32_t excp,
633                                 uint32_t syndrome)
634 {
635     ARMCPU *arm_cpu = ARM_CPU(cpu);
636     CPUARMState *env = &arm_cpu->env;
637 
638     cpu->exception_index = excp;
639     env->exception.target_el = 1;
640     env->exception.syndrome = syndrome;
641 
642     arm_cpu_do_interrupt(cpu);
643 }
644 
645 static void hvf_psci_cpu_off(ARMCPU *arm_cpu)
646 {
647     int32_t ret = arm_set_cpu_off(arm_cpu->mp_affinity);
648     assert(ret == QEMU_ARM_POWERCTL_RET_SUCCESS);
649 }
650 
651 /*
652  * Handle a PSCI call.
653  *
654  * Returns true if the call was handled (the PSCI return value is placed
655  * in x0), or false when the function ID is not recognised.
656  */
657 static bool hvf_handle_psci_call(CPUState *cpu)
658 {
659     ARMCPU *arm_cpu = ARM_CPU(cpu);
660     CPUARMState *env = &arm_cpu->env;
661     uint64_t param[4] = {
662         env->xregs[0],
663         env->xregs[1],
664         env->xregs[2],
665         env->xregs[3]
666     };
667     uint64_t context_id, mpidr;
668     bool target_aarch64 = true;
669     CPUState *target_cpu_state;
670     ARMCPU *target_cpu;
671     target_ulong entry;
672     int target_el = 1;
673     int32_t ret = 0;
674 
675     trace_hvf_psci_call(param[0], param[1], param[2], param[3],
676                         arm_cpu->mp_affinity);
677 
678     switch (param[0]) {
679     case QEMU_PSCI_0_2_FN_PSCI_VERSION:
680         ret = QEMU_PSCI_VERSION_1_1;
681         break;
682     case QEMU_PSCI_0_2_FN_MIGRATE_INFO_TYPE:
683         ret = QEMU_PSCI_0_2_RET_TOS_MIGRATION_NOT_REQUIRED; /* No trusted OS */
684         break;
685     case QEMU_PSCI_0_2_FN_AFFINITY_INFO:
686     case QEMU_PSCI_0_2_FN64_AFFINITY_INFO:
687         mpidr = param[1];
688 
689         switch (param[2]) {
690         case 0:
691             target_cpu_state = arm_get_cpu_by_id(mpidr);
692             if (!target_cpu_state) {
693                 ret = QEMU_PSCI_RET_INVALID_PARAMS;
694                 break;
695             }
696             target_cpu = ARM_CPU(target_cpu_state);
697 
698             ret = target_cpu->power_state;
699             break;
700         default:
701             /* Everything above affinity level 0 is always on. */
702             ret = 0;
703         }
704         break;
705     case QEMU_PSCI_0_2_FN_SYSTEM_RESET:
706         qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
707         /*
708          * QEMU reset and shutdown are async requests, but PSCI
709          * mandates that we never return from the reset/shutdown
710          * call, so power the CPU off now so it doesn't execute
711          * anything further.
712          */
713         hvf_psci_cpu_off(arm_cpu);
714         break;
715     case QEMU_PSCI_0_2_FN_SYSTEM_OFF:
716         qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
717         hvf_psci_cpu_off(arm_cpu);
718         break;
719     case QEMU_PSCI_0_1_FN_CPU_ON:
720     case QEMU_PSCI_0_2_FN_CPU_ON:
721     case QEMU_PSCI_0_2_FN64_CPU_ON:
722         mpidr = param[1];
723         entry = param[2];
724         context_id = param[3];
725         ret = arm_set_cpu_on(mpidr, entry, context_id,
726                              target_el, target_aarch64);
727         break;
728     case QEMU_PSCI_0_1_FN_CPU_OFF:
729     case QEMU_PSCI_0_2_FN_CPU_OFF:
730         hvf_psci_cpu_off(arm_cpu);
731         break;
732     case QEMU_PSCI_0_1_FN_CPU_SUSPEND:
733     case QEMU_PSCI_0_2_FN_CPU_SUSPEND:
734     case QEMU_PSCI_0_2_FN64_CPU_SUSPEND:
735         /* Affinity levels are not supported in QEMU */
736         if (param[1] & 0xfffe0000) {
737             ret = QEMU_PSCI_RET_INVALID_PARAMS;
738             break;
739         }
740         /* Powerdown is not supported, we always go into WFI */
741         env->xregs[0] = 0;
742         hvf_wfi(cpu);
743         break;
744     case QEMU_PSCI_0_1_FN_MIGRATE:
745     case QEMU_PSCI_0_2_FN_MIGRATE:
746         ret = QEMU_PSCI_RET_NOT_SUPPORTED;
747         break;
748     case QEMU_PSCI_1_0_FN_PSCI_FEATURES:
749         switch (param[1]) {
750         case QEMU_PSCI_0_2_FN_PSCI_VERSION:
751         case QEMU_PSCI_0_2_FN_MIGRATE_INFO_TYPE:
752         case QEMU_PSCI_0_2_FN_AFFINITY_INFO:
753         case QEMU_PSCI_0_2_FN64_AFFINITY_INFO:
754         case QEMU_PSCI_0_2_FN_SYSTEM_RESET:
755         case QEMU_PSCI_0_2_FN_SYSTEM_OFF:
756         case QEMU_PSCI_0_1_FN_CPU_ON:
757         case QEMU_PSCI_0_2_FN_CPU_ON:
758         case QEMU_PSCI_0_2_FN64_CPU_ON:
759         case QEMU_PSCI_0_1_FN_CPU_OFF:
760         case QEMU_PSCI_0_2_FN_CPU_OFF:
761         case QEMU_PSCI_0_1_FN_CPU_SUSPEND:
762         case QEMU_PSCI_0_2_FN_CPU_SUSPEND:
763         case QEMU_PSCI_0_2_FN64_CPU_SUSPEND:
764         case QEMU_PSCI_1_0_FN_PSCI_FEATURES:
765             ret = 0;
766             break;
767         case QEMU_PSCI_0_1_FN_MIGRATE:
768         case QEMU_PSCI_0_2_FN_MIGRATE:
769         default:
770             ret = QEMU_PSCI_RET_NOT_SUPPORTED;
771         }
772         break;
773     default:
774         return false;
775     }
776 
777     env->xregs[0] = ret;
778     return true;
779 }
780 
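/*
 * True for the feature ID register space (op0 == 3, op1 == 0, CRn == 0,
 * CRm == 1..7); unhandled registers in this range read as RES0 below.
 */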
781 static bool is_id_sysreg(uint32_t reg)
782 {
783     return SYSREG_OP0(reg) == 3 &&
784            SYSREG_OP1(reg) == 0 &&
785            SYSREG_CRN(reg) == 0 &&
786            SYSREG_CRM(reg) >= 1 &&
787            SYSREG_CRM(reg) < 8;
788 }
789 
790 static int hvf_sysreg_read(CPUState *cpu, uint32_t reg, uint32_t rt)
791 {
792     ARMCPU *arm_cpu = ARM_CPU(cpu);
793     CPUARMState *env = &arm_cpu->env;
794     uint64_t val = 0;
795 
796     switch (reg) {
797     case SYSREG_CNTPCT_EL0:
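        /*
         * Derive the guest counter value from QEMU's virtual clock so that
         * it stops advancing while the VM is paused.
         */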
798         val = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) /
799               gt_cntfrq_period_ns(arm_cpu);
800         break;
801     case SYSREG_PMCR_EL0:
802         val = env->cp15.c9_pmcr;
803         break;
804     case SYSREG_PMCCNTR_EL0:
805         pmu_op_start(env);
806         val = env->cp15.c15_ccnt;
807         pmu_op_finish(env);
808         break;
809     case SYSREG_PMCNTENCLR_EL0:
810         val = env->cp15.c9_pmcnten;
811         break;
812     case SYSREG_PMOVSCLR_EL0:
813         val = env->cp15.c9_pmovsr;
814         break;
815     case SYSREG_PMSELR_EL0:
816         val = env->cp15.c9_pmselr;
817         break;
818     case SYSREG_PMINTENCLR_EL1:
819         val = env->cp15.c9_pminten;
820         break;
821     case SYSREG_PMCCFILTR_EL0:
822         val = env->cp15.pmccfiltr_el0;
823         break;
824     case SYSREG_PMCNTENSET_EL0:
825         val = env->cp15.c9_pmcnten;
826         break;
827     case SYSREG_PMUSERENR_EL0:
828         val = env->cp15.c9_pmuserenr;
829         break;
830     case SYSREG_PMCEID0_EL0:
831     case SYSREG_PMCEID1_EL0:
832         /* No PMU events are implemented yet, so advertise none as supported */
833         val = 0;
834         break;
835     case SYSREG_OSLSR_EL1:
836         val = env->cp15.oslsr_el1;
837         break;
838     case SYSREG_OSDLR_EL1:
839         /* Dummy register */
840         break;
841     default:
842         if (is_id_sysreg(reg)) {
843             /* ID system registers read as RES0 */
844             val = 0;
845             break;
846         }
847         cpu_synchronize_state(cpu);
848         trace_hvf_unhandled_sysreg_read(env->pc, reg,
849                                         SYSREG_OP0(reg),
850                                         SYSREG_OP1(reg),
851                                         SYSREG_CRN(reg),
852                                         SYSREG_CRM(reg),
853                                         SYSREG_OP2(reg));
854         hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
855         return 1;
856     }
857 
858     trace_hvf_sysreg_read(reg,
859                           SYSREG_OP0(reg),
860                           SYSREG_OP1(reg),
861                           SYSREG_CRN(reg),
862                           SYSREG_CRM(reg),
863                           SYSREG_OP2(reg),
864                           val);
865     hvf_set_reg(cpu, rt, val);
866 
867     return 0;
868 }
869 
870 static void pmu_update_irq(CPUARMState *env)
871 {
872     ARMCPU *cpu = env_archcpu(env);
873     qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
874             (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
875 }
876 
877 static bool pmu_event_supported(uint16_t number)
878 {
879     return false;
880 }
881 
882 /* Returns true if the counter (pass 31 for PMCCNTR) should count events
883  * using the current EL, security state, and register configuration.
884  */
885 static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
886 {
887     uint64_t filter;
888     bool enabled, filtered = true;
889     int el = arm_current_el(env);
890 
891     enabled = (env->cp15.c9_pmcr & PMCRE) &&
892               (env->cp15.c9_pmcnten & (1 << counter));
893 
894     if (counter == 31) {
895         filter = env->cp15.pmccfiltr_el0;
896     } else {
897         filter = env->cp15.c14_pmevtyper[counter];
898     }
899 
900     if (el == 0) {
901         filtered = filter & PMXEVTYPER_U;
902     } else if (el == 1) {
903         filtered = filter & PMXEVTYPER_P;
904     }
905 
906     if (counter != 31) {
907         /*
908          * If not checking PMCCNTR, ensure the counter is setup to an event we
909          * support
910          */
911         uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
912         if (!pmu_event_supported(event)) {
913             return false;
914         }
915     }
916 
917     return enabled && !filtered;
918 }
919 
920 static void pmswinc_write(CPUARMState *env, uint64_t value)
921 {
922     unsigned int i;
923     for (i = 0; i < pmu_num_counters(env); i++) {
924         /* Increment a counter's count iff: */
925         if ((value & (1 << i)) && /* counter's bit is set */
926                 /* counter is enabled and not filtered */
927                 pmu_counter_enabled(env, i) &&
928                 /* counter is SW_INCR */
929                 (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
930             /*
931              * Detect if this write causes an overflow since we can't predict
932              * PMSWINC overflows like we can for other events
933              */
934             uint32_t new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;
935 
936             if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & INT32_MIN) {
937                 env->cp15.c9_pmovsr |= (1 << i);
938                 pmu_update_irq(env);
939             }
940 
941             env->cp15.c14_pmevcntr[i] = new_pmswinc;
942         }
943     }
944 }
945 
946 static int hvf_sysreg_write(CPUState *cpu, uint32_t reg, uint64_t val)
947 {
948     ARMCPU *arm_cpu = ARM_CPU(cpu);
949     CPUARMState *env = &arm_cpu->env;
950 
951     trace_hvf_sysreg_write(reg,
952                            SYSREG_OP0(reg),
953                            SYSREG_OP1(reg),
954                            SYSREG_CRN(reg),
955                            SYSREG_CRM(reg),
956                            SYSREG_OP2(reg),
957                            val);
958 
959     switch (reg) {
960     case SYSREG_PMCCNTR_EL0:
961         pmu_op_start(env);
962         env->cp15.c15_ccnt = val;
963         pmu_op_finish(env);
964         break;
965     case SYSREG_PMCR_EL0:
966         pmu_op_start(env);
967 
968         if (val & PMCRC) {
969             /* The counter has been reset */
970             env->cp15.c15_ccnt = 0;
971         }
972 
973         if (val & PMCRP) {
974             unsigned int i;
975             for (i = 0; i < pmu_num_counters(env); i++) {
976                 env->cp15.c14_pmevcntr[i] = 0;
977             }
978         }
979 
980         env->cp15.c9_pmcr &= ~PMCR_WRITEABLE_MASK;
981         env->cp15.c9_pmcr |= (val & PMCR_WRITEABLE_MASK);
982 
983         pmu_op_finish(env);
984         break;
985     case SYSREG_PMUSERENR_EL0:
986         env->cp15.c9_pmuserenr = val & 0xf;
987         break;
988     case SYSREG_PMCNTENSET_EL0:
989         env->cp15.c9_pmcnten |= (val & pmu_counter_mask(env));
990         break;
991     case SYSREG_PMCNTENCLR_EL0:
992         env->cp15.c9_pmcnten &= ~(val & pmu_counter_mask(env));
993         break;
994     case SYSREG_PMINTENCLR_EL1:
995         pmu_op_start(env);
996         env->cp15.c9_pminten |= val;
997         pmu_op_finish(env);
998         break;
999     case SYSREG_PMOVSCLR_EL0:
1000         pmu_op_start(env);
1001         env->cp15.c9_pmovsr &= ~val;
1002         pmu_op_finish(env);
1003         break;
1004     case SYSREG_PMSWINC_EL0:
1005         pmu_op_start(env);
1006         pmswinc_write(env, val);
1007         pmu_op_finish(env);
1008         break;
1009     case SYSREG_PMSELR_EL0:
1010         env->cp15.c9_pmselr = val & 0x1f;
1011         break;
1012     case SYSREG_PMCCFILTR_EL0:
1013         pmu_op_start(env);
1014         env->cp15.pmccfiltr_el0 = val & PMCCFILTR_EL0;
1015         pmu_op_finish(env);
1016         break;
1017     case SYSREG_OSLAR_EL1:
1018         env->cp15.oslsr_el1 = val & 1;
1019         break;
1020     case SYSREG_OSDLR_EL1:
1021         /* Dummy register */
1022         break;
1023     default:
1024         cpu_synchronize_state(cpu);
1025         trace_hvf_unhandled_sysreg_write(env->pc, reg,
1026                                          SYSREG_OP0(reg),
1027                                          SYSREG_OP1(reg),
1028                                          SYSREG_CRN(reg),
1029                                          SYSREG_CRM(reg),
1030                                          SYSREG_OP2(reg));
1031         hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
1032         return 1;
1033     }
1034 
1035     return 0;
1036 }
1037 
1038 static int hvf_inject_interrupts(CPUState *cpu)
1039 {
1040     if (cpu->interrupt_request & CPU_INTERRUPT_FIQ) {
1041         trace_hvf_inject_fiq();
1042         hv_vcpu_set_pending_interrupt(cpu->hvf->fd, HV_INTERRUPT_TYPE_FIQ,
1043                                       true);
1044     }
1045 
1046     if (cpu->interrupt_request & CPU_INTERRUPT_HARD) {
1047         trace_hvf_inject_irq();
1048         hv_vcpu_set_pending_interrupt(cpu->hvf->fd, HV_INTERRUPT_TYPE_IRQ,
1049                                       true);
1050     }
1051 
1052     return 0;
1053 }
1054 
1055 static uint64_t hvf_vtimer_val_raw(void)
1056 {
1057     /*
1058      * mach_absolute_time() returns the vtimer value without the VM
1059      * offset that we define. Add our own offset on top.
1060      */
1061     return mach_absolute_time() - hvf_state->vtimer_offset;
1062 }
1063 
1064 static uint64_t hvf_vtimer_val(void)
1065 {
1066     if (!runstate_is_running()) {
1067         /* VM is paused, the vtimer value is in vtimer.vtimer_val */
1068         return vtimer.vtimer_val;
1069     }
1070 
1071     return hvf_vtimer_val_raw();
1072 }
1073 
1074 static void hvf_wait_for_ipi(CPUState *cpu, struct timespec *ts)
1075 {
1076     /*
1077      * Use pselect to sleep so that other threads can IPI us while we're
1078      * sleeping.
1079      */
1080     qatomic_mb_set(&cpu->thread_kicked, false);
1081     qemu_mutex_unlock_iothread();
1082     pselect(0, 0, 0, 0, ts, &cpu->hvf->unblock_ipi_mask);
1083     qemu_mutex_lock_iothread();
1084 }
1085 
1086 static void hvf_wfi(CPUState *cpu)
1087 {
1088     ARMCPU *arm_cpu = ARM_CPU(cpu);
1089     struct timespec ts;
1090     hv_return_t r;
1091     uint64_t ctl;
1092     uint64_t cval;
1093     int64_t ticks_to_sleep;
1094     uint64_t seconds;
1095     uint64_t nanos;
1096     uint32_t cntfrq;
1097 
1098     if (cpu->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_FIQ)) {
1099         /* Interrupt pending, no need to wait */
1100         return;
1101     }
1102 
1103     r = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_CNTV_CTL_EL0, &ctl);
1104     assert_hvf_ok(r);
1105 
1106     if (!(ctl & 1) || (ctl & 2)) {
1107         /* Timer disabled or masked, just wait for an IPI. */
1108         hvf_wait_for_ipi(cpu, NULL);
1109         return;
1110     }
1111 
1112     r = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_CNTV_CVAL_EL0, &cval);
1113     assert_hvf_ok(r);
1114 
1115     ticks_to_sleep = cval - hvf_vtimer_val();
1116     if (ticks_to_sleep < 0) {
1117         return;
1118     }
1119 
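    /*
     * Note: despite its name, cntfrq holds the timer tick period in
     * nanoseconds here, so ticks * cntfrq converts directly to nanoseconds.
     */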
1120     cntfrq = gt_cntfrq_period_ns(arm_cpu);
1121     seconds = muldiv64(ticks_to_sleep, cntfrq, NANOSECONDS_PER_SECOND);
1122     ticks_to_sleep -= muldiv64(seconds, NANOSECONDS_PER_SECOND, cntfrq);
1123     nanos = ticks_to_sleep * cntfrq;
1124 
1125     /*
1126      * Don't sleep for less than the time a context switch would take,
1127      * so that we can satisfy fast timer requests on the same CPU.
1128      * Measurements on M1 show the sweet spot to be ~2ms.
1129      */
1130     if (!seconds && nanos < (2 * SCALE_MS)) {
1131         return;
1132     }
1133 
1134     ts = (struct timespec) { seconds, nanos };
1135     hvf_wait_for_ipi(cpu, &ts);
1136 }
1137 
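/*
 * HVF masks the vtimer after an HV_EXIT_REASON_VTIMER_ACTIVATED exit so it
 * does not fire again immediately. Mirror the timer's interrupt line into
 * the emulated GIC and, once the guest has quiesced the timer, unmask it so
 * that we get a fresh exit for the next expiry.
 */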
1138 static void hvf_sync_vtimer(CPUState *cpu)
1139 {
1140     ARMCPU *arm_cpu = ARM_CPU(cpu);
1141     hv_return_t r;
1142     uint64_t ctl;
1143     bool irq_state;
1144 
1145     if (!cpu->hvf->vtimer_masked) {
1146         /* We will get notified on vtimer changes by hvf, nothing to do */
1147         return;
1148     }
1149 
1150     r = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_CNTV_CTL_EL0, &ctl);
1151     assert_hvf_ok(r);
1152 
1153     irq_state = (ctl & (TMR_CTL_ENABLE | TMR_CTL_IMASK | TMR_CTL_ISTATUS)) ==
1154                 (TMR_CTL_ENABLE | TMR_CTL_ISTATUS);
1155     qemu_set_irq(arm_cpu->gt_timer_outputs[GTIMER_VIRT], irq_state);
1156 
1157     if (!irq_state) {
1158         /* Timer no longer asserting, we can unmask it */
1159         hv_vcpu_set_vtimer_mask(cpu->hvf->fd, false);
1160         cpu->hvf->vtimer_masked = false;
1161     }
1162 }
1163 
1164 int hvf_vcpu_exec(CPUState *cpu)
1165 {
1166     ARMCPU *arm_cpu = ARM_CPU(cpu);
1167     CPUARMState *env = &arm_cpu->env;
1168     hv_vcpu_exit_t *hvf_exit = cpu->hvf->exit;
1169     hv_return_t r;
1170     bool advance_pc = false;
1171 
1172     if (hvf_inject_interrupts(cpu)) {
1173         return EXCP_INTERRUPT;
1174     }
1175 
1176     if (cpu->halted) {
1177         return EXCP_HLT;
1178     }
1179 
1180     flush_cpu_state(cpu);
1181 
1182     qemu_mutex_unlock_iothread();
1183     assert_hvf_ok(hv_vcpu_run(cpu->hvf->fd));
1184 
1185     /* handle VMEXIT */
1186     uint64_t exit_reason = hvf_exit->reason;
1187     uint64_t syndrome = hvf_exit->exception.syndrome;
1188     uint32_t ec = syn_get_ec(syndrome);
1189 
1190     qemu_mutex_lock_iothread();
1191     switch (exit_reason) {
1192     case HV_EXIT_REASON_EXCEPTION:
1193         /* This is the main one, handle below. */
1194         break;
1195     case HV_EXIT_REASON_VTIMER_ACTIVATED:
1196         qemu_set_irq(arm_cpu->gt_timer_outputs[GTIMER_VIRT], 1);
1197         cpu->hvf->vtimer_masked = true;
1198         return 0;
1199     case HV_EXIT_REASON_CANCELED:
1200         /* we got kicked, no exit to process */
1201         return 0;
1202     default:
1203         assert(0);
1204     }
1205 
1206     hvf_sync_vtimer(cpu);
1207 
1208     switch (ec) {
1209     case EC_DATAABORT: {
1210         bool isv = syndrome & ARM_EL_ISV;
1211         bool iswrite = (syndrome >> 6) & 1;
1212         bool s1ptw = (syndrome >> 7) & 1;
1213         uint32_t sas = (syndrome >> 22) & 3;
1214         uint32_t len = 1 << sas;
1215         uint32_t srt = (syndrome >> 16) & 0x1f;
1216         uint32_t cm = (syndrome >> 8) & 0x1;
1217         uint64_t val = 0;
1218 
1219         trace_hvf_data_abort(env->pc, hvf_exit->exception.virtual_address,
1220                              hvf_exit->exception.physical_address, isv,
1221                              iswrite, s1ptw, len, srt);
1222 
1223         if (cm) {
1224             /* Cache maintenance operation: MMIO is never cached, nothing to emulate */
1225             advance_pc = true;
1226             break;
1227         }
1228 
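        /*
         * Without a valid instruction syndrome (ISV) we cannot decode the
         * access, so only MMIO instructions that set ISV are supported.
         */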
1229         assert(isv);
1230 
1231         if (iswrite) {
1232             val = hvf_get_reg(cpu, srt);
1233             address_space_write(&address_space_memory,
1234                                 hvf_exit->exception.physical_address,
1235                                 MEMTXATTRS_UNSPECIFIED, &val, len);
1236         } else {
1237             address_space_read(&address_space_memory,
1238                                hvf_exit->exception.physical_address,
1239                                MEMTXATTRS_UNSPECIFIED, &val, len);
1240             hvf_set_reg(cpu, srt, val);
1241         }
1242 
1243         advance_pc = true;
1244         break;
1245     }
1246     case EC_SYSTEMREGISTERTRAP: {
1247         bool isread = (syndrome >> 0) & 1;
1248         uint32_t rt = (syndrome >> 5) & 0x1f;
1249         uint32_t reg = syndrome & SYSREG_MASK;
1250         uint64_t val;
1251         int ret = 0;
1252 
1253         if (isread) {
1254             ret = hvf_sysreg_read(cpu, reg, rt);
1255         } else {
1256             val = hvf_get_reg(cpu, rt);
1257             ret = hvf_sysreg_write(cpu, reg, val);
1258         }
1259 
1260         advance_pc = !ret;
1261         break;
1262     }
1263     case EC_WFX_TRAP:
1264         advance_pc = true;
1265         if (!(syndrome & WFX_IS_WFE)) {
1266             hvf_wfi(cpu);
1267         }
1268         break;
1269     case EC_AA64_HVC:
1270         cpu_synchronize_state(cpu);
1271         if (arm_cpu->psci_conduit == QEMU_PSCI_CONDUIT_HVC) {
1272             if (!hvf_handle_psci_call(cpu)) {
1273                 trace_hvf_unknown_hvc(env->xregs[0]);
1274                 /* SMCCC 1.3 section 5.2 says every unknown SMCCC call returns -1 */
1275                 env->xregs[0] = -1;
1276             }
1277         } else {
1278             trace_hvf_unknown_hvc(env->xregs[0]);
1279             hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
1280         }
1281         break;
1282     case EC_AA64_SMC:
1283         cpu_synchronize_state(cpu);
1284         if (arm_cpu->psci_conduit == QEMU_PSCI_CONDUIT_SMC) {
1285             advance_pc = true;
1286 
1287             if (!hvf_handle_psci_call(cpu)) {
1288                 trace_hvf_unknown_smc(env->xregs[0]);
1289                 /* SMCCC 1.3 section 5.2 says every unknown SMCCC call returns -1 */
1290                 env->xregs[0] = -1;
1291             }
1292         } else {
1293             trace_hvf_unknown_smc(env->xregs[0]);
1294             hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
1295         }
1296         break;
1297     default:
1298         cpu_synchronize_state(cpu);
1299         trace_hvf_exit(syndrome, ec, env->pc);
1300         error_report("0x%llx: unhandled exception ec=0x%x", env->pc, ec);
1301     }
1302 
1303     if (advance_pc) {
1304         uint64_t pc;
1305 
1306         flush_cpu_state(cpu);
1307 
1308         r = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_PC, &pc);
1309         assert_hvf_ok(r);
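        /* All AArch64 instructions are 4 bytes; step past the trapped one. */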
1310         pc += 4;
1311         r = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_PC, pc);
1312         assert_hvf_ok(r);
1313     }
1314 
1315     return 0;
1316 }
1317 
1318 static const VMStateDescription vmstate_hvf_vtimer = {
1319     .name = "hvf-vtimer",
1320     .version_id = 1,
1321     .minimum_version_id = 1,
1322     .fields = (VMStateField[]) {
1323         VMSTATE_UINT64(vtimer_val, HVFVTimer),
1324         VMSTATE_END_OF_LIST()
1325     },
1326 };
1327 
1328 static void hvf_vm_state_change(void *opaque, bool running, RunState state)
1329 {
1330     HVFVTimer *s = opaque;
1331 
1332     if (running) {
1333         /* Update vtimer offset on all CPUs */
1334         hvf_state->vtimer_offset = mach_absolute_time() - s->vtimer_val;
1335         cpu_synchronize_all_states();
1336     } else {
1337         /* Remember vtimer value on every pause */
1338         s->vtimer_val = hvf_vtimer_val_raw();
1339     }
1340 }
1341 
1342 int hvf_arch_init(void)
1343 {
1344     hvf_state->vtimer_offset = mach_absolute_time();
1345     vmstate_register(NULL, 0, &vmstate_hvf_vtimer, &vtimer);
1346     qemu_add_vm_change_state_handler(hvf_vm_state_change, &vtimer);
1347     return 0;
1348 }
1349