1 /*
2  * QEMU ARM CPU -- internal functions and types
3  *
4  * Copyright (c) 2014 Linaro Ltd
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License
8  * as published by the Free Software Foundation; either version 2
9  * of the License, or (at your option) any later version.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, see
18  * <http://www.gnu.org/licenses/gpl-2.0.html>
19  *
20  * This header defines functions, types, etc which need to be shared
21  * between different source files within target/arm/ but which are
22  * private to it and not required by the rest of QEMU.
23  */
24 
25 #ifndef TARGET_ARM_INTERNALS_H
26 #define TARGET_ARM_INTERNALS_H
27 
28 #include "exec/breakpoint.h"
29 #include "hw/registerfields.h"
30 #include "tcg/tcg-gvec-desc.h"
31 #include "syndrome.h"
32 #include "cpu-features.h"
33 
34 /* register banks for CPU modes */
35 #define BANK_USRSYS 0
36 #define BANK_SVC    1
37 #define BANK_ABT    2
38 #define BANK_UND    3
39 #define BANK_IRQ    4
40 #define BANK_FIQ    5
41 #define BANK_HYP    6
42 #define BANK_MON    7
43 
44 static inline int arm_env_mmu_index(CPUARMState *env)
45 {
46     return EX_TBFLAG_ANY(env->hflags, MMUIDX);
47 }
48 
49 static inline bool excp_is_internal(int excp)
50 {
51     /* Return true if this exception number represents a QEMU-internal
52      * exception that will not be passed to the guest.
53      */
54     return excp == EXCP_INTERRUPT
55         || excp == EXCP_HLT
56         || excp == EXCP_DEBUG
57         || excp == EXCP_HALTED
58         || excp == EXCP_EXCEPTION_EXIT
59         || excp == EXCP_KERNEL_TRAP
60         || excp == EXCP_SEMIHOST;
61 }
62 
63 /*
64  * Default frequency for the generic timer, in Hz.
65  * ARMv8.6 and later CPUs architecturally must use a 1GHz timer; before
66  * that it was an IMPDEF choice, and QEMU initially picked 62.5MHz,
67  * which gives a 16ns tick period.
68  *
69  * We will use the back-compat value:
70  *  - for QEMU CPU types added before we standardized on 1GHz
71  *  - for versioned machine types with a version of 9.0 or earlier
72  * In any case, the machine model may override via the cntfrq property.
73  */
74 #define GTIMER_DEFAULT_HZ 1000000000
75 #define GTIMER_BACKCOMPAT_HZ 62500000
76 
77 /* Bit definitions for the v7M CONTROL register */
78 FIELD(V7M_CONTROL, NPRIV, 0, 1)
79 FIELD(V7M_CONTROL, SPSEL, 1, 1)
80 FIELD(V7M_CONTROL, FPCA, 2, 1)
81 FIELD(V7M_CONTROL, SFPA, 3, 1)
82 
83 /* Bit definitions for v7M exception return payload */
84 FIELD(V7M_EXCRET, ES, 0, 1)
85 FIELD(V7M_EXCRET, RES0, 1, 1)
86 FIELD(V7M_EXCRET, SPSEL, 2, 1)
87 FIELD(V7M_EXCRET, MODE, 3, 1)
88 FIELD(V7M_EXCRET, FTYPE, 4, 1)
89 FIELD(V7M_EXCRET, DCRS, 5, 1)
90 FIELD(V7M_EXCRET, S, 6, 1)
91 FIELD(V7M_EXCRET, RES1, 7, 25) /* including the must-be-1 prefix */
92 
93 /* Minimum value which is a magic number for exception return */
94 #define EXC_RETURN_MIN_MAGIC 0xff000000
95 /* Minimum value which is a magic number for function or exception return
96  * when using the v8M security extension
97  */
98 #define FNC_RETURN_MIN_MAGIC 0xfefffffe
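
/*
 * Illustrative sketch (hypothetical 'dest' branch target, not a real
 * helper): a v7M/v8M branch destination can be classified against these
 * magic ranges roughly like this:
 *
 *     if (dest >= EXC_RETURN_MIN_MAGIC) {
 *         // exception return payload
 *     } else if (dest >= FNC_RETURN_MIN_MAGIC) {
 *         // v8M secure function return
 *     } else {
 *         // ordinary branch target
 *     }
 */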
99 
100 /* Bit definitions for DBGWCRn and DBGWCRn_EL1 */
101 FIELD(DBGWCR, E, 0, 1)
102 FIELD(DBGWCR, PAC, 1, 2)
103 FIELD(DBGWCR, LSC, 3, 2)
104 FIELD(DBGWCR, BAS, 5, 8)
105 FIELD(DBGWCR, HMC, 13, 1)
106 FIELD(DBGWCR, SSC, 14, 2)
107 FIELD(DBGWCR, LBN, 16, 4)
108 FIELD(DBGWCR, WT, 20, 1)
109 FIELD(DBGWCR, MASK, 24, 5)
110 FIELD(DBGWCR, SSCE, 29, 1)
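
/*
 * The FIELD() definitions above generate R_DBGWCR_<name>_SHIFT/_LENGTH/_MASK
 * constants which pair with the FIELD_EX64()/FIELD_DP64() accessors from
 * hw/registerfields.h.  A rough usage sketch (hypothetical watchpoint
 * index 'n'):
 *
 *     uint64_t wcr = env->cp15.dbgwcr[n];
 *     if (FIELD_EX64(wcr, DBGWCR, E)) {
 *         int bas = FIELD_EX64(wcr, DBGWCR, BAS);   // byte address select
 *         int lsc = FIELD_EX64(wcr, DBGWCR, LSC);   // load/store control
 *     }
 */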
111 
112 #define VTCR_NSW (1u << 29)
113 #define VTCR_NSA (1u << 30)
114 #define VSTCR_SW VTCR_NSW
115 #define VSTCR_SA VTCR_NSA
116 
117 /* Bit definitions for CPACR (AArch32 only) */
118 FIELD(CPACR, CP10, 20, 2)
119 FIELD(CPACR, CP11, 22, 2)
120 FIELD(CPACR, TRCDIS, 28, 1)    /* matches CPACR_EL1.TTA */
121 FIELD(CPACR, D32DIS, 30, 1)    /* up to v7; RAZ in v8 */
122 FIELD(CPACR, ASEDIS, 31, 1)
123 
124 /* Bit definitions for CPACR_EL1 (AArch64 only) */
125 FIELD(CPACR_EL1, ZEN, 16, 2)
126 FIELD(CPACR_EL1, FPEN, 20, 2)
127 FIELD(CPACR_EL1, SMEN, 24, 2)
128 FIELD(CPACR_EL1, TTA, 28, 1)   /* matches CPACR.TRCDIS */
129 
130 /* Bit definitions for HCPTR (AArch32 only) */
131 FIELD(HCPTR, TCP10, 10, 1)
132 FIELD(HCPTR, TCP11, 11, 1)
133 FIELD(HCPTR, TASE, 15, 1)
134 FIELD(HCPTR, TTA, 20, 1)
135 FIELD(HCPTR, TAM, 30, 1)       /* matches CPTR_EL2.TAM */
136 FIELD(HCPTR, TCPAC, 31, 1)     /* matches CPTR_EL2.TCPAC */
137 
138 /* Bit definitions for CPTR_EL2 (AArch64 only) */
139 FIELD(CPTR_EL2, TZ, 8, 1)      /* !E2H */
140 FIELD(CPTR_EL2, TFP, 10, 1)    /* !E2H, matches HCPTR.TCP10 */
141 FIELD(CPTR_EL2, TSM, 12, 1)    /* !E2H */
142 FIELD(CPTR_EL2, ZEN, 16, 2)    /* E2H */
143 FIELD(CPTR_EL2, FPEN, 20, 2)   /* E2H */
144 FIELD(CPTR_EL2, SMEN, 24, 2)   /* E2H */
145 FIELD(CPTR_EL2, TTA, 28, 1)
146 FIELD(CPTR_EL2, TAM, 30, 1)    /* matches HCPTR.TAM */
147 FIELD(CPTR_EL2, TCPAC, 31, 1)  /* matches HCPTR.TCPAC */
148 
149 /* Bit definitions for CPTR_EL3 (AArch64 only) */
150 FIELD(CPTR_EL3, EZ, 8, 1)
151 FIELD(CPTR_EL3, TFP, 10, 1)
152 FIELD(CPTR_EL3, ESM, 12, 1)
153 FIELD(CPTR_EL3, TTA, 20, 1)
154 FIELD(CPTR_EL3, TAM, 30, 1)
155 FIELD(CPTR_EL3, TCPAC, 31, 1)
156 
157 #define MDCR_MTPME    (1U << 28)
158 #define MDCR_TDCC     (1U << 27)
159 #define MDCR_HLP      (1U << 26)  /* MDCR_EL2 */
160 #define MDCR_SCCD     (1U << 23)  /* MDCR_EL3 */
161 #define MDCR_HCCD     (1U << 23)  /* MDCR_EL2 */
162 #define MDCR_EPMAD    (1U << 21)
163 #define MDCR_EDAD     (1U << 20)
164 #define MDCR_TTRF     (1U << 19)
165 #define MDCR_STE      (1U << 18)  /* MDCR_EL3 */
166 #define MDCR_SPME     (1U << 17)  /* MDCR_EL3 */
167 #define MDCR_HPMD     (1U << 17)  /* MDCR_EL2 */
168 #define MDCR_SDD      (1U << 16)
169 #define MDCR_SPD      (3U << 14)
170 #define MDCR_TDRA     (1U << 11)
171 #define MDCR_TDOSA    (1U << 10)
172 #define MDCR_TDA      (1U << 9)
173 #define MDCR_TDE      (1U << 8)
174 #define MDCR_HPME     (1U << 7)
175 #define MDCR_TPM      (1U << 6)
176 #define MDCR_TPMCR    (1U << 5)
177 #define MDCR_HPMN     (0x1fU)
178 
179 /* Not all of the MDCR_EL3 bits are present in the 32-bit SDCR */
180 #define SDCR_VALID_MASK (MDCR_MTPME | MDCR_TDCC | MDCR_SCCD | \
181                          MDCR_EPMAD | MDCR_EDAD | MDCR_TTRF | \
182                          MDCR_STE | MDCR_SPME | MDCR_SPD)
183 
184 #define TTBCR_N      (7U << 0) /* TTBCR.EAE==0 */
185 #define TTBCR_T0SZ   (7U << 0) /* TTBCR.EAE==1 */
186 #define TTBCR_PD0    (1U << 4)
187 #define TTBCR_PD1    (1U << 5)
188 #define TTBCR_EPD0   (1U << 7)
189 #define TTBCR_IRGN0  (3U << 8)
190 #define TTBCR_ORGN0  (3U << 10)
191 #define TTBCR_SH0    (3U << 12)
192 #define TTBCR_T1SZ   (7U << 16) /* T1SZ is a 3-bit field */
193 #define TTBCR_A1     (1U << 22)
194 #define TTBCR_EPD1   (1U << 23)
195 #define TTBCR_IRGN1  (3U << 24)
196 #define TTBCR_ORGN1  (3U << 26)
197 #define TTBCR_SH1    (3U << 28) /* SH1 is a 2-bit field */
198 #define TTBCR_EAE    (1U << 31)
199 
200 FIELD(VTCR, T0SZ, 0, 6)
201 FIELD(VTCR, SL0, 6, 2)
202 FIELD(VTCR, IRGN0, 8, 2)
203 FIELD(VTCR, ORGN0, 10, 2)
204 FIELD(VTCR, SH0, 12, 2)
205 FIELD(VTCR, TG0, 14, 2)
206 FIELD(VTCR, PS, 16, 3)
207 FIELD(VTCR, VS, 19, 1)
208 FIELD(VTCR, HA, 21, 1)
209 FIELD(VTCR, HD, 22, 1)
210 FIELD(VTCR, HWU59, 25, 1)
211 FIELD(VTCR, HWU60, 26, 1)
212 FIELD(VTCR, HWU61, 27, 1)
213 FIELD(VTCR, HWU62, 28, 1)
214 FIELD(VTCR, NSW, 29, 1)
215 FIELD(VTCR, NSA, 30, 1)
216 FIELD(VTCR, DS, 32, 1)
217 FIELD(VTCR, SL2, 33, 1)
218 
219 #define HCRX_ENAS0    (1ULL << 0)
220 #define HCRX_ENALS    (1ULL << 1)
221 #define HCRX_ENASR    (1ULL << 2)
222 #define HCRX_FNXS     (1ULL << 3)
223 #define HCRX_FGTNXS   (1ULL << 4)
224 #define HCRX_SMPME    (1ULL << 5)
225 #define HCRX_TALLINT  (1ULL << 6)
226 #define HCRX_VINMI    (1ULL << 7)
227 #define HCRX_VFNMI    (1ULL << 8)
228 #define HCRX_CMOW     (1ULL << 9)
229 #define HCRX_MCE2     (1ULL << 10)
230 #define HCRX_MSCEN    (1ULL << 11)
231 
232 #define HPFAR_NS      (1ULL << 63)
233 
234 #define HSTR_TTEE (1 << 16)
235 #define HSTR_TJDBX (1 << 17)
236 
237 /*
238  * Depending on the value of HCR_EL2.E2H, bits 0 and 1
239  * have different bit definitions, and EL1PCTEN might be
240  * bit 0 or bit 10. We use _E2H1 and _E2H0 suffixes to
241  * disambiguate if necessary.
242  */
243 FIELD(CNTHCTL, EL0PCTEN_E2H1, 0, 1)
244 FIELD(CNTHCTL, EL0VCTEN_E2H1, 1, 1)
245 FIELD(CNTHCTL, EL1PCTEN_E2H0, 0, 1)
246 FIELD(CNTHCTL, EL1PCEN_E2H0, 1, 1)
247 FIELD(CNTHCTL, EVNTEN, 2, 1)
248 FIELD(CNTHCTL, EVNTDIR, 3, 1)
249 FIELD(CNTHCTL, EVNTI, 4, 4)
250 FIELD(CNTHCTL, EL0VTEN, 8, 1)
251 FIELD(CNTHCTL, EL0PTEN, 9, 1)
252 FIELD(CNTHCTL, EL1PCTEN_E2H1, 10, 1)
253 FIELD(CNTHCTL, EL1PTEN, 11, 1)
254 FIELD(CNTHCTL, ECV, 12, 1)
255 FIELD(CNTHCTL, EL1TVT, 13, 1)
256 FIELD(CNTHCTL, EL1TVCT, 14, 1)
257 FIELD(CNTHCTL, EL1NVPCT, 15, 1)
258 FIELD(CNTHCTL, EL1NVVCT, 16, 1)
259 FIELD(CNTHCTL, EVNTIS, 17, 1)
260 FIELD(CNTHCTL, CNTVMASK, 18, 1)
261 FIELD(CNTHCTL, CNTPMASK, 19, 1)
262 
263 /* We use a few fake FSR values for internal purposes in M profile.
264  * M profile cores don't have A/R format FSRs, but currently our
265  * get_phys_addr() code assumes A/R profile and reports failures via
266  * an A/R format FSR value. We then translate that into the proper
267  * M profile exception and FSR status bit in arm_v7m_cpu_do_interrupt().
268  * Mostly the FSR values we use for this are those defined for v7PMSA,
269  * since we share some of that codepath. A few kinds of fault are
270  * only for M profile and have no A/R equivalent, though, so we have
271  * to pick a value from the reserved range (which we never otherwise
272  * generate) to use for these.
273  * These values will never be visible to the guest.
274  */
275 #define M_FAKE_FSR_NSC_EXEC 0xf /* NS executing in S&NSC memory */
276 #define M_FAKE_FSR_SFAULT 0xe /* SecureFault INVTRAN, INVEP or AUVIOL */
277 
278 /**
279  * arm_aa32_secure_pl1_0(): Return true if in Secure PL1&0 regime
280  *
281  * Return true if the CPU is in the Secure PL1&0 translation regime.
282  * This requires that EL3 exists and is AArch32 and we are currently
283  * Secure. If this is the case then the ARMMMUIdx_E10* apply and
284  * mean we are in EL3, not EL1.
285  */
286 static inline bool arm_aa32_secure_pl1_0(CPUARMState *env)
287 {
288     return arm_feature(env, ARM_FEATURE_EL3) &&
289         !arm_el_is_aa64(env, 3) && arm_is_secure(env);
290 }
291 
292 /**
293  * raise_exception: Raise the specified exception.
294  * Raise a guest exception with the specified value, syndrome register
295  * and target exception level. This should be called from helper functions,
296  * and never returns because we will longjump back up to the CPU main loop.
297  */
298 G_NORETURN void raise_exception(CPUARMState *env, uint32_t excp,
299                                 uint32_t syndrome, uint32_t target_el);
300 
301 /*
302  * Similar to raise_exception(), but also unwind the CPU state from @ra.
303  */
304 G_NORETURN void raise_exception_ra(CPUARMState *env, uint32_t excp,
305                                    uint32_t syndrome, uint32_t target_el,
306                                    uintptr_t ra);
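
/*
 * Rough usage sketch: a TCG helper that detects an error condition can
 * report it as a guest exception, e.g. (hypothetical check; target EL 1
 * chosen only for illustration):
 *
 *     if (!allowed) {
 *         raise_exception_ra(env, EXCP_UDEF, syn_uncategorized(),
 *                            1, GETPC());
 *     }
 *
 * GETPC() supplies the host return address so the CPU state can be
 * unwound back to the guest instruction that called the helper.
 */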
307 
308 /*
309  * For AArch64, map a given EL to an index in the banked_spsr array.
310  * Note that this mapping and the AArch32 mapping defined in bank_number()
311  * must agree such that the AArch64<->AArch32 SPSRs have the architecturally
312  * mandated mapping between each other.
313  */
314 static inline unsigned int aarch64_banked_spsr_index(unsigned int el)
315 {
316     static const unsigned int map[4] = {
317         [1] = BANK_SVC, /* EL1.  */
318         [2] = BANK_HYP, /* EL2.  */
319         [3] = BANK_MON, /* EL3.  */
320     };
321     assert(el >= 1 && el <= 3);
322     return map[el];
323 }
324 
325 /* Map CPU modes onto saved register banks.  */
326 static inline int bank_number(int mode)
327 {
328     switch (mode) {
329     case ARM_CPU_MODE_USR:
330     case ARM_CPU_MODE_SYS:
331         return BANK_USRSYS;
332     case ARM_CPU_MODE_SVC:
333         return BANK_SVC;
334     case ARM_CPU_MODE_ABT:
335         return BANK_ABT;
336     case ARM_CPU_MODE_UND:
337         return BANK_UND;
338     case ARM_CPU_MODE_IRQ:
339         return BANK_IRQ;
340     case ARM_CPU_MODE_FIQ:
341         return BANK_FIQ;
342     case ARM_CPU_MODE_HYP:
343         return BANK_HYP;
344     case ARM_CPU_MODE_MON:
345         return BANK_MON;
346     }
347     g_assert_not_reached();
348 }
349 
350 /**
351  * r14_bank_number: Map CPU mode onto register bank for r14
352  *
353  * Given an AArch32 CPU mode, return the index into the saved register
354  * banks to use for the R14 (LR) in that mode. This is the same as
355  * bank_number(), except for the special case of Hyp mode, where
356  * R14 is shared with USR and SYS, unlike Hyp's own banked R13 and SPSR.
357  * This should be used as the index into env->banked_r14[], and
358  * bank_number() used for the index into env->banked_r13[] and
359  * env->banked_spsr[].
360  */
361 static inline int r14_bank_number(int mode)
362 {
363     return (mode == ARM_CPU_MODE_HYP) ? BANK_USRSYS : bank_number(mode);
364 }
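
/*
 * Illustrative sketch of how the two mappings differ for Hyp mode
 * (indexing the env->banked_r13[], banked_r14[] and banked_spsr[] arrays):
 *
 *     env->banked_r13[bank_number(ARM_CPU_MODE_HYP)];      // BANK_HYP slot
 *     env->banked_spsr[bank_number(ARM_CPU_MODE_HYP)];     // BANK_HYP slot
 *     env->banked_r14[r14_bank_number(ARM_CPU_MODE_HYP)];  // BANK_USRSYS slot
 */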
365 
366 void arm_cpu_register(const ARMCPUInfo *info);
367 void aarch64_cpu_register(const ARMCPUInfo *info);
368 
369 void register_cp_regs_for_features(ARMCPU *cpu);
370 void init_cpreg_list(ARMCPU *cpu);
371 
372 void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
373 void arm_translate_init(void);
374 
375 void arm_cpu_register_gdb_commands(ARMCPU *cpu);
376 void aarch64_cpu_register_gdb_commands(ARMCPU *cpu, GString *,
377                                        GPtrArray *, GPtrArray *);
378 
379 void arm_restore_state_to_opc(CPUState *cs,
380                               const TranslationBlock *tb,
381                               const uint64_t *data);
382 
383 #ifdef CONFIG_TCG
384 void arm_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);
385 
386 /* Our implementation of TCGCPUOps::cpu_exec_halt */
387 bool arm_cpu_exec_halt(CPUState *cs);
388 #endif /* CONFIG_TCG */
389 
390 typedef enum ARMFPRounding {
391     FPROUNDING_TIEEVEN,
392     FPROUNDING_POSINF,
393     FPROUNDING_NEGINF,
394     FPROUNDING_ZERO,
395     FPROUNDING_TIEAWAY,
396     FPROUNDING_ODD
397 } ARMFPRounding;
398 
399 extern const FloatRoundMode arm_rmode_to_sf_map[6];
400 
401 static inline FloatRoundMode arm_rmode_to_sf(ARMFPRounding rmode)
402 {
403     assert((unsigned)rmode < ARRAY_SIZE(arm_rmode_to_sf_map));
404     return arm_rmode_to_sf_map[rmode];
405 }
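
/*
 * Rough usage sketch (assuming the usual softfloat helpers): convert an
 * A-profile rounding mode into the softfloat equivalent and apply it:
 *
 *     FloatRoundMode rm = arm_rmode_to_sf(FPROUNDING_TIEAWAY);
 *     set_float_rounding_mode(rm, &env->vfp.fp_status);
 */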
406 
407 static inline void aarch64_save_sp(CPUARMState *env, int el)
408 {
409     if (env->pstate & PSTATE_SP) {
410         env->sp_el[el] = env->xregs[31];
411     } else {
412         env->sp_el[0] = env->xregs[31];
413     }
414 }
415 
416 static inline void aarch64_restore_sp(CPUARMState *env, int el)
417 {
418     if (env->pstate & PSTATE_SP) {
419         env->xregs[31] = env->sp_el[el];
420     } else {
421         env->xregs[31] = env->sp_el[0];
422     }
423 }
424 
425 static inline void update_spsel(CPUARMState *env, uint32_t imm)
426 {
427     unsigned int cur_el = arm_current_el(env);
428     /* Update PSTATE SPSel bit; this requires us to update the
429      * working stack pointer in xregs[31].
430      */
431     if (!((imm ^ env->pstate) & PSTATE_SP)) {
432         return;
433     }
434     aarch64_save_sp(env, cur_el);
435     env->pstate = deposit32(env->pstate, 0, 1, imm);
436 
437     /* We rely on illegal updates to PSTATE.SPSel from EL0 being trapped
438      * at translation time.
439      */
440     assert(cur_el >= 1 && cur_el <= 3);
441     aarch64_restore_sp(env, cur_el);
442 }
443 
444 /*
445  * arm_pamax
446  * @cpu: ARMCPU
447  *
448  * Returns the implementation defined bit-width of physical addresses.
449  * The ARMv8 reference manuals refer to this as PAMax().
450  */
451 unsigned int arm_pamax(ARMCPU *cpu);
452 
453 /*
454  * round_down_to_parange_index
455  * @bit_size: uint8_t
456  *
457  * Rounds down the bit_size supplied to the first supported ARM physical
458  * address range and returns the index for this. The index is intended to
459  * be used to set ID_AA64MMFR0_EL1's PARANGE bits.
460  */
461 uint8_t round_down_to_parange_index(uint8_t bit_size);
462 
463 /*
464  * round_down_to_parange_bit_size
465  * @bit_size: uint8_t
466  *
467  * Rounds down the bit_size supplied to the first supported ARM physical
468  * address range bit size and returns this.
469  */
470 uint8_t round_down_to_parange_bit_size(uint8_t bit_size);
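
/*
 * Worked example (assuming the usual PARange encodings of
 * 32/36/40/42/44/48/52 bits): a requested width of 43 bits is not a
 * supported size, so:
 *
 *     round_down_to_parange_bit_size(43);  // -> 42
 *     round_down_to_parange_index(43);     // -> 3 (the encoding for 42 bits)
 */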
471 
472 /* Return true if extended addresses are enabled.
473  * This is always the case if our translation regime is 64 bit,
474  * but depends on TTBCR.EAE for 32 bit.
475  */
476 static inline bool extended_addresses_enabled(CPUARMState *env)
477 {
478     uint64_t tcr = env->cp15.tcr_el[arm_is_secure(env) ? 3 : 1];
479     if (arm_feature(env, ARM_FEATURE_PMSA) &&
480         arm_feature(env, ARM_FEATURE_V8)) {
481         return true;
482     }
483     return arm_el_is_aa64(env, 1) ||
484            (arm_feature(env, ARM_FEATURE_LPAE) && (tcr & TTBCR_EAE));
485 }
486 
487 /* Update a QEMU watchpoint based on the information the guest has set in the
488  * DBGWCR<n>_EL1 and DBGWVR<n>_EL1 registers.
489  */
490 void hw_watchpoint_update(ARMCPU *cpu, int n);
491 /* Update the QEMU watchpoints for every guest watchpoint. This does a
492  * complete delete-and-reinstate of the QEMU watchpoint list and so is
493  * suitable for use after migration or on reset.
494  */
495 void hw_watchpoint_update_all(ARMCPU *cpu);
496 /* Update a QEMU breakpoint based on the information the guest has set in the
497  * DBGBCR<n>_EL1 and DBGBVR<n>_EL1 registers.
498  */
499 void hw_breakpoint_update(ARMCPU *cpu, int n);
500 /* Update the QEMU breakpoints for every guest breakpoint. This does a
501  * complete delete-and-reinstate of the QEMU breakpoint list and so is
502  * suitable for use after migration or on reset.
503  */
504 void hw_breakpoint_update_all(ARMCPU *cpu);
505 
506 /* Callback function for checking if a breakpoint should trigger. */
507 bool arm_debug_check_breakpoint(CPUState *cs);
508 
509 /* Callback function for checking if a watchpoint should trigger. */
510 bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp);
511 
512 /* Adjust addresses (in BE32 mode) before testing against watchpoint
513  * addresses.
514  */
515 vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len);
516 
517 /* Callback function for when a watchpoint or breakpoint triggers. */
518 void arm_debug_excp_handler(CPUState *cs);
519 
520 #if defined(CONFIG_USER_ONLY) || !defined(CONFIG_TCG)
521 static inline bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
522 {
523     return false;
524 }
525 static inline void arm_handle_psci_call(ARMCPU *cpu)
526 {
527     g_assert_not_reached();
528 }
529 #else
530 /* Return true if the r0/x0 value indicates that this SMC/HVC is a PSCI call. */
531 bool arm_is_psci_call(ARMCPU *cpu, int excp_type);
532 /* Actually handle a PSCI call */
533 void arm_handle_psci_call(ARMCPU *cpu);
534 #endif
535 
536 /**
537  * arm_clear_exclusive: clear the exclusive monitor
538  * @env: CPU env
539  * Clear the CPU's exclusive monitor, like the guest CLREX instruction.
540  */
541 static inline void arm_clear_exclusive(CPUARMState *env)
542 {
543     env->exclusive_addr = -1;
544 }
545 
546 /**
547  * ARMFaultType: type of an ARM MMU fault
548  * This corresponds to the v8A pseudocode's Fault enumeration,
549  * with extensions for QEMU internal conditions.
550  */
551 typedef enum ARMFaultType {
552     ARMFault_None,
553     ARMFault_AccessFlag,
554     ARMFault_Alignment,
555     ARMFault_Background,
556     ARMFault_Domain,
557     ARMFault_Permission,
558     ARMFault_Translation,
559     ARMFault_AddressSize,
560     ARMFault_SyncExternal,
561     ARMFault_SyncExternalOnWalk,
562     ARMFault_SyncParity,
563     ARMFault_SyncParityOnWalk,
564     ARMFault_AsyncParity,
565     ARMFault_AsyncExternal,
566     ARMFault_Debug,
567     ARMFault_TLBConflict,
568     ARMFault_UnsuppAtomicUpdate,
569     ARMFault_Lockdown,
570     ARMFault_Exclusive,
571     ARMFault_ICacheMaint,
572     ARMFault_QEMU_NSCExec, /* v8M: NS executing in S&NSC memory */
573     ARMFault_QEMU_SFault, /* v8M: SecureFault INVTRAN, INVEP or AUVIOL */
574     ARMFault_GPCFOnWalk,
575     ARMFault_GPCFOnOutput,
576 } ARMFaultType;
577 
578 typedef enum ARMGPCF {
579     GPCF_None,
580     GPCF_AddressSize,
581     GPCF_Walk,
582     GPCF_EABT,
583     GPCF_Fail,
584 } ARMGPCF;
585 
586 /**
587  * ARMMMUFaultInfo: Information describing an ARM MMU Fault
588  * @type: Type of fault
589  * @gpcf: Subtype of ARMFault_GPCFOn{Walk,Output}.
590  * @level: Table walk level (for translation, access flag and permission faults)
591  * @domain: Domain of the fault address (for non-LPAE CPUs only)
592  * @s2addr: Address that caused a fault at stage 2
593  * @paddr: physical address that caused a Granule Protection Check (GPC) fault
594  * @paddr_space: physical address space that caused a GPC fault
595  * @stage2: True if we faulted at stage 2
596  * @s1ptw: True if we faulted at stage 2 while doing a stage 1 page-table walk
597  * @s1ns: True if we faulted on a non-secure IPA while in secure state
598  * @ea: True if we should set the EA (external abort type) bit in syndrome
599  */
600 typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;
601 struct ARMMMUFaultInfo {
602     ARMFaultType type;
603     ARMGPCF gpcf;
604     target_ulong s2addr;
605     target_ulong paddr;
606     ARMSecuritySpace paddr_space;
607     int level;
608     int domain;
609     bool stage2;
610     bool s1ptw;
611     bool s1ns;
612     bool ea;
613 };
614 
615 /**
616  * arm_fi_to_sfsc: Convert fault info struct to short-format FSC
617  * Compare pseudocode EncodeSDFSC(), though unlike that function
618  * we set up a whole FSR-format code including domain field and
619  * putting the high bit of the FSC into bit 10.
620  */
621 static inline uint32_t arm_fi_to_sfsc(ARMMMUFaultInfo *fi)
622 {
623     uint32_t fsc;
624 
625     switch (fi->type) {
626     case ARMFault_None:
627         return 0;
628     case ARMFault_AccessFlag:
629         fsc = fi->level == 1 ? 0x3 : 0x6;
630         break;
631     case ARMFault_Alignment:
632         fsc = 0x1;
633         break;
634     case ARMFault_Permission:
635         fsc = fi->level == 1 ? 0xd : 0xf;
636         break;
637     case ARMFault_Domain:
638         fsc = fi->level == 1 ? 0x9 : 0xb;
639         break;
640     case ARMFault_Translation:
641         fsc = fi->level == 1 ? 0x5 : 0x7;
642         break;
643     case ARMFault_SyncExternal:
644         fsc = 0x8 | (fi->ea << 12);
645         break;
646     case ARMFault_SyncExternalOnWalk:
647         fsc = fi->level == 1 ? 0xc : 0xe;
648         fsc |= (fi->ea << 12);
649         break;
650     case ARMFault_SyncParity:
651         fsc = 0x409;
652         break;
653     case ARMFault_SyncParityOnWalk:
654         fsc = fi->level == 1 ? 0x40c : 0x40e;
655         break;
656     case ARMFault_AsyncParity:
657         fsc = 0x408;
658         break;
659     case ARMFault_AsyncExternal:
660         fsc = 0x406 | (fi->ea << 12);
661         break;
662     case ARMFault_Debug:
663         fsc = 0x2;
664         break;
665     case ARMFault_TLBConflict:
666         fsc = 0x400;
667         break;
668     case ARMFault_Lockdown:
669         fsc = 0x404;
670         break;
671     case ARMFault_Exclusive:
672         fsc = 0x405;
673         break;
674     case ARMFault_ICacheMaint:
675         fsc = 0x4;
676         break;
677     case ARMFault_Background:
678         fsc = 0x0;
679         break;
680     case ARMFault_QEMU_NSCExec:
681         fsc = M_FAKE_FSR_NSC_EXEC;
682         break;
683     case ARMFault_QEMU_SFault:
684         fsc = M_FAKE_FSR_SFAULT;
685         break;
686     default:
687         /* Other faults can't occur in a context that requires a
688          * short-format status code.
689          */
690         g_assert_not_reached();
691     }
692 
693     fsc |= (fi->domain << 4);
694     return fsc;
695 }
696 
697 /**
698  * arm_fi_to_lfsc: Convert fault info struct to long-format FSC
699  * Compare pseudocode EncodeLDFSC(), though unlike that function
700  * we fill in also the LPAE bit 9 of a DFSR format.
701  */
702 static inline uint32_t arm_fi_to_lfsc(ARMMMUFaultInfo *fi)
703 {
704     uint32_t fsc;
705 
706     switch (fi->type) {
707     case ARMFault_None:
708         return 0;
709     case ARMFault_AddressSize:
710         assert(fi->level >= -1 && fi->level <= 3);
711         if (fi->level < 0) {
712             fsc = 0b101001;
713         } else {
714             fsc = fi->level;
715         }
716         break;
717     case ARMFault_AccessFlag:
718         assert(fi->level >= 0 && fi->level <= 3);
719         fsc = 0b001000 | fi->level;
720         break;
721     case ARMFault_Permission:
722         assert(fi->level >= 0 && fi->level <= 3);
723         fsc = 0b001100 | fi->level;
724         break;
725     case ARMFault_Translation:
726         assert(fi->level >= -1 && fi->level <= 3);
727         if (fi->level < 0) {
728             fsc = 0b101011;
729         } else {
730             fsc = 0b000100 | fi->level;
731         }
732         break;
733     case ARMFault_SyncExternal:
734         fsc = 0x10 | (fi->ea << 12);
735         break;
736     case ARMFault_SyncExternalOnWalk:
737         assert(fi->level >= -1 && fi->level <= 3);
738         if (fi->level < 0) {
739             fsc = 0b010011;
740         } else {
741             fsc = 0b010100 | fi->level;
742         }
743         fsc |= fi->ea << 12;
744         break;
745     case ARMFault_SyncParity:
746         fsc = 0x18;
747         break;
748     case ARMFault_SyncParityOnWalk:
749         assert(fi->level >= -1 && fi->level <= 3);
750         if (fi->level < 0) {
751             fsc = 0b011011;
752         } else {
753             fsc = 0b011100 | fi->level;
754         }
755         break;
756     case ARMFault_AsyncParity:
757         fsc = 0x19;
758         break;
759     case ARMFault_AsyncExternal:
760         fsc = 0x11 | (fi->ea << 12);
761         break;
762     case ARMFault_Alignment:
763         fsc = 0x21;
764         break;
765     case ARMFault_Debug:
766         fsc = 0x22;
767         break;
768     case ARMFault_TLBConflict:
769         fsc = 0x30;
770         break;
771     case ARMFault_UnsuppAtomicUpdate:
772         fsc = 0x31;
773         break;
774     case ARMFault_Lockdown:
775         fsc = 0x34;
776         break;
777     case ARMFault_Exclusive:
778         fsc = 0x35;
779         break;
780     case ARMFault_GPCFOnWalk:
781         assert(fi->level >= -1 && fi->level <= 3);
782         if (fi->level < 0) {
783             fsc = 0b100011;
784         } else {
785             fsc = 0b100100 | fi->level;
786         }
787         break;
788     case ARMFault_GPCFOnOutput:
789         fsc = 0b101000;
790         break;
791     default:
792         /* Other faults can't occur in a context that requires a
793          * long-format status code.
794          */
795         g_assert_not_reached();
796     }
797 
798     fsc |= 1 << 9;
799     return fsc;
800 }
801 
802 static inline bool arm_extabort_type(MemTxResult result)
803 {
804     /* The EA bit in syndromes and fault status registers is an
805      * IMPDEF classification of external aborts. ARM implementations
806      * usually use this to indicate AXI bus Decode error (0) or
807      * Slave error (1); in QEMU we follow that.
808      */
809     return result != MEMTX_DECODE_ERROR;
810 }
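
/*
 * Rough sketch of how a fault is typically reported (hypothetical values):
 *
 *     ARMMMUFaultInfo fi = { .type = ARMFault_Translation, .level = 2 };
 *     uint32_t fsc = arm_fi_to_lfsc(&fi);  // 0x206: level 2 translation fault,
 *                                          // with the LPAE bit (bit 9) set
 *
 * For an external abort, fi.type would be ARMFault_SyncExternal and fi.ea
 * would typically be filled in from arm_extabort_type(response), where
 * 'response' is the MemTxResult from the memory system.
 */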
811 
812 #ifdef CONFIG_USER_ONLY
813 void arm_cpu_record_sigsegv(CPUState *cpu, vaddr addr,
814                             MMUAccessType access_type,
815                             bool maperr, uintptr_t ra);
816 void arm_cpu_record_sigbus(CPUState *cpu, vaddr addr,
817                            MMUAccessType access_type, uintptr_t ra);
818 #else
819 bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
820                       MMUAccessType access_type, int mmu_idx,
821                       bool probe, uintptr_t retaddr);
822 #endif
823 
824 static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
825 {
826     return mmu_idx & ARM_MMU_IDX_COREIDX_MASK;
827 }
828 
829 static inline ARMMMUIdx core_to_arm_mmu_idx(CPUARMState *env, int mmu_idx)
830 {
831     if (arm_feature(env, ARM_FEATURE_M)) {
832         return mmu_idx | ARM_MMU_IDX_M;
833     } else {
834         return mmu_idx | ARM_MMU_IDX_A;
835     }
836 }
837 
838 static inline ARMMMUIdx core_to_aa64_mmu_idx(int mmu_idx)
839 {
840     /* AArch64 is always a-profile. */
841     return mmu_idx | ARM_MMU_IDX_A;
842 }
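
/*
 * Illustrative round trip between the two encodings (A-profile case):
 *
 *     ARMMMUIdx idx  = ARMMMUIdx_E10_1;
 *     int core       = arm_to_core_mmu_idx(idx);        // strips ARM_MMU_IDX_A
 *     ARMMMUIdx back = core_to_arm_mmu_idx(env, core);  // == ARMMMUIdx_E10_1
 *
 * The core index is what goes into the TB flags and the generic TLB code;
 * the ARMMMUIdx form additionally carries the A/M-profile tag.
 */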
843 
844 /**
845  * Return the exception level we're running at if our current MMU index
846  * is @mmu_idx. @s_pl1_0 should be true if this is the AArch32
847  * Secure PL1&0 translation regime.
848  */
849 int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx, bool s_pl1_0);
850 
851 /* Return the MMU index for a v7M CPU in the specified security state */
852 ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate);
853 
854 /*
855  * Return true if the stage 1 translation regime is using LPAE
856  * format page tables
857  */
858 bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx);
859 
860 /* Raise a data fault alignment exception for the specified virtual address */
861 G_NORETURN void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
862                                             MMUAccessType access_type,
863                                             int mmu_idx, uintptr_t retaddr);
864 
865 #ifndef CONFIG_USER_ONLY
866 /* arm_cpu_do_transaction_failed: handle a memory system error response
867  * (e.g. "no device/memory present at address") by raising an external abort
868  * exception
869  */
870 void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
871                                    vaddr addr, unsigned size,
872                                    MMUAccessType access_type,
873                                    int mmu_idx, MemTxAttrs attrs,
874                                    MemTxResult response, uintptr_t retaddr);
875 #endif
876 
877 /* Call any registered EL change hooks */
878 static inline void arm_call_pre_el_change_hook(ARMCPU *cpu)
879 {
880     ARMELChangeHook *hook, *next;
881     QLIST_FOREACH_SAFE(hook, &cpu->pre_el_change_hooks, node, next) {
882         hook->hook(cpu, hook->opaque);
883     }
884 }
885 static inline void arm_call_el_change_hook(ARMCPU *cpu)
886 {
887     ARMELChangeHook *hook, *next;
888     QLIST_FOREACH_SAFE(hook, &cpu->el_change_hooks, node, next) {
889         hook->hook(cpu, hook->opaque);
890     }
891 }
892 
893 /* Return true if this address translation regime has two ranges.  */
894 static inline bool regime_has_2_ranges(ARMMMUIdx mmu_idx)
895 {
896     switch (mmu_idx) {
897     case ARMMMUIdx_Stage1_E0:
898     case ARMMMUIdx_Stage1_E1:
899     case ARMMMUIdx_Stage1_E1_PAN:
900     case ARMMMUIdx_E10_0:
901     case ARMMMUIdx_E10_1:
902     case ARMMMUIdx_E10_1_PAN:
903     case ARMMMUIdx_E20_0:
904     case ARMMMUIdx_E20_2:
905     case ARMMMUIdx_E20_2_PAN:
906         return true;
907     default:
908         return false;
909     }
910 }
911 
912 static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx)
913 {
914     switch (mmu_idx) {
915     case ARMMMUIdx_Stage1_E1_PAN:
916     case ARMMMUIdx_E10_1_PAN:
917     case ARMMMUIdx_E20_2_PAN:
918         return true;
919     default:
920         return false;
921     }
922 }
923 
924 static inline bool regime_is_stage2(ARMMMUIdx mmu_idx)
925 {
926     return mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S;
927 }
928 
929 /* Return the exception level which controls this address translation regime */
930 static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
931 {
932     switch (mmu_idx) {
933     case ARMMMUIdx_E20_0:
934     case ARMMMUIdx_E20_2:
935     case ARMMMUIdx_E20_2_PAN:
936     case ARMMMUIdx_Stage2:
937     case ARMMMUIdx_Stage2_S:
938     case ARMMMUIdx_E2:
939         return 2;
940     case ARMMMUIdx_E3:
941         return 3;
942     case ARMMMUIdx_E10_0:
943     case ARMMMUIdx_Stage1_E0:
944     case ARMMMUIdx_E10_1:
945     case ARMMMUIdx_E10_1_PAN:
946     case ARMMMUIdx_Stage1_E1:
947     case ARMMMUIdx_Stage1_E1_PAN:
948         return arm_el_is_aa64(env, 3) || !arm_is_secure_below_el3(env) ? 1 : 3;
949     case ARMMMUIdx_MPrivNegPri:
950     case ARMMMUIdx_MUserNegPri:
951     case ARMMMUIdx_MPriv:
952     case ARMMMUIdx_MUser:
953     case ARMMMUIdx_MSPrivNegPri:
954     case ARMMMUIdx_MSUserNegPri:
955     case ARMMMUIdx_MSPriv:
956     case ARMMMUIdx_MSUser:
957         return 1;
958     default:
959         g_assert_not_reached();
960     }
961 }
962 
963 static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
964 {
965     switch (mmu_idx) {
966     case ARMMMUIdx_E20_0:
967     case ARMMMUIdx_Stage1_E0:
968     case ARMMMUIdx_MUser:
969     case ARMMMUIdx_MSUser:
970     case ARMMMUIdx_MUserNegPri:
971     case ARMMMUIdx_MSUserNegPri:
972         return true;
973     default:
974         return false;
975     case ARMMMUIdx_E10_0:
976     case ARMMMUIdx_E10_1:
977     case ARMMMUIdx_E10_1_PAN:
978         g_assert_not_reached();
979     }
980 }
981 
982 /* Return the SCTLR value which controls this address translation regime */
983 static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
984 {
985     return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
986 }
987 
988 /*
989  * These are the fields in VTCR_EL2 which affect both the Secure stage 2
990  * and the Non-Secure stage 2 translation regimes (and hence which are
991  * not present in VSTCR_EL2).
992  */
993 #define VTCR_SHARED_FIELD_MASK \
994     (R_VTCR_IRGN0_MASK | R_VTCR_ORGN0_MASK | R_VTCR_SH0_MASK | \
995      R_VTCR_PS_MASK | R_VTCR_VS_MASK | R_VTCR_HA_MASK | R_VTCR_HD_MASK | \
996      R_VTCR_DS_MASK)
997 
998 /* Return the value of the TCR controlling this translation regime */
999 static inline uint64_t regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
1000 {
1001     if (mmu_idx == ARMMMUIdx_Stage2) {
1002         return env->cp15.vtcr_el2;
1003     }
1004     if (mmu_idx == ARMMMUIdx_Stage2_S) {
1005         /*
1006          * Secure stage 2 shares fields from VTCR_EL2. We merge those
1007          * in with the VSTCR_EL2 value to synthesize a single VTCR_EL2 format
1008          * value so the callers don't need to special case this.
1009          *
1010          * If a future architecture change defines bits in VSTCR_EL2 that
1011          * overlap with these VTCR_EL2 fields we may need to revisit this.
1012          */
1013         uint64_t v = env->cp15.vstcr_el2 & ~VTCR_SHARED_FIELD_MASK;
1014         v |= env->cp15.vtcr_el2 & VTCR_SHARED_FIELD_MASK;
1015         return v;
1016     }
1017     return env->cp15.tcr_el[regime_el(env, mmu_idx)];
1018 }
1019 
1020 /* Return true if the translation regime is using LPAE format page tables */
1021 static inline bool regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
1022 {
1023     int el = regime_el(env, mmu_idx);
1024     if (el == 2 || arm_el_is_aa64(env, el)) {
1025         return true;
1026     }
1027     if (arm_feature(env, ARM_FEATURE_PMSA) &&
1028         arm_feature(env, ARM_FEATURE_V8)) {
1029         return true;
1030     }
1031     if (arm_feature(env, ARM_FEATURE_LPAE)
1032         && (regime_tcr(env, mmu_idx) & TTBCR_EAE)) {
1033         return true;
1034     }
1035     return false;
1036 }
1037 
1038 /**
1039  * arm_num_brps: Return number of implemented breakpoints.
1040  * Note that the ID register BRPS field is "number of bps - 1",
1041  * and we return the actual number of breakpoints.
1042  */
1043 static inline int arm_num_brps(ARMCPU *cpu)
1044 {
1045     if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
1046         return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, BRPS) + 1;
1047     } else {
1048         return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, BRPS) + 1;
1049     }
1050 }
1051 
1052 /**
1053  * arm_num_wrps: Return number of implemented watchpoints.
1054  * Note that the ID register WRPS field is "number of wps - 1",
1055  * and we return the actual number of watchpoints.
1056  */
1057 static inline int arm_num_wrps(ARMCPU *cpu)
1058 {
1059     if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
1060         return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, WRPS) + 1;
1061     } else {
1062         return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, WRPS) + 1;
1063     }
1064 }
1065 
1066 /**
1067  * arm_num_ctx_cmps: Return number of implemented context comparators.
1068  * Note that the ID register CTX_CMPS field is "number of cmps - 1",
1069  * and we return the actual number of comparators.
1070  */
1071 static inline int arm_num_ctx_cmps(ARMCPU *cpu)
1072 {
1073     if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
1074         return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS) + 1;
1075     } else {
1076         return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, CTX_CMPS) + 1;
1077     }
1078 }
1079 
1080 /**
1081  * v7m_using_psp: Return true if using process stack pointer
1082  * Return true if the CPU is currently using the process stack
1083  * pointer, or false if it is using the main stack pointer.
1084  */
1085 static inline bool v7m_using_psp(CPUARMState *env)
1086 {
1087     /* Handler mode always uses the main stack; for thread mode
1088      * the CONTROL.SPSEL bit determines the answer.
1089      * Note that in v7M it is not possible to be in Handler mode with
1090      * CONTROL.SPSEL non-zero, but in v8M it is, so we must check both.
1091      */
1092     return !arm_v7m_is_handler_mode(env) &&
1093         env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK;
1094 }
1095 
1096 /**
1097  * v7m_sp_limit: Return SP limit for current CPU state
1098  * Return the SP limit value for the current CPU security state
1099  * and stack pointer.
1100  */
1101 static inline uint32_t v7m_sp_limit(CPUARMState *env)
1102 {
1103     if (v7m_using_psp(env)) {
1104         return env->v7m.psplim[env->v7m.secure];
1105     } else {
1106         return env->v7m.msplim[env->v7m.secure];
1107     }
1108 }
1109 
1110 /**
1111  * v7m_cpacr_pass:
1112  * Return true if the v7M CPACR permits access to the FPU for the specified
1113  * security state and privilege level.
1114  */
1115 static inline bool v7m_cpacr_pass(CPUARMState *env,
1116                                   bool is_secure, bool is_priv)
1117 {
1118     switch (extract32(env->v7m.cpacr[is_secure], 20, 2)) {
1119     case 0:
1120     case 2: /* UNPREDICTABLE: we treat like 0 */
1121         return false;
1122     case 1:
1123         return is_priv;
1124     case 3:
1125         return true;
1126     default:
1127         g_assert_not_reached();
1128     }
1129 }
1130 
1131 /**
1132  * aarch32_mode_name(): Return name of the AArch32 CPU mode
1133  * @psr: Program Status Register indicating CPU mode
1134  *
1135  * Returns, for debug logging purposes, a printable representation
1136  * of the AArch32 CPU mode ("svc", "usr", etc) as indicated by
1137  * the low bits of the specified PSR.
1138  */
1139 static inline const char *aarch32_mode_name(uint32_t psr)
1140 {
1141     static const char cpu_mode_names[16][4] = {
1142         "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
1143         "???", "???", "hyp", "und", "???", "???", "???", "sys"
1144     };
1145 
1146     return cpu_mode_names[psr & 0xf];
1147 }
1148 
1149 /**
1150  * arm_cpu_update_virq: Update CPU_INTERRUPT_VIRQ bit in cs->interrupt_request
1151  *
1152  * Update the CPU_INTERRUPT_VIRQ bit in cs->interrupt_request, following
1153  * a change to either the input VIRQ line from the GIC or the HCR_EL2.VI bit.
1154  * Must be called with the BQL held.
1155  */
1156 void arm_cpu_update_virq(ARMCPU *cpu);
1157 
1158 /**
1159  * arm_cpu_update_vfiq: Update CPU_INTERRUPT_VFIQ bit in cs->interrupt_request
1160  *
1161  * Update the CPU_INTERRUPT_VFIQ bit in cs->interrupt_request, following
1162  * a change to either the input VFIQ line from the GIC or the HCR_EL2.VF bit.
1163  * Must be called with the BQL held.
1164  */
1165 void arm_cpu_update_vfiq(ARMCPU *cpu);
1166 
1167 /**
1168  * arm_cpu_update_vinmi: Update CPU_INTERRUPT_VINMI bit in cs->interrupt_request
1169  *
1170  * Update the CPU_INTERRUPT_VINMI bit in cs->interrupt_request, following
1171  * a change to either the input VNMI line from the GIC or the HCRX_EL2.VINMI bit.
1172  * Must be called with the BQL held.
1173  */
1174 void arm_cpu_update_vinmi(ARMCPU *cpu);
1175 
1176 /**
1177  * arm_cpu_update_vfnmi: Update CPU_INTERRUPT_VFNMI bit in cs->interrupt_request
1178  *
1179  * Update the CPU_INTERRUPT_VFNMI bit in cs->interrupt_request, following
1180  * a change to the HCRX_EL2.VFNMI bit.
1181  * Must be called with the BQL held.
1182  */
1183 void arm_cpu_update_vfnmi(ARMCPU *cpu);
1184 
1185 /**
1186  * arm_cpu_update_vserr: Update CPU_INTERRUPT_VSERR bit
1187  *
1188  * Update the CPU_INTERRUPT_VSERR bit in cs->interrupt_request,
1189  * following a change to the HCR_EL2.VSE bit.
1190  */
1191 void arm_cpu_update_vserr(ARMCPU *cpu);
1192 
1193 /**
1194  * arm_mmu_idx_el:
1195  * @env: The cpu environment
1196  * @el: The EL to use.
1197  *
1198  * Return the full ARMMMUIdx for the translation regime for EL.
1199  */
1200 ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el);
1201 
1202 /**
1203  * arm_mmu_idx:
1204  * @env: The cpu environment
1205  *
1206  * Return the full ARMMMUIdx for the current translation regime.
1207  */
1208 ARMMMUIdx arm_mmu_idx(CPUARMState *env);
1209 
1210 /**
1211  * arm_stage1_mmu_idx:
1212  * @env: The cpu environment
1213  *
1214  * Return the ARMMMUIdx for the stage1 traversal for the current regime.
1215  */
1216 #ifdef CONFIG_USER_ONLY
1217 static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
1218 {
1219     return ARMMMUIdx_Stage1_E0;
1220 }
1221 static inline ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
1222 {
1223     return ARMMMUIdx_Stage1_E0;
1224 }
1225 #else
1226 ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx);
1227 ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env);
1228 #endif
1229 
1230 /**
1231  * arm_mmu_idx_is_stage1_of_2:
1232  * @mmu_idx: The ARMMMUIdx to test
1233  *
1234  * Return true if @mmu_idx is a NOTLB mmu_idx that is the
1235  * first stage of a two stage regime.
1236  */
1237 static inline bool arm_mmu_idx_is_stage1_of_2(ARMMMUIdx mmu_idx)
1238 {
1239     switch (mmu_idx) {
1240     case ARMMMUIdx_Stage1_E0:
1241     case ARMMMUIdx_Stage1_E1:
1242     case ARMMMUIdx_Stage1_E1_PAN:
1243         return true;
1244     default:
1245         return false;
1246     }
1247 }
1248 
1249 static inline uint32_t aarch32_cpsr_valid_mask(uint64_t features,
1250                                                const ARMISARegisters *id)
1251 {
1252     uint32_t valid = CPSR_M | CPSR_AIF | CPSR_IL | CPSR_NZCV;
1253 
1254     if ((features >> ARM_FEATURE_V4T) & 1) {
1255         valid |= CPSR_T;
1256     }
1257     if ((features >> ARM_FEATURE_V5) & 1) {
1258         valid |= CPSR_Q; /* V5TE in reality */
1259     }
1260     if ((features >> ARM_FEATURE_V6) & 1) {
1261         valid |= CPSR_E | CPSR_GE;
1262     }
1263     if ((features >> ARM_FEATURE_THUMB2) & 1) {
1264         valid |= CPSR_IT;
1265     }
1266     if (isar_feature_aa32_jazelle(id)) {
1267         valid |= CPSR_J;
1268     }
1269     if (isar_feature_aa32_pan(id)) {
1270         valid |= CPSR_PAN;
1271     }
1272     if (isar_feature_aa32_dit(id)) {
1273         valid |= CPSR_DIT;
1274     }
1275     if (isar_feature_aa32_ssbs(id)) {
1276         valid |= CPSR_SSBS;
1277     }
1278 
1279     return valid;
1280 }
1281 
1282 static inline uint32_t aarch64_pstate_valid_mask(const ARMISARegisters *id)
1283 {
1284     uint32_t valid;
1285 
1286     valid = PSTATE_M | PSTATE_DAIF | PSTATE_IL | PSTATE_SS | PSTATE_NZCV;
1287     if (isar_feature_aa64_bti(id)) {
1288         valid |= PSTATE_BTYPE;
1289     }
1290     if (isar_feature_aa64_pan(id)) {
1291         valid |= PSTATE_PAN;
1292     }
1293     if (isar_feature_aa64_uao(id)) {
1294         valid |= PSTATE_UAO;
1295     }
1296     if (isar_feature_aa64_dit(id)) {
1297         valid |= PSTATE_DIT;
1298     }
1299     if (isar_feature_aa64_ssbs(id)) {
1300         valid |= PSTATE_SSBS;
1301     }
1302     if (isar_feature_aa64_mte(id)) {
1303         valid |= PSTATE_TCO;
1304     }
1305     if (isar_feature_aa64_nmi(id)) {
1306         valid |= PSTATE_ALLINT;
1307     }
1308 
1309     return valid;
1310 }
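
/*
 * Rough usage sketch: when accepting an SPSR/PSTATE value from outside
 * (e.g. from the gdbstub or a hypervisor interface), unsupported bits can
 * be silently dropped with these masks:
 *
 *     uint32_t mask = aarch64_pstate_valid_mask(&env_archcpu(env)->isar);
 *     pstate &= mask;   // 'pstate' is a hypothetical incoming value
 */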
1311 
1312 /* Granule size (i.e. page size) */
1313 typedef enum ARMGranuleSize {
1314     /* Same order as TG0 encoding */
1315     Gran4K,
1316     Gran64K,
1317     Gran16K,
1318     GranInvalid,
1319 } ARMGranuleSize;
1320 
1321 /**
1322  * arm_granule_bits: Return address size of the granule in bits
1323  *
1324  * Return the address size of the granule in bits. This corresponds
1325  * to the pseudocode TGxGranuleBits().
1326  */
1327 static inline int arm_granule_bits(ARMGranuleSize gran)
1328 {
1329     switch (gran) {
1330     case Gran64K:
1331         return 16;
1332     case Gran16K:
1333         return 14;
1334     case Gran4K:
1335         return 12;
1336     default:
1337         g_assert_not_reached();
1338     }
1339 }
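
/*
 * Worked example: the granule determines the page size and offset mask,
 * e.g. for 16K granules:
 *
 *     int bits = arm_granule_bits(Gran16K);        // 14
 *     uint64_t page_size   = 1ULL << bits;         // 16384
 *     uint64_t offset_mask = page_size - 1;        // 0x3fff
 */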
1340 
1341 /*
1342  * Parameters of a given virtual address, as extracted from the
1343  * translation control register (TCR) for a given regime.
1344  */
1345 typedef struct ARMVAParameters {
1346     unsigned tsz    : 8;
1347     unsigned ps     : 3;
1348     unsigned sh     : 2;
1349     unsigned select : 1;
1350     bool tbi        : 1;
1351     bool epd        : 1;
1352     bool hpd        : 1;
1353     bool tsz_oob    : 1;  /* tsz has been clamped to legal range */
1354     bool ds         : 1;
1355     bool ha         : 1;
1356     bool hd         : 1;
1357     ARMGranuleSize gran : 2;
1358 } ARMVAParameters;
1359 
1360 /**
1361  * aa64_va_parameters: Return parameters for an AArch64 virtual address
1362  * @env: CPU
1363  * @va: virtual address to look up
1364  * @mmu_idx: determines translation regime to use
1365  * @data: true if this is a data access
1366  * @el1_is_aa32: true if we are asking about stage 2 when EL1 is AArch32
1367  *  (ignored if @mmu_idx is for a stage 1 regime; only affects tsz/tsz_oob)
1368  */
1369 ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
1370                                    ARMMMUIdx mmu_idx, bool data,
1371                                    bool el1_is_aa32);
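
/*
 * Rough usage sketch (hypothetical 'addr'): the returned parameters
 * describe the regime's view of the address, e.g.
 *
 *     ARMVAParameters p = aa64_va_parameters(env, addr, arm_mmu_idx(env),
 *                                            true, false);
 *     int va_bits   = 64 - p.tsz;              // size of the input VA range
 *     int page_bits = arm_granule_bits(p.gran);
 */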
1372 
1373 int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx);
1374 int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx);
1375 int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx);
1376 
1377 /* Determine if allocation tags are available.  */
1378 static inline bool allocation_tag_access_enabled(CPUARMState *env, int el,
1379                                                  uint64_t sctlr)
1380 {
1381     if (el < 3
1382         && arm_feature(env, ARM_FEATURE_EL3)
1383         && !(env->cp15.scr_el3 & SCR_ATA)) {
1384         return false;
1385     }
1386     if (el < 2 && arm_is_el2_enabled(env)) {
1387         uint64_t hcr = arm_hcr_el2_eff(env);
1388         if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
1389             return false;
1390         }
1391     }
1392     sctlr &= (el == 0 ? SCTLR_ATA0 : SCTLR_ATA);
1393     return sctlr != 0;
1394 }
1395 
1396 #ifndef CONFIG_USER_ONLY
1397 
1398 /* Security attributes for an address, as returned by v8m_security_lookup. */
1399 typedef struct V8M_SAttributes {
1400     bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */
1401     bool ns;
1402     bool nsc;
1403     uint8_t sregion;
1404     bool srvalid;
1405     uint8_t iregion;
1406     bool irvalid;
1407 } V8M_SAttributes;
1408 
1409 void v8m_security_lookup(CPUARMState *env, uint32_t address,
1410                          MMUAccessType access_type, ARMMMUIdx mmu_idx,
1411                          bool secure, V8M_SAttributes *sattrs);
1412 
1413 /* Cacheability and shareability attributes for a memory access */
1414 typedef struct ARMCacheAttrs {
1415     /*
1416      * If is_s2_format is true, attrs is the S2 descriptor bits [5:2]
1417      * Otherwise, attrs is the same as the MAIR_EL1 8-bit format
1418      */
1419     unsigned int attrs:8;
1420     unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
1421     bool is_s2_format:1;
1422 } ARMCacheAttrs;
1423 
1424 /* Fields that are valid upon success. */
1425 typedef struct GetPhysAddrResult {
1426     CPUTLBEntryFull f;
1427     ARMCacheAttrs cacheattrs;
1428 } GetPhysAddrResult;
1429 
1430 /**
1431  * get_phys_addr: get the physical address for a virtual address
1432  * @env: CPUARMState
1433  * @address: virtual address to get physical address for
1434  * @access_type: 0 for read, 1 for write, 2 for execute
1435  * @mmu_idx: MMU index indicating required translation regime
1436  * @result: set on translation success.
1437  * @fi: set to fault info if the translation fails
1438  *
1439  * Find the physical address corresponding to the given virtual address,
1440  * by doing a translation table walk on MMU based systems or using the
1441  * MPU state on MPU based systems.
1442  *
1443  * Returns false if the translation was successful. Otherwise, the fields
1444  * of @result may not be filled in, and @fi is populated with information
1445  * on why the translation aborted, which can be converted to the format of
1446  * a DFSR/IFSR fault register, with the following caveats:
1447  *  * we honour the short vs long DFSR format differences.
1448  *  * the WnR bit is never set (the caller must do this).
1449  *  * for PMSAv5 based systems we don't bother to return a full FSR format
1450  *    value.
1451  */
1452 bool get_phys_addr(CPUARMState *env, vaddr address,
1453                    MMUAccessType access_type, ARMMMUIdx mmu_idx,
1454                    GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
1455     __attribute__((nonnull));
1456 
1457 /**
1458  * get_phys_addr_with_space_nogpc: get the physical address for a virtual
1459  *                                 address
1460  * @env: CPUARMState
1461  * @address: virtual address to get physical address for
1462  * @access_type: 0 for read, 1 for write, 2 for execute
1463  * @mmu_idx: MMU index indicating required translation regime
1464  * @space: security space for the access
1465  * @result: set on translation success.
1466  * @fi: set to fault info if the translation fails
1467  *
1468  * Similar to get_phys_addr, but use the given security space and don't perform
1469  * a Granule Protection Check on the resulting address.
1470  */
1471 bool get_phys_addr_with_space_nogpc(CPUARMState *env, vaddr address,
1472                                     MMUAccessType access_type,
1473                                     ARMMMUIdx mmu_idx, ARMSecuritySpace space,
1474                                     GetPhysAddrResult *result,
1475                                     ARMMMUFaultInfo *fi)
1476     __attribute__((nonnull));
1477 
1478 bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
1479                        MMUAccessType access_type, ARMMMUIdx mmu_idx,
1480                        bool is_secure, GetPhysAddrResult *result,
1481                        ARMMMUFaultInfo *fi, uint32_t *mregion);
1482 
1483 void arm_log_exception(CPUState *cs);
1484 
1485 #endif /* !CONFIG_USER_ONLY */
1486 
1487 /*
1488  * SVE predicates are 1/8 the size of SVE vectors, and cannot use
1489  * the same simd_desc() encoding due to restrictions on size.
1490  * Use these instead.
1491  */
1492 FIELD(PREDDESC, OPRSZ, 0, 6)
1493 FIELD(PREDDESC, ESZ, 6, 2)
1494 FIELD(PREDDESC, DATA, 8, 24)
1495 
1496 /*
1497  * The SVE simd_data field, for memory ops, contains either
1498  * rd (5 bits) or a shift count (2 bits).
1499  */
1500 #define SVE_MTEDESC_SHIFT 5
1501 
1502 /* Bits within a descriptor passed to the helper_mte_check* functions. */
1503 FIELD(MTEDESC, MIDX,  0, 4)
1504 FIELD(MTEDESC, TBI,   4, 2)
1505 FIELD(MTEDESC, TCMA,  6, 2)
1506 FIELD(MTEDESC, WRITE, 8, 1)
1507 FIELD(MTEDESC, ALIGN, 9, 3)
1508 FIELD(MTEDESC, SIZEM1, 12, SIMD_DATA_BITS - SVE_MTEDESC_SHIFT - 12)  /* size - 1 */
1509 
1510 bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr);
1511 uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra);
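
/*
 * Rough sketch of how an MTEDESC word is put together (hypothetical local
 * variables; the real descriptors are assembled at translate time):
 *
 *     uint32_t desc = 0;
 *     desc = FIELD_DP32(desc, MTEDESC, MIDX, mmu_idx);
 *     desc = FIELD_DP32(desc, MTEDESC, TBI, tbi);
 *     desc = FIELD_DP32(desc, MTEDESC, TCMA, tcma);
 *     desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
 *     desc = FIELD_DP32(desc, MTEDESC, SIZEM1, size - 1);
 *     uint64_t clean_addr = mte_check(env, desc, ptr, GETPC());
 */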
1512 
1513 /**
1514  * mte_mops_probe: Check where the next MTE failure is for a FEAT_MOPS operation
1515  * @env: CPU env
1516  * @ptr: start address of memory region (dirty pointer)
1517  * @size: length of region (guaranteed not to cross a page boundary)
1518  * @desc: MTEDESC descriptor word (0 means no MTE checks)
1519  * Returns: the size of the region that can be copied without hitting
1520  *          an MTE tag failure
1521  *
1522  * Note that we assume that the caller has already checked the TBI
1523  * and TCMA bits with mte_checks_needed() and an MTE check is definitely
1524  * required.
1525  */
1526 uint64_t mte_mops_probe(CPUARMState *env, uint64_t ptr, uint64_t size,
1527                         uint32_t desc);
1528 
1529 /**
1530  * mte_mops_probe_rev: Check where the next MTE failure is for a FEAT_MOPS
1531  *                     operation going in the reverse direction
1532  * @env: CPU env
1533  * @ptr: *end* address of memory region (dirty pointer)
1534  * @size: length of region (guaranteed not to cross a page boundary)
1535  * @desc: MTEDESC descriptor word (0 means no MTE checks)
1536  * Returns: the size of the region that can be copied without hitting
1537  *          an MTE tag failure
1538  *
1539  * Note that we assume that the caller has already checked the TBI
1540  * and TCMA bits with mte_checks_needed() and an MTE check is definitely
1541  * required.
1542  */
1543 uint64_t mte_mops_probe_rev(CPUARMState *env, uint64_t ptr, uint64_t size,
1544                             uint32_t desc);
1545 
1546 /**
1547  * mte_check_fail: Record an MTE tag check failure
1548  * @env: CPU env
1549  * @desc: MTEDESC descriptor word
1550  * @dirty_ptr: Failing dirty address
1551  * @ra: TCG retaddr
1552  *
1553  * This may never return (if the MTE tag checks are configured to fault).
1554  */
1555 void mte_check_fail(CPUARMState *env, uint32_t desc,
1556                     uint64_t dirty_ptr, uintptr_t ra);
1557 
1558 /**
1559  * mte_mops_set_tags: Set MTE tags for a portion of a FEAT_MOPS operation
1560  * @env: CPU env
1561  * @dirty_ptr: Start address of memory region (dirty pointer)
1562  * @size: length of region (guaranteed not to cross page boundary)
1563  * @desc: MTEDESC descriptor word
1564  */
1565 void mte_mops_set_tags(CPUARMState *env, uint64_t dirty_ptr, uint64_t size,
1566                        uint32_t desc);
1567 
1568 static inline int allocation_tag_from_addr(uint64_t ptr)
1569 {
1570     return extract64(ptr, 56, 4);
1571 }
1572 
1573 static inline uint64_t address_with_allocation_tag(uint64_t ptr, int rtag)
1574 {
1575     return deposit64(ptr, 56, 4, rtag);
1576 }
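
/*
 * Worked example: the allocation tag lives in bits [59:56] of a pointer,
 * so for instance:
 *
 *     allocation_tag_from_addr(0x0c00123456789abcULL);   // -> 0xc
 *     address_with_allocation_tag(ptr, 0x5);             // bits [59:56] = 0x5
 */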
1577 
1578 /* Return true if tbi bits mean that the access is checked.  */
1579 static inline bool tbi_check(uint32_t desc, int bit55)
1580 {
1581     return (desc >> (R_MTEDESC_TBI_SHIFT + bit55)) & 1;
1582 }
1583 
1584 /* Return true if tcma bits mean that the access is unchecked.  */
1585 static inline bool tcma_check(uint32_t desc, int bit55, int ptr_tag)
1586 {
1587     /*
1588      * We had extracted bit55 and ptr_tag for other reasons, so fold
1589      * (ptr<59:55> == 00000 || ptr<59:55> == 11111) into a single test.
1590      */
1591     bool match = ((ptr_tag + bit55) & 0xf) == 0;
1592     bool tcma = (desc >> (R_MTEDESC_TCMA_SHIFT + bit55)) & 1;
1593     return tcma && match;
1594 }
1595 
1596 /*
1597  * For TBI, ideally, we would do nothing.  Proper behaviour on fault is
1598  * for the tag to be present in the FAR_ELx register.  But for user-only
1599  * mode, we do not have a TLB with which to implement this, so we must
1600  * remove the top byte.
1601  */
1602 static inline uint64_t useronly_clean_ptr(uint64_t ptr)
1603 {
1604 #ifdef CONFIG_USER_ONLY
1605     /* TBI0 is known to be enabled, while TBI1 is disabled. */
1606     ptr &= sextract64(ptr, 0, 56);
1607 #endif
1608     return ptr;
1609 }
1610 
1611 static inline uint64_t useronly_maybe_clean_ptr(uint32_t desc, uint64_t ptr)
1612 {
1613 #ifdef CONFIG_USER_ONLY
1614     int64_t clean_ptr = sextract64(ptr, 0, 56);
1615     if (tbi_check(desc, clean_ptr < 0)) {
1616         ptr = clean_ptr;
1617     }
1618 #endif
1619     return ptr;
1620 }
1621 
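/*
 * Illustrative sketch (not part of QEMU): the "ptr &= sextract64(ptr, 0, 56)"
 * fold above clears bits [63:56] only when bit 55 is 0 (the TBI0 half of
 * the address space) and leaves the pointer untouched when bit 55 is 1.
 * The hypothetical helper below spells that out without the fold.
 */
static inline uint64_t example_clean_ptr_unfolded(uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    if (!extract64(ptr, 55, 1)) {
        ptr = extract64(ptr, 0, 56);    /* clear the top byte */
    }
#endif
    return ptr;
}
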
1622 /* Values for M-profile PSR.ECI for MVE insns */
1623 enum MVEECIState {
1624     ECI_NONE = 0, /* No completed beats */
1625     ECI_A0 = 1, /* Completed: A0 */
1626     ECI_A0A1 = 2, /* Completed: A0, A1 */
1627     /* 3 is reserved */
1628     ECI_A0A1A2 = 4, /* Completed: A0, A1, A2 */
1629     ECI_A0A1A2B0 = 5, /* Completed: A0, A1, A2, B0 */
1630     /* All other values reserved */
1631 };
1632 
1633 /* Definitions for the PMU registers */
1634 #define PMCRN_MASK  0xf800
1635 #define PMCRN_SHIFT 11
1636 #define PMCRLP  0x80
1637 #define PMCRLC  0x40
1638 #define PMCRDP  0x20
1639 #define PMCRX   0x10
1640 #define PMCRD   0x8
1641 #define PMCRC   0x4
1642 #define PMCRP   0x2
1643 #define PMCRE   0x1
1644 /*
1645  * Mask of PMCR bits writable by guest (not including WO bits like C, P,
1646  * which can be written as 1 to trigger behaviour but which stay RAZ).
1647  */
1648 #define PMCR_WRITABLE_MASK (PMCRLP | PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE)
1649 
1650 #define PMXEVTYPER_P          0x80000000
1651 #define PMXEVTYPER_U          0x40000000
1652 #define PMXEVTYPER_NSK        0x20000000
1653 #define PMXEVTYPER_NSU        0x10000000
1654 #define PMXEVTYPER_NSH        0x08000000
1655 #define PMXEVTYPER_M          0x04000000
1656 #define PMXEVTYPER_MT         0x02000000
1657 #define PMXEVTYPER_EVTCOUNT   0x0000ffff
1658 #define PMXEVTYPER_MASK       (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
1659                                PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
1660                                PMXEVTYPER_M | PMXEVTYPER_MT | \
1661                                PMXEVTYPER_EVTCOUNT)
1662 
1663 #define PMCCFILTR             0xf8000000
1664 #define PMCCFILTR_M           PMXEVTYPER_M
1665 #define PMCCFILTR_EL0         (PMCCFILTR | PMCCFILTR_M)
1666 
1667 static inline uint32_t pmu_num_counters(CPUARMState *env)
1668 {
1669     ARMCPU *cpu = env_archcpu(env);
1670 
1671     return (cpu->isar.reset_pmcr_el0 & PMCRN_MASK) >> PMCRN_SHIFT;
1672 }
1673 
1674 /* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
1675 static inline uint64_t pmu_counter_mask(CPUARMState *env)
1676 {
1677     return (1ULL << 31) | ((1ULL << pmu_num_counters(env)) - 1);
1678 }
1679 
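/*
 * Illustrative sketch (not part of QEMU): with a PMU that implements four
 * event counters, the mask above is bit 31 (the cycle counter) plus
 * bits [3:0], i.e. 0x8000000f. The hypothetical check below assumes such
 * a configuration purely for illustration.
 */
static inline bool example_pmu_mask_for_four_counters(CPUARMState *env)
{
    return pmu_num_counters(env) == 4 &&
           pmu_counter_mask(env) == 0x8000000fULL;
}
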
1680 #ifdef TARGET_AARCH64
1681 GDBFeature *arm_gen_dynamic_svereg_feature(CPUState *cpu, int base_reg);
1682 int aarch64_gdb_get_sve_reg(CPUState *cs, GByteArray *buf, int reg);
1683 int aarch64_gdb_set_sve_reg(CPUState *cs, uint8_t *buf, int reg);
1684 int aarch64_gdb_get_fpu_reg(CPUState *cs, GByteArray *buf, int reg);
1685 int aarch64_gdb_set_fpu_reg(CPUState *cs, uint8_t *buf, int reg);
1686 int aarch64_gdb_get_pauth_reg(CPUState *cs, GByteArray *buf, int reg);
1687 int aarch64_gdb_set_pauth_reg(CPUState *cs, uint8_t *buf, int reg);
1688 int aarch64_gdb_get_tag_ctl_reg(CPUState *cs, GByteArray *buf, int reg);
1689 int aarch64_gdb_set_tag_ctl_reg(CPUState *cs, uint8_t *buf, int reg);
1690 void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp);
1691 void arm_cpu_sme_finalize(ARMCPU *cpu, Error **errp);
1692 void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp);
1693 void arm_cpu_lpa2_finalize(ARMCPU *cpu, Error **errp);
1694 void aarch64_max_tcg_initfn(Object *obj);
1695 void aarch64_add_pauth_properties(Object *obj);
1696 void aarch64_add_sve_properties(Object *obj);
1697 void aarch64_add_sme_properties(Object *obj);
1698 #endif
1699 
1700 /* Read the CONTROL register as the MRS instruction would. */
1701 uint32_t arm_v7m_mrs_control(CPUARMState *env, uint32_t secure);
1702 
1703 /*
1704  * Return a pointer to the location where we currently store the
1705  * stack pointer for the requested security state and thread mode.
1706  * This pointer will become invalid if the CPU state is updated
1707  * such that the stack pointers are switched around (e.g. changing
1708  * the SPSEL control bit).
1709  */
1710 uint32_t *arm_v7m_get_sp_ptr(CPUARMState *env, bool secure,
1711                              bool threadmode, bool spsel);
1712 
1713 bool el_is_in_host(CPUARMState *env, int el);
1714 
1715 void aa32_max_features(ARMCPU *cpu);
1716 int exception_target_el(CPUARMState *env);
1717 bool arm_singlestep_active(CPUARMState *env);
1718 bool arm_generate_debug_exceptions(CPUARMState *env);
1719 
1720 /**
1721  * pauth_ptr_mask:
1722  * @param: parameters defining the MMU setup
1723  *
1724  * Return a mask of the address bits that contain the authentication code,
1725  * given the MMU config defined by @param.
1726  */
1727 static inline uint64_t pauth_ptr_mask(ARMVAParameters param)
1728 {
1729     int bot_pac_bit = 64 - param.tsz;
1730     int top_pac_bit = 64 - 8 * param.tbi;
1731 
1732     return MAKE_64BIT_MASK(bot_pac_bit, top_pac_bit - bot_pac_bit);
1733 }
1734 
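/*
 * Illustrative sketch (not part of QEMU): for a 48-bit VA space
 * (param.tsz == 16) the mask above covers bits [55:48] when TBI is in
 * use and bits [63:48] when it is not. The values below are assumptions
 * chosen purely to exercise the helper, and the function name is
 * hypothetical.
 */
static inline bool example_pauth_ptr_mask_48bit_va(void)
{
    ARMVAParameters with_tbi = { .tsz = 16, .tbi = true };
    ARMVAParameters no_tbi = { .tsz = 16, .tbi = false };

    return pauth_ptr_mask(with_tbi) == 0x00ff000000000000ull &&
           pauth_ptr_mask(no_tbi) == 0xffff000000000000ull;
}
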
1735 /* Add the cpreg definitions for debug related system registers */
1736 void define_debug_regs(ARMCPU *cpu);
1737 
1738 /* Effective value of MDCR_EL2 */
1739 static inline uint64_t arm_mdcr_el2_eff(CPUARMState *env)
1740 {
1741     return arm_is_el2_enabled(env) ? env->cp15.mdcr_el2 : 0;
1742 }
1743 
1744 /* Powers of 2 for sve_vq_map et al. */
1745 #define SVE_VQ_POW2_MAP                                 \
1746     ((1 << (1 - 1)) | (1 << (2 - 1)) |                  \
1747      (1 << (4 - 1)) | (1 << (8 - 1)) | (1 << (16 - 1)))
1748 
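/*
 * Illustrative sketch (not part of QEMU): bit (vq - 1) of the map above
 * is set when the vector quantum vq (vector length = vq * 128 bits) is a
 * power of two, so the constant evaluates to 0x808b. The helper name is
 * hypothetical.
 */
static inline bool example_vq_is_pow2(int vq)
{
    return (SVE_VQ_POW2_MAP >> (vq - 1)) & 1;
}
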
1749 /*
1750  * Return true if it is possible to take a fine-grained trap to EL2.
1751  */
1752 static inline bool arm_fgt_active(CPUARMState *env, int el)
1753 {
1754     /*
1755      * The Arm ARM only requires the "{E2H,TGE} != {1,1}" test for traps
1756      * that can affect EL0, but it is harmless to do the test also for
1757      * traps on registers that are only accessible at EL1 because if the test
1758      * returns true then we can't be executing at EL1 anyway.
1759      * FGT traps only happen when EL2 is enabled and EL1 is AArch64;
1760  * traps from AArch32 only happen in the case where EL0 is AArch32.
1761      */
1762     return cpu_isar_feature(aa64_fgt, env_archcpu(env)) &&
1763         el < 2 && arm_is_el2_enabled(env) &&
1764         arm_el_is_aa64(env, 1) &&
1765         (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE) &&
1766         (!arm_feature(env, ARM_FEATURE_EL3) || (env->cp15.scr_el3 & SCR_FGTEN));
1767 }
1768 
1769 void assert_hflags_rebuild_correctly(CPUARMState *env);
1770 
1771 /*
1772  * Although the ARM implementation of hardware assisted debugging
1773  * allows for different breakpoints per-core, the current GDB
1774  * interface treats them as a global pool of registers (which seems to
1775  * be the case for x86, ppc and s390). As a result we store one copy
1776  * of registers which is used for all active cores.
1777  *
1778  * Write access is serialised by virtue of the GDB protocol, which is what
1779  * performs the updates. Read access (i.e. when the values are copied to the
1780  * vCPU) is also gated by GDB's run control.
1781  *
1782  * This is not unreasonable: when debugging a kernel you usually do not
1783  * know in advance which core will eventually execute your function.
1784  */
1785 
1786 typedef struct {
1787     uint64_t bcr;
1788     uint64_t bvr;
1789 } HWBreakpoint;
1790 
1791 /*
1792  * The watchpoint registers can cover a larger area than the requested
1793  * watchpoint, so we need to store the additional information
1794  * somewhere. We also need to supply a CPUWatchpoint to the GDB stub
1795  * when the watchpoint is hit.
1796  */
1797 typedef struct {
1798     uint64_t wcr;
1799     uint64_t wvr;
1800     CPUWatchpoint details;
1801 } HWWatchpoint;
1802 
1803 /* Maximum and current break/watch point counts */
1804 extern int max_hw_bps, max_hw_wps;
1805 extern GArray *hw_breakpoints, *hw_watchpoints;
1806 
1807 #define cur_hw_wps      (hw_watchpoints->len)
1808 #define cur_hw_bps      (hw_breakpoints->len)
1809 #define get_hw_bp(i)    (&g_array_index(hw_breakpoints, HWBreakpoint, i))
1810 #define get_hw_wp(i)    (&g_array_index(hw_watchpoints, HWWatchpoint, i))
1811 
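/*
 * Illustrative sketch (not part of QEMU): the accessors above are meant
 * for linear scans of the shared arrays, e.g. the kind of lookup that
 * find_hw_breakpoint() below performs. The helper name is hypothetical.
 */
static inline bool example_pc_has_hw_breakpoint(target_ulong pc)
{
    for (unsigned i = 0; i < cur_hw_bps; i++) {
        if (get_hw_bp(i)->bvr == pc) {
            return true;
        }
    }
    return false;
}
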
1812 bool find_hw_breakpoint(CPUState *cpu, target_ulong pc);
1813 int insert_hw_breakpoint(target_ulong pc);
1814 int delete_hw_breakpoint(target_ulong pc);
1815 
1816 bool check_watchpoint_in_range(int i, target_ulong addr);
1817 CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, target_ulong addr);
1818 int insert_hw_watchpoint(target_ulong addr, target_ulong len, int type);
1819 int delete_hw_watchpoint(target_ulong addr, target_ulong len, int type);
1820 
1821 /* Return the current value of the system counter in ticks */
1822 uint64_t gt_get_countervalue(CPUARMState *env);
1823 /*
1824  * Return the currently applicable offset between the system counter
1825  * and CNTVCT_EL0 (this will be either 0 or the value of CNTVOFF_EL2).
1826  */
1827 uint64_t gt_virt_cnt_offset(CPUARMState *env);
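
/*
 * Illustrative sketch (not part of QEMU): a CNTVCT_EL0 read is the
 * system counter value minus the currently applicable virtual offset,
 * which is how the two helpers above combine. The function name is
 * hypothetical.
 */
static inline uint64_t example_read_cntvct(CPUARMState *env)
{
    return gt_get_countervalue(env) - gt_virt_cnt_offset(env);
}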
1828 #endif
1829