xref: /openbmc/qemu/target/arm/internals.h (revision feb58e3b)
1 /*
2  * QEMU ARM CPU -- internal functions and types
3  *
4  * Copyright (c) 2014 Linaro Ltd
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License
8  * as published by the Free Software Foundation; either version 2
9  * of the License, or (at your option) any later version.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, see
18  * <http://www.gnu.org/licenses/gpl-2.0.html>
19  *
20  * This header defines functions, types, etc which need to be shared
21  * between different source files within target/arm/ but which are
22  * private to it and not required by the rest of QEMU.
23  */
24 
25 #ifndef TARGET_ARM_INTERNALS_H
26 #define TARGET_ARM_INTERNALS_H
27 
28 #include "exec/breakpoint.h"
29 #include "hw/registerfields.h"
30 #include "tcg/tcg-gvec-desc.h"
31 #include "syndrome.h"
32 #include "cpu-features.h"
33 
34 /* register banks for CPU modes */
35 #define BANK_USRSYS 0
36 #define BANK_SVC    1
37 #define BANK_ABT    2
38 #define BANK_UND    3
39 #define BANK_IRQ    4
40 #define BANK_FIQ    5
41 #define BANK_HYP    6
42 #define BANK_MON    7
43 
44 static inline int arm_env_mmu_index(CPUARMState *env)
45 {
46     return EX_TBFLAG_ANY(env->hflags, MMUIDX);
47 }
48 
49 static inline bool excp_is_internal(int excp)
50 {
51     /* Return true if this exception number represents a QEMU-internal
52      * exception that will not be passed to the guest.
53      */
54     return excp == EXCP_INTERRUPT
55         || excp == EXCP_HLT
56         || excp == EXCP_DEBUG
57         || excp == EXCP_HALTED
58         || excp == EXCP_EXCEPTION_EXIT
59         || excp == EXCP_KERNEL_TRAP
60         || excp == EXCP_SEMIHOST;
61 }
62 
63 /*
64  * Default frequency for the generic timer, in Hz.
65  * ARMv8.6 and later CPUs architecturally must use a 1GHz timer; before
66  * that it was an IMPDEF choice, and QEMU initially picked 62.5MHz,
67  * which gives a 16ns tick period.
68  *
69  * We will use the back-compat value:
70  *  - for QEMU CPU types added before we standardized on 1GHz
71  *  - for versioned machine types with a version of 9.0 or earlier
72  * In any case, the machine model may override via the cntfrq property.
73  */
74 #define GTIMER_DEFAULT_HZ 1000000000
75 #define GTIMER_BACKCOMPAT_HZ 62500000
76 
77 /* Bit definitions for the v7M CONTROL register */
78 FIELD(V7M_CONTROL, NPRIV, 0, 1)
79 FIELD(V7M_CONTROL, SPSEL, 1, 1)
80 FIELD(V7M_CONTROL, FPCA, 2, 1)
81 FIELD(V7M_CONTROL, SFPA, 3, 1)
82 
83 /* Bit definitions for v7M exception return payload */
84 FIELD(V7M_EXCRET, ES, 0, 1)
85 FIELD(V7M_EXCRET, RES0, 1, 1)
86 FIELD(V7M_EXCRET, SPSEL, 2, 1)
87 FIELD(V7M_EXCRET, MODE, 3, 1)
88 FIELD(V7M_EXCRET, FTYPE, 4, 1)
89 FIELD(V7M_EXCRET, DCRS, 5, 1)
90 FIELD(V7M_EXCRET, S, 6, 1)
91 FIELD(V7M_EXCRET, RES1, 7, 25) /* including the must-be-1 prefix */
92 
93 /* Minimum value which is a magic number for exception return */
94 #define EXC_RETURN_MIN_MAGIC 0xff000000
95 /* Minimum value which is a magic number for function or exception return
96  * when using the v8M security extension
97  */
98 #define FNC_RETURN_MIN_MAGIC 0xfefffffe
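
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * how a caller might test a branch-target value against these magic ranges.
 * The example_* helper name is hypothetical.
 */
#if 0 /* usage sketch, not compiled */
static inline bool example_is_magic_return(uint32_t addr, bool v8m_secext)
{
    if (addr >= EXC_RETURN_MIN_MAGIC) {
        return true;                       /* exception-return payload */
    }
    return v8m_secext && addr >= FNC_RETURN_MIN_MAGIC;  /* v8M FNC_RETURN */
}
#endif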
99 
100 /* Bit definitions for DBGWCRn and DBGWCRn_EL1 */
101 FIELD(DBGWCR, E, 0, 1)
102 FIELD(DBGWCR, PAC, 1, 2)
103 FIELD(DBGWCR, LSC, 3, 2)
104 FIELD(DBGWCR, BAS, 5, 8)
105 FIELD(DBGWCR, HMC, 13, 1)
106 FIELD(DBGWCR, SSC, 14, 2)
107 FIELD(DBGWCR, LBN, 16, 4)
108 FIELD(DBGWCR, WT, 20, 1)
109 FIELD(DBGWCR, MASK, 24, 5)
110 FIELD(DBGWCR, SSCE, 29, 1)
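
/*
 * Illustrative sketch (editor's addition): each FIELD() above expands into
 * R_DBGWCR_<NAME>_{SHIFT,LENGTH,MASK} constants, so debug code can use the
 * FIELD_EX64()/FIELD_DP64() helpers from hw/registerfields.h. The example_*
 * helper name is hypothetical.
 */
#if 0 /* usage sketch, not compiled */
static inline void example_dbgwcr_fields(uint64_t wcr)
{
    bool enabled = FIELD_EX64(wcr, DBGWCR, E);
    int bas = FIELD_EX64(wcr, DBGWCR, BAS);        /* byte address select */
    /* set LSC to 0b11: watch both loads and stores */
    wcr = FIELD_DP64(wcr, DBGWCR, LSC, 3);
    (void)enabled; (void)bas; (void)wcr;
}
#endif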
111 
112 #define VTCR_NSW (1u << 29)
113 #define VTCR_NSA (1u << 30)
114 #define VSTCR_SW VTCR_NSW
115 #define VSTCR_SA VTCR_NSA
116 
117 /* Bit definitions for CPACR (AArch32 only) */
118 FIELD(CPACR, CP10, 20, 2)
119 FIELD(CPACR, CP11, 22, 2)
120 FIELD(CPACR, TRCDIS, 28, 1)    /* matches CPACR_EL1.TTA */
121 FIELD(CPACR, D32DIS, 30, 1)    /* up to v7; RAZ in v8 */
122 FIELD(CPACR, ASEDIS, 31, 1)
123 
124 /* Bit definitions for CPACR_EL1 (AArch64 only) */
125 FIELD(CPACR_EL1, ZEN, 16, 2)
126 FIELD(CPACR_EL1, FPEN, 20, 2)
127 FIELD(CPACR_EL1, SMEN, 24, 2)
128 FIELD(CPACR_EL1, TTA, 28, 1)   /* matches CPACR.TRCDIS */
129 
130 /* Bit definitions for HCPTR (AArch32 only) */
131 FIELD(HCPTR, TCP10, 10, 1)
132 FIELD(HCPTR, TCP11, 11, 1)
133 FIELD(HCPTR, TASE, 15, 1)
134 FIELD(HCPTR, TTA, 20, 1)
135 FIELD(HCPTR, TAM, 30, 1)       /* matches CPTR_EL2.TAM */
136 FIELD(HCPTR, TCPAC, 31, 1)     /* matches CPTR_EL2.TCPAC */
137 
138 /* Bit definitions for CPTR_EL2 (AArch64 only) */
139 FIELD(CPTR_EL2, TZ, 8, 1)      /* !E2H */
140 FIELD(CPTR_EL2, TFP, 10, 1)    /* !E2H, matches HCPTR.TCP10 */
141 FIELD(CPTR_EL2, TSM, 12, 1)    /* !E2H */
142 FIELD(CPTR_EL2, ZEN, 16, 2)    /* E2H */
143 FIELD(CPTR_EL2, FPEN, 20, 2)   /* E2H */
144 FIELD(CPTR_EL2, SMEN, 24, 2)   /* E2H */
145 FIELD(CPTR_EL2, TTA, 28, 1)
146 FIELD(CPTR_EL2, TAM, 30, 1)    /* matches HCPTR.TAM */
147 FIELD(CPTR_EL2, TCPAC, 31, 1)  /* matches HCPTR.TCPAC */
148 
149 /* Bit definitions for CPTR_EL3 (AArch64 only) */
150 FIELD(CPTR_EL3, EZ, 8, 1)
151 FIELD(CPTR_EL3, TFP, 10, 1)
152 FIELD(CPTR_EL3, ESM, 12, 1)
153 FIELD(CPTR_EL3, TTA, 20, 1)
154 FIELD(CPTR_EL3, TAM, 30, 1)
155 FIELD(CPTR_EL3, TCPAC, 31, 1)
156 
157 #define MDCR_MTPME    (1U << 28)
158 #define MDCR_TDCC     (1U << 27)
159 #define MDCR_HLP      (1U << 26)  /* MDCR_EL2 */
160 #define MDCR_SCCD     (1U << 23)  /* MDCR_EL3 */
161 #define MDCR_HCCD     (1U << 23)  /* MDCR_EL2 */
162 #define MDCR_EPMAD    (1U << 21)
163 #define MDCR_EDAD     (1U << 20)
164 #define MDCR_TTRF     (1U << 19)
165 #define MDCR_STE      (1U << 18)  /* MDCR_EL3 */
166 #define MDCR_SPME     (1U << 17)  /* MDCR_EL3 */
167 #define MDCR_HPMD     (1U << 17)  /* MDCR_EL2 */
168 #define MDCR_SDD      (1U << 16)
169 #define MDCR_SPD      (3U << 14)
170 #define MDCR_TDRA     (1U << 11)
171 #define MDCR_TDOSA    (1U << 10)
172 #define MDCR_TDA      (1U << 9)
173 #define MDCR_TDE      (1U << 8)
174 #define MDCR_HPME     (1U << 7)
175 #define MDCR_TPM      (1U << 6)
176 #define MDCR_TPMCR    (1U << 5)
177 #define MDCR_HPMN     (0x1fU)
178 
179 /* Not all of the MDCR_EL3 bits are present in the 32-bit SDCR */
180 #define SDCR_VALID_MASK (MDCR_MTPME | MDCR_TDCC | MDCR_SCCD | \
181                          MDCR_EPMAD | MDCR_EDAD | MDCR_TTRF | \
182                          MDCR_STE | MDCR_SPME | MDCR_SPD)
183 
184 #define TTBCR_N      (7U << 0) /* TTBCR.EAE==0 */
185 #define TTBCR_T0SZ   (7U << 0) /* TTBCR.EAE==1 */
186 #define TTBCR_PD0    (1U << 4)
187 #define TTBCR_PD1    (1U << 5)
188 #define TTBCR_EPD0   (1U << 7)
189 #define TTBCR_IRGN0  (3U << 8)
190 #define TTBCR_ORGN0  (3U << 10)
191 #define TTBCR_SH0    (3U << 12)
192 #define TTBCR_T1SZ   (3U << 16)
193 #define TTBCR_A1     (1U << 22)
194 #define TTBCR_EPD1   (1U << 23)
195 #define TTBCR_IRGN1  (3U << 24)
196 #define TTBCR_ORGN1  (3U << 26)
197 #define TTBCR_SH1    (1U << 28)
198 #define TTBCR_EAE    (1U << 31)
199 
200 FIELD(VTCR, T0SZ, 0, 6)
201 FIELD(VTCR, SL0, 6, 2)
202 FIELD(VTCR, IRGN0, 8, 2)
203 FIELD(VTCR, ORGN0, 10, 2)
204 FIELD(VTCR, SH0, 12, 2)
205 FIELD(VTCR, TG0, 14, 2)
206 FIELD(VTCR, PS, 16, 3)
207 FIELD(VTCR, VS, 19, 1)
208 FIELD(VTCR, HA, 21, 1)
209 FIELD(VTCR, HD, 22, 1)
210 FIELD(VTCR, HWU59, 25, 1)
211 FIELD(VTCR, HWU60, 26, 1)
212 FIELD(VTCR, HWU61, 27, 1)
213 FIELD(VTCR, HWU62, 28, 1)
214 FIELD(VTCR, NSW, 29, 1)
215 FIELD(VTCR, NSA, 30, 1)
216 FIELD(VTCR, DS, 32, 1)
217 FIELD(VTCR, SL2, 33, 1)
218 
219 #define HCRX_ENAS0    (1ULL << 0)
220 #define HCRX_ENALS    (1ULL << 1)
221 #define HCRX_ENASR    (1ULL << 2)
222 #define HCRX_FNXS     (1ULL << 3)
223 #define HCRX_FGTNXS   (1ULL << 4)
224 #define HCRX_SMPME    (1ULL << 5)
225 #define HCRX_TALLINT  (1ULL << 6)
226 #define HCRX_VINMI    (1ULL << 7)
227 #define HCRX_VFNMI    (1ULL << 8)
228 #define HCRX_CMOW     (1ULL << 9)
229 #define HCRX_MCE2     (1ULL << 10)
230 #define HCRX_MSCEN    (1ULL << 11)
231 
232 #define HPFAR_NS      (1ULL << 63)
233 
234 #define HSTR_TTEE (1 << 16)
235 #define HSTR_TJDBX (1 << 17)
236 
237 /*
238  * Depending on the value of HCR_EL2.E2H, bits 0 and 1
239  * have different bit definitions, and EL1PCTEN might be
240  * bit 0 or bit 10. We use _E2H1 and _E2H0 suffixes to
241  * disambiguate if necessary.
242  */
243 FIELD(CNTHCTL, EL0PCTEN_E2H1, 0, 1)
244 FIELD(CNTHCTL, EL0VCTEN_E2H1, 1, 1)
245 FIELD(CNTHCTL, EL1PCTEN_E2H0, 0, 1)
246 FIELD(CNTHCTL, EL1PCEN_E2H0, 1, 1)
247 FIELD(CNTHCTL, EVNTEN, 2, 1)
248 FIELD(CNTHCTL, EVNTDIR, 3, 1)
249 FIELD(CNTHCTL, EVNTI, 4, 4)
250 FIELD(CNTHCTL, EL0VTEN, 8, 1)
251 FIELD(CNTHCTL, EL0PTEN, 9, 1)
252 FIELD(CNTHCTL, EL1PCTEN_E2H1, 10, 1)
253 FIELD(CNTHCTL, EL1PTEN, 11, 1)
254 FIELD(CNTHCTL, ECV, 12, 1)
255 FIELD(CNTHCTL, EL1TVT, 13, 1)
256 FIELD(CNTHCTL, EL1TVCT, 14, 1)
257 FIELD(CNTHCTL, EL1NVPCT, 15, 1)
258 FIELD(CNTHCTL, EL1NVVCT, 16, 1)
259 FIELD(CNTHCTL, EVNTIS, 17, 1)
260 FIELD(CNTHCTL, CNTVMASK, 18, 1)
261 FIELD(CNTHCTL, CNTPMASK, 19, 1)
262 
263 /* We use a few fake FSR values for internal purposes in M profile.
264  * M profile cores don't have A/R format FSRs, but currently our
265  * get_phys_addr() code assumes A/R profile and reports failures via
266  * an A/R format FSR value. We then translate that into the proper
267  * M profile exception and FSR status bit in arm_v7m_cpu_do_interrupt().
268  * Mostly the FSR values we use for this are those defined for v7PMSA,
269  * since we share some of that codepath. A few kinds of fault are
270  * only for M profile and have no A/R equivalent, though, so we have
271  * to pick a value from the reserved range (which we never otherwise
272  * generate) to use for these.
273  * These values will never be visible to the guest.
274  */
275 #define M_FAKE_FSR_NSC_EXEC 0xf /* NS executing in S&NSC memory */
276 #define M_FAKE_FSR_SFAULT 0xe /* SecureFault INVTRAN, INVEP or AUVIOL */
277 
278 /**
279  * arm_aa32_secure_pl1_0(): Return true if in Secure PL1&0 regime
280  *
281  * Return true if the CPU is in the Secure PL1&0 translation regime.
282  * This requires that EL3 exists and is AArch32, and that we are currently
283  * Secure. If that is the case then the ARMMMUIdx_E10* indices apply, and
284  * they mean we are in EL3, not EL1.
285  */
286 static inline bool arm_aa32_secure_pl1_0(CPUARMState *env)
287 {
288     return arm_feature(env, ARM_FEATURE_EL3) &&
289         !arm_el_is_aa64(env, 3) && arm_is_secure(env);
290 }
291 
292 /**
293  * raise_exception: Raise the specified exception.
294  * Raise a guest exception with the specified value, syndrome register
295  * and target exception level. This should be called from helper functions,
296  * and never returns because we will longjmp back up to the CPU main loop.
297  */
298 G_NORETURN void raise_exception(CPUARMState *env, uint32_t excp,
299                                 uint32_t syndrome, uint32_t target_el);
300 
301 /*
302  * Similarly, but also use unwinding to restore cpu state.
303  */
304 G_NORETURN void raise_exception_ra(CPUARMState *env, uint32_t excp,
305                                    uint32_t syndrome, uint32_t target_el,
306                                    uintptr_t ra);
307 
308 /*
309  * For AArch64, map a given EL to an index in the banked_spsr array.
310  * Note that this mapping and the AArch32 mapping defined in bank_number()
311  * must agree such that the AArch64<->AArch32 SPSRs have the architecturally
312  * mandated mapping between each other.
313  */
314 static inline unsigned int aarch64_banked_spsr_index(unsigned int el)
315 {
316     static const unsigned int map[4] = {
317         [1] = BANK_SVC, /* EL1.  */
318         [2] = BANK_HYP, /* EL2.  */
319         [3] = BANK_MON, /* EL3.  */
320     };
321     assert(el >= 1 && el <= 3);
322     return map[el];
323 }
324 
325 /* Map CPU modes onto saved register banks.  */
326 static inline int bank_number(int mode)
327 {
328     switch (mode) {
329     case ARM_CPU_MODE_USR:
330     case ARM_CPU_MODE_SYS:
331         return BANK_USRSYS;
332     case ARM_CPU_MODE_SVC:
333         return BANK_SVC;
334     case ARM_CPU_MODE_ABT:
335         return BANK_ABT;
336     case ARM_CPU_MODE_UND:
337         return BANK_UND;
338     case ARM_CPU_MODE_IRQ:
339         return BANK_IRQ;
340     case ARM_CPU_MODE_FIQ:
341         return BANK_FIQ;
342     case ARM_CPU_MODE_HYP:
343         return BANK_HYP;
344     case ARM_CPU_MODE_MON:
345         return BANK_MON;
346     }
347     g_assert_not_reached();
348 }
349 
350 /**
351  * r14_bank_number: Map CPU mode onto register bank for r14
352  *
353  * Given an AArch32 CPU mode, return the index into the saved register
354  * banks to use for the R14 (LR) in that mode. This is the same as
355  * bank_number(), except for the special case of Hyp mode, where
356  * R14 is shared with USR and SYS, unlike its R13 and SPSR.
357  * This should be used as the index into env->banked_r14[], and
358  * bank_number() used for the index into env->banked_r13[] and
359  * env->banked_spsr[].
360  */
361 static inline int r14_bank_number(int mode)
362 {
363     return (mode == ARM_CPU_MODE_HYP) ? BANK_USRSYS : bank_number(mode);
364 }
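
/*
 * Illustrative sketch (editor's addition): how the two mappings are combined
 * when reading the banked SP and LR for a given AArch32 mode. The example_*
 * helper name is hypothetical.
 */
#if 0 /* usage sketch, not compiled */
static inline void example_read_banked_sp_lr(CPUARMState *env, int mode,
                                             uint32_t *sp, uint32_t *lr)
{
    *sp = env->banked_r13[bank_number(mode)];      /* R13/SPSR: bank_number() */
    *lr = env->banked_r14[r14_bank_number(mode)];  /* R14: Hyp shares USR/SYS */
}
#endif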
365 
366 void arm_cpu_register(const ARMCPUInfo *info);
367 void aarch64_cpu_register(const ARMCPUInfo *info);
368 
369 void register_cp_regs_for_features(ARMCPU *cpu);
370 void init_cpreg_list(ARMCPU *cpu);
371 
372 void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
373 void arm_translate_init(void);
374 
375 void arm_cpu_register_gdb_commands(ARMCPU *cpu);
376 void aarch64_cpu_register_gdb_commands(ARMCPU *cpu, GString *,
377                                        GPtrArray *, GPtrArray *);
378 
379 void arm_restore_state_to_opc(CPUState *cs,
380                               const TranslationBlock *tb,
381                               const uint64_t *data);
382 
383 #ifdef CONFIG_TCG
384 void arm_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);
385 
386 /* Our implementation of TCGCPUOps::cpu_exec_halt */
387 bool arm_cpu_exec_halt(CPUState *cs);
388 #endif /* CONFIG_TCG */
389 
390 typedef enum ARMFPRounding {
391     FPROUNDING_TIEEVEN,
392     FPROUNDING_POSINF,
393     FPROUNDING_NEGINF,
394     FPROUNDING_ZERO,
395     FPROUNDING_TIEAWAY,
396     FPROUNDING_ODD
397 } ARMFPRounding;
398 
399 extern const FloatRoundMode arm_rmode_to_sf_map[6];
400 
401 static inline FloatRoundMode arm_rmode_to_sf(ARMFPRounding rmode)
402 {
403     assert((unsigned)rmode < ARRAY_SIZE(arm_rmode_to_sf_map));
404     return arm_rmode_to_sf_map[rmode];
405 }
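
/*
 * Illustrative sketch (editor's addition, assumes the softfloat headers are
 * in scope): converting an ARMFPRounding value for use with a softfloat
 * float_status. The example_* helper name is hypothetical.
 */
#if 0 /* usage sketch, not compiled */
static inline void example_set_rounding(float_status *fpst, ARMFPRounding rmode)
{
    /* e.g. FPROUNDING_TIEAWAY maps to float_round_ties_away */
    set_float_rounding_mode(arm_rmode_to_sf(rmode), fpst);
}
#endif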
406 
407 static inline void aarch64_save_sp(CPUARMState *env, int el)
408 {
409     if (env->pstate & PSTATE_SP) {
410         env->sp_el[el] = env->xregs[31];
411     } else {
412         env->sp_el[0] = env->xregs[31];
413     }
414 }
415 
416 static inline void aarch64_restore_sp(CPUARMState *env, int el)
417 {
418     if (env->pstate & PSTATE_SP) {
419         env->xregs[31] = env->sp_el[el];
420     } else {
421         env->xregs[31] = env->sp_el[0];
422     }
423 }
424 
425 static inline void update_spsel(CPUARMState *env, uint32_t imm)
426 {
427     unsigned int cur_el = arm_current_el(env);
428     /* Update PSTATE SPSel bit; this requires us to update the
429      * working stack pointer in xregs[31].
430      */
431     if (!((imm ^ env->pstate) & PSTATE_SP)) {
432         return;
433     }
434     aarch64_save_sp(env, cur_el);
435     env->pstate = deposit32(env->pstate, 0, 1, imm);
436 
437     /* We rely on illegal updates to SPsel from EL0 to get trapped
438      * at translation time.
439      */
440     assert(cur_el >= 1 && cur_el <= 3);
441     aarch64_restore_sp(env, cur_el);
442 }
443 
444 /*
445  * arm_pamax
446  * @cpu: ARMCPU
447  *
448  * Returns the implementation defined bit-width of physical addresses.
449  * The ARMv8 reference manuals refer to this as PAMax().
450  */
451 unsigned int arm_pamax(ARMCPU *cpu);
452 
453 /*
454  * round_down_to_parange_index
455  * @bit_size: uint8_t
456  *
457  * Rounds the supplied bit_size down to the nearest supported ARM physical
458  * address range size and returns the index of that range. The index is
459  * intended to be used to set ID_AA64MMFR0_EL1's PARANGE bits.
460  */
461 uint8_t round_down_to_parange_index(uint8_t bit_size);
462 
463 /*
464  * round_down_to_parange_bit_size
465  * @bit_size: uint8_t
466  *
467  * Rounds the supplied bit_size down to the nearest supported ARM physical
468  * address range bit size and returns it.
469  */
470 uint8_t round_down_to_parange_bit_size(uint8_t bit_size);
471 
472 /* Return true if extended addresses are enabled.
473  * This is always the case if our translation regime is 64 bit,
474  * but depends on TTBCR.EAE for 32 bit.
475  */
476 static inline bool extended_addresses_enabled(CPUARMState *env)
477 {
478     uint64_t tcr = env->cp15.tcr_el[arm_is_secure(env) ? 3 : 1];
479     if (arm_feature(env, ARM_FEATURE_PMSA) &&
480         arm_feature(env, ARM_FEATURE_V8)) {
481         return true;
482     }
483     return arm_el_is_aa64(env, 1) ||
484            (arm_feature(env, ARM_FEATURE_LPAE) && (tcr & TTBCR_EAE));
485 }
486 
487 /* Update a QEMU watchpoint based on the information the guest has set in the
488  * DBGWCR<n>_EL1 and DBGWVR<n>_EL1 registers.
489  */
490 void hw_watchpoint_update(ARMCPU *cpu, int n);
491 /* Update the QEMU watchpoints for every guest watchpoint. This does a
492  * complete delete-and-reinstate of the QEMU watchpoint list and so is
493  * suitable for use after migration or on reset.
494  */
495 void hw_watchpoint_update_all(ARMCPU *cpu);
496 /* Update a QEMU breakpoint based on the information the guest has set in the
497  * DBGBCR<n>_EL1 and DBGBVR<n>_EL1 registers.
498  */
499 void hw_breakpoint_update(ARMCPU *cpu, int n);
500 /* Update the QEMU breakpoints for every guest breakpoint. This does a
501  * complete delete-and-reinstate of the QEMU breakpoint list and so is
502  * suitable for use after migration or on reset.
503  */
504 void hw_breakpoint_update_all(ARMCPU *cpu);
505 
506 /* Callback function for checking if a breakpoint should trigger. */
507 bool arm_debug_check_breakpoint(CPUState *cs);
508 
509 /* Callback function for checking if a watchpoint should trigger. */
510 bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp);
511 
512 /* Adjust addresses (in BE32 mode) before testing against watchpoint
513  * addresses.
514  */
515 vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len);
516 
517 /* Callback function for when a watchpoint or breakpoint triggers. */
518 void arm_debug_excp_handler(CPUState *cs);
519 
520 #if defined(CONFIG_USER_ONLY) || !defined(CONFIG_TCG)
521 static inline bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
522 {
523     return false;
524 }
525 static inline void arm_handle_psci_call(ARMCPU *cpu)
526 {
527     g_assert_not_reached();
528 }
529 #else
530 /* Return true if the r0/x0 value indicates that this SMC/HVC is a PSCI call. */
531 bool arm_is_psci_call(ARMCPU *cpu, int excp_type);
532 /* Actually handle a PSCI call */
533 void arm_handle_psci_call(ARMCPU *cpu);
534 #endif
535 
536 /**
537  * arm_clear_exclusive: clear the exclusive monitor
538  * @env: CPU env
539  * Clear the CPU's exclusive monitor, like the guest CLREX instruction.
540  */
541 static inline void arm_clear_exclusive(CPUARMState *env)
542 {
543     env->exclusive_addr = -1;
544 }
545 
546 /**
547  * ARMFaultType: type of an ARM MMU fault
548  * This corresponds to the v8A pseudocode's Fault enumeration,
549  * with extensions for QEMU internal conditions.
550  */
551 typedef enum ARMFaultType {
552     ARMFault_None,
553     ARMFault_AccessFlag,
554     ARMFault_Alignment,
555     ARMFault_Background,
556     ARMFault_Domain,
557     ARMFault_Permission,
558     ARMFault_Translation,
559     ARMFault_AddressSize,
560     ARMFault_SyncExternal,
561     ARMFault_SyncExternalOnWalk,
562     ARMFault_SyncParity,
563     ARMFault_SyncParityOnWalk,
564     ARMFault_AsyncParity,
565     ARMFault_AsyncExternal,
566     ARMFault_Debug,
567     ARMFault_TLBConflict,
568     ARMFault_UnsuppAtomicUpdate,
569     ARMFault_Lockdown,
570     ARMFault_Exclusive,
571     ARMFault_ICacheMaint,
572     ARMFault_QEMU_NSCExec, /* v8M: NS executing in S&NSC memory */
573     ARMFault_QEMU_SFault, /* v8M: SecureFault INVTRAN, INVEP or AUVIOL */
574     ARMFault_GPCFOnWalk,
575     ARMFault_GPCFOnOutput,
576 } ARMFaultType;
577 
578 typedef enum ARMGPCF {
579     GPCF_None,
580     GPCF_AddressSize,
581     GPCF_Walk,
582     GPCF_EABT,
583     GPCF_Fail,
584 } ARMGPCF;
585 
586 /**
587  * ARMMMUFaultInfo: Information describing an ARM MMU Fault
588  * @type: Type of fault
589  * @gpcf: Subtype of ARMFault_GPCFOn{Walk,Output}.
590  * @level: Table walk level (for translation, access flag and permission faults)
591  * @domain: Domain of the fault address (for non-LPAE CPUs only)
592  * @s2addr: Address that caused a fault at stage 2
593  * @paddr: physical address that caused a fault for gpc
594  * @paddr_space: physical address space that caused a fault for gpc
595  * @stage2: True if we faulted at stage 2
596  * @s1ptw: True if we faulted at stage 2 while doing a stage 1 page-table walk
597  * @s1ns: True if we faulted on a non-secure IPA while in secure state
598  * @ea: True if we should set the EA (external abort type) bit in syndrome
599  */
600 typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;
601 struct ARMMMUFaultInfo {
602     ARMFaultType type;
603     ARMGPCF gpcf;
604     target_ulong s2addr;
605     target_ulong paddr;
606     ARMSecuritySpace paddr_space;
607     int level;
608     int domain;
609     bool stage2;
610     bool s1ptw;
611     bool s1ns;
612     bool ea;
613 };
614 
615 /**
616  * arm_fi_to_sfsc: Convert fault info struct to short-format FSC
617  * Compare pseudocode EncodeSDFSC(), though unlike that function
618  * we set up a whole FSR-format code, including the domain field, and
619  * put the high bit of the FSC into bit 10.
620  */
621 static inline uint32_t arm_fi_to_sfsc(ARMMMUFaultInfo *fi)
622 {
623     uint32_t fsc;
624 
625     switch (fi->type) {
626     case ARMFault_None:
627         return 0;
628     case ARMFault_AccessFlag:
629         fsc = fi->level == 1 ? 0x3 : 0x6;
630         break;
631     case ARMFault_Alignment:
632         fsc = 0x1;
633         break;
634     case ARMFault_Permission:
635         fsc = fi->level == 1 ? 0xd : 0xf;
636         break;
637     case ARMFault_Domain:
638         fsc = fi->level == 1 ? 0x9 : 0xb;
639         break;
640     case ARMFault_Translation:
641         fsc = fi->level == 1 ? 0x5 : 0x7;
642         break;
643     case ARMFault_SyncExternal:
644         fsc = 0x8 | (fi->ea << 12);
645         break;
646     case ARMFault_SyncExternalOnWalk:
647         fsc = fi->level == 1 ? 0xc : 0xe;
648         fsc |= (fi->ea << 12);
649         break;
650     case ARMFault_SyncParity:
651         fsc = 0x409;
652         break;
653     case ARMFault_SyncParityOnWalk:
654         fsc = fi->level == 1 ? 0x40c : 0x40e;
655         break;
656     case ARMFault_AsyncParity:
657         fsc = 0x408;
658         break;
659     case ARMFault_AsyncExternal:
660         fsc = 0x406 | (fi->ea << 12);
661         break;
662     case ARMFault_Debug:
663         fsc = 0x2;
664         break;
665     case ARMFault_TLBConflict:
666         fsc = 0x400;
667         break;
668     case ARMFault_Lockdown:
669         fsc = 0x404;
670         break;
671     case ARMFault_Exclusive:
672         fsc = 0x405;
673         break;
674     case ARMFault_ICacheMaint:
675         fsc = 0x4;
676         break;
677     case ARMFault_Background:
678         fsc = 0x0;
679         break;
680     case ARMFault_QEMU_NSCExec:
681         fsc = M_FAKE_FSR_NSC_EXEC;
682         break;
683     case ARMFault_QEMU_SFault:
684         fsc = M_FAKE_FSR_SFAULT;
685         break;
686     default:
687         /* Other faults can't occur in a context that requires a
688          * short-format status code.
689          */
690         g_assert_not_reached();
691     }
692 
693     fsc |= (fi->domain << 4);
694     return fsc;
695 }
696 
697 /**
698  * arm_fi_to_lfsc: Convert fault info struct to long-format FSC
699  * Compare pseudocode EncodeLDFSC(), though unlike that function
700  * we also fill in bit 9 (the LPAE format bit) of a DFSR-format value.
701  */
702 static inline uint32_t arm_fi_to_lfsc(ARMMMUFaultInfo *fi)
703 {
704     uint32_t fsc;
705 
706     switch (fi->type) {
707     case ARMFault_None:
708         return 0;
709     case ARMFault_AddressSize:
710         assert(fi->level >= -1 && fi->level <= 3);
711         if (fi->level < 0) {
712             fsc = 0b101001;
713         } else {
714             fsc = fi->level;
715         }
716         break;
717     case ARMFault_AccessFlag:
718         assert(fi->level >= 0 && fi->level <= 3);
719         fsc = 0b001000 | fi->level;
720         break;
721     case ARMFault_Permission:
722         assert(fi->level >= 0 && fi->level <= 3);
723         fsc = 0b001100 | fi->level;
724         break;
725     case ARMFault_Translation:
726         assert(fi->level >= -1 && fi->level <= 3);
727         if (fi->level < 0) {
728             fsc = 0b101011;
729         } else {
730             fsc = 0b000100 | fi->level;
731         }
732         break;
733     case ARMFault_SyncExternal:
734         fsc = 0x10 | (fi->ea << 12);
735         break;
736     case ARMFault_SyncExternalOnWalk:
737         assert(fi->level >= -1 && fi->level <= 3);
738         if (fi->level < 0) {
739             fsc = 0b010011;
740         } else {
741             fsc = 0b010100 | fi->level;
742         }
743         fsc |= fi->ea << 12;
744         break;
745     case ARMFault_SyncParity:
746         fsc = 0x18;
747         break;
748     case ARMFault_SyncParityOnWalk:
749         assert(fi->level >= -1 && fi->level <= 3);
750         if (fi->level < 0) {
751             fsc = 0b011011;
752         } else {
753             fsc = 0b011100 | fi->level;
754         }
755         break;
756     case ARMFault_AsyncParity:
757         fsc = 0x19;
758         break;
759     case ARMFault_AsyncExternal:
760         fsc = 0x11 | (fi->ea << 12);
761         break;
762     case ARMFault_Alignment:
763         fsc = 0x21;
764         break;
765     case ARMFault_Debug:
766         fsc = 0x22;
767         break;
768     case ARMFault_TLBConflict:
769         fsc = 0x30;
770         break;
771     case ARMFault_UnsuppAtomicUpdate:
772         fsc = 0x31;
773         break;
774     case ARMFault_Lockdown:
775         fsc = 0x34;
776         break;
777     case ARMFault_Exclusive:
778         fsc = 0x35;
779         break;
780     case ARMFault_GPCFOnWalk:
781         assert(fi->level >= -1 && fi->level <= 3);
782         if (fi->level < 0) {
783             fsc = 0b100011;
784         } else {
785             fsc = 0b100100 | fi->level;
786         }
787         break;
788     case ARMFault_GPCFOnOutput:
789         fsc = 0b101000;
790         break;
791     default:
792         /* Other faults can't occur in a context that requires a
793          * long-format status code.
794          */
795         g_assert_not_reached();
796     }
797 
798     fsc |= 1 << 9;
799     return fsc;
800 }
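
/*
 * Illustrative sketch (editor's addition): encoding a stage 1 level-2
 * translation fault in both the short and the long DFSR formats. The
 * example_* helper name is hypothetical.
 */
#if 0 /* usage sketch, not compiled */
static inline void example_encode_translation_fault(void)
{
    ARMMMUFaultInfo fi = {
        .type = ARMFault_Translation,
        .level = 2,
        .domain = 0,
    };
    uint32_t sfsc = arm_fi_to_sfsc(&fi);   /* 0x07 (level 2 translation) */
    uint32_t lfsc = arm_fi_to_lfsc(&fi);   /* 0b000110 | (1 << 9) */
    (void)sfsc; (void)lfsc;
}
#endif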
801 
802 static inline bool arm_extabort_type(MemTxResult result)
803 {
804     /* The EA bit in syndromes and fault status registers is an
805      * IMPDEF classification of external aborts. ARM implementations
806      * usually use this to indicate AXI bus Decode error (0) or
807      * Slave error (1); in QEMU we follow that.
808      */
809     return result != MEMTX_DECODE_ERROR;
810 }
811 
812 #ifdef CONFIG_USER_ONLY
813 void arm_cpu_record_sigsegv(CPUState *cpu, vaddr addr,
814                             MMUAccessType access_type,
815                             bool maperr, uintptr_t ra);
816 void arm_cpu_record_sigbus(CPUState *cpu, vaddr addr,
817                            MMUAccessType access_type, uintptr_t ra);
818 #else
819 bool arm_cpu_tlb_fill_align(CPUState *cs, CPUTLBEntryFull *out, vaddr addr,
820                             MMUAccessType access_type, int mmu_idx,
821                             MemOp memop, int size, bool probe, uintptr_t ra);
822 #endif
823 
824 static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
825 {
826     return mmu_idx & ARM_MMU_IDX_COREIDX_MASK;
827 }
828 
829 static inline ARMMMUIdx core_to_arm_mmu_idx(CPUARMState *env, int mmu_idx)
830 {
831     if (arm_feature(env, ARM_FEATURE_M)) {
832         return mmu_idx | ARM_MMU_IDX_M;
833     } else {
834         return mmu_idx | ARM_MMU_IDX_A;
835     }
836 }
837 
838 static inline ARMMMUIdx core_to_aa64_mmu_idx(int mmu_idx)
839 {
840     /* AArch64 is always a-profile. */
841     return mmu_idx | ARM_MMU_IDX_A;
842 }
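
/*
 * Illustrative sketch (editor's addition): the core MMU index stored in the
 * TB flags round-trips through these helpers. The example_* helper name is
 * hypothetical.
 */
#if 0 /* usage sketch, not compiled */
static inline void example_mmu_idx_roundtrip(CPUARMState *env)
{
    ARMMMUIdx idx = arm_mmu_idx(env);      /* e.g. ARMMMUIdx_E10_1 */
    int core = arm_to_core_mmu_idx(idx);   /* index as seen by the TLB */
    assert(core_to_arm_mmu_idx(env, core) == idx);
}
#endif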
843 
844 /**
845  * Return the exception level we're running at if our current MMU index
846  * is @mmu_idx. @s_pl1_0 should be true if this is the AArch32
847  * Secure PL1&0 translation regime.
848  */
849 int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx, bool s_pl1_0);
850 
851 /* Return the MMU index for a v7M CPU in the specified security state */
852 ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate);
853 
854 /*
855  * Return true if the stage 1 translation regime is using LPAE
856  * format page tables
857  */
858 bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx);
859 
860 /* Raise a data fault alignment exception for the specified virtual address */
861 G_NORETURN void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
862                                             MMUAccessType access_type,
863                                             int mmu_idx, uintptr_t retaddr);
864 
865 #ifndef CONFIG_USER_ONLY
866 /* arm_cpu_do_transaction_failed: handle a memory system error response
867  * (e.g. "no device/memory present at address") by raising an external abort
868  * exception
869  */
870 void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
871                                    vaddr addr, unsigned size,
872                                    MMUAccessType access_type,
873                                    int mmu_idx, MemTxAttrs attrs,
874                                    MemTxResult response, uintptr_t retaddr);
875 #endif
876 
877 /* Call any registered EL change hooks */
878 static inline void arm_call_pre_el_change_hook(ARMCPU *cpu)
879 {
880     ARMELChangeHook *hook, *next;
881     QLIST_FOREACH_SAFE(hook, &cpu->pre_el_change_hooks, node, next) {
882         hook->hook(cpu, hook->opaque);
883     }
884 }
885 static inline void arm_call_el_change_hook(ARMCPU *cpu)
886 {
887     ARMELChangeHook *hook, *next;
888     QLIST_FOREACH_SAFE(hook, &cpu->el_change_hooks, node, next) {
889         hook->hook(cpu, hook->opaque);
890     }
891 }
892 
893 /* Return true if this address translation regime has two ranges.  */
894 static inline bool regime_has_2_ranges(ARMMMUIdx mmu_idx)
895 {
896     switch (mmu_idx) {
897     case ARMMMUIdx_Stage1_E0:
898     case ARMMMUIdx_Stage1_E1:
899     case ARMMMUIdx_Stage1_E1_PAN:
900     case ARMMMUIdx_E10_0:
901     case ARMMMUIdx_E10_1:
902     case ARMMMUIdx_E10_1_PAN:
903     case ARMMMUIdx_E20_0:
904     case ARMMMUIdx_E20_2:
905     case ARMMMUIdx_E20_2_PAN:
906         return true;
907     default:
908         return false;
909     }
910 }
911 
912 static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx)
913 {
914     switch (mmu_idx) {
915     case ARMMMUIdx_Stage1_E1_PAN:
916     case ARMMMUIdx_E10_1_PAN:
917     case ARMMMUIdx_E20_2_PAN:
918         return true;
919     default:
920         return false;
921     }
922 }
923 
924 static inline bool regime_is_stage2(ARMMMUIdx mmu_idx)
925 {
926     return mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S;
927 }
928 
929 /* Return the exception level which controls this address translation regime */
930 static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
931 {
932     switch (mmu_idx) {
933     case ARMMMUIdx_E20_0:
934     case ARMMMUIdx_E20_2:
935     case ARMMMUIdx_E20_2_PAN:
936     case ARMMMUIdx_Stage2:
937     case ARMMMUIdx_Stage2_S:
938     case ARMMMUIdx_E2:
939         return 2;
940     case ARMMMUIdx_E3:
941         return 3;
942     case ARMMMUIdx_E10_0:
943     case ARMMMUIdx_Stage1_E0:
944     case ARMMMUIdx_E10_1:
945     case ARMMMUIdx_E10_1_PAN:
946     case ARMMMUIdx_Stage1_E1:
947     case ARMMMUIdx_Stage1_E1_PAN:
948         return arm_el_is_aa64(env, 3) || !arm_is_secure_below_el3(env) ? 1 : 3;
949     case ARMMMUIdx_MPrivNegPri:
950     case ARMMMUIdx_MUserNegPri:
951     case ARMMMUIdx_MPriv:
952     case ARMMMUIdx_MUser:
953     case ARMMMUIdx_MSPrivNegPri:
954     case ARMMMUIdx_MSUserNegPri:
955     case ARMMMUIdx_MSPriv:
956     case ARMMMUIdx_MSUser:
957         return 1;
958     default:
959         g_assert_not_reached();
960     }
961 }
962 
963 static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
964 {
965     switch (mmu_idx) {
966     case ARMMMUIdx_E10_0:
967     case ARMMMUIdx_E20_0:
968     case ARMMMUIdx_Stage1_E0:
969     case ARMMMUIdx_MUser:
970     case ARMMMUIdx_MSUser:
971     case ARMMMUIdx_MUserNegPri:
972     case ARMMMUIdx_MSUserNegPri:
973         return true;
974     default:
975         return false;
976     }
977 }
978 
979 /* Return the SCTLR value which controls this address translation regime */
980 static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
981 {
982     return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
983 }
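
/*
 * Illustrative sketch (editor's addition): regime_el() and regime_sctlr()
 * combine to test a per-regime SCTLR control bit. The example_* helper name
 * is hypothetical.
 */
#if 0 /* usage sketch, not compiled */
static inline bool example_regime_alignment_check(CPUARMState *env,
                                                  ARMMMUIdx mmu_idx)
{
    /* SCTLR_ELx.A enables alignment checking for the regime */
    return regime_sctlr(env, mmu_idx) & SCTLR_A;
}
#endif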
984 
985 /*
986  * These are the fields in VTCR_EL2 which affect both the Secure stage 2
987  * and the Non-Secure stage 2 translation regimes (and hence which are
988  * not present in VSTCR_EL2).
989  */
990 #define VTCR_SHARED_FIELD_MASK \
991     (R_VTCR_IRGN0_MASK | R_VTCR_ORGN0_MASK | R_VTCR_SH0_MASK | \
992      R_VTCR_PS_MASK | R_VTCR_VS_MASK | R_VTCR_HA_MASK | R_VTCR_HD_MASK | \
993      R_VTCR_DS_MASK)
994 
995 /* Return the value of the TCR controlling this translation regime */
996 static inline uint64_t regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
997 {
998     if (mmu_idx == ARMMMUIdx_Stage2) {
999         return env->cp15.vtcr_el2;
1000     }
1001     if (mmu_idx == ARMMMUIdx_Stage2_S) {
1002         /*
1003          * Secure stage 2 shares fields from VTCR_EL2. We merge those
1004          * in with the VSTCR_EL2 value to synthesize a single VTCR_EL2 format
1005          * value so the callers don't need to special case this.
1006          *
1007          * If a future architecture change defines bits in VSTCR_EL2 that
1008          * overlap with these VTCR_EL2 fields we may need to revisit this.
1009          */
1010         uint64_t v = env->cp15.vstcr_el2 & ~VTCR_SHARED_FIELD_MASK;
1011         v |= env->cp15.vtcr_el2 & VTCR_SHARED_FIELD_MASK;
1012         return v;
1013     }
1014     return env->cp15.tcr_el[regime_el(env, mmu_idx)];
1015 }
1016 
1017 /* Return true if the translation regime is using LPAE format page tables */
1018 static inline bool regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
1019 {
1020     int el = regime_el(env, mmu_idx);
1021     if (el == 2 || arm_el_is_aa64(env, el)) {
1022         return true;
1023     }
1024     if (arm_feature(env, ARM_FEATURE_PMSA) &&
1025         arm_feature(env, ARM_FEATURE_V8)) {
1026         return true;
1027     }
1028     if (arm_feature(env, ARM_FEATURE_LPAE)
1029         && (regime_tcr(env, mmu_idx) & TTBCR_EAE)) {
1030         return true;
1031     }
1032     return false;
1033 }
1034 
1035 /**
1036  * arm_num_brps: Return number of implemented breakpoints.
1037  * Note that the ID register BRPS field is "number of bps - 1",
1038  * and we return the actual number of breakpoints.
1039  */
1040 static inline int arm_num_brps(ARMCPU *cpu)
1041 {
1042     if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
1043         return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, BRPS) + 1;
1044     } else {
1045         return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, BRPS) + 1;
1046     }
1047 }
1048 
1049 /**
1050  * arm_num_wrps: Return number of implemented watchpoints.
1051  * Note that the ID register WRPS field is "number of wps - 1",
1052  * and we return the actual number of watchpoints.
1053  */
1054 static inline int arm_num_wrps(ARMCPU *cpu)
1055 {
1056     if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
1057         return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, WRPS) + 1;
1058     } else {
1059         return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, WRPS) + 1;
1060     }
1061 }
1062 
1063 /**
1064  * arm_num_ctx_cmps: Return number of implemented context comparators.
1065  * Note that the ID register CTX_CMPS field is "number of cmps - 1",
1066  * and we return the actual number of comparators.
1067  */
1068 static inline int arm_num_ctx_cmps(ARMCPU *cpu)
1069 {
1070     if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
1071         return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS) + 1;
1072     } else {
1073         return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, CTX_CMPS) + 1;
1074     }
1075 }
1076 
1077 /**
1078  * v7m_using_psp: Return true if using process stack pointer
1079  * Return true if the CPU is currently using the process stack
1080  * pointer, or false if it is using the main stack pointer.
1081  */
1082 static inline bool v7m_using_psp(CPUARMState *env)
1083 {
1084     /* Handler mode always uses the main stack; for thread mode
1085      * the CONTROL.SPSEL bit determines the answer.
1086      * Note that in v7M it is not possible to be in Handler mode with
1087      * CONTROL.SPSEL non-zero, but in v8M it is, so we must check both.
1088      */
1089     return !arm_v7m_is_handler_mode(env) &&
1090         env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK;
1091 }
1092 
1093 /**
1094  * v7m_sp_limit: Return SP limit for current CPU state
1095  * Return the SP limit value for the current CPU security state
1096  * and stack pointer.
1097  */
1098 static inline uint32_t v7m_sp_limit(CPUARMState *env)
1099 {
1100     if (v7m_using_psp(env)) {
1101         return env->v7m.psplim[env->v7m.secure];
1102     } else {
1103         return env->v7m.msplim[env->v7m.secure];
1104     }
1105 }
1106 
1107 /**
1108  * v7m_cpacr_pass:
1109  * Return true if the v7M CPACR permits access to the FPU for the specified
1110  * security state and privilege level.
1111  */
1112 static inline bool v7m_cpacr_pass(CPUARMState *env,
1113                                   bool is_secure, bool is_priv)
1114 {
1115     switch (extract32(env->v7m.cpacr[is_secure], 20, 2)) {
1116     case 0:
1117     case 2: /* UNPREDICTABLE: we treat like 0 */
1118         return false;
1119     case 1:
1120         return is_priv;
1121     case 3:
1122         return true;
1123     default:
1124         g_assert_not_reached();
1125     }
1126 }
1127 
1128 /**
1129  * aarch32_mode_name(): Return name of the AArch32 CPU mode
1130  * @psr: Program Status Register indicating CPU mode
1131  *
1132  * Returns, for debug logging purposes, a printable representation
1133  * of the AArch32 CPU mode ("svc", "usr", etc) as indicated by
1134  * the low bits of the specified PSR.
1135  */
1136 static inline const char *aarch32_mode_name(uint32_t psr)
1137 {
1138     static const char cpu_mode_names[16][4] = {
1139         "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
1140         "???", "???", "hyp", "und", "???", "???", "???", "sys"
1141     };
1142 
1143     return cpu_mode_names[psr & 0xf];
1144 }
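
/*
 * Illustrative sketch (editor's addition, assumes qemu/log.h is in scope):
 * typical debug-logging use of aarch32_mode_name(). The example_* helper
 * name is hypothetical.
 */
#if 0 /* usage sketch, not compiled */
static inline void example_log_mode(CPUARMState *env)
{
    qemu_log_mask(CPU_LOG_INT, "CPU now in %s mode\n",
                  aarch32_mode_name(env->uncached_cpsr));
}
#endif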
1145 
1146 /**
1147  * arm_cpu_update_virq: Update CPU_INTERRUPT_VIRQ bit in cs->interrupt_request
1148  *
1149  * Update the CPU_INTERRUPT_VIRQ bit in cs->interrupt_request, following
1150  * a change to either the input VIRQ line from the GIC or the HCR_EL2.VI bit.
1151  * Must be called with the BQL held.
1152  */
1153 void arm_cpu_update_virq(ARMCPU *cpu);
1154 
1155 /**
1156  * arm_cpu_update_vfiq: Update CPU_INTERRUPT_VFIQ bit in cs->interrupt_request
1157  *
1158  * Update the CPU_INTERRUPT_VFIQ bit in cs->interrupt_request, following
1159  * a change to either the input VFIQ line from the GIC or the HCR_EL2.VF bit.
1160  * Must be called with the BQL held.
1161  */
1162 void arm_cpu_update_vfiq(ARMCPU *cpu);
1163 
1164 /**
1165  * arm_cpu_update_vinmi: Update CPU_INTERRUPT_VINMI bit in cs->interrupt_request
1166  *
1167  * Update the CPU_INTERRUPT_VINMI bit in cs->interrupt_request, following
1168  * a change to either the input VNMI line from the GIC or the HCRX_EL2.VINMI bit.
1169  * Must be called with the BQL held.
1170  */
1171 void arm_cpu_update_vinmi(ARMCPU *cpu);
1172 
1173 /**
1174  * arm_cpu_update_vfnmi: Update CPU_INTERRUPT_VFNMI bit in cs->interrupt_request
1175  *
1176  * Update the CPU_INTERRUPT_VFNMI bit in cs->interrupt_request, following
1177  * a change to the HCRX_EL2.VFNMI bit.
1178  * Must be called with the BQL held.
1179  */
1180 void arm_cpu_update_vfnmi(ARMCPU *cpu);
1181 
1182 /**
1183  * arm_cpu_update_vserr: Update CPU_INTERRUPT_VSERR bit
1184  *
1185  * Update the CPU_INTERRUPT_VSERR bit in cs->interrupt_request,
1186  * following a change to the HCR_EL2.VSE bit.
1187  */
1188 void arm_cpu_update_vserr(ARMCPU *cpu);
1189 
1190 /**
1191  * arm_mmu_idx_el:
1192  * @env: The cpu environment
1193  * @el: The EL to use.
1194  *
1195  * Return the full ARMMMUIdx for the translation regime for EL.
1196  */
1197 ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el);
1198 
1199 /**
1200  * arm_mmu_idx:
1201  * @env: The cpu environment
1202  *
1203  * Return the full ARMMMUIdx for the current translation regime.
1204  */
1205 ARMMMUIdx arm_mmu_idx(CPUARMState *env);
1206 
1207 /**
1208  * arm_stage1_mmu_idx:
1209  * @env: The cpu environment
1210  *
1211  * Return the ARMMMUIdx for the stage1 traversal for the current regime.
1212  */
1213 #ifdef CONFIG_USER_ONLY
1214 static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
1215 {
1216     return ARMMMUIdx_Stage1_E0;
1217 }
1218 static inline ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
1219 {
1220     return ARMMMUIdx_Stage1_E0;
1221 }
1222 #else
1223 ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx);
1224 ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env);
1225 #endif
1226 
1227 /**
1228  * arm_mmu_idx_is_stage1_of_2:
1229  * @mmu_idx: The ARMMMUIdx to test
1230  *
1231  * Return true if @mmu_idx is a NOTLB mmu_idx that is the
1232  * first stage of a two stage regime.
1233  */
1234 static inline bool arm_mmu_idx_is_stage1_of_2(ARMMMUIdx mmu_idx)
1235 {
1236     switch (mmu_idx) {
1237     case ARMMMUIdx_Stage1_E0:
1238     case ARMMMUIdx_Stage1_E1:
1239     case ARMMMUIdx_Stage1_E1_PAN:
1240         return true;
1241     default:
1242         return false;
1243     }
1244 }
1245 
1246 static inline uint32_t aarch32_cpsr_valid_mask(uint64_t features,
1247                                                const ARMISARegisters *id)
1248 {
1249     uint32_t valid = CPSR_M | CPSR_AIF | CPSR_IL | CPSR_NZCV;
1250 
1251     if ((features >> ARM_FEATURE_V4T) & 1) {
1252         valid |= CPSR_T;
1253     }
1254     if ((features >> ARM_FEATURE_V5) & 1) {
1255         valid |= CPSR_Q; /* V5TE in reality */
1256     }
1257     if ((features >> ARM_FEATURE_V6) & 1) {
1258         valid |= CPSR_E | CPSR_GE;
1259     }
1260     if ((features >> ARM_FEATURE_THUMB2) & 1) {
1261         valid |= CPSR_IT;
1262     }
1263     if (isar_feature_aa32_jazelle(id)) {
1264         valid |= CPSR_J;
1265     }
1266     if (isar_feature_aa32_pan(id)) {
1267         valid |= CPSR_PAN;
1268     }
1269     if (isar_feature_aa32_dit(id)) {
1270         valid |= CPSR_DIT;
1271     }
1272     if (isar_feature_aa32_ssbs(id)) {
1273         valid |= CPSR_SSBS;
1274     }
1275 
1276     return valid;
1277 }
1278 
1279 static inline uint32_t aarch64_pstate_valid_mask(const ARMISARegisters *id)
1280 {
1281     uint32_t valid;
1282 
1283     valid = PSTATE_M | PSTATE_DAIF | PSTATE_IL | PSTATE_SS | PSTATE_NZCV;
1284     if (isar_feature_aa64_bti(id)) {
1285         valid |= PSTATE_BTYPE;
1286     }
1287     if (isar_feature_aa64_pan(id)) {
1288         valid |= PSTATE_PAN;
1289     }
1290     if (isar_feature_aa64_uao(id)) {
1291         valid |= PSTATE_UAO;
1292     }
1293     if (isar_feature_aa64_dit(id)) {
1294         valid |= PSTATE_DIT;
1295     }
1296     if (isar_feature_aa64_ssbs(id)) {
1297         valid |= PSTATE_SSBS;
1298     }
1299     if (isar_feature_aa64_mte(id)) {
1300         valid |= PSTATE_TCO;
1301     }
1302     if (isar_feature_aa64_nmi(id)) {
1303         valid |= PSTATE_ALLINT;
1304     }
1305 
1306     return valid;
1307 }
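
/*
 * Illustrative sketch (editor's addition): the valid-mask helpers are used to
 * strip bits that are not valid for the CPU when an SPSR value is written
 * back to PSTATE. The example_* helper name is hypothetical.
 */
#if 0 /* usage sketch, not compiled */
static inline uint32_t example_sanitize_spsr(CPUARMState *env, uint32_t spsr)
{
    ARMCPU *cpu = env_archcpu(env);
    return spsr & aarch64_pstate_valid_mask(&cpu->isar);
}
#endif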
1308 
1309 /* Granule size (i.e. page size) */
1310 typedef enum ARMGranuleSize {
1311     /* Same order as TG0 encoding */
1312     Gran4K,
1313     Gran64K,
1314     Gran16K,
1315     GranInvalid,
1316 } ARMGranuleSize;
1317 
1318 /**
1319  * arm_granule_bits: Return address size of the granule in bits
1320  *
1321  * Return the address size of the granule in bits. This corresponds
1322  * to the pseudocode TGxGranuleBits().
1323  */
1324 static inline int arm_granule_bits(ARMGranuleSize gran)
1325 {
1326     switch (gran) {
1327     case Gran64K:
1328         return 16;
1329     case Gran16K:
1330         return 14;
1331     case Gran4K:
1332         return 12;
1333     default:
1334         g_assert_not_reached();
1335     }
1336 }
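
/*
 * Illustrative sketch (editor's addition): deriving the page size in bytes
 * from a granule value. The example_* helper name is hypothetical.
 */
#if 0 /* usage sketch, not compiled */
static inline uint64_t example_granule_bytes(ARMGranuleSize gran)
{
    return 1ULL << arm_granule_bits(gran);   /* Gran16K -> 16384 */
}
#endif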
1337 
1338 /*
1339  * Parameters of a given virtual address, as extracted from the
1340  * translation control register (TCR) for a given regime.
1341  */
1342 typedef struct ARMVAParameters {
1343     unsigned tsz    : 8;
1344     unsigned ps     : 3;
1345     unsigned sh     : 2;
1346     unsigned select : 1;
1347     bool tbi        : 1;
1348     bool epd        : 1;
1349     bool hpd        : 1;
1350     bool tsz_oob    : 1;  /* tsz has been clamped to legal range */
1351     bool ds         : 1;
1352     bool ha         : 1;
1353     bool hd         : 1;
1354     ARMGranuleSize gran : 2;
1355 } ARMVAParameters;
1356 
1357 /**
1358  * aa64_va_parameters: Return parameters for an AArch64 virtual address
1359  * @env: CPU
1360  * @va: virtual address to look up
1361  * @mmu_idx: determines translation regime to use
1362  * @data: true if this is a data access
1363  * @el1_is_aa32: true if we are asking about stage 2 when EL1 is AArch32
1364  *  (ignored if @mmu_idx is for a stage 1 regime; only affects tsz/tsz_oob)
1365  */
1366 ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
1367                                    ARMMMUIdx mmu_idx, bool data,
1368                                    bool el1_is_aa32);
1369 
1370 int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx);
1371 int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx);
1372 int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx);
1373 
1374 /* Determine if allocation tags are available.  */
1375 static inline bool allocation_tag_access_enabled(CPUARMState *env, int el,
1376                                                  uint64_t sctlr)
1377 {
1378     if (el < 3
1379         && arm_feature(env, ARM_FEATURE_EL3)
1380         && !(env->cp15.scr_el3 & SCR_ATA)) {
1381         return false;
1382     }
1383     if (el < 2 && arm_is_el2_enabled(env)) {
1384         uint64_t hcr = arm_hcr_el2_eff(env);
1385         if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
1386             return false;
1387         }
1388     }
1389     sctlr &= (el == 0 ? SCTLR_ATA0 : SCTLR_ATA);
1390     return sctlr != 0;
1391 }
1392 
1393 #ifndef CONFIG_USER_ONLY
1394 
1395 /* Security attributes for an address, as returned by v8m_security_lookup. */
1396 typedef struct V8M_SAttributes {
1397     bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */
1398     bool ns;
1399     bool nsc;
1400     uint8_t sregion;
1401     bool srvalid;
1402     uint8_t iregion;
1403     bool irvalid;
1404 } V8M_SAttributes;
1405 
1406 void v8m_security_lookup(CPUARMState *env, uint32_t address,
1407                          MMUAccessType access_type, ARMMMUIdx mmu_idx,
1408                          bool secure, V8M_SAttributes *sattrs);
1409 
1410 /* Cacheability and shareability attributes for a memory access */
1411 typedef struct ARMCacheAttrs {
1412     /*
1413      * If is_s2_format is true, attrs is the S2 descriptor bits [5:2]
1414      * Otherwise, attrs is the same as the MAIR_EL1 8-bit format
1415      */
1416     unsigned int attrs:8;
1417     unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
1418     bool is_s2_format:1;
1419 } ARMCacheAttrs;
1420 
1421 /* Fields that are valid upon success. */
1422 typedef struct GetPhysAddrResult {
1423     CPUTLBEntryFull f;
1424     ARMCacheAttrs cacheattrs;
1425 } GetPhysAddrResult;
1426 
1427 /**
1428  * get_phys_addr: get the physical address for a virtual address
1429  * @env: CPUARMState
1430  * @address: virtual address to get physical address for
1431  * @access_type: 0 for read, 1 for write, 2 for execute
1432  * @memop: memory operation feeding this access, or 0 for none
1433  * @mmu_idx: MMU index indicating required translation regime
1434  * @result: set on translation success.
1435  * @fi: set to fault info if the translation fails
1436  *
1437  * Find the physical address corresponding to the given virtual address,
1438  * by doing a translation table walk on MMU based systems or using the
1439  * MPU state on MPU based systems.
1440  *
1441  * Returns false if the translation was successful. Otherwise, @result may
1442  * not be filled in, and @fi describes why the translation aborted; the fault
1443  * information can be reported in the format of a
1444  * DFSR/IFSR fault register, with the following caveats:
1445  *  * we honour the short vs long DFSR format differences.
1446  *  * the WnR bit is never set (the caller must do this).
1447  *  * for PMSAv5 based systems we don't bother to return a full FSR format
1448  *    value.
1449  */
1450 bool get_phys_addr(CPUARMState *env, vaddr address,
1451                    MMUAccessType access_type, MemOp memop, ARMMMUIdx mmu_idx,
1452                    GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
1453     __attribute__((nonnull));
1454 
1455 /**
1456  * get_phys_addr_with_space_nogpc: get the physical address for a virtual
1457  *                                 address
1458  * @env: CPUARMState
1459  * @address: virtual address to get physical address for
1460  * @access_type: 0 for read, 1 for write, 2 for execute
1461  * @memop: memory operation feeding this access, or 0 for none
1462  * @mmu_idx: MMU index indicating required translation regime
1463  * @space: security space for the access
1464  * @result: set on translation success.
1465  * @fi: set to fault info if the translation fails
1466  *
1467  * Similar to get_phys_addr, but use the given security space and don't perform
1468  * a Granule Protection Check on the resulting address.
1469  */
1470 bool get_phys_addr_with_space_nogpc(CPUARMState *env, vaddr address,
1471                                     MMUAccessType access_type, MemOp memop,
1472                                     ARMMMUIdx mmu_idx, ARMSecuritySpace space,
1473                                     GetPhysAddrResult *result,
1474                                     ARMMMUFaultInfo *fi)
1475     __attribute__((nonnull));
1476 
1477 bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
1478                        MMUAccessType access_type, ARMMMUIdx mmu_idx,
1479                        bool is_secure, GetPhysAddrResult *result,
1480                        ARMMMUFaultInfo *fi, uint32_t *mregion);
1481 
1482 void arm_log_exception(CPUState *cs);
1483 
1484 #endif /* !CONFIG_USER_ONLY */
1485 
1486 /*
1487  * SVE predicates are 1/8 the size of SVE vectors, and cannot use
1488  * the same simd_desc() encoding due to restrictions on size.
1489  * Use these instead.
1490  */
1491 FIELD(PREDDESC, OPRSZ, 0, 6)
1492 FIELD(PREDDESC, ESZ, 6, 2)
1493 FIELD(PREDDESC, DATA, 8, 24)
1494 
1495 /*
1496  * The SVE simd_data field, for memory ops, contains either
1497  * rd (5 bits) or a shift count (2 bits).
1498  */
1499 #define SVE_MTEDESC_SHIFT 5
1500 
1501 /* Bits within a descriptor passed to the helper_mte_check* functions. */
1502 FIELD(MTEDESC, MIDX,  0, 4)
1503 FIELD(MTEDESC, TBI,   4, 2)
1504 FIELD(MTEDESC, TCMA,  6, 2)
1505 FIELD(MTEDESC, WRITE, 8, 1)
1506 FIELD(MTEDESC, ALIGN, 9, 3)
1507 FIELD(MTEDESC, SIZEM1, 12, SIMD_DATA_BITS - SVE_MTEDESC_SHIFT - 12)  /* size - 1 */
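
/*
 * Illustrative sketch (editor's addition): how a descriptor word might be
 * packed with FIELD_DP32() before being passed to an mte_check* helper.
 * The example_* helper name is hypothetical.
 */
#if 0 /* usage sketch, not compiled */
static inline uint32_t example_make_mtedesc(int mmu_idx, bool is_write, int size)
{
    uint32_t desc = 0;
    desc = FIELD_DP32(desc, MTEDESC, MIDX, mmu_idx);
    desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
    desc = FIELD_DP32(desc, MTEDESC, SIZEM1, size - 1);
    return desc;
}
#endif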
1508 
1509 bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr);
1510 uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra);
1511 
1512 /**
1513  * mte_mops_probe: Check where the next MTE failure is for a FEAT_MOPS operation
1514  * @env: CPU env
1515  * @ptr: start address of memory region (dirty pointer)
1516  * @size: length of region (guaranteed not to cross a page boundary)
1517  * @desc: MTEDESC descriptor word (0 means no MTE checks)
1518  * Returns: the size of the region that can be copied without hitting
1519  *          an MTE tag failure
1520  *
1521  * Note that we assume that the caller has already checked the TBI
1522  * and TCMA bits with mte_checks_needed() and that an MTE check is
1523  * definitely required.
1524  */
1525 uint64_t mte_mops_probe(CPUARMState *env, uint64_t ptr, uint64_t size,
1526                         uint32_t desc);
1527 
1528 /**
1529  * mte_mops_probe_rev: Check where the next MTE failure is for a FEAT_MOPS
1530  *                     operation going in the reverse direction
1531  * @env: CPU env
1532  * @ptr: *end* address of memory region (dirty pointer)
1533  * @size: length of region (guaranteed not to cross a page boundary)
1534  * @desc: MTEDESC descriptor word (0 means no MTE checks)
1535  * Returns: the size of the region that can be copied without hitting
1536  *          an MTE tag failure
1537  *
1538  * Note that we assume that the caller has already checked the TBI
1539  * and TCMA bits with mte_checks_needed() and that an MTE check is
1540  * definitely required.
1541  */
1542 uint64_t mte_mops_probe_rev(CPUARMState *env, uint64_t ptr, uint64_t size,
1543                             uint32_t desc);
1544 
1545 /**
1546  * mte_check_fail: Record an MTE tag check failure
1547  * @env: CPU env
1548  * @desc: MTEDESC descriptor word
1549  * @dirty_ptr: Failing dirty address
1550  * @ra: TCG retaddr
1551  *
1552  * This may never return (if the MTE tag checks are configured to fault).
1553  */
1554 void mte_check_fail(CPUARMState *env, uint32_t desc,
1555                     uint64_t dirty_ptr, uintptr_t ra);
1556 
1557 /**
1558  * mte_mops_set_tags: Set MTE tags for a portion of a FEAT_MOPS operation
1559  * @env: CPU env
1560  * @dirty_ptr: Start address of memory region (dirty pointer)
1561  * @size: length of region (guaranteed not to cross page boundary)
1562  * @desc: MTEDESC descriptor word
1563  */
1564 void mte_mops_set_tags(CPUARMState *env, uint64_t dirty_ptr, uint64_t size,
1565                        uint32_t desc);
1566 
1567 static inline int allocation_tag_from_addr(uint64_t ptr)
1568 {
1569     return extract64(ptr, 56, 4);
1570 }
1571 
1572 static inline uint64_t address_with_allocation_tag(uint64_t ptr, int rtag)
1573 {
1574     return deposit64(ptr, 56, 4, rtag);
1575 }
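
/*
 * Illustrative sketch (editor's addition): round-tripping an allocation tag
 * through the top byte of a pointer. The example_* helper name is
 * hypothetical.
 */
#if 0 /* usage sketch, not compiled */
static inline void example_tag_roundtrip(uint64_t ptr)
{
    uint64_t tagged = address_with_allocation_tag(ptr, 0xa);
    assert(allocation_tag_from_addr(tagged) == 0xa);
}
#endif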
1576 
1577 /* Return true if tbi bits mean that the access is checked.  */
1578 static inline bool tbi_check(uint32_t desc, int bit55)
1579 {
1580     return (desc >> (R_MTEDESC_TBI_SHIFT + bit55)) & 1;
1581 }
1582 
1583 /* Return true if tcma bits mean that the access is unchecked.  */
1584 static inline bool tcma_check(uint32_t desc, int bit55, int ptr_tag)
1585 {
1586     /*
1587      * We had extracted bit55 and ptr_tag for other reasons, so fold
1588      * (ptr<59:55> == 00000 || ptr<59:55> == 11111) into a single test.
1589      */
1590     bool match = ((ptr_tag + bit55) & 0xf) == 0;
1591     bool tcma = (desc >> (R_MTEDESC_TCMA_SHIFT + bit55)) & 1;
1592     return tcma && match;
1593 }
1594 
1595 /*
1596  * For TBI, ideally, we would do nothing.  Proper behaviour on fault is
1597  * for the tag to be present in the FAR_ELx register.  But for user-only
1598  * mode, we do not have a TLB with which to implement this, so we must
1599  * remove the top byte.
1600  */
1601 static inline uint64_t useronly_clean_ptr(uint64_t ptr)
1602 {
1603 #ifdef CONFIG_USER_ONLY
1604     /* TBI0 is known to be enabled, while TBI1 is disabled. */
1605     ptr &= sextract64(ptr, 0, 56);
1606 #endif
1607     return ptr;
1608 }
1609 
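/*
 * Worked example for the AND above (illustrative only): with bit 55
 * clear (a TBI0/user address), sextract64(ptr, 0, 56) has a zero top
 * byte, so the AND strips the top byte of ptr; with bit 55 set (TBI1,
 * which is disabled here), the sign extension makes the top byte 0xff
 * and the AND leaves ptr unchanged.
 */
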
1610 static inline uint64_t useronly_maybe_clean_ptr(uint32_t desc, uint64_t ptr)
1611 {
1612 #ifdef CONFIG_USER_ONLY
1613     int64_t clean_ptr = sextract64(ptr, 0, 56);
1614     if (tbi_check(desc, clean_ptr < 0)) {
1615         ptr = clean_ptr;
1616     }
1617 #endif
1618     return ptr;
1619 }
1620 
1621 /* Values for M-profile PSR.ECI for MVE insns */
1622 enum MVEECIState {
1623     ECI_NONE = 0, /* No completed beats */
1624     ECI_A0 = 1, /* Completed: A0 */
1625     ECI_A0A1 = 2, /* Completed: A0, A1 */
1626     /* 3 is reserved */
1627     ECI_A0A1A2 = 4, /* Completed: A0, A1, A2 */
1628     ECI_A0A1A2B0 = 5, /* Completed: A0, A1, A2, B0 */
1629     /* All other values reserved */
1630 };
1631 
1632 /* Definitions for the PMU registers */
1633 #define PMCRN_MASK  0xf800
1634 #define PMCRN_SHIFT 11
1635 #define PMCRLP  0x80
1636 #define PMCRLC  0x40
1637 #define PMCRDP  0x20
1638 #define PMCRX   0x10
1639 #define PMCRD   0x8
1640 #define PMCRC   0x4
1641 #define PMCRP   0x2
1642 #define PMCRE   0x1
1643 /*
1644  * Mask of PMCR bits writable by guest (not including WO bits like C, P,
1645  * which can be written as 1 to trigger behaviour but which stay RAZ).
1646  */
1647 #define PMCR_WRITABLE_MASK (PMCRLP | PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE)
1648 
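/*
 * Illustrative sketch (hypothetical, not the real PMCR write handler):
 * a guest write may only change the bits in PMCR_WRITABLE_MASK; the
 * write-only bits such as PMCRC and PMCRP are acted upon but never
 * stored.
 */
static inline uint64_t example_pmcr_apply_write(uint64_t old_val,
                                                uint64_t new_val)
{
    return (old_val & ~(uint64_t)PMCR_WRITABLE_MASK)
         | (new_val & PMCR_WRITABLE_MASK);
}
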
1649 #define PMXEVTYPER_P          0x80000000
1650 #define PMXEVTYPER_U          0x40000000
1651 #define PMXEVTYPER_NSK        0x20000000
1652 #define PMXEVTYPER_NSU        0x10000000
1653 #define PMXEVTYPER_NSH        0x08000000
1654 #define PMXEVTYPER_M          0x04000000
1655 #define PMXEVTYPER_MT         0x02000000
1656 #define PMXEVTYPER_EVTCOUNT   0x0000ffff
1657 #define PMXEVTYPER_MASK       (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
1658                                PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
1659                                PMXEVTYPER_M | PMXEVTYPER_MT | \
1660                                PMXEVTYPER_EVTCOUNT)
1661 
1662 #define PMCCFILTR             0xf8000000
1663 #define PMCCFILTR_M           PMXEVTYPER_M
1664 #define PMCCFILTR_EL0         (PMCCFILTR | PMCCFILTR_M)
1665 
1666 static inline uint32_t pmu_num_counters(CPUARMState *env)
1667 {
1668     ARMCPU *cpu = env_archcpu(env);
1669 
1670     return (cpu->isar.reset_pmcr_el0 & PMCRN_MASK) >> PMCRN_SHIFT;
1671 }
1672 
1673 /* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
1674 static inline uint64_t pmu_counter_mask(CPUARMState *env)
1675 {
1676     return (1ULL << 31) | ((1ULL << pmu_num_counters(env)) - 1);
1677 }
1678 
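/*
 * Worked example (the PMCR.N value is assumed for illustration): a CPU
 * whose reset_pmcr_el0 reports N == 4 has event counters 0..3, so
 * pmu_counter_mask() returns 0x8000000f -- bit 31 for the cycle
 * counter plus one bit per event counter.
 */
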
1679 #ifdef TARGET_AARCH64
1680 GDBFeature *arm_gen_dynamic_svereg_feature(CPUState *cpu, int base_reg);
1681 int aarch64_gdb_get_sve_reg(CPUState *cs, GByteArray *buf, int reg);
1682 int aarch64_gdb_set_sve_reg(CPUState *cs, uint8_t *buf, int reg);
1683 int aarch64_gdb_get_fpu_reg(CPUState *cs, GByteArray *buf, int reg);
1684 int aarch64_gdb_set_fpu_reg(CPUState *cs, uint8_t *buf, int reg);
1685 int aarch64_gdb_get_pauth_reg(CPUState *cs, GByteArray *buf, int reg);
1686 int aarch64_gdb_set_pauth_reg(CPUState *cs, uint8_t *buf, int reg);
1687 int aarch64_gdb_get_tag_ctl_reg(CPUState *cs, GByteArray *buf, int reg);
1688 int aarch64_gdb_set_tag_ctl_reg(CPUState *cs, uint8_t *buf, int reg);
1689 void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp);
1690 void arm_cpu_sme_finalize(ARMCPU *cpu, Error **errp);
1691 void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp);
1692 void arm_cpu_lpa2_finalize(ARMCPU *cpu, Error **errp);
1693 void aarch64_max_tcg_initfn(Object *obj);
1694 void aarch64_add_pauth_properties(Object *obj);
1695 void aarch64_add_sve_properties(Object *obj);
1696 void aarch64_add_sme_properties(Object *obj);
1697 #endif
1698 
1699 /* Read the CONTROL register as the MRS instruction would. */
1700 uint32_t arm_v7m_mrs_control(CPUARMState *env, uint32_t secure);
1701 
1702 /*
1703  * Return a pointer to the location where we currently store the
1704  * stack pointer for the requested security state and thread mode.
1705  * This pointer will become invalid if the CPU state is updated
1706  * such that the stack pointers are switched around (e.g. changing
1707  * the SPSEL control bit).
1708  */
1709 uint32_t *arm_v7m_get_sp_ptr(CPUARMState *env, bool secure,
1710                              bool threadmode, bool spsel);
1711 
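/*
 * Minimal usage sketch (illustrative only): read the stack pointer
 * currently banked for Secure thread mode with SPSEL set.  The
 * returned pointer must be consumed before anything re-banks the
 * stack pointers (e.g. a write to CONTROL.SPSEL).
 */
static inline uint32_t example_read_v7m_secure_psp(CPUARMState *env)
{
    return *arm_v7m_get_sp_ptr(env, true, true, true);
}
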
1712 bool el_is_in_host(CPUARMState *env, int el);
1713 
1714 void aa32_max_features(ARMCPU *cpu);
1715 int exception_target_el(CPUARMState *env);
1716 bool arm_singlestep_active(CPUARMState *env);
1717 bool arm_generate_debug_exceptions(CPUARMState *env);
1718 
1719 /**
1720  * pauth_ptr_mask:
1721  * @param: parameters defining the MMU setup
1722  *
1723  * Return a mask of the address bits that contain the authentication code,
1724  * given the MMU config defined by @param.
1725  */
1726 static inline uint64_t pauth_ptr_mask(ARMVAParameters param)
1727 {
1728     int bot_pac_bit = 64 - param.tsz;
1729     int top_pac_bit = 64 - 8 * param.tbi;
1730 
1731     return MAKE_64BIT_MASK(bot_pac_bit, top_pac_bit - bot_pac_bit);
1732 }
1733 
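/*
 * Worked example (parameter values assumed for illustration): for a
 * 48-bit VA space (tsz == 16) with TBI enabled (tbi == 1), the PAC
 * occupies bits [55:48] and the mask is 0x00ff000000000000; with TBI
 * disabled it extends up to bit 63, giving 0xffff000000000000.
 */
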
1734 /* Add the cpreg definitions for debug related system registers */
1735 void define_debug_regs(ARMCPU *cpu);
1736 
1737 /* Effective value of MDCR_EL2 */
1738 static inline uint64_t arm_mdcr_el2_eff(CPUARMState *env)
1739 {
1740     return arm_is_el2_enabled(env) ? env->cp15.mdcr_el2 : 0;
1741 }
1742 
1743 /* Powers of 2 for sve_vq_map et al. */
1744 #define SVE_VQ_POW2_MAP                                 \
1745     ((1 << (1 - 1)) | (1 << (2 - 1)) |                  \
1746      (1 << (4 - 1)) | (1 << (8 - 1)) | (1 << (16 - 1)))
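/*
 * (Worked value, for illustration: VQs 1, 2, 4, 8 and 16 map to bits
 * 0, 1, 3, 7 and 15, so this constant evaluates to 0x808b.)
 */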
1747 
1748 /*
1749  * Return true if it is possible to take a fine-grained-trap to EL2.
1750  */
1751 static inline bool arm_fgt_active(CPUARMState *env, int el)
1752 {
1753     /*
1754      * The Arm ARM only requires the "{E2H,TGE} != {1,1}" test for traps
1755      * that can affect EL0, but it is harmless to do the test also for
1756      * traps on registers that are only accessible at EL1 because if the test
1757      * returns true then we can't be executing at EL1 anyway.
1758      * FGT traps can only be taken when EL2 is enabled and EL1 is AArch64;
1759      * traps from AArch32 can only occur when EL0 is AArch32.
1760      */
1761     return cpu_isar_feature(aa64_fgt, env_archcpu(env)) &&
1762         el < 2 && arm_is_el2_enabled(env) &&
1763         arm_el_is_aa64(env, 1) &&
1764         (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE) &&
1765         (!arm_feature(env, ARM_FEATURE_EL3) || (env->cp15.scr_el3 & SCR_FGTEN));
1766 }
1767 
1768 void assert_hflags_rebuild_correctly(CPUARMState *env);
1769 
1770 /*
1771  * Although the ARM implementation of hardware assisted debugging
1772  * allows for different breakpoints per-core, the current GDB
1773  * interface treats them as a global pool of registers (which seems to
1774  * be the case for x86, ppc and s390). As a result we store one copy
1775  * of registers which is used for all active cores.
1776  *
1777  * Write access is serialised by the GDB protocol, which performs the
1778  * updates. Read access (i.e. when the values are copied to the
1779  * vCPU) is also gated by GDB's run control.
1780  *
1781  * This is not unreasonable because, when debugging a kernel, you
1782  * usually do not know which core will eventually execute your code.
1783  */
1784 
1785 typedef struct {
1786     uint64_t bcr;
1787     uint64_t bvr;
1788 } HWBreakpoint;
1789 
1790 /*
1791  * The watchpoint registers can cover a larger address range than the
1792  * requested watchpoint, so we need to store that additional information
1793  * somewhere. We also need to supply a CPUWatchpoint to the GDB stub
1794  * when the watchpoint is hit.
1795  */
1796 typedef struct {
1797     uint64_t wcr;
1798     uint64_t wvr;
1799     CPUWatchpoint details;
1800 } HWWatchpoint;
1801 
1802 /* Maximum and current breakpoint/watchpoint counts */
1803 extern int max_hw_bps, max_hw_wps;
1804 extern GArray *hw_breakpoints, *hw_watchpoints;
1805 
1806 #define cur_hw_wps      (hw_watchpoints->len)
1807 #define cur_hw_bps      (hw_breakpoints->len)
1808 #define get_hw_bp(i)    (&g_array_index(hw_breakpoints, HWBreakpoint, i))
1809 #define get_hw_wp(i)    (&g_array_index(hw_watchpoints, HWWatchpoint, i))
1810 
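/*
 * Illustrative sketch (hypothetical, not how QEMU searches the pool):
 * walk the global breakpoint array with the accessors above, looking
 * for a BVR that matches @pc exactly.  Assumes hw_breakpoints has
 * already been allocated.
 */
static inline bool example_bp_pool_contains(target_ulong pc)
{
    for (unsigned i = 0; i < cur_hw_bps; i++) {
        if (get_hw_bp(i)->bvr == pc) {
            return true;
        }
    }
    return false;
}
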
1811 bool find_hw_breakpoint(CPUState *cpu, target_ulong pc);
1812 int insert_hw_breakpoint(target_ulong pc);
1813 int delete_hw_breakpoint(target_ulong pc);
1814 
1815 bool check_watchpoint_in_range(int i, target_ulong addr);
1816 CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, target_ulong addr);
1817 int insert_hw_watchpoint(target_ulong addr, target_ulong len, int type);
1818 int delete_hw_watchpoint(target_ulong addr, target_ulong len, int type);
1819 
1820 /* Return the current value of the system counter in ticks */
1821 uint64_t gt_get_countervalue(CPUARMState *env);
1822 /*
1823  * Return the currently applicable offset between the system counter
1824  * and CNTVCT_EL0 (this will be either 0 or the value of CNTVOFF_EL2).
1825  */
1826 uint64_t gt_virt_cnt_offset(CPUARMState *env);
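
/*
 * Illustrative sketch (hypothetical helper, not part of QEMU): the value
 * the guest reads from CNTVCT_EL0 is the system counter minus whatever
 * offset currently applies (0 or CNTVOFF_EL2).
 */
static inline uint64_t example_cntvct_el0(CPUARMState *env)
{
    return gt_get_countervalue(env) - gt_virt_cnt_offset(env);
}
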
1827 #endif
1828