/*
 * QEMU ARM CPU -- internal functions and types
 *
 * Copyright (c) 2014 Linaro Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This header defines functions, types, etc which need to be shared
 * between different source files within target/arm/ but which are
 * private to it and not required by the rest of QEMU.
 */

#ifndef TARGET_ARM_INTERNALS_H
#define TARGET_ARM_INTERNALS_H

#include "exec/hwaddr.h"
#include "exec/vaddr.h"
#include "exec/breakpoint.h"
#include "accel/tcg/tb-cpu-state.h"
#include "hw/registerfields.h"
#include "tcg/tcg-gvec-desc.h"
#include "system/memory.h"
#include "syndrome.h"
#include "cpu-features.h"
#include "mmuidx-internal.h"

/* register banks for CPU modes */
#define BANK_USRSYS 0
#define BANK_SVC    1
#define BANK_ABT    2
#define BANK_UND    3
#define BANK_IRQ    4
#define BANK_FIQ    5
#define BANK_HYP    6
#define BANK_MON    7

static inline int arm_env_mmu_index(CPUARMState *env)
{
    return EX_TBFLAG_ANY(env->hflags, MMUIDX);
}

static inline bool excp_is_internal(int excp)
{
    /* Return true if this exception number represents a QEMU-internal
     * exception that will not be passed to the guest.
     */
    return excp == EXCP_INTERRUPT
        || excp == EXCP_HLT
        || excp == EXCP_DEBUG
        || excp == EXCP_HALTED
        || excp == EXCP_EXCEPTION_EXIT
        || excp == EXCP_KERNEL_TRAP
        || excp == EXCP_SEMIHOST;
}

/*
 * Default frequency for the generic timer, in Hz.
 * ARMv8.6 and later CPUs architecturally must use a 1GHz timer; before
 * that it was an IMPDEF choice, and QEMU initially picked 62.5MHz,
 * which gives a 16ns tick period.
 *
 * We will use the back-compat value:
 *  - for QEMU CPU types added before we standardized on 1GHz
 *  - for versioned machine types with a version of 9.0 or earlier
 * In any case, the machine model may override via the cntfrq property.
 */
#define GTIMER_DEFAULT_HZ 1000000000
#define GTIMER_BACKCOMPAT_HZ 62500000
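
/*
 * Illustrative sketch (not upstream code): a board model could select the
 * back-compat frequency by setting the CPU's "cntfrq" property before
 * realize, e.g.:
 *
 *   qdev_prop_set_uint64(DEVICE(cpu), "cntfrq", GTIMER_BACKCOMPAT_HZ);
 */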

/* Bit definitions for the v7M CONTROL register */
FIELD(V7M_CONTROL, NPRIV, 0, 1)
FIELD(V7M_CONTROL, SPSEL, 1, 1)
FIELD(V7M_CONTROL, FPCA, 2, 1)
FIELD(V7M_CONTROL, SFPA, 3, 1)

/* Bit definitions for v7M exception return payload */
FIELD(V7M_EXCRET, ES, 0, 1)
FIELD(V7M_EXCRET, RES0, 1, 1)
FIELD(V7M_EXCRET, SPSEL, 2, 1)
FIELD(V7M_EXCRET, MODE, 3, 1)
FIELD(V7M_EXCRET, FTYPE, 4, 1)
FIELD(V7M_EXCRET, DCRS, 5, 1)
FIELD(V7M_EXCRET, S, 6, 1)
FIELD(V7M_EXCRET, RES1, 7, 25) /* including the must-be-1 prefix */

/* Minimum value which is a magic number for exception return */
#define EXC_RETURN_MIN_MAGIC 0xff000000
/* Minimum value which is a magic number for function or exception return
 * when using the v8M security extension
 */
#define FNC_RETURN_MIN_MAGIC 0xfefffffe
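
/*
 * Illustrative sketch (not upstream code) of how these values are used:
 * an M-profile branch target at or above FNC_RETURN_MIN_MAGIC is treated
 * as a magic return rather than a normal jump, e.g.:
 *
 *   if (dest >= EXC_RETURN_MIN_MAGIC) {
 *       ... handle exception return ...
 *   } else if (dest >= FNC_RETURN_MIN_MAGIC) {
 *       ... v8M only: handle FNC_RETURN function return ...
 *   }
 */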

/* Bit definitions for DBGWCRn and DBGWCRn_EL1 */
FIELD(DBGWCR, E, 0, 1)
FIELD(DBGWCR, PAC, 1, 2)
FIELD(DBGWCR, LSC, 3, 2)
FIELD(DBGWCR, BAS, 5, 8)
FIELD(DBGWCR, HMC, 13, 1)
FIELD(DBGWCR, SSC, 14, 2)
FIELD(DBGWCR, LBN, 16, 4)
FIELD(DBGWCR, WT, 20, 1)
FIELD(DBGWCR, MASK, 24, 5)
FIELD(DBGWCR, SSCE, 29, 1)

/* Bit definitions for CPACR (AArch32 only) */
FIELD(CPACR, CP10, 20, 2)
FIELD(CPACR, CP11, 22, 2)
FIELD(CPACR, TRCDIS, 28, 1)    /* matches CPACR_EL1.TTA */
FIELD(CPACR, D32DIS, 30, 1)    /* up to v7; RAZ in v8 */
FIELD(CPACR, ASEDIS, 31, 1)

/* Bit definitions for CPACR_EL1 (AArch64 only) */
FIELD(CPACR_EL1, ZEN, 16, 2)
FIELD(CPACR_EL1, FPEN, 20, 2)
FIELD(CPACR_EL1, SMEN, 24, 2)
FIELD(CPACR_EL1, TTA, 28, 1)   /* matches CPACR.TRCDIS */

/* Bit definitions for HCPTR (AArch32 only) */
FIELD(HCPTR, TCP10, 10, 1)
FIELD(HCPTR, TCP11, 11, 1)
FIELD(HCPTR, TASE, 15, 1)
FIELD(HCPTR, TTA, 20, 1)
FIELD(HCPTR, TAM, 30, 1)       /* matches CPTR_EL2.TAM */
FIELD(HCPTR, TCPAC, 31, 1)     /* matches CPTR_EL2.TCPAC */

/* Bit definitions for CPTR_EL2 (AArch64 only) */
FIELD(CPTR_EL2, TZ, 8, 1)      /* !E2H */
FIELD(CPTR_EL2, TFP, 10, 1)    /* !E2H, matches HCPTR.TCP10 */
FIELD(CPTR_EL2, TSM, 12, 1)    /* !E2H */
FIELD(CPTR_EL2, ZEN, 16, 2)    /* E2H */
FIELD(CPTR_EL2, FPEN, 20, 2)   /* E2H */
FIELD(CPTR_EL2, SMEN, 24, 2)   /* E2H */
FIELD(CPTR_EL2, TTA, 28, 1)
FIELD(CPTR_EL2, TAM, 30, 1)    /* matches HCPTR.TAM */
FIELD(CPTR_EL2, TCPAC, 31, 1)  /* matches HCPTR.TCPAC */

/* Bit definitions for CPTR_EL3 (AArch64 only) */
FIELD(CPTR_EL3, EZ, 8, 1)
FIELD(CPTR_EL3, TFP, 10, 1)
FIELD(CPTR_EL3, ESM, 12, 1)
FIELD(CPTR_EL3, TTA, 20, 1)
FIELD(CPTR_EL3, TAM, 30, 1)
FIELD(CPTR_EL3, TCPAC, 31, 1)

#define MDCR_MTPME    (1U << 28)
#define MDCR_TDCC     (1U << 27)
#define MDCR_HLP      (1U << 26)  /* MDCR_EL2 */
#define MDCR_SCCD     (1U << 23)  /* MDCR_EL3 */
#define MDCR_HCCD     (1U << 23)  /* MDCR_EL2 */
#define MDCR_EPMAD    (1U << 21)
#define MDCR_EDAD     (1U << 20)
#define MDCR_TTRF     (1U << 19)
#define MDCR_STE      (1U << 18)  /* MDCR_EL3 */
#define MDCR_SPME     (1U << 17)  /* MDCR_EL3 */
#define MDCR_HPMD     (1U << 17)  /* MDCR_EL2 */
#define MDCR_SDD      (1U << 16)
#define MDCR_SPD      (3U << 14)
#define MDCR_TDRA     (1U << 11)
#define MDCR_TDOSA    (1U << 10)
#define MDCR_TDA      (1U << 9)
#define MDCR_TDE      (1U << 8)
#define MDCR_HPME     (1U << 7)
#define MDCR_TPM      (1U << 6)
#define MDCR_TPMCR    (1U << 5)
#define MDCR_HPMN     (0x1fU)

/* Not all of the MDCR_EL3 bits are present in the 32-bit SDCR */
#define SDCR_VALID_MASK (MDCR_MTPME | MDCR_TDCC | MDCR_SCCD | \
                         MDCR_EPMAD | MDCR_EDAD | MDCR_TTRF | \
                         MDCR_STE | MDCR_SPME | MDCR_SPD)

#define TTBCR_N      (7U << 0) /* TTBCR.EAE==0 */
#define TTBCR_T0SZ   (7U << 0) /* TTBCR.EAE==1 */
#define TTBCR_PD0    (1U << 4)
#define TTBCR_PD1    (1U << 5)
#define TTBCR_EPD0   (1U << 7)
#define TTBCR_IRGN0  (3U << 8)
#define TTBCR_ORGN0  (3U << 10)
#define TTBCR_SH0    (3U << 12)
#define TTBCR_T1SZ   (3U << 16)
#define TTBCR_A1     (1U << 22)
#define TTBCR_EPD1   (1U << 23)
#define TTBCR_IRGN1  (3U << 24)
#define TTBCR_ORGN1  (3U << 26)
#define TTBCR_SH1    (1U << 28)
#define TTBCR_EAE    (1U << 31)

#define TCR2_PNCH    (1ULL << 0)
#define TCR2_PIE     (1ULL << 1)
#define TCR2_E0POE   (1ULL << 2)
#define TCR2_POE     (1ULL << 3)
#define TCR2_AIE     (1ULL << 4)
#define TCR2_D128    (1ULL << 5)
#define TCR2_PTTWI   (1ULL << 10)
#define TCR2_HAFT    (1ULL << 11)
#define TCR2_AMEC0   (1ULL << 12)
#define TCR2_AMEC1   (1ULL << 13)
#define TCR2_DISCH0  (1ULL << 14)
#define TCR2_DISCH1  (1ULL << 15)
#define TCR2_A2      (1ULL << 16)
#define TCR2_FNG0    (1ULL << 17)
#define TCR2_FNG1    (1ULL << 18)
#define TCR2_FNGNA0  (1ULL << 20)
#define TCR2_FNGNA1  (1ULL << 21)

FIELD(VTCR, T0SZ, 0, 6)
FIELD(VTCR, SL0, 6, 2)
FIELD(VTCR, IRGN0, 8, 2)
FIELD(VTCR, ORGN0, 10, 2)
FIELD(VTCR, SH0, 12, 2)
FIELD(VTCR, TG0, 14, 2)
FIELD(VTCR, PS, 16, 3)
FIELD(VTCR, VS, 19, 1)
FIELD(VTCR, HA, 21, 1)
FIELD(VTCR, HD, 22, 1)
FIELD(VTCR, HWU59, 25, 1)
FIELD(VTCR, HWU60, 26, 1)
FIELD(VTCR, HWU61, 27, 1)
FIELD(VTCR, HWU62, 28, 1)
FIELD(VTCR, NSW, 29, 1)
FIELD(VTCR, NSA, 30, 1)
FIELD(VTCR, DS, 32, 1)
FIELD(VTCR, SL2, 33, 1)

FIELD(VSTCR, SW, 29, 1)
FIELD(VSTCR, SA, 30, 1)

#define HCRX_ENAS0    (1ULL << 0)
#define HCRX_ENALS    (1ULL << 1)
#define HCRX_ENASR    (1ULL << 2)
#define HCRX_FNXS     (1ULL << 3)
#define HCRX_FGTNXS   (1ULL << 4)
#define HCRX_SMPME    (1ULL << 5)
#define HCRX_TALLINT  (1ULL << 6)
#define HCRX_VINMI    (1ULL << 7)
#define HCRX_VFNMI    (1ULL << 8)
#define HCRX_CMOW     (1ULL << 9)
#define HCRX_MCE2     (1ULL << 10)
#define HCRX_MSCEN    (1ULL << 11)
#define HCRX_TCR2EN   (1ULL << 14)
#define HCRX_SCTLR2EN (1ULL << 15)
#define HCRX_GCSEN    (1ULL << 22)

#define HPFAR_NS      (1ULL << 63)

#define HSTR_TTEE (1 << 16)
#define HSTR_TJDBX (1 << 17)

/*
 * Depending on the value of HCR_EL2.E2H, bits 0 and 1
 * have different bit definitions, and EL1PCTEN might be
 * bit 0 or bit 10. We use _E2H1 and _E2H0 suffixes to
 * disambiguate if necessary.
 */
FIELD(CNTHCTL, EL0PCTEN_E2H1, 0, 1)
FIELD(CNTHCTL, EL0VCTEN_E2H1, 1, 1)
FIELD(CNTHCTL, EL1PCTEN_E2H0, 0, 1)
FIELD(CNTHCTL, EL1PCEN_E2H0, 1, 1)
FIELD(CNTHCTL, EVNTEN, 2, 1)
FIELD(CNTHCTL, EVNTDIR, 3, 1)
FIELD(CNTHCTL, EVNTI, 4, 4)
FIELD(CNTHCTL, EL0VTEN, 8, 1)
FIELD(CNTHCTL, EL0PTEN, 9, 1)
FIELD(CNTHCTL, EL1PCTEN_E2H1, 10, 1)
FIELD(CNTHCTL, EL1PTEN, 11, 1)
FIELD(CNTHCTL, ECV, 12, 1)
FIELD(CNTHCTL, EL1TVT, 13, 1)
FIELD(CNTHCTL, EL1TVCT, 14, 1)
FIELD(CNTHCTL, EL1NVPCT, 15, 1)
FIELD(CNTHCTL, EL1NVVCT, 16, 1)
FIELD(CNTHCTL, EVNTIS, 17, 1)
FIELD(CNTHCTL, CNTVMASK, 18, 1)
FIELD(CNTHCTL, CNTPMASK, 19, 1)

/* We use a few fake FSR values for internal purposes in M profile.
 * M profile cores don't have A/R format FSRs, but currently our
 * get_phys_addr() code assumes A/R profile and reports failures via
 * an A/R format FSR value. We then translate that into the proper
 * M profile exception and FSR status bit in arm_v7m_cpu_do_interrupt().
 * Mostly the FSR values we use for this are those defined for v7PMSA,
 * since we share some of that codepath. A few kinds of fault are
 * only for M profile and have no A/R equivalent, though, so we have
 * to pick a value from the reserved range (which we never otherwise
 * generate) to use for these.
 * These values will never be visible to the guest.
 */
#define M_FAKE_FSR_NSC_EXEC 0xf /* NS executing in S&NSC memory */
#define M_FAKE_FSR_SFAULT 0xe /* SecureFault INVTRAN, INVEP or AUVIOL */

/**
 * raise_exception: Raise the specified exception.
 * Raise a guest exception with the specified value, syndrome register
 * and target exception level. This should be called from helper functions,
 * and never returns because we will longjump back up to the CPU main loop.
 */
G_NORETURN void raise_exception(CPUARMState *env, uint32_t excp,
                                uint64_t syndrome, uint32_t target_el);

/*
 * Similarly, but also use unwinding to restore cpu state.
 */
G_NORETURN void raise_exception_ra(CPUARMState *env, uint32_t excp,
                                   uint64_t syndrome, uint32_t target_el,
                                   uintptr_t ra);

/*
 * For AArch64, map a given EL to an index in the banked_spsr array.
 * Note that this mapping and the AArch32 mapping defined in bank_number()
 * must agree such that the AArch64<->AArch32 SPSRs have the architecturally
 * mandated mapping between each other.
 */
static inline unsigned int aarch64_banked_spsr_index(unsigned int el)
{
    static const unsigned int map[4] = {
        [1] = BANK_SVC, /* EL1.  */
        [2] = BANK_HYP, /* EL2.  */
        [3] = BANK_MON, /* EL3.  */
    };
    assert(el >= 1 && el <= 3);
    return map[el];
}

/* Map CPU modes onto saved register banks.  */
static inline int bank_number(int mode)
{
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
        return BANK_USRSYS;
    case ARM_CPU_MODE_SVC:
        return BANK_SVC;
    case ARM_CPU_MODE_ABT:
        return BANK_ABT;
    case ARM_CPU_MODE_UND:
        return BANK_UND;
    case ARM_CPU_MODE_IRQ:
        return BANK_IRQ;
    case ARM_CPU_MODE_FIQ:
        return BANK_FIQ;
    case ARM_CPU_MODE_HYP:
        return BANK_HYP;
    case ARM_CPU_MODE_MON:
        return BANK_MON;
    }
    g_assert_not_reached();
}

/**
 * r14_bank_number: Map CPU mode onto register bank for r14
 *
 * Given an AArch32 CPU mode, return the index into the saved register
 * banks to use for the R14 (LR) in that mode. This is the same as
 * bank_number(), except for the special case of Hyp mode, where
 * R14 is shared with USR and SYS, unlike its R13 and SPSR.
 * This should be used as the index into env->banked_r14[], and
 * bank_number() used for the index into env->banked_r13[] and
 * env->banked_spsr[].
 */
static inline int r14_bank_number(int mode)
{
    return (mode == ARM_CPU_MODE_HYP) ? BANK_USRSYS : bank_number(mode);
}
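
/*
 * Usage sketch (illustrative; sp, lr and spsr are hypothetical locals):
 * when switching AArch32 mode, banked state is indexed as described above:
 *
 *   env->banked_r13[bank_number(mode)] = sp;
 *   env->banked_r14[r14_bank_number(mode)] = lr;
 *   env->banked_spsr[bank_number(mode)] = spsr;
 */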

void arm_cpu_register(const ARMCPUInfo *info);

void arm_do_plugin_vcpu_discon_cb(CPUState *cs, uint64_t from);
void register_cp_regs_for_features(ARMCPU *cpu);
void arm_init_cpreg_list(ARMCPU *cpu);

void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
void arm_translate_init(void);
void arm_translate_code(CPUState *cs, TranslationBlock *tb,
                        int *max_insns, vaddr pc, void *host_pc);

void arm_cpu_register_gdb_commands(ARMCPU *cpu);
void aarch64_cpu_register_gdb_commands(ARMCPU *cpu, GString *,
                                       GPtrArray *, GPtrArray *);

void arm_restore_state_to_opc(CPUState *cs,
                              const TranslationBlock *tb,
                              const uint64_t *data);

#ifdef CONFIG_TCG
TCGTBCPUState arm_get_tb_cpu_state(CPUState *cs);
void arm_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);

/* Our implementation of TCGCPUOps::cpu_exec_halt */
bool arm_cpu_exec_halt(CPUState *cs);
int arm_cpu_mmu_index(CPUState *cs, bool ifetch);
#endif /* CONFIG_TCG */

typedef enum ARMFPRounding {
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
    FPROUNDING_ZERO,
    FPROUNDING_TIEAWAY,
    FPROUNDING_ODD
} ARMFPRounding;

extern const FloatRoundMode arm_rmode_to_sf_map[6];

static inline FloatRoundMode arm_rmode_to_sf(ARMFPRounding rmode)
{
    assert((unsigned)rmode < ARRAY_SIZE(arm_rmode_to_sf_map));
    return arm_rmode_to_sf_map[rmode];
}
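
/*
 * Usage sketch (illustrative): convert an architectural rounding mode to
 * the softfloat encoding before a conversion, e.g.:
 *
 *   set_float_rounding_mode(arm_rmode_to_sf(FPROUNDING_TIEAWAY), fpst);
 *
 * where "fpst" is a float_status pointer for the relevant FP context.
 */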

/* Return the effective value of SCR_EL3.RW */
static inline bool arm_scr_rw_eff(CPUARMState *env)
{
    /*
     * SCR_EL3.RW has an effective value of 1 if:
     *  - we are NS and EL2 is implemented but doesn't support AArch32
     *  - we are S and EL2 is enabled (in which case it must be AArch64)
     */
    ARMCPU *cpu = env_archcpu(env);

    if (env->cp15.scr_el3 & SCR_RW) {
        return true;
    }
    if (env->cp15.scr_el3 & SCR_NS) {
        return arm_feature(env, ARM_FEATURE_EL2) &&
            !cpu_isar_feature(aa64_aa32_el2, cpu);
    } else {
        return env->cp15.scr_el3 & SCR_EEL2;
    }
}

/* Return true if the specified exception level is running in AArch64 state. */
static inline bool arm_el_is_aa64(CPUARMState *env, int el)
{
    /*
     * This isn't valid for EL0 (if we're in EL0, is_a64() is what you want,
     * and if we're not in EL0 then the state of EL0 isn't well defined.)
     */
    assert(el >= 1 && el <= 3);
    bool aa64 = arm_feature(env, ARM_FEATURE_AARCH64);

    /*
     * The highest exception level is always at the maximum supported
     * register width, and then lower levels have a register width controlled
     * by bits in the SCR or HCR registers.
     */
    if (el == 3) {
        return aa64;
    }

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        aa64 = aa64 && arm_scr_rw_eff(env);
    }

    if (el == 2) {
        return aa64;
    }

    if (arm_is_el2_enabled(env)) {
        aa64 = aa64 && (env->cp15.hcr_el2 & HCR_RW);
    }

    return aa64;
}

/*
 * Return the current Exception Level (as per ARMv8; note that this differs
 * from the ARMv7 Privilege Level).
 */
static inline int arm_current_el(CPUARMState *env)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        /*
         * M-profile has only EL0 and EL1: Handler mode, and Thread mode
         * with CONTROL.nPRIV clear, are privileged (EL1).
         */
        return arm_v7m_is_handler_mode(env) ||
            !(env->v7m.control[env->v7m.secure] & 1);
    }

    if (is_a64(env)) {
        return extract32(env->pstate, 2, 2);
    }

    switch (env->uncached_cpsr & 0x1f) {
    case ARM_CPU_MODE_USR:
        return 0;
    case ARM_CPU_MODE_HYP:
        return 2;
    case ARM_CPU_MODE_MON:
        return 3;
    default:
        if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
            /* If EL3 is 32-bit then all secure privileged modes run in EL3 */
            return 3;
        }

        return 1;
    }
}

static inline bool arm_cpu_data_is_big_endian_a32(CPUARMState *env,
                                                  bool sctlr_b)
{
#ifdef CONFIG_USER_ONLY
    /*
     * In system mode, BE32 is modelled in line with the
     * architecture (as word-invariant big-endianness), where loads
     * and stores are done little endian but from addresses which
     * are adjusted by XORing with the appropriate constant. So the
     * endianness to use for the raw data access is not affected by
     * SCTLR.B.
     * In user mode, however, we model BE32 as byte-invariant
     * big-endianness (because user-only code cannot tell the
     * difference), and so we need to use a data access endianness
     * that depends on SCTLR.B.
     */
    if (sctlr_b) {
        return true;
    }
#endif
    /* In 32-bit mode, endianness is determined by the CPSR's E bit */
    return env->uncached_cpsr & CPSR_E;
}

static inline bool arm_cpu_data_is_big_endian_a64(int el, uint64_t sctlr)
{
    return sctlr & (el ? SCTLR_EE : SCTLR_E0E);
}

/* Return true if the processor is in big-endian mode. */
static inline bool arm_cpu_data_is_big_endian(CPUARMState *env)
{
    if (!is_a64(env)) {
        return arm_cpu_data_is_big_endian_a32(env, arm_sctlr_b(env));
    } else {
        int cur_el = arm_current_el(env);
        uint64_t sctlr = arm_sctlr(env, cur_el);
        return arm_cpu_data_is_big_endian_a64(cur_el, sctlr);
    }
}

#ifdef CONFIG_USER_ONLY
static inline bool arm_cpu_bswap_data(CPUARMState *env)
{
    return TARGET_BIG_ENDIAN ^ arm_cpu_data_is_big_endian(env);
}
#endif

static inline void aarch64_save_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->sp_el[el] = env->xregs[31];
    } else {
        env->sp_el[0] = env->xregs[31];
    }
}

static inline void aarch64_restore_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->xregs[31] = env->sp_el[el];
    } else {
        env->xregs[31] = env->sp_el[0];
    }
}

static inline void update_spsel(CPUARMState *env, uint32_t imm)
{
    unsigned int cur_el = arm_current_el(env);
    /* Update PSTATE SPSel bit; this requires us to update the
     * working stack pointer in xregs[31].
     */
    if (!((imm ^ env->pstate) & PSTATE_SP)) {
        return;
    }
    aarch64_save_sp(env, cur_el);
    env->pstate = deposit32(env->pstate, 0, 1, imm);

    /* We rely on illegal updates to SPsel from EL0 to get trapped
     * at translation time.
     */
    assert(cur_el >= 1 && cur_el <= 3);
    aarch64_restore_sp(env, cur_el);
}

/*
 * arm_pamax
 * @cpu: ARMCPU
 *
 * Returns the implementation defined bit-width of physical addresses.
 * The ARMv8 reference manuals refer to this as PAMax().
 */
unsigned int arm_pamax(ARMCPU *cpu);

/*
 * round_down_to_parange_index
 * @bit_size: uint8_t
 *
 * Rounds down the bit_size supplied to the first supported ARM physical
 * address range and returns the index for this. The index is intended to
 * be used to set ID_AA64MMFR0_EL1's PARANGE bits.
 */
uint8_t round_down_to_parange_index(uint8_t bit_size);

/*
 * round_down_to_parange_bit_size
 * @bit_size: uint8_t
 *
 * Rounds down the bit_size supplied to the first supported ARM physical
 * address range bit size and returns this.
 */
uint8_t round_down_to_parange_bit_size(uint8_t bit_size);

/* Return true if extended addresses are enabled.
 * This is always the case if our translation regime is 64 bit,
 * but depends on TTBCR.EAE for 32 bit.
 */
static inline bool extended_addresses_enabled(CPUARMState *env)
{
    uint64_t tcr = env->cp15.tcr_el[arm_is_secure(env) ? 3 : 1];
    if (arm_feature(env, ARM_FEATURE_PMSA) &&
        arm_feature(env, ARM_FEATURE_V8)) {
        return true;
    }
    return arm_el_is_aa64(env, 1) ||
           (arm_feature(env, ARM_FEATURE_LPAE) && (tcr & TTBCR_EAE));
}

/* Update a QEMU watchpoint based on the information the guest has set in the
 * DBGWCR<n>_EL1 and DBGWVR<n>_EL1 registers.
 */
void hw_watchpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU watchpoints for every guest watchpoint. This does a
 * complete delete-and-reinstate of the QEMU watchpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_watchpoint_update_all(ARMCPU *cpu);
/* Update a QEMU breakpoint based on the information the guest has set in the
 * DBGBCR<n>_EL1 and DBGBVR<n>_EL1 registers.
 */
void hw_breakpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU breakpoints for every guest breakpoint. This does a
 * complete delete-and-reinstate of the QEMU breakpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_breakpoint_update_all(ARMCPU *cpu);

/* Callback function for checking if a breakpoint should trigger. */
bool arm_debug_check_breakpoint(CPUState *cs);

/* Callback function for checking if a watchpoint should trigger. */
bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp);

/* Adjust addresses (in BE32 mode) before testing against watchpoint
 * addresses.
 */
vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len);

/* Callback function for when a watchpoint or breakpoint triggers. */
void arm_debug_excp_handler(CPUState *cs);

#if defined(CONFIG_USER_ONLY) || !defined(CONFIG_TCG)
static inline bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
{
    return false;
}
#else
/* Return true if the r0/x0 value indicates that this SMC/HVC is a PSCI call. */
bool arm_is_psci_call(ARMCPU *cpu, int excp_type);
#endif
/* Actually handle a PSCI call */
void arm_handle_psci_call(ARMCPU *cpu);

/**
 * arm_clear_exclusive: clear the exclusive monitor
 * @env: CPU env
 * Clear the CPU's exclusive monitor, like the guest CLREX instruction.
 */
static inline void arm_clear_exclusive(CPUARMState *env)
{
    env->exclusive_addr = -1;
}

/**
 * ARMFaultType: type of an ARM MMU fault
 * This corresponds to the v8A pseudocode's Fault enumeration,
 * with extensions for QEMU internal conditions.
 */
typedef enum ARMFaultType {
    ARMFault_None,
    ARMFault_AccessFlag,
    ARMFault_Alignment,
    ARMFault_Background,
    ARMFault_Domain,
    ARMFault_Permission,
    ARMFault_Translation,
    ARMFault_AddressSize,
    ARMFault_SyncExternal,
    ARMFault_SyncExternalOnWalk,
    ARMFault_SyncParity,
    ARMFault_SyncParityOnWalk,
    ARMFault_AsyncParity,
    ARMFault_AsyncExternal,
    ARMFault_Debug,
    ARMFault_TLBConflict,
    ARMFault_UnsuppAtomicUpdate,
    ARMFault_Lockdown,
    ARMFault_Exclusive,
    ARMFault_ICacheMaint,
    ARMFault_QEMU_NSCExec, /* v8M: NS executing in S&NSC memory */
    ARMFault_QEMU_SFault, /* v8M: SecureFault INVTRAN, INVEP or AUVIOL */
    ARMFault_GPCFOnWalk,
    ARMFault_GPCFOnOutput,
} ARMFaultType;

typedef enum ARMGPCF {
    GPCF_None,
    GPCF_AddressSize,
    GPCF_Walk,
    GPCF_EABT,
    GPCF_Fail,
} ARMGPCF;

/**
 * ARMMMUFaultInfo: Information describing an ARM MMU Fault
 * @type: Type of fault
 * @gpcf: Subtype of ARMFault_GPCFOn{Walk,Output}.
 * @level: Table walk level (for translation, access flag and permission faults)
 * @domain: Domain of the fault address (for non-LPAE CPUs only)
 * @s2addr: Address that caused a fault at stage 2
 * @paddr: physical address that caused a fault for gpc
 * @paddr_space: physical address space that caused a fault for gpc
 * @stage2: True if we faulted at stage 2
 * @s1ptw: True if we faulted at stage 2 while doing a stage 1 page-table walk
 * @s1ns: True if we faulted on a non-secure IPA while in secure state
 * @ea: True if we should set the EA (external abort type) bit in syndrome
 */
typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;
struct ARMMMUFaultInfo {
    ARMFaultType type;
    ARMGPCF gpcf;
    hwaddr s2addr;
    hwaddr paddr;
    ARMSecuritySpace paddr_space;
    int level;
    int domain;
    bool stage2;
    bool s1ptw;
    bool s1ns;
    bool ea;
    bool dirtybit;  /* FEAT_S1PIE, FEAT_S2PIE */
};

/**
 * arm_fi_to_sfsc: Convert fault info struct to short-format FSC
 * Compare pseudocode EncodeSDFSC(), though unlike that function
 * we set up a whole FSR-format code including domain field and
 * putting the high bit of the FSC into bit 10.
 */
static inline uint32_t arm_fi_to_sfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AccessFlag:
        fsc = fi->level == 1 ? 0x3 : 0x6;
        break;
    case ARMFault_Alignment:
        fsc = 0x1;
        break;
    case ARMFault_Permission:
        fsc = fi->level == 1 ? 0xd : 0xf;
        break;
    case ARMFault_Domain:
        fsc = fi->level == 1 ? 0x9 : 0xb;
        break;
    case ARMFault_Translation:
        fsc = fi->level == 1 ? 0x5 : 0x7;
        break;
    case ARMFault_SyncExternal:
        fsc = 0x8 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        fsc = fi->level == 1 ? 0xc : 0xe;
        fsc |= (fi->ea << 12);
        break;
    case ARMFault_SyncParity:
        fsc = 0x409;
        break;
    case ARMFault_SyncParityOnWalk:
        fsc = fi->level == 1 ? 0x40c : 0x40e;
        break;
    case ARMFault_AsyncParity:
        fsc = 0x408;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x406 | (fi->ea << 12);
        break;
    case ARMFault_Debug:
        fsc = 0x2;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x400;
        break;
    case ARMFault_Lockdown:
        fsc = 0x404;
        break;
    case ARMFault_Exclusive:
        fsc = 0x405;
        break;
    case ARMFault_ICacheMaint:
        fsc = 0x4;
        break;
    case ARMFault_Background:
        fsc = 0x0;
        break;
    case ARMFault_QEMU_NSCExec:
        fsc = M_FAKE_FSR_NSC_EXEC;
        break;
    case ARMFault_QEMU_SFault:
        fsc = M_FAKE_FSR_SFAULT;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * short-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= (fi->domain << 4);
    return fsc;
}

/**
 * arm_fi_to_lfsc: Convert fault info struct to long-format FSC
 * Compare pseudocode EncodeLDFSC(), though unlike that function
 * we fill in also the LPAE bit 9 of a DFSR format.
 */
static inline uint32_t arm_fi_to_lfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AddressSize:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b101001;
        } else {
            fsc = fi->level;
        }
        break;
    case ARMFault_AccessFlag:
        assert(fi->level >= 0 && fi->level <= 3);
        fsc = 0b001000 | fi->level;
        break;
    case ARMFault_Permission:
        assert(fi->level >= 0 && fi->level <= 3);
        fsc = 0b001100 | fi->level;
        break;
    case ARMFault_Translation:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b101011;
        } else {
            fsc = 0b000100 | fi->level;
        }
        break;
    case ARMFault_SyncExternal:
        fsc = 0x10 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b010011;
        } else {
            fsc = 0b010100 | fi->level;
        }
        fsc |= fi->ea << 12;
        break;
    case ARMFault_SyncParity:
        fsc = 0x18;
        break;
    case ARMFault_SyncParityOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b011011;
        } else {
            fsc = 0b011100 | fi->level;
        }
        break;
    case ARMFault_AsyncParity:
        fsc = 0x19;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x11 | (fi->ea << 12);
        break;
    case ARMFault_Alignment:
        fsc = 0x21;
        break;
    case ARMFault_Debug:
        fsc = 0x22;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x30;
        break;
    case ARMFault_UnsuppAtomicUpdate:
        fsc = 0x31;
        break;
    case ARMFault_Lockdown:
        fsc = 0x34;
        break;
    case ARMFault_Exclusive:
        fsc = 0x35;
        break;
    case ARMFault_GPCFOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b100011;
        } else {
            fsc = 0b100100 | fi->level;
        }
        break;
    case ARMFault_GPCFOnOutput:
        fsc = 0b101000;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * long-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= 1 << 9;
    return fsc;
}
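
/*
 * Worked example: a level-2 translation fault (fi->type ==
 * ARMFault_Translation, fi->level == 2) encodes as 0b000100 | 2 == 0x6,
 * and with the LPAE bit 9 set this function returns 0x206.
 */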

static inline bool arm_extabort_type(MemTxResult result)
{
    /* The EA bit in syndromes and fault status registers is an
     * IMPDEF classification of external aborts. ARM implementations
     * usually use this to indicate AXI bus Decode error (0) or
     * Slave error (1); in QEMU we follow that.
     */
    return result != MEMTX_DECODE_ERROR;
}

#ifdef CONFIG_USER_ONLY
void arm_cpu_record_sigsegv(CPUState *cpu, vaddr addr,
                            MMUAccessType access_type,
                            bool maperr, uintptr_t ra);
void arm_cpu_record_sigbus(CPUState *cpu, vaddr addr,
                           MMUAccessType access_type, uintptr_t ra);
#else
bool arm_cpu_tlb_fill_align(CPUState *cs, CPUTLBEntryFull *out, vaddr addr,
                            MMUAccessType access_type, int mmu_idx,
                            MemOp memop, int size, bool probe, uintptr_t ra);
#endif

static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
{
    int coreidx = mmu_idx & ARM_MMU_IDX_COREIDX_MASK;
    assert(coreidx < NB_MMU_MODES);
    return coreidx;
}

static inline ARMMMUIdx core_to_arm_mmu_idx(CPUARMState *env, int mmu_idx)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        return mmu_idx | ARM_MMU_IDX_M;
    } else {
        return mmu_idx | ARM_MMU_IDX_A;
    }
}

static inline ARMMMUIdx core_to_aa64_mmu_idx(int mmu_idx)
{
    /* AArch64 is always a-profile. */
    return mmu_idx | ARM_MMU_IDX_A;
}
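
/*
 * Illustrative round trip (assuming an A-profile CPU): the core index
 * and the ARM mmu_idx differ only in the ARM_MMU_IDX_* type bits, so:
 *
 *   int core = arm_to_core_mmu_idx(ARMMMUIdx_E10_0);
 *   ARMMMUIdx idx = core_to_arm_mmu_idx(env, core);   (idx == ARMMMUIdx_E10_0)
 */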

/* Return the MMU index for a v7M CPU in the specified security state */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate);

/*
 * Return true if the stage 1 translation regime is using LPAE
 * format page tables
 */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx);

/* Raise a data fault alignment exception for the specified virtual address */
G_NORETURN void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                            MMUAccessType access_type,
                                            int mmu_idx, uintptr_t retaddr);

#ifndef CONFIG_USER_ONLY
/* arm_cpu_do_transaction_failed: handle a memory system error response
 * (eg "no device/memory present at address") by raising an external abort
 * exception
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr);
#endif

/* Call any registered EL change hooks */
static inline void arm_call_pre_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->pre_el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}
static inline void arm_call_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}

/* Return the SCTLR value which controls this address translation regime */
static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    return env->cp15.sctlr_el[regime_el(mmu_idx)];
}

/*
 * These are the fields in VTCR_EL2 which affect both the Secure stage 2
 * and the Non-Secure stage 2 translation regimes (and hence which are
 * not present in VSTCR_EL2).
 */
#define VTCR_SHARED_FIELD_MASK \
    (R_VTCR_IRGN0_MASK | R_VTCR_ORGN0_MASK | R_VTCR_SH0_MASK | \
     R_VTCR_PS_MASK | R_VTCR_VS_MASK | R_VTCR_HA_MASK | R_VTCR_HD_MASK | \
     R_VTCR_DS_MASK)

/* Return the value of the TCR controlling this translation regime */
static inline uint64_t regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_Stage2) {
        return env->cp15.vtcr_el2;
    }
    if (mmu_idx == ARMMMUIdx_Stage2_S) {
        /*
         * Secure stage 2 shares fields from VTCR_EL2. We merge those
         * in with the VSTCR_EL2 value to synthesize a single VTCR_EL2 format
         * value so the callers don't need to special case this.
         *
         * If a future architecture change defines bits in VSTCR_EL2 that
         * overlap with these VTCR_EL2 fields we may need to revisit this.
         */
        uint64_t v = env->cp15.vstcr_el2 & ~VTCR_SHARED_FIELD_MASK;
        v |= env->cp15.vtcr_el2 & VTCR_SHARED_FIELD_MASK;
        return v;
    }
    return env->cp15.tcr_el[regime_el(mmu_idx)];
}
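
/*
 * Usage sketch (illustrative): callers typically extract individual fields
 * from the returned TCR value, e.g. for a 64-bit regime:
 *
 *   uint64_t tcr = regime_tcr(env, mmu_idx);
 *   int t0sz = extract64(tcr, 0, 6);   (TCR_ELx.T0SZ)
 */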

/* Return true if the translation regime is using LPAE format page tables */
static inline bool regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    int el = regime_el(mmu_idx);
    if (el == 2 || arm_el_is_aa64(env, el)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_PMSA) &&
        arm_feature(env, ARM_FEATURE_V8)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)
        && (regime_tcr(env, mmu_idx) & TTBCR_EAE)) {
        return true;
    }
    return false;
}

/**
 * arm_num_brps: Return number of implemented breakpoints.
 * Note that the ID register BRPS field is "number of bps - 1",
 * and we return the actual number of breakpoints.
 */
static inline int arm_num_brps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64_IDREG(&cpu->isar, ID_AA64DFR0, BRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, BRPS) + 1;
    }
}

/**
 * arm_num_wrps: Return number of implemented watchpoints.
 * Note that the ID register WRPS field is "number of wps - 1",
 * and we return the actual number of watchpoints.
 */
static inline int arm_num_wrps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64_IDREG(&cpu->isar, ID_AA64DFR0, WRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, WRPS) + 1;
    }
}

/**
 * arm_num_ctx_cmps: Return number of implemented context comparators.
 * Note that the ID register CTX_CMPS field is "number of cmps - 1",
 * and we return the actual number of comparators.
 */
static inline int arm_num_ctx_cmps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64_IDREG(&cpu->isar, ID_AA64DFR0, CTX_CMPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, CTX_CMPS) + 1;
    }
}

/**
 * v7m_using_psp: Return true if using process stack pointer
 * Return true if the CPU is currently using the process stack
 * pointer, or false if it is using the main stack pointer.
 */
static inline bool v7m_using_psp(CPUARMState *env)
{
    /* Handler mode always uses the main stack; for thread mode
     * the CONTROL.SPSEL bit determines the answer.
     * Note that in v7M it is not possible to be in Handler mode with
     * CONTROL.SPSEL non-zero, but in v8M it is, so we must check both.
     */
    return !arm_v7m_is_handler_mode(env) &&
        env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK;
}

/**
 * v7m_sp_limit: Return SP limit for current CPU state
 * Return the SP limit value for the current CPU security state
 * and stack pointer.
 */
static inline uint32_t v7m_sp_limit(CPUARMState *env)
{
    if (v7m_using_psp(env)) {
        return env->v7m.psplim[env->v7m.secure];
    } else {
        return env->v7m.msplim[env->v7m.secure];
    }
}

/**
 * v7m_cpacr_pass:
 * Return true if the v7M CPACR permits access to the FPU for the specified
 * security state and privilege level.
 */
static inline bool v7m_cpacr_pass(CPUARMState *env,
                                  bool is_secure, bool is_priv)
{
    switch (extract32(env->v7m.cpacr[is_secure], 20, 2)) {
    case 0:
    case 2: /* UNPREDICTABLE: we treat like 0 */
        return false;
    case 1:
        return is_priv;
    case 3:
        return true;
    default:
        g_assert_not_reached();
    }
}

/**
 * aarch32_mode_name(): Return name of the AArch32 CPU mode
 * @psr: Program Status Register indicating CPU mode
 *
 * Returns, for debug logging purposes, a printable representation
 * of the AArch32 CPU mode ("svc", "usr", etc) as indicated by
 * the low bits of the specified PSR.
 */
static inline const char *aarch32_mode_name(uint32_t psr)
{
    static const char cpu_mode_names[16][4] = {
        "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
        "???", "???", "hyp", "und", "???", "???", "???", "sys"
    };

    return cpu_mode_names[psr & 0xf];
}

/**
 * arm_cpu_exec_interrupt(): Implementation of the cpu_exec_interrupt hook.
 */
bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request);

/**
 * arm_cpu_update_virq: Update CPU_INTERRUPT_VIRQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VIRQ bit in cs->interrupt_request, following
 * a change to either the input VIRQ line from the GIC or the HCR_EL2.VI bit.
 * Must be called with the BQL held.
 */
void arm_cpu_update_virq(ARMCPU *cpu);

/**
 * arm_cpu_update_vfiq: Update CPU_INTERRUPT_VFIQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VFIQ bit in cs->interrupt_request, following
 * a change to either the input VFIQ line from the GIC or the HCR_EL2.VF bit.
 * Must be called with the BQL held.
 */
void arm_cpu_update_vfiq(ARMCPU *cpu);

/**
 * arm_cpu_update_vinmi: Update CPU_INTERRUPT_VINMI bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VINMI bit in cs->interrupt_request, following
 * a change to either the input VNMI line from the GIC or the HCRX_EL2.VINMI
 * bit.
 * Must be called with the BQL held.
 */
void arm_cpu_update_vinmi(ARMCPU *cpu);

/**
 * arm_cpu_update_vfnmi: Update CPU_INTERRUPT_VFNMI bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VFNMI bit in cs->interrupt_request, following
 * a change to the HCRX_EL2.VFNMI bit.
 * Must be called with the BQL held.
 */
void arm_cpu_update_vfnmi(ARMCPU *cpu);

/**
 * arm_cpu_update_vserr: Update CPU_INTERRUPT_VSERR bit
 *
 * Update the CPU_INTERRUPT_VSERR bit in cs->interrupt_request,
 * following a change to the HCR_EL2.VSE bit.
 */
void arm_cpu_update_vserr(ARMCPU *cpu);

/**
 * arm_mmu_idx_el:
 * @env: The cpu environment
 * @el: The EL to use.
 *
 * Return the full ARMMMUIdx for the translation regime for EL.
 */
ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el);

/**
 * arm_mmu_idx:
 * @env: The cpu environment
 *
 * Return the full ARMMMUIdx for the current translation regime.
 */
ARMMMUIdx arm_mmu_idx(CPUARMState *env);

/**
 * arm_stage1_mmu_idx:
 * @env: The cpu environment
 *
 * Return the ARMMMUIdx for the stage1 traversal for the current regime.
 */
#ifdef CONFIG_USER_ONLY
static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
{
    return ARMMMUIdx_Stage1_E0;
}
static inline ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return ARMMMUIdx_Stage1_E0;
}
#else
ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx);
ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env);
#endif

static inline uint32_t aarch32_cpsr_valid_mask(uint64_t features,
                                               const ARMISARegisters *id)
{
    uint32_t valid = CPSR_M | CPSR_AIF | CPSR_IL | CPSR_NZCV;

    if ((features >> ARM_FEATURE_V4T) & 1) {
        valid |= CPSR_T;
    }
    if ((features >> ARM_FEATURE_V5) & 1) {
        valid |= CPSR_Q; /* V5TE in reality */
    }
    if ((features >> ARM_FEATURE_V6) & 1) {
        valid |= CPSR_E | CPSR_GE;
    }
    if ((features >> ARM_FEATURE_THUMB2) & 1) {
        valid |= CPSR_IT;
    }
    if (isar_feature_aa32_jazelle(id)) {
        valid |= CPSR_J;
    }
    if (isar_feature_aa32_pan(id)) {
        valid |= CPSR_PAN;
    }
    if (isar_feature_aa32_dit(id)) {
        valid |= CPSR_DIT;
    }
    if (isar_feature_aa32_ssbs(id)) {
        valid |= CPSR_SSBS;
    }

    return valid;
}
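
/*
 * Usage sketch (illustrative): mask a guest-written PSR value so that
 * unimplemented bits are not set, e.g. in a CPSR write path:
 *
 *   val &= aarch32_cpsr_valid_mask(env->features, &env_archcpu(env)->isar);
 */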

static inline uint32_t aarch64_pstate_valid_mask(const ARMISARegisters *id)
{
    uint32_t valid;

    valid = PSTATE_M | PSTATE_DAIF | PSTATE_IL | PSTATE_SS | PSTATE_NZCV;
    if (isar_feature_aa64_bti(id)) {
        valid |= PSTATE_BTYPE;
    }
    if (isar_feature_aa64_pan(id)) {
        valid |= PSTATE_PAN;
    }
    if (isar_feature_aa64_uao(id)) {
        valid |= PSTATE_UAO;
    }
    if (isar_feature_aa64_dit(id)) {
        valid |= PSTATE_DIT;
    }
    if (isar_feature_aa64_ssbs(id)) {
        valid |= PSTATE_SSBS;
    }
    if (isar_feature_aa64_mte(id)) {
        valid |= PSTATE_TCO;
    }
    if (isar_feature_aa64_nmi(id)) {
        valid |= PSTATE_ALLINT;
    }

    return valid;
}

/* Granule size (i.e. page size) */
typedef enum ARMGranuleSize {
    /* Same order as TG0 encoding */
    Gran4K,
    Gran64K,
    Gran16K,
    GranInvalid,
} ARMGranuleSize;

/**
 * arm_granule_bits: Return address size of the granule in bits
 *
 * Return the address size of the granule in bits. This corresponds
 * to the pseudocode TGxGranuleBits().
 */
static inline int arm_granule_bits(ARMGranuleSize gran)
{
    switch (gran) {
    case Gran64K:
        return 16;
    case Gran16K:
        return 14;
    case Gran4K:
        return 12;
    default:
        g_assert_not_reached();
    }
}
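
/*
 * For example, the granule (page) size in bytes is
 * 1 << arm_granule_bits(gran): 4KB for Gran4K, 16KB for Gran16K,
 * 64KB for Gran64K.
 */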

/*
 * Parameters of a given virtual address, as extracted from the
 * translation controls for a given regime.
 */
typedef struct ARMVAParameters {
    unsigned tsz    : 8;
    unsigned ps     : 3;
    unsigned sh     : 2;
    unsigned select : 1;
    bool tbi        : 1;
    bool epd        : 1;
    bool hpd        : 1;
    bool tsz_oob    : 1;  /* tsz has been clamped to legal range */
    bool ds         : 1;
    bool ha         : 1;
    bool hd         : 1;
    ARMGranuleSize gran : 2;
    bool pie        : 1;
    bool aie        : 1;
} ARMVAParameters;

/**
 * aa64_va_parameters: Return parameters for an AArch64 virtual address
 * @env: CPU
 * @va: virtual address to look up
 * @mmu_idx: determines translation regime to use
 * @data: true if this is a data access
 * @el1_is_aa32: true if we are asking about stage 2 when EL1 is AArch32
 *  (ignored if @mmu_idx is for a stage 1 regime; only affects tsz/tsz_oob)
 */
ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
                                   ARMMMUIdx mmu_idx, bool data,
                                   bool el1_is_aa32);

int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx);
int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx);
int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx);

/* Determine if allocation tags are available.  */
static inline bool allocation_tag_access_enabled(CPUARMState *env, int el,
                                                 uint64_t sctlr)
{
    if (el < 3
        && arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.scr_el3 & SCR_ATA)) {
        return false;
    }
    if (el < 2 && arm_is_el2_enabled(env)) {
        uint64_t hcr = arm_hcr_el2_eff(env);
        if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
            return false;
        }
    }
    sctlr &= (el == 0 ? SCTLR_ATA0 : SCTLR_ATA);
    return sctlr != 0;
}

#ifndef CONFIG_USER_ONLY

/* Security attributes for an address, as returned by v8m_security_lookup. */
typedef struct V8M_SAttributes {
    bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */
    bool ns;
    bool nsc;
    uint8_t sregion;
    bool srvalid;
    uint8_t iregion;
    bool irvalid;
} V8M_SAttributes;

void v8m_security_lookup(CPUARMState *env, uint32_t address,
                         MMUAccessType access_type, ARMMMUIdx mmu_idx,
                         bool secure, V8M_SAttributes *sattrs);

/* Cacheability and shareability attributes for a memory access */
typedef struct ARMCacheAttrs {
    /*
     * If is_s2_format is true, attrs is the S2 descriptor bits [5:2]
     * Otherwise, attrs is the same as the MAIR_EL1 8-bit format
     */
    unsigned int attrs:8;
    unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
    bool is_s2_format:1;
} ARMCacheAttrs;

/* Fields that are valid upon success. */
typedef struct GetPhysAddrResult {
    CPUTLBEntryFull f;
    ARMCacheAttrs cacheattrs;
    /*
     * For ARMMMUIdx_Stage2*, the protection installed into f.prot
     * is the result for AccessType_TTW, i.e. the page table walk itself.
     * The protection installed into s2prot is the one to be merged
     * with the stage1 protection.
     */
    int s2prot;
} GetPhysAddrResult;

/**
 * get_phys_addr: get the physical address for a virtual address
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @memop: memory operation feeding this access, or 0 for none
 * @mmu_idx: MMU index indicating required translation regime
 * @result: set on translation success.
 * @fi: set to fault info if the translation fails
 *
 * Find the physical address corresponding to the given virtual address,
 * by doing a translation table walk on MMU based systems or using the
 * MPU state on MPU based systems.
 *
 * Returns false if the translation was successful. Otherwise, the fields of
 * @result may not be filled in, and @fi is populated with information on why
 * the translation aborted, in the format of a DFSR/IFSR fault register, with
 * the following caveats:
 *  * we honour the short vs long DFSR format differences.
 *  * the WnR bit is never set (the caller must do this).
 *  * for PMSAv5 based systems we don't bother to return a full FSR format
 *    value.
 */
bool get_phys_addr(CPUARMState *env, vaddr address,
                   MMUAccessType access_type, MemOp memop, ARMMMUIdx mmu_idx,
                   GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
    __attribute__((nonnull));

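/*
 * Usage sketch (illustrative; "addr" and "mmu_idx" are hypothetical locals):
 *
 *   GetPhysAddrResult res = {};
 *   ARMMMUFaultInfo fi = {};
 *   if (get_phys_addr(env, addr, MMU_DATA_LOAD, 0, mmu_idx, &res, &fi)) {
 *       ... translation failed: encode fi with arm_fi_to_sfsc() or
 *       ... arm_fi_to_lfsc() as appropriate for the regime.
 *   }
 */
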
/**
 * get_phys_addr_for_at:
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @prot_check: PAGE_{READ,WRITE,EXEC}, or 0
 * @mmu_idx: MMU index indicating required translation regime
 * @space: security space for the access
 * @result: set on translation success.
 * @fi: set to fault info if the translation fails
 *
 * Similar to get_phys_addr, but for use by AccessType_AT, i.e.
 * system instructions for address translation.
 */
bool get_phys_addr_for_at(CPUARMState *env, vaddr address, unsigned prot_check,
                          ARMMMUIdx mmu_idx, ARMSecuritySpace space,
                          GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
    __attribute__((nonnull));

bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                       MMUAccessType access_type, unsigned prot_check,
                       ARMMMUIdx mmu_idx, bool is_secure,
                       GetPhysAddrResult *result,
                       ARMMMUFaultInfo *fi, uint32_t *mregion);

void arm_log_exception(CPUState *cs);

#endif /* !CONFIG_USER_ONLY */

/*
 * SVE predicates are 1/8 the size of SVE vectors, and cannot use
 * the same simd_desc() encoding due to restrictions on size.
 * Use these instead.
 */
FIELD(PREDDESC, OPRSZ, 0, 6)
FIELD(PREDDESC, ESZ, 6, 2)
FIELD(PREDDESC, DATA, 8, 24)

/* Bits within a descriptor passed to the helper_mte_check* functions. */
FIELD(MTEDESC, MIDX,  0, 4)
FIELD(MTEDESC, TBI,   4, 2)
FIELD(MTEDESC, TCMA,  6, 2)
FIELD(MTEDESC, WRITE, 8, 1)
FIELD(MTEDESC, ALIGN, 9, 3)
FIELD(MTEDESC, SIZEM1, 12, 32 - 12)  /* size - 1 */

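/*
 * Illustrative sketch (not upstream code): a descriptor is typically
 * assembled with the registerfields helpers, e.g. for a checked write
 * of "size" bytes ("mmu_idx" and "size" are hypothetical locals):
 *
 *   uint32_t desc = 0;
 *   desc = FIELD_DP32(desc, MTEDESC, MIDX, mmu_idx);
 *   desc = FIELD_DP32(desc, MTEDESC, WRITE, 1);
 *   desc = FIELD_DP32(desc, MTEDESC, SIZEM1, size - 1);
 */
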
1550 bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr);
1551 uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra);
1552 
1553 /**
1554  * mte_mops_probe: Check where the next MTE failure is for a FEAT_MOPS operation
1555  * @env: CPU env
1556  * @ptr: start address of memory region (dirty pointer)
1557  * @size: length of region (guaranteed not to cross a page boundary)
1558  * @desc: MTEDESC descriptor word (0 means no MTE checks)
1559  * Returns: the size of the region that can be copied without hitting
1560  *          an MTE tag failure
1561  *
1562  * Note that we assume that the caller has already checked the TBI
1563  * and TCMA bits with mte_checks_needed() and an MTE check is definitely
1564  * required.
1565  */
1566 uint64_t mte_mops_probe(CPUARMState *env, uint64_t ptr, uint64_t size,
1567                         uint32_t desc);
1568 
1569 /**
1570  * mte_mops_probe_rev: Check where the next MTE failure is for a FEAT_MOPS
1571  *                     operation going in the reverse direction
1572  * @env: CPU env
1573  * @ptr: *end* address of memory region (dirty pointer)
1574  * @size: length of region (guaranteed not to cross a page boundary)
1575  * @desc: MTEDESC descriptor word (0 means no MTE checks)
1576  * Returns: the size of the region that can be copied without hitting
1577  *          an MTE tag failure
1578  *
1579  * Note that we assume that the caller has already checked the TBI
1580  * and TCMA bits with mte_checks_needed() and an MTE check is definitely
1581  * required.
1582  */
1583 uint64_t mte_mops_probe_rev(CPUARMState *env, uint64_t ptr, uint64_t size,
1584                             uint32_t desc);
1585 
1586 /**
1587  * mte_check_fail: Record an MTE tag check failure
1588  * @env: CPU env
1589  * @desc: MTEDESC descriptor word
1590  * @dirty_ptr: Failing dirty address
1591  * @ra: TCG retaddr
1592  *
1593  * This may never return (if the MTE tag checks are configured to fault).
1594  */
1595 void mte_check_fail(CPUARMState *env, uint32_t desc,
1596                     uint64_t dirty_ptr, uintptr_t ra);
1597 
/**
 * mte_mops_set_tags: Set MTE tags for a portion of a FEAT_MOPS operation
 * @env: CPU env
 * @dirty_ptr: Start address of memory region (dirty pointer)
 * @size: length of region (guaranteed not to cross a page boundary)
 * @desc: MTEDESC descriptor word
 */
void mte_mops_set_tags(CPUARMState *env, uint64_t dirty_ptr, uint64_t size,
                       uint32_t desc);

static inline int allocation_tag_from_addr(uint64_t ptr)
{
    return extract64(ptr, 56, 4);
}

static inline uint64_t address_with_allocation_tag(uint64_t ptr, int rtag)
{
    return deposit64(ptr, 56, 4, rtag);
}

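/*
 * A worked example (illustrative values only): for
 * ptr == 0x5a00123456789abc the allocation tag lives in bits [59:56],
 * so allocation_tag_from_addr(ptr) == 0xa, and
 * address_with_allocation_tag(ptr, 0x3) == 0x5300123456789abc.
 */
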
/* Return true if tbi bits mean that the access is checked.  */
static inline bool tbi_check(uint32_t desc, int bit55)
{
    return (desc >> (R_MTEDESC_TBI_SHIFT + bit55)) & 1;
}

/* Return true if tcma bits mean that the access is unchecked.  */
static inline bool tcma_check(uint32_t desc, int bit55, int ptr_tag)
{
    /*
     * We had extracted bit55 and ptr_tag for other reasons, so fold
     * (ptr<59:55> == 00000 || ptr<59:55> == 11111) into a single test.
     */
    bool match = ((ptr_tag + bit55) & 0xf) == 0;
    bool tcma = (desc >> (R_MTEDESC_TCMA_SHIFT + bit55)) & 1;
    return tcma && match;
}

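/*
 * To see why the fold in tcma_check() works (a worked example, not
 * normative text): with bit55 == 0 a match requires ptr_tag == 0x0,
 * and (0x0 + 0) & 0xf == 0; with bit55 == 1 it requires ptr_tag == 0xf,
 * and (0xf + 1) & 0xf == 0. Any other combination leaves the sum
 * nonzero modulo 16.
 */
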
/*
 * For TBI, ideally, we would do nothing.  Proper behaviour on fault is
 * for the tag to be present in the FAR_ELx register.  But for user-only
 * mode, we do not have a TLB with which to implement this, so we must
 * remove the top byte.
 */
static inline uint64_t useronly_clean_ptr(uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    /* TBI0 is known to be enabled, while TBI1 is disabled. */
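    /*
     * sextract64() sign-extends from bit 55: for a bit55 == 0 pointer
     * the AND clears the top byte (the TBI0 clean), while for a
     * bit55 == 1 pointer the mask is all-ones there and the pointer
     * is left unchanged, matching TBI1 being disabled.
     */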
    ptr &= sextract64(ptr, 0, 56);
#endif
    return ptr;
}

static inline uint64_t useronly_maybe_clean_ptr(uint32_t desc, uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    int64_t clean_ptr = sextract64(ptr, 0, 56);
    if (tbi_check(desc, clean_ptr < 0)) {
        ptr = clean_ptr;
    }
#endif
    return ptr;
}

/* Values for M-profile PSR.ECI for MVE insns */
enum MVEECIState {
    ECI_NONE = 0, /* No completed beats */
    ECI_A0 = 1, /* Completed: A0 */
    ECI_A0A1 = 2, /* Completed: A0, A1 */
    /* 3 is reserved */
    ECI_A0A1A2 = 4, /* Completed: A0, A1, A2 */
    ECI_A0A1A2B0 = 5, /* Completed: A0, A1, A2, B0 */
    /* All other values reserved */
};

/* Definitions for the PMU registers */
#define PMCRN_MASK  0xf800
#define PMCRN_SHIFT 11
#define PMCRLP  0x80
#define PMCRLC  0x40
#define PMCRDP  0x20
#define PMCRX   0x10
#define PMCRD   0x8
#define PMCRC   0x4
#define PMCRP   0x2
#define PMCRE   0x1
/*
 * Mask of PMCR bits writable by guest (not including WO bits like C, P,
 * which can be written as 1 to trigger behaviour but which stay RAZ).
 */
#define PMCR_WRITABLE_MASK (PMCRLP | PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE)

#define PMXEVTYPER_P          0x80000000
#define PMXEVTYPER_U          0x40000000
#define PMXEVTYPER_NSK        0x20000000
#define PMXEVTYPER_NSU        0x10000000
#define PMXEVTYPER_NSH        0x08000000
#define PMXEVTYPER_M          0x04000000
#define PMXEVTYPER_MT         0x02000000
#define PMXEVTYPER_EVTCOUNT   0x0000ffff
#define PMXEVTYPER_MASK       (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
                               PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
                               PMXEVTYPER_M | PMXEVTYPER_MT | \
                               PMXEVTYPER_EVTCOUNT)

#define PMCCFILTR             0xf8000000
#define PMCCFILTR_M           PMXEVTYPER_M
#define PMCCFILTR_EL0         (PMCCFILTR | PMCCFILTR_M)

static inline uint32_t pmu_num_counters(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);

    return (cpu->isar.reset_pmcr_el0 & PMCRN_MASK) >> PMCRN_SHIFT;
}

/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
static inline uint64_t pmu_counter_mask(CPUARMState *env)
{
    return (1ULL << 31) | ((1ULL << pmu_num_counters(env)) - 1);
}

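/*
 * For example (illustrative numbers): on a CPU whose PMCR.N is 4,
 * pmu_counter_mask() returns (1ULL << 31) | 0xf, i.e. bit 31 for the
 * cycle counter plus bits [3:0] for the four event counters.
 */
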
GDBFeature *arm_gen_dynamic_svereg_feature(CPUState *cpu, int base_reg);
GDBFeature *arm_gen_dynamic_smereg_feature(CPUState *cpu, int base_reg);
GDBFeature *arm_gen_dynamic_tls_feature(CPUState *cpu, int base_reg);
int aarch64_gdb_get_sve_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_sve_reg(CPUState *cs, uint8_t *buf, int reg);
int aarch64_gdb_get_sme_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_sme_reg(CPUState *cs, uint8_t *buf, int reg);
int aarch64_gdb_get_sme2_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_sme2_reg(CPUState *cs, uint8_t *buf, int reg);
int aarch64_gdb_get_fpu_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_fpu_reg(CPUState *cs, uint8_t *buf, int reg);
int aarch64_gdb_get_pauth_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_pauth_reg(CPUState *cs, uint8_t *buf, int reg);
int aarch64_gdb_get_tag_ctl_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_tag_ctl_reg(CPUState *cs, uint8_t *buf, int reg);
int aarch64_gdb_get_tls_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_tls_reg(CPUState *cs, uint8_t *buf, int reg);
void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_sme_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_lpa2_finalize(ARMCPU *cpu, Error **errp);
void aarch64_max_tcg_initfn(Object *obj);
void aarch64_add_pauth_properties(Object *obj);
void aarch64_add_sve_properties(Object *obj);
void aarch64_add_sme_properties(Object *obj);

/* Return true if the gdbstub is presenting an AArch64 CPU */
static inline bool arm_gdbstub_is_aarch64(ARMCPU *cpu)
{
    return arm_feature(&cpu->env, ARM_FEATURE_AARCH64);
}

/* Read the CONTROL register as the MRS instruction would. */
uint32_t arm_v7m_mrs_control(CPUARMState *env, uint32_t secure);

/*
 * Return a pointer to the location where we currently store the
 * stack pointer for the requested security state and thread mode.
 * This pointer will become invalid if the CPU state is updated
 * such that the stack pointers are switched around (e.g. by changing
 * the SPSEL control bit).
 */
uint32_t *arm_v7m_get_sp_ptr(CPUARMState *env, bool secure,
                             bool threadmode, bool spsel);

bool el_is_in_host(CPUARMState *env, int el);

void aa32_max_features(ARMCPU *cpu);
int exception_target_el(CPUARMState *env);
bool arm_singlestep_active(CPUARMState *env);
bool arm_generate_debug_exceptions(CPUARMState *env);

/**
 * pauth_ptr_mask:
 * @param: parameters defining the MMU setup
 *
 * Return a mask of the address bits that contain the authentication code,
 * given the MMU config defined by @param.
 */
static inline uint64_t pauth_ptr_mask(ARMVAParameters param)
{
    int bot_pac_bit = 64 - param.tsz;
    int top_pac_bit = 64 - 8 * param.tbi;

    return MAKE_64BIT_MASK(bot_pac_bit, top_pac_bit - bot_pac_bit);
}

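/*
 * For example (illustrative values): with param.tsz == 16 (a 48-bit
 * virtual address space) and param.tbi == 1, the PAC occupies bits
 * [55:48] and the returned mask is 0x00ff000000000000; with tbi == 0
 * the PAC field extends up to bit 63 instead.
 */
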
/* Add the cpreg definitions for debug related system registers */
void define_debug_regs(ARMCPU *cpu);

/* Add the cpreg definitions for TLBI instructions */
void define_tlb_insn_regs(ARMCPU *cpu);
/* Add the cpreg definitions for AT instructions */
void define_at_insn_regs(ARMCPU *cpu);
/* Add the cpreg definitions for PM cpregs */
void define_pm_cpregs(ARMCPU *cpu);
/* Add the cpreg definitions for GCS cpregs */
void define_gcs_cpregs(ARMCPU *cpu);

/* Effective value of MDCR_EL2 */
static inline uint64_t arm_mdcr_el2_eff(CPUARMState *env)
{
    return arm_is_el2_enabled(env) ? env->cp15.mdcr_el2 : 0;
}

/* Powers of 2 for sve_vq_map et al. */
#define SVE_VQ_POW2_MAP                                 \
    ((1 << (1 - 1)) | (1 << (2 - 1)) |                  \
     (1 << (4 - 1)) | (1 << (8 - 1)) | (1 << (16 - 1)))

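/*
 * That is, bits 0, 1, 3, 7 and 15 are set (0x808b), marking the
 * power-of-two quadword counts vq = 1, 2, 4, 8 and 16, i.e. vector
 * lengths of 128, 256, 512, 1024 and 2048 bits.
 */
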
/*
 * Return true if it is possible to take a fine-grained-trap to EL2.
 */
static inline bool arm_fgt_active(CPUARMState *env, int el)
{
    /*
     * The Arm ARM only requires the "{E2H,TGE} != {1,1}" test for traps
     * that can affect EL0, but it is harmless to do the test also for
     * traps on registers that are only accessible at EL1 because if the test
     * returns true then we can't be executing at EL1 anyway.
     * FGT traps only happen when EL2 is enabled and EL1 is AArch64;
     * traps from AArch32 only happen in the case where EL0 is AArch32.
     */
    return cpu_isar_feature(aa64_fgt, env_archcpu(env)) &&
        el < 2 && arm_is_el2_enabled(env) &&
        arm_el_is_aa64(env, 1) &&
        (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE) &&
        (!arm_feature(env, ARM_FEATURE_EL3) || (env->cp15.scr_el3 & SCR_FGTEN));
}

/*
 * Although the ARM implementation of hardware-assisted debugging
 * allows for different breakpoints per-core, the current GDB
 * interface treats them as a global pool of registers (which seems to
 * be the case for x86, ppc and s390). As a result we store one copy
 * of registers which is used for all active cores.
 *
 * Write access is serialised by virtue of the GDB protocol, which is
 * what updates these values. Read access (i.e. when the values are
 * copied to the vCPU) is also gated by GDB's run control.
 *
 * This is not unreasonable: most of the time when debugging a kernel
 * you never know which core will eventually execute your function.
 */

typedef struct {
    uint64_t bcr;
    uint64_t bvr;
} HWBreakpoint;

/*
 * The watchpoint registers can cover more area than the requested
 * watchpoint so we need to store the additional information
 * somewhere. We also need to supply a CPUWatchpoint to the GDB stub
 * when the watchpoint is hit.
 */
typedef struct {
    uint64_t wcr;
    uint64_t wvr;
    CPUWatchpoint details;
} HWWatchpoint;

/* Maximum and current break/watch point counts */
extern int max_hw_bps, max_hw_wps;
extern GArray *hw_breakpoints, *hw_watchpoints;

#define cur_hw_wps      (hw_watchpoints->len)
#define cur_hw_bps      (hw_breakpoints->len)
#define get_hw_bp(i)    (&g_array_index(hw_breakpoints, HWBreakpoint, i))
#define get_hw_wp(i)    (&g_array_index(hw_watchpoints, HWWatchpoint, i))

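/*
 * A usage sketch (illustrative, not code from this file): walking the
 * currently allocated breakpoints looks like
 *
 *   for (int i = 0; i < cur_hw_bps; i++) {
 *       HWBreakpoint *bp = get_hw_bp(i);
 *       ...
 *   }
 */
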
bool find_hw_breakpoint(CPUState *cpu, vaddr pc);
int insert_hw_breakpoint(vaddr pc);
int delete_hw_breakpoint(vaddr pc);

bool check_watchpoint_in_range(int i, vaddr addr);
CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, vaddr addr);
int insert_hw_watchpoint(vaddr addr, vaddr len, int type);
int delete_hw_watchpoint(vaddr addr, vaddr len, int type);

/* Return the current value of the system counter in ticks */
uint64_t gt_get_countervalue(CPUARMState *env);
/*
 * Return the currently applicable offset between the system counter
 * and the counter for the specified timer, as used for direct register
 * accesses.
 */
uint64_t gt_direct_access_timer_offset(CPUARMState *env, int timeridx);

/*
 * Return a mask of ARMMMUIdxBit values corresponding to an "invalidate
 * all EL1" scope; this covers stage 1 and stage 2.
 */
int alle1_tlbmask(CPUARMState *env);

/* Set the float_status behaviour to match the Arm defaults */
void arm_set_default_fp_behaviours(float_status *s);
/* Set the float_status behaviour to match Arm FPCR.AH=1 behaviour */
void arm_set_ah_fp_behaviours(float_status *s);
/* Read the float_status info and return the appropriate FPSR value */
uint32_t vfp_get_fpsr_from_host(CPUARMState *env);
/* Clear the exception status flags from all float_status fields */
void vfp_clear_float_status_exc_flags(CPUARMState *env);
/*
 * Update float_status fields to handle the bits of the FPCR
 * specified by mask changing to the values in val.
 */
void vfp_set_fpcr_to_host(CPUARMState *env, uint32_t val, uint32_t mask);
bool arm_pan_enabled(CPUARMState *env);
uint32_t cpsr_read_for_spsr_elx(CPUARMState *env);
void cpsr_write_from_spsr_elx(CPUARMState *env, uint32_t val);

/* Compare uint64_t for qsort and bsearch. */
int compare_u64(const void *a, const void *b);

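/*
 * A typical call site (sketch): sorting an array of uint64_t keys with
 *
 *   qsort(keys, nkeys, sizeof(uint64_t), compare_u64);
 */
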
/* Used in FEAT_MEC to set the MECIDWidthm1 field in the MECIDR_EL2 register. */
#define MECID_WIDTH 16

#endif /* TARGET_ARM_INTERNALS_H */