xref: /openbmc/qemu/target/arm/internals.h (revision 6d62f309f869c6a563fbe280c85182954df5855c)
1fcf5ef2aSThomas Huth /*
2fcf5ef2aSThomas Huth  * QEMU ARM CPU -- internal functions and types
3fcf5ef2aSThomas Huth  *
4fcf5ef2aSThomas Huth  * Copyright (c) 2014 Linaro Ltd
5fcf5ef2aSThomas Huth  *
6fcf5ef2aSThomas Huth  * This program is free software; you can redistribute it and/or
7fcf5ef2aSThomas Huth  * modify it under the terms of the GNU General Public License
8fcf5ef2aSThomas Huth  * as published by the Free Software Foundation; either version 2
9fcf5ef2aSThomas Huth  * of the License, or (at your option) any later version.
10fcf5ef2aSThomas Huth  *
11fcf5ef2aSThomas Huth  * This program is distributed in the hope that it will be useful,
12fcf5ef2aSThomas Huth  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13fcf5ef2aSThomas Huth  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14fcf5ef2aSThomas Huth  * GNU General Public License for more details.
15fcf5ef2aSThomas Huth  *
16fcf5ef2aSThomas Huth  * You should have received a copy of the GNU General Public License
17fcf5ef2aSThomas Huth  * along with this program; if not, see
18fcf5ef2aSThomas Huth  * <http://www.gnu.org/licenses/gpl-2.0.html>
19fcf5ef2aSThomas Huth  *
20fcf5ef2aSThomas Huth  * This header defines functions, types, etc which need to be shared
21fcf5ef2aSThomas Huth  * between different source files within target/arm/ but which are
22fcf5ef2aSThomas Huth  * private to it and not required by the rest of QEMU.
23fcf5ef2aSThomas Huth  */
24fcf5ef2aSThomas Huth 
25fcf5ef2aSThomas Huth #ifndef TARGET_ARM_INTERNALS_H
26fcf5ef2aSThomas Huth #define TARGET_ARM_INTERNALS_H
27fcf5ef2aSThomas Huth 
286ce1c9d0SPhilippe Mathieu-Daudé #include "exec/breakpoint.h"
29abc24d86SMichael Davidsaver #include "hw/registerfields.h"
3028f32503SRichard Henderson #include "tcg/tcg-gvec-desc.h"
311fe27859SRichard Henderson #include "syndrome.h"
325a534314SPeter Maydell #include "cpu-features.h"
33abc24d86SMichael Davidsaver 
/*
 * Register banks for CPU modes: indices into the banked register
 * arrays (env->banked_r13[], env->banked_r14[], env->banked_spsr[]).
 * The mapping from AArch32 CPU mode to index is done by bank_number()
 * and r14_bank_number() below.
 */
#define BANK_USRSYS 0
#define BANK_SVC    1
#define BANK_ABT    2
#define BANK_UND    3
#define BANK_IRQ    4
#define BANK_FIQ    5
#define BANK_HYP    6
#define BANK_MON    7
43fcf5ef2aSThomas Huth 
/*
 * arm_env_mmu_index:
 * @env: CPU state
 *
 * Return the MMU index for the current CPU state, as cached in the
 * MMUIDX field of the hflags word.
 */
static inline int arm_env_mmu_index(CPUARMState *env)
{
    return EX_TBFLAG_ANY(env->hflags, MMUIDX);
}
48b7770d72SRichard Henderson 
excp_is_internal(int excp)49fcf5ef2aSThomas Huth static inline bool excp_is_internal(int excp)
50fcf5ef2aSThomas Huth {
51fcf5ef2aSThomas Huth     /* Return true if this exception number represents a QEMU-internal
52fcf5ef2aSThomas Huth      * exception that will not be passed to the guest.
53fcf5ef2aSThomas Huth      */
54fcf5ef2aSThomas Huth     return excp == EXCP_INTERRUPT
55fcf5ef2aSThomas Huth         || excp == EXCP_HLT
56fcf5ef2aSThomas Huth         || excp == EXCP_DEBUG
57fcf5ef2aSThomas Huth         || excp == EXCP_HALTED
58fcf5ef2aSThomas Huth         || excp == EXCP_EXCEPTION_EXIT
59fcf5ef2aSThomas Huth         || excp == EXCP_KERNEL_TRAP
60fcf5ef2aSThomas Huth         || excp == EXCP_SEMIHOST;
61fcf5ef2aSThomas Huth }
62fcf5ef2aSThomas Huth 
63bd8e9ddfSPeter Maydell /*
64bd8e9ddfSPeter Maydell  * Default frequency for the generic timer, in Hz.
65f037f5b4SPeter Maydell  * ARMv8.6 and later CPUs architecturally must use a 1GHz timer; before
66f037f5b4SPeter Maydell  * that it was an IMPDEF choice, and QEMU initially picked 62.5MHz,
67f037f5b4SPeter Maydell  * which gives a 16ns tick period.
68f037f5b4SPeter Maydell  *
69f037f5b4SPeter Maydell  * We will use the back-compat value:
70f037f5b4SPeter Maydell  *  - for QEMU CPU types added before we standardized on 1GHz
71f037f5b4SPeter Maydell  *  - for versioned machine types with a version of 9.0 or earlier
72f037f5b4SPeter Maydell  * In any case, the machine model may override via the cntfrq property.
73fcf5ef2aSThomas Huth  */
74f037f5b4SPeter Maydell #define GTIMER_DEFAULT_HZ 1000000000
75f037f5b4SPeter Maydell #define GTIMER_BACKCOMPAT_HZ 62500000
76fcf5ef2aSThomas Huth 
77abc24d86SMichael Davidsaver /* Bit definitions for the v7M CONTROL register */
78abc24d86SMichael Davidsaver FIELD(V7M_CONTROL, NPRIV, 0, 1)
79abc24d86SMichael Davidsaver FIELD(V7M_CONTROL, SPSEL, 1, 1)
80abc24d86SMichael Davidsaver FIELD(V7M_CONTROL, FPCA, 2, 1)
813e3fa230SPeter Maydell FIELD(V7M_CONTROL, SFPA, 3, 1)
82abc24d86SMichael Davidsaver 
834d1e7a47SPeter Maydell /* Bit definitions for v7M exception return payload */
844d1e7a47SPeter Maydell FIELD(V7M_EXCRET, ES, 0, 1)
854d1e7a47SPeter Maydell FIELD(V7M_EXCRET, RES0, 1, 1)
864d1e7a47SPeter Maydell FIELD(V7M_EXCRET, SPSEL, 2, 1)
874d1e7a47SPeter Maydell FIELD(V7M_EXCRET, MODE, 3, 1)
884d1e7a47SPeter Maydell FIELD(V7M_EXCRET, FTYPE, 4, 1)
894d1e7a47SPeter Maydell FIELD(V7M_EXCRET, DCRS, 5, 1)
904d1e7a47SPeter Maydell FIELD(V7M_EXCRET, S, 6, 1)
914d1e7a47SPeter Maydell FIELD(V7M_EXCRET, RES1, 7, 25) /* including the must-be-1 prefix */
924d1e7a47SPeter Maydell 
93d02a8698SPeter Maydell /* Minimum value which is a magic number for exception return */
94d02a8698SPeter Maydell #define EXC_RETURN_MIN_MAGIC 0xff000000
95d02a8698SPeter Maydell /* Minimum number which is a magic number for function or exception return
96d02a8698SPeter Maydell  * when using v8M security extension
97d02a8698SPeter Maydell  */
98d02a8698SPeter Maydell #define FNC_RETURN_MIN_MAGIC 0xfefffffe
99d02a8698SPeter Maydell 
1008b7a5bbeSRichard Henderson /* Bit definitions for DBGWCRn and DBGWCRn_EL1 */
1018b7a5bbeSRichard Henderson FIELD(DBGWCR, E, 0, 1)
1028b7a5bbeSRichard Henderson FIELD(DBGWCR, PAC, 1, 2)
1038b7a5bbeSRichard Henderson FIELD(DBGWCR, LSC, 3, 2)
1048b7a5bbeSRichard Henderson FIELD(DBGWCR, BAS, 5, 8)
1058b7a5bbeSRichard Henderson FIELD(DBGWCR, HMC, 13, 1)
1068b7a5bbeSRichard Henderson FIELD(DBGWCR, SSC, 14, 2)
1078b7a5bbeSRichard Henderson FIELD(DBGWCR, LBN, 16, 4)
1088b7a5bbeSRichard Henderson FIELD(DBGWCR, WT, 20, 1)
1098b7a5bbeSRichard Henderson FIELD(DBGWCR, MASK, 24, 5)
1108b7a5bbeSRichard Henderson FIELD(DBGWCR, SSCE, 29, 1)
1118b7a5bbeSRichard Henderson 
11220b42c33SPeter Maydell #define VTCR_NSW (1u << 29)
11320b42c33SPeter Maydell #define VTCR_NSA (1u << 30)
11420b42c33SPeter Maydell #define VSTCR_SW VTCR_NSW
11520b42c33SPeter Maydell #define VSTCR_SA VTCR_NSA
11620b42c33SPeter Maydell 
11720b42c33SPeter Maydell /* Bit definitions for CPACR (AArch32 only) */
11820b42c33SPeter Maydell FIELD(CPACR, CP10, 20, 2)
11920b42c33SPeter Maydell FIELD(CPACR, CP11, 22, 2)
12020b42c33SPeter Maydell FIELD(CPACR, TRCDIS, 28, 1)    /* matches CPACR_EL1.TTA */
12120b42c33SPeter Maydell FIELD(CPACR, D32DIS, 30, 1)    /* up to v7; RAZ in v8 */
12220b42c33SPeter Maydell FIELD(CPACR, ASEDIS, 31, 1)
12320b42c33SPeter Maydell 
12420b42c33SPeter Maydell /* Bit definitions for CPACR_EL1 (AArch64 only) */
12520b42c33SPeter Maydell FIELD(CPACR_EL1, ZEN, 16, 2)
12620b42c33SPeter Maydell FIELD(CPACR_EL1, FPEN, 20, 2)
12720b42c33SPeter Maydell FIELD(CPACR_EL1, SMEN, 24, 2)
12820b42c33SPeter Maydell FIELD(CPACR_EL1, TTA, 28, 1)   /* matches CPACR.TRCDIS */
12920b42c33SPeter Maydell 
13020b42c33SPeter Maydell /* Bit definitions for HCPTR (AArch32 only) */
13120b42c33SPeter Maydell FIELD(HCPTR, TCP10, 10, 1)
13220b42c33SPeter Maydell FIELD(HCPTR, TCP11, 11, 1)
13320b42c33SPeter Maydell FIELD(HCPTR, TASE, 15, 1)
13420b42c33SPeter Maydell FIELD(HCPTR, TTA, 20, 1)
13520b42c33SPeter Maydell FIELD(HCPTR, TAM, 30, 1)       /* matches CPTR_EL2.TAM */
13620b42c33SPeter Maydell FIELD(HCPTR, TCPAC, 31, 1)     /* matches CPTR_EL2.TCPAC */
13720b42c33SPeter Maydell 
13820b42c33SPeter Maydell /* Bit definitions for CPTR_EL2 (AArch64 only) */
13920b42c33SPeter Maydell FIELD(CPTR_EL2, TZ, 8, 1)      /* !E2H */
14020b42c33SPeter Maydell FIELD(CPTR_EL2, TFP, 10, 1)    /* !E2H, matches HCPTR.TCP10 */
14120b42c33SPeter Maydell FIELD(CPTR_EL2, TSM, 12, 1)    /* !E2H */
14220b42c33SPeter Maydell FIELD(CPTR_EL2, ZEN, 16, 2)    /* E2H */
14320b42c33SPeter Maydell FIELD(CPTR_EL2, FPEN, 20, 2)   /* E2H */
14420b42c33SPeter Maydell FIELD(CPTR_EL2, SMEN, 24, 2)   /* E2H */
14520b42c33SPeter Maydell FIELD(CPTR_EL2, TTA, 28, 1)
14620b42c33SPeter Maydell FIELD(CPTR_EL2, TAM, 30, 1)    /* matches HCPTR.TAM */
14720b42c33SPeter Maydell FIELD(CPTR_EL2, TCPAC, 31, 1)  /* matches HCPTR.TCPAC */
14820b42c33SPeter Maydell 
14920b42c33SPeter Maydell /* Bit definitions for CPTR_EL3 (AArch64 only) */
15020b42c33SPeter Maydell FIELD(CPTR_EL3, EZ, 8, 1)
15120b42c33SPeter Maydell FIELD(CPTR_EL3, TFP, 10, 1)
15220b42c33SPeter Maydell FIELD(CPTR_EL3, ESM, 12, 1)
15320b42c33SPeter Maydell FIELD(CPTR_EL3, TTA, 20, 1)
15420b42c33SPeter Maydell FIELD(CPTR_EL3, TAM, 30, 1)
15520b42c33SPeter Maydell FIELD(CPTR_EL3, TCPAC, 31, 1)
15620b42c33SPeter Maydell 
15720b42c33SPeter Maydell #define MDCR_MTPME    (1U << 28)
15820b42c33SPeter Maydell #define MDCR_TDCC     (1U << 27)
15920b42c33SPeter Maydell #define MDCR_HLP      (1U << 26)  /* MDCR_EL2 */
16020b42c33SPeter Maydell #define MDCR_SCCD     (1U << 23)  /* MDCR_EL3 */
16120b42c33SPeter Maydell #define MDCR_HCCD     (1U << 23)  /* MDCR_EL2 */
16220b42c33SPeter Maydell #define MDCR_EPMAD    (1U << 21)
16320b42c33SPeter Maydell #define MDCR_EDAD     (1U << 20)
16420b42c33SPeter Maydell #define MDCR_TTRF     (1U << 19)
16520b42c33SPeter Maydell #define MDCR_STE      (1U << 18)  /* MDCR_EL3 */
16620b42c33SPeter Maydell #define MDCR_SPME     (1U << 17)  /* MDCR_EL3 */
16720b42c33SPeter Maydell #define MDCR_HPMD     (1U << 17)  /* MDCR_EL2 */
16820b42c33SPeter Maydell #define MDCR_SDD      (1U << 16)
16920b42c33SPeter Maydell #define MDCR_SPD      (3U << 14)
17020b42c33SPeter Maydell #define MDCR_TDRA     (1U << 11)
17120b42c33SPeter Maydell #define MDCR_TDOSA    (1U << 10)
17220b42c33SPeter Maydell #define MDCR_TDA      (1U << 9)
17320b42c33SPeter Maydell #define MDCR_TDE      (1U << 8)
17420b42c33SPeter Maydell #define MDCR_HPME     (1U << 7)
17520b42c33SPeter Maydell #define MDCR_TPM      (1U << 6)
17620b42c33SPeter Maydell #define MDCR_TPMCR    (1U << 5)
17720b42c33SPeter Maydell #define MDCR_HPMN     (0x1fU)
17820b42c33SPeter Maydell 
17920b42c33SPeter Maydell /* Not all of the MDCR_EL3 bits are present in the 32-bit SDCR */
18020b42c33SPeter Maydell #define SDCR_VALID_MASK (MDCR_MTPME | MDCR_TDCC | MDCR_SCCD | \
18120b42c33SPeter Maydell                          MDCR_EPMAD | MDCR_EDAD | MDCR_TTRF | \
18220b42c33SPeter Maydell                          MDCR_STE | MDCR_SPME | MDCR_SPD)
18320b42c33SPeter Maydell 
18420b42c33SPeter Maydell #define TTBCR_N      (7U << 0) /* TTBCR.EAE==0 */
18520b42c33SPeter Maydell #define TTBCR_T0SZ   (7U << 0) /* TTBCR.EAE==1 */
18620b42c33SPeter Maydell #define TTBCR_PD0    (1U << 4)
18720b42c33SPeter Maydell #define TTBCR_PD1    (1U << 5)
18820b42c33SPeter Maydell #define TTBCR_EPD0   (1U << 7)
18920b42c33SPeter Maydell #define TTBCR_IRGN0  (3U << 8)
19020b42c33SPeter Maydell #define TTBCR_ORGN0  (3U << 10)
19120b42c33SPeter Maydell #define TTBCR_SH0    (3U << 12)
19220b42c33SPeter Maydell #define TTBCR_T1SZ   (3U << 16)
19320b42c33SPeter Maydell #define TTBCR_A1     (1U << 22)
19420b42c33SPeter Maydell #define TTBCR_EPD1   (1U << 23)
19520b42c33SPeter Maydell #define TTBCR_IRGN1  (3U << 24)
19620b42c33SPeter Maydell #define TTBCR_ORGN1  (3U << 26)
19720b42c33SPeter Maydell #define TTBCR_SH1    (1U << 28)
19820b42c33SPeter Maydell #define TTBCR_EAE    (1U << 31)
19920b42c33SPeter Maydell 
20020b42c33SPeter Maydell FIELD(VTCR, T0SZ, 0, 6)
20120b42c33SPeter Maydell FIELD(VTCR, SL0, 6, 2)
20220b42c33SPeter Maydell FIELD(VTCR, IRGN0, 8, 2)
20320b42c33SPeter Maydell FIELD(VTCR, ORGN0, 10, 2)
20420b42c33SPeter Maydell FIELD(VTCR, SH0, 12, 2)
20520b42c33SPeter Maydell FIELD(VTCR, TG0, 14, 2)
20620b42c33SPeter Maydell FIELD(VTCR, PS, 16, 3)
20720b42c33SPeter Maydell FIELD(VTCR, VS, 19, 1)
20820b42c33SPeter Maydell FIELD(VTCR, HA, 21, 1)
20920b42c33SPeter Maydell FIELD(VTCR, HD, 22, 1)
21020b42c33SPeter Maydell FIELD(VTCR, HWU59, 25, 1)
21120b42c33SPeter Maydell FIELD(VTCR, HWU60, 26, 1)
21220b42c33SPeter Maydell FIELD(VTCR, HWU61, 27, 1)
21320b42c33SPeter Maydell FIELD(VTCR, HWU62, 28, 1)
21420b42c33SPeter Maydell FIELD(VTCR, NSW, 29, 1)
21520b42c33SPeter Maydell FIELD(VTCR, NSA, 30, 1)
21620b42c33SPeter Maydell FIELD(VTCR, DS, 32, 1)
21720b42c33SPeter Maydell FIELD(VTCR, SL2, 33, 1)
21820b42c33SPeter Maydell 
21920b42c33SPeter Maydell #define HCRX_ENAS0    (1ULL << 0)
22020b42c33SPeter Maydell #define HCRX_ENALS    (1ULL << 1)
22120b42c33SPeter Maydell #define HCRX_ENASR    (1ULL << 2)
22220b42c33SPeter Maydell #define HCRX_FNXS     (1ULL << 3)
22320b42c33SPeter Maydell #define HCRX_FGTNXS   (1ULL << 4)
22420b42c33SPeter Maydell #define HCRX_SMPME    (1ULL << 5)
22520b42c33SPeter Maydell #define HCRX_TALLINT  (1ULL << 6)
22620b42c33SPeter Maydell #define HCRX_VINMI    (1ULL << 7)
22720b42c33SPeter Maydell #define HCRX_VFNMI    (1ULL << 8)
22820b42c33SPeter Maydell #define HCRX_CMOW     (1ULL << 9)
22920b42c33SPeter Maydell #define HCRX_MCE2     (1ULL << 10)
23020b42c33SPeter Maydell #define HCRX_MSCEN    (1ULL << 11)
23120b42c33SPeter Maydell 
23220b42c33SPeter Maydell #define HPFAR_NS      (1ULL << 63)
23320b42c33SPeter Maydell 
23420b42c33SPeter Maydell #define HSTR_TTEE (1 << 16)
23520b42c33SPeter Maydell #define HSTR_TJDBX (1 << 17)
23620b42c33SPeter Maydell 
237c6b0ecb2SPeter Maydell /*
238c6b0ecb2SPeter Maydell  * Depending on the value of HCR_EL2.E2H, bits 0 and 1
239c6b0ecb2SPeter Maydell  * have different bit definitions, and EL1PCTEN might be
240c6b0ecb2SPeter Maydell  * bit 0 or bit 10. We use _E2H1 and _E2H0 suffixes to
241c6b0ecb2SPeter Maydell  * disambiguate if necessary.
242c6b0ecb2SPeter Maydell  */
243c6b0ecb2SPeter Maydell FIELD(CNTHCTL, EL0PCTEN_E2H1, 0, 1)
244c6b0ecb2SPeter Maydell FIELD(CNTHCTL, EL0VCTEN_E2H1, 1, 1)
245c6b0ecb2SPeter Maydell FIELD(CNTHCTL, EL1PCTEN_E2H0, 0, 1)
246c6b0ecb2SPeter Maydell FIELD(CNTHCTL, EL1PCEN_E2H0, 1, 1)
247c6b0ecb2SPeter Maydell FIELD(CNTHCTL, EVNTEN, 2, 1)
248c6b0ecb2SPeter Maydell FIELD(CNTHCTL, EVNTDIR, 3, 1)
249c6b0ecb2SPeter Maydell FIELD(CNTHCTL, EVNTI, 4, 4)
250c6b0ecb2SPeter Maydell FIELD(CNTHCTL, EL0VTEN, 8, 1)
251c6b0ecb2SPeter Maydell FIELD(CNTHCTL, EL0PTEN, 9, 1)
252c6b0ecb2SPeter Maydell FIELD(CNTHCTL, EL1PCTEN_E2H1, 10, 1)
253c6b0ecb2SPeter Maydell FIELD(CNTHCTL, EL1PTEN, 11, 1)
254c6b0ecb2SPeter Maydell FIELD(CNTHCTL, ECV, 12, 1)
255c6b0ecb2SPeter Maydell FIELD(CNTHCTL, EL1TVT, 13, 1)
256c6b0ecb2SPeter Maydell FIELD(CNTHCTL, EL1TVCT, 14, 1)
257c6b0ecb2SPeter Maydell FIELD(CNTHCTL, EL1NVPCT, 15, 1)
258c6b0ecb2SPeter Maydell FIELD(CNTHCTL, EL1NVVCT, 16, 1)
259c6b0ecb2SPeter Maydell FIELD(CNTHCTL, EVNTIS, 17, 1)
260c6b0ecb2SPeter Maydell FIELD(CNTHCTL, CNTVMASK, 18, 1)
261c6b0ecb2SPeter Maydell FIELD(CNTHCTL, CNTPMASK, 19, 1)
26220b42c33SPeter Maydell 
26335337cc3SPeter Maydell /* We use a few fake FSR values for internal purposes in M profile.
26435337cc3SPeter Maydell  * M profile cores don't have A/R format FSRs, but currently our
26535337cc3SPeter Maydell  * get_phys_addr() code assumes A/R profile and reports failures via
26635337cc3SPeter Maydell  * an A/R format FSR value. We then translate that into the proper
26735337cc3SPeter Maydell  * M profile exception and FSR status bit in arm_v7m_cpu_do_interrupt().
26835337cc3SPeter Maydell  * Mostly the FSR values we use for this are those defined for v7PMSA,
26935337cc3SPeter Maydell  * since we share some of that codepath. A few kinds of fault are
27035337cc3SPeter Maydell  * only for M profile and have no A/R equivalent, though, so we have
27135337cc3SPeter Maydell  * to pick a value from the reserved range (which we never otherwise
27235337cc3SPeter Maydell  * generate) to use for these.
27335337cc3SPeter Maydell  * These values will never be visible to the guest.
27435337cc3SPeter Maydell  */
27535337cc3SPeter Maydell #define M_FAKE_FSR_NSC_EXEC 0xf /* NS executing in S&NSC memory */
27635337cc3SPeter Maydell #define M_FAKE_FSR_SFAULT 0xe /* SecureFault INVTRAN, INVEP or AUVIOL */
27735337cc3SPeter Maydell 
278597610ebSPeter Maydell /**
279597610ebSPeter Maydell  * raise_exception: Raise the specified exception.
280597610ebSPeter Maydell  * Raise a guest exception with the specified value, syndrome register
281597610ebSPeter Maydell  * and target exception level. This should be called from helper functions,
282597610ebSPeter Maydell  * and never returns because we will longjump back up to the CPU main loop.
283597610ebSPeter Maydell  */
2848905770bSMarc-André Lureau G_NORETURN void raise_exception(CPUARMState *env, uint32_t excp,
285597610ebSPeter Maydell                                 uint32_t syndrome, uint32_t target_el);
286597610ebSPeter Maydell 
287fcf5ef2aSThomas Huth /*
2887469f6c6SRichard Henderson  * Similarly, but also use unwinding to restore cpu state.
2897469f6c6SRichard Henderson  */
2908905770bSMarc-André Lureau G_NORETURN void raise_exception_ra(CPUARMState *env, uint32_t excp,
2917469f6c6SRichard Henderson                                       uint32_t syndrome, uint32_t target_el,
2927469f6c6SRichard Henderson                                       uintptr_t ra);
2937469f6c6SRichard Henderson 
2947469f6c6SRichard Henderson /*
295fcf5ef2aSThomas Huth  * For AArch64, map a given EL to an index in the banked_spsr array.
296fcf5ef2aSThomas Huth  * Note that this mapping and the AArch32 mapping defined in bank_number()
297fcf5ef2aSThomas Huth  * must agree such that the AArch64<->AArch32 SPSRs have the architecturally
298fcf5ef2aSThomas Huth  * mandated mapping between each other.
299fcf5ef2aSThomas Huth  */
aarch64_banked_spsr_index(unsigned int el)300fcf5ef2aSThomas Huth static inline unsigned int aarch64_banked_spsr_index(unsigned int el)
301fcf5ef2aSThomas Huth {
302fcf5ef2aSThomas Huth     static const unsigned int map[4] = {
303fcf5ef2aSThomas Huth         [1] = BANK_SVC, /* EL1.  */
304fcf5ef2aSThomas Huth         [2] = BANK_HYP, /* EL2.  */
305fcf5ef2aSThomas Huth         [3] = BANK_MON, /* EL3.  */
306fcf5ef2aSThomas Huth     };
307fcf5ef2aSThomas Huth     assert(el >= 1 && el <= 3);
308fcf5ef2aSThomas Huth     return map[el];
309fcf5ef2aSThomas Huth }
310fcf5ef2aSThomas Huth 
311fcf5ef2aSThomas Huth /* Map CPU modes onto saved register banks.  */
bank_number(int mode)312fcf5ef2aSThomas Huth static inline int bank_number(int mode)
313fcf5ef2aSThomas Huth {
314fcf5ef2aSThomas Huth     switch (mode) {
315fcf5ef2aSThomas Huth     case ARM_CPU_MODE_USR:
316fcf5ef2aSThomas Huth     case ARM_CPU_MODE_SYS:
317fcf5ef2aSThomas Huth         return BANK_USRSYS;
318fcf5ef2aSThomas Huth     case ARM_CPU_MODE_SVC:
319fcf5ef2aSThomas Huth         return BANK_SVC;
320fcf5ef2aSThomas Huth     case ARM_CPU_MODE_ABT:
321fcf5ef2aSThomas Huth         return BANK_ABT;
322fcf5ef2aSThomas Huth     case ARM_CPU_MODE_UND:
323fcf5ef2aSThomas Huth         return BANK_UND;
324fcf5ef2aSThomas Huth     case ARM_CPU_MODE_IRQ:
325fcf5ef2aSThomas Huth         return BANK_IRQ;
326fcf5ef2aSThomas Huth     case ARM_CPU_MODE_FIQ:
327fcf5ef2aSThomas Huth         return BANK_FIQ;
328fcf5ef2aSThomas Huth     case ARM_CPU_MODE_HYP:
329fcf5ef2aSThomas Huth         return BANK_HYP;
330fcf5ef2aSThomas Huth     case ARM_CPU_MODE_MON:
331fcf5ef2aSThomas Huth         return BANK_MON;
332fcf5ef2aSThomas Huth     }
333fcf5ef2aSThomas Huth     g_assert_not_reached();
334fcf5ef2aSThomas Huth }
335fcf5ef2aSThomas Huth 
336593cfa2bSPeter Maydell /**
337593cfa2bSPeter Maydell  * r14_bank_number: Map CPU mode onto register bank for r14
338593cfa2bSPeter Maydell  *
339593cfa2bSPeter Maydell  * Given an AArch32 CPU mode, return the index into the saved register
340593cfa2bSPeter Maydell  * banks to use for the R14 (LR) in that mode. This is the same as
341593cfa2bSPeter Maydell  * bank_number(), except for the special case of Hyp mode, where
342593cfa2bSPeter Maydell  * R14 is shared with USR and SYS, unlike its R13 and SPSR.
343593cfa2bSPeter Maydell  * This should be used as the index into env->banked_r14[], and
344593cfa2bSPeter Maydell  * bank_number() used for the index into env->banked_r13[] and
345593cfa2bSPeter Maydell  * env->banked_spsr[].
346593cfa2bSPeter Maydell  */
r14_bank_number(int mode)347593cfa2bSPeter Maydell static inline int r14_bank_number(int mode)
348593cfa2bSPeter Maydell {
349593cfa2bSPeter Maydell     return (mode == ARM_CPU_MODE_HYP) ? BANK_USRSYS : bank_number(mode);
350593cfa2bSPeter Maydell }
351593cfa2bSPeter Maydell 
352f6524ddfSPhilippe Mathieu-Daudé void arm_cpu_register(const ARMCPUInfo *info);
353f6524ddfSPhilippe Mathieu-Daudé void aarch64_cpu_register(const ARMCPUInfo *info);
354f6524ddfSPhilippe Mathieu-Daudé 
355f6524ddfSPhilippe Mathieu-Daudé void register_cp_regs_for_features(ARMCPU *cpu);
356f6524ddfSPhilippe Mathieu-Daudé void init_cpreg_list(ARMCPU *cpu);
357f6524ddfSPhilippe Mathieu-Daudé 
358fcf5ef2aSThomas Huth void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
359fcf5ef2aSThomas Huth void arm_translate_init(void);
360fcf5ef2aSThomas Huth 
361f81198ceSGustavo Romero void arm_cpu_register_gdb_commands(ARMCPU *cpu);
362e8122a71SAlex Bennée void aarch64_cpu_register_gdb_commands(ARMCPU *cpu, GString *,
363e8122a71SAlex Bennée                                        GPtrArray *, GPtrArray *);
364f81198ceSGustavo Romero 
365475e56b6SEvgeny Ermakov void arm_restore_state_to_opc(CPUState *cs,
366475e56b6SEvgeny Ermakov                               const TranslationBlock *tb,
367475e56b6SEvgeny Ermakov                               const uint64_t *data);
368475e56b6SEvgeny Ermakov 
36978271684SClaudio Fontana #ifdef CONFIG_TCG
3708349d2aeSRichard Henderson void arm_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);
371fcee3707SPeter Maydell 
372fcee3707SPeter Maydell /* Our implementation of TCGCPUOps::cpu_exec_halt */
373fcee3707SPeter Maydell bool arm_cpu_exec_halt(CPUState *cs);
37478271684SClaudio Fontana #endif /* CONFIG_TCG */
37578271684SClaudio Fontana 
/*
 * ARM FP rounding modes. The enumerators are used as indices into
 * arm_rmode_to_sf_map[] (see arm_rmode_to_sf() below), so their
 * order must match that table.
 */
typedef enum ARMFPRounding {
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
    FPROUNDING_ZERO,
    FPROUNDING_TIEAWAY,
    FPROUNDING_ODD
} ARMFPRounding;
384fcf5ef2aSThomas Huth 
3856ce21abdSRichard Henderson extern const FloatRoundMode arm_rmode_to_sf_map[6];
3866ce21abdSRichard Henderson 
/*
 * arm_rmode_to_sf:
 * @rmode: ARM FP rounding mode
 *
 * Convert an ARMFPRounding value to the corresponding softfloat
 * FloatRoundMode via the arm_rmode_to_sf_map[] lookup table.
 */
static inline FloatRoundMode arm_rmode_to_sf(ARMFPRounding rmode)
{
    assert((unsigned)rmode < ARRAY_SIZE(arm_rmode_to_sf_map));
    return arm_rmode_to_sf_map[rmode];
}
392fcf5ef2aSThomas Huth 
/*
 * aarch64_save_sp:
 * @env: CPU state
 * @el: exception level of the SP bank to save into
 *
 * Write the working stack pointer (xregs[31]) back to the banked
 * SP: sp_el[el] when PSTATE.SP is set, sp_el[0] (SP_EL0) otherwise.
 */
static inline void aarch64_save_sp(CPUARMState *env, int el)
{
    int bank = (env->pstate & PSTATE_SP) ? el : 0;

    env->sp_el[bank] = env->xregs[31];
}
401fcf5ef2aSThomas Huth 
/*
 * aarch64_restore_sp:
 * @env: CPU state
 * @el: exception level of the SP bank to restore from
 *
 * Load the working stack pointer (xregs[31]) from the banked SP:
 * sp_el[el] when PSTATE.SP is set, sp_el[0] (SP_EL0) otherwise.
 */
static inline void aarch64_restore_sp(CPUARMState *env, int el)
{
    int bank = (env->pstate & PSTATE_SP) ? el : 0;

    env->xregs[31] = env->sp_el[bank];
}
410fcf5ef2aSThomas Huth 
/*
 * update_spsel:
 * @env: CPU state
 * @imm: new SPSel value (only bit 0 is used)
 *
 * Update PSTATE.SPSel, saving the current working stack pointer into
 * the outgoing bank and reloading xregs[31] from the incoming bank.
 * The save/flip/restore ordering below is load-bearing.
 */
static inline void update_spsel(CPUARMState *env, uint32_t imm)
{
    unsigned int cur_el = arm_current_el(env);
    /* Update PSTATE SPSel bit; this requires us to update the
     * working stack pointer in xregs[31].
     */
    if (!((imm ^ env->pstate) & PSTATE_SP)) {
        /* No change to SPSel: nothing to do. */
        return;
    }
    aarch64_save_sp(env, cur_el);
    env->pstate = deposit32(env->pstate, 0, 1, imm);

    /* We rely on illegal updates to SPsel from EL0 to get trapped
     * at translation time.
     */
    assert(cur_el >= 1 && cur_el <= 3);
    aarch64_restore_sp(env, cur_el);
}
429fcf5ef2aSThomas Huth 
430fcf5ef2aSThomas Huth /*
431fcf5ef2aSThomas Huth  * arm_pamax
432fcf5ef2aSThomas Huth  * @cpu: ARMCPU
433fcf5ef2aSThomas Huth  *
434fcf5ef2aSThomas Huth  * Returns the implementation defined bit-width of physical addresses.
435fcf5ef2aSThomas Huth  * The ARMv8 reference manuals refer to this as PAMax().
436fcf5ef2aSThomas Huth  */
43771a77257SRichard Henderson unsigned int arm_pamax(ARMCPU *cpu);
438fcf5ef2aSThomas Huth 
439fcf5ef2aSThomas Huth /*
440fcf5ef2aSThomas Huth  * round_down_to_parange_index
441fcf5ef2aSThomas Huth  * @bit_size: uint8_t
442fcf5ef2aSThomas Huth  *
443fcf5ef2aSThomas Huth  * Rounds down the bit_size supplied to the first supported ARM physical
444fcf5ef2aSThomas Huth  * address range and returns the index for this. The index is intended to
445cb4a0a34SPeter Maydell  * be used to set ID_AA64MMFR0_EL1's PARANGE bits.
446452c67a4STobias Röhmel  */
447452c67a4STobias Röhmel uint8_t round_down_to_parange_index(uint8_t bit_size);
448452c67a4STobias Röhmel 
449452c67a4STobias Röhmel /*
450fcf5ef2aSThomas Huth  * round_down_to_parange_bit_size
451cb4a0a34SPeter Maydell  * @bit_size: uint8_t
452fcf5ef2aSThomas Huth  *
453fcf5ef2aSThomas Huth  * Rounds down the bit_size supplied to the first supported ARM physical
454fcf5ef2aSThomas Huth  * address range bit size and returns this.
455fcf5ef2aSThomas Huth  */
456fcf5ef2aSThomas Huth uint8_t round_down_to_parange_bit_size(uint8_t bit_size);
457fcf5ef2aSThomas Huth 
458fcf5ef2aSThomas Huth /* Return true if extended addresses are enabled.
459fcf5ef2aSThomas Huth  * This is always the case if our translation regime is 64 bit,
460fcf5ef2aSThomas Huth  * but depends on TTBCR.EAE for 32 bit.
461fcf5ef2aSThomas Huth  */
static inline bool extended_addresses_enabled(CPUARMState *env)
{
    /* v8 PMSA (R/M profile) always uses the extended format. */
    if (arm_feature(env, ARM_FEATURE_PMSA) &&
        arm_feature(env, ARM_FEATURE_V8)) {
        return true;
    }
    /* A 64-bit EL1 translation regime is always extended. */
    if (arm_el_is_aa64(env, 1)) {
        return true;
    }
    /* For 32-bit EL1 it depends on TTBCR.EAE (LPAE only). */
    {
        uint64_t tcr = env->cp15.tcr_el[arm_is_secure(env) ? 3 : 1];
        return arm_feature(env, ARM_FEATURE_LPAE) && (tcr & TTBCR_EAE);
    }
}
472fcf5ef2aSThomas Huth 
473b00d86bcSRichard Henderson /* Update a QEMU watchpoint based on the information the guest has set in the
474b00d86bcSRichard Henderson  * DBGWCR<n>_EL1 and DBGWVR<n>_EL1 registers.
475b00d86bcSRichard Henderson  */
476fcf5ef2aSThomas Huth void hw_watchpoint_update(ARMCPU *cpu, int n);
477fcf5ef2aSThomas Huth /* Update the QEMU watchpoints for every guest watchpoint. This does a
478fcf5ef2aSThomas Huth  * complete delete-and-reinstate of the QEMU watchpoint list and so is
47940612000SJulian Brown  * suitable for use after migration or on reset.
48040612000SJulian Brown  */
48140612000SJulian Brown void hw_watchpoint_update_all(ARMCPU *cpu);
48240612000SJulian Brown /* Update a QEMU breakpoint based on the information the guest has set in the
48340612000SJulian Brown  * DBGBCR<n>_EL1 and DBGBVR<n>_EL1 registers.
484fcf5ef2aSThomas Huth  */
485fcf5ef2aSThomas Huth void hw_breakpoint_update(ARMCPU *cpu, int n);
486fcf5ef2aSThomas Huth /* Update the QEMU breakpoints for every guest breakpoint. This does a
48721fbea8cSPhilippe Mathieu-Daudé  * complete delete-and-reinstate of the QEMU breakpoint list and so is
488fcf5ef2aSThomas Huth  * suitable for use after migration or on reset.
489fcf5ef2aSThomas Huth  */
490fcf5ef2aSThomas Huth void hw_breakpoint_update_all(ARMCPU *cpu);
491fcf5ef2aSThomas Huth 
49221fbea8cSPhilippe Mathieu-Daudé /* Callback function for checking if a breakpoint should trigger. */
49321fbea8cSPhilippe Mathieu-Daudé bool arm_debug_check_breakpoint(CPUState *cs);
49421fbea8cSPhilippe Mathieu-Daudé 
49521fbea8cSPhilippe Mathieu-Daudé /* Callback function for checking if a watchpoint should trigger. */
496fcf5ef2aSThomas Huth bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp);
497fcf5ef2aSThomas Huth 
498fcf5ef2aSThomas Huth /* Adjust addresses (in BE32 mode) before testing against watchpoint
499fcf5ef2aSThomas Huth  * addresses.
500fcf5ef2aSThomas Huth  */
501fcf5ef2aSThomas Huth vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len);
502fcf5ef2aSThomas Huth 
503fcf5ef2aSThomas Huth /* Callback function for when a watchpoint or breakpoint triggers. */
504dc3c4c14SPeter Maydell void arm_debug_excp_handler(CPUState *cs);
505dc3c4c14SPeter Maydell 
506dc3c4c14SPeter Maydell #if defined(CONFIG_USER_ONLY) || !defined(CONFIG_TCG)
/* Stub for user-only or non-TCG builds: PSCI calls are never recognized. */
static inline bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
{
    return false;
}
/*
 * Stub for user-only or non-TCG builds: unreachable because the
 * arm_is_psci_call() stub above always returns false.
 */
static inline void arm_handle_psci_call(ARMCPU *cpu)
{
    g_assert_not_reached();
}
5151fa498feSPeter Maydell #else
5161fa498feSPeter Maydell /* Return true if the r0/x0 value indicates that this SMC/HVC is a PSCI call. */
5171fa498feSPeter Maydell bool arm_is_psci_call(ARMCPU *cpu, int excp_type);
5181fa498feSPeter Maydell /* Actually handle a PSCI call */
5191fa498feSPeter Maydell void arm_handle_psci_call(ARMCPU *cpu);
5201fa498feSPeter Maydell #endif
5211fa498feSPeter Maydell 
5221fa498feSPeter Maydell /**
5231fa498feSPeter Maydell  * arm_clear_exclusive: clear the exclusive monitor
5241fa498feSPeter Maydell  * @env: CPU env
5251fa498feSPeter Maydell  * Clear the CPU's exclusive monitor, like the guest CLREX instruction.
5261fa498feSPeter Maydell  */
static inline void arm_clear_exclusive(CPUARMState *env)
{
    /* -1 is used as the "monitor cleared" sentinel address. */
    env->exclusive_addr = -1;
}
5311fa498feSPeter Maydell 
5321fa498feSPeter Maydell /**
5331fa498feSPeter Maydell  * ARMFaultType: type of an ARM MMU fault
5341fa498feSPeter Maydell  * This corresponds to the v8A pseudocode's Fault enumeration,
535f0a398a2SRichard Henderson  * with extensions for QEMU internal conditions.
5361fa498feSPeter Maydell  */
typedef enum ARMFaultType {
    ARMFault_None,
    ARMFault_AccessFlag,
    ARMFault_Alignment,
    ARMFault_Background,
    ARMFault_Domain,
    ARMFault_Permission,
    ARMFault_Translation,
    ARMFault_AddressSize,
    ARMFault_SyncExternal,
    ARMFault_SyncExternalOnWalk,
    ARMFault_SyncParity,
    ARMFault_SyncParityOnWalk,
    ARMFault_AsyncParity,
    ARMFault_AsyncExternal,
    ARMFault_Debug,
    ARMFault_TLBConflict,
    ARMFault_UnsuppAtomicUpdate,
    ARMFault_Lockdown,
    ARMFault_Exclusive,
    ARMFault_ICacheMaint,
    ARMFault_QEMU_NSCExec, /* v8M: NS executing in S&NSC memory */
    ARMFault_QEMU_SFault, /* v8M: SecureFault INVTRAN, INVEP or AUVIOL */
    ARMFault_GPCFOnWalk,
    ARMFault_GPCFOnOutput,
} ARMFaultType;
563fcf5ef2aSThomas Huth 
/*
 * ARMGPCF: subtype of a Granule Protection Check fault, stored in
 * ARMMMUFaultInfo.gpcf to qualify ARMFault_GPCFOn{Walk,Output}.
 */
typedef enum ARMGPCF {
    GPCF_None,
    GPCF_AddressSize,
    GPCF_Walk,
    GPCF_EABT,
    GPCF_Fail,
} ARMGPCF;
571fcf5ef2aSThomas Huth 
57211b76fdaSRichard Henderson /**
57311b76fdaSRichard Henderson  * ARMMMUFaultInfo: Information describing an ARM MMU Fault
5741fa498feSPeter Maydell  * @type: Type of fault
5751fa498feSPeter Maydell  * @gpcf: Subtype of ARMFault_GPCFOn{Walk,Output}.
576fcf5ef2aSThomas Huth  * @level: Table walk level (for translation, access flag and permission faults)
577fcf5ef2aSThomas Huth  * @domain: Domain of the fault address (for non-LPAE CPUs only)
5789861248fSRémi Denis-Courmont  * @s2addr: Address that caused a fault at stage 2
579c528af7aSPeter Maydell  * @paddr: physical address that caused a fault for gpc
580fcf5ef2aSThomas Huth  * @paddr_space: physical address space that caused a fault for gpc
581fcf5ef2aSThomas Huth  * @stage2: True if we faulted at stage 2
5821fa498feSPeter Maydell  * @s1ptw: True if we faulted at stage 2 while doing a stage 1 page-table walk
5831fa498feSPeter Maydell  * @s1ns: True if we faulted on a non-secure IPA while in secure state
5841fa498feSPeter Maydell  * @ea: True if we should set the EA (external abort type) bit in syndrome
5851fa498feSPeter Maydell  */
typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;
struct ARMMMUFaultInfo {
    ARMFaultType type;            /* type of fault */
    ARMGPCF gpcf;                 /* subtype of ARMFault_GPCFOn{Walk,Output} */
    target_ulong s2addr;          /* address that caused a fault at stage 2 */
    target_ulong paddr;           /* physical address that faulted, for GPC */
    ARMSecuritySpace paddr_space; /* physical address space of @paddr */
    int level;                    /* walk level (translation/access/permission) */
    int domain;                   /* domain of the fault address (non-LPAE only) */
    bool stage2;                  /* true if we faulted at stage 2 */
    bool s1ptw;                   /* true if stage 2 fault during a stage 1 walk */
    bool s1ns;                    /* true if fault on NS IPA while in Secure state */
    bool ea;                      /* true if syndrome EA (ext abort type) bit set */
};
6001fa498feSPeter Maydell 
6011fa498feSPeter Maydell /**
6021fa498feSPeter Maydell  * arm_fi_to_sfsc: Convert fault info struct to short-format FSC
6031fa498feSPeter Maydell  * Compare pseudocode EncodeSDFSC(), though unlike that function
6041fa498feSPeter Maydell  * we set up a whole FSR-format code including domain field and
6051fa498feSPeter Maydell  * putting the high bit of the FSC into bit 10.
6061fa498feSPeter Maydell  */
static inline uint32_t arm_fi_to_sfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    /*
     * Map the fault type to the short-descriptor fault status encoding.
     * Values written as 0x4xx have the high FS bit, which we keep in
     * bit 10 of the returned code; the ExT (external abort type) bit
     * goes in bit 12 where relevant.
     */
    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AccessFlag:
        /* Several encodings differ for a level 1 vs a level 2 lookup */
        fsc = fi->level == 1 ? 0x3 : 0x6;
        break;
    case ARMFault_Alignment:
        fsc = 0x1;
        break;
    case ARMFault_Permission:
        fsc = fi->level == 1 ? 0xd : 0xf;
        break;
    case ARMFault_Domain:
        fsc = fi->level == 1 ? 0x9 : 0xb;
        break;
    case ARMFault_Translation:
        fsc = fi->level == 1 ? 0x5 : 0x7;
        break;
    case ARMFault_SyncExternal:
        fsc = 0x8 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        fsc = fi->level == 1 ? 0xc : 0xe;
        fsc |= (fi->ea << 12);
        break;
    case ARMFault_SyncParity:
        fsc = 0x409;
        break;
    case ARMFault_SyncParityOnWalk:
        fsc = fi->level == 1 ? 0x40c : 0x40e;
        break;
    case ARMFault_AsyncParity:
        fsc = 0x408;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x406 | (fi->ea << 12);
        break;
    case ARMFault_Debug:
        fsc = 0x2;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x400;
        break;
    case ARMFault_Lockdown:
        fsc = 0x404;
        break;
    case ARMFault_Exclusive:
        fsc = 0x405;
        break;
    case ARMFault_ICacheMaint:
        fsc = 0x4;
        break;
    case ARMFault_Background:
        fsc = 0x0;
        break;
    case ARMFault_QEMU_NSCExec:
        fsc = M_FAKE_FSR_NSC_EXEC;
        break;
    case ARMFault_QEMU_SFault:
        fsc = M_FAKE_FSR_SFAULT;
        break;
    default:
        /*
         * Other faults can't occur in a context that requires a
         * short-format status code.
         */
        g_assert_not_reached();
    }

    /* Short format carries the fault's domain in bits [7:4]. */
    fsc |= (fi->domain << 4);
    return fsc;
}
68213e481c9SRichard Henderson 
6831fa498feSPeter Maydell /**
6841fa498feSPeter Maydell  * arm_fi_to_lfsc: Convert fault info struct to long-format FSC
68513e481c9SRichard Henderson  * Compare pseudocode EncodeLDFSC(), though unlike that function
68613e481c9SRichard Henderson  * we fill in also the LPAE bit 9 of a DFSR format.
6871fa498feSPeter Maydell  */
static inline uint32_t arm_fi_to_lfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    /*
     * Map the fault type to the long-descriptor fault status encoding.
     * fi->level may be -1 for faults at the level -1 stage of a walk
     * (larger translations); the ExT bit goes in bit 12 where relevant.
     */
    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AddressSize:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b101001;
        } else {
            fsc = fi->level;
        }
        break;
    case ARMFault_AccessFlag:
        assert(fi->level >= 0 && fi->level <= 3);
        fsc = 0b001000 | fi->level;
        break;
    case ARMFault_Permission:
        assert(fi->level >= 0 && fi->level <= 3);
        fsc = 0b001100 | fi->level;
        break;
    case ARMFault_Translation:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b101011;
        } else {
            fsc = 0b000100 | fi->level;
        }
        break;
    case ARMFault_SyncExternal:
        fsc = 0x10 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b010011;
        } else {
            fsc = 0b010100 | fi->level;
        }
        fsc |= fi->ea << 12;
        break;
    case ARMFault_SyncParity:
        fsc = 0x18;
        break;
    case ARMFault_SyncParityOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b011011;
        } else {
            fsc = 0b011100 | fi->level;
        }
        break;
    case ARMFault_AsyncParity:
        fsc = 0x19;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x11 | (fi->ea << 12);
        break;
    case ARMFault_Alignment:
        fsc = 0x21;
        break;
    case ARMFault_Debug:
        fsc = 0x22;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x30;
        break;
    case ARMFault_UnsuppAtomicUpdate:
        fsc = 0x31;
        break;
    case ARMFault_Lockdown:
        fsc = 0x34;
        break;
    case ARMFault_Exclusive:
        fsc = 0x35;
        break;
    case ARMFault_GPCFOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b100011;
        } else {
            fsc = 0b100100 | fi->level;
        }
        break;
    case ARMFault_GPCFOnOutput:
        fsc = 0b101000;
        break;
    default:
        /*
         * Other faults can't occur in a context that requires a
         * long-format status code.
         */
        g_assert_not_reached();
    }

    /* Bit 9 marks the FSR as long-descriptor (LPAE) format. */
    fsc |= 1 << 9;
    return fsc;
}
78703ae85f8SPeter Maydell 
static inline bool arm_extabort_type(MemTxResult result)
{
    /*
     * The EA bit in syndromes and fault status registers is an IMPDEF
     * classification of external aborts. ARM implementations usually use
     * it to distinguish AXI bus Decode error (0) from Slave error (1);
     * QEMU follows that convention.
     */
    if (result == MEMTX_DECODE_ERROR) {
        return false;
    }
    return true;
}
797b9f6033cSRichard Henderson 
798b9f6033cSRichard Henderson #ifdef CONFIG_USER_ONLY
799b9f6033cSRichard Henderson void arm_cpu_record_sigsegv(CPUState *cpu, vaddr addr,
800b9f6033cSRichard Henderson                             MMUAccessType access_type,
801b9f6033cSRichard Henderson                             bool maperr, uintptr_t ra);
802b9f6033cSRichard Henderson void arm_cpu_record_sigbus(CPUState *cpu, vaddr addr,
803b9f6033cSRichard Henderson                            MMUAccessType access_type, uintptr_t ra);
804b9f6033cSRichard Henderson #else
80520dc67c9SRichard Henderson bool arm_cpu_tlb_fill_align(CPUState *cs, CPUTLBEntryFull *out, vaddr addr,
80620dc67c9SRichard Henderson                             MMUAccessType access_type, int mmu_idx,
80720dc67c9SRichard Henderson                             MemOp memop, int size, bool probe, uintptr_t ra);
80820dc67c9SRichard Henderson #endif
80920dc67c9SRichard Henderson 
static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
{
    /* Strip the profile tag bits, leaving the core (TLB) mmu index. */
    return mmu_idx & ARM_MMU_IDX_COREIDX_MASK;
}
814b9f6033cSRichard Henderson 
core_to_arm_mmu_idx(CPUARMState * env,int mmu_idx)815b9f6033cSRichard Henderson static inline ARMMMUIdx core_to_arm_mmu_idx(CPUARMState *env, int mmu_idx)
8168ae08860SRichard Henderson {
8178ae08860SRichard Henderson     if (arm_feature(env, ARM_FEATURE_M)) {
8188ae08860SRichard Henderson         return mmu_idx | ARM_MMU_IDX_M;
8198ae08860SRichard Henderson     } else {
820fcf5ef2aSThomas Huth         return mmu_idx | ARM_MMU_IDX_A;
821fcf5ef2aSThomas Huth     }
822fcf5ef2aSThomas Huth }
8238905770bSMarc-André Lureau 
static inline ARMMMUIdx core_to_aa64_mmu_idx(int mmu_idx)
{
    /*
     * Reconstruct an ARMMMUIdx from a core mmu index for AArch64.
     * AArch64 is always a-profile, so no feature check is needed.
     */
    return mmu_idx | ARM_MMU_IDX_A;
}
829c79c0a31SPeter Maydell 
830c79c0a31SPeter Maydell int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx);
831c79c0a31SPeter Maydell 
832c79c0a31SPeter Maydell /* Return the MMU index for a v7M CPU in the specified security state */
833c79c0a31SPeter Maydell ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate);
834c79c0a31SPeter Maydell 
835c79c0a31SPeter Maydell /*
836c79c0a31SPeter Maydell  * Return true if the stage 1 translation regime is using LPAE
837d90ebc47SPhilippe Mathieu-Daudé  * format page tables
838c79c0a31SPeter Maydell  */
83908267487SAaron Lindsay bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx);
840b5c53d1bSAaron Lindsay 
841b5c53d1bSAaron Lindsay /* Raise a data fault alignment exception for the specified virtual address */
842b5c53d1bSAaron Lindsay G_NORETURN void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
843b5c53d1bSAaron Lindsay                                             MMUAccessType access_type,
844b5c53d1bSAaron Lindsay                                             int mmu_idx, uintptr_t retaddr);
845b5c53d1bSAaron Lindsay 
846b5c53d1bSAaron Lindsay #ifndef CONFIG_USER_ONLY
847fcf5ef2aSThomas Huth /* arm_cpu_do_transaction_failed: handle a memory system error response
848fcf5ef2aSThomas Huth  * (eg "no device/memory present at address") by raising an external abort
84908267487SAaron Lindsay  * exception
85008267487SAaron Lindsay  */
85108267487SAaron Lindsay void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
852fcf5ef2aSThomas Huth                                    vaddr addr, unsigned size,
853fcf5ef2aSThomas Huth                                    MMUAccessType access_type,
854fcf5ef2aSThomas Huth                                    int mmu_idx, MemTxAttrs attrs,
855*6d62f309SPeter Maydell                                    MemTxResult response, uintptr_t retaddr);
856*6d62f309SPeter Maydell #endif
857*6d62f309SPeter Maydell 
858*6d62f309SPeter Maydell /* Call any registered EL change hooks */
static inline void arm_call_pre_el_change_hook(ARMCPU *cpu)
{
    /*
     * Run every registered pre-EL-change hook. The _SAFE iteration
     * allows a hook to unregister itself while the list is walked.
     */
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->pre_el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}
static inline void arm_call_el_change_hook(ARMCPU *cpu)
{
    /*
     * Run every registered (post) EL-change hook. The _SAFE iteration
     * allows a hook to unregister itself while the list is walked.
     */
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}
873452ef8cbSRichard Henderson 
874339370b9SRichard Henderson /*
875339370b9SRichard Henderson  * Return true if this address translation regime has two ranges.
876452ef8cbSRichard Henderson  * Note that this will not return the correct answer for AArch32
877339370b9SRichard Henderson  * Secure PL1&0 (i.e. mmu indexes E3, E30_0, E30_3_PAN), but it is
878339370b9SRichard Henderson  * never called from a context where EL3 can be AArch32. (The
879339370b9SRichard Henderson  * correct return value for ARMMMUIdx_E3 would be different for
880339370b9SRichard Henderson  * that case, so we can't just make the function return the
881339370b9SRichard Henderson  * correct value anyway; we would need an extra "bool e3_is_aarch32"
882339370b9SRichard Henderson  * argument which all the current callsites would pass as 'false'.)
88381636b70SRichard Henderson  */
static inline bool regime_has_2_ranges(ARMMMUIdx mmu_idx)
{
    /*
     * The EL1&0 and EL2&0 regimes (and their stage-1 components) have
     * both a low and a high address range; every other regime has one.
     */
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
        return true;
    default:
        return false;
    }
}
9019c7ab8fcSRichard Henderson 
/* Return true for the PAN (Privileged Access Never) variant regimes. */
static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_E30_3_PAN:
        return true;
    default:
        return false;
    }
}
914*6d62f309SPeter Maydell 
/* Return true if this mmu index is a stage 2 (IPA->PA) regime. */
static inline bool regime_is_stage2(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_Stage2_S:
        return true;
    default:
        return false;
    }
}
9194c2c0474SPeter Maydell 
920f147ed37SPeter Maydell /* Return the exception level which controls this address translation regime */
static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    /* EL2&0, stage 2 and EL2 regimes are controlled from EL2 */
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_Stage2_S:
    case ARMMMUIdx_E2:
        return 2;
    /* EL3 regimes (including the AArch32 Secure PL1&0 aliases) */
    case ARMMMUIdx_E3:
    case ARMMMUIdx_E30_0:
    case ARMMMUIdx_E30_3_PAN:
        return 3;
    /* EL1&0 regimes, and all M-profile indexes, are controlled from EL1 */
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return 1;
    default:
        g_assert_not_reached();
    }
}
9548ae08860SRichard Henderson 
/* Return true if this regime translates unprivileged (EL0/user) accesses. */
static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E30_0:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MSUserNegPri:
        return true;
    default:
        return false;
    }
}
97138659d31SRichard Henderson 
97238659d31SRichard Henderson /* Return the SCTLR value which controls this address translation regime */
static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    /* SCTLR is banked by the EL which controls the regime. */
    return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
}
977f04383e7SPeter Maydell 
978f04383e7SPeter Maydell /*
979f04383e7SPeter Maydell  * These are the fields in VTCR_EL2 which affect both the Secure stage 2
980f04383e7SPeter Maydell  * and the Non-Secure stage 2 translation regimes (and hence which are
981f04383e7SPeter Maydell  * not present in VSTCR_EL2).
982f04383e7SPeter Maydell  */
983b1a10c86SRémi Denis-Courmont #define VTCR_SHARED_FIELD_MASK \
984f04383e7SPeter Maydell     (R_VTCR_IRGN0_MASK | R_VTCR_ORGN0_MASK | R_VTCR_SH0_MASK | \
985f04383e7SPeter Maydell      R_VTCR_PS_MASK | R_VTCR_VS_MASK | R_VTCR_HA_MASK | R_VTCR_HD_MASK | \
986f04383e7SPeter Maydell      R_VTCR_DS_MASK)
987b1a10c86SRémi Denis-Courmont 
988cb4a0a34SPeter Maydell /* Return the value of the TCR controlling this translation regime */
static inline uint64_t regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_Stage2) {
        return env->cp15.vtcr_el2;
    }
    if (mmu_idx == ARMMMUIdx_Stage2_S) {
        /*
         * Secure stage 2 shares fields from VTCR_EL2. We merge those
         * in with the VSTCR_EL2 value to synthesize a single VTCR_EL2 format
         * value so the callers don't need to special case this.
         *
         * If a future architecture change defines bits in VSTCR_EL2 that
         * overlap with these VTCR_EL2 fields we may need to revisit this.
         */
        uint64_t v = env->cp15.vstcr_el2 & ~VTCR_SHARED_FIELD_MASK;
        v |= env->cp15.vtcr_el2 & VTCR_SHARED_FIELD_MASK;
        return v;
    }
    /* Stage 1 regimes: TCR is banked by the controlling EL. */
    return env->cp15.tcr_el[regime_el(env, mmu_idx)];
}
100988ce6c6eSPeter Maydell 
101088ce6c6eSPeter Maydell /* Return true if the translation regime is using LPAE format page tables */
static inline bool regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    int el = regime_el(env, mmu_idx);
    /* EL2-controlled regimes and AArch64 ELs always use LPAE format. */
    if (el == 2 || arm_el_is_aa64(env, el)) {
        return true;
    }
    /* v8 R-profile (PMSA) also always uses the LPAE format. */
    if (arm_feature(env, ARM_FEATURE_PMSA) &&
        arm_feature(env, ARM_FEATURE_V8)) {
        return true;
    }
    /* Otherwise TTBCR.EAE selects between short and long descriptors. */
    if (arm_feature(env, ARM_FEATURE_LPAE)
        && (regime_tcr(env, mmu_idx) & TTBCR_EAE)) {
        return true;
    }
    return false;
}
102788ce6c6eSPeter Maydell 
102888ce6c6eSPeter Maydell /**
102988ce6c6eSPeter Maydell  * arm_num_brps: Return number of implemented breakpoints.
103088ce6c6eSPeter Maydell  * Note that the ID register BRPS field is "number of bps - 1",
103188ce6c6eSPeter Maydell  * and we return the actual number of breakpoints.
103288ce6c6eSPeter Maydell  */
static inline int arm_num_brps(ARMCPU *cpu)
{
    /*
     * The debug ID register BRPS field holds "breakpoints minus one";
     * read the AArch64 or AArch32 variant as appropriate and convert
     * to an actual count.
     */
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return 1 + FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, BRPS);
    }
    return 1 + FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, BRPS);
}
104188ce6c6eSPeter Maydell 
104288ce6c6eSPeter Maydell /**
104388ce6c6eSPeter Maydell  * arm_num_wrps: Return number of implemented watchpoints.
104488ce6c6eSPeter Maydell  * Note that the ID register WRPS field is "number of wps - 1",
104588ce6c6eSPeter Maydell  * and we return the actual number of watchpoints.
104688ce6c6eSPeter Maydell  */
static inline int arm_num_wrps(ARMCPU *cpu)
{
    /* WRPS in both ID register formats is "watchpoints minus one". */
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, WRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, WRPS) + 1;
    }
}
10555529bf18SPeter Maydell 
10565529bf18SPeter Maydell /**
10575529bf18SPeter Maydell  * arm_num_ctx_cmps: Return number of implemented context comparators.
10585529bf18SPeter Maydell  * Note that the ID register CTX_CMPS field is "number of cmps - 1",
10595529bf18SPeter Maydell  * and we return the actual number of comparators.
10605529bf18SPeter Maydell  */
static inline int arm_num_ctx_cmps(ARMCPU *cpu)
{
    /* CTX_CMPS in both ID register formats is "comparators minus one". */
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, CTX_CMPS) + 1;
    }
}
106955203189SPeter Maydell 
107055203189SPeter Maydell /**
107155203189SPeter Maydell  * v7m_using_psp: Return true if using process stack pointer
107255203189SPeter Maydell  * Return true if the CPU is currently using the process stack
107355203189SPeter Maydell  * pointer, or false if it is using the main stack pointer.
107455203189SPeter Maydell  */
static inline bool v7m_using_psp(CPUARMState *env)
{
    /*
     * Handler mode always uses the main stack; for thread mode
     * the CONTROL.SPSEL bit determines the answer.
     * Note that in v7M it is not possible to be in Handler mode with
     * CONTROL.SPSEL non-zero, but in v8M it is, so we must check both.
     * CONTROL is banked by security state, hence [env->v7m.secure].
     */
    return !arm_v7m_is_handler_mode(env) &&
        env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK;
}
1085787a7e76SPhilippe Mathieu-Daudé 
1086787a7e76SPhilippe Mathieu-Daudé /**
1087787a7e76SPhilippe Mathieu-Daudé  * v7m_sp_limit: Return SP limit for current CPU state
1088787a7e76SPhilippe Mathieu-Daudé  * Return the SP limit value for the current CPU security state
1089787a7e76SPhilippe Mathieu-Daudé  * and stack pointer.
1090787a7e76SPhilippe Mathieu-Daudé  */
v7m_sp_limit(CPUARMState * env)1091787a7e76SPhilippe Mathieu-Daudé static inline uint32_t v7m_sp_limit(CPUARMState *env)
1092787a7e76SPhilippe Mathieu-Daudé {
1093787a7e76SPhilippe Mathieu-Daudé     if (v7m_using_psp(env)) {
1094787a7e76SPhilippe Mathieu-Daudé         return env->v7m.psplim[env->v7m.secure];
1095787a7e76SPhilippe Mathieu-Daudé     } else {
1096787a7e76SPhilippe Mathieu-Daudé         return env->v7m.msplim[env->v7m.secure];
1097787a7e76SPhilippe Mathieu-Daudé     }
1098787a7e76SPhilippe Mathieu-Daudé }
1099787a7e76SPhilippe Mathieu-Daudé 
1100787a7e76SPhilippe Mathieu-Daudé /**
1101787a7e76SPhilippe Mathieu-Daudé  * v7m_cpacr_pass:
1102787a7e76SPhilippe Mathieu-Daudé  * Return true if the v7M CPACR permits access to the FPU for the specified
110381e37284SPeter Maydell  * security state and privilege level.
110481e37284SPeter Maydell  */
static inline bool v7m_cpacr_pass(CPUARMState *env,
                                  bool is_secure, bool is_priv)
{
    /* The 2-bit FPU access field lives at bits [21:20] of CPACR. */
    uint32_t cp = extract32(env->v7m.cpacr[is_secure], 20, 2);

    switch (cp) {
    case 0:
    case 2: /* UNPREDICTABLE: we treat like 0 (no access) */
        return false;
    case 1: /* privileged access only */
        return is_priv;
    case 3: /* full access */
        return true;
    default:
        g_assert_not_reached();
    }
}
112089430fc6SPeter Maydell 
112189430fc6SPeter Maydell /**
112289430fc6SPeter Maydell  * aarch32_mode_name(): Return name of the AArch32 CPU mode
112389430fc6SPeter Maydell  * @psr: Program Status Register indicating CPU mode
112489430fc6SPeter Maydell  *
1125a4a411fbSStefan Hajnoczi  * Returns, for debug logging purposes, a printable representation
112689430fc6SPeter Maydell  * of the AArch32 CPU mode ("svc", "usr", etc) as indicated by
112789430fc6SPeter Maydell  * the low bits of the specified PSR.
112889430fc6SPeter Maydell  */
static inline const char *aarch32_mode_name(uint32_t psr)
{
    /* Indexed by PSR.M[3:0]; "???" marks reserved encodings. */
    static const char mode_names[16][4] = {
        "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
        "???", "???", "hyp", "und", "???", "???", "???", "sys"
    };

    return mode_names[psr & 0xf];
}
113850494a27SRichard Henderson 
1139b36a32eaSJinjie Ruan /**
1140b36a32eaSJinjie Ruan  * arm_cpu_update_virq: Update CPU_INTERRUPT_VIRQ bit in cs->interrupt_request
1141b36a32eaSJinjie Ruan  *
1142b36a32eaSJinjie Ruan  * Update the CPU_INTERRUPT_VIRQ bit in cs->interrupt_request, following
1143b36a32eaSJinjie Ruan  * a change to either the input VIRQ line from the GIC or the HCR_EL2.VI bit.
1144b36a32eaSJinjie Ruan  * Must be called with the BQL held.
1145b36a32eaSJinjie Ruan  */
1146b36a32eaSJinjie Ruan void arm_cpu_update_virq(ARMCPU *cpu);
1147b36a32eaSJinjie Ruan 
1148b36a32eaSJinjie Ruan /**
1149b36a32eaSJinjie Ruan  * arm_cpu_update_vfiq: Update CPU_INTERRUPT_VFIQ bit in cs->interrupt_request
1150b36a32eaSJinjie Ruan  *
1151b36a32eaSJinjie Ruan  * Update the CPU_INTERRUPT_VFIQ bit in cs->interrupt_request, following
1152b36a32eaSJinjie Ruan  * a change to either the input VFIQ line from the GIC or the HCR_EL2.VF bit.
1153b36a32eaSJinjie Ruan  * Must be called with the BQL held.
1154b36a32eaSJinjie Ruan  */
1155b36a32eaSJinjie Ruan void arm_cpu_update_vfiq(ARMCPU *cpu);
1156b36a32eaSJinjie Ruan 
11573c29632fSRichard Henderson /**
11583c29632fSRichard Henderson  * arm_cpu_update_vinmi: Update CPU_INTERRUPT_VINMI bit in cs->interrupt_request
11593c29632fSRichard Henderson  *
11603c29632fSRichard Henderson  * Update the CPU_INTERRUPT_VINMI bit in cs->interrupt_request, following
11613c29632fSRichard Henderson  * a change to either the input VNMI line from the GIC or the HCRX_EL2.VINMI.
11623c29632fSRichard Henderson  * Must be called with the BQL held.
11633c29632fSRichard Henderson  */
11643c29632fSRichard Henderson void arm_cpu_update_vinmi(ARMCPU *cpu);
1165164690b2SRichard Henderson 
1166164690b2SRichard Henderson /**
1167164690b2SRichard Henderson  * arm_cpu_update_vfnmi: Update CPU_INTERRUPT_VFNMI bit in cs->interrupt_request
1168164690b2SRichard Henderson  *
1169164690b2SRichard Henderson  * Update the CPU_INTERRUPT_VFNMI bit in cs->interrupt_request, following
1170164690b2SRichard Henderson  * a change to the HCRX_EL2.VFNMI.
1171164690b2SRichard Henderson  * Must be called with the BQL held.
1172164690b2SRichard Henderson  */
1173164690b2SRichard Henderson void arm_cpu_update_vfnmi(ARMCPU *cpu);
117450494a27SRichard Henderson 
117550494a27SRichard Henderson /**
117650494a27SRichard Henderson  * arm_cpu_update_vserr: Update CPU_INTERRUPT_VSERR bit
117750494a27SRichard Henderson  *
117850494a27SRichard Henderson  * Update the CPU_INTERRUPT_VSERR bit in cs->interrupt_request,
117950494a27SRichard Henderson  * following a change to the HCR_EL2.VSE bit.
118050494a27SRichard Henderson  */
118164be86abSRichard Henderson void arm_cpu_update_vserr(ARMCPU *cpu);
118264be86abSRichard Henderson 
118364be86abSRichard Henderson /**
118464be86abSRichard Henderson  * arm_mmu_idx_el:
118564be86abSRichard Henderson  * @env: The cpu environment
118664be86abSRichard Henderson  * @el: The EL to use.
118764be86abSRichard Henderson  *
1188d8cca960SRichard Henderson  * Return the full ARMMMUIdx for the translation regime for EL.
1189d8cca960SRichard Henderson  */
1190d8cca960SRichard Henderson ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el);
1191d8cca960SRichard Henderson 
119264be86abSRichard Henderson /**
119364be86abSRichard Henderson  * arm_mmu_idx:
11942859d7b5SRichard Henderson  * @env: The cpu environment
119564be86abSRichard Henderson  *
119664be86abSRichard Henderson  * Return the full ARMMMUIdx for the current translation regime.
1197d8cca960SRichard Henderson  */
119864be86abSRichard Henderson ARMMMUIdx arm_mmu_idx(CPUARMState *env);
119964be86abSRichard Henderson 
120064be86abSRichard Henderson /**
1201fee7aa46SRichard Henderson  * arm_stage1_mmu_idx:
1202fee7aa46SRichard Henderson  * @env: The cpu environment
1203fee7aa46SRichard Henderson  *
1204fee7aa46SRichard Henderson  * Return the ARMMMUIdx for the stage1 traversal for the current regime.
1205fee7aa46SRichard Henderson  */
1206fee7aa46SRichard Henderson #ifdef CONFIG_USER_ONLY
stage_1_mmu_idx(ARMMMUIdx mmu_idx)1207fee7aa46SRichard Henderson static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
1208fee7aa46SRichard Henderson {
1209fee7aa46SRichard Henderson     return ARMMMUIdx_Stage1_E0;
1210fee7aa46SRichard Henderson }
arm_stage1_mmu_idx(CPUARMState * env)1211fee7aa46SRichard Henderson static inline ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
1212fee7aa46SRichard Henderson {
1213452ef8cbSRichard Henderson     return ARMMMUIdx_Stage1_E0;
1214fee7aa46SRichard Henderson }
1215fee7aa46SRichard Henderson #else
1216fee7aa46SRichard Henderson ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx);
1217fee7aa46SRichard Henderson ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env);
1218fee7aa46SRichard Henderson #endif
1219fee7aa46SRichard Henderson 
12204f9584edSRichard Henderson /**
12214f9584edSRichard Henderson  * arm_mmu_idx_is_stage1_of_2:
12224f9584edSRichard Henderson  * @mmu_idx: The ARMMMUIdx to test
1223f062d144SRichard Henderson  *
12244f9584edSRichard Henderson  * Return true if @mmu_idx is a NOTLB mmu_idx that is the
12254f9584edSRichard Henderson  * first stage of a two stage regime.
12264f9584edSRichard Henderson  */
arm_mmu_idx_is_stage1_of_2(ARMMMUIdx mmu_idx)12274f9584edSRichard Henderson static inline bool arm_mmu_idx_is_stage1_of_2(ARMMMUIdx mmu_idx)
12284f9584edSRichard Henderson {
12294f9584edSRichard Henderson     switch (mmu_idx) {
12304f9584edSRichard Henderson     case ARMMMUIdx_Stage1_E0:
12314f9584edSRichard Henderson     case ARMMMUIdx_Stage1_E1:
12324f9584edSRichard Henderson     case ARMMMUIdx_Stage1_E1_PAN:
12334f9584edSRichard Henderson         return true;
12344f9584edSRichard Henderson     default:
12354f9584edSRichard Henderson         return false;
12364f9584edSRichard Henderson     }
1237873b73c0SPeter Maydell }
1238f062d144SRichard Henderson 
aarch32_cpsr_valid_mask(uint64_t features,const ARMISARegisters * id)1239f062d144SRichard Henderson static inline uint32_t aarch32_cpsr_valid_mask(uint64_t features,
1240220f508fSRichard Henderson                                                const ARMISARegisters *id)
1241220f508fSRichard Henderson {
1242220f508fSRichard Henderson     uint32_t valid = CPSR_M | CPSR_AIF | CPSR_IL | CPSR_NZCV;
1243dc8b1853SRebecca Cran 
1244dc8b1853SRebecca Cran     if ((features >> ARM_FEATURE_V4T) & 1) {
1245dc8b1853SRebecca Cran         valid |= CPSR_T;
1246f2f68a78SRebecca Cran     }
1247f2f68a78SRebecca Cran     if ((features >> ARM_FEATURE_V5) & 1) {
1248f2f68a78SRebecca Cran         valid |= CPSR_Q; /* V5TE in reality*/
12494f9584edSRichard Henderson     }
12504f9584edSRichard Henderson     if ((features >> ARM_FEATURE_V6) & 1) {
12514f9584edSRichard Henderson         valid |= CPSR_E | CPSR_GE;
12524f9584edSRichard Henderson     }
125314084511SRichard Henderson     if ((features >> ARM_FEATURE_THUMB2) & 1) {
125414084511SRichard Henderson         valid |= CPSR_IT;
125514084511SRichard Henderson     }
125614084511SRichard Henderson     if (isar_feature_aa32_jazelle(id)) {
125714084511SRichard Henderson         valid |= CPSR_J;
125814084511SRichard Henderson     }
125914084511SRichard Henderson     if (isar_feature_aa32_pan(id)) {
126014084511SRichard Henderson         valid |= CPSR_PAN;
1261220f508fSRichard Henderson     }
1262220f508fSRichard Henderson     if (isar_feature_aa32_dit(id)) {
1263220f508fSRichard Henderson         valid |= CPSR_DIT;
12649eeb7a1cSRichard Henderson     }
12659eeb7a1cSRichard Henderson     if (isar_feature_aa32_ssbs(id)) {
12669eeb7a1cSRichard Henderson         valid |= CPSR_SSBS;
1267dc8b1853SRebecca Cran     }
1268dc8b1853SRebecca Cran 
1269dc8b1853SRebecca Cran     return valid;
1270f2f68a78SRebecca Cran }
1271f2f68a78SRebecca Cran 
aarch64_pstate_valid_mask(const ARMISARegisters * id)1272f2f68a78SRebecca Cran static inline uint32_t aarch64_pstate_valid_mask(const ARMISARegisters *id)
12734b779cebSRichard Henderson {
12744b779cebSRichard Henderson     uint32_t valid;
12754b779cebSRichard Henderson 
12764833c756SJinjie Ruan     valid = PSTATE_M | PSTATE_DAIF | PSTATE_IL | PSTATE_SS | PSTATE_NZCV;
12774833c756SJinjie Ruan     if (isar_feature_aa64_bti(id)) {
12784833c756SJinjie Ruan         valid |= PSTATE_BTYPE;
127914084511SRichard Henderson     }
128014084511SRichard Henderson     if (isar_feature_aa64_pan(id)) {
128114084511SRichard Henderson         valid |= PSTATE_PAN;
128214084511SRichard Henderson     }
1283104f703dSPeter Maydell     if (isar_feature_aa64_uao(id)) {
1284104f703dSPeter Maydell         valid |= PSTATE_UAO;
1285104f703dSPeter Maydell     }
1286104f703dSPeter Maydell     if (isar_feature_aa64_dit(id)) {
1287104f703dSPeter Maydell         valid |= PSTATE_DIT;
1288104f703dSPeter Maydell     }
1289104f703dSPeter Maydell     if (isar_feature_aa64_ssbs(id)) {
1290104f703dSPeter Maydell         valid |= PSTATE_SSBS;
1291104f703dSPeter Maydell     }
12923c003f70SPeter Maydell     if (isar_feature_aa64_mte(id)) {
12933c003f70SPeter Maydell         valid |= PSTATE_TCO;
12943c003f70SPeter Maydell     }
12953c003f70SPeter Maydell     if (isar_feature_aa64_nmi(id)) {
12963c003f70SPeter Maydell         valid |= PSTATE_ALLINT;
12973c003f70SPeter Maydell     }
12983c003f70SPeter Maydell 
12993c003f70SPeter Maydell     return valid;
13003c003f70SPeter Maydell }
13013c003f70SPeter Maydell 
/* Granule size (i.e. page size) */
typedef enum ARMGranuleSize {
    /* Same order as TG0 encoding; see arm_granule_bits() for page shifts */
    Gran4K,
    Gran64K,
    Gran16K,
    GranInvalid,
} ARMGranuleSize;
13103c003f70SPeter Maydell 
13113c003f70SPeter Maydell /**
1312ba97be9fSRichard Henderson  * arm_granule_bits: Return address size of the granule in bits
1313ba97be9fSRichard Henderson  *
1314ba97be9fSRichard Henderson  * Return the address size of the granule in bits. This corresponds
1315ba97be9fSRichard Henderson  * to the pseudocode TGxGranuleBits().
1316ba97be9fSRichard Henderson  */
static inline int arm_granule_bits(ARMGranuleSize gran)
{
    /*
     * Return the address size of the granule in bits; corresponds to
     * the pseudocode TGxGranuleBits().
     */
    switch (gran) {
    case Gran4K:
        return 12;
    case Gran16K:
        return 14;
    case Gran64K:
        return 16;
    default:
        /* GranInvalid must never reach here */
        g_assert_not_reached();
    }
}
1330ba97be9fSRichard Henderson 
/*
 * Parameters of a given virtual address, as extracted from the
 * translation control register (TCR) for a given regime.
 * NOTE(review): field names appear to mirror the corresponding TCR
 * fields (TxSZ, PS, SH, TBI, EPD, HPD, DS, HA, HD) — see the Arm ARM
 * for their semantics; confirm against the code that fills this in.
 */
typedef struct ARMVAParameters {
    unsigned tsz    : 8;
    unsigned ps     : 3;
    unsigned sh     : 2;
    unsigned select : 1;
    bool tbi        : 1;
    bool epd        : 1;
    bool hpd        : 1;
    bool tsz_oob    : 1;  /* tsz has been clamped to legal range */
    bool ds         : 1;
    bool ha         : 1;
    bool hd         : 1;
    ARMGranuleSize gran : 2;
} ARMVAParameters;
134981ae05faSRichard Henderson 
135081ae05faSRichard Henderson /**
135181ae05faSRichard Henderson  * aa64_va_parameters: Return parameters for an AArch64 virtual address
135281ae05faSRichard Henderson  * @env: CPU
135381ae05faSRichard Henderson  * @va: virtual address to look up
135481ae05faSRichard Henderson  * @mmu_idx: determines translation regime to use
135581ae05faSRichard Henderson  * @data: true if this is a data access
135681ae05faSRichard Henderson  * @el1_is_aa32: true if we are asking about stage 2 when EL1 is AArch32
13570da067f2SIdan Horowitz  *  (ignored if @mmu_idx is for a stage 1 regime; only affects tsz/tsz_oob)
13584301acd7SRichard Henderson  */
13594301acd7SRichard Henderson ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
136081ae05faSRichard Henderson                                    ARMMMUIdx mmu_idx, bool data,
136181ae05faSRichard Henderson                                    bool el1_is_aa32);
13624301acd7SRichard Henderson 
136381ae05faSRichard Henderson int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx);
136481ae05faSRichard Henderson int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx);
136581ae05faSRichard Henderson int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx);
136681ae05faSRichard Henderson 
1367ebae861fSPhilippe Mathieu-Daudé /* Determine if allocation tags are available.  */
allocation_tag_access_enabled(CPUARMState * env,int el,uint64_t sctlr)1368ebae861fSPhilippe Mathieu-Daudé static inline bool allocation_tag_access_enabled(CPUARMState *env, int el,
1369787a7e76SPhilippe Mathieu-Daudé                                                  uint64_t sctlr)
1370787a7e76SPhilippe Mathieu-Daudé {
1371787a7e76SPhilippe Mathieu-Daudé     if (el < 3
1372787a7e76SPhilippe Mathieu-Daudé         && arm_feature(env, ARM_FEATURE_EL3)
1373787a7e76SPhilippe Mathieu-Daudé         && !(env->cp15.scr_el3 & SCR_ATA)) {
1374787a7e76SPhilippe Mathieu-Daudé         return false;
1375787a7e76SPhilippe Mathieu-Daudé     }
1376787a7e76SPhilippe Mathieu-Daudé     if (el < 2 && arm_is_el2_enabled(env)) {
1377787a7e76SPhilippe Mathieu-Daudé         uint64_t hcr = arm_hcr_el2_eff(env);
1378787a7e76SPhilippe Mathieu-Daudé         if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
1379787a7e76SPhilippe Mathieu-Daudé             return false;
1380787a7e76SPhilippe Mathieu-Daudé         }
1381787a7e76SPhilippe Mathieu-Daudé     }
1382dbf2a71aSRichard Henderson     sctlr &= (el == 0 ? SCTLR_ATA0 : SCTLR_ATA);
1383787a7e76SPhilippe Mathieu-Daudé     return sctlr != 0;
1384ebae861fSPhilippe Mathieu-Daudé }
1385ebae861fSPhilippe Mathieu-Daudé 
13869f225e60SPeter Maydell #ifndef CONFIG_USER_ONLY
13879f225e60SPeter Maydell 
/* Security attributes for an address, as returned by v8m_security_lookup. */
typedef struct V8M_SAttributes {
    bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */
    bool ns;
    bool nsc;
    uint8_t sregion;
    bool srvalid;  /* presumably: whether sregion is valid — confirm vs lookup code */
    uint8_t iregion;
    bool irvalid;  /* presumably: whether iregion is valid — confirm vs lookup code */
} V8M_SAttributes;

/*
 * v8m_security_lookup: fill in *@sattrs with the v8M security attributes
 * for @address, for the given access type and translation regime.
 */
void v8m_security_lookup(CPUARMState *env, uint32_t address,
                         MMUAccessType access_type, ARMMMUIdx mmu_idx,
                         bool secure, V8M_SAttributes *sattrs);

/* Cacheability and shareability attributes for a memory access */
typedef struct ARMCacheAttrs {
    /*
     * If is_s2_format is true, attrs is the S2 descriptor bits [5:2]
     * Otherwise, attrs is the same as the MAIR_EL1 8-bit format
     */
    unsigned int attrs:8;
    unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
    bool is_s2_format:1;
} ARMCacheAttrs;

/* Fields that are valid upon success. */
typedef struct GetPhysAddrResult {
    CPUTLBEntryFull f;        /* TLB entry data for the translation */
    ARMCacheAttrs cacheattrs; /* memory attributes of the translated page */
} GetPhysAddrResult;
1419def8aa5bSRichard Henderson 
1420def8aa5bSRichard Henderson /**
1421def8aa5bSRichard Henderson  * get_phys_addr: get the physical address for a virtual address
1422def8aa5bSRichard Henderson  * @env: CPUARMState
1423767e7d8aSArd Biesheuvel  * @address: virtual address to get physical address for
1424f1269a98SJean-Philippe Brucker  * @access_type: 0 for read, 1 for write, 2 for execute
1425def8aa5bSRichard Henderson  * @memop: memory operation feeding this access, or 0 for none
1426def8aa5bSRichard Henderson  * @mmu_idx: MMU index indicating required translation regime
1427def8aa5bSRichard Henderson  * @result: set on translation success.
1428def8aa5bSRichard Henderson  * @fi: set to fault info if the translation fails
1429e1ee56ecSJean-Philippe Brucker  *
1430f1269a98SJean-Philippe Brucker  * Find the physical address corresponding to the given virtual address,
1431def8aa5bSRichard Henderson  * by doing a translation table walk on MMU based systems or using the
1432def8aa5bSRichard Henderson  * MPU state on MPU based systems.
1433def8aa5bSRichard Henderson  *
1434def8aa5bSRichard Henderson  * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
1435e1ee56ecSJean-Philippe Brucker  * prot and page_size may not be filled in, and the populated fsr value provides
1436def8aa5bSRichard Henderson  * information on why the translation aborted, in the format of a
1437def8aa5bSRichard Henderson  * DFSR/IFSR fault register, with the following caveats:
1438def8aa5bSRichard Henderson  *  * we honour the short vs long DFSR format differences.
1439e1ee56ecSJean-Philippe Brucker  *  * the WnR bit is never set (the caller must do this).
1440f1269a98SJean-Philippe Brucker  *  * for PMSAv5 based systems we don't bother to return a full FSR format
1441def8aa5bSRichard Henderson  *    value.
1442767e7d8aSArd Biesheuvel  */
1443f1269a98SJean-Philippe Brucker bool get_phys_addr(CPUARMState *env, vaddr address,
1444e1ee56ecSJean-Philippe Brucker                    MMUAccessType access_type, MemOp memop, ARMMMUIdx mmu_idx,
1445f1269a98SJean-Philippe Brucker                    GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
1446f1269a98SJean-Philippe Brucker     __attribute__((nonnull));
14477e98e21cSRichard Henderson 
1448ebae861fSPhilippe Mathieu-Daudé /**
1449d2c92e58SRichard Henderson  * get_phys_addr_with_space_nogpc: get the physical address for a virtual
1450d2c92e58SRichard Henderson  *                                 address
1451e9fb7090SRichard Henderson  * @env: CPUARMState
1452e9fb7090SRichard Henderson  * @address: virtual address to get physical address for
1453d2c92e58SRichard Henderson  * @access_type: 0 for read, 1 for write, 2 for execute
1454fc6177afSPeter Maydell  * @memop: memory operation feeding this access, or 0 for none
1455b59f479bSPhilippe Mathieu-Daudé  * @mmu_idx: MMU index indicating required translation regime
1456ebae861fSPhilippe Mathieu-Daudé  * @space: security space for the access
1457ebae861fSPhilippe Mathieu-Daudé  * @result: set on translation success.
14584b779cebSRichard Henderson  * @fi: set to fault info if the translation fails
1459b64ee454SRichard Henderson  *
1460b64ee454SRichard Henderson  * Similar to get_phys_addr, but use the given security space and don't perform
1461b64ee454SRichard Henderson  * a Granule Protection Check on the resulting address.
1462b64ee454SRichard Henderson  */
1463b64ee454SRichard Henderson bool get_phys_addr_with_space_nogpc(CPUARMState *env, vaddr address,
1464b64ee454SRichard Henderson                                     MMUAccessType access_type, MemOp memop,
1465b64ee454SRichard Henderson                                     ARMMMUIdx mmu_idx, ARMSecuritySpace space,
1466b64ee454SRichard Henderson                                     GetPhysAddrResult *result,
1467b64ee454SRichard Henderson                                     ARMMMUFaultInfo *fi)
1468206adacfSRichard Henderson     __attribute__((nonnull));
1469206adacfSRichard Henderson 
1470206adacfSRichard Henderson bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
1471206adacfSRichard Henderson                        MMUAccessType access_type, ARMMMUIdx mmu_idx,
1472206adacfSRichard Henderson                        bool is_secure, GetPhysAddrResult *result,
14730a405be2SRichard Henderson                        ARMMMUFaultInfo *fi, uint32_t *mregion);
14740a405be2SRichard Henderson 
14750a405be2SRichard Henderson void arm_log_exception(CPUState *cs);
14760a405be2SRichard Henderson 
14770a405be2SRichard Henderson #endif /* !CONFIG_USER_ONLY */
1478523da6b9SRichard Henderson 
1479b12a7671SRichard Henderson /*
14800a405be2SRichard Henderson  * SVE predicates are 1/8 the size of SVE vectors, and cannot use
1481d304d280SRichard Henderson  * the same simd_desc() encoding due to restrictions on size.
1482bd47b61cSRichard Henderson  * Use these instead.
14832e34ff45SRichard Henderson  */
148481639989SPeter Maydell FIELD(PREDDESC, OPRSZ, 0, 6)
148581639989SPeter Maydell FIELD(PREDDESC, ESZ, 6, 2)
148681639989SPeter Maydell FIELD(PREDDESC, DATA, 8, 24)
148781639989SPeter Maydell 
148881639989SPeter Maydell /*
148981639989SPeter Maydell  * The SVE simd_data field, for memory ops, contains either
149081639989SPeter Maydell  * rd (5 bits) or a shift count (2 bits).
149181639989SPeter Maydell  */
149281639989SPeter Maydell #define SVE_MTEDESC_SHIFT 5
149381639989SPeter Maydell 
149481639989SPeter Maydell /* Bits within a descriptor passed to the helper_mte_check* functions. */
149581639989SPeter Maydell FIELD(MTEDESC, MIDX,  0, 4)
149681639989SPeter Maydell FIELD(MTEDESC, TBI,   4, 2)
149781639989SPeter Maydell FIELD(MTEDESC, TCMA,  6, 2)
149881639989SPeter Maydell FIELD(MTEDESC, WRITE, 8, 1)
149981639989SPeter Maydell FIELD(MTEDESC, ALIGN, 9, 3)
150081639989SPeter Maydell FIELD(MTEDESC, SIZEM1, 12, SIMD_DATA_BITS - SVE_MTEDESC_SHIFT - 12)  /* size - 1 */
150169c51dc3SPeter Maydell 
150269c51dc3SPeter Maydell bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr);
150369c51dc3SPeter Maydell uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra);
150469c51dc3SPeter Maydell 
150569c51dc3SPeter Maydell /**
150669c51dc3SPeter Maydell  * mte_mops_probe: Check where the next MTE failure is for a FEAT_MOPS operation
150769c51dc3SPeter Maydell  * @env: CPU env
150869c51dc3SPeter Maydell  * @ptr: start address of memory region (dirty pointer)
150969c51dc3SPeter Maydell  * @size: length of region (guaranteed not to cross a page boundary)
151069c51dc3SPeter Maydell  * @desc: MTEDESC descriptor word (0 means no MTE checks)
151169c51dc3SPeter Maydell  * Returns: the size of the region that can be copied without hitting
151269c51dc3SPeter Maydell  *          an MTE tag failure
151369c51dc3SPeter Maydell  *
151469c51dc3SPeter Maydell  * Note that we assume that the caller has already checked the TBI
151569c51dc3SPeter Maydell  * and TCMA bits with mte_checks_needed() and an MTE check is definitely
151669c51dc3SPeter Maydell  * required.
151769c51dc3SPeter Maydell  */
151881639989SPeter Maydell uint64_t mte_mops_probe(CPUARMState *env, uint64_t ptr, uint64_t size,
151981639989SPeter Maydell                         uint32_t desc);
152081639989SPeter Maydell 
152181639989SPeter Maydell /**
152281639989SPeter Maydell  * mte_mops_probe_rev: Check where the next MTE failure is for a FEAT_MOPS
152381639989SPeter Maydell  *                     operation going in the reverse direction
152481639989SPeter Maydell  * @env: CPU env
152581639989SPeter Maydell  * @ptr: *end* address of memory region (dirty pointer)
152681639989SPeter Maydell  * @size: length of region (guaranteed not to cross a page boundary)
152781639989SPeter Maydell  * @desc: MTEDESC descriptor word (0 means no MTE checks)
152881639989SPeter Maydell  * Returns: the size of the region that can be copied without hitting
15296087df57SPeter Maydell  *          an MTE tag failure
15306087df57SPeter Maydell  *
15316087df57SPeter Maydell  * Note that we assume that the caller has already checked the TBI
15326087df57SPeter Maydell  * and TCMA bits with mte_checks_needed() and an MTE check is definitely
15336087df57SPeter Maydell  * required.
15346087df57SPeter Maydell  */
15356087df57SPeter Maydell uint64_t mte_mops_probe_rev(CPUARMState *env, uint64_t ptr, uint64_t size,
15366087df57SPeter Maydell                             uint32_t desc);
15376087df57SPeter Maydell 
15386087df57SPeter Maydell /**
1539efbc78adSRichard Henderson  * mte_check_fail: Record an MTE tag check failure
1540efbc78adSRichard Henderson  * @env: CPU env
1541efbc78adSRichard Henderson  * @desc: MTEDESC descriptor word
1542efbc78adSRichard Henderson  * @dirty_ptr: Failing dirty address
1543efbc78adSRichard Henderson  * @ra: TCG retaddr
1544da54941fSRichard Henderson  *
1545da54941fSRichard Henderson  * This may never return (if the MTE tag checks are configured to fault).
1546da54941fSRichard Henderson  */
1547da54941fSRichard Henderson void mte_check_fail(CPUARMState *env, uint32_t desc,
1548da54941fSRichard Henderson                     uint64_t dirty_ptr, uintptr_t ra);
15492e34ff45SRichard Henderson 
15502e34ff45SRichard Henderson /**
15512e34ff45SRichard Henderson  * mte_mops_set_tags: Set MTE tags for a portion of a FEAT_MOPS operation
15522e34ff45SRichard Henderson  * @env: CPU env
15532e34ff45SRichard Henderson  * @dirty_ptr: Start address of memory region (dirty pointer)
15542e34ff45SRichard Henderson  * @size: length of region (guaranteed not to cross page boundary)
15552e34ff45SRichard Henderson  * @desc: MTEDESC descriptor word
15562e34ff45SRichard Henderson  */
15572e34ff45SRichard Henderson void mte_mops_set_tags(CPUARMState *env, uint64_t dirty_ptr, uint64_t size,
15582e34ff45SRichard Henderson                        uint32_t desc);
15592e34ff45SRichard Henderson 
static inline int allocation_tag_from_addr(uint64_t ptr)
{
    /* The allocation tag lives in bits [59:56] of the pointer. */
    return (int)extract64(ptr, 56, 4);
}
15642e34ff45SRichard Henderson 
static inline uint64_t address_with_allocation_tag(uint64_t ptr, int rtag)
{
    /* Overwrite bits [59:56] of @ptr with the allocation tag @rtag. */
    return deposit64(ptr, 56, 4, rtag);
}
15692e34ff45SRichard Henderson 
15702e34ff45SRichard Henderson /* Return true if tbi bits mean that the access is checked.  */
tbi_check(uint32_t desc,int bit55)15712e34ff45SRichard Henderson static inline bool tbi_check(uint32_t desc, int bit55)
15722e34ff45SRichard Henderson {
15732e34ff45SRichard Henderson     return (desc >> (R_MTEDESC_TBI_SHIFT + bit55)) & 1;
15742e34ff45SRichard Henderson }
15752e34ff45SRichard Henderson 
157616c84978SRichard Henderson /* Return true if tcma bits mean that the access is unchecked.  */
tcma_check(uint32_t desc,int bit55,int ptr_tag)157716c84978SRichard Henderson static inline bool tcma_check(uint32_t desc, int bit55, int ptr_tag)
15782e34ff45SRichard Henderson {
15792e34ff45SRichard Henderson     /*
15802e34ff45SRichard Henderson      * We had extracted bit55 and ptr_tag for other reasons, so fold
15812e34ff45SRichard Henderson      * (ptr<59:55> == 00000 || ptr<59:55> == 11111) into a single test.
15822e34ff45SRichard Henderson      */
15832e34ff45SRichard Henderson     bool match = ((ptr_tag + bit55) & 0xf) == 0;
15842e34ff45SRichard Henderson     bool tcma = (desc >> (R_MTEDESC_TCMA_SHIFT + bit55)) & 1;
15852e34ff45SRichard Henderson     return tcma && match;
15862e34ff45SRichard Henderson }
15872e34ff45SRichard Henderson 
15882e34ff45SRichard Henderson /*
15892e34ff45SRichard Henderson  * For TBI, ideally, we would do nothing.  Proper behaviour on fault is
15902e34ff45SRichard Henderson  * for the tag to be present in the FAR_ELx register.  But for user-only
15912e34ff45SRichard Henderson  * mode, we do not have a TLB with which to implement this, so we must
15922e34ff45SRichard Henderson  * remove the top byte.
1593507b6a50SPeter Maydell  */
static inline uint64_t useronly_clean_ptr(uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    /*
     * TBI0 is known to be enabled, while TBI1 is disabled: strip the
     * top byte only for low-half addresses (bit 55 clear); high-half
     * addresses pass through unchanged.
     */
    if (!(ptr & (1ULL << 55))) {
        ptr = extract64(ptr, 0, 56);
    }
#endif
    return ptr;
}
1602507b6a50SPeter Maydell 
/* Strip the top byte only if TBI is enabled for this half of the VA space. */
static inline uint64_t useronly_maybe_clean_ptr(uint32_t desc, uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    /* Sign-extend from bit 55: bits 63..56 become copies of bit 55. */
    int64_t stripped = sextract64(ptr, 0, 56);

    if (tbi_check(desc, stripped < 0)) {
        return (uint64_t)stripped;
    }
#endif
    return ptr;
}
16130130895dSAlexander Graf 
/*
 * Values for M-profile PSR.ECI for MVE insns: each value records which
 * "beats" (A0..B0) of a partially-executed MVE insn have completed.
 */
enum MVEECIState {
    ECI_NONE = 0, /* No completed beats */
    ECI_A0 = 1, /* Completed: A0 */
    ECI_A0A1 = 2, /* Completed: A0, A1 */
    /* 3 is reserved */
    ECI_A0A1A2 = 4, /* Completed: A0, A1, A2 */
    ECI_A0A1A2B0 = 5, /* Completed: A0, A1, A2, B0 */
    /* All other values reserved */
};
16240130895dSAlexander Graf 
/* Definitions for the PMU registers */
/* PMCR.N: number of event counters implemented */
#define PMCRN_MASK  0xf800
#define PMCRN_SHIFT 11
/* PMCR control bits; names follow the architectural PMCR_EL0 fields */
#define PMCRLP  0x80
#define PMCRLC  0x40
#define PMCRDP  0x20
#define PMCRX   0x10
#define PMCRD   0x8
#define PMCRC   0x4
#define PMCRP   0x2
#define PMCRE   0x1
/*
 * Mask of PMCR bits writable by guest (not including WO bits like C, P,
 * which can be written as 1 to trigger behaviour but which stay RAZ).
 */
#define PMCR_WRITABLE_MASK (PMCRLP | PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE)

/* PMXEVTYPER / PMEVTYPER<n> filter and event-count fields */
#define PMXEVTYPER_P          0x80000000
#define PMXEVTYPER_U          0x40000000
#define PMXEVTYPER_NSK        0x20000000
#define PMXEVTYPER_NSU        0x10000000
#define PMXEVTYPER_NSH        0x08000000
#define PMXEVTYPER_M          0x04000000
#define PMXEVTYPER_MT         0x02000000
#define PMXEVTYPER_EVTCOUNT   0x0000ffff
#define PMXEVTYPER_MASK       (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
                               PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
                               PMXEVTYPER_M | PMXEVTYPER_MT | \
                               PMXEVTYPER_EVTCOUNT)

/* PMCCFILTR (cycle counter filter register): writable bits */
#define PMCCFILTR             0xf8000000
#define PMCCFILTR_M           PMXEVTYPER_M
#define PMCCFILTR_EL0         (PMCCFILTR | PMCCFILTR_M)
165866260159SAkihiko Odaki 
/* Return the number of programmable event counters, from the PMCR.N field. */
static inline uint32_t pmu_num_counters(CPUARMState *env)
{
    return (env_archcpu(env)->isar.reset_pmcr_el0 & PMCRN_MASK) >> PMCRN_SHIFT;
}
166539920a04SFabiano Rosas 
166639920a04SFabiano Rosas /* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
pmu_counter_mask(CPUARMState * env)166739920a04SFabiano Rosas static inline uint64_t pmu_counter_mask(CPUARMState *env)
166839920a04SFabiano Rosas {
166989f4f20eSPeter Maydell   return (1ULL << 31) | ((1ULL << pmu_num_counters(env)) - 1);
167089f4f20eSPeter Maydell }
167148688c94SDavid Reiss 
#ifdef TARGET_AARCH64
/* gdbstub: dynamic register description and accessors for AArch64 state */
GDBFeature *arm_gen_dynamic_svereg_feature(CPUState *cpu, int base_reg);
int aarch64_gdb_get_sve_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_sve_reg(CPUState *cs, uint8_t *buf, int reg);
int aarch64_gdb_get_fpu_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_fpu_reg(CPUState *cs, uint8_t *buf, int reg);
int aarch64_gdb_get_pauth_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_pauth_reg(CPUState *cs, uint8_t *buf, int reg);
int aarch64_gdb_get_tag_ctl_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_tag_ctl_reg(CPUState *cs, uint8_t *buf, int reg);
/* Finalize optional CPU feature configuration; failures reported via @errp */
void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_sme_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_lpa2_finalize(ARMCPU *cpu, Error **errp);
void aarch64_max_tcg_initfn(Object *obj);
/* Register QOM properties that control the corresponding CPU features */
void aarch64_add_pauth_properties(Object *obj);
void aarch64_add_sve_properties(Object *obj);
void aarch64_add_sme_properties(Object *obj);
#endif
1691abf1f1b0SRichard Henderson 
/* Read the CONTROL register as the MRS instruction would. */
uint32_t arm_v7m_mrs_control(CPUARMState *env, uint32_t secure);

/*
 * Return a pointer to the location where we currently store the
 * stack pointer for the requested security state and thread mode.
 * This pointer will become invalid if the CPU state is updated
 * such that the stack pointers are switched around (eg changing
 * the SPSEL control bit).
 */
uint32_t *arm_v7m_get_sp_ptr(CPUARMState *env, bool secure,
                             bool threadmode, bool spsel);

/* NOTE(review): presumably the FEAT_VHE "EL is in host" test -- confirm. */
bool el_is_in_host(CPUARMState *env, int el);

/* Enable the maximal AArch32 feature set on @cpu (presumably for "max"). */
void aa32_max_features(ARMCPU *cpu);
/* Return the EL that an exception taken from the current state targets. */
int exception_target_el(CPUARMState *env);
bool arm_singlestep_active(CPUARMState *env);
bool arm_generate_debug_exceptions(CPUARMState *env);
1711f43ee493SPeter Maydell 
1712f43ee493SPeter Maydell /**
1713f43ee493SPeter Maydell  * pauth_ptr_mask:
1714f43ee493SPeter Maydell  * @param: parameters defining the MMU setup
1715886902ecSRichard Henderson  *
1716886902ecSRichard Henderson  * Return a mask of the address bits that contain the authentication code,
1717886902ecSRichard Henderson  * given the MMU config defined by @param.
1718886902ecSRichard Henderson  */
pauth_ptr_mask(ARMVAParameters param)1719886902ecSRichard Henderson static inline uint64_t pauth_ptr_mask(ARMVAParameters param)
1720361c33f6SPeter Maydell {
1721361c33f6SPeter Maydell     int bot_pac_bit = 64 - param.tsz;
1722361c33f6SPeter Maydell     int top_pac_bit = 64 - 8 * param.tbi;
1723361c33f6SPeter Maydell 
1724361c33f6SPeter Maydell     return MAKE_64BIT_MASK(bot_pac_bit, top_pac_bit - bot_pac_bit);
1725361c33f6SPeter Maydell }
1726361c33f6SPeter Maydell 
/* Add the cpreg definitions for debug related system registers to @cpu */
void define_debug_regs(ARMCPU *cpu);
1729361c33f6SPeter Maydell 
1730361c33f6SPeter Maydell /* Effective value of MDCR_EL2 */
arm_mdcr_el2_eff(CPUARMState * env)1731361c33f6SPeter Maydell static inline uint64_t arm_mdcr_el2_eff(CPUARMState *env)
1732361c33f6SPeter Maydell {
1733361c33f6SPeter Maydell     return arm_is_el2_enabled(env) ? env->cp15.mdcr_el2 : 0;
1734361c33f6SPeter Maydell }
1735361c33f6SPeter Maydell 
/*
 * Powers of 2 for sve_vq_map et al.: bit (q - 1) is set for each
 * power-of-two vector-quadword count q in {1, 2, 4, 8, 16}.
 */
#define SVE_VQ_POW2_MAP                                 \
    ((1 << (1 - 1)) | (1 << (2 - 1)) |                  \
     (1 << (4 - 1)) | (1 << (8 - 1)) | (1 << (16 - 1)))
1740671efad1SFabiano Rosas 
17410ca52a5fSFrancesco Cagnin /*
17420ca52a5fSFrancesco Cagnin  * Return true if it is possible to take a fine-grained-trap to EL2.
17430ca52a5fSFrancesco Cagnin  */
arm_fgt_active(CPUARMState * env,int el)17440ca52a5fSFrancesco Cagnin static inline bool arm_fgt_active(CPUARMState *env, int el)
17450ca52a5fSFrancesco Cagnin {
17460ca52a5fSFrancesco Cagnin     /*
17470ca52a5fSFrancesco Cagnin      * The Arm ARM only requires the "{E2H,TGE} != {1,1}" test for traps
17480ca52a5fSFrancesco Cagnin      * that can affect EL0, but it is harmless to do the test also for
17490ca52a5fSFrancesco Cagnin      * traps on registers that are only accessible at EL1 because if the test
17500ca52a5fSFrancesco Cagnin      * returns true then we can't be executing at EL1 anyway.
17510ca52a5fSFrancesco Cagnin      * FGT traps only happen when EL2 is enabled and EL1 is AArch64;
17520ca52a5fSFrancesco Cagnin      * traps from AArch32 only happen for the EL0 is AArch32 case.
17530ca52a5fSFrancesco Cagnin      */
17540ca52a5fSFrancesco Cagnin     return cpu_isar_feature(aa64_fgt, env_archcpu(env)) &&
17550ca52a5fSFrancesco Cagnin         el < 2 && arm_is_el2_enabled(env) &&
17560ca52a5fSFrancesco Cagnin         arm_el_is_aa64(env, 1) &&
17570ca52a5fSFrancesco Cagnin         (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE) &&
17580ca52a5fSFrancesco Cagnin         (!arm_feature(env, ARM_FEATURE_EL3) || (env->cp15.scr_el3 & SCR_FGTEN));
17590ca52a5fSFrancesco Cagnin }
17600ca52a5fSFrancesco Cagnin 
/* Sanity-check that the cached hflags match a fresh rebuild (asserts). */
void assert_hflags_rebuild_correctly(CPUARMState *env);
17620ca52a5fSFrancesco Cagnin 
/*
 * Although the ARM implementation of hardware assisted debugging
 * allows for different breakpoints per-core, the current GDB
 * interface treats them as a global pool of registers (which seems to
 * be the case for x86, ppc and s390). As a result we store one copy
 * of registers which is used for all active cores.
 *
 * Write access is serialised by virtue of the GDB protocol which
 * updates things. Read access (i.e. when the values are copied to the
 * vCPU) is also gated by GDB's run control.
 *
 * This is not unreasonable as most of the time debugging kernels you
 * never know which core will eventually execute your function.
 */

/* One hardware breakpoint: control (DBGBCR) and value (DBGBVR) pair. */
typedef struct {
    uint64_t bcr;   /* breakpoint control register value */
    uint64_t bvr;   /* breakpoint value register (the address) */
} HWBreakpoint;
17820ca52a5fSFrancesco Cagnin 
/*
 * The watchpoint registers can cover more area than the requested
 * watchpoint so we need to store the additional information
 * somewhere. We also need to supply a CPUWatchpoint to the GDB stub
 * when the watchpoint is hit.
 */
typedef struct {
    uint64_t wcr;           /* watchpoint control register value */
    uint64_t wvr;           /* watchpoint value register (the address) */
    CPUWatchpoint details;  /* handed to the GDB stub when the wp is hit */
} HWWatchpoint;
1794a96edb68SPeter Maydell 
/* Maximum and current break/watch point counts */
extern int max_hw_bps, max_hw_wps;
extern GArray *hw_breakpoints, *hw_watchpoints;

#define cur_hw_wps      (hw_watchpoints->len)
#define cur_hw_bps      (hw_breakpoints->len)
#define get_hw_bp(i)    (&g_array_index(hw_breakpoints, HWBreakpoint, i))
#define get_hw_wp(i)    (&g_array_index(hw_watchpoints, HWWatchpoint, i))

/* Lookup/insert/delete in the global hardware breakpoint pool */
bool find_hw_breakpoint(CPUState *cpu, target_ulong pc);
int insert_hw_breakpoint(target_ulong pc);
int delete_hw_breakpoint(target_ulong pc);

/* Lookup/insert/delete in the global hardware watchpoint pool */
bool check_watchpoint_in_range(int i, target_ulong addr);
CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, target_ulong addr);
int insert_hw_watchpoint(target_ulong addr, target_ulong len, int type);
int delete_hw_watchpoint(target_ulong addr, target_ulong len, int type);

/* Return the current value of the system counter in ticks */
uint64_t gt_get_countervalue(CPUARMState *env);
/*
 * Return the currently applicable offset between the system counter
 * and CNTVCT_EL0 (this will be either 0 or the value of CNTVOFF_EL2).
 */
uint64_t gt_virt_cnt_offset(CPUARMState *env);
#endif /* TARGET_ARM_INTERNALS_H */
1821