1fcf5ef2aSThomas Huth /*
2fcf5ef2aSThomas Huth * ARM virtual CPU header
3fcf5ef2aSThomas Huth *
4fcf5ef2aSThomas Huth * Copyright (c) 2003 Fabrice Bellard
5fcf5ef2aSThomas Huth *
6fcf5ef2aSThomas Huth * This library is free software; you can redistribute it and/or
7fcf5ef2aSThomas Huth * modify it under the terms of the GNU Lesser General Public
8fcf5ef2aSThomas Huth * License as published by the Free Software Foundation; either
950f57e09SChetan Pant * version 2.1 of the License, or (at your option) any later version.
10fcf5ef2aSThomas Huth *
11fcf5ef2aSThomas Huth * This library is distributed in the hope that it will be useful,
12fcf5ef2aSThomas Huth * but WITHOUT ANY WARRANTY; without even the implied warranty of
13fcf5ef2aSThomas Huth * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14fcf5ef2aSThomas Huth * Lesser General Public License for more details.
15fcf5ef2aSThomas Huth *
16fcf5ef2aSThomas Huth * You should have received a copy of the GNU Lesser General Public
17fcf5ef2aSThomas Huth * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18fcf5ef2aSThomas Huth */
19fcf5ef2aSThomas Huth
20fcf5ef2aSThomas Huth #ifndef ARM_CPU_H
21fcf5ef2aSThomas Huth #define ARM_CPU_H
22fcf5ef2aSThomas Huth
23fcf5ef2aSThomas Huth #include "kvm-consts.h"
2469242e7eSMarc-André Lureau #include "qemu/cpu-float.h"
252c4da50dSPeter Maydell #include "hw/registerfields.h"
2674433bf0SRichard Henderson #include "cpu-qom.h"
2774433bf0SRichard Henderson #include "exec/cpu-defs.h"
28690bd97bSAkihiko Odaki #include "exec/gdbstub.h"
2974781c08SPhilippe Mathieu-Daudé #include "exec/page-protection.h"
3068970d1eSAndrew Jones #include "qapi/qapi-types-common.h"
31e2d8cf9bSPhilippe Mathieu-Daudé #include "target/arm/multiprocessing.h"
32f4f318b4SPhilippe Mathieu-Daudé #include "target/arm/gtimer.h"
33fcf5ef2aSThomas Huth
34e24fd076SDongjiu Geng #ifdef TARGET_AARCH64
35e24fd076SDongjiu Geng #define KVM_HAVE_MCE_INJECTION 1
36e24fd076SDongjiu Geng #endif
37e24fd076SDongjiu Geng
38fcf5ef2aSThomas Huth #define EXCP_UDEF 1 /* undefined instruction */
39fcf5ef2aSThomas Huth #define EXCP_SWI 2 /* software interrupt */
40fcf5ef2aSThomas Huth #define EXCP_PREFETCH_ABORT 3
41fcf5ef2aSThomas Huth #define EXCP_DATA_ABORT 4
42fcf5ef2aSThomas Huth #define EXCP_IRQ 5
43fcf5ef2aSThomas Huth #define EXCP_FIQ 6
44fcf5ef2aSThomas Huth #define EXCP_BKPT 7
45fcf5ef2aSThomas Huth #define EXCP_EXCEPTION_EXIT 8 /* Return from v7M exception. */
46fcf5ef2aSThomas Huth #define EXCP_KERNEL_TRAP 9 /* Jumped to kernel code page. */
47fcf5ef2aSThomas Huth #define EXCP_HVC 11 /* HyperVisor Call */
48fcf5ef2aSThomas Huth #define EXCP_HYP_TRAP 12
49fcf5ef2aSThomas Huth #define EXCP_SMC 13 /* Secure Monitor Call */
50fcf5ef2aSThomas Huth #define EXCP_VIRQ 14
51fcf5ef2aSThomas Huth #define EXCP_VFIQ 15
52fcf5ef2aSThomas Huth #define EXCP_SEMIHOST 16 /* semihosting call */
537517748eSPeter Maydell #define EXCP_NOCP 17 /* v7M NOCP UsageFault */
54e13886e3SPeter Maydell #define EXCP_INVSTATE 18 /* v7M INVSTATE UsageFault */
5586f026deSPeter Maydell #define EXCP_STKOF 19 /* v8M STKOF UsageFault */
56e33cf0f8SPeter Maydell #define EXCP_LAZYFP 20 /* v7M fault during lazy FP stacking */
57019076b0SPeter Maydell #define EXCP_LSERR 21 /* v8M LSERR SecureFault */
58019076b0SPeter Maydell #define EXCP_UNALIGNED 22 /* v7M UNALIGNED UsageFault */
59e5346292SPeter Maydell #define EXCP_DIVBYZERO 23 /* v7M DIVBYZERO UsageFault */
603c29632fSRichard Henderson #define EXCP_VSERR 24
6111b76fdaSRichard Henderson #define EXCP_GPC 25 /* v9 Granule Protection Check Fault */
62b36a32eaSJinjie Ruan #define EXCP_NMI 26
63b36a32eaSJinjie Ruan #define EXCP_VINMI 27
64b36a32eaSJinjie Ruan #define EXCP_VFNMI 28
652c4a7cc5SPeter Maydell /* NB: add new EXCP_ defines to the array in arm_log_exception() too */
66fcf5ef2aSThomas Huth
67fcf5ef2aSThomas Huth #define ARMV7M_EXCP_RESET 1
68fcf5ef2aSThomas Huth #define ARMV7M_EXCP_NMI 2
69fcf5ef2aSThomas Huth #define ARMV7M_EXCP_HARD 3
70fcf5ef2aSThomas Huth #define ARMV7M_EXCP_MEM 4
71fcf5ef2aSThomas Huth #define ARMV7M_EXCP_BUS 5
72fcf5ef2aSThomas Huth #define ARMV7M_EXCP_USAGE 6
731e577cc7SPeter Maydell #define ARMV7M_EXCP_SECURE 7
74fcf5ef2aSThomas Huth #define ARMV7M_EXCP_SVC 11
75fcf5ef2aSThomas Huth #define ARMV7M_EXCP_DEBUG 12
76fcf5ef2aSThomas Huth #define ARMV7M_EXCP_PENDSV 14
77fcf5ef2aSThomas Huth #define ARMV7M_EXCP_SYSTICK 15
78fcf5ef2aSThomas Huth
79fcf5ef2aSThomas Huth /* ARM-specific interrupt pending bits. */
80fcf5ef2aSThomas Huth #define CPU_INTERRUPT_FIQ CPU_INTERRUPT_TGT_EXT_1
81fcf5ef2aSThomas Huth #define CPU_INTERRUPT_VIRQ CPU_INTERRUPT_TGT_EXT_2
82fcf5ef2aSThomas Huth #define CPU_INTERRUPT_VFIQ CPU_INTERRUPT_TGT_EXT_3
833c29632fSRichard Henderson #define CPU_INTERRUPT_VSERR CPU_INTERRUPT_TGT_INT_0
84b36a32eaSJinjie Ruan #define CPU_INTERRUPT_NMI CPU_INTERRUPT_TGT_EXT_4
85b36a32eaSJinjie Ruan #define CPU_INTERRUPT_VINMI CPU_INTERRUPT_TGT_EXT_0
86b36a32eaSJinjie Ruan #define CPU_INTERRUPT_VFNMI CPU_INTERRUPT_TGT_INT_1
87fcf5ef2aSThomas Huth
88fcf5ef2aSThomas Huth /* The usual mapping for an AArch64 system register to its AArch32
89fcf5ef2aSThomas Huth * counterpart is for the 32 bit world to have access to the lower
90fcf5ef2aSThomas Huth * half only (with writes leaving the upper half untouched). It's
91fcf5ef2aSThomas Huth * therefore useful to be able to pass TCG the offset of the least
92fcf5ef2aSThomas Huth * significant half of a uint64_t struct member.
93fcf5ef2aSThomas Huth */
94e03b5686SMarc-André Lureau #if HOST_BIG_ENDIAN
95fcf5ef2aSThomas Huth #define offsetoflow32(S, M) (offsetof(S, M) + sizeof(uint32_t))
96fcf5ef2aSThomas Huth #define offsetofhigh32(S, M) offsetof(S, M)
97fcf5ef2aSThomas Huth #else
98fcf5ef2aSThomas Huth #define offsetoflow32(S, M) offsetof(S, M)
99fcf5ef2aSThomas Huth #define offsetofhigh32(S, M) (offsetof(S, M) + sizeof(uint32_t))
100fcf5ef2aSThomas Huth #endif
101fcf5ef2aSThomas Huth
102fcf5ef2aSThomas Huth /* ARM-specific extra insn start words:
103fcf5ef2aSThomas Huth * 1: Conditional execution bits
104fcf5ef2aSThomas Huth * 2: Partial exception syndrome for data aborts
105fcf5ef2aSThomas Huth */
106fcf5ef2aSThomas Huth #define TARGET_INSN_START_EXTRA_WORDS 2
107fcf5ef2aSThomas Huth
108fcf5ef2aSThomas Huth /* The 2nd extra word holding syndrome info for data aborts does not use
109674e5345SPeter Maydell * the upper 6 bits nor the lower 13 bits. We mask and shift it down to
110fcf5ef2aSThomas Huth * help the sleb128 encoder do a better job.
111fcf5ef2aSThomas Huth * When restoring the CPU state, we shift it back up.
112fcf5ef2aSThomas Huth */
113fcf5ef2aSThomas Huth #define ARM_INSN_START_WORD2_MASK ((1 << 26) - 1)
114674e5345SPeter Maydell #define ARM_INSN_START_WORD2_SHIFT 13
115fcf5ef2aSThomas Huth
116fcf5ef2aSThomas Huth /* We currently assume float and double are IEEE single and double
117fcf5ef2aSThomas Huth precision respectively.
118fcf5ef2aSThomas Huth Doing runtime conversions is tricky because VFP registers may contain
119fcf5ef2aSThomas Huth integer values (eg. as the result of a FTOSI instruction).
120fcf5ef2aSThomas Huth s<2n> maps to the least significant half of d<n>
121fcf5ef2aSThomas Huth s<2n+1> maps to the most significant half of d<n>
122fcf5ef2aSThomas Huth */
123fcf5ef2aSThomas Huth
124200bf5b7SAbdallah Bouassida /**
125690bd97bSAkihiko Odaki * DynamicGDBFeatureInfo:
126690bd97bSAkihiko Odaki * @desc: Contains the feature descriptions.
127448d4d14SAlex Bennée * @data: A union with data specific to the set of registers
128200bf5b7SAbdallah Bouassida * @cpregs_keys: Array that contains the corresponding Key of
129448d4d14SAlex Bennée * a given cpreg with the same order of the cpreg
130448d4d14SAlex Bennée * in the XML description.
131200bf5b7SAbdallah Bouassida */
132690bd97bSAkihiko Odaki typedef struct DynamicGDBFeatureInfo {
133690bd97bSAkihiko Odaki GDBFeature desc;
134448d4d14SAlex Bennée union {
135448d4d14SAlex Bennée struct {
136448d4d14SAlex Bennée uint32_t *keys;
137448d4d14SAlex Bennée } cpregs;
138448d4d14SAlex Bennée } data;
139690bd97bSAkihiko Odaki } DynamicGDBFeatureInfo;
140200bf5b7SAbdallah Bouassida
141fcf5ef2aSThomas Huth /* CPU state for each instance of a generic timer (in cp15 c14) */
142fcf5ef2aSThomas Huth typedef struct ARMGenericTimer {
143fcf5ef2aSThomas Huth uint64_t cval; /* Timer CompareValue register */
144fcf5ef2aSThomas Huth uint64_t ctl; /* Timer Control register */
145fcf5ef2aSThomas Huth } ARMGenericTimer;
146fcf5ef2aSThomas Huth
147c39c2b90SRichard Henderson /* Define a maximum sized vector register.
148c39c2b90SRichard Henderson * For 32-bit, this is a 128-bit NEON/AdvSIMD register.
149c39c2b90SRichard Henderson * For 64-bit, this is a 2048-bit SVE register.
150c39c2b90SRichard Henderson *
151c39c2b90SRichard Henderson * Note that the mapping between S, D, and Q views of the register bank
152c39c2b90SRichard Henderson * differs between AArch64 and AArch32.
153c39c2b90SRichard Henderson * In AArch32:
154c39c2b90SRichard Henderson * Qn = regs[n].d[1]:regs[n].d[0]
155c39c2b90SRichard Henderson * Dn = regs[n / 2].d[n & 1]
156c39c2b90SRichard Henderson * Sn = regs[n / 4].d[n % 4 / 2],
157c39c2b90SRichard Henderson * bits 31..0 for even n, and bits 63..32 for odd n
158c39c2b90SRichard Henderson * (and regs[16] to regs[31] are inaccessible)
159c39c2b90SRichard Henderson * In AArch64:
160c39c2b90SRichard Henderson * Zn = regs[n].d[*]
161c39c2b90SRichard Henderson * Qn = regs[n].d[1]:regs[n].d[0]
162c39c2b90SRichard Henderson * Dn = regs[n].d[0]
163c39c2b90SRichard Henderson * Sn = regs[n].d[0] bits 31..0
164d0e69ea8SAlex Bennée * Hn = regs[n].d[0] bits 15..0
165c39c2b90SRichard Henderson *
166c39c2b90SRichard Henderson * This corresponds to the architecturally defined mapping between
167c39c2b90SRichard Henderson * the two execution states, and means we do not need to explicitly
168c39c2b90SRichard Henderson * map these registers when changing states.
169c39c2b90SRichard Henderson *
170c39c2b90SRichard Henderson * Align the data for use with TCG host vector operations.
171c39c2b90SRichard Henderson */
172c39c2b90SRichard Henderson
173c39c2b90SRichard Henderson #ifdef TARGET_AARCH64
174c39c2b90SRichard Henderson # define ARM_MAX_VQ 16
175c39c2b90SRichard Henderson #else
176c39c2b90SRichard Henderson # define ARM_MAX_VQ 1
177c39c2b90SRichard Henderson #endif
178c39c2b90SRichard Henderson
179c39c2b90SRichard Henderson typedef struct ARMVectorReg {
180c39c2b90SRichard Henderson uint64_t d[2 * ARM_MAX_VQ] QEMU_ALIGNED(16);
181c39c2b90SRichard Henderson } ARMVectorReg;
182c39c2b90SRichard Henderson
1833c7d3086SRichard Henderson #ifdef TARGET_AARCH64
184991ad91bSRichard Henderson /* In AArch32 mode, predicate registers do not exist at all. */
1853c7d3086SRichard Henderson typedef struct ARMPredicateReg {
18646417784SAndrew Jones uint64_t p[DIV_ROUND_UP(2 * ARM_MAX_VQ, 8)] QEMU_ALIGNED(16);
1873c7d3086SRichard Henderson } ARMPredicateReg;
188991ad91bSRichard Henderson
189991ad91bSRichard Henderson /* In AArch32 mode, PAC keys do not exist at all. */
190991ad91bSRichard Henderson typedef struct ARMPACKey {
191991ad91bSRichard Henderson uint64_t lo, hi;
192991ad91bSRichard Henderson } ARMPACKey;
1933c7d3086SRichard Henderson #endif
1943c7d3086SRichard Henderson
1953902bfc6SRichard Henderson /* See the commentary above the TBFLAG field definitions. */
1963902bfc6SRichard Henderson typedef struct CPUARMTBFlags {
1973902bfc6SRichard Henderson uint32_t flags;
198a378206aSRichard Henderson target_ulong flags2;
1993902bfc6SRichard Henderson } CPUARMTBFlags;
200c39c2b90SRichard Henderson
201f3639a64SRichard Henderson typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;
202f3639a64SRichard Henderson
2038f4e07c9SPhilippe Mathieu-Daudé typedef struct NVICState NVICState;
2048f4e07c9SPhilippe Mathieu-Daudé
2051ea4a06aSPhilippe Mathieu-Daudé typedef struct CPUArchState {
206fcf5ef2aSThomas Huth /* Regs for current mode. */
207fcf5ef2aSThomas Huth uint32_t regs[16];
208fcf5ef2aSThomas Huth
209fcf5ef2aSThomas Huth /* 32/64 switch only happens when taking and returning from
210fcf5ef2aSThomas Huth * exceptions so the overlap semantics are taken care of then
211fcf5ef2aSThomas Huth * instead of having a complicated union.
212fcf5ef2aSThomas Huth */
213fcf5ef2aSThomas Huth /* Regs for A64 mode. */
214fcf5ef2aSThomas Huth uint64_t xregs[32];
215fcf5ef2aSThomas Huth uint64_t pc;
216fcf5ef2aSThomas Huth /* PSTATE isn't an architectural register for ARMv8. However, it is
217fcf5ef2aSThomas Huth * convenient for us to assemble the underlying state into a 32 bit format
218fcf5ef2aSThomas Huth * identical to the architectural format used for the SPSR. (This is also
219fcf5ef2aSThomas Huth * what the Linux kernel's 'pstate' field in signal handlers and KVM's
220fcf5ef2aSThomas Huth * 'pstate' register are.) Of the PSTATE bits:
221fcf5ef2aSThomas Huth * NZCV are kept in the split out env->CF/VF/NF/ZF, (which have the same
222fcf5ef2aSThomas Huth * semantics as for AArch32, as described in the comments on each field)
223fcf5ef2aSThomas Huth * nRW (also known as M[4]) is kept, inverted, in env->aarch64
224fcf5ef2aSThomas Huth * DAIF (exception masks) are kept in env->daif
225f6e52eaaSRichard Henderson * BTYPE is kept in env->btype
226c37e6ac9SRichard Henderson * SM and ZA are kept in env->svcr
227fcf5ef2aSThomas Huth * all other bits are stored in their correct places in env->pstate
228fcf5ef2aSThomas Huth */
229fcf5ef2aSThomas Huth uint32_t pstate;
23053221552SRichard Henderson bool aarch64; /* True if CPU is in aarch64 state; inverse of PSTATE.nRW */
231063bbd80SRichard Henderson bool thumb; /* True if CPU is in thumb mode; cpsr[5] */
232fcf5ef2aSThomas Huth
233fdd1b228SRichard Henderson /* Cached TBFLAGS state. See below for which bits are included. */
2343902bfc6SRichard Henderson CPUARMTBFlags hflags;
235fdd1b228SRichard Henderson
236fcf5ef2aSThomas Huth /* Frequently accessed CPSR bits are stored separately for efficiency.
237fcf5ef2aSThomas Huth This contains all the other bits. Use cpsr_{read,write} to access
238fcf5ef2aSThomas Huth the whole CPSR. */
239fcf5ef2aSThomas Huth uint32_t uncached_cpsr;
240fcf5ef2aSThomas Huth uint32_t spsr;
241fcf5ef2aSThomas Huth
242fcf5ef2aSThomas Huth /* Banked registers. */
243fcf5ef2aSThomas Huth uint64_t banked_spsr[8];
244fcf5ef2aSThomas Huth uint32_t banked_r13[8];
245fcf5ef2aSThomas Huth uint32_t banked_r14[8];
246fcf5ef2aSThomas Huth
247fcf5ef2aSThomas Huth /* These hold r8-r12. */
248fcf5ef2aSThomas Huth uint32_t usr_regs[5];
249fcf5ef2aSThomas Huth uint32_t fiq_regs[5];
250fcf5ef2aSThomas Huth
251fcf5ef2aSThomas Huth /* cpsr flag cache for faster execution */
252fcf5ef2aSThomas Huth uint32_t CF; /* 0 or 1 */
253fcf5ef2aSThomas Huth uint32_t VF; /* V is the bit 31. All other bits are undefined */
254fcf5ef2aSThomas Huth uint32_t NF; /* N is bit 31. All other bits are undefined. */
255fcf5ef2aSThomas Huth uint32_t ZF; /* Z set if zero. */
256fcf5ef2aSThomas Huth uint32_t QF; /* 0 or 1 */
257fcf5ef2aSThomas Huth uint32_t GE; /* cpsr[19:16] */
258fcf5ef2aSThomas Huth uint32_t condexec_bits; /* IT bits. cpsr[15:10,26:25]. */
259f6e52eaaSRichard Henderson uint32_t btype; /* BTI branch type. spsr[11:10]. */
260fcf5ef2aSThomas Huth uint64_t daif; /* exception masks, in the bits they are in PSTATE */
261c37e6ac9SRichard Henderson uint64_t svcr; /* PSTATE.{SM,ZA} in the bits they are in SVCR */
262fcf5ef2aSThomas Huth
263fcf5ef2aSThomas Huth uint64_t elr_el[4]; /* AArch64 exception link regs */
264fcf5ef2aSThomas Huth uint64_t sp_el[4]; /* AArch64 banked stack pointers */
265fcf5ef2aSThomas Huth
266fcf5ef2aSThomas Huth /* System control coprocessor (cp15) */
267fcf5ef2aSThomas Huth struct {
268fcf5ef2aSThomas Huth uint32_t c0_cpuid;
269fcf5ef2aSThomas Huth union { /* Cache size selection */
270fcf5ef2aSThomas Huth struct {
271fcf5ef2aSThomas Huth uint64_t _unused_csselr0;
272fcf5ef2aSThomas Huth uint64_t csselr_ns;
273fcf5ef2aSThomas Huth uint64_t _unused_csselr1;
274fcf5ef2aSThomas Huth uint64_t csselr_s;
275fcf5ef2aSThomas Huth };
276fcf5ef2aSThomas Huth uint64_t csselr_el[4];
277fcf5ef2aSThomas Huth };
278fcf5ef2aSThomas Huth union { /* System control register. */
279fcf5ef2aSThomas Huth struct {
280fcf5ef2aSThomas Huth uint64_t _unused_sctlr;
281fcf5ef2aSThomas Huth uint64_t sctlr_ns;
282fcf5ef2aSThomas Huth uint64_t hsctlr;
283fcf5ef2aSThomas Huth uint64_t sctlr_s;
284fcf5ef2aSThomas Huth };
285fcf5ef2aSThomas Huth uint64_t sctlr_el[4];
286fcf5ef2aSThomas Huth };
287761c4642STobias Röhmel uint64_t vsctlr; /* Virtualization System control register. */
288fcf5ef2aSThomas Huth uint64_t cpacr_el1; /* Architectural feature access control register */
289fcf5ef2aSThomas Huth uint64_t cptr_el[4]; /* ARMv8 feature trap registers */
290fcf5ef2aSThomas Huth uint32_t c1_xscaleauxcr; /* XScale auxiliary control register. */
291fcf5ef2aSThomas Huth uint64_t sder; /* Secure debug enable register. */
292fcf5ef2aSThomas Huth uint32_t nsacr; /* Non-secure access control register. */
293fcf5ef2aSThomas Huth union { /* MMU translation table base 0. */
294fcf5ef2aSThomas Huth struct {
295fcf5ef2aSThomas Huth uint64_t _unused_ttbr0_0;
296fcf5ef2aSThomas Huth uint64_t ttbr0_ns;
297fcf5ef2aSThomas Huth uint64_t _unused_ttbr0_1;
298fcf5ef2aSThomas Huth uint64_t ttbr0_s;
299fcf5ef2aSThomas Huth };
300fcf5ef2aSThomas Huth uint64_t ttbr0_el[4];
301fcf5ef2aSThomas Huth };
302fcf5ef2aSThomas Huth union { /* MMU translation table base 1. */
303fcf5ef2aSThomas Huth struct {
304fcf5ef2aSThomas Huth uint64_t _unused_ttbr1_0;
305fcf5ef2aSThomas Huth uint64_t ttbr1_ns;
306fcf5ef2aSThomas Huth uint64_t _unused_ttbr1_1;
307fcf5ef2aSThomas Huth uint64_t ttbr1_s;
308fcf5ef2aSThomas Huth };
309fcf5ef2aSThomas Huth uint64_t ttbr1_el[4];
310fcf5ef2aSThomas Huth };
311fcf5ef2aSThomas Huth uint64_t vttbr_el2; /* Virtualization Translation Table Base. */
312e9152ee9SRémi Denis-Courmont uint64_t vsttbr_el2; /* Secure Virtualization Translation Table. */
313fcf5ef2aSThomas Huth /* MMU translation table base control. */
314cb4a0a34SPeter Maydell uint64_t tcr_el[4];
315988cc190SPeter Maydell uint64_t vtcr_el2; /* Virtualization Translation Control. */
316988cc190SPeter Maydell uint64_t vstcr_el2; /* Secure Virtualization Translation Control. */
317fcf5ef2aSThomas Huth uint32_t c2_data; /* MPU data cacheable bits. */
318fcf5ef2aSThomas Huth uint32_t c2_insn; /* MPU instruction cacheable bits. */
319fcf5ef2aSThomas Huth union { /* MMU domain access control register
320fcf5ef2aSThomas Huth * MPU write buffer control.
321fcf5ef2aSThomas Huth */
322fcf5ef2aSThomas Huth struct {
323fcf5ef2aSThomas Huth uint64_t dacr_ns;
324fcf5ef2aSThomas Huth uint64_t dacr_s;
325fcf5ef2aSThomas Huth };
326fcf5ef2aSThomas Huth struct {
327fcf5ef2aSThomas Huth uint64_t dacr32_el2;
328fcf5ef2aSThomas Huth };
329fcf5ef2aSThomas Huth };
330fcf5ef2aSThomas Huth uint32_t pmsav5_data_ap; /* PMSAv5 MPU data access permissions */
331fcf5ef2aSThomas Huth uint32_t pmsav5_insn_ap; /* PMSAv5 MPU insn access permissions */
332fcf5ef2aSThomas Huth uint64_t hcr_el2; /* Hypervisor configuration register */
3335814d587SRichard Henderson uint64_t hcrx_el2; /* Extended Hypervisor configuration register */
334fcf5ef2aSThomas Huth uint64_t scr_el3; /* Secure configuration register. */
335fcf5ef2aSThomas Huth union { /* Fault status registers. */
336fcf5ef2aSThomas Huth struct {
337fcf5ef2aSThomas Huth uint64_t ifsr_ns;
338fcf5ef2aSThomas Huth uint64_t ifsr_s;
339fcf5ef2aSThomas Huth };
340fcf5ef2aSThomas Huth struct {
341fcf5ef2aSThomas Huth uint64_t ifsr32_el2;
342fcf5ef2aSThomas Huth };
343fcf5ef2aSThomas Huth };
344fcf5ef2aSThomas Huth union {
345fcf5ef2aSThomas Huth struct {
346fcf5ef2aSThomas Huth uint64_t _unused_dfsr;
347fcf5ef2aSThomas Huth uint64_t dfsr_ns;
348fcf5ef2aSThomas Huth uint64_t hsr;
349fcf5ef2aSThomas Huth uint64_t dfsr_s;
350fcf5ef2aSThomas Huth };
351fcf5ef2aSThomas Huth uint64_t esr_el[4];
352fcf5ef2aSThomas Huth };
353fcf5ef2aSThomas Huth uint32_t c6_region[8]; /* MPU base/size registers. */
354fcf5ef2aSThomas Huth union { /* Fault address registers. */
355fcf5ef2aSThomas Huth struct {
356fcf5ef2aSThomas Huth uint64_t _unused_far0;
357e03b5686SMarc-André Lureau #if HOST_BIG_ENDIAN
358fcf5ef2aSThomas Huth uint32_t ifar_ns;
359fcf5ef2aSThomas Huth uint32_t dfar_ns;
360fcf5ef2aSThomas Huth uint32_t ifar_s;
361fcf5ef2aSThomas Huth uint32_t dfar_s;
362fcf5ef2aSThomas Huth #else
363fcf5ef2aSThomas Huth uint32_t dfar_ns;
364fcf5ef2aSThomas Huth uint32_t ifar_ns;
365fcf5ef2aSThomas Huth uint32_t dfar_s;
366fcf5ef2aSThomas Huth uint32_t ifar_s;
367fcf5ef2aSThomas Huth #endif
368fcf5ef2aSThomas Huth uint64_t _unused_far3;
369fcf5ef2aSThomas Huth };
370fcf5ef2aSThomas Huth uint64_t far_el[4];
371fcf5ef2aSThomas Huth };
372fcf5ef2aSThomas Huth uint64_t hpfar_el2;
373fcf5ef2aSThomas Huth uint64_t hstr_el2;
374fcf5ef2aSThomas Huth union { /* Translation result. */
375fcf5ef2aSThomas Huth struct {
376fcf5ef2aSThomas Huth uint64_t _unused_par_0;
377fcf5ef2aSThomas Huth uint64_t par_ns;
378fcf5ef2aSThomas Huth uint64_t _unused_par_1;
379fcf5ef2aSThomas Huth uint64_t par_s;
380fcf5ef2aSThomas Huth };
381fcf5ef2aSThomas Huth uint64_t par_el[4];
382fcf5ef2aSThomas Huth };
383fcf5ef2aSThomas Huth
384fcf5ef2aSThomas Huth uint32_t c9_insn; /* Cache lockdown registers. */
385fcf5ef2aSThomas Huth uint32_t c9_data;
386fcf5ef2aSThomas Huth uint64_t c9_pmcr; /* performance monitor control register */
387fcf5ef2aSThomas Huth uint64_t c9_pmcnten; /* perf monitor counter enables */
388e4e91a21SAaron Lindsay uint64_t c9_pmovsr; /* perf monitor overflow status */
389e4e91a21SAaron Lindsay uint64_t c9_pmuserenr; /* perf monitor user enable */
3906b040780SWei Huang uint64_t c9_pmselr; /* perf monitor counter selection register */
391e6ec5457SWei Huang uint64_t c9_pminten; /* perf monitor interrupt enables */
392fcf5ef2aSThomas Huth union { /* Memory attribute redirection */
393fcf5ef2aSThomas Huth struct {
394e03b5686SMarc-André Lureau #if HOST_BIG_ENDIAN
395fcf5ef2aSThomas Huth uint64_t _unused_mair_0;
396fcf5ef2aSThomas Huth uint32_t mair1_ns;
397fcf5ef2aSThomas Huth uint32_t mair0_ns;
398fcf5ef2aSThomas Huth uint64_t _unused_mair_1;
399fcf5ef2aSThomas Huth uint32_t mair1_s;
400fcf5ef2aSThomas Huth uint32_t mair0_s;
401fcf5ef2aSThomas Huth #else
402fcf5ef2aSThomas Huth uint64_t _unused_mair_0;
403fcf5ef2aSThomas Huth uint32_t mair0_ns;
404fcf5ef2aSThomas Huth uint32_t mair1_ns;
405fcf5ef2aSThomas Huth uint64_t _unused_mair_1;
406fcf5ef2aSThomas Huth uint32_t mair0_s;
407fcf5ef2aSThomas Huth uint32_t mair1_s;
408fcf5ef2aSThomas Huth #endif
409fcf5ef2aSThomas Huth };
410fcf5ef2aSThomas Huth uint64_t mair_el[4];
411fcf5ef2aSThomas Huth };
412fcf5ef2aSThomas Huth union { /* vector base address register */
413fcf5ef2aSThomas Huth struct {
414fcf5ef2aSThomas Huth uint64_t _unused_vbar;
415fcf5ef2aSThomas Huth uint64_t vbar_ns;
416fcf5ef2aSThomas Huth uint64_t hvbar;
417fcf5ef2aSThomas Huth uint64_t vbar_s;
418fcf5ef2aSThomas Huth };
419fcf5ef2aSThomas Huth uint64_t vbar_el[4];
420fcf5ef2aSThomas Huth };
421fcf5ef2aSThomas Huth uint32_t mvbar; /* (monitor) vector base address register */
4224a7319b7SEdgar E. Iglesias uint64_t rvbar; /* rvbar sampled from rvbar property at reset */
423fcf5ef2aSThomas Huth struct { /* FCSE PID. */
424fcf5ef2aSThomas Huth uint32_t fcseidr_ns;
425fcf5ef2aSThomas Huth uint32_t fcseidr_s;
426fcf5ef2aSThomas Huth };
427fcf5ef2aSThomas Huth union { /* Context ID. */
428fcf5ef2aSThomas Huth struct {
429fcf5ef2aSThomas Huth uint64_t _unused_contextidr_0;
430fcf5ef2aSThomas Huth uint64_t contextidr_ns;
431fcf5ef2aSThomas Huth uint64_t _unused_contextidr_1;
432fcf5ef2aSThomas Huth uint64_t contextidr_s;
433fcf5ef2aSThomas Huth };
434fcf5ef2aSThomas Huth uint64_t contextidr_el[4];
435fcf5ef2aSThomas Huth };
436fcf5ef2aSThomas Huth union { /* User RW Thread register. */
437fcf5ef2aSThomas Huth struct {
438fcf5ef2aSThomas Huth uint64_t tpidrurw_ns;
439fcf5ef2aSThomas Huth uint64_t tpidrprw_ns;
440fcf5ef2aSThomas Huth uint64_t htpidr;
441fcf5ef2aSThomas Huth uint64_t _tpidr_el3;
442fcf5ef2aSThomas Huth };
443fcf5ef2aSThomas Huth uint64_t tpidr_el[4];
444fcf5ef2aSThomas Huth };
4459e5ec745SRichard Henderson uint64_t tpidr2_el0;
446fcf5ef2aSThomas Huth /* The secure banks of these registers don't map anywhere */
447fcf5ef2aSThomas Huth uint64_t tpidrurw_s;
448fcf5ef2aSThomas Huth uint64_t tpidrprw_s;
449fcf5ef2aSThomas Huth uint64_t tpidruro_s;
450fcf5ef2aSThomas Huth
451fcf5ef2aSThomas Huth union { /* User RO Thread register. */
452fcf5ef2aSThomas Huth uint64_t tpidruro_ns;
453fcf5ef2aSThomas Huth uint64_t tpidrro_el[1];
454fcf5ef2aSThomas Huth };
455fcf5ef2aSThomas Huth uint64_t c14_cntfrq; /* Counter Frequency register */
456fcf5ef2aSThomas Huth uint64_t c14_cntkctl; /* Timer Control register */
457bb461330SRichard Henderson uint64_t cnthctl_el2; /* Counter/Timer Hyp Control register */
458fcf5ef2aSThomas Huth uint64_t cntvoff_el2; /* Counter Virtual Offset register */
4592808d3b3SPeter Maydell uint64_t cntpoff_el2; /* Counter Physical Offset register */
460fcf5ef2aSThomas Huth ARMGenericTimer c14_timer[NUM_GTIMERS];
461fcf5ef2aSThomas Huth uint32_t c15_cpar; /* XScale Coprocessor Access Register */
462fcf5ef2aSThomas Huth uint32_t c15_ticonfig; /* TI925T configuration byte. */
463fcf5ef2aSThomas Huth uint32_t c15_i_max; /* Maximum D-cache dirty line index. */
464fcf5ef2aSThomas Huth uint32_t c15_i_min; /* Minimum D-cache dirty line index. */
465fcf5ef2aSThomas Huth uint32_t c15_threadid; /* TI debugger thread-ID. */
466fcf5ef2aSThomas Huth uint32_t c15_config_base_address; /* SCU base address. */
467fcf5ef2aSThomas Huth uint32_t c15_diagnostic; /* diagnostic register */
468fcf5ef2aSThomas Huth uint32_t c15_power_diagnostic;
469fcf5ef2aSThomas Huth uint32_t c15_power_control; /* power control */
470fcf5ef2aSThomas Huth uint64_t dbgbvr[16]; /* breakpoint value registers */
471fcf5ef2aSThomas Huth uint64_t dbgbcr[16]; /* breakpoint control registers */
472fcf5ef2aSThomas Huth uint64_t dbgwvr[16]; /* watchpoint value registers */
473fcf5ef2aSThomas Huth uint64_t dbgwcr[16]; /* watchpoint control registers */
4745fc83f11SEvgeny Iakovlev uint64_t dbgclaim; /* DBGCLAIM bits */
475fcf5ef2aSThomas Huth uint64_t mdscr_el1;
476fcf5ef2aSThomas Huth uint64_t oslsr_el1; /* OS Lock Status */
477f94a6df5SPeter Maydell uint64_t osdlr_el1; /* OS DoubleLock status */
478fcf5ef2aSThomas Huth uint64_t mdcr_el2;
479fcf5ef2aSThomas Huth uint64_t mdcr_el3;
4805d05b9d4SAaron Lindsay /* Stores the architectural value of the counter *the last time it was
4815d05b9d4SAaron Lindsay * updated* by pmccntr_op_start. Accesses should always be surrounded
4825d05b9d4SAaron Lindsay * by pmccntr_op_start/pmccntr_op_finish to guarantee the latest
4835d05b9d4SAaron Lindsay * architecturally-correct value is being read/set.
484fcf5ef2aSThomas Huth */
485fcf5ef2aSThomas Huth uint64_t c15_ccnt;
4865d05b9d4SAaron Lindsay /* Stores the delta between the architectural value and the underlying
4875d05b9d4SAaron Lindsay * cycle count during normal operation. It is used to update c15_ccnt
4885d05b9d4SAaron Lindsay * to be the correct architectural value before accesses. During
4895d05b9d4SAaron Lindsay * accesses, c15_ccnt_delta contains the underlying count being used
4905d05b9d4SAaron Lindsay * for the access, after which it reverts to the delta value in
4915d05b9d4SAaron Lindsay * pmccntr_op_finish.
4925d05b9d4SAaron Lindsay */
4935d05b9d4SAaron Lindsay uint64_t c15_ccnt_delta;
4945ecdd3e4SAaron Lindsay uint64_t c14_pmevcntr[31];
4955ecdd3e4SAaron Lindsay uint64_t c14_pmevcntr_delta[31];
4965ecdd3e4SAaron Lindsay uint64_t c14_pmevtyper[31];
497fcf5ef2aSThomas Huth uint64_t pmccfiltr_el0; /* Performance Monitor Filter Register */
498fcf5ef2aSThomas Huth uint64_t vpidr_el2; /* Virtualization Processor ID Register */
499fcf5ef2aSThomas Huth uint64_t vmpidr_el2; /* Virtualization Multiprocessor ID Register */
5004b779cebSRichard Henderson uint64_t tfsr_el[4]; /* tfsre0_el1 is index 0. */
5014b779cebSRichard Henderson uint64_t gcr_el1;
5024b779cebSRichard Henderson uint64_t rgsr_el1;
50358e93b48SRichard Henderson
50458e93b48SRichard Henderson /* Minimal RAS registers */
50558e93b48SRichard Henderson uint64_t disr_el1;
50658e93b48SRichard Henderson uint64_t vdisr_el2;
50758e93b48SRichard Henderson uint64_t vsesr_el2;
50815126d9cSPeter Maydell
50915126d9cSPeter Maydell /*
51015126d9cSPeter Maydell * Fine-Grained Trap registers. We store these as arrays so the
51115126d9cSPeter Maydell * access checking code doesn't have to manually select
51215126d9cSPeter Maydell * HFGRTR_EL2 vs HFDFGRTR_EL2 etc when looking up the bit to test.
51315126d9cSPeter Maydell * FEAT_FGT2 will add more elements to these arrays.
51415126d9cSPeter Maydell */
51515126d9cSPeter Maydell uint64_t fgt_read[2]; /* HFGRTR, HDFGRTR */
51615126d9cSPeter Maydell uint64_t fgt_write[2]; /* HFGWTR, HDFGWTR */
51715126d9cSPeter Maydell uint64_t fgt_exec[1]; /* HFGITR */
518ef1febe7SRichard Henderson
519ef1febe7SRichard Henderson /* RME registers */
520ef1febe7SRichard Henderson uint64_t gpccr_el3;
521ef1febe7SRichard Henderson uint64_t gptbr_el3;
522ef1febe7SRichard Henderson uint64_t mfar_el3;
523b5ba6c99SPeter Maydell
524b5ba6c99SPeter Maydell /* NV2 register */
525b5ba6c99SPeter Maydell uint64_t vncr_el2;
526fcf5ef2aSThomas Huth } cp15;
527fcf5ef2aSThomas Huth
528fcf5ef2aSThomas Huth struct {
529fb602cb7SPeter Maydell /* M profile has up to 4 stack pointers:
530fb602cb7SPeter Maydell * a Main Stack Pointer and a Process Stack Pointer for each
531fb602cb7SPeter Maydell * of the Secure and Non-Secure states. (If the CPU doesn't support
532fb602cb7SPeter Maydell * the security extension then it has only two SPs.)
533fb602cb7SPeter Maydell * In QEMU we always store the currently active SP in regs[13],
534fb602cb7SPeter Maydell * and the non-active SP for the current security state in
535fb602cb7SPeter Maydell * v7m.other_sp. The stack pointers for the inactive security state
536fb602cb7SPeter Maydell * are stored in other_ss_msp and other_ss_psp.
537fb602cb7SPeter Maydell * switch_v7m_security_state() is responsible for rearranging them
538fb602cb7SPeter Maydell * when we change security state.
539fb602cb7SPeter Maydell */
540fcf5ef2aSThomas Huth uint32_t other_sp;
541fb602cb7SPeter Maydell uint32_t other_ss_msp;
542fb602cb7SPeter Maydell uint32_t other_ss_psp;
5434a16724fSPeter Maydell uint32_t vecbase[M_REG_NUM_BANKS];
5444a16724fSPeter Maydell uint32_t basepri[M_REG_NUM_BANKS];
5454a16724fSPeter Maydell uint32_t control[M_REG_NUM_BANKS];
5464a16724fSPeter Maydell uint32_t ccr[M_REG_NUM_BANKS]; /* Configuration and Control */
5474a16724fSPeter Maydell uint32_t cfsr[M_REG_NUM_BANKS]; /* Configurable Fault Status */
5482c4da50dSPeter Maydell uint32_t hfsr; /* HardFault Status */
5492c4da50dSPeter Maydell uint32_t dfsr; /* Debug Fault Status Register */
550bed079daSPeter Maydell uint32_t sfsr; /* Secure Fault Status Register */
5514a16724fSPeter Maydell uint32_t mmfar[M_REG_NUM_BANKS]; /* MemManage Fault Address */
5522c4da50dSPeter Maydell uint32_t bfar; /* BusFault Address */
553bed079daSPeter Maydell uint32_t sfar; /* Secure Fault Address Register */
5544a16724fSPeter Maydell unsigned mpu_ctrl[M_REG_NUM_BANKS]; /* MPU_CTRL */
555fcf5ef2aSThomas Huth int exception;
5564a16724fSPeter Maydell uint32_t primask[M_REG_NUM_BANKS];
5574a16724fSPeter Maydell uint32_t faultmask[M_REG_NUM_BANKS];
5583b2e9344SPeter Maydell uint32_t aircr; /* only holds r/w state if security extn implemented */
5591e577cc7SPeter Maydell uint32_t secure; /* Is CPU in Secure state? (not guest visible) */
56043bbce7fSPeter Maydell uint32_t csselr[M_REG_NUM_BANKS];
56124ac0fb1SPeter Maydell uint32_t scr[M_REG_NUM_BANKS];
56257bb3156SPeter Maydell uint32_t msplim[M_REG_NUM_BANKS];
56357bb3156SPeter Maydell uint32_t psplim[M_REG_NUM_BANKS];
564d33abe82SPeter Maydell uint32_t fpcar[M_REG_NUM_BANKS];
565d33abe82SPeter Maydell uint32_t fpccr[M_REG_NUM_BANKS];
566d33abe82SPeter Maydell uint32_t fpdscr[M_REG_NUM_BANKS];
567d33abe82SPeter Maydell uint32_t cpacr[M_REG_NUM_BANKS];
568d33abe82SPeter Maydell uint32_t nsacr;
569b26b5629SPeter Maydell uint32_t ltpsize;
5707c3d47daSPeter Maydell uint32_t vpr;
571fcf5ef2aSThomas Huth } v7m;
572fcf5ef2aSThomas Huth
573fcf5ef2aSThomas Huth /* Information associated with an exception about to be taken:
574fcf5ef2aSThomas Huth * code which raises an exception must set cs->exception_index and
575fcf5ef2aSThomas Huth * the relevant parts of this structure; the cpu_do_interrupt function
576fcf5ef2aSThomas Huth * will then set the guest-visible registers as part of the exception
577fcf5ef2aSThomas Huth * entry process.
578fcf5ef2aSThomas Huth */
579fcf5ef2aSThomas Huth struct {
580fcf5ef2aSThomas Huth uint32_t syndrome; /* AArch64 format syndrome register */
581fcf5ef2aSThomas Huth uint32_t fsr; /* AArch32 format fault status register info */
582fcf5ef2aSThomas Huth uint64_t vaddress; /* virtual addr associated with exception, if any */
583fcf5ef2aSThomas Huth uint32_t target_el; /* EL the exception should be targeted for */
584fcf5ef2aSThomas Huth /* If we implement EL2 we will also need to store information
585fcf5ef2aSThomas Huth * about the intermediate physical address for stage 2 faults.
586fcf5ef2aSThomas Huth */
587fcf5ef2aSThomas Huth } exception;
588fcf5ef2aSThomas Huth
589202ccb6bSDongjiu Geng /* Information associated with an SError */
590202ccb6bSDongjiu Geng struct {
591202ccb6bSDongjiu Geng uint8_t pending;
592202ccb6bSDongjiu Geng uint8_t has_esr;
593202ccb6bSDongjiu Geng uint64_t esr;
594202ccb6bSDongjiu Geng } serror;
595202ccb6bSDongjiu Geng
5961711bfa5SBeata Michalska uint8_t ext_dabt_raised; /* Tracking/verifying injection of ext DABT */
5971711bfa5SBeata Michalska
598ed89f078SPeter Maydell /* State of our input IRQ/FIQ/VIRQ/VFIQ lines */
599ed89f078SPeter Maydell uint32_t irq_line_state;
600ed89f078SPeter Maydell
601fcf5ef2aSThomas Huth /* Thumb-2 EE state. */
602fcf5ef2aSThomas Huth uint32_t teecr;
603fcf5ef2aSThomas Huth uint32_t teehbr;
604fcf5ef2aSThomas Huth
605fcf5ef2aSThomas Huth /* VFP coprocessor state. */
606fcf5ef2aSThomas Huth struct {
607c39c2b90SRichard Henderson ARMVectorReg zregs[32];
608fcf5ef2aSThomas Huth
6093c7d3086SRichard Henderson #ifdef TARGET_AARCH64
6103c7d3086SRichard Henderson /* Store FFR as pregs[16] to make it easier to treat as any other. */
611028e2a7bSRichard Henderson #define FFR_PRED_NUM 16
6123c7d3086SRichard Henderson ARMPredicateReg pregs[17];
613516e246aSRichard Henderson /* Scratch space for aa64 sve predicate temporary. */
614516e246aSRichard Henderson ARMPredicateReg preg_tmp;
6153c7d3086SRichard Henderson #endif
6163c7d3086SRichard Henderson
617fcf5ef2aSThomas Huth /* We store these fpcsr fields separately for convenience. */
618a4d58462SRichard Henderson uint32_t qc[4] QEMU_ALIGNED(16);
619fcf5ef2aSThomas Huth int vec_len;
620fcf5ef2aSThomas Huth int vec_stride;
621fcf5ef2aSThomas Huth
622ce07ea61SPeter Maydell /*
623ce07ea61SPeter Maydell * Floating point status and control registers. Some bits are
624ce07ea61SPeter Maydell * stored separately in other fields or in the float_status below.
625ce07ea61SPeter Maydell */
626ce07ea61SPeter Maydell uint64_t fpsr;
627ce07ea61SPeter Maydell uint64_t fpcr;
628ce07ea61SPeter Maydell
629a4d58462SRichard Henderson uint32_t xregs[16];
630a4d58462SRichard Henderson
631516e246aSRichard Henderson /* Scratch space for aa32 neon expansion. */
632fcf5ef2aSThomas Huth uint32_t scratch[8];
633fcf5ef2aSThomas Huth
634d81ce0efSAlex Bennée /* There are a number of distinct float control structures:
635d81ce0efSAlex Bennée *
636d81ce0efSAlex Bennée * fp_status: is the "normal" fp status.
637d81ce0efSAlex Bennée * fp_status_fp16: used for half-precision calculations
638d81ce0efSAlex Bennée * standard_fp_status : the ARM "Standard FPSCR Value"
639aaae563bSPeter Maydell * standard_fp_status_fp16 : used for half-precision
640aaae563bSPeter Maydell * calculations with the ARM "Standard FPSCR Value"
641d81ce0efSAlex Bennée *
642d81ce0efSAlex Bennée * Half-precision operations are governed by a separate
643d81ce0efSAlex Bennée * flush-to-zero control bit in FPSCR:FZ16. We pass a separate
644d81ce0efSAlex Bennée * status structure to control this.
645d81ce0efSAlex Bennée *
646d81ce0efSAlex Bennée * The "Standard FPSCR", ie default-NaN, flush-to-zero,
647d81ce0efSAlex Bennée * round-to-nearest and is used by any operations (generally
648d81ce0efSAlex Bennée * Neon) which the architecture defines as controlled by the
649d81ce0efSAlex Bennée * standard FPSCR value rather than the FPSCR.
650fcf5ef2aSThomas Huth *
651aaae563bSPeter Maydell * The "standard FPSCR but for fp16 ops" is needed because
652aaae563bSPeter Maydell * the "standard FPSCR" tracks the FPSCR.FZ16 bit rather than
653aaae563bSPeter Maydell * using a fixed value for it.
654aaae563bSPeter Maydell *
655fcf5ef2aSThomas Huth * To avoid having to transfer exception bits around, we simply
656fcf5ef2aSThomas Huth * say that the FPSCR cumulative exception flags are the logical
657aaae563bSPeter Maydell * OR of the flags in the four fp statuses. This relies on the
658fcf5ef2aSThomas Huth * only thing which needs to read the exception flags being
659fcf5ef2aSThomas Huth * an explicit FPSCR read.
660fcf5ef2aSThomas Huth */
661fcf5ef2aSThomas Huth float_status fp_status;
662d81ce0efSAlex Bennée float_status fp_status_f16;
663fcf5ef2aSThomas Huth float_status standard_fp_status;
664aaae563bSPeter Maydell float_status standard_fp_status_f16;
6655be5e8edSRichard Henderson
666de561988SRichard Henderson uint64_t zcr_el[4]; /* ZCR_EL[1-3] */
667de561988SRichard Henderson uint64_t smcr_el[4]; /* SMCR_EL[1-3] */
668fcf5ef2aSThomas Huth } vfp;
6690f08429cSRichard Henderson
670fcf5ef2aSThomas Huth uint64_t exclusive_addr;
671fcf5ef2aSThomas Huth uint64_t exclusive_val;
6720f08429cSRichard Henderson /*
6730f08429cSRichard Henderson * Contains the 'val' for the second 64-bit register of LDXP, which comes
6740f08429cSRichard Henderson * from the higher address, not the high part of a complete 128-bit value.
6750f08429cSRichard Henderson * In some ways it might be more convenient to record the exclusive value
6760f08429cSRichard Henderson * as the low and high halves of a 128 bit data value, but the current
6770f08429cSRichard Henderson * semantics of these fields are baked into the migration format.
6780f08429cSRichard Henderson */
679fcf5ef2aSThomas Huth uint64_t exclusive_high;
680fcf5ef2aSThomas Huth
681fcf5ef2aSThomas Huth /* iwMMXt coprocessor state. */
682fcf5ef2aSThomas Huth struct {
683fcf5ef2aSThomas Huth uint64_t regs[16];
684fcf5ef2aSThomas Huth uint64_t val;
685fcf5ef2aSThomas Huth
686fcf5ef2aSThomas Huth uint32_t cregs[16];
687fcf5ef2aSThomas Huth } iwmmxt;
688fcf5ef2aSThomas Huth
689991ad91bSRichard Henderson #ifdef TARGET_AARCH64
690108b3ba8SRichard Henderson struct {
691108b3ba8SRichard Henderson ARMPACKey apia;
692108b3ba8SRichard Henderson ARMPACKey apib;
693108b3ba8SRichard Henderson ARMPACKey apda;
694108b3ba8SRichard Henderson ARMPACKey apdb;
695108b3ba8SRichard Henderson ARMPACKey apga;
696108b3ba8SRichard Henderson } keys;
6977cb1e618SRichard Henderson
6987cb1e618SRichard Henderson uint64_t scxtnum_el[4];
699dc993a01SRichard Henderson
700dc993a01SRichard Henderson /*
701dc993a01SRichard Henderson * SME ZA storage -- 256 x 256 byte array, with bytes in host word order,
702dc993a01SRichard Henderson * as we do with vfp.zregs[]. This corresponds to the architectural ZA
703dc993a01SRichard Henderson * array, where ZA[N] is in the least-significant bytes of env->zarray[N].
704dc993a01SRichard Henderson * When SVL is less than the architectural maximum, the accessible
705dc993a01SRichard Henderson * storage is restricted, such that if the SVL is X bytes the guest can
706dc993a01SRichard Henderson * see only the bottom X elements of zarray[], and only the least
707dc993a01SRichard Henderson * significant X bytes of each element of the array. (In other words,
708dc993a01SRichard Henderson * the observable part is always square.)
709dc993a01SRichard Henderson *
710dc993a01SRichard Henderson * The ZA storage can also be considered as a set of square tiles of
711dc993a01SRichard Henderson * elements of different sizes. The mapping from tiles to the ZA array
712dc993a01SRichard Henderson * is architecturally defined, such that for tiles of elements of esz
713dc993a01SRichard Henderson * bytes, the Nth row (or "horizontal slice") of tile T is in
714dc993a01SRichard Henderson * ZA[T + N * esz]. Note that this means that each tile is not contiguous
715dc993a01SRichard Henderson * in the ZA storage, because its rows are striped through the ZA array.
716dc993a01SRichard Henderson *
717dc993a01SRichard Henderson * Because this is so large, keep this toward the end of the reset area,
718dc993a01SRichard Henderson * to keep the offsets into the rest of the structure smaller.
719dc993a01SRichard Henderson */
720dc993a01SRichard Henderson ARMVectorReg zarray[ARM_MAX_VQ * 16];
721991ad91bSRichard Henderson #endif
722991ad91bSRichard Henderson
723fcf5ef2aSThomas Huth struct CPUBreakpoint *cpu_breakpoint[16];
724fcf5ef2aSThomas Huth struct CPUWatchpoint *cpu_watchpoint[16];
725fcf5ef2aSThomas Huth
726f3639a64SRichard Henderson /* Optional fault info across tlb lookup. */
727f3639a64SRichard Henderson ARMMMUFaultInfo *tlb_fi;
728f3639a64SRichard Henderson
7291f5c00cfSAlex Bennée /* Fields up to this point are cleared by a CPU reset */
7301f5c00cfSAlex Bennée struct {} end_reset_fields;
7311f5c00cfSAlex Bennée
732e8b5fae5SRichard Henderson /* Fields after this point are preserved across CPU reset. */
733fcf5ef2aSThomas Huth
734fcf5ef2aSThomas Huth /* Internal CPU feature flags. */
735fcf5ef2aSThomas Huth uint64_t features;
736fcf5ef2aSThomas Huth
737fcf5ef2aSThomas Huth /* PMSAv7 MPU */
738fcf5ef2aSThomas Huth struct {
739fcf5ef2aSThomas Huth uint32_t *drbar;
740fcf5ef2aSThomas Huth uint32_t *drsr;
741fcf5ef2aSThomas Huth uint32_t *dracr;
7424a16724fSPeter Maydell uint32_t rnr[M_REG_NUM_BANKS];
743fcf5ef2aSThomas Huth } pmsav7;
744fcf5ef2aSThomas Huth
7450e1a46bbSPeter Maydell /* PMSAv8 MPU */
7460e1a46bbSPeter Maydell struct {
7470e1a46bbSPeter Maydell /* The PMSAv8 implementation also shares some PMSAv7 config
7480e1a46bbSPeter Maydell * and state:
7490e1a46bbSPeter Maydell * pmsav7.rnr (region number register)
7500e1a46bbSPeter Maydell * pmsav7_dregion (number of configured regions)
7510e1a46bbSPeter Maydell */
7524a16724fSPeter Maydell uint32_t *rbar[M_REG_NUM_BANKS];
7534a16724fSPeter Maydell uint32_t *rlar[M_REG_NUM_BANKS];
754761c4642STobias Röhmel uint32_t *hprbar;
755761c4642STobias Röhmel uint32_t *hprlar;
7564a16724fSPeter Maydell uint32_t mair0[M_REG_NUM_BANKS];
7574a16724fSPeter Maydell uint32_t mair1[M_REG_NUM_BANKS];
758761c4642STobias Röhmel uint32_t hprselr;
7590e1a46bbSPeter Maydell } pmsav8;
7600e1a46bbSPeter Maydell
7619901c576SPeter Maydell /* v8M SAU */
7629901c576SPeter Maydell struct {
7639901c576SPeter Maydell uint32_t *rbar;
7649901c576SPeter Maydell uint32_t *rlar;
7659901c576SPeter Maydell uint32_t rnr;
7669901c576SPeter Maydell uint32_t ctrl;
7679901c576SPeter Maydell } sau;
7689901c576SPeter Maydell
7691701d70eSPhilippe Mathieu-Daudé #if !defined(CONFIG_USER_ONLY)
7708f4e07c9SPhilippe Mathieu-Daudé NVICState *nvic;
7712a94a507SPhilippe Mathieu-Daudé const struct arm_boot_info *boot_info;
772d3a3e529SVijaya Kumar K /* Store GICv3CPUState to access from this struct */
773d3a3e529SVijaya Kumar K void *gicv3state;
7741701d70eSPhilippe Mathieu-Daudé #else /* CONFIG_USER_ONLY */
77526f08561SPhilippe Mathieu-Daudé /* For usermode syscall translation. */
77626f08561SPhilippe Mathieu-Daudé bool eabi;
77726f08561SPhilippe Mathieu-Daudé #endif /* CONFIG_USER_ONLY */
7780e0c030cSRichard Henderson
7790e0c030cSRichard Henderson #ifdef TARGET_TAGGED_ADDRESSES
7800e0c030cSRichard Henderson /* Linux syscall tagged address support */
7810e0c030cSRichard Henderson bool tagged_addr_enable;
7820e0c030cSRichard Henderson #endif
783fcf5ef2aSThomas Huth } CPUARMState;
784fcf5ef2aSThomas Huth
/* Mark the internal CPU feature bit @feature as present in @env->features. */
static inline void set_feature(CPUARMState *env, int feature)
{
    const uint64_t mask = 1ULL << feature;

    env->features |= mask;
}
7895fda9504SThomas Huth
/* Clear the internal CPU feature bit @feature in @env->features. */
static inline void unset_feature(CPUARMState *env, int feature)
{
    const uint64_t mask = 1ULL << feature;

    env->features &= ~mask;
}
7945fda9504SThomas Huth
/**
 * ARMELChangeHookFn:
 * type of a function which can be registered via arm_register_el_change_hook()
 * to get callbacks when the CPU changes its exception level or mode.
 */
typedef void ARMELChangeHookFn(ARMCPU *cpu, void *opaque);
typedef struct ARMELChangeHook ARMELChangeHook;
/* One registered EL/mode-change callback, linked into a per-CPU list
 * (see the pre_el_change_hooks/el_change_hooks QLIST_HEADs in ArchCPU).
 */
struct ARMELChangeHook {
    ARMELChangeHookFn *hook;    /* function to invoke on the change */
    void *opaque;               /* caller-supplied argument passed to @hook */
    QLIST_ENTRY(ARMELChangeHook) node;  /* list linkage */
};
807062ba099SAlex Bennée
/*
 * Per-CPU PSCI power state. These values map directly onto the return
 * values for QEMU_PSCI_0_2_FN_AFFINITY_INFO, so their numeric values
 * must not be changed.
 */
typedef enum ARMPSCIState {
    PSCI_ON = 0,
    PSCI_OFF = 1,
    PSCI_ON_PENDING = 2
} ARMPSCIState;
815062ba099SAlex Bennée
816962fcbf2SRichard Henderson typedef struct ARMISARegisters ARMISARegisters;
817962fcbf2SRichard Henderson
/*
 * Bitmaps describing SVE/SME vector lengths.
 *
 * In @map, each set bit is a supported vector length of
 * (bit-number + 1) * 16 bytes, i.e. each bit number + 1 is the vector
 * length in quadwords.
 *
 * While processing properties during initialization, corresponding @init
 * bits are set for bits in @map that have been set by properties.
 *
 * Bits set in @supported represent valid vector lengths for the CPU type.
 */
typedef struct {
    uint32_t map, init, supported;
} ARMVQMap;
8307f9e25a6SRichard Henderson
/**
 * ARMCPU:
 * @env: #CPUARMState
 *
 * An ARM CPU core.
 */
struct ArchCPU {
    CPUState parent_obj;

    CPUARMState env;

    /* Coprocessor information */
    GHashTable *cp_regs;
    /* For marshalling (mostly coprocessor) register state between the
     * kernel and QEMU (for KVM) and between two QEMUs (for migration),
     * we use these arrays.
     */
    /* List of register indexes managed via these arrays; (full KVM style
     * 64 bit indexes, not CPRegInfo 32 bit indexes)
     */
    uint64_t *cpreg_indexes;
    /* Values of the registers (cpreg_indexes[i]'s value is cpreg_values[i]) */
    uint64_t *cpreg_values;
    /* Length of the indexes, values, reset_values arrays */
    int32_t cpreg_array_len;
    /* These are used only for migration: incoming data arrives in
     * these fields and is sanity checked in post_load before copying
     * to the working data structures above.
     */
    uint64_t *cpreg_vmstate_indexes;
    uint64_t *cpreg_vmstate_values;
    int32_t cpreg_vmstate_array_len;

    /* Dynamically generated gdbstub register feature descriptions
     * (type from "exec/gdbstub.h"). NOTE(review): field names suggest
     * one per optional register group -- system registers, SVE,
     * M-profile system regs and M-profile security extension regs.
     */
    DynamicGDBFeatureInfo dyn_sysreg_feature;
    DynamicGDBFeatureInfo dyn_svereg_feature;
    DynamicGDBFeatureInfo dyn_m_systemreg_feature;
    DynamicGDBFeatureInfo dyn_m_secextreg_feature;

    /* Timers used by the generic (architected) timer */
    QEMUTimer *gt_timer[NUM_GTIMERS];
    /*
     * Timer used by the PMU. Its state is restored after migration by
     * pmu_op_finish() - it does not need other handling during migration
     */
    QEMUTimer *pmu_timer;
    /* Timer used for WFxT timeouts */
    QEMUTimer *wfxt_timer;

    /* GPIO outputs for generic timer */
    qemu_irq gt_timer_outputs[NUM_GTIMERS];
    /* GPIO output for GICv3 maintenance interrupt signal */
    qemu_irq gicv3_maintenance_interrupt;
    /* GPIO output for the PMU interrupt */
    qemu_irq pmu_interrupt;

    /* MemoryRegion to use for secure physical accesses */
    MemoryRegion *secure_memory;

    /* MemoryRegion to use for allocation tag accesses */
    MemoryRegion *tag_memory;
    MemoryRegion *secure_tag_memory;

    /* For v8M, pointer to the IDAU interface provided by board/SoC */
    Object *idau;

    /* 'compatible' string for this CPU for Linux device trees */
    const char *dtb_compatible;

    /* PSCI version for this CPU
     * Bits[31:16] = Major Version
     * Bits[15:0] = Minor Version
     */
    uint32_t psci_version;

    /* Current power state, access guarded by BQL */
    ARMPSCIState power_state;

    /* CPU has virtualization extension */
    bool has_el2;
    /* CPU has security extension */
    bool has_el3;
    /* CPU has PMU (Performance Monitor Unit) */
    bool has_pmu;
    /* CPU has VFP */
    bool has_vfp;
    /* CPU has 32 VFP registers */
    bool has_vfp_d32;
    /* CPU has Neon */
    bool has_neon;
    /* CPU has M-profile DSP extension */
    bool has_dsp;

    /* CPU has memory protection unit */
    bool has_mpu;
    /* CPU has MTE enabled in KVM mode */
    bool kvm_mte;
    /* PMSAv7 MPU number of supported regions */
    uint32_t pmsav7_dregion;
    /* PMSAv8 MPU number of supported hyp regions */
    uint32_t pmsav8r_hdregion;
    /* v8M SAU number of supported regions */
    uint32_t sau_sregion;

    /* PSCI conduit used to invoke PSCI methods
     * 0 - disabled, 1 - smc, 2 - hvc
     */
    uint32_t psci_conduit;

    /* For v8M, initial value of the Secure VTOR */
    uint32_t init_svtor;
    /* For v8M, initial value of the Non-secure VTOR */
    uint32_t init_nsvtor;

    /* [QEMU_]KVM_ARM_TARGET_* constant for this CPU, or
     * QEMU_KVM_ARM_TARGET_NONE if the kernel doesn't support this CPU type.
     */
    uint32_t kvm_target;

#ifdef CONFIG_KVM
    /* KVM init features for this CPU */
    uint32_t kvm_init_features[7];

    /* KVM CPU state */

    /* KVM virtual time adjustment */
    bool kvm_adjvtime;
    bool kvm_vtime_dirty;
    uint64_t kvm_vtime;

    /* KVM steal time */
    OnOffAuto kvm_steal_time;
#endif /* CONFIG_KVM */

    /* Uniprocessor system with MP extensions */
    bool mp_is_up;

    /* True if we tried kvm_arm_host_cpu_features() during CPU instance_init
     * and the probe failed (so we need to report the error in realize)
     */
    bool host_cpu_probe_failed;

    /* QOM property to indicate we should use the back-compat CNTFRQ default */
    bool backcompat_cntfrq;

    /* Specify the number of cores in this CPU cluster. Used for the L2CTLR
     * register.
     */
    int32_t core_count;

    /* The instance init functions for implementation-specific subclasses
     * set these fields to specify the implementation-dependent values of
     * various constant registers and reset values of non-constant
     * registers.
     * Some of these might become QOM properties eventually.
     * Field names match the official register names as defined in the
     * ARMv7AR ARM Architecture Reference Manual. A reset_ prefix
     * is used for reset values of non-constant registers; no reset_
     * prefix means a constant register.
     * Some of these registers are split out into a substructure that
     * is shared with the translators to control the ISA.
     *
     * Note that if you add an ID register to the ARMISARegisters struct
     * you need to also update the 32-bit and 64-bit versions of the
     * kvm_arm_get_host_cpu_features() function to correctly populate the
     * field by reading the value from the KVM vCPU.
     */
    struct ARMISARegisters {
        uint32_t id_isar0;
        uint32_t id_isar1;
        uint32_t id_isar2;
        uint32_t id_isar3;
        uint32_t id_isar4;
        uint32_t id_isar5;
        uint32_t id_isar6;
        uint32_t id_mmfr0;
        uint32_t id_mmfr1;
        uint32_t id_mmfr2;
        uint32_t id_mmfr3;
        uint32_t id_mmfr4;
        uint32_t id_mmfr5;
        uint32_t id_pfr0;
        uint32_t id_pfr1;
        uint32_t id_pfr2;
        uint32_t mvfr0;
        uint32_t mvfr1;
        uint32_t mvfr2;
        uint32_t id_dfr0;
        uint32_t id_dfr1;
        uint32_t dbgdidr;
        uint32_t dbgdevid;
        uint32_t dbgdevid1;
        uint64_t id_aa64isar0;
        uint64_t id_aa64isar1;
        uint64_t id_aa64isar2;
        uint64_t id_aa64pfr0;
        uint64_t id_aa64pfr1;
        uint64_t id_aa64mmfr0;
        uint64_t id_aa64mmfr1;
        uint64_t id_aa64mmfr2;
        uint64_t id_aa64mmfr3;
        uint64_t id_aa64dfr0;
        uint64_t id_aa64dfr1;
        uint64_t id_aa64zfr0;
        uint64_t id_aa64smfr0;
        uint64_t reset_pmcr_el0;
    } isar;
    uint64_t midr;
    uint32_t revidr;
    uint32_t reset_fpsid;
    uint64_t ctr;
    uint32_t reset_sctlr;
    uint64_t pmceid0;
    uint64_t pmceid1;
    uint32_t id_afr0;
    uint64_t id_aa64afr0;
    uint64_t id_aa64afr1;
    uint64_t clidr;
    uint64_t mp_affinity; /* MP ID without feature bits */
    /* The elements of this array are the CCSIDR values for each cache,
     * in the order L1DCache, L1ICache, L2DCache, L2ICache, etc.
     */
    uint64_t ccsidr[16];
    uint64_t reset_cbar;
    uint32_t reset_auxcr;
    bool reset_hivecs;
    uint8_t reset_l0gptsz;

    /*
     * Intermediate values used during property parsing.
     * Once finalized, the values should be read from ID_AA64*.
     */
    bool prop_pauth;
    bool prop_pauth_impdef;
    bool prop_pauth_qarma3;
    bool prop_lpa2;

    /* DCZ blocksize, in log_2(words), ie low 4 bits of DCZID_EL0 */
    uint8_t dcz_blocksize;
    /* GM blocksize, in log_2(words), ie low 4 bits of GMID_EL0 */
    uint8_t gm_blocksize;

    uint64_t rvbar_prop; /* Property/input signals. */

    /* Configurable aspects of GIC cpu interface (which is part of the CPU) */
    int gic_num_lrs; /* number of list registers */
    int gic_vpribits; /* number of virtual priority bits */
    int gic_vprebits; /* number of virtual preemption bits */
    int gic_pribits; /* number of physical priority bits */

    /* Whether the cfgend input is high (i.e. this CPU should reset into
     * big-endian mode). This setting isn't used directly: instead it modifies
     * the reset_sctlr value to have SCTLR_B or SCTLR_EE set, depending on the
     * architecture version.
     */
    bool cfgend;

    /* Lists of registered EL/mode-change callbacks (ARMELChangeHook);
     * presumably the "pre" list runs before the change takes effect and
     * the other afterwards -- confirm against arm_register_el_change_hook().
     */
    QLIST_HEAD(, ARMELChangeHook) pre_el_change_hooks;
    QLIST_HEAD(, ARMELChangeHook) el_change_hooks;

    int32_t node_id; /* NUMA node this CPU belongs to */

    /* Used to synchronize KVM and QEMU in-kernel device levels */
    uint8_t device_irq_level;

    /* Used to set the maximum vector length the cpu will support. */
    uint32_t sve_max_vq;

#ifdef CONFIG_USER_ONLY
    /* Used to set the default vector length at process start. */
    uint32_t sve_default_vq;
    uint32_t sme_default_vq;
#endif

    /* Supported/configured SVE and SME vector lengths (see ARMVQMap) */
    ARMVQMap sve_vq;
    ARMVQMap sme_vq;

    /* Generic timer counter frequency, in Hz */
    uint64_t gt_cntfrq_hz;
};
11109348028eSPhilippe Mathieu-Daudé
/* Description of one CPU model: its name plus the QOM instance- and
 * class-initialization functions used when registering the model.
 */
typedef struct ARMCPUInfo {
    const char *name;                       /* CPU model name */
    void (*initfn)(Object *obj);            /* QOM instance_init function */
    void (*class_init)(ObjectClass *oc, void *data);  /* QOM class_init function */
} ARMCPUInfo;
11169348028eSPhilippe Mathieu-Daudé
/**
 * ARMCPUClass:
 * @info: The #ARMCPUInfo model description this class was created from.
 * @parent_realize: The parent class' realize handler.
 * @parent_phases: The parent class' reset phase handlers.
 *
 * An ARM CPU model.
 */
struct ARMCPUClass {
    CPUClass parent_class;

    const ARMCPUInfo *info;
    DeviceRealize parent_realize;
    ResettablePhases parent_phases;
};
11319348028eSPhilippe Mathieu-Daudé
/* Class for AArch64-capable CPU models; adds no state beyond ARMCPUClass. */
struct AArch64CPUClass {
    ARMCPUClass parent_class;
};
1135f6524ddfSPhilippe Mathieu-Daudé
1136f6524ddfSPhilippe Mathieu-Daudé /* Callback functions for the generic timer's timers. */
1137f6524ddfSPhilippe Mathieu-Daudé void arm_gt_ptimer_cb(void *opaque);
1138f6524ddfSPhilippe Mathieu-Daudé void arm_gt_vtimer_cb(void *opaque);
1139f6524ddfSPhilippe Mathieu-Daudé void arm_gt_htimer_cb(void *opaque);
1140f6524ddfSPhilippe Mathieu-Daudé void arm_gt_stimer_cb(void *opaque);
11417def8754SAndrew Jeffery void arm_gt_hvtimer_cb(void *opaque);
1142f6fc36deSJean-Philippe Brucker
11437def8754SAndrew Jeffery unsigned int gt_cntfrq_period_ns(ARMCPU *cpu);
114451e5ef45SMarc-André Lureau void gt_rme_post_el_change(ARMCPU *cpu, void *opaque);
114551e5ef45SMarc-André Lureau
1146f6524ddfSPhilippe Mathieu-Daudé void arm_cpu_post_init(Object *obj);
1147f6524ddfSPhilippe Mathieu-Daudé
1148f6524ddfSPhilippe Mathieu-Daudé #define ARM_AFF0_SHIFT 0
1149f6524ddfSPhilippe Mathieu-Daudé #define ARM_AFF0_MASK (0xFFULL << ARM_AFF0_SHIFT)
1150f6524ddfSPhilippe Mathieu-Daudé #define ARM_AFF1_SHIFT 8
1151f6524ddfSPhilippe Mathieu-Daudé #define ARM_AFF1_MASK (0xFFULL << ARM_AFF1_SHIFT)
1152f6524ddfSPhilippe Mathieu-Daudé #define ARM_AFF2_SHIFT 16
1153f6524ddfSPhilippe Mathieu-Daudé #define ARM_AFF2_MASK (0xFFULL << ARM_AFF2_SHIFT)
1154f6524ddfSPhilippe Mathieu-Daudé #define ARM_AFF3_SHIFT 32
1155f6524ddfSPhilippe Mathieu-Daudé #define ARM_AFF3_MASK (0xFFULL << ARM_AFF3_SHIFT)
1156f6524ddfSPhilippe Mathieu-Daudé #define ARM_DEFAULT_CPUS_PER_CLUSTER 8
1157f6524ddfSPhilippe Mathieu-Daudé
1158f6524ddfSPhilippe Mathieu-Daudé #define ARM32_AFFINITY_MASK (ARM_AFF0_MASK | ARM_AFF1_MASK | ARM_AFF2_MASK)
1159f6524ddfSPhilippe Mathieu-Daudé #define ARM64_AFFINITY_MASK \
1160f6524ddfSPhilippe Mathieu-Daudé (ARM_AFF0_MASK | ARM_AFF1_MASK | ARM_AFF2_MASK | ARM_AFF3_MASK)
1161750245edSRichard Henderson #define ARM64_AFFINITY_INVALID (~ARM64_AFFINITY_MASK)
116246de5913SIgor Mammedov
1163fcf5ef2aSThomas Huth uint64_t arm_build_mp_affinity(int idx, uint8_t clustersz);
11648a9358ccSMarkus Armbruster
1165fcf5ef2aSThomas Huth #ifndef CONFIG_USER_ONLY
1166fcf5ef2aSThomas Huth extern const VMStateDescription vmstate_arm_cpu;
1167fcf5ef2aSThomas Huth
1168fcf5ef2aSThomas Huth void arm_cpu_do_interrupt(CPUState *cpu);
1169fcf5ef2aSThomas Huth void arm_v7m_cpu_do_interrupt(CPUState *cpu);
1170fcf5ef2aSThomas Huth
11716d2d454aSPhilippe Mathieu-Daudé hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
1172fcf5ef2aSThomas Huth MemTxAttrs *attrs);
1173a010bdbeSAlex Bennée #endif /* !CONFIG_USER_ONLY */
1174fcf5ef2aSThomas Huth
1175fcf5ef2aSThomas Huth int arm_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
1176fcf5ef2aSThomas Huth int arm_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
11771af0006aSJanosch Frank
1178fcf5ef2aSThomas Huth int arm_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
11791af0006aSJanosch Frank int cpuid, DumpState *s);
1180fcf5ef2aSThomas Huth int arm_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
11813a45f4f5SPeter Maydell int cpuid, DumpState *s);
11823a45f4f5SPeter Maydell
11833a45f4f5SPeter Maydell /**
11843a45f4f5SPeter Maydell * arm_emulate_firmware_reset: Emulate firmware CPU reset handling
11853a45f4f5SPeter Maydell * @cpu: CPU (which must have been freshly reset)
 * @target_el: exception level to put the CPU into
11883a45f4f5SPeter Maydell *
11893a45f4f5SPeter Maydell * When QEMU is directly running a guest kernel at a lower level than
11903a45f4f5SPeter Maydell * EL3 it implicitly emulates some aspects of the guest firmware.
11913a45f4f5SPeter Maydell * This includes that on reset we need to configure the parts of the
11923a45f4f5SPeter Maydell * CPU corresponding to EL3 so that the real guest code can run at its
11933a45f4f5SPeter Maydell * lower exception level. This function does that post-reset CPU setup,
11943a45f4f5SPeter Maydell * for when we do direct boot of a guest kernel, and for when we
11953a45f4f5SPeter Maydell * emulate PSCI and similar firmware interfaces starting a CPU at a
11963a45f4f5SPeter Maydell * lower exception level.
11973a45f4f5SPeter Maydell *
11983a45f4f5SPeter Maydell * @target_el must be an EL implemented by the CPU between 1 and 3.
11993a45f4f5SPeter Maydell * We do not support dropping into a Secure EL other than 3.
12003a45f4f5SPeter Maydell *
12013a45f4f5SPeter Maydell * It is the responsibility of the caller to call arm_rebuild_hflags().
12023a45f4f5SPeter Maydell */
1203fcf5ef2aSThomas Huth void arm_emulate_firmware_reset(CPUState *cpustate, int target_el);
1204a010bdbeSAlex Bennée
1205fcf5ef2aSThomas Huth #ifdef TARGET_AARCH64
120685fc7167SRichard Henderson int aarch64_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
12079a05f7b6SRichard Henderson int aarch64_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
12089a05f7b6SRichard Henderson void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq);
12092a8af382SRichard Henderson void aarch64_sve_change_el(CPUARMState *env, int old_el,
1210538baab2SAndrew Jones int new_el, bool el0_a64);
1211538baab2SAndrew Jones void aarch64_set_svcr(CPUARMState *env, uint64_t new, uint64_t mask);
1212538baab2SAndrew Jones
1213538baab2SAndrew Jones /*
1214538baab2SAndrew Jones * SVE registers are encoded in KVM's memory in an endianness-invariant format.
1215538baab2SAndrew Jones * The byte at offset i from the start of the in-memory representation contains
1216538baab2SAndrew Jones * the bits [(7 + 8 * i) : (8 * i)] of the register value. As this means the
1217538baab2SAndrew Jones * lowest offsets are stored in the lowest memory addresses, then that nearly
1218538baab2SAndrew Jones * matches QEMU's representation, which is to use an array of host-endian
1219538baab2SAndrew Jones * uint64_t's, where the lower offsets are at the lower indices. To complete
1220538baab2SAndrew Jones * the translation we just need to byte swap the uint64_t's on big-endian hosts.
1221538baab2SAndrew Jones */
/*
 * Copy @nr 64-bit SVE data words between KVM's endianness-invariant
 * in-memory layout and QEMU's host-endian uint64_t array. On a
 * little-endian host the two layouts already agree, so @src is handed
 * back untouched; on a big-endian host every word is byte-swapped into
 * @dst and @dst is returned.
 */
static inline uint64_t *sve_bswap64(uint64_t *dst, uint64_t *src, int nr)
{
#if HOST_BIG_ENDIAN
    for (int i = 0; i < nr; ++i) {
        dst[i] = bswap64(src[i]);
    }
    return dst;
#else
    return src;
#endif
}
12360ab5953bSRichard Henderson
12379a05f7b6SRichard Henderson #else
/*
 * Stubs for builds without TARGET_AARCH64: there is no SVE state to
 * narrow or to adjust on an exception-level change, so both are no-ops.
 */
static inline void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq) { }
static inline void aarch64_sve_change_el(CPUARMState *env, int o,
                                         int n, bool a)
{ }
1242fcf5ef2aSThomas Huth #endif
1243fcf5ef2aSThomas Huth
1244fcf5ef2aSThomas Huth void aarch64_sync_32_to_64(CPUARMState *env);
1245ced31551SRichard Henderson void aarch64_sync_64_to_32(CPUARMState *env);
1246ced31551SRichard Henderson
12476b2ca83eSRichard Henderson int fp_exception_el(CPUARMState *env, int cur_el);
12485ef3cc56SRichard Henderson int sve_exception_el(CPUARMState *env, int cur_el);
12495ef3cc56SRichard Henderson int sme_exception_el(CPUARMState *env, int cur_el);
12506ca54aa9SRichard Henderson
12515ef3cc56SRichard Henderson /**
12525ef3cc56SRichard Henderson * sve_vqm1_for_el_sm:
12536ca54aa9SRichard Henderson * @env: CPUARMState
12545ef3cc56SRichard Henderson * @el: exception level
12556ca54aa9SRichard Henderson * @sm: streaming mode
12565ef3cc56SRichard Henderson *
12576ca54aa9SRichard Henderson * Compute the current vector length for @el & @sm, in units of
12585ef3cc56SRichard Henderson * Quadwords Minus 1 -- the same scale used for ZCR_ELx.LEN.
12596ca54aa9SRichard Henderson * If @sm, compute for SVL, otherwise NVL.
12606ca54aa9SRichard Henderson */
12616ca54aa9SRichard Henderson uint32_t sve_vqm1_for_el_sm(CPUARMState *env, int el, bool sm);
12625ef3cc56SRichard Henderson
1263ced31551SRichard Henderson /* Likewise, but using @sm = PSTATE.SM. */
1264fcf5ef2aSThomas Huth uint32_t sve_vqm1_for_el(CPUARMState *env, int el);
1265fcf5ef2aSThomas Huth
/* Return true if the CPU is currently in AArch64 execution state. */
static inline bool is_a64(CPUARMState *env)
{
    return env->aarch64;
}
12705d05b9d4SAaron Lindsay
12715d05b9d4SAaron Lindsay /**
12725d05b9d4SAaron Lindsay * pmu_op_start/finish
12735d05b9d4SAaron Lindsay * @env: CPUARMState
12745d05b9d4SAaron Lindsay *
12755d05b9d4SAaron Lindsay * Convert all PMU counters between their delta form (the typical mode when
12765d05b9d4SAaron Lindsay * they are enabled) and the guest-visible values. These two calls must
12775d05b9d4SAaron Lindsay * surround any action which might affect the counters.
12785d05b9d4SAaron Lindsay */
1279fcf5ef2aSThomas Huth void pmu_op_start(CPUARMState *env);
12804e7beb0cSAaron Lindsay OS void pmu_op_finish(CPUARMState *env);
12814e7beb0cSAaron Lindsay OS
12824e7beb0cSAaron Lindsay OS /*
12834e7beb0cSAaron Lindsay OS * Called when a PMU counter is due to overflow
12844e7beb0cSAaron Lindsay OS */
1285033614c4SAaron Lindsay void arm_pmu_timer_cb(void *opaque);
1286033614c4SAaron Lindsay
1287033614c4SAaron Lindsay /**
1288033614c4SAaron Lindsay * Functions to register as EL change hooks for PMU mode filtering
1289033614c4SAaron Lindsay */
1290033614c4SAaron Lindsay void pmu_pre_el_change(ARMCPU *cpu, void *ignored);
129157a4a11bSAaron Lindsay void pmu_post_el_change(ARMCPU *cpu, void *ignored);
1292bf8d0969SAaron Lindsay OS
1293bf8d0969SAaron Lindsay OS /*
129457a4a11bSAaron Lindsay * pmu_init
1295bf8d0969SAaron Lindsay OS * @cpu: ARMCPU
1296bf8d0969SAaron Lindsay OS *
129757a4a11bSAaron Lindsay * Initialize the CPU's PMCEID[01]_EL0 registers and associated internal state
1298bf8d0969SAaron Lindsay OS * for the current configuration
129957a4a11bSAaron Lindsay */
1300fcf5ef2aSThomas Huth void pmu_init(ARMCPU *cpu);
1301fcf5ef2aSThomas Huth
1302fcf5ef2aSThomas Huth /* SCTLR bit meanings. Several bits have been reused in newer
1303fcf5ef2aSThomas Huth * versions of the architecture; in that case we define constants
1304fcf5ef2aSThomas Huth * for both old and new bit meanings. Code which tests against those
1305fcf5ef2aSThomas Huth * bits should probably check or otherwise arrange that the CPU
1306fcf5ef2aSThomas Huth * is the architectural version it expects.
1307fcf5ef2aSThomas Huth */
1308fcf5ef2aSThomas Huth #define SCTLR_M (1U << 0)
1309fcf5ef2aSThomas Huth #define SCTLR_A (1U << 1)
1310b2af69d0SRichard Henderson #define SCTLR_C (1U << 2)
1311b2af69d0SRichard Henderson #define SCTLR_W (1U << 3) /* up to v6; RAO in v7 */
1312fcf5ef2aSThomas Huth #define SCTLR_nTLSMD_32 (1U << 3) /* v8.2-LSMAOC, AArch32 only */
1313b2af69d0SRichard Henderson #define SCTLR_SA (1U << 3) /* AArch64 only */
1314fcf5ef2aSThomas Huth #define SCTLR_P (1U << 4) /* up to v5; RAO in v6 and v7 */
1315fcf5ef2aSThomas Huth #define SCTLR_LSMAOE_32 (1U << 4) /* v8.2-LSMAOC, AArch32 only */
1316fcf5ef2aSThomas Huth #define SCTLR_SA0 (1U << 4) /* v8 onward, AArch64 only */
1317fcf5ef2aSThomas Huth #define SCTLR_D (1U << 5) /* up to v5; RAO in v6 */
131883f624d9SRichard Henderson #define SCTLR_CP15BEN (1U << 5) /* v7 onward */
1319fcf5ef2aSThomas Huth #define SCTLR_L (1U << 6) /* up to v5; RAO in v6 and v7; RAZ in v8 */
1320fcf5ef2aSThomas Huth #define SCTLR_nAA (1U << 6) /* when FEAT_LSE2 is implemented */
1321fcf5ef2aSThomas Huth #define SCTLR_B (1U << 7) /* up to v6; RAZ in v7 */
1322fcf5ef2aSThomas Huth #define SCTLR_ITD (1U << 7) /* v8 onward */
1323fcf5ef2aSThomas Huth #define SCTLR_S (1U << 8) /* up to v6; RAZ in v7 */
1324fcf5ef2aSThomas Huth #define SCTLR_SED (1U << 8) /* v8 onward */
1325fcf5ef2aSThomas Huth #define SCTLR_R (1U << 9) /* up to v6; RAZ in v7 */
1326cb570bd3SRichard Henderson #define SCTLR_UMA (1U << 9) /* v8 onward, AArch64 only */
1327cb570bd3SRichard Henderson #define SCTLR_F (1U << 10) /* up to v6 */
1328b2af69d0SRichard Henderson #define SCTLR_SW (1U << 10) /* v7 */
1329b2af69d0SRichard Henderson #define SCTLR_EnRCTX (1U << 10) /* in v8.0-PredInv */
1330fcf5ef2aSThomas Huth #define SCTLR_Z (1U << 11) /* in v7, RES1 in v8 */
1331b2af69d0SRichard Henderson #define SCTLR_EOS (1U << 11) /* v8.5-ExS */
1332b2af69d0SRichard Henderson #define SCTLR_I (1U << 12)
1333fcf5ef2aSThomas Huth #define SCTLR_V (1U << 13) /* AArch32 only */
1334fcf5ef2aSThomas Huth #define SCTLR_EnDB (1U << 13) /* v8.3, AArch64 only */
1335fcf5ef2aSThomas Huth #define SCTLR_RR (1U << 14) /* up to v7 */
1336fcf5ef2aSThomas Huth #define SCTLR_DZE (1U << 14) /* v8 onward, AArch64 only */
1337fcf5ef2aSThomas Huth #define SCTLR_L4 (1U << 15) /* up to v6; RAZ in v7 */
1338fcf5ef2aSThomas Huth #define SCTLR_UCT (1U << 15) /* v8 onward, AArch64 only */
1339b2af69d0SRichard Henderson #define SCTLR_DT (1U << 16) /* up to ??, RAO in v6 and v7 */
1340fcf5ef2aSThomas Huth #define SCTLR_nTWI (1U << 16) /* v8 onward */
1341fcf5ef2aSThomas Huth #define SCTLR_HA (1U << 17) /* up to v7, RES0 in v8 */
1342fcf5ef2aSThomas Huth #define SCTLR_BR (1U << 17) /* PMSA only */
1343fcf5ef2aSThomas Huth #define SCTLR_IT (1U << 18) /* up to ??, RAO in v6 and v7 */
1344fcf5ef2aSThomas Huth #define SCTLR_nTWE (1U << 18) /* v8 onward */
1345b2af69d0SRichard Henderson #define SCTLR_WXN (1U << 19)
13467cb1e618SRichard Henderson #define SCTLR_ST (1U << 20) /* up to ??, RAZ in v6 */
1347b2af69d0SRichard Henderson #define SCTLR_UWXN (1U << 20) /* v7 onward, AArch32 only */
1348b2af69d0SRichard Henderson #define SCTLR_TSCXT (1U << 20) /* FEAT_CSV2_1p2, AArch64 only */
1349b2af69d0SRichard Henderson #define SCTLR_FI (1U << 21) /* up to v7, v8 RES0 */
1350b2af69d0SRichard Henderson #define SCTLR_IESB (1U << 21) /* v8.2-IESB, AArch64 only */
1351fcf5ef2aSThomas Huth #define SCTLR_U (1U << 22) /* up to v6, RAO in v7 */
1352b2af69d0SRichard Henderson #define SCTLR_EIS (1U << 22) /* v8.5-ExS */
1353fcf5ef2aSThomas Huth #define SCTLR_XP (1U << 23) /* up to v6; v7 onward RAO */
1354fcf5ef2aSThomas Huth #define SCTLR_SPAN (1U << 23) /* v8.1-PAN */
1355fcf5ef2aSThomas Huth #define SCTLR_VE (1U << 24) /* up to v7 */
1356fcf5ef2aSThomas Huth #define SCTLR_E0E (1U << 24) /* v8 onward, AArch64 only */
1357fcf5ef2aSThomas Huth #define SCTLR_EE (1U << 25)
1358b2af69d0SRichard Henderson #define SCTLR_L2 (1U << 26) /* up to v6, RAZ in v7 */
1359b2af69d0SRichard Henderson #define SCTLR_UCI (1U << 26) /* v8 onward, AArch64 only */
1360b2af69d0SRichard Henderson #define SCTLR_NMFI (1U << 27) /* up to v7, RAZ in v7VE and v8 */
1361b2af69d0SRichard Henderson #define SCTLR_EnDA (1U << 27) /* v8.3, AArch64 only */
1362b2af69d0SRichard Henderson #define SCTLR_TRE (1U << 28) /* AArch32 only */
1363b2af69d0SRichard Henderson #define SCTLR_nTLSMD_64 (1U << 28) /* v8.2-LSMAOC, AArch64 only */
1364b2af69d0SRichard Henderson #define SCTLR_AFE (1U << 29) /* AArch32 only */
1365b2af69d0SRichard Henderson #define SCTLR_LSMAOE_64 (1U << 29) /* v8.2-LSMAOC, AArch64 only */
1366b2af69d0SRichard Henderson #define SCTLR_TE (1U << 30) /* AArch32 only */
1367f2f68a78SRebecca Cran #define SCTLR_EnIB (1U << 30) /* v8.3, AArch64 only */
1368dbc678f9SPeter Maydell #define SCTLR_EnIA (1U << 31) /* v8.3, AArch64 only */
1369b2af69d0SRichard Henderson #define SCTLR_DSSBS_32 (1U << 31) /* v8.5, AArch32 only */
1370b2af69d0SRichard Henderson #define SCTLR_CMOW (1ULL << 32) /* FEAT_CMOW */
1371b2af69d0SRichard Henderson #define SCTLR_MSCEN (1ULL << 33) /* FEAT_MOPS */
1372b2af69d0SRichard Henderson #define SCTLR_BT0 (1ULL << 35) /* v8.5-BTI */
1373b2af69d0SRichard Henderson #define SCTLR_BT1 (1ULL << 36) /* v8.5-BTI */
1374b2af69d0SRichard Henderson #define SCTLR_ITFSB (1ULL << 37) /* v8.5-MemTag */
1375b2af69d0SRichard Henderson #define SCTLR_TCF0 (3ULL << 38) /* v8.5-MemTag */
1376f2f68a78SRebecca Cran #define SCTLR_TCF (3ULL << 40) /* v8.5-MemTag */
1377ad1e6018SRichard Henderson #define SCTLR_ATA0 (1ULL << 42) /* v8.5-MemTag */
1378ad1e6018SRichard Henderson #define SCTLR_ATA (1ULL << 43) /* v8.5-MemTag */
1379ad1e6018SRichard Henderson #define SCTLR_DSSBS_64 (1ULL << 44) /* v8.5, AArch64 only */
1380ad1e6018SRichard Henderson #define SCTLR_TWEDEn (1ULL << 45) /* FEAT_TWED */
/*
 * Fix: the helper macro is MAKE_64BIT_MASK (see HCR_TWEDEL / SCR_TWEDEL
 * below); MAKE_64_MASK does not exist, so any use of SCTLR_TWEDEL
 * previously failed to compile. TWEDEL is the 4-bit delay field at [49:46].
 */
#define SCTLR_TWEDEL  MAKE_64BIT_MASK(46, 4)  /* FEAT_TWED */
1382ad1e6018SRichard Henderson #define SCTLR_TMT0 (1ULL << 50) /* FEAT_TME */
1383ad1e6018SRichard Henderson #define SCTLR_TMT (1ULL << 51) /* FEAT_TME */
1384ad1e6018SRichard Henderson #define SCTLR_TME0 (1ULL << 52) /* FEAT_TME */
1385ad1e6018SRichard Henderson #define SCTLR_TME (1ULL << 53) /* FEAT_TME */
1386ad1e6018SRichard Henderson #define SCTLR_EnASR (1ULL << 54) /* FEAT_LS64_V */
1387ad1e6018SRichard Henderson #define SCTLR_EnAS0 (1ULL << 55) /* FEAT_LS64_ACCDATA */
1388ad1e6018SRichard Henderson #define SCTLR_EnALS (1ULL << 56) /* FEAT_LS64 */
1389ad1e6018SRichard Henderson #define SCTLR_EPAN (1ULL << 57) /* FEAT_PAN3 */
1390ad1e6018SRichard Henderson #define SCTLR_EnTP2 (1ULL << 60) /* FEAT_SME */
1391fcf5ef2aSThomas Huth #define SCTLR_NMI (1ULL << 61) /* FEAT_NMI */
1392fcf5ef2aSThomas Huth #define SCTLR_SPINTMASK (1ULL << 62) /* FEAT_NMI */
1393fcf5ef2aSThomas Huth #define SCTLR_TIDCP (1ULL << 63) /* FEAT_TIDCP1 */
1394fcf5ef2aSThomas Huth
1395fcf5ef2aSThomas Huth #define CPSR_M (0x1fU)
1396fcf5ef2aSThomas Huth #define CPSR_T (1U << 5)
1397fcf5ef2aSThomas Huth #define CPSR_F (1U << 6)
1398fcf5ef2aSThomas Huth #define CPSR_I (1U << 7)
1399fcf5ef2aSThomas Huth #define CPSR_A (1U << 8)
1400fcf5ef2aSThomas Huth #define CPSR_E (1U << 9)
1401dc8b1853SRebecca Cran #define CPSR_IT_2_7 (0xfc00U)
1402220f508fSRichard Henderson #define CPSR_GE (0xfU << 16)
1403f2f68a78SRebecca Cran #define CPSR_IL (1U << 20)
1404fcf5ef2aSThomas Huth #define CPSR_DIT (1U << 21)
1405fcf5ef2aSThomas Huth #define CPSR_PAN (1U << 22)
1406fcf5ef2aSThomas Huth #define CPSR_SSBS (1U << 23)
1407fcf5ef2aSThomas Huth #define CPSR_J (1U << 24)
1408fcf5ef2aSThomas Huth #define CPSR_IT_0_1 (3U << 25)
1409fcf5ef2aSThomas Huth #define CPSR_Q (1U << 27)
1410fcf5ef2aSThomas Huth #define CPSR_V (1U << 28)
1411fcf5ef2aSThomas Huth #define CPSR_C (1U << 29)
1412fcf5ef2aSThomas Huth #define CPSR_Z (1U << 30)
14132e0be5f6SJinjie Ruan #define CPSR_N (1U << 31)
14142e0be5f6SJinjie Ruan #define CPSR_NZCV (CPSR_N | CPSR_Z | CPSR_C | CPSR_V)
1415fcf5ef2aSThomas Huth #define CPSR_AIF (CPSR_A | CPSR_I | CPSR_F)
1416fcf5ef2aSThomas Huth #define ISR_FS (1U << 9)
1417fcf5ef2aSThomas Huth #define ISR_IS (1U << 10)
1418fcf5ef2aSThomas Huth
1419fcf5ef2aSThomas Huth #define CPSR_IT (CPSR_IT_0_1 | CPSR_IT_2_7)
1420268b1b3dSPeter Maydell #define CACHED_CPSR_BITS (CPSR_T | CPSR_AIF | CPSR_GE | CPSR_IT | CPSR_Q \
1421fcf5ef2aSThomas Huth | CPSR_NZCV)
1422fcf5ef2aSThomas Huth /* Bits writable in user mode. */
1423fcf5ef2aSThomas Huth #define CPSR_USER (CPSR_NZCV | CPSR_Q | CPSR_GE | CPSR_E)
1424987ab45eSPeter Maydell /* Execution state bits. MRS read as zero, MSR writes ignored. */
1425987ab45eSPeter Maydell #define CPSR_EXEC (CPSR_T | CPSR_IT | CPSR_J | CPSR_IL)
1426987ab45eSPeter Maydell
1427987ab45eSPeter Maydell /* Bit definitions for M profile XPSR. Most are the same as CPSR. */
1428987ab45eSPeter Maydell #define XPSR_EXCP 0x1ffU
1429987ab45eSPeter Maydell #define XPSR_SPREALIGN (1U << 9) /* Only set in exception stack frames */
1430987ab45eSPeter Maydell #define XPSR_IT_2_7 CPSR_IT_2_7
1431987ab45eSPeter Maydell #define XPSR_GE CPSR_GE
1432987ab45eSPeter Maydell #define XPSR_SFPA (1U << 20) /* Only set in exception stack frames */
1433987ab45eSPeter Maydell #define XPSR_T (1U << 24) /* Not the same as CPSR_T ! */
1434987ab45eSPeter Maydell #define XPSR_IT_0_1 CPSR_IT_0_1
1435987ab45eSPeter Maydell #define XPSR_Q CPSR_Q
1436987ab45eSPeter Maydell #define XPSR_V CPSR_V
1437987ab45eSPeter Maydell #define XPSR_C CPSR_C
1438987ab45eSPeter Maydell #define XPSR_Z CPSR_Z
1439987ab45eSPeter Maydell #define XPSR_N CPSR_N
1440fcf5ef2aSThomas Huth #define XPSR_NZCV CPSR_NZCV
1441fcf5ef2aSThomas Huth #define XPSR_IT CPSR_IT
1442fcf5ef2aSThomas Huth
1443fcf5ef2aSThomas Huth /* Bit definitions for ARMv8 SPSR (PSTATE) format.
1444fcf5ef2aSThomas Huth * Only these are valid when in AArch64 mode; in
1445fcf5ef2aSThomas Huth * AArch32 mode SPSRs are basically CPSR-format.
1446fcf5ef2aSThomas Huth */
1447fcf5ef2aSThomas Huth #define PSTATE_SP (1U)
1448fcf5ef2aSThomas Huth #define PSTATE_M (0xFU)
1449fcf5ef2aSThomas Huth #define PSTATE_nRW (1U << 4)
1450fcf5ef2aSThomas Huth #define PSTATE_F (1U << 6)
1451f6e52eaaSRichard Henderson #define PSTATE_I (1U << 7)
1452f2f68a78SRebecca Cran #define PSTATE_A (1U << 8)
14536aa20415SJinjie Ruan #define PSTATE_D (1U << 9)
1454fcf5ef2aSThomas Huth #define PSTATE_BTYPE (3U << 10)
1455fcf5ef2aSThomas Huth #define PSTATE_SSBS (1U << 12)
1456220f508fSRichard Henderson #define PSTATE_ALLINT (1U << 13)
14579eeb7a1cSRichard Henderson #define PSTATE_IL (1U << 20)
1458dc8b1853SRebecca Cran #define PSTATE_SS (1U << 21)
14594b779cebSRichard Henderson #define PSTATE_PAN (1U << 22)
1460fcf5ef2aSThomas Huth #define PSTATE_UAO (1U << 23)
1461fcf5ef2aSThomas Huth #define PSTATE_DIT (1U << 24)
1462fcf5ef2aSThomas Huth #define PSTATE_TCO (1U << 25)
1463fcf5ef2aSThomas Huth #define PSTATE_V (1U << 28)
1464fcf5ef2aSThomas Huth #define PSTATE_C (1U << 29)
1465fcf5ef2aSThomas Huth #define PSTATE_Z (1U << 30)
1466f6e52eaaSRichard Henderson #define PSTATE_N (1U << 31)
1467fcf5ef2aSThomas Huth #define PSTATE_NZCV (PSTATE_N | PSTATE_Z | PSTATE_C | PSTATE_V)
1468fcf5ef2aSThomas Huth #define PSTATE_DAIF (PSTATE_D | PSTATE_A | PSTATE_I | PSTATE_F)
1469fcf5ef2aSThomas Huth #define CACHED_PSTATE_BITS (PSTATE_NZCV | PSTATE_DAIF | PSTATE_BTYPE)
1470fcf5ef2aSThomas Huth /* Mode values for AArch64 */
1471fcf5ef2aSThomas Huth #define PSTATE_MODE_EL3h 13
1472fcf5ef2aSThomas Huth #define PSTATE_MODE_EL3t 12
1473fcf5ef2aSThomas Huth #define PSTATE_MODE_EL2h 9
1474fcf5ef2aSThomas Huth #define PSTATE_MODE_EL2t 8
1475fcf5ef2aSThomas Huth #define PSTATE_MODE_EL1h 5
1476c37e6ac9SRichard Henderson #define PSTATE_MODE_EL1t 4
1477c37e6ac9SRichard Henderson #define PSTATE_MODE_EL0t 0
1478c37e6ac9SRichard Henderson
1479c37e6ac9SRichard Henderson /* PSTATE bits that are accessed via SVCR and not stored in SPSR_ELx. */
1480de561988SRichard Henderson FIELD(SVCR, SM, 0, 1)
1481de561988SRichard Henderson FIELD(SVCR, ZA, 1, 1)
1482de561988SRichard Henderson
1483de561988SRichard Henderson /* Fields for SMCR_ELx. */
1484de2db7ecSPeter Maydell FIELD(SMCR, LEN, 0, 4)
1485de2db7ecSPeter Maydell FIELD(SMCR, FA64, 31, 1)
1486de2db7ecSPeter Maydell
1487de2db7ecSPeter Maydell /* Write a new value to v7m.exception, thus transitioning into or out
1488de2db7ecSPeter Maydell * of Handler mode; this may result in a change of active stack pointer.
1489fcf5ef2aSThomas Huth */
1490fcf5ef2aSThomas Huth void write_v7m_exception(CPUARMState *env, uint32_t new_exc);
1491fcf5ef2aSThomas Huth
/*
 * Map an exception level and SP-selection ("handler") flag into the
 * AArch64 PSTATE.M encoding: M[3:2] = EL, M[0] = handler (ELxh vs ELxt).
 */
static inline unsigned int aarch64_pstate_mode(unsigned int el, bool handler)
{
    unsigned int mode = el << 2;

    if (handler) {
        mode |= 1;
    }
    return mode;
}
1497fcf5ef2aSThomas Huth
1498fcf5ef2aSThomas Huth /* Return the current PSTATE value. For the moment we don't support 32<->64 bit
1499fcf5ef2aSThomas Huth * interprocessing, so we don't attempt to sync with the cpsr state used by
1500fcf5ef2aSThomas Huth * the 32 bit decoder.
1501fcf5ef2aSThomas Huth */
pstate_read(CPUARMState * env)1502fcf5ef2aSThomas Huth static inline uint32_t pstate_read(CPUARMState *env)
1503fcf5ef2aSThomas Huth {
1504fcf5ef2aSThomas Huth int ZF;
1505fcf5ef2aSThomas Huth
1506f6e52eaaSRichard Henderson ZF = (env->ZF == 0);
1507fcf5ef2aSThomas Huth return (env->NF & 0x80000000) | (ZF << 30)
1508fcf5ef2aSThomas Huth | (env->CF << 29) | ((env->VF & 0x80000000) >> 3)
1509fcf5ef2aSThomas Huth | env->pstate | env->daif | (env->btype << 10);
1510fcf5ef2aSThomas Huth }
1511fcf5ef2aSThomas Huth
/*
 * Write a raw PSTATE value, splitting it into the cached NZCV/DAIF/BTYPE
 * fields and the remaining (uncached) env->pstate bits.
 */
static inline void pstate_write(CPUARMState *env, uint32_t val)
{
    /* NF/VF cache their flag in bit 31; ZF caches Z as "ZF == 0". */
    env->NF = val;
    env->ZF = ~val & PSTATE_Z;
    env->CF = (val >> 29) & 1;
    env->VF = (val & 0x10000000) << 3;
    env->daif = val & PSTATE_DAIF;
    env->btype = (val >> 10) & 3;
    env->pstate = val & ~CACHED_PSTATE_BITS;
}
1522fcf5ef2aSThomas Huth
1523fcf5ef2aSThomas Huth /* Return the current CPSR value. */
1524fcf5ef2aSThomas Huth uint32_t cpsr_read(CPUARMState *env);
1525fcf5ef2aSThomas Huth
/* Source of a CPSR write; determines how much state must be resynchronized. */
typedef enum CPSRWriteType {
    CPSRWriteByInstr = 0,         /* from guest MSR or CPS */
    CPSRWriteExceptionReturn = 1, /* from guest exception return insn */
    CPSRWriteRaw = 2,
        /* trust values, no reg bank switch, no hflags rebuild */
    CPSRWriteByGDBStub = 3,       /* from the GDB stub */
} CPSRWriteType;
1533e784807cSPeter Maydell
1534e784807cSPeter Maydell /*
1535e784807cSPeter Maydell * Set the CPSR. Note that some bits of mask must be all-set or all-clear.
1536e784807cSPeter Maydell * This will do an arm_rebuild_hflags() if any of the bits in @mask
1537fcf5ef2aSThomas Huth * correspond to TB flags bits cached in the hflags, unless @write_type
1538fcf5ef2aSThomas Huth * is CPSRWriteRaw.
1539fcf5ef2aSThomas Huth */
1540fcf5ef2aSThomas Huth void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
1541fcf5ef2aSThomas Huth CPSRWriteType write_type);
1542fcf5ef2aSThomas Huth
1543fcf5ef2aSThomas Huth /* Return the current xPSR value. */
xpsr_read(CPUARMState * env)1544fcf5ef2aSThomas Huth static inline uint32_t xpsr_read(CPUARMState *env)
1545fcf5ef2aSThomas Huth {
1546fcf5ef2aSThomas Huth int ZF;
1547fcf5ef2aSThomas Huth ZF = (env->ZF == 0);
1548fcf5ef2aSThomas Huth return (env->NF & 0x80000000) | (ZF << 30)
1549f1e2598cSPeter Maydell | (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
1550fcf5ef2aSThomas Huth | (env->thumb << 24) | ((env->condexec_bits & 3) << 25)
1551fcf5ef2aSThomas Huth | ((env->condexec_bits & 0xfc) << 8)
1552fcf5ef2aSThomas Huth | (env->GE << 16)
1553fcf5ef2aSThomas Huth | env->v7m.exception;
1554fcf5ef2aSThomas Huth }
1555fcf5ef2aSThomas Huth
1556987ab45eSPeter Maydell /* Set the xPSR. Note that some bits of mask must be all-set or all-clear. */
xpsr_write(CPUARMState * env,uint32_t val,uint32_t mask)1557987ab45eSPeter Maydell static inline void xpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
1558fcf5ef2aSThomas Huth {
1559fcf5ef2aSThomas Huth if (mask & XPSR_NZCV) {
1560fcf5ef2aSThomas Huth env->ZF = (~val) & XPSR_Z;
1561fcf5ef2aSThomas Huth env->NF = val;
1562987ab45eSPeter Maydell env->CF = (val >> 29) & 1;
1563987ab45eSPeter Maydell env->VF = (val << 3) & 0x80000000;
1564987ab45eSPeter Maydell }
1565f1e2598cSPeter Maydell if (mask & XPSR_Q) {
1566f1e2598cSPeter Maydell env->QF = ((val & XPSR_Q) != 0);
1567f1e2598cSPeter Maydell }
156804c9c81bSRichard Henderson if (mask & XPSR_GE) {
1569987ab45eSPeter Maydell env->GE = (val & XPSR_GE) >> 16;
1570987ab45eSPeter Maydell }
1571987ab45eSPeter Maydell #ifndef CONFIG_USER_ONLY
1572987ab45eSPeter Maydell if (mask & XPSR_T) {
1573fcf5ef2aSThomas Huth env->thumb = ((val & XPSR_T) != 0);
1574fcf5ef2aSThomas Huth }
1575fcf5ef2aSThomas Huth if (mask & XPSR_IT_0_1) {
1576987ab45eSPeter Maydell env->condexec_bits &= ~3;
1577fcf5ef2aSThomas Huth env->condexec_bits |= (val >> 25) & 3;
1578fcf5ef2aSThomas Huth }
1579fcf5ef2aSThomas Huth if (mask & XPSR_IT_2_7) {
1580987ab45eSPeter Maydell env->condexec_bits &= 3;
1581de2db7ecSPeter Maydell env->condexec_bits |= (val >> 8) & 0xfc;
1582de2db7ecSPeter Maydell }
1583fcf5ef2aSThomas Huth if (mask & XPSR_EXCP) {
158404c9c81bSRichard Henderson /* Note that this only happens on exception exit */
1585fcf5ef2aSThomas Huth write_v7m_exception(env, val & XPSR_EXCP);
1586fcf5ef2aSThomas Huth }
1587fcf5ef2aSThomas Huth #endif
1588fcf5ef2aSThomas Huth }
1589fcf5ef2aSThomas Huth
1590fcf5ef2aSThomas Huth #define HCR_VM (1ULL << 0)
1591fcf5ef2aSThomas Huth #define HCR_SWIO (1ULL << 1)
1592fcf5ef2aSThomas Huth #define HCR_PTW (1ULL << 2)
1593fcf5ef2aSThomas Huth #define HCR_FMO (1ULL << 3)
1594fcf5ef2aSThomas Huth #define HCR_IMO (1ULL << 4)
1595fcf5ef2aSThomas Huth #define HCR_AMO (1ULL << 5)
1596fcf5ef2aSThomas Huth #define HCR_VF (1ULL << 6)
1597fcf5ef2aSThomas Huth #define HCR_VI (1ULL << 7)
1598fcf5ef2aSThomas Huth #define HCR_VSE (1ULL << 8)
1599fcf5ef2aSThomas Huth #define HCR_FB (1ULL << 9)
1600fcf5ef2aSThomas Huth #define HCR_BSU_MASK (3ULL << 10)
1601fcf5ef2aSThomas Huth #define HCR_DC (1ULL << 12)
1602fcf5ef2aSThomas Huth #define HCR_TWI (1ULL << 13)
1603fcf5ef2aSThomas Huth #define HCR_TWE (1ULL << 14)
1604fcf5ef2aSThomas Huth #define HCR_TID0 (1ULL << 15)
1605fcf5ef2aSThomas Huth #define HCR_TID1 (1ULL << 16)
1606fcf5ef2aSThomas Huth #define HCR_TID2 (1ULL << 17)
1607fcf5ef2aSThomas Huth #define HCR_TID3 (1ULL << 18)
1608fcf5ef2aSThomas Huth #define HCR_TSC (1ULL << 19)
1609099bf53bSRichard Henderson #define HCR_TIDCP (1ULL << 20)
1610fcf5ef2aSThomas Huth #define HCR_TACR (1ULL << 21)
1611fcf5ef2aSThomas Huth #define HCR_TSW (1ULL << 22)
1612fcf5ef2aSThomas Huth #define HCR_TPCP (1ULL << 23)
1613fcf5ef2aSThomas Huth #define HCR_TPU (1ULL << 24)
1614fcf5ef2aSThomas Huth #define HCR_TTLB (1ULL << 25)
1615fcf5ef2aSThomas Huth #define HCR_TVM (1ULL << 26)
1616fcf5ef2aSThomas Huth #define HCR_TGE (1ULL << 27)
1617fcf5ef2aSThomas Huth #define HCR_TDZ (1ULL << 28)
1618fcf5ef2aSThomas Huth #define HCR_HCD (1ULL << 29)
1619fcf5ef2aSThomas Huth #define HCR_TRVM (1ULL << 30)
1620ac656b16SPeter Maydell #define HCR_RW (1ULL << 31)
1621099bf53bSRichard Henderson #define HCR_CD (1ULL << 32)
1622099bf53bSRichard Henderson #define HCR_ID (1ULL << 33)
1623099bf53bSRichard Henderson #define HCR_E2H (1ULL << 34)
1624099bf53bSRichard Henderson #define HCR_TLOR (1ULL << 35)
1625aa3cc42cSRichard Henderson #define HCR_TERR (1ULL << 36)
1626099bf53bSRichard Henderson #define HCR_TEA (1ULL << 37)
1627099bf53bSRichard Henderson #define HCR_MIOCNCE (1ULL << 38)
1628099bf53bSRichard Henderson #define HCR_TME (1ULL << 39)
1629099bf53bSRichard Henderson #define HCR_APK (1ULL << 40)
1630099bf53bSRichard Henderson #define HCR_API (1ULL << 41)
1631099bf53bSRichard Henderson #define HCR_NV (1ULL << 42)
1632099bf53bSRichard Henderson #define HCR_NV1 (1ULL << 43)
1633099bf53bSRichard Henderson #define HCR_AT (1ULL << 44)
1634aa3cc42cSRichard Henderson #define HCR_NV2 (1ULL << 45)
1635099bf53bSRichard Henderson #define HCR_FWB (1ULL << 46)
1636099bf53bSRichard Henderson #define HCR_FIEN (1ULL << 47)
1637e0a38bb3SRichard Henderson #define HCR_GPF (1ULL << 48)
1638099bf53bSRichard Henderson #define HCR_TID4 (1ULL << 49)
1639e0a38bb3SRichard Henderson #define HCR_TICAB (1ULL << 50)
1640099bf53bSRichard Henderson #define HCR_AMVOFFEN (1ULL << 51)
1641099bf53bSRichard Henderson #define HCR_TOCU (1ULL << 52)
1642099bf53bSRichard Henderson #define HCR_ENSCXT (1ULL << 53)
1643099bf53bSRichard Henderson #define HCR_TTLBIS (1ULL << 54)
1644e0a38bb3SRichard Henderson #define HCR_TTLBOS (1ULL << 55)
1645e0a38bb3SRichard Henderson #define HCR_ATA (1ULL << 56)
1646e0a38bb3SRichard Henderson #define HCR_DCT (1ULL << 57)
1647099bf53bSRichard Henderson #define HCR_TID5 (1ULL << 58)
164806f2adccSJerome Forissier #define HCR_TWEDEN (1ULL << 59)
164906f2adccSJerome Forissier #define HCR_TWEDEL MAKE_64BIT_MASK(60, 4)
165006f2adccSJerome Forissier
165106f2adccSJerome Forissier #define SCR_NS (1ULL << 0)
165206f2adccSJerome Forissier #define SCR_IRQ (1ULL << 1)
165306f2adccSJerome Forissier #define SCR_FIQ (1ULL << 2)
165406f2adccSJerome Forissier #define SCR_EA (1ULL << 3)
165506f2adccSJerome Forissier #define SCR_FW (1ULL << 4)
165606f2adccSJerome Forissier #define SCR_AW (1ULL << 5)
165706f2adccSJerome Forissier #define SCR_NET (1ULL << 6)
165806f2adccSJerome Forissier #define SCR_SMD (1ULL << 7)
165906f2adccSJerome Forissier #define SCR_HCE (1ULL << 8)
166006f2adccSJerome Forissier #define SCR_SIF (1ULL << 9)
166106f2adccSJerome Forissier #define SCR_RW (1ULL << 10)
166206f2adccSJerome Forissier #define SCR_ST (1ULL << 11)
166306f2adccSJerome Forissier #define SCR_TWI (1ULL << 12)
166406f2adccSJerome Forissier #define SCR_TWE (1ULL << 13)
166506f2adccSJerome Forissier #define SCR_TLOR (1ULL << 14)
166606f2adccSJerome Forissier #define SCR_TERR (1ULL << 15)
166706f2adccSJerome Forissier #define SCR_APK (1ULL << 16)
166806f2adccSJerome Forissier #define SCR_API (1ULL << 17)
166906f2adccSJerome Forissier #define SCR_EEL2 (1ULL << 18)
167006f2adccSJerome Forissier #define SCR_EASE (1ULL << 19)
167106f2adccSJerome Forissier #define SCR_NMEA (1ULL << 20)
167206f2adccSJerome Forissier #define SCR_FIEN (1ULL << 21)
167306f2adccSJerome Forissier #define SCR_ENSCXT (1ULL << 25)
167406f2adccSJerome Forissier #define SCR_ATA (1ULL << 26)
1675f527d661SRichard Henderson #define SCR_FGTEN (1ULL << 27)
1676f527d661SRichard Henderson #define SCR_ECVEN (1ULL << 28)
1677f527d661SRichard Henderson #define SCR_TWEDEN (1ULL << 29)
1678f527d661SRichard Henderson #define SCR_TWEDEL MAKE_64BIT_MASK(30, 4)
1679f527d661SRichard Henderson #define SCR_TME (1ULL << 34)
1680f527d661SRichard Henderson #define SCR_AMVOFFEN (1ULL << 35)
1681f527d661SRichard Henderson #define SCR_ENAS0 (1ULL << 36)
1682f527d661SRichard Henderson #define SCR_ADEN (1ULL << 37)
1683f527d661SRichard Henderson #define SCR_HXEN (1ULL << 38)
1684aa3cc42cSRichard Henderson #define SCR_TRNDR (1ULL << 40)
1685fcf5ef2aSThomas Huth #define SCR_ENTP2 (1ULL << 41)
1686fcf5ef2aSThomas Huth #define SCR_GPF (1ULL << 48)
1687fcf5ef2aSThomas Huth #define SCR_NSE (1ULL << 62)
1688fcf5ef2aSThomas Huth
1689fcf5ef2aSThomas Huth /* Return the current FPSCR value. */
1690db397a81SPeter Maydell uint32_t vfp_get_fpscr(CPUARMState *env);
1691db397a81SPeter Maydell void vfp_set_fpscr(CPUARMState *env, uint32_t val);
1692db397a81SPeter Maydell
1693d81ce0efSAlex Bennée /*
1694db397a81SPeter Maydell * FPCR, Floating Point Control Register
1695db397a81SPeter Maydell * FPSR, Floating Point Status Register
1696db397a81SPeter Maydell *
1697db397a81SPeter Maydell * For A64 floating point control and status bits are stored in
1698db397a81SPeter Maydell * two logically distinct registers, FPCR and FPSR. We store these
1699db397a81SPeter Maydell * in QEMU in vfp.fpcr and vfp.fpsr.
1700db397a81SPeter Maydell * For A32 there was only one register, FPSCR. The bits are arranged
1701db397a81SPeter Maydell * such that FPSCR bits map to FPCR or FPSR bits in the same bit positions,
1702fcf5ef2aSThomas Huth * so we can use appropriate masking to handle FPSCR reads and writes.
1703d81ce0efSAlex Bennée * Note that the FPCR has some bits which are not visible in the
1704a26db547SPeter Maydell * AArch32 view (for FEAT_AFP). Writing the FPSCR leaves these unchanged.
1705a15945d9SPeter Maydell */
1706a15945d9SPeter Maydell
1707a15945d9SPeter Maydell /* FPCR bits */
1708a15945d9SPeter Maydell #define FPCR_IOE (1 << 8) /* Invalid Operation exception trap enable */
1709a15945d9SPeter Maydell #define FPCR_DZE (1 << 9) /* Divide by Zero exception trap enable */
1710a15945d9SPeter Maydell #define FPCR_OFE (1 << 10) /* Overflow exception trap enable */
1711db397a81SPeter Maydell #define FPCR_UFE (1 << 11) /* Underflow exception trap enable */
1712d81ce0efSAlex Bennée #define FPCR_IXE (1 << 12) /* Inexact exception trap enable */
1713db397a81SPeter Maydell #define FPCR_EBF (1 << 13) /* Extended BFloat16 behaviors */
171499c7834fSPeter Maydell #define FPCR_IDE (1 << 15) /* Input Denormal exception trap enable */
1715d81ce0efSAlex Bennée #define FPCR_LEN_MASK (7 << 16) /* LEN, A-profile only */
1716d81ce0efSAlex Bennée #define FPCR_FZ16 (1 << 19) /* ARMv8.2+, FP16 flush-to-zero */
171799c7834fSPeter Maydell #define FPCR_STRIDE_MASK (3 << 20) /* Stride */
17189542c30bSPeter Maydell #define FPCR_RMODE_MASK (3 << 22) /* Rounding mode */
171999c7834fSPeter Maydell #define FPCR_FZ (1 << 24) /* Flush-to-zero enable bit */
172099c7834fSPeter Maydell #define FPCR_DN (1 << 25) /* Default NaN enable bit */
1721b26b5629SPeter Maydell #define FPCR_AHP (1 << 26) /* Alternative half-precision */
172299c7834fSPeter Maydell
1723db397a81SPeter Maydell #define FPCR_LTPSIZE_SHIFT 16 /* LTPSIZE, M-profile only */
1724db397a81SPeter Maydell #define FPCR_LTPSIZE_MASK (7 << FPCR_LTPSIZE_SHIFT)
1725db397a81SPeter Maydell #define FPCR_LTPSIZE_LENGTH 3
1726a26db547SPeter Maydell
1727db397a81SPeter Maydell /* Cumulative exception trap enable bits */
1728db397a81SPeter Maydell #define FPCR_EEXC_MASK (FPCR_IOE | FPCR_DZE | FPCR_OFE | FPCR_UFE | FPCR_IXE | FPCR_IDE)
1729db397a81SPeter Maydell
1730db397a81SPeter Maydell /* FPSR bits */
1731db397a81SPeter Maydell #define FPSR_IOC (1 << 0) /* Invalid Operation cumulative exception */
1732db397a81SPeter Maydell #define FPSR_DZC (1 << 1) /* Divide by Zero cumulative exception */
1733a26db547SPeter Maydell #define FPSR_OFC (1 << 2) /* Overflow cumulative exception */
1734a26db547SPeter Maydell #define FPSR_UFC (1 << 3) /* Underflow cumulative exception */
1735a26db547SPeter Maydell #define FPSR_IXC (1 << 4) /* Inexact cumulative exception */
1736a26db547SPeter Maydell #define FPSR_IDC (1 << 7) /* Input Denormal cumulative exception */
1737a26db547SPeter Maydell #define FPSR_QC (1 << 27) /* Cumulative saturation bit */
1738a26db547SPeter Maydell #define FPSR_V (1 << 28) /* FP overflow flag */
1739db397a81SPeter Maydell #define FPSR_C (1 << 29) /* FP carry flag */
1740db397a81SPeter Maydell #define FPSR_Z (1 << 30) /* FP zero flag */
1741db397a81SPeter Maydell #define FPSR_N (1 << 31) /* FP negative flag */
1742a26db547SPeter Maydell
1743a26db547SPeter Maydell /* Cumulative exception status bits */
1744d81ce0efSAlex Bennée #define FPSR_CEXC_MASK (FPSR_IOC | FPSR_DZC | FPSR_OFC | FPSR_UFC | FPSR_IXC | FPSR_IDC)
1745db397a81SPeter Maydell
1746db397a81SPeter Maydell #define FPSR_NZCV_MASK (FPSR_N | FPSR_Z | FPSR_C | FPSR_V)
1747db397a81SPeter Maydell #define FPSR_NZCVQC_MASK (FPSR_NZCV_MASK | FPSR_QC)
1748db397a81SPeter Maydell
1749db397a81SPeter Maydell /* A32 FPSCR bits which architecturally map to FPSR bits */
1750db397a81SPeter Maydell #define FPSCR_FPSR_MASK (FPSR_NZCVQC_MASK | FPSR_CEXC_MASK)
1751db397a81SPeter Maydell /* A32 FPSCR bits which architecturally map to FPCR bits */
1752db397a81SPeter Maydell #define FPSCR_FPCR_MASK (FPCR_EEXC_MASK | FPCR_LEN_MASK | FPCR_FZ16 | \
1753db397a81SPeter Maydell FPCR_STRIDE_MASK | FPCR_RMODE_MASK | \
17542de7cf9eSPeter Maydell FPCR_FZ | FPCR_DN | FPCR_AHP)
17552de7cf9eSPeter Maydell /* These masks don't overlap: each bit lives in only one place */
17562de7cf9eSPeter Maydell QEMU_BUILD_BUG_ON(FPSCR_FPSR_MASK & FPSCR_FPCR_MASK);
17572de7cf9eSPeter Maydell
17582de7cf9eSPeter Maydell /**
17592de7cf9eSPeter Maydell * vfp_get_fpsr: read the AArch64 FPSR
17602de7cf9eSPeter Maydell * @env: CPU context
17612de7cf9eSPeter Maydell *
17622de7cf9eSPeter Maydell * Return the current AArch64 FPSR value
17632de7cf9eSPeter Maydell */
17642de7cf9eSPeter Maydell uint32_t vfp_get_fpsr(CPUARMState *env);
17652de7cf9eSPeter Maydell
17662de7cf9eSPeter Maydell /**
17672de7cf9eSPeter Maydell * vfp_get_fpcr: read the AArch64 FPCR
17682de7cf9eSPeter Maydell * @env: CPU context
1769fcf5ef2aSThomas Huth *
1770b167325eSPeter Maydell * Return the current AArch64 FPCR value
1771b167325eSPeter Maydell */
1772b167325eSPeter Maydell uint32_t vfp_get_fpcr(CPUARMState *env);
1773b167325eSPeter Maydell
1774b167325eSPeter Maydell /**
1775b167325eSPeter Maydell * vfp_set_fpsr: write the AArch64 FPSR
1776fcf5ef2aSThomas Huth * @env: CPU context
1777b167325eSPeter Maydell * @value: new value
1778b167325eSPeter Maydell */
1779b167325eSPeter Maydell void vfp_set_fpsr(CPUARMState *env, uint32_t value);
1780b167325eSPeter Maydell
1781b167325eSPeter Maydell /**
1782b167325eSPeter Maydell * vfp_set_fpcr: write the AArch64 FPCR
1783fcf5ef2aSThomas Huth * @env: CPU context
1784fcf5ef2aSThomas Huth * @value: new value
1785fcf5ef2aSThomas Huth */
1786fcf5ef2aSThomas Huth void vfp_set_fpcr(CPUARMState *env, uint32_t value);
1787fcf5ef2aSThomas Huth
1788fcf5ef2aSThomas Huth enum arm_cpu_mode {
1789fcf5ef2aSThomas Huth ARM_CPU_MODE_USR = 0x10,
1790fcf5ef2aSThomas Huth ARM_CPU_MODE_FIQ = 0x11,
1791fcf5ef2aSThomas Huth ARM_CPU_MODE_IRQ = 0x12,
1792fcf5ef2aSThomas Huth ARM_CPU_MODE_SVC = 0x13,
1793fcf5ef2aSThomas Huth ARM_CPU_MODE_MON = 0x16,
1794fcf5ef2aSThomas Huth ARM_CPU_MODE_ABT = 0x17,
1795fcf5ef2aSThomas Huth ARM_CPU_MODE_HYP = 0x1a,
1796fcf5ef2aSThomas Huth ARM_CPU_MODE_UND = 0x1b,
1797fcf5ef2aSThomas Huth ARM_CPU_MODE_SYS = 0x1f
1798fcf5ef2aSThomas Huth };
1799fcf5ef2aSThomas Huth
1800fcf5ef2aSThomas Huth /* VFP system registers. */
1801fcf5ef2aSThomas Huth #define ARM_VFP_FPSID 0
1802fcf5ef2aSThomas Huth #define ARM_VFP_FPSCR 1
1803fcf5ef2aSThomas Huth #define ARM_VFP_MVFR2 5
1804fcf5ef2aSThomas Huth #define ARM_VFP_MVFR1 6
18059542c30bSPeter Maydell #define ARM_VFP_MVFR0 7
18069542c30bSPeter Maydell #define ARM_VFP_FPEXC 8
18079542c30bSPeter Maydell #define ARM_VFP_FPINST 9
18089542c30bSPeter Maydell #define ARM_VFP_FPINST2 10
18099542c30bSPeter Maydell /* These ones are M-profile only */
18109542c30bSPeter Maydell #define ARM_VFP_FPSCR_NZCVQC 2
1811fcf5ef2aSThomas Huth #define ARM_VFP_VPR 12
181232a290b8SPeter Maydell #define ARM_VFP_P0 13
181332a290b8SPeter Maydell #define ARM_VFP_FPCXT_NS 14
181432a290b8SPeter Maydell #define ARM_VFP_FPCXT_S 15
1815fcf5ef2aSThomas Huth
1816fcf5ef2aSThomas Huth /* QEMU-internal value meaning "FPSCR, but we care only about NZCV" */
1817fcf5ef2aSThomas Huth #define QEMU_VFP_FPSCR_NZCV 0xffff
1818fcf5ef2aSThomas Huth
1819fcf5ef2aSThomas Huth /* iwMMXt coprocessor control registers. */
1820fcf5ef2aSThomas Huth #define ARM_IWMMXT_wCID 0
1821fcf5ef2aSThomas Huth #define ARM_IWMMXT_wCon 1
1822fcf5ef2aSThomas Huth #define ARM_IWMMXT_wCSSF 2
1823fcf5ef2aSThomas Huth #define ARM_IWMMXT_wCASF 3
1824fcf5ef2aSThomas Huth #define ARM_IWMMXT_wCGR0 8
18252c4da50dSPeter Maydell #define ARM_IWMMXT_wCGR1 9
18262c4da50dSPeter Maydell #define ARM_IWMMXT_wCGR2 10
18272c4da50dSPeter Maydell #define ARM_IWMMXT_wCGR3 11
18282c4da50dSPeter Maydell
18292c4da50dSPeter Maydell /* V7M CCR bits */
18302c4da50dSPeter Maydell FIELD(V7M_CCR, NONBASETHRDENA, 0, 1)
18312c4da50dSPeter Maydell FIELD(V7M_CCR, USERSETMPEND, 1, 1)
18324730fb85SPeter Maydell FIELD(V7M_CCR, UNALIGN_TRP, 3, 1)
18332c4da50dSPeter Maydell FIELD(V7M_CCR, DIV_0_TRP, 4, 1)
18342c4da50dSPeter Maydell FIELD(V7M_CCR, BFHFNMIGN, 8, 1)
18354730fb85SPeter Maydell FIELD(V7M_CCR, STKALIGN, 9, 1)
18360e83f905SPeter Maydell FIELD(V7M_CCR, STKOFHFNMIGN, 10, 1)
18370e83f905SPeter Maydell FIELD(V7M_CCR, DC, 16, 1)
18382c4da50dSPeter Maydell FIELD(V7M_CCR, IC, 17, 1)
183924ac0fb1SPeter Maydell FIELD(V7M_CCR, BP, 18, 1)
184024ac0fb1SPeter Maydell FIELD(V7M_CCR, LOB, 19, 1)
184124ac0fb1SPeter Maydell FIELD(V7M_CCR, TRD, 20, 1)
184224ac0fb1SPeter Maydell
184324ac0fb1SPeter Maydell /* V7M SCR bits */
184424ac0fb1SPeter Maydell FIELD(V7M_SCR, SLEEPONEXIT, 1, 1)
18453b2e9344SPeter Maydell FIELD(V7M_SCR, SLEEPDEEP, 2, 1)
18463b2e9344SPeter Maydell FIELD(V7M_SCR, SLEEPDEEPS, 3, 1)
18473b2e9344SPeter Maydell FIELD(V7M_SCR, SEVONPEND, 4, 1)
18483b2e9344SPeter Maydell
18493b2e9344SPeter Maydell /* V7M AIRCR bits */
18503b2e9344SPeter Maydell FIELD(V7M_AIRCR, VECTRESET, 0, 1)
18513b2e9344SPeter Maydell FIELD(V7M_AIRCR, VECTCLRACTIVE, 1, 1)
18523b2e9344SPeter Maydell FIELD(V7M_AIRCR, SYSRESETREQ, 2, 1)
18533b2e9344SPeter Maydell FIELD(V7M_AIRCR, SYSRESETREQS, 3, 1)
18543b2e9344SPeter Maydell FIELD(V7M_AIRCR, PRIGROUP, 8, 3)
18553b2e9344SPeter Maydell FIELD(V7M_AIRCR, BFHFNMINS, 13, 1)
18562c4da50dSPeter Maydell FIELD(V7M_AIRCR, PRIS, 14, 1)
18572c4da50dSPeter Maydell FIELD(V7M_AIRCR, ENDIANNESS, 15, 1)
18582c4da50dSPeter Maydell FIELD(V7M_AIRCR, VECTKEY, 16, 16)
18592c4da50dSPeter Maydell
18602c4da50dSPeter Maydell /* V7M CFSR bits for MMFSR */
18612c4da50dSPeter Maydell FIELD(V7M_CFSR, IACCVIOL, 0, 1)
18622c4da50dSPeter Maydell FIELD(V7M_CFSR, DACCVIOL, 1, 1)
18632c4da50dSPeter Maydell FIELD(V7M_CFSR, MUNSTKERR, 3, 1)
18642c4da50dSPeter Maydell FIELD(V7M_CFSR, MSTKERR, 4, 1)
18652c4da50dSPeter Maydell FIELD(V7M_CFSR, MLSPERR, 5, 1)
18662c4da50dSPeter Maydell FIELD(V7M_CFSR, MMARVALID, 7, 1)
18672c4da50dSPeter Maydell
18682c4da50dSPeter Maydell /* V7M CFSR bits for BFSR */
18692c4da50dSPeter Maydell FIELD(V7M_CFSR, IBUSERR, 8 + 0, 1)
18702c4da50dSPeter Maydell FIELD(V7M_CFSR, PRECISERR, 8 + 1, 1)
18712c4da50dSPeter Maydell FIELD(V7M_CFSR, IMPRECISERR, 8 + 2, 1)
18722c4da50dSPeter Maydell FIELD(V7M_CFSR, UNSTKERR, 8 + 3, 1)
18732c4da50dSPeter Maydell FIELD(V7M_CFSR, STKERR, 8 + 4, 1)
18742c4da50dSPeter Maydell FIELD(V7M_CFSR, LSPERR, 8 + 5, 1)
18752c4da50dSPeter Maydell FIELD(V7M_CFSR, BFARVALID, 8 + 7, 1)
18762c4da50dSPeter Maydell
18772c4da50dSPeter Maydell /* V7M CFSR bits for UFSR */
187886f026deSPeter Maydell FIELD(V7M_CFSR, UNDEFINSTR, 16 + 0, 1)
18792c4da50dSPeter Maydell FIELD(V7M_CFSR, INVSTATE, 16 + 1, 1)
18802c4da50dSPeter Maydell FIELD(V7M_CFSR, INVPC, 16 + 2, 1)
18812c4da50dSPeter Maydell FIELD(V7M_CFSR, NOCP, 16 + 3, 1)
1882334e8dadSPeter Maydell FIELD(V7M_CFSR, STKOF, 16 + 4, 1)
1883334e8dadSPeter Maydell FIELD(V7M_CFSR, UNALIGNED, 16 + 8, 1)
1884334e8dadSPeter Maydell FIELD(V7M_CFSR, DIVBYZERO, 16 + 9, 1)
1885334e8dadSPeter Maydell
1886334e8dadSPeter Maydell /* V7M CFSR bit masks covering all of the subregister bits */
18872c4da50dSPeter Maydell FIELD(V7M_CFSR, MMFSR, 0, 8)
18882c4da50dSPeter Maydell FIELD(V7M_CFSR, BFSR, 8, 8)
18892c4da50dSPeter Maydell FIELD(V7M_CFSR, UFSR, 16, 16)
18902c4da50dSPeter Maydell
18912c4da50dSPeter Maydell /* V7M HFSR bits */
18922c4da50dSPeter Maydell FIELD(V7M_HFSR, VECTTBL, 1, 1)
18932c4da50dSPeter Maydell FIELD(V7M_HFSR, FORCED, 30, 1)
18942c4da50dSPeter Maydell FIELD(V7M_HFSR, DEBUGEVT, 31, 1)
18952c4da50dSPeter Maydell
18962c4da50dSPeter Maydell /* V7M DFSR bits */
18972c4da50dSPeter Maydell FIELD(V7M_DFSR, HALTED, 0, 1)
18982c4da50dSPeter Maydell FIELD(V7M_DFSR, BKPT, 1, 1)
1899bed079daSPeter Maydell FIELD(V7M_DFSR, DWTTRAP, 2, 1)
1900bed079daSPeter Maydell FIELD(V7M_DFSR, VCATCH, 3, 1)
1901bed079daSPeter Maydell FIELD(V7M_DFSR, EXTERNAL, 4, 1)
1902bed079daSPeter Maydell
1903bed079daSPeter Maydell /* V7M SFSR bits */
1904bed079daSPeter Maydell FIELD(V7M_SFSR, INVEP, 0, 1)
1905bed079daSPeter Maydell FIELD(V7M_SFSR, INVIS, 1, 1)
1906bed079daSPeter Maydell FIELD(V7M_SFSR, INVER, 2, 1)
1907bed079daSPeter Maydell FIELD(V7M_SFSR, AUVIOL, 3, 1)
1908bed079daSPeter Maydell FIELD(V7M_SFSR, INVTRAN, 4, 1)
190929c483a5SMichael Davidsaver FIELD(V7M_SFSR, LSPERR, 5, 1)
191029c483a5SMichael Davidsaver FIELD(V7M_SFSR, SFARVALID, 6, 1)
191129c483a5SMichael Davidsaver FIELD(V7M_SFSR, LSERR, 7, 1)
191229c483a5SMichael Davidsaver
191329c483a5SMichael Davidsaver /* v7M MPU_CTRL bits */
191443bbce7fSPeter Maydell FIELD(V7M_MPU_CTRL, ENABLE, 0, 1)
191543bbce7fSPeter Maydell FIELD(V7M_MPU_CTRL, HFNMIENA, 1, 1)
191643bbce7fSPeter Maydell FIELD(V7M_MPU_CTRL, PRIVDEFENA, 2, 1)
191743bbce7fSPeter Maydell
191843bbce7fSPeter Maydell /* v7M CLIDR bits */
191943bbce7fSPeter Maydell FIELD(V7M_CLIDR, CTYPE_ALL, 0, 21)
192043bbce7fSPeter Maydell FIELD(V7M_CLIDR, LOUIS, 21, 3)
192143bbce7fSPeter Maydell FIELD(V7M_CLIDR, LOC, 24, 3)
192243bbce7fSPeter Maydell FIELD(V7M_CLIDR, LOUU, 27, 3)
192343bbce7fSPeter Maydell FIELD(V7M_CLIDR, ICB, 30, 2)
192443bbce7fSPeter Maydell
192543bbce7fSPeter Maydell FIELD(V7M_CSSELR, IND, 0, 1)
192643bbce7fSPeter Maydell FIELD(V7M_CSSELR, LEVEL, 1, 3)
192743bbce7fSPeter Maydell /* We use the combination of InD and Level to index into cpu->ccsidr[];
192843bbce7fSPeter Maydell * define a mask for this and check that it doesn't permit running off
1929d33abe82SPeter Maydell * the end of the array.
1930d33abe82SPeter Maydell */
1931d33abe82SPeter Maydell FIELD(V7M_CSSELR, INDEX, 0, 4)
1932d33abe82SPeter Maydell
1933d33abe82SPeter Maydell /* v7M FPCCR bits */
1934d33abe82SPeter Maydell FIELD(V7M_FPCCR, LSPACT, 0, 1)
1935d33abe82SPeter Maydell FIELD(V7M_FPCCR, USER, 1, 1)
1936d33abe82SPeter Maydell FIELD(V7M_FPCCR, S, 2, 1)
1937d33abe82SPeter Maydell FIELD(V7M_FPCCR, THREAD, 3, 1)
1938d33abe82SPeter Maydell FIELD(V7M_FPCCR, HFRDY, 4, 1)
1939d33abe82SPeter Maydell FIELD(V7M_FPCCR, MMRDY, 5, 1)
1940d33abe82SPeter Maydell FIELD(V7M_FPCCR, BFRDY, 6, 1)
1941d33abe82SPeter Maydell FIELD(V7M_FPCCR, SFRDY, 7, 1)
1942d33abe82SPeter Maydell FIELD(V7M_FPCCR, MONRDY, 8, 1)
1943d33abe82SPeter Maydell FIELD(V7M_FPCCR, SPLIMVIOL, 9, 1)
1944d33abe82SPeter Maydell FIELD(V7M_FPCCR, UFRDY, 10, 1)
1945d33abe82SPeter Maydell FIELD(V7M_FPCCR, RES0, 11, 15)
1946d33abe82SPeter Maydell FIELD(V7M_FPCCR, TS, 26, 1)
1947d33abe82SPeter Maydell FIELD(V7M_FPCCR, CLRONRETS, 27, 1)
1948d33abe82SPeter Maydell FIELD(V7M_FPCCR, CLRONRET, 28, 1)
1949d33abe82SPeter Maydell FIELD(V7M_FPCCR, LSPENS, 29, 1)
1950d33abe82SPeter Maydell FIELD(V7M_FPCCR, LSPEN, 30, 1)
1951d33abe82SPeter Maydell FIELD(V7M_FPCCR, ASPEN, 31, 1)
1952d33abe82SPeter Maydell /* These bits are banked. Others are non-banked and live in the M_REG_S bank */
1953d33abe82SPeter Maydell #define R_V7M_FPCCR_BANKED_MASK \
1954d33abe82SPeter Maydell (R_V7M_FPCCR_LSPACT_MASK | \
1955d33abe82SPeter Maydell R_V7M_FPCCR_USER_MASK | \
1956d33abe82SPeter Maydell R_V7M_FPCCR_THREAD_MASK | \
1957d33abe82SPeter Maydell R_V7M_FPCCR_MMRDY_MASK | \
19587c3d47daSPeter Maydell R_V7M_FPCCR_SPLIMVIOL_MASK | \
19597c3d47daSPeter Maydell R_V7M_FPCCR_UFRDY_MASK | \
19607c3d47daSPeter Maydell R_V7M_FPCCR_ASPEN_MASK)
19617c3d47daSPeter Maydell
19627c3d47daSPeter Maydell /* v7M VPR bits */
1963a62e62afSRichard Henderson FIELD(V7M_VPR, P0, 0, 16)
1964a62e62afSRichard Henderson FIELD(V7M_VPR, MASK01, 16, 4)
1965a62e62afSRichard Henderson FIELD(V7M_VPR, MASK23, 20, 4)
19662a14526aSLeif Lindholm
19672a14526aSLeif Lindholm /*
19682a14526aSLeif Lindholm * System register ID fields.
19692a14526aSLeif Lindholm */
19702a14526aSLeif Lindholm FIELD(CLIDR_EL1, CTYPE1, 0, 3)
19712a14526aSLeif Lindholm FIELD(CLIDR_EL1, CTYPE2, 3, 3)
19722a14526aSLeif Lindholm FIELD(CLIDR_EL1, CTYPE3, 6, 3)
19732a14526aSLeif Lindholm FIELD(CLIDR_EL1, CTYPE4, 9, 3)
19742a14526aSLeif Lindholm FIELD(CLIDR_EL1, CTYPE5, 12, 3)
19752a14526aSLeif Lindholm FIELD(CLIDR_EL1, CTYPE6, 15, 3)
19762a14526aSLeif Lindholm FIELD(CLIDR_EL1, CTYPE7, 18, 3)
19772a14526aSLeif Lindholm FIELD(CLIDR_EL1, LOUIS, 21, 3)
19782a14526aSLeif Lindholm FIELD(CLIDR_EL1, LOC, 24, 3)
19792a14526aSLeif Lindholm FIELD(CLIDR_EL1, LOUU, 27, 3)
19802a14526aSLeif Lindholm FIELD(CLIDR_EL1, ICB, 30, 3)
19812a14526aSLeif Lindholm
19822a14526aSLeif Lindholm /* When FEAT_CCIDX is implemented */
19832a14526aSLeif Lindholm FIELD(CCSIDR_EL1, CCIDX_LINESIZE, 0, 3)
19842a14526aSLeif Lindholm FIELD(CCSIDR_EL1, CCIDX_ASSOCIATIVITY, 3, 21)
19852a14526aSLeif Lindholm FIELD(CCSIDR_EL1, CCIDX_NUMSETS, 32, 24)
19862a14526aSLeif Lindholm
19872a14526aSLeif Lindholm /* When FEAT_CCIDX is not implemented */
19882a14526aSLeif Lindholm FIELD(CCSIDR_EL1, LINESIZE, 0, 3)
19892a14526aSLeif Lindholm FIELD(CCSIDR_EL1, ASSOCIATIVITY, 3, 10)
19902a14526aSLeif Lindholm FIELD(CCSIDR_EL1, NUMSETS, 13, 15)
19912a14526aSLeif Lindholm
19922a14526aSLeif Lindholm FIELD(CTR_EL0, IMINLINE, 0, 4)
19932a14526aSLeif Lindholm FIELD(CTR_EL0, L1IP, 14, 2)
19942a14526aSLeif Lindholm FIELD(CTR_EL0, DMINLINE, 16, 4)
19952a14526aSLeif Lindholm FIELD(CTR_EL0, ERG, 20, 4)
19962a14526aSLeif Lindholm FIELD(CTR_EL0, CWG, 24, 4)
19972bd5f41cSAlex Bennée FIELD(CTR_EL0, IDC, 28, 1)
19982bd5f41cSAlex Bennée FIELD(CTR_EL0, DIC, 29, 1)
19992bd5f41cSAlex Bennée FIELD(CTR_EL0, TMINLINE, 32, 6)
20002bd5f41cSAlex Bennée
20012bd5f41cSAlex Bennée FIELD(MIDR_EL1, REVISION, 0, 4)
20022bd5f41cSAlex Bennée FIELD(MIDR_EL1, PARTNUM, 4, 12)
2003a62e62afSRichard Henderson FIELD(MIDR_EL1, ARCHITECTURE, 16, 4)
2004a62e62afSRichard Henderson FIELD(MIDR_EL1, VARIANT, 20, 4)
2005a62e62afSRichard Henderson FIELD(MIDR_EL1, IMPLEMENTER, 24, 8)
2006a62e62afSRichard Henderson
2007a62e62afSRichard Henderson FIELD(ID_ISAR0, SWAP, 0, 4)
2008a62e62afSRichard Henderson FIELD(ID_ISAR0, BITCOUNT, 4, 4)
2009a62e62afSRichard Henderson FIELD(ID_ISAR0, BITFIELD, 8, 4)
2010a62e62afSRichard Henderson FIELD(ID_ISAR0, CMPBRANCH, 12, 4)
2011a62e62afSRichard Henderson FIELD(ID_ISAR0, COPROC, 16, 4)
2012a62e62afSRichard Henderson FIELD(ID_ISAR0, DEBUG, 20, 4)
2013a62e62afSRichard Henderson FIELD(ID_ISAR0, DIVIDE, 24, 4)
2014a62e62afSRichard Henderson
2015a62e62afSRichard Henderson FIELD(ID_ISAR1, ENDIAN, 0, 4)
2016a62e62afSRichard Henderson FIELD(ID_ISAR1, EXCEPT, 4, 4)
2017a62e62afSRichard Henderson FIELD(ID_ISAR1, EXCEPT_AR, 8, 4)
2018a62e62afSRichard Henderson FIELD(ID_ISAR1, EXTEND, 12, 4)
2019a62e62afSRichard Henderson FIELD(ID_ISAR1, IFTHEN, 16, 4)
2020a62e62afSRichard Henderson FIELD(ID_ISAR1, IMMEDIATE, 20, 4)
2021a62e62afSRichard Henderson FIELD(ID_ISAR1, INTERWORK, 24, 4)
2022a62e62afSRichard Henderson FIELD(ID_ISAR1, JAZELLE, 28, 4)
2023a62e62afSRichard Henderson
2024a62e62afSRichard Henderson FIELD(ID_ISAR2, LOADSTORE, 0, 4)
2025a62e62afSRichard Henderson FIELD(ID_ISAR2, MEMHINT, 4, 4)
2026a62e62afSRichard Henderson FIELD(ID_ISAR2, MULTIACCESSINT, 8, 4)
2027a62e62afSRichard Henderson FIELD(ID_ISAR2, MULT, 12, 4)
2028a62e62afSRichard Henderson FIELD(ID_ISAR2, MULTS, 16, 4)
2029a62e62afSRichard Henderson FIELD(ID_ISAR2, MULTU, 20, 4)
2030a62e62afSRichard Henderson FIELD(ID_ISAR2, PSR_AR, 24, 4)
2031a62e62afSRichard Henderson FIELD(ID_ISAR2, REVERSAL, 28, 4)
2032a62e62afSRichard Henderson
2033a62e62afSRichard Henderson FIELD(ID_ISAR3, SATURATE, 0, 4)
2034a62e62afSRichard Henderson FIELD(ID_ISAR3, SIMD, 4, 4)
2035a62e62afSRichard Henderson FIELD(ID_ISAR3, SVC, 8, 4)
2036a62e62afSRichard Henderson FIELD(ID_ISAR3, SYNCHPRIM, 12, 4)
2037a62e62afSRichard Henderson FIELD(ID_ISAR3, TABBRANCH, 16, 4)
2038a62e62afSRichard Henderson FIELD(ID_ISAR3, T32COPY, 20, 4)
2039a62e62afSRichard Henderson FIELD(ID_ISAR3, TRUENOP, 24, 4)
2040a62e62afSRichard Henderson FIELD(ID_ISAR3, T32EE, 28, 4)
2041a62e62afSRichard Henderson
2042a62e62afSRichard Henderson FIELD(ID_ISAR4, UNPRIV, 0, 4)
2043a62e62afSRichard Henderson FIELD(ID_ISAR4, WITHSHIFTS, 4, 4)
2044a62e62afSRichard Henderson FIELD(ID_ISAR4, WRITEBACK, 8, 4)
2045a62e62afSRichard Henderson FIELD(ID_ISAR4, SMC, 12, 4)
2046a62e62afSRichard Henderson FIELD(ID_ISAR4, BARRIER, 16, 4)
2047a62e62afSRichard Henderson FIELD(ID_ISAR4, SYNCHPRIM_FRAC, 20, 4)
2048a62e62afSRichard Henderson FIELD(ID_ISAR4, PSR_M, 24, 4)
2049a62e62afSRichard Henderson FIELD(ID_ISAR4, SWP_FRAC, 28, 4)
2050a62e62afSRichard Henderson
2051a62e62afSRichard Henderson FIELD(ID_ISAR5, SEVL, 0, 4)
2052a62e62afSRichard Henderson FIELD(ID_ISAR5, AES, 4, 4)
2053a62e62afSRichard Henderson FIELD(ID_ISAR5, SHA1, 8, 4)
2054a62e62afSRichard Henderson FIELD(ID_ISAR5, SHA2, 12, 4)
2055a62e62afSRichard Henderson FIELD(ID_ISAR5, CRC32, 16, 4)
2056a62e62afSRichard Henderson FIELD(ID_ISAR5, RDM, 24, 4)
2057a62e62afSRichard Henderson FIELD(ID_ISAR5, VCMA, 28, 4)
2058a62e62afSRichard Henderson
2059a62e62afSRichard Henderson FIELD(ID_ISAR6, JSCVT, 0, 4)
2060bd78b6beSLeif Lindholm FIELD(ID_ISAR6, DP, 4, 4)
2061bd78b6beSLeif Lindholm FIELD(ID_ISAR6, FHM, 8, 4)
2062a62e62afSRichard Henderson FIELD(ID_ISAR6, SB, 12, 4)
20630ae0326bSPeter Maydell FIELD(ID_ISAR6, SPECRES, 16, 4)
20640ae0326bSPeter Maydell FIELD(ID_ISAR6, BF16, 20, 4)
20650ae0326bSPeter Maydell FIELD(ID_ISAR6, I8MM, 24, 4)
20660ae0326bSPeter Maydell
20670ae0326bSPeter Maydell FIELD(ID_MMFR0, VMSA, 0, 4)
20680ae0326bSPeter Maydell FIELD(ID_MMFR0, PMSA, 4, 4)
20690ae0326bSPeter Maydell FIELD(ID_MMFR0, OUTERSHR, 8, 4)
20700ae0326bSPeter Maydell FIELD(ID_MMFR0, SHARELVL, 12, 4)
20710ae0326bSPeter Maydell FIELD(ID_MMFR0, TCM, 16, 4)
2072bd78b6beSLeif Lindholm FIELD(ID_MMFR0, AUXREG, 20, 4)
2073bd78b6beSLeif Lindholm FIELD(ID_MMFR0, FCSE, 24, 4)
2074bd78b6beSLeif Lindholm FIELD(ID_MMFR0, INNERSHR, 28, 4)
2075bd78b6beSLeif Lindholm
2076bd78b6beSLeif Lindholm FIELD(ID_MMFR1, L1HVDVA, 0, 4)
2077bd78b6beSLeif Lindholm FIELD(ID_MMFR1, L1UNIVA, 4, 4)
2078bd78b6beSLeif Lindholm FIELD(ID_MMFR1, L1HVDSW, 8, 4)
2079bd78b6beSLeif Lindholm FIELD(ID_MMFR1, L1UNISW, 12, 4)
2080bd78b6beSLeif Lindholm FIELD(ID_MMFR1, L1HVD, 16, 4)
2081bd78b6beSLeif Lindholm FIELD(ID_MMFR1, L1UNI, 20, 4)
2082bd78b6beSLeif Lindholm FIELD(ID_MMFR1, L1TSTCLN, 24, 4)
2083bd78b6beSLeif Lindholm FIELD(ID_MMFR1, BPRED, 28, 4)
2084bd78b6beSLeif Lindholm
2085bd78b6beSLeif Lindholm FIELD(ID_MMFR2, L1HVDFG, 0, 4)
2086bd78b6beSLeif Lindholm FIELD(ID_MMFR2, L1HVDBG, 4, 4)
2087bd78b6beSLeif Lindholm FIELD(ID_MMFR2, L1HVDRNG, 8, 4)
2088bd78b6beSLeif Lindholm FIELD(ID_MMFR2, HVDTLB, 12, 4)
2089bd78b6beSLeif Lindholm FIELD(ID_MMFR2, UNITLB, 16, 4)
20903d6ad6bbSRichard Henderson FIELD(ID_MMFR2, MEMBARR, 20, 4)
20913d6ad6bbSRichard Henderson FIELD(ID_MMFR2, WFISTALL, 24, 4)
20923d6ad6bbSRichard Henderson FIELD(ID_MMFR2, HWACCFLG, 28, 4)
20933d6ad6bbSRichard Henderson
20943d6ad6bbSRichard Henderson FIELD(ID_MMFR3, CMAINTVA, 0, 4)
20953d6ad6bbSRichard Henderson FIELD(ID_MMFR3, CMAINTSW, 4, 4)
20963d6ad6bbSRichard Henderson FIELD(ID_MMFR3, BPMAINT, 8, 4)
20973d6ad6bbSRichard Henderson FIELD(ID_MMFR3, MAINTBCST, 12, 4)
20983d6ad6bbSRichard Henderson FIELD(ID_MMFR3, PAN, 16, 4)
2099ab638a32SRichard Henderson FIELD(ID_MMFR3, COHWALK, 20, 4)
2100ab638a32SRichard Henderson FIELD(ID_MMFR3, CMEMSZ, 24, 4)
2101ab638a32SRichard Henderson FIELD(ID_MMFR3, SUPERSEC, 28, 4)
2102ab638a32SRichard Henderson
2103ab638a32SRichard Henderson FIELD(ID_MMFR4, SPECSEI, 0, 4)
2104ab638a32SRichard Henderson FIELD(ID_MMFR4, AC2, 4, 4)
2105ab638a32SRichard Henderson FIELD(ID_MMFR4, XNX, 8, 4)
2106ab638a32SRichard Henderson FIELD(ID_MMFR4, CNP, 12, 4)
2107ab638a32SRichard Henderson FIELD(ID_MMFR4, HPDS, 16, 4)
2108bd78b6beSLeif Lindholm FIELD(ID_MMFR4, LSM, 20, 4)
2109c42fb26bSRichard Henderson FIELD(ID_MMFR4, CCIDX, 24, 4)
2110bd78b6beSLeif Lindholm FIELD(ID_MMFR4, EVT, 28, 4)
211146f4976fSPeter Maydell
211246f4976fSPeter Maydell FIELD(ID_MMFR5, ETS, 0, 4)
211346f4976fSPeter Maydell FIELD(ID_MMFR5, NTLBPA, 4, 4)
211446f4976fSPeter Maydell
211546f4976fSPeter Maydell FIELD(ID_PFR0, STATE0, 0, 4)
211646f4976fSPeter Maydell FIELD(ID_PFR0, STATE1, 4, 4)
211746f4976fSPeter Maydell FIELD(ID_PFR0, STATE2, 8, 4)
211846f4976fSPeter Maydell FIELD(ID_PFR0, STATE3, 12, 4)
211946f4976fSPeter Maydell FIELD(ID_PFR0, CSV2, 16, 4)
2120dfc523a8SPeter Maydell FIELD(ID_PFR0, AMU, 20, 4)
2121dfc523a8SPeter Maydell FIELD(ID_PFR0, DIT, 24, 4)
2122dfc523a8SPeter Maydell FIELD(ID_PFR0, RAS, 28, 4)
2123dfc523a8SPeter Maydell
2124dfc523a8SPeter Maydell FIELD(ID_PFR1, PROGMOD, 0, 4)
2125dfc523a8SPeter Maydell FIELD(ID_PFR1, SECURITY, 4, 4)
2126dfc523a8SPeter Maydell FIELD(ID_PFR1, MPROGMOD, 8, 4)
2127dfc523a8SPeter Maydell FIELD(ID_PFR1, VIRTUALIZATION, 12, 4)
2128dfc523a8SPeter Maydell FIELD(ID_PFR1, GENTIMER, 16, 4)
2129bd78b6beSLeif Lindholm FIELD(ID_PFR1, SEC_FRAC, 20, 4)
2130bd78b6beSLeif Lindholm FIELD(ID_PFR1, VIRT_FRAC, 24, 4)
2131bd78b6beSLeif Lindholm FIELD(ID_PFR1, GIC, 28, 4)
2132bd78b6beSLeif Lindholm
2133a62e62afSRichard Henderson FIELD(ID_PFR2, CSV3, 0, 4)
2134a62e62afSRichard Henderson FIELD(ID_PFR2, SSBS, 4, 4)
2135a62e62afSRichard Henderson FIELD(ID_PFR2, RAS_FRAC, 8, 4)
2136a62e62afSRichard Henderson
2137a62e62afSRichard Henderson FIELD(ID_AA64ISAR0, AES, 4, 4)
21384d9eb296SPeter Maydell FIELD(ID_AA64ISAR0, SHA1, 8, 4)
2139a62e62afSRichard Henderson FIELD(ID_AA64ISAR0, SHA2, 12, 4)
2140a62e62afSRichard Henderson FIELD(ID_AA64ISAR0, CRC32, 16, 4)
2141a62e62afSRichard Henderson FIELD(ID_AA64ISAR0, ATOMIC, 20, 4)
2142a62e62afSRichard Henderson FIELD(ID_AA64ISAR0, TME, 24, 4)
2143a62e62afSRichard Henderson FIELD(ID_AA64ISAR0, RDM, 28, 4)
2144a62e62afSRichard Henderson FIELD(ID_AA64ISAR0, SHA3, 32, 4)
2145a62e62afSRichard Henderson FIELD(ID_AA64ISAR0, SM3, 36, 4)
2146a62e62afSRichard Henderson FIELD(ID_AA64ISAR0, SM4, 40, 4)
2147a62e62afSRichard Henderson FIELD(ID_AA64ISAR0, DP, 44, 4)
2148a62e62afSRichard Henderson FIELD(ID_AA64ISAR0, FHM, 48, 4)
2149a62e62afSRichard Henderson FIELD(ID_AA64ISAR0, TS, 52, 4)
2150a62e62afSRichard Henderson FIELD(ID_AA64ISAR0, TLB, 56, 4)
2151a62e62afSRichard Henderson FIELD(ID_AA64ISAR0, RNDR, 60, 4)
2152a62e62afSRichard Henderson
2153a62e62afSRichard Henderson FIELD(ID_AA64ISAR1, DPB, 0, 4)
2154a62e62afSRichard Henderson FIELD(ID_AA64ISAR1, APA, 4, 4)
2155a62e62afSRichard Henderson FIELD(ID_AA64ISAR1, API, 8, 4)
2156a62e62afSRichard Henderson FIELD(ID_AA64ISAR1, JSCVT, 12, 4)
2157a62e62afSRichard Henderson FIELD(ID_AA64ISAR1, FCMA, 16, 4)
2158a62e62afSRichard Henderson FIELD(ID_AA64ISAR1, LRCPC, 20, 4)
2159a62e62afSRichard Henderson FIELD(ID_AA64ISAR1, GPA, 24, 4)
216000a92832SLeif Lindholm FIELD(ID_AA64ISAR1, GPI, 28, 4)
216100a92832SLeif Lindholm FIELD(ID_AA64ISAR1, FRINTTS, 32, 4)
216200a92832SLeif Lindholm FIELD(ID_AA64ISAR1, SB, 36, 4)
2163c42fb26bSRichard Henderson FIELD(ID_AA64ISAR1, SPECRES, 40, 4)
2164c42fb26bSRichard Henderson FIELD(ID_AA64ISAR1, BF16, 44, 4)
2165c42fb26bSRichard Henderson FIELD(ID_AA64ISAR1, DGH, 48, 4)
2166c42fb26bSRichard Henderson FIELD(ID_AA64ISAR1, I8MM, 52, 4)
2167c42fb26bSRichard Henderson FIELD(ID_AA64ISAR1, XS, 56, 4)
2168c42fb26bSRichard Henderson FIELD(ID_AA64ISAR1, LS64, 60, 4)
2169c42fb26bSRichard Henderson
2170c42fb26bSRichard Henderson FIELD(ID_AA64ISAR2, WFXT, 0, 4)
2171c42fb26bSRichard Henderson FIELD(ID_AA64ISAR2, RPRES, 4, 4)
2172c42fb26bSRichard Henderson FIELD(ID_AA64ISAR2, GPA3, 8, 4)
21734d9eb296SPeter Maydell FIELD(ID_AA64ISAR2, APA3, 12, 4)
21744d9eb296SPeter Maydell FIELD(ID_AA64ISAR2, MOPS, 16, 4)
21754d9eb296SPeter Maydell FIELD(ID_AA64ISAR2, BC, 20, 4)
21764d9eb296SPeter Maydell FIELD(ID_AA64ISAR2, PAC_FRAC, 24, 4)
21774d9eb296SPeter Maydell FIELD(ID_AA64ISAR2, CLRBHB, 28, 4)
21784d9eb296SPeter Maydell FIELD(ID_AA64ISAR2, SYSREG_128, 32, 4)
21794d9eb296SPeter Maydell FIELD(ID_AA64ISAR2, SYSINSTR_128, 36, 4)
2180a62e62afSRichard Henderson FIELD(ID_AA64ISAR2, PRFMSLC, 40, 4)
2181cd208a1cSRichard Henderson FIELD(ID_AA64ISAR2, RPRFM, 48, 4)
2182cd208a1cSRichard Henderson FIELD(ID_AA64ISAR2, CSSC, 52, 4)
2183cd208a1cSRichard Henderson FIELD(ID_AA64ISAR2, ATS1A, 60, 4)
2184cd208a1cSRichard Henderson
2185cd208a1cSRichard Henderson FIELD(ID_AA64PFR0, EL0, 0, 4)
2186cd208a1cSRichard Henderson FIELD(ID_AA64PFR0, EL1, 4, 4)
2187cd208a1cSRichard Henderson FIELD(ID_AA64PFR0, EL2, 8, 4)
2188cd208a1cSRichard Henderson FIELD(ID_AA64PFR0, EL3, 12, 4)
2189cd208a1cSRichard Henderson FIELD(ID_AA64PFR0, FP, 16, 4)
219000a92832SLeif Lindholm FIELD(ID_AA64PFR0, ADVSIMD, 20, 4)
219100a92832SLeif Lindholm FIELD(ID_AA64PFR0, GIC, 24, 4)
219200a92832SLeif Lindholm FIELD(ID_AA64PFR0, RAS, 28, 4)
219300a92832SLeif Lindholm FIELD(ID_AA64PFR0, SVE, 32, 4)
2194b9f335c2SRichard Henderson FIELD(ID_AA64PFR0, SEL2, 36, 4)
219500a92832SLeif Lindholm FIELD(ID_AA64PFR0, MPAM, 40, 4)
219600a92832SLeif Lindholm FIELD(ID_AA64PFR0, AMU, 44, 4)
2197cd208a1cSRichard Henderson FIELD(ID_AA64PFR0, DIT, 48, 4)
2198be53b6f4SRichard Henderson FIELD(ID_AA64PFR0, RME, 52, 4)
21999a286bcdSLeif Lindholm FIELD(ID_AA64PFR0, CSV2, 56, 4)
2200be53b6f4SRichard Henderson FIELD(ID_AA64PFR0, CSV3, 60, 4)
2201be53b6f4SRichard Henderson
220200a92832SLeif Lindholm FIELD(ID_AA64PFR1, BT, 0, 4)
2203c42fb26bSRichard Henderson FIELD(ID_AA64PFR1, SSBS, 4, 4)
2204c42fb26bSRichard Henderson FIELD(ID_AA64PFR1, MTE, 8, 4)
2205c42fb26bSRichard Henderson FIELD(ID_AA64PFR1, RAS_FRAC, 12, 4)
2206c42fb26bSRichard Henderson FIELD(ID_AA64PFR1, MPAM_FRAC, 16, 4)
22074d9eb296SPeter Maydell FIELD(ID_AA64PFR1, SME, 24, 4)
22084d9eb296SPeter Maydell FIELD(ID_AA64PFR1, RNDR_TRAP, 28, 4)
22094d9eb296SPeter Maydell FIELD(ID_AA64PFR1, CSV2_FRAC, 32, 4)
22104d9eb296SPeter Maydell FIELD(ID_AA64PFR1, NMI, 36, 4)
22114d9eb296SPeter Maydell FIELD(ID_AA64PFR1, MTE_FRAC, 40, 4)
22124d9eb296SPeter Maydell FIELD(ID_AA64PFR1, GCS, 44, 4)
2213be53b6f4SRichard Henderson FIELD(ID_AA64PFR1, THE, 48, 4)
22143dc91ddbSPeter Maydell FIELD(ID_AA64PFR1, MTEX, 52, 4)
22153dc91ddbSPeter Maydell FIELD(ID_AA64PFR1, DF2, 56, 4)
22163dc91ddbSPeter Maydell FIELD(ID_AA64PFR1, PFAR, 60, 4)
22173dc91ddbSPeter Maydell
22183dc91ddbSPeter Maydell FIELD(ID_AA64MMFR0, PARANGE, 0, 4)
22193dc91ddbSPeter Maydell FIELD(ID_AA64MMFR0, ASIDBITS, 4, 4)
22203dc91ddbSPeter Maydell FIELD(ID_AA64MMFR0, BIGEND, 8, 4)
22213dc91ddbSPeter Maydell FIELD(ID_AA64MMFR0, SNSMEM, 12, 4)
22223dc91ddbSPeter Maydell FIELD(ID_AA64MMFR0, BIGENDEL0, 16, 4)
22233dc91ddbSPeter Maydell FIELD(ID_AA64MMFR0, TGRAN16, 20, 4)
22243dc91ddbSPeter Maydell FIELD(ID_AA64MMFR0, TGRAN64, 24, 4)
22253dc91ddbSPeter Maydell FIELD(ID_AA64MMFR0, TGRAN4, 28, 4)
222600a92832SLeif Lindholm FIELD(ID_AA64MMFR0, TGRAN16_2, 32, 4)
222700a92832SLeif Lindholm FIELD(ID_AA64MMFR0, TGRAN64_2, 36, 4)
22283dc91ddbSPeter Maydell FIELD(ID_AA64MMFR0, TGRAN4_2, 40, 4)
22293dc91ddbSPeter Maydell FIELD(ID_AA64MMFR0, EXS, 44, 4)
22303dc91ddbSPeter Maydell FIELD(ID_AA64MMFR0, FGT, 56, 4)
22313dc91ddbSPeter Maydell FIELD(ID_AA64MMFR0, ECV, 60, 4)
22323dc91ddbSPeter Maydell
22333dc91ddbSPeter Maydell FIELD(ID_AA64MMFR1, HAFDBS, 0, 4)
22343dc91ddbSPeter Maydell FIELD(ID_AA64MMFR1, VMIDBITS, 4, 4)
22353dc91ddbSPeter Maydell FIELD(ID_AA64MMFR1, VH, 8, 4)
22363dc91ddbSPeter Maydell FIELD(ID_AA64MMFR1, HPDS, 12, 4)
223700a92832SLeif Lindholm FIELD(ID_AA64MMFR1, LO, 16, 4)
223800a92832SLeif Lindholm FIELD(ID_AA64MMFR1, PAN, 20, 4)
2239c42fb26bSRichard Henderson FIELD(ID_AA64MMFR1, SPECSEI, 24, 4)
2240c42fb26bSRichard Henderson FIELD(ID_AA64MMFR1, XNX, 28, 4)
2241c42fb26bSRichard Henderson FIELD(ID_AA64MMFR1, TWED, 32, 4)
2242c42fb26bSRichard Henderson FIELD(ID_AA64MMFR1, ETS, 36, 4)
2243c42fb26bSRichard Henderson FIELD(ID_AA64MMFR1, HCX, 40, 4)
22444d9eb296SPeter Maydell FIELD(ID_AA64MMFR1, AFP, 44, 4)
22453dc91ddbSPeter Maydell FIELD(ID_AA64MMFR1, NTLBPA, 48, 4)
224664761e10SRichard Henderson FIELD(ID_AA64MMFR1, TIDCP1, 52, 4)
224764761e10SRichard Henderson FIELD(ID_AA64MMFR1, CMOW, 56, 4)
224864761e10SRichard Henderson FIELD(ID_AA64MMFR1, ECBHB, 60, 4)
224964761e10SRichard Henderson
225064761e10SRichard Henderson FIELD(ID_AA64MMFR2, CNP, 0, 4)
225164761e10SRichard Henderson FIELD(ID_AA64MMFR2, UAO, 4, 4)
225264761e10SRichard Henderson FIELD(ID_AA64MMFR2, LSM, 8, 4)
225364761e10SRichard Henderson FIELD(ID_AA64MMFR2, IESB, 12, 4)
225464761e10SRichard Henderson FIELD(ID_AA64MMFR2, VARANGE, 16, 4)
225564761e10SRichard Henderson FIELD(ID_AA64MMFR2, CCIDX, 20, 4)
225664761e10SRichard Henderson FIELD(ID_AA64MMFR2, NV, 24, 4)
225764761e10SRichard Henderson FIELD(ID_AA64MMFR2, ST, 28, 4)
225864761e10SRichard Henderson FIELD(ID_AA64MMFR2, AT, 32, 4)
225964761e10SRichard Henderson FIELD(ID_AA64MMFR2, IDS, 36, 4)
226064761e10SRichard Henderson FIELD(ID_AA64MMFR2, FWB, 40, 4)
226164761e10SRichard Henderson FIELD(ID_AA64MMFR2, TTL, 48, 4)
2262f7ddd7b6SPeter Maydell FIELD(ID_AA64MMFR2, BBM, 52, 4)
2263f7ddd7b6SPeter Maydell FIELD(ID_AA64MMFR2, EVT, 56, 4)
2264f7ddd7b6SPeter Maydell FIELD(ID_AA64MMFR2, E0PD, 60, 4)
2265f7ddd7b6SPeter Maydell
2266f7ddd7b6SPeter Maydell FIELD(ID_AA64MMFR3, TCRX, 0, 4)
2267f7ddd7b6SPeter Maydell FIELD(ID_AA64MMFR3, SCTLRX, 4, 4)
2268f7ddd7b6SPeter Maydell FIELD(ID_AA64MMFR3, S1PIE, 8, 4)
2269f7ddd7b6SPeter Maydell FIELD(ID_AA64MMFR3, S2PIE, 12, 4)
2270f7ddd7b6SPeter Maydell FIELD(ID_AA64MMFR3, S1POE, 16, 4)
2271f7ddd7b6SPeter Maydell FIELD(ID_AA64MMFR3, S2POE, 20, 4)
2272f7ddd7b6SPeter Maydell FIELD(ID_AA64MMFR3, AIE, 24, 4)
2273f7ddd7b6SPeter Maydell FIELD(ID_AA64MMFR3, MEC, 28, 4)
2274f7ddd7b6SPeter Maydell FIELD(ID_AA64MMFR3, D128, 32, 4)
2275f7ddd7b6SPeter Maydell FIELD(ID_AA64MMFR3, D128_2, 36, 4)
2276f7ddd7b6SPeter Maydell FIELD(ID_AA64MMFR3, SNERR, 40, 4)
2277f7ddd7b6SPeter Maydell FIELD(ID_AA64MMFR3, ANERR, 44, 4)
2278ceb2744bSPeter Maydell FIELD(ID_AA64MMFR3, SDERR, 52, 4)
2279ceb2744bSPeter Maydell FIELD(ID_AA64MMFR3, ADERR, 56, 4)
2280ceb2744bSPeter Maydell FIELD(ID_AA64MMFR3, SPEC_FPACC, 60, 4)
2281ceb2744bSPeter Maydell
22824d9eb296SPeter Maydell FIELD(ID_AA64DFR0, DEBUGVER, 0, 4)
2283ceb2744bSPeter Maydell FIELD(ID_AA64DFR0, TRACEVER, 4, 4)
22844d9eb296SPeter Maydell FIELD(ID_AA64DFR0, PMUVER, 8, 4)
2285ceb2744bSPeter Maydell FIELD(ID_AA64DFR0, BRPS, 12, 4)
2286ceb2744bSPeter Maydell FIELD(ID_AA64DFR0, PMSS, 16, 4)
2287ceb2744bSPeter Maydell FIELD(ID_AA64DFR0, WRPS, 20, 4)
2288ceb2744bSPeter Maydell FIELD(ID_AA64DFR0, SEBEP, 24, 4)
2289c42fb26bSRichard Henderson FIELD(ID_AA64DFR0, CTX_CMPS, 28, 4)
229000a92832SLeif Lindholm FIELD(ID_AA64DFR0, PMSVER, 32, 4)
2291c42fb26bSRichard Henderson FIELD(ID_AA64DFR0, DOUBLELOCK, 36, 4)
22924d9eb296SPeter Maydell FIELD(ID_AA64DFR0, TRACEFILT, 40, 4)
2293c42fb26bSRichard Henderson FIELD(ID_AA64DFR0, TRACEBUFFER, 44, 4)
2294ceb2744bSPeter Maydell FIELD(ID_AA64DFR0, MTPMU, 48, 4)
22952dc10fa2SRichard Henderson FIELD(ID_AA64DFR0, BRBE, 52, 4)
22962dc10fa2SRichard Henderson FIELD(ID_AA64DFR0, EXTTRCBUFF, 56, 4)
22972dc10fa2SRichard Henderson FIELD(ID_AA64DFR0, HPMN0, 60, 4)
22982dc10fa2SRichard Henderson
22994d9eb296SPeter Maydell FIELD(ID_AA64ZFR0, SVEVER, 0, 4)
23002dc10fa2SRichard Henderson FIELD(ID_AA64ZFR0, AES, 4, 4)
23012dc10fa2SRichard Henderson FIELD(ID_AA64ZFR0, BITPERM, 16, 4)
23022dc10fa2SRichard Henderson FIELD(ID_AA64ZFR0, BFLOAT16, 20, 4)
23032dc10fa2SRichard Henderson FIELD(ID_AA64ZFR0, B16B16, 24, 4)
23042dc10fa2SRichard Henderson FIELD(ID_AA64ZFR0, SHA3, 32, 4)
23052dc10fa2SRichard Henderson FIELD(ID_AA64ZFR0, SM4, 40, 4)
2306414c54d5SRichard Henderson FIELD(ID_AA64ZFR0, I8MM, 44, 4)
23074d9eb296SPeter Maydell FIELD(ID_AA64ZFR0, F32MM, 52, 4)
2308414c54d5SRichard Henderson FIELD(ID_AA64ZFR0, F64MM, 56, 4)
2309414c54d5SRichard Henderson
2310414c54d5SRichard Henderson FIELD(ID_AA64SMFR0, F32F32, 32, 1)
23114d9eb296SPeter Maydell FIELD(ID_AA64SMFR0, BI32I32, 33, 1)
23124d9eb296SPeter Maydell FIELD(ID_AA64SMFR0, B16F32, 34, 1)
23134d9eb296SPeter Maydell FIELD(ID_AA64SMFR0, F16F32, 35, 1)
2314414c54d5SRichard Henderson FIELD(ID_AA64SMFR0, I8I32, 36, 4)
2315414c54d5SRichard Henderson FIELD(ID_AA64SMFR0, F16F16, 42, 1)
2316414c54d5SRichard Henderson FIELD(ID_AA64SMFR0, B16B16, 43, 1)
2317414c54d5SRichard Henderson FIELD(ID_AA64SMFR0, I16I32, 44, 4)
2318414c54d5SRichard Henderson FIELD(ID_AA64SMFR0, F64F64, 48, 1)
2319beceb99cSAaron Lindsay FIELD(ID_AA64SMFR0, I16I64, 52, 4)
2320beceb99cSAaron Lindsay FIELD(ID_AA64SMFR0, SMEVER, 56, 4)
2321beceb99cSAaron Lindsay FIELD(ID_AA64SMFR0, FA64, 63, 1)
2322beceb99cSAaron Lindsay
2323beceb99cSAaron Lindsay FIELD(ID_DFR0, COPDBG, 0, 4)
2324beceb99cSAaron Lindsay FIELD(ID_DFR0, COPSDBG, 4, 4)
2325beceb99cSAaron Lindsay FIELD(ID_DFR0, MMAPDBG, 8, 4)
2326beceb99cSAaron Lindsay FIELD(ID_DFR0, COPTRC, 12, 4)
2327beceb99cSAaron Lindsay FIELD(ID_DFR0, MMAPTRC, 16, 4)
2328bd78b6beSLeif Lindholm FIELD(ID_DFR0, MPROFDBG, 20, 4)
2329c42fb26bSRichard Henderson FIELD(ID_DFR0, PERFMON, 24, 4)
2330bd78b6beSLeif Lindholm FIELD(ID_DFR0, TRACEFILT, 28, 4)
233188ce6c6eSPeter Maydell
233288ce6c6eSPeter Maydell FIELD(ID_DFR1, MTPMU, 0, 4)
233388ce6c6eSPeter Maydell FIELD(ID_DFR1, HPMN0, 4, 4)
233488ce6c6eSPeter Maydell
233588ce6c6eSPeter Maydell FIELD(DBGDIDR, SE_IMP, 12, 1)
233688ce6c6eSPeter Maydell FIELD(DBGDIDR, NSUHD_IMP, 14, 1)
233788ce6c6eSPeter Maydell FIELD(DBGDIDR, VERSION, 16, 4)
2338f94a6df5SPeter Maydell FIELD(DBGDIDR, CTX_CMPS, 20, 4)
2339f94a6df5SPeter Maydell FIELD(DBGDIDR, BRPS, 24, 4)
2340f94a6df5SPeter Maydell FIELD(DBGDIDR, WRPS, 28, 4)
2341f94a6df5SPeter Maydell
2342f94a6df5SPeter Maydell FIELD(DBGDEVID, PCSAMPLE, 0, 4)
2343f94a6df5SPeter Maydell FIELD(DBGDEVID, WPADDRMASK, 4, 4)
2344f94a6df5SPeter Maydell FIELD(DBGDEVID, BPADDRMASK, 8, 4)
2345f94a6df5SPeter Maydell FIELD(DBGDEVID, VECTORCATCH, 12, 4)
2346f94a6df5SPeter Maydell FIELD(DBGDEVID, VIRTEXTNS, 16, 4)
2347c5f9e8bbSGustavo Romero FIELD(DBGDEVID, DOUBLELOCK, 20, 4)
2348c5f9e8bbSGustavo Romero FIELD(DBGDEVID, AUXREGS, 24, 4)
2349602f6e42SPeter Maydell FIELD(DBGDEVID, CIDMASK, 28, 4)
2350602f6e42SPeter Maydell
2351602f6e42SPeter Maydell FIELD(DBGDEVID1, PCSROFFSET, 0, 4)
2352602f6e42SPeter Maydell
2353602f6e42SPeter Maydell FIELD(MVFR0, SIMDREG, 0, 4)
2354602f6e42SPeter Maydell FIELD(MVFR0, FPSP, 4, 4)
2355602f6e42SPeter Maydell FIELD(MVFR0, FPDP, 8, 4)
2356602f6e42SPeter Maydell FIELD(MVFR0, FPTRAP, 12, 4)
2357602f6e42SPeter Maydell FIELD(MVFR0, FPDIVIDE, 16, 4)
2358602f6e42SPeter Maydell FIELD(MVFR0, FPSQRT, 20, 4)
2359602f6e42SPeter Maydell FIELD(MVFR0, FPSHVEC, 24, 4)
2360dfc523a8SPeter Maydell FIELD(MVFR0, FPROUND, 28, 4)
2361dfc523a8SPeter Maydell
2362dfc523a8SPeter Maydell FIELD(MVFR1, FPFTZ, 0, 4)
2363dfc523a8SPeter Maydell FIELD(MVFR1, FPDNAN, 4, 4)
2364dfc523a8SPeter Maydell FIELD(MVFR1, SIMDLS, 8, 4) /* A-profile only */
2365dfc523a8SPeter Maydell FIELD(MVFR1, SIMDINT, 12, 4) /* A-profile only */
2366602f6e42SPeter Maydell FIELD(MVFR1, SIMDSP, 16, 4) /* A-profile only */
2367602f6e42SPeter Maydell FIELD(MVFR1, SIMDHP, 20, 4) /* A-profile only */
2368602f6e42SPeter Maydell FIELD(MVFR1, MVE, 8, 4) /* M-profile only */
2369602f6e42SPeter Maydell FIELD(MVFR1, FP16, 20, 4) /* M-profile only */
2370602f6e42SPeter Maydell FIELD(MVFR1, FPHP, 24, 4)
2371602f6e42SPeter Maydell FIELD(MVFR1, SIMDFMAC, 28, 4)
2372ef1febe7SRichard Henderson
2373ef1febe7SRichard Henderson FIELD(MVFR2, SIMDMISC, 0, 4)
2374ef1febe7SRichard Henderson FIELD(MVFR2, FPMISC, 4, 4)
2375ef1febe7SRichard Henderson
2376ef1febe7SRichard Henderson FIELD(GPCCR, PPS, 0, 3)
2377ef1febe7SRichard Henderson FIELD(GPCCR, IRGN, 8, 2)
2378ef1febe7SRichard Henderson FIELD(GPCCR, ORGN, 10, 2)
2379ef1febe7SRichard Henderson FIELD(GPCCR, SH, 12, 2)
2380ef1febe7SRichard Henderson FIELD(GPCCR, PGS, 14, 2)
2381ef1febe7SRichard Henderson FIELD(GPCCR, GPC, 16, 1)
2382ef1febe7SRichard Henderson FIELD(GPCCR, GPCP, 17, 1)
2383ef1febe7SRichard Henderson FIELD(GPCCR, L0GPTSZ, 20, 4)
2384ef1febe7SRichard Henderson
238543bbce7fSPeter Maydell FIELD(MFAR, FPA, 12, 40)
238643bbce7fSPeter Maydell FIELD(MFAR, NSE, 62, 1)
2387fcf5ef2aSThomas Huth FIELD(MFAR, NS, 63, 1)
2388fcf5ef2aSThomas Huth
2389fcf5ef2aSThomas Huth QEMU_BUILD_BUG_ON(ARRAY_SIZE(((ARMCPU *)0)->ccsidr) <= R_V7M_CSSELR_INDEX_MASK);
2390fcf5ef2aSThomas Huth
/* If adding a feature bit which corresponds to a Linux ELF
 * HWCAP bit, remember to update the feature-bit-to-hwcap
 * mapping in linux-user/elfload.c:get_elf_hwcap().
 *
 * Each enumerator is a bit index into CPUARMState::features (tested via
 * arm_feature()), so the order of existing entries must not change.
 */
enum arm_features {
    ARM_FEATURE_AUXCR,  /* ARM1026 Auxiliary control register.  */
    ARM_FEATURE_XSCALE, /* Intel XScale extensions.  */
    ARM_FEATURE_IWMMXT, /* Intel iwMMXt extension.  */
    ARM_FEATURE_V6, /* ARMv6 architecture */
    ARM_FEATURE_V6K, /* ARMv6K (v6 plus kernel extensions) */
    ARM_FEATURE_V7, /* ARMv7 architecture */
    ARM_FEATURE_THUMB2, /* Thumb-2 instruction set */
    ARM_FEATURE_PMSA,   /* no MMU; may have Memory Protection Unit */
    ARM_FEATURE_NEON, /* Advanced SIMD (Neon) */
    ARM_FEATURE_M, /* Microcontroller profile.  */
    ARM_FEATURE_OMAPCP, /* OMAP specific CP15 ops handling.  */
    ARM_FEATURE_THUMB2EE, /* ThumbEE (Jazelle RCT) */
    ARM_FEATURE_V7MP,    /* v7 Multiprocessing Extensions */
    ARM_FEATURE_V7VE, /* v7 Virtualization Extensions (non-EL2 parts) */
    ARM_FEATURE_V4T, /* ARMv4 with Thumb */
    ARM_FEATURE_V5, /* ARMv5 architecture */
    ARM_FEATURE_STRONGARM, /* DEC/Intel StrongARM */
    ARM_FEATURE_VAPA, /* cp15 VA to PA lookups */
    ARM_FEATURE_GENERIC_TIMER, /* has the ARM generic timers */
    ARM_FEATURE_MVFR, /* Media and VFP Feature Registers 0 and 1 */
    ARM_FEATURE_DUMMY_C15_REGS, /* RAZ/WI all of cp15 crn=15 */
    ARM_FEATURE_CACHE_TEST_CLEAN, /* 926/1026 style test-and-clean ops */
    ARM_FEATURE_CACHE_DIRTY_REG, /* 1136/1176 cache dirty status register */
    ARM_FEATURE_CACHE_BLOCK_OPS, /* v6 optional cache block operations */
    ARM_FEATURE_MPIDR, /* has cp15 MPIDR */
    ARM_FEATURE_LPAE, /* has Large Physical Address Extension */
    ARM_FEATURE_V8, /* ARMv8 architecture */
    ARM_FEATURE_AARCH64, /* supports 64 bit mode */
    ARM_FEATURE_CBAR, /* has cp15 CBAR */
    ARM_FEATURE_CBAR_RO, /* has cp15 CBAR and it is read-only */
    ARM_FEATURE_EL2, /* has EL2 Virtualization support */
    ARM_FEATURE_EL3, /* has EL3 Secure monitor support */
    ARM_FEATURE_THUMB_DSP, /* DSP insns supported in the Thumb encodings */
    ARM_FEATURE_PMU, /* has PMU support */
    ARM_FEATURE_VBAR, /* has cp15 VBAR */
    ARM_FEATURE_M_SECURITY, /* M profile Security Extension */
    ARM_FEATURE_M_MAIN, /* M profile Main Extension */
    ARM_FEATURE_V8_1M, /* M profile extras only in v8.1M and later */
    /*
     * ARM_FEATURE_BACKCOMPAT_CNTFRQ makes the CPU default cntfrq be 62.5MHz
     * if the board doesn't set a value, instead of 1GHz. It is for backwards
     * compatibility and used only with CPU definitions that were already
     * in QEMU before we changed the default. It should not be set on any
     * CPU types added in future.
     */
    ARM_FEATURE_BACKCOMPAT_CNTFRQ, /* 62.5MHz timer default */
};
2443fcf5ef2aSThomas Huth
/*
 * Test whether feature bit @feature is present in @env->features.
 * Returns nonzero if the feature is set, zero otherwise.
 */
static inline int arm_feature(CPUARMState *env, int feature)
{
    uint64_t mask = 1ULL << feature;

    return (env->features & mask) ? 1 : 0;
}
24485d28ac0cSRichard Henderson
/*
 * Finalize the CPU's configured feature properties; problems are
 * reported through @errp.
 */
void arm_cpu_finalize_features(ARMCPU *cpu, Error **errp);

/*
 * ARM v9 security states.
 * The ordering of the enumeration corresponds to the low 2 bits
 * of the GPI value, and (except for Root) the concat of NSE:NS.
 * Do not renumber: the values are architecturally meaningful.
 */

typedef enum ARMSecuritySpace {
    ARMSS_Secure = 0,
    ARMSS_NonSecure = 1,
    ARMSS_Root = 2,
    ARMSS_Realm = 3,
} ARMSecuritySpace;
24635d28ac0cSRichard Henderson
24645d28ac0cSRichard Henderson /* Return true if @space is secure, in the pre-v9 sense. */
arm_space_is_secure(ARMSecuritySpace space)24655d28ac0cSRichard Henderson static inline bool arm_space_is_secure(ARMSecuritySpace space)
24665d28ac0cSRichard Henderson {
24675d28ac0cSRichard Henderson return space == ARMSS_Secure || space == ARMSS_Root;
24685d28ac0cSRichard Henderson }
24695d28ac0cSRichard Henderson
24705d28ac0cSRichard Henderson /* Return the ARMSecuritySpace for @secure, assuming !RME or EL[0-2]. */
arm_secure_to_space(bool secure)24715d28ac0cSRichard Henderson static inline ARMSecuritySpace arm_secure_to_space(bool secure)
24725d28ac0cSRichard Henderson {
24735d28ac0cSRichard Henderson return secure ? ARMSS_Secure : ARMSS_NonSecure;
24745d28ac0cSRichard Henderson }
24755d28ac0cSRichard Henderson
24765d28ac0cSRichard Henderson #if !defined(CONFIG_USER_ONLY)
24775d28ac0cSRichard Henderson /**
24785d28ac0cSRichard Henderson * arm_security_space_below_el3:
24795d28ac0cSRichard Henderson * @env: cpu context
24805d28ac0cSRichard Henderson *
24815d28ac0cSRichard Henderson * Return the security space of exception levels below EL3, following
24825d28ac0cSRichard Henderson * an exception return to those levels. Unlike arm_security_space,
24835d28ac0cSRichard Henderson * this doesn't care about the current EL.
24845d28ac0cSRichard Henderson */
24855d28ac0cSRichard Henderson ARMSecuritySpace arm_security_space_below_el3(CPUARMState *env);
24865d28ac0cSRichard Henderson
2487fcc7404eSRichard Henderson /**
24885d28ac0cSRichard Henderson * arm_is_secure_below_el3:
2489fcf5ef2aSThomas Huth * @env: cpu context
2490fcf5ef2aSThomas Huth *
2491fcf5ef2aSThomas Huth * Return true if exception levels below EL3 are in secure state,
24925d28ac0cSRichard Henderson * or would be following an exception return to those levels.
24935d28ac0cSRichard Henderson */
arm_is_secure_below_el3(CPUARMState * env)2494fcf5ef2aSThomas Huth static inline bool arm_is_secure_below_el3(CPUARMState *env)
2495fcf5ef2aSThomas Huth {
2496fcf5ef2aSThomas Huth ARMSecuritySpace ss = arm_security_space_below_el3(env);
2497fcf5ef2aSThomas Huth return ss == ARMSS_Secure;
2498fcf5ef2aSThomas Huth }
2499fcc7404eSRichard Henderson
2500fcf5ef2aSThomas Huth /* Return true if the CPU is AArch64 EL3 or AArch32 Mon */
arm_is_el3_or_mon(CPUARMState * env)2501fcf5ef2aSThomas Huth static inline bool arm_is_el3_or_mon(CPUARMState *env)
2502fcf5ef2aSThomas Huth {
2503fcf5ef2aSThomas Huth assert(!arm_feature(env, ARM_FEATURE_M));
2504fcf5ef2aSThomas Huth if (arm_feature(env, ARM_FEATURE_EL3)) {
2505fcf5ef2aSThomas Huth if (is_a64(env) && extract32(env->pstate, 2, 2) == 3) {
2506fcf5ef2aSThomas Huth /* CPU currently in AArch64 state and EL3 */
2507fcf5ef2aSThomas Huth return true;
2508fcf5ef2aSThomas Huth } else if (!is_a64(env) &&
2509fcf5ef2aSThomas Huth (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
2510fcf5ef2aSThomas Huth /* CPU currently in AArch32 state and monitor mode */
2511fcf5ef2aSThomas Huth return true;
2512fcf5ef2aSThomas Huth }
25135d28ac0cSRichard Henderson }
25145d28ac0cSRichard Henderson return false;
25155d28ac0cSRichard Henderson }
25165d28ac0cSRichard Henderson
25175d28ac0cSRichard Henderson /**
25185d28ac0cSRichard Henderson * arm_security_space:
25195d28ac0cSRichard Henderson * @env: cpu context
25205d28ac0cSRichard Henderson *
25215d28ac0cSRichard Henderson * Return the current security space of the cpu.
25225d28ac0cSRichard Henderson */
25235d28ac0cSRichard Henderson ARMSecuritySpace arm_security_space(CPUARMState *env);
25245d28ac0cSRichard Henderson
25255d28ac0cSRichard Henderson /**
25265d28ac0cSRichard Henderson * arm_is_secure:
2527fcf5ef2aSThomas Huth * @env: cpu context
2528fcf5ef2aSThomas Huth *
25295d28ac0cSRichard Henderson * Return true if the processor is in secure state.
2530fcf5ef2aSThomas Huth */
arm_is_secure(CPUARMState * env)2531fcf5ef2aSThomas Huth static inline bool arm_is_secure(CPUARMState *env)
2532f3ee5160SRémi Denis-Courmont {
2533f3ee5160SRémi Denis-Courmont return arm_space_is_secure(arm_security_space(env));
25344477020dSPeter Maydell }
2535f3ee5160SRémi Denis-Courmont
25364477020dSPeter Maydell /*
25374477020dSPeter Maydell * Return true if the current security state has AArch64 EL2 or AArch32 Hyp.
2538b74c0443SRichard Henderson * This corresponds to the pseudocode EL2Enabled().
25394477020dSPeter Maydell */
arm_is_el2_enabled_secstate(CPUARMState * env,ARMSecuritySpace space)2540b74c0443SRichard Henderson static inline bool arm_is_el2_enabled_secstate(CPUARMState *env,
25414477020dSPeter Maydell ARMSecuritySpace space)
2542b74c0443SRichard Henderson {
2543b74c0443SRichard Henderson assert(space != ARMSS_Root);
2544f3ee5160SRémi Denis-Courmont return arm_feature(env, ARM_FEATURE_EL2)
2545f3ee5160SRémi Denis-Courmont && (space != ARMSS_Secure || (env->cp15.scr_el3 & SCR_EEL2));
25464477020dSPeter Maydell }
2547f3ee5160SRémi Denis-Courmont
/* As arm_is_el2_enabled_secstate(), for the current security space. */
static inline bool arm_is_el2_enabled(CPUARMState *env)
{
    ARMSecuritySpace ss = arm_security_space_below_el3(env);

    return arm_is_el2_enabled_secstate(env, ss);
}
25525d28ac0cSRichard Henderson
#else
/*
 * CONFIG_USER_ONLY stubs: user-mode emulation models only non-secure EL0,
 * so all of these queries have constant answers.
 */
static inline ARMSecuritySpace arm_security_space_below_el3(CPUARMState *env)
{
    return ARMSS_NonSecure;
}

static inline bool arm_is_secure_below_el3(CPUARMState *env)
{
    return false;
}

static inline ARMSecuritySpace arm_security_space(CPUARMState *env)
{
    return ARMSS_NonSecure;
}

static inline bool arm_is_secure(CPUARMState *env)
{
    return false;
}

static inline bool arm_is_el2_enabled_secstate(CPUARMState *env,
                                               ARMSecuritySpace space)
{
    return false;
}

static inline bool arm_is_el2_enabled(CPUARMState *env)
{
    return false;
}
#endif
2585f7778444SRichard Henderson
2586f7778444SRichard Henderson /**
2587f7778444SRichard Henderson * arm_hcr_el2_eff(): Return the effective value of HCR_EL2.
25882d12bb96SPeter Maydell * E.g. when in secure state, fields in HCR_EL2 are suppressed,
2589f7778444SRichard Henderson * "for all purposes other than a direct read or write access of HCR_EL2."
25905814d587SRichard Henderson * Not included here is HCR_RW.
2591f7778444SRichard Henderson */
2592fcf5ef2aSThomas Huth uint64_t arm_hcr_el2_eff_secstate(CPUARMState *env, ARMSecuritySpace space);
2593fcf5ef2aSThomas Huth uint64_t arm_hcr_el2_eff(CPUARMState *env);
2594fcf5ef2aSThomas Huth uint64_t arm_hcrx_el2_eff(CPUARMState *env);
2595fcf5ef2aSThomas Huth
2596fcf5ef2aSThomas Huth /* Return true if the specified exception level is running in AArch64 state. */
arm_el_is_aa64(CPUARMState * env,int el)2597fcf5ef2aSThomas Huth static inline bool arm_el_is_aa64(CPUARMState *env, int el)
2598fcf5ef2aSThomas Huth {
2599fcf5ef2aSThomas Huth /* This isn't valid for EL0 (if we're in EL0, is_a64() is what you want,
2600fcf5ef2aSThomas Huth * and if we're not in EL0 then the state of EL0 isn't well defined.)
2601fcf5ef2aSThomas Huth */
2602fcf5ef2aSThomas Huth assert(el >= 1 && el <= 3);
2603fcf5ef2aSThomas Huth bool aa64 = arm_feature(env, ARM_FEATURE_AARCH64);
2604fcf5ef2aSThomas Huth
2605fcf5ef2aSThomas Huth /* The highest exception level is always at the maximum supported
2606fcf5ef2aSThomas Huth * register width, and then lower levels have a register width controlled
2607fcf5ef2aSThomas Huth * by bits in the SCR or HCR registers.
2608fcf5ef2aSThomas Huth */
2609926c1b97SRémi Denis-Courmont if (el == 3) {
2610926c1b97SRémi Denis-Courmont return aa64;
2611fcf5ef2aSThomas Huth }
2612fcf5ef2aSThomas Huth
2613fcf5ef2aSThomas Huth if (arm_feature(env, ARM_FEATURE_EL3) &&
2614fcf5ef2aSThomas Huth ((env->cp15.scr_el3 & SCR_NS) || !(env->cp15.scr_el3 & SCR_EEL2))) {
2615fcf5ef2aSThomas Huth aa64 = aa64 && (env->cp15.scr_el3 & SCR_RW);
2616fcf5ef2aSThomas Huth }
2617fcf5ef2aSThomas Huth
2618e6ef0169SRémi Denis-Courmont if (el == 2) {
2619fcf5ef2aSThomas Huth return aa64;
2620fcf5ef2aSThomas Huth }
2621fcf5ef2aSThomas Huth
2622fcf5ef2aSThomas Huth if (arm_is_el2_enabled(env)) {
2623fcf5ef2aSThomas Huth aa64 = aa64 && (env->cp15.hcr_el2 & HCR_RW);
2624fcf5ef2aSThomas Huth }
2625673d8215SMichael Tokarev
2626fcf5ef2aSThomas Huth return aa64;
2627fcf5ef2aSThomas Huth }
2628fcf5ef2aSThomas Huth
2629fcf5ef2aSThomas Huth /* Function for determining whether guest cp register reads and writes should
2630fcf5ef2aSThomas Huth * access the secure or non-secure bank of a cp register. When EL3 is
2631fcf5ef2aSThomas Huth * operating in AArch32 state, the NS-bit determines whether the secure
2632fcf5ef2aSThomas Huth * instance of a cp register should be used. When EL3 is AArch64 (or if
2633fcf5ef2aSThomas Huth * it doesn't exist at all) then there is no register banking, and all
2634fcf5ef2aSThomas Huth * accesses are to the non-secure version.
2635fcf5ef2aSThomas Huth */
access_secure_reg(CPUARMState * env)2636fcf5ef2aSThomas Huth static inline bool access_secure_reg(CPUARMState *env)
2637fcf5ef2aSThomas Huth {
2638fcf5ef2aSThomas Huth bool ret = (arm_feature(env, ARM_FEATURE_EL3) &&
2639fcf5ef2aSThomas Huth !arm_el_is_aa64(env, 3) &&
2640fcf5ef2aSThomas Huth !(env->cp15.scr_el3 & SCR_NS));
2641fcf5ef2aSThomas Huth
2642fcf5ef2aSThomas Huth return ret;
2643fcf5ef2aSThomas Huth }
2644fcf5ef2aSThomas Huth
/* Macros for accessing a specified CP register bank */
#define A32_BANKED_REG_GET(_env, _regname, _secure)    \
    ((_secure) ? (_env)->cp15._regname##_s : (_env)->cp15._regname##_ns)

/* Store @_val into the secure or non-secure instance of cp15._regname. */
#define A32_BANKED_REG_SET(_env, _regname, _secure, _val)   \
    do {                                                \
        if (_secure) {                                  \
            (_env)->cp15._regname##_s = (_val);         \
        } else {                                        \
            (_env)->cp15._regname##_ns = (_val);        \
        }                                               \
    } while (0)

/* Macros for automatically accessing a specific CP register bank depending on
 * the current secure state of the system. These macros are not intended for
 * supporting instruction translation reads/writes as these are dependent
 * solely on the SCR.NS bit and not the mode.
 *
 * The bank selector is "secure and EL3 is AArch32" (see access_secure_reg()).
 */
#define A32_BANKED_CURRENT_REG_GET(_env, _regname)    \
    A32_BANKED_REG_GET((_env), _regname,              \
                       (arm_is_secure(_env) && !arm_el_is_aa64((_env), 3)))

#define A32_BANKED_CURRENT_REG_SET(_env, _regname, _val)                    \
    A32_BANKED_REG_SET((_env), _regname,                                    \
                       (arm_is_secure(_env) && !arm_el_is_aa64((_env), 3)), \
                       (_val))
2671fcf5ef2aSThomas Huth
2672fcf5ef2aSThomas Huth uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
2673fcf5ef2aSThomas Huth uint32_t cur_el, bool secure);
2674fcf5ef2aSThomas Huth
2675fcf5ef2aSThomas Huth /* Return the highest implemented Exception Level */
arm_highest_el(CPUARMState * env)2676fcf5ef2aSThomas Huth static inline int arm_highest_el(CPUARMState *env)
2677fcf5ef2aSThomas Huth {
2678fcf5ef2aSThomas Huth if (arm_feature(env, ARM_FEATURE_EL3)) {
2679fcf5ef2aSThomas Huth return 3;
2680fcf5ef2aSThomas Huth }
2681fcf5ef2aSThomas Huth if (arm_feature(env, ARM_FEATURE_EL2)) {
2682fcf5ef2aSThomas Huth return 2;
268315b3f556SPeter Maydell }
268415b3f556SPeter Maydell return 1;
268515b3f556SPeter Maydell }
268615b3f556SPeter Maydell
268715b3f556SPeter Maydell /* Return true if a v7M CPU is in Handler mode */
arm_v7m_is_handler_mode(CPUARMState * env)268815b3f556SPeter Maydell static inline bool arm_v7m_is_handler_mode(CPUARMState *env)
2689fcf5ef2aSThomas Huth {
2690fcf5ef2aSThomas Huth return env->v7m.exception != 0;
2691fcf5ef2aSThomas Huth }
2692fcf5ef2aSThomas Huth
2693fcf5ef2aSThomas Huth /* Return the current Exception Level (as per ARMv8; note that this differs
2694fcf5ef2aSThomas Huth * from the ARMv7 Privilege Level).
26958bfc26eaSPeter Maydell */
arm_current_el(CPUARMState * env)26968bfc26eaSPeter Maydell static inline int arm_current_el(CPUARMState *env)
2697fcf5ef2aSThomas Huth {
2698fcf5ef2aSThomas Huth if (arm_feature(env, ARM_FEATURE_M)) {
2699fcf5ef2aSThomas Huth return arm_v7m_is_handler_mode(env) ||
2700fcf5ef2aSThomas Huth !(env->v7m.control[env->v7m.secure] & 1);
2701fcf5ef2aSThomas Huth }
2702fcf5ef2aSThomas Huth
2703fcf5ef2aSThomas Huth if (is_a64(env)) {
2704fcf5ef2aSThomas Huth return extract32(env->pstate, 2, 2);
2705fcf5ef2aSThomas Huth }
2706fcf5ef2aSThomas Huth
2707fcf5ef2aSThomas Huth switch (env->uncached_cpsr & 0x1f) {
2708fcf5ef2aSThomas Huth case ARM_CPU_MODE_USR:
2709fcf5ef2aSThomas Huth return 0;
2710fcf5ef2aSThomas Huth case ARM_CPU_MODE_HYP:
2711fcf5ef2aSThomas Huth return 2;
2712fcf5ef2aSThomas Huth case ARM_CPU_MODE_MON:
2713fcf5ef2aSThomas Huth return 3;
2714fcf5ef2aSThomas Huth default:
2715fcf5ef2aSThomas Huth if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
2716fcf5ef2aSThomas Huth /* If EL3 is 32-bit then all secure privileged modes run in
2717fcf5ef2aSThomas Huth * EL3
2718fcf5ef2aSThomas Huth */
2719fcf5ef2aSThomas Huth return 3;
2720fcf5ef2aSThomas Huth }
2721fcf5ef2aSThomas Huth
2722fcf5ef2aSThomas Huth return 1;
2723fcf5ef2aSThomas Huth }
2724fcf5ef2aSThomas Huth }
2725fcf5ef2aSThomas Huth
2726fcf5ef2aSThomas Huth /**
2727fcf5ef2aSThomas Huth * write_list_to_cpustate
2728fcf5ef2aSThomas Huth * @cpu: ARMCPU
2729fcf5ef2aSThomas Huth *
2730fcf5ef2aSThomas Huth * For each register listed in the ARMCPU cpreg_indexes list, write
2731fcf5ef2aSThomas Huth * its value from the cpreg_values list into the ARMCPUState structure.
2732fcf5ef2aSThomas Huth * This updates TCG's working data structures from KVM data or
2733fcf5ef2aSThomas Huth * from incoming migration state.
2734fcf5ef2aSThomas Huth *
2735fcf5ef2aSThomas Huth * Returns: true if all register values were updated correctly,
2736fcf5ef2aSThomas Huth * false if some register was unknown or could not be written.
2737fcf5ef2aSThomas Huth * Note that we do not stop early on failure -- we will attempt
2738fcf5ef2aSThomas Huth * writing all registers in the list.
2739fcf5ef2aSThomas Huth */
2740fcf5ef2aSThomas Huth bool write_list_to_cpustate(ARMCPU *cpu);
2741b698e4eeSPeter Maydell
2742fcf5ef2aSThomas Huth /**
2743fcf5ef2aSThomas Huth * write_cpustate_to_list:
2744fcf5ef2aSThomas Huth * @cpu: ARMCPU
2745fcf5ef2aSThomas Huth * @kvm_sync: true if this is for syncing back to KVM
2746fcf5ef2aSThomas Huth *
2747fcf5ef2aSThomas Huth * For each register listed in the ARMCPU cpreg_indexes list, write
2748b698e4eeSPeter Maydell * its value from the ARMCPUState structure into the cpreg_values list.
2749b698e4eeSPeter Maydell * This is used to copy info from TCG's working data structures into
2750b698e4eeSPeter Maydell * KVM or for outbound migration.
2751b698e4eeSPeter Maydell *
2752b698e4eeSPeter Maydell * @kvm_sync is true if we are doing this in order to sync the
2753b698e4eeSPeter Maydell * register state back to KVM. In this case we will only update
2754fcf5ef2aSThomas Huth * values in the list if the previous list->cpustate sync actually
2755fcf5ef2aSThomas Huth * successfully wrote the CPU state. Otherwise we will keep the value
2756fcf5ef2aSThomas Huth * that is in the list.
2757fcf5ef2aSThomas Huth *
2758fcf5ef2aSThomas Huth * Returns: true if all register values were read correctly,
2759b698e4eeSPeter Maydell * false if some register was unknown or could not be read.
2760fcf5ef2aSThomas Huth * Note that we do not stop early on failure -- we will attempt
2761fcf5ef2aSThomas Huth * reading all registers in the list.
2762fcf5ef2aSThomas Huth */
2763fcf5ef2aSThomas Huth bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync);
27640dacec87SIgor Mammedov
/* MIDR values for the Texas Instruments TI915T/TI925T CPUs */
#define ARM_CPUID_TI915T 0x54029152
#define ARM_CPUID_TI925T 0x54029252

/* QOM type used when resolving a CPU model name to a class */
#define CPU_RESOLVING_TYPE TYPE_ARM_CPU

/* "host" CPU type, i.e. the CPU passed through by the accelerator */
#define TYPE_ARM_HOST_CPU "host-" TYPE_ARM_CPU
2771fcf5ef2aSThomas Huth
2772fcf5ef2aSThomas Huth /* ARM has the following "translation regimes" (as the ARM ARM calls them):
2773fcf5ef2aSThomas Huth *
2774b9f6033cSRichard Henderson * If EL3 is 64-bit:
2775150c24f3SPeter Maydell * + NonSecure EL1 & 0 stage 1
2776150c24f3SPeter Maydell * + NonSecure EL1 & 0 stage 2
2777150c24f3SPeter Maydell * + NonSecure EL2
2778150c24f3SPeter Maydell * + NonSecure EL2 & 0 (ARMv8.1-VHE)
2779150c24f3SPeter Maydell * + Secure EL1 & 0 stage 1
2780150c24f3SPeter Maydell * + Secure EL1 & 0 stage 2 (FEAT_SEL2)
2781150c24f3SPeter Maydell * + Secure EL2 (FEAT_SEL2)
2782150c24f3SPeter Maydell * + Secure EL2 & 0 (FEAT_SEL2)
2783fcf5ef2aSThomas Huth * + Realm EL1 & 0 stage 1 (FEAT_RME)
2784fcf5ef2aSThomas Huth * + Realm EL1 & 0 stage 2 (FEAT_RME)
2785fcf5ef2aSThomas Huth * + Realm EL2 (FEAT_RME)
2786fcf5ef2aSThomas Huth * + EL3
2787*6d62f309SPeter Maydell * If EL3 is 32-bit:
2788fcf5ef2aSThomas Huth * + NonSecure PL1 & 0 stage 1
2789fcf5ef2aSThomas Huth * + NonSecure PL1 & 0 stage 2
2790fcf5ef2aSThomas Huth * + NonSecure PL2
2791b9f6033cSRichard Henderson * + Secure PL1 & 0
2792b9f6033cSRichard Henderson * (reminder: for 32 bit EL3, Secure PL1 is *EL3*, not EL1.)
2793b9f6033cSRichard Henderson *
2794fcf5ef2aSThomas Huth * For QEMU, an mmu_idx is not quite the same as a translation regime because:
2795fcf5ef2aSThomas Huth * 1. we need to split the "EL1 & 0" and "EL2 & 0" regimes into two mmu_idxes,
2796fcf5ef2aSThomas Huth * because they may differ in access permissions even if the VA->PA map is
2797fcf5ef2aSThomas Huth * the same
2798fcf5ef2aSThomas Huth * 2. we want to cache in our TLB the full VA->IPA->PA lookup for a stage 1+2
2799fcf5ef2aSThomas Huth * translation, which means that we have one mmu_idx that deals with two
2800fcf5ef2aSThomas Huth * concatenated translation regimes [this sort of combined s1+2 TLB is
2801fcf5ef2aSThomas Huth * architecturally permitted]
2802bf05340cSPeter Maydell * 3. we don't need to allocate an mmu_idx to translations that we won't be
2803bf05340cSPeter Maydell * handling via the TLB. The only way to do a stage 1 translation without
2804bf05340cSPeter Maydell * the immediate stage 2 translation is via the ATS or AT system insns,
2805f147ed37SPeter Maydell * which can be slow-pathed and always do a page table walk.
2806f147ed37SPeter Maydell * The only use of stage 2 translations is either as part of an s1+2
2807f147ed37SPeter Maydell * lookup or when loading the descriptors during a stage 1 page table walk,
2808f147ed37SPeter Maydell * and in both those cases we don't use the TLB.
2809b9f6033cSRichard Henderson * 4. we can also safely fold together the "32 bit EL3" and "64 bit EL3"
2810b9f6033cSRichard Henderson * translation regimes, because they map reasonably well to each other
2811f147ed37SPeter Maydell * and they can't both be active at the same time.
2812452ef8cbSRichard Henderson * 5. we want to be able to use the TLB for accesses done as part of a
2813f147ed37SPeter Maydell * stage1 page table walk, rather than having to walk the stage2 page
2814d902ae75SRichard Henderson * table over and over.
2815d902ae75SRichard Henderson * 6. we need separate EL1/EL2 mmu_idx for handling the Privileged Access
2816d902ae75SRichard Henderson * Never (PAN) bit within PSTATE.
2817f147ed37SPeter Maydell * 7. we fold together most secure and non-secure regimes for A-profile,
2818150c24f3SPeter Maydell * because there are no banked system registers for aarch64, so the
2819fcf5ef2aSThomas Huth * process of switching between secure and non-secure is
2820b9f6033cSRichard Henderson * already heavyweight.
2821b9f6033cSRichard Henderson * 8. we cannot fold together Stage 2 Secure and Stage 2 NonSecure,
2822*6d62f309SPeter Maydell * because both are in use simultaneously for Secure EL2.
2823*6d62f309SPeter Maydell *
2824*6d62f309SPeter Maydell * This gives us the following list of cases:
2825d902ae75SRichard Henderson *
2826d902ae75SRichard Henderson * EL0 EL1&0 stage 1+2 (aka NS PL0 PL1&0 stage 1+2)
2827d902ae75SRichard Henderson * EL1 EL1&0 stage 1+2 (aka NS PL1 PL1&0 stage 1+2)
 * EL1 EL1&0 stage 1+2 +PAN (aka NS PL1 PL1&0 stage 1+2 +PAN)
2829*6d62f309SPeter Maydell * EL0 EL2&0
2830*6d62f309SPeter Maydell * EL2 EL2&0
2831*6d62f309SPeter Maydell * EL2 EL2&0 +PAN
2832150c24f3SPeter Maydell * EL2 (aka NS PL2)
2833150c24f3SPeter Maydell * EL3 (aka AArch32 S PL1 PL1&0)
2834150c24f3SPeter Maydell * AArch32 S PL0 PL1&0 (we call this EL30_0)
2835fcf5ef2aSThomas Huth * AArch32 S PL1 PL1&0 +PAN (we call this EL30_3_PAN)
2836*6d62f309SPeter Maydell * Stage2 Secure
2837fcf5ef2aSThomas Huth * Stage2 NonSecure
28383bef7012SPeter Maydell * plus one TLB per Physical address space: S, NS, Realm, Root
2839d902ae75SRichard Henderson *
2840150c24f3SPeter Maydell * for a total of 16 different mmu_idx.
28413bef7012SPeter Maydell *
28423bef7012SPeter Maydell * R profile CPUs have an MPU, but can use the same set of MMU indexes
28433bef7012SPeter Maydell * as A profile. They only need to distinguish EL0 and EL1 (and
28443bef7012SPeter Maydell * EL2 for cores like the Cortex-R52).
28453bef7012SPeter Maydell *
284662593718SPeter Maydell * M profile CPUs are rather different as they do not have a true MMU.
284762593718SPeter Maydell * They have the following different MMU indexes:
284866787c78SPeter Maydell * User
284966787c78SPeter Maydell * Privileged
285066787c78SPeter Maydell * User, execution priority negative (ie the MPU HFNMIENA bit may apply)
285162593718SPeter Maydell * Privileged, execution priority negative (ditto)
285262593718SPeter Maydell * If the CPU supports the v8M Security Extension then there are also:
28533bef7012SPeter Maydell * Secure User
28548bd5c820SPeter Maydell * Secure Privileged
28558bd5c820SPeter Maydell * Secure User, execution priority negative
28568bd5c820SPeter Maydell * Secure Privileged, execution priority negative
28578bd5c820SPeter Maydell *
2858bf05340cSPeter Maydell * The ARMMMUIdx and the mmu index value used by the core QEMU TLB code
2859bf05340cSPeter Maydell * are not quite the same -- different CPU types (most notably M profile
28608bd5c820SPeter Maydell * vs A/R profile) would like to use MMU indexes with different semantics,
28618bd5c820SPeter Maydell * but since we don't ever need to use all of those in a single CPU we
28628bd5c820SPeter Maydell * can avoid having to set NB_MMU_MODES to "total number of A profile MMU
28638bd5c820SPeter Maydell * modes + total number of M profile MMU modes". The lower bits of
28648bd5c820SPeter Maydell * ARMMMUIdx are the core TLB mmu index, and the higher bits are always
2865fcf5ef2aSThomas Huth * the same for any particular CPU.
2866fcf5ef2aSThomas Huth * Variables of type ARMMUIdx are always full values, and the core
2867fcf5ef2aSThomas Huth * index values are in variables of type 'int'.
2868fcf5ef2aSThomas Huth *
2869fcf5ef2aSThomas Huth * Our enumeration includes at the end some entries which are not "true"
2870fcf5ef2aSThomas Huth * mmu_idx values in that they don't have corresponding TLBs and are only
2871fcf5ef2aSThomas Huth * valid for doing slow path page table walks.
287262593718SPeter Maydell *
287362593718SPeter Maydell * The constant names here are patterned after the general style of the names
2874fcf5ef2aSThomas Huth * of the AT/ATS operations.
2875e7b921c2SPeter Maydell * The values used are carefully arranged to make mmu_idx => EL lookup easy.
28768bd5c820SPeter Maydell * For M profile we arrange them to have a bit for priv, a bit for negpri
2877e7b921c2SPeter Maydell * and a bit for secure.
28788bd5c820SPeter Maydell */
/* Flavour bits: which family of mmu index an ARMMMUIdx value belongs to. */
#define ARM_MMU_IDX_A     0x10  /* A profile */
#define ARM_MMU_IDX_NOTLB 0x20  /* does not have a TLB */
#define ARM_MMU_IDX_M     0x40  /* M profile */

/* Meanings of the bits for M profile mmu idx values */
#define ARM_MMU_IDX_M_PRIV   0x1
#define ARM_MMU_IDX_M_NEGPRI 0x2
#define ARM_MMU_IDX_M_S      0x4  /* Secure */

/* Masks to split an ARMMMUIdx into its flavour and core-index parts. */
#define ARM_MMU_IDX_TYPE_MASK \
    (ARM_MMU_IDX_A | ARM_MMU_IDX_M | ARM_MMU_IDX_NOTLB)
#define ARM_MMU_IDX_COREIDX_MASK 0xf
2891b9f6033cSRichard Henderson
typedef enum ARMMMUIdx {
    /*
     * A-profile.
     * E10_* are the EL1&0 regime, E20_* the EL2&0 (VHE) regime; the
     * _PAN variants are the same regime with PSTATE.PAN enforced.
     * E30_* are AArch32 Secure PL1&0 as seen from EL3 (see the big
     * comment above for the full list of regimes).
     */
    ARMMMUIdx_E10_0 = 0 | ARM_MMU_IDX_A,
    ARMMMUIdx_E20_0 = 1 | ARM_MMU_IDX_A,
    ARMMMUIdx_E10_1 = 2 | ARM_MMU_IDX_A,
    ARMMMUIdx_E20_2 = 3 | ARM_MMU_IDX_A,
    ARMMMUIdx_E10_1_PAN = 4 | ARM_MMU_IDX_A,
    ARMMMUIdx_E20_2_PAN = 5 | ARM_MMU_IDX_A,
    ARMMMUIdx_E2 = 6 | ARM_MMU_IDX_A,
    ARMMMUIdx_E3 = 7 | ARM_MMU_IDX_A,
    ARMMMUIdx_E30_0 = 8 | ARM_MMU_IDX_A,
    ARMMMUIdx_E30_3_PAN = 9 | ARM_MMU_IDX_A,

    /*
     * Used for second stage of an S12 page table walk, or for descriptor
     * loads during first stage of an S1 page table walk.  Note that both
     * are in use simultaneously for SecureEL2: the security state for
     * the S2 ptw is selected by the NS bit from the S1 ptw.
     */
    ARMMMUIdx_Stage2_S = 10 | ARM_MMU_IDX_A,
    ARMMMUIdx_Stage2 = 11 | ARM_MMU_IDX_A,

    /*
     * TLBs with 1-1 mapping to the physical address spaces.
     * These must stay in ARMSecuritySpace order: arm_space_to_phys()
     * relies on it (and build-asserts it).
     */
    ARMMMUIdx_Phys_S = 12 | ARM_MMU_IDX_A,
    ARMMMUIdx_Phys_NS = 13 | ARM_MMU_IDX_A,
    ARMMMUIdx_Phys_Root = 14 | ARM_MMU_IDX_A,
    ARMMMUIdx_Phys_Realm = 15 | ARM_MMU_IDX_A,

    /*
     * These are not allocated TLBs and are used only for AT system
     * instructions or for the first stage of an S12 page table walk.
     */
    ARMMMUIdx_Stage1_E0 = 0 | ARM_MMU_IDX_NOTLB,
    ARMMMUIdx_Stage1_E1 = 1 | ARM_MMU_IDX_NOTLB,
    ARMMMUIdx_Stage1_E1_PAN = 2 | ARM_MMU_IDX_NOTLB,

    /*
     * M-profile.
     * Built from the bits above: priv, negative-priority and secure.
     */
    ARMMMUIdx_MUser = ARM_MMU_IDX_M,
    ARMMMUIdx_MPriv = ARM_MMU_IDX_M | ARM_MMU_IDX_M_PRIV,
    ARMMMUIdx_MUserNegPri = ARMMMUIdx_MUser | ARM_MMU_IDX_M_NEGPRI,
    ARMMMUIdx_MPrivNegPri = ARMMMUIdx_MPriv | ARM_MMU_IDX_M_NEGPRI,
    ARMMMUIdx_MSUser = ARMMMUIdx_MUser | ARM_MMU_IDX_M_S,
    ARMMMUIdx_MSPriv = ARMMMUIdx_MPriv | ARM_MMU_IDX_M_S,
    ARMMMUIdx_MSUserNegPri = ARMMMUIdx_MUserNegPri | ARM_MMU_IDX_M_S,
    ARMMMUIdx_MSPrivNegPri = ARMMMUIdx_MPrivNegPri | ARM_MMU_IDX_M_S,
} ARMMMUIdx;
29428bd5c820SPeter Maydell
/*
 * Bit macros for the core-mmu-index values for each index,
 * for use when calling tlb_flush_by_mmuidx() and friends.
 */
#define TO_CORE_BIT(NAME) \
    ARMMMUIdxBit_##NAME = 1 << (ARMMMUIdx_##NAME & ARM_MMU_IDX_COREIDX_MASK)

typedef enum ARMMMUIdxBit {
    TO_CORE_BIT(E10_0),
    TO_CORE_BIT(E20_0),
    TO_CORE_BIT(E10_1),
    TO_CORE_BIT(E10_1_PAN),
    TO_CORE_BIT(E2),
    TO_CORE_BIT(E20_2),
    TO_CORE_BIT(E20_2_PAN),
    TO_CORE_BIT(E3),
    TO_CORE_BIT(E30_0),
    TO_CORE_BIT(E30_3_PAN),
    TO_CORE_BIT(Stage2),
    TO_CORE_BIT(Stage2_S),

    TO_CORE_BIT(MUser),
    TO_CORE_BIT(MPriv),
    TO_CORE_BIT(MUserNegPri),
    TO_CORE_BIT(MPrivNegPri),
    TO_CORE_BIT(MSUser),
    TO_CORE_BIT(MSPriv),
    TO_CORE_BIT(MSUserNegPri),
    TO_CORE_BIT(MSPrivNegPri),
} ARMMMUIdxBit;

#undef TO_CORE_BIT

/* Core mmu index for user-only emulation (ARMMMUIdx_E10_0's core index). */
#define MMU_USER_IDX 0
2977fcf5ef2aSThomas Huth
/* Indexes used when registering address spaces with cpu_address_space_init */
typedef enum ARMASIdx {
    ARMASIdx_NS = 0,    /* Non-secure address space */
    ARMASIdx_S = 1,     /* Secure address space */
    /* NOTE(review): Tag* appear to be tag-memory spaces (MTE) -- confirm */
    ARMASIdx_TagNS = 2,
    ARMASIdx_TagS = 3,
} ARMASIdx;
2985bb5cc2c8SRichard Henderson
arm_space_to_phys(ARMSecuritySpace space)2986bb5cc2c8SRichard Henderson static inline ARMMMUIdx arm_space_to_phys(ARMSecuritySpace space)
2987bb5cc2c8SRichard Henderson {
2988bb5cc2c8SRichard Henderson /* Assert the relative order of the physical mmu indexes. */
2989bb5cc2c8SRichard Henderson QEMU_BUILD_BUG_ON(ARMSS_Secure != 0);
2990bb5cc2c8SRichard Henderson QEMU_BUILD_BUG_ON(ARMMMUIdx_Phys_NS != ARMMMUIdx_Phys_S + ARMSS_NonSecure);
2991bb5cc2c8SRichard Henderson QEMU_BUILD_BUG_ON(ARMMMUIdx_Phys_Root != ARMMMUIdx_Phys_S + ARMSS_Root);
2992bb5cc2c8SRichard Henderson QEMU_BUILD_BUG_ON(ARMMMUIdx_Phys_Realm != ARMMMUIdx_Phys_S + ARMSS_Realm);
2993bb5cc2c8SRichard Henderson
2994bb5cc2c8SRichard Henderson return ARMMMUIdx_Phys_S + space;
2995bb5cc2c8SRichard Henderson }
2996bb5cc2c8SRichard Henderson
arm_phys_to_space(ARMMMUIdx idx)2997bb5cc2c8SRichard Henderson static inline ARMSecuritySpace arm_phys_to_space(ARMMMUIdx idx)
2998bb5cc2c8SRichard Henderson {
299943bbce7fSPeter Maydell assert(idx >= ARMMMUIdx_Phys_S && idx <= ARMMMUIdx_Phys_Realm);
300043bbce7fSPeter Maydell return idx - ARMMMUIdx_Phys_S;
300143bbce7fSPeter Maydell }
300243bbce7fSPeter Maydell
static inline bool arm_v7m_csselr_razwi(ARMCPU *cpu)
{
    /*
     * CSSELR is RAZ/WI when every CLIDR.CtypeN field is zero, i.e.
     * when there are no caches at all.  Return true when at least one
     * cache level is implemented (CSSELR is *not* RAZ/WI).
     */
    if (cpu->clidr & R_V7M_CLIDR_CTYPE_ALL_MASK) {
        return true;
    }
    return false;
}
3010fcf5ef2aSThomas Huth
/*
 * Return true if SCTLR.B (the legacy BE32 big-endian configuration bit)
 * is set.  In system emulation the bit is only honoured on pre-v7 CPUs;
 * user-mode emulation always honours it -- see the comment below.
 */
static inline bool arm_sctlr_b(CPUARMState *env)
{
    return
        /* We need not implement SCTLR.ITD in user-mode emulation, so
         * let linux-user ignore the fact that it conflicts with SCTLR_B.
         * This lets people run BE32 binaries with "-cpu any".
         */
#ifndef CONFIG_USER_ONLY
        !arm_feature(env, ARM_FEATURE_V7) &&
#endif
        (env->cp15.sctlr_el[1] & SCTLR_B) != 0;
}
30238061a649SRichard Henderson
3024fcf5ef2aSThomas Huth uint64_t arm_sctlr(CPUARMState *env, int el);
3025fcf5ef2aSThomas Huth
/* Return true if AArch32 data accesses are big-endian, given SCTLR.B. */
static inline bool arm_cpu_data_is_big_endian_a32(CPUARMState *env,
                                                  bool sctlr_b)
{
#ifdef CONFIG_USER_ONLY
    /*
     * In system mode, BE32 is modelled in line with the
     * architecture (as word-invariant big-endianness), where loads
     * and stores are done little endian but from addresses which
     * are adjusted by XORing with the appropriate constant. So the
     * endianness to use for the raw data access is not affected by
     * SCTLR.B.
     * In user mode, however, we model BE32 as byte-invariant
     * big-endianness (because user-only code cannot tell the
     * difference), and so we need to use a data access endianness
     * that depends on SCTLR.B.
     */
    if (sctlr_b) {
        return true;
    }
#endif
    /* In 32-bit mode, endianness is determined by the CPSR.E bit. */
    return env->uncached_cpsr & CPSR_E;
}
30498061a649SRichard Henderson
arm_cpu_data_is_big_endian_a64(int el,uint64_t sctlr)30508061a649SRichard Henderson static inline bool arm_cpu_data_is_big_endian_a64(int el, uint64_t sctlr)
30518061a649SRichard Henderson {
30528061a649SRichard Henderson return sctlr & (el ? SCTLR_EE : SCTLR_E0E);
30538061a649SRichard Henderson }
30548061a649SRichard Henderson
30558061a649SRichard Henderson /* Return true if the processor is in big-endian mode. */
arm_cpu_data_is_big_endian(CPUARMState * env)305664e40755SRichard Henderson static inline bool arm_cpu_data_is_big_endian(CPUARMState *env)
305764e40755SRichard Henderson {
305864e40755SRichard Henderson if (!is_a64(env)) {
30598061a649SRichard Henderson return arm_cpu_data_is_big_endian_a32(env, arm_sctlr_b(env));
3060fcf5ef2aSThomas Huth } else {
3061fcf5ef2aSThomas Huth int cur_el = arm_current_el(env);
3062fcf5ef2aSThomas Huth uint64_t sctlr = arm_sctlr(env, cur_el);
3063fcf5ef2aSThomas Huth return arm_cpu_data_is_big_endian_a64(cur_el, sctlr);
3064fcf5ef2aSThomas Huth }
3065fdd1b228SRichard Henderson }
3066a378206aSRichard Henderson
3067a378206aSRichard Henderson #include "exec/cpu-all.h"
3068a378206aSRichard Henderson
3069a378206aSRichard Henderson /*
3070fdd1b228SRichard Henderson * We have more than 32-bits worth of state per TB, so we split the data
3071a378206aSRichard Henderson * between tb->flags and tb->cs_base, which is otherwise unused for ARM.
3072a378206aSRichard Henderson * We collect these two parts in CPUARMTBFlags where they are named
3073a378206aSRichard Henderson * flags and flags2 respectively.
3074a378206aSRichard Henderson *
3075a378206aSRichard Henderson * The flags that are shared between all execution modes, TBFLAG_ANY,
3076a378206aSRichard Henderson * are stored in flags. The flags that are specific to a given mode
 * are stored in flags2. Since cs_base is sized on the configured
3078a378206aSRichard Henderson * address size, flags2 always has 64-bits for A64, and a minimum of
30795896f392SRichard Henderson * 32-bits for A32 and M32.
30805896f392SRichard Henderson *
30815896f392SRichard Henderson * The bits for 32-bit A-profile and M-profile partially overlap:
30825896f392SRichard Henderson *
30835896f392SRichard Henderson * 31 23 11 10 0
30845896f392SRichard Henderson * +-------------+----------+----------------+
308526702213SPeter Maydell * | | | TBFLAG_A32 |
308679cabf1fSRichard Henderson * | TBFLAG_AM32 | +-----+----------+
3087fdd1b228SRichard Henderson * | | |TBFLAG_M32|
3088fcf5ef2aSThomas Huth * +-------------+----------------+----------+
3089eee81d41SRichard Henderson * 31 23 6 5 0
3090eee81d41SRichard Henderson *
3091eee81d41SRichard Henderson * Unless otherwise noted, these bits are cached in env->hflags.
3092eee81d41SRichard Henderson */
/* Flags valid in all execution states; kept in the 32-bit flags word. */
FIELD(TBFLAG_ANY, AARCH64_STATE, 0, 1)
FIELD(TBFLAG_ANY, SS_ACTIVE, 1, 1)
FIELD(TBFLAG_ANY, PSTATE__SS, 2, 1)     /* Not cached. */
FIELD(TBFLAG_ANY, BE_DATA, 3, 1)
FIELD(TBFLAG_ANY, MMUIDX, 4, 4)
/* Target EL if we take a floating-point-disabled exception */
FIELD(TBFLAG_ANY, FPEXC_EL, 8, 2)
/* Memory operations require alignment: SCTLR_ELx.A or CCR.UNALIGN_TRP */
FIELD(TBFLAG_ANY, ALIGN_MEM, 10, 1)
FIELD(TBFLAG_ANY, PSTATE__IL, 11, 1)
FIELD(TBFLAG_ANY, FGT_ACTIVE, 12, 1)
FIELD(TBFLAG_ANY, FGT_SVC, 13, 1)
31055896f392SRichard Henderson
/*
 * Bit usage when in AArch32 state, both A- and M-profile.
 */
FIELD(TBFLAG_AM32, CONDEXEC, 24, 8)     /* Not cached. */
FIELD(TBFLAG_AM32, THUMB, 23, 1)        /* Not cached. */

/*
 * Bit usage when in AArch32 state, for A-profile only.
 */
FIELD(TBFLAG_A32, VECLEN, 0, 3)         /* Not cached. */
FIELD(TBFLAG_A32, VECSTRIDE, 3, 2)      /* Not cached. */
/*
 * We store the bottom two bits of the CPAR as TB flags and handle
 * checks on the other bits at runtime. This shares the same bits as
 * VECSTRIDE, which is OK as no XScale CPU has VFP.
 * Not cached, because VECLEN+VECSTRIDE are not cached.
 */
FIELD(TBFLAG_A32, XSCALE_CPAR, 5, 2)
FIELD(TBFLAG_A32, VFPEN, 7, 1)          /* Partially cached, minus FPEXC. */
FIELD(TBFLAG_A32, SCTLR__B, 8, 1)       /* Cannot overlap with SCTLR_B */
FIELD(TBFLAG_A32, HSTR_ACTIVE, 9, 1)
/*
 * Indicates whether cp register reads and writes by guest code should access
 * the secure or nonsecure bank of banked registers; note that this is not
 * the same thing as the current security state of the processor!
 */
FIELD(TBFLAG_A32, NS, 10, 1)
/*
 * Indicates that SME Streaming mode is active, and SMCR_ELx.FA64 is not.
 * This requires an SME trap from AArch32 mode when using NEON.
 */
FIELD(TBFLAG_A32, SME_TRAP_NONSTREAMING, 11, 1)

/*
 * Bit usage when in AArch32 state, for M-profile only.
 */
/* Handler (i.e. not Thread) mode */
FIELD(TBFLAG_M32, HANDLER, 0, 1)
/* Whether we should generate stack-limit checks */
FIELD(TBFLAG_M32, STACKCHECK, 1, 1)
/* Set if FPCCR.LSPACT is set */
FIELD(TBFLAG_M32, LSPACT, 2, 1)                 /* Not cached. */
/* Set if we must create a new FP context */
FIELD(TBFLAG_M32, NEW_FP_CTXT_NEEDED, 3, 1)     /* Not cached. */
/* Set if FPCCR.S does not match current security state */
FIELD(TBFLAG_M32, FPCCR_S_WRONG, 4, 1)          /* Not cached. */
/* Set if MVE insns are definitely not predicated by VPR or LTPSIZE */
FIELD(TBFLAG_M32, MVE_NO_PRED, 5, 1)            /* Not cached. */
/* Set if in secure mode */
FIELD(TBFLAG_M32, SECURE, 6, 1)
3156476a4692SRichard Henderson
/*
 * Bit usage when in AArch64 state.
 */
FIELD(TBFLAG_A64, TBII, 0, 2)
FIELD(TBFLAG_A64, SVEEXC_EL, 2, 2)
/* The current vector length, either NVL or SVL. */
FIELD(TBFLAG_A64, VL, 4, 4)
FIELD(TBFLAG_A64, PAUTH_ACTIVE, 8, 1)
FIELD(TBFLAG_A64, BT, 9, 1)
FIELD(TBFLAG_A64, BTYPE, 10, 2)         /* Not cached. */
FIELD(TBFLAG_A64, TBID, 12, 2)
FIELD(TBFLAG_A64, UNPRIV, 14, 1)
FIELD(TBFLAG_A64, ATA, 15, 1)
FIELD(TBFLAG_A64, TCMA, 16, 2)
FIELD(TBFLAG_A64, MTE_ACTIVE, 18, 1)
FIELD(TBFLAG_A64, MTE0_ACTIVE, 19, 1)
FIELD(TBFLAG_A64, SMEEXC_EL, 20, 2)
FIELD(TBFLAG_A64, PSTATE_SM, 22, 1)
FIELD(TBFLAG_A64, PSTATE_ZA, 23, 1)
FIELD(TBFLAG_A64, SVL, 24, 4)
/* Indicates that SME Streaming mode is active, and SMCR_ELx.FA64 is not. */
FIELD(TBFLAG_A64, SME_TRAP_NONSTREAMING, 28, 1)
FIELD(TBFLAG_A64, TRAP_ERET, 29, 1)
FIELD(TBFLAG_A64, NAA, 30, 1)
FIELD(TBFLAG_A64, ATA0, 31, 1)
FIELD(TBFLAG_A64, NV, 32, 1)
FIELD(TBFLAG_A64, NV1, 33, 1)
FIELD(TBFLAG_A64, NV2, 34, 1)
/* Set if FEAT_NV2 RAM accesses use the EL2&0 translation regime */
FIELD(TBFLAG_A64, NV2_MEM_E20, 35, 1)
/* Set if FEAT_NV2 RAM accesses are big-endian */
FIELD(TBFLAG_A64, NV2_MEM_BE, 36, 1)
318929a15a61SPeter Maydell
/*
 * Helpers for using the above. Note that only the A64 accessors use
 * FIELD_DP64() and FIELD_EX64(), because in the other cases the flags
 * word either is or might be 32 bits only.
 */
/* Deposit (DP_*) a field value into a CPUARMTBFlags struct DST. */
#define DP_TBFLAG_ANY(DST, WHICH, VAL) \
    (DST.flags = FIELD_DP32(DST.flags, TBFLAG_ANY, WHICH, VAL))
#define DP_TBFLAG_A64(DST, WHICH, VAL) \
    (DST.flags2 = FIELD_DP64(DST.flags2, TBFLAG_A64, WHICH, VAL))
#define DP_TBFLAG_A32(DST, WHICH, VAL) \
    (DST.flags2 = FIELD_DP32(DST.flags2, TBFLAG_A32, WHICH, VAL))
#define DP_TBFLAG_M32(DST, WHICH, VAL) \
    (DST.flags2 = FIELD_DP32(DST.flags2, TBFLAG_M32, WHICH, VAL))
#define DP_TBFLAG_AM32(DST, WHICH, VAL) \
    (DST.flags2 = FIELD_DP32(DST.flags2, TBFLAG_AM32, WHICH, VAL))

/* Extract (EX_*) a field value from a CPUARMTBFlags struct IN. */
#define EX_TBFLAG_ANY(IN, WHICH)   FIELD_EX32(IN.flags, TBFLAG_ANY, WHICH)
#define EX_TBFLAG_A64(IN, WHICH)   FIELD_EX64(IN.flags2, TBFLAG_A64, WHICH)
#define EX_TBFLAG_A32(IN, WHICH)   FIELD_EX32(IN.flags2, TBFLAG_A32, WHICH)
#define EX_TBFLAG_M32(IN, WHICH)   FIELD_EX32(IN.flags2, TBFLAG_M32, WHICH)
#define EX_TBFLAG_AM32(IN, WHICH)  FIELD_EX32(IN.flags2, TBFLAG_AM32, WHICH)
32118b599e5cSRichard Henderson
32128b599e5cSRichard Henderson /**
32138b599e5cSRichard Henderson * sve_vq
32148b599e5cSRichard Henderson * @env: the cpu context
32158b599e5cSRichard Henderson *
32168b599e5cSRichard Henderson * Return the VL cached within env->hflags, in units of quadwords.
32178b599e5cSRichard Henderson */
sve_vq(CPUARMState * env)32188b599e5cSRichard Henderson static inline int sve_vq(CPUARMState *env)
32195d7953adSRichard Henderson {
32205d7953adSRichard Henderson return EX_TBFLAG_A64(env->hflags, VL) + 1;
32215d7953adSRichard Henderson }
32225d7953adSRichard Henderson
32235d7953adSRichard Henderson /**
32245d7953adSRichard Henderson * sme_vq
32255d7953adSRichard Henderson * @env: the cpu context
32265d7953adSRichard Henderson *
32275d7953adSRichard Henderson * Return the SVL cached within env->hflags, in units of quadwords.
32285d7953adSRichard Henderson */
sme_vq(CPUARMState * env)32295d7953adSRichard Henderson static inline int sme_vq(CPUARMState *env)
3230fcf5ef2aSThomas Huth {
3231fcf5ef2aSThomas Huth return EX_TBFLAG_A64(env->hflags, SVL) + 1;
3232fcf5ef2aSThomas Huth }
3233ee3eb3a7SMarc-André Lureau
/*
 * Return true if instruction fetches must be byte-swapped relative to
 * the build-time target endianness.
 *
 * @sctlr_b: current value of SCTLR.B (legacy BE32 mode)
 */
static inline bool bswap_code(bool sctlr_b)
{
#ifdef CONFIG_USER_ONLY
    /* BE8 (SCTLR.B = 0, TARGET_BIG_ENDIAN = 1) is mixed endian.
     * The invalid combination SCTLR.B=1/CPSR.E=1/TARGET_BIG_ENDIAN=0
     * would also end up as a mixed-endian mode with BE code, LE data.
     */
    return TARGET_BIG_ENDIAN ^ sctlr_b;
#else
    /* All code access in ARM is little endian, and there are no loaders
     * doing swaps that need to be reversed.
     * Use 'false' rather than '0': this is a bool-returning function.
     */
    return false;
#endif
}
3249ded625e7SThomas Huth
#ifdef CONFIG_USER_ONLY
static inline bool arm_cpu_bswap_data(CPUARMState *env)
{
    /*
     * Data accesses need byte-swapping exactly when the CPU's current
     * data endianness differs from the build-time target endianness.
     */
    bool data_big_endian = arm_cpu_data_is_big_endian(env);
    return data_big_endian != (TARGET_BIG_ENDIAN != 0);
}
#endif
3256fcf5ef2aSThomas Huth
3257fcf5ef2aSThomas Huth void cpu_get_tb_cpu_state(CPUARMState *env, vaddr *pc,
3258fcf5ef2aSThomas Huth uint64_t *cs_base, uint32_t *flags);
3259fcf5ef2aSThomas Huth
/* How the guest may invoke PSCI firmware calls (the "conduit"). */
enum {
    QEMU_PSCI_CONDUIT_DISABLED = 0, /* PSCI not available to the guest */
    QEMU_PSCI_CONDUIT_SMC = 1,      /* via the SMC instruction */
    QEMU_PSCI_CONDUIT_HVC = 2,      /* via the HVC instruction */
};
3265fcf5ef2aSThomas Huth
3266fcf5ef2aSThomas Huth #ifndef CONFIG_USER_ONLY
3267fcf5ef2aSThomas Huth /* Return the address space index to use for a memory access */
arm_asidx_from_attrs(CPUState * cs,MemTxAttrs attrs)3268fcf5ef2aSThomas Huth static inline int arm_asidx_from_attrs(CPUState *cs, MemTxAttrs attrs)
3269fcf5ef2aSThomas Huth {
3270fcf5ef2aSThomas Huth return attrs.secure ? ARMASIdx_S : ARMASIdx_NS;
3271fcf5ef2aSThomas Huth }
3272fcf5ef2aSThomas Huth
3273fcf5ef2aSThomas Huth /* Return the AddressSpace to use for a memory access
3274fcf5ef2aSThomas Huth * (which depends on whether the access is S or NS, and whether
3275fcf5ef2aSThomas Huth * the board gave us a separate AddressSpace for S accesses).
3276fcf5ef2aSThomas Huth */
arm_addressspace(CPUState * cs,MemTxAttrs attrs)3277fcf5ef2aSThomas Huth static inline AddressSpace *arm_addressspace(CPUState *cs, MemTxAttrs attrs)
3278fcf5ef2aSThomas Huth {
3279fcf5ef2aSThomas Huth return cpu_get_address_space(cs, arm_asidx_from_attrs(cs, attrs));
3280b5c53d1bSAaron Lindsay }
3281b5c53d1bSAaron Lindsay #endif
3282fcf5ef2aSThomas Huth
/**
 * arm_register_pre_el_change_hook:
 * Register a hook function which will be called immediately before this
 * CPU changes exception level or mode. The hook function will be
 * passed a pointer to the ARMCPU and the opaque data pointer passed
 * to this function when the hook was registered.
 *
 * Note that if a pre-change hook is called, any registered post-change hooks
 * are guaranteed to subsequently be called.
 */
void arm_register_pre_el_change_hook(ARMCPU *cpu, ARMELChangeHookFn *hook,
                                     void *opaque);
/**
 * arm_register_el_change_hook:
 * Register a hook function which will be called immediately after this
 * CPU changes exception level or mode. The hook function will be
 * passed a pointer to the ARMCPU and the opaque data pointer passed
 * to this function when the hook was registered.
 *
 * Note that any registered hooks registered here are guaranteed to be called
 * if pre-change hooks have been.
 */
void arm_register_el_change_hook(ARMCPU *cpu, ARMELChangeHookFn *hook,
                                 void *opaque);
33073d74e2e9SRichard Henderson
/**
 * arm_rebuild_hflags:
 * Rebuild the cached TBFLAGS for arbitrary changed processor state.
 * Callers that mutate state feeding into the TB flags should invoke
 * this so the cached value stays in sync with @env.
 */
void arm_rebuild_hflags(CPUARMState *env);
33139a2b5256SRichard Henderson
33149a2b5256SRichard Henderson /**
33159a2b5256SRichard Henderson * aa32_vfp_dreg:
3316c39c2b90SRichard Henderson * Return a pointer to the Dn register within env in 32-bit mode.
33179a2b5256SRichard Henderson */
aa32_vfp_dreg(CPUARMState * env,unsigned regno)33189a2b5256SRichard Henderson static inline uint64_t *aa32_vfp_dreg(CPUARMState *env, unsigned regno)
33199a2b5256SRichard Henderson {
33209a2b5256SRichard Henderson return &env->vfp.zregs[regno >> 1].d[regno & 1];
33219a2b5256SRichard Henderson }
33229a2b5256SRichard Henderson
33239a2b5256SRichard Henderson /**
33249a2b5256SRichard Henderson * aa32_vfp_qreg:
3325c39c2b90SRichard Henderson * Return a pointer to the Qn register within env in 32-bit mode.
33269a2b5256SRichard Henderson */
aa32_vfp_qreg(CPUARMState * env,unsigned regno)33279a2b5256SRichard Henderson static inline uint64_t *aa32_vfp_qreg(CPUARMState *env, unsigned regno)
33289a2b5256SRichard Henderson {
33299a2b5256SRichard Henderson return &env->vfp.zregs[regno].d[0];
33309a2b5256SRichard Henderson }
33319a2b5256SRichard Henderson
33329a2b5256SRichard Henderson /**
33339a2b5256SRichard Henderson * aa64_vfp_qreg:
3334c39c2b90SRichard Henderson * Return a pointer to the Qn register within env in 64-bit mode.
33359a2b5256SRichard Henderson */
aa64_vfp_qreg(CPUARMState * env,unsigned regno)33369a2b5256SRichard Henderson static inline uint64_t *aa64_vfp_qreg(CPUARMState *env, unsigned regno)
3337028e2a7bSRichard Henderson {
3338fca75f60SPeter Maydell return &env->vfp.zregs[regno].d[0];
3339028e2a7bSRichard Henderson }
3340962fcbf2SRichard Henderson
/* Shared between translate-sve.c and sve_helper.c. */
/* NOTE(review): 5 entries — presumably indexed by log2 of the SVE element
 * size; confirm against the definition in the files above. */
extern const uint64_t pred_esz_masks[5];
33437f2cf760SRichard Henderson
/*
 * AArch64 usage of the PAGE_TARGET_* bits for linux-user.
 * Note that with the Linux kernel, PROT_MTE may not be cleared by
 * mprotect, but PROT_BTI may be cleared. C.f. the kernel's VM_ARCH_CLEAR.
 */
#define PAGE_BTI PAGE_TARGET_1
#define PAGE_MTE PAGE_TARGET_2
/* "Sticky" bits survive an mprotect (see VM_ARCH_CLEAR note above). */
#define PAGE_TARGET_STICKY PAGE_MTE

/* We associate one allocation tag per 16 bytes, the minimum. */
#define LOG2_TAG_GRANULE 4
#define TAG_GRANULE (1 << LOG2_TAG_GRANULE)
335650d4c8c1SRichard Henderson
#ifdef CONFIG_USER_ONLY
/*
 * Per-page tag storage: one tag per TAG_GRANULE (16 bytes), packed two
 * granules per byte, hence page size / 32.
 */
#define TARGET_PAGE_DATA_SIZE (TARGET_PAGE_SIZE >> (LOG2_TAG_GRANULE + 1))
#endif
33600e0c030cSRichard Henderson
#ifdef TARGET_TAGGED_ADDRESSES
/**
 * cpu_untagged_addr:
 * @cs: CPU context
 * @x: tagged address
 *
 * Remove any address tag from @x.  This is explicitly related to the
 * linux syscall TIF_TAGGED_ADDR setting, not TBI in general.
 *
 * There should be a better place to put this, but we need this in
 * include/exec/cpu_ldst.h, and not some place linux-user specific.
 */
static inline target_ulong cpu_untagged_addr(CPUState *cs, target_ulong x)
{
    CPUARMState *env = cpu_env(cs);

    if (!env->tagged_addr_enable) {
        return x;
    }
    /*
     * TBI is enabled for userspace but not kernelspace addresses.
     * Sign-extending from bit 55 clears the tag byte of user addresses
     * (bit 55 clear) while leaving kernel addresses (bit 55 set) intact.
     */
    return x & sextract64(x, 0, 56);
}
#endif
3386
3387 #endif
3388