/*
 * ARM generic helpers.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "trace.h"
#include "cpu.h"
#include "internals.h"
#include "cpu-features.h"
#include "exec/page-protection.h"
#include "exec/mmap-lock.h"
#include "qemu/main-loop.h"
#include "qemu/timer.h"
#include "qemu/bitops.h"
#include "qemu/qemu-print.h"
#include "exec/cputlb.h"
#include "exec/translation-block.h"
#include "hw/irq.h"
#include "system/cpu-timers.h"
#include "exec/icount.h"
#include "system/kvm.h"
#include "system/tcg.h"
#include "qapi/error.h"
#include "qemu/guest-random.h"
#ifdef CONFIG_TCG
#include "accel/tcg/probe.h"
#include "accel/tcg/getpc.h"
#include "semihosting/common-semi.h"
#endif
#include "cpregs.h"
#include "target/arm/gtimer.h"
#include "qemu/plugin.h"

#define HELPER_H "tcg/helper.h"
#include "exec/helper-proto.h.inc"

static void switch_mode(CPUARMState *env, int mode);

int compare_u64(const void *a, const void *b)
{
    if (*(uint64_t *)a > *(uint64_t *)b) {
        return 1;
    }
    if (*(uint64_t *)a < *(uint64_t *)b) {
        return -1;
    }
    return 0;
}
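
/*
 * compare_u64() is the qsort() comparator for the cpreg index lists:
 * arm_init_cpreg_list() below sorts cpu->cpreg_indexes with it, e.g.
 *   qsort(cpu->cpreg_indexes, arraylen, sizeof(uint64_t), compare_u64);
 * so the (index, value) lists are always ordered by 64-bit key ID.
 */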

/*
 * Macros which are lvalues for the field in CPUARMState for the
 * ARMCPRegInfo *ri.
 */
#define CPREG_FIELD32(env, ri) \
    (*(uint32_t *)((char *)(env) + (ri)->fieldoffset))
#define CPREG_FIELD64(env, ri) \
    (*(uint64_t *)((char *)(env) + (ri)->fieldoffset))

uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    assert(ri->fieldoffset);
    switch (cpreg_field_type(ri)) {
    case MO_64:
        return CPREG_FIELD64(env, ri);
    case MO_32:
        return CPREG_FIELD32(env, ri);
    default:
        g_assert_not_reached();
    }
}

void raw_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    assert(ri->fieldoffset);
    switch (cpreg_field_type(ri)) {
    case MO_64:
        CPREG_FIELD64(env, ri) = value;
        break;
    case MO_32:
        CPREG_FIELD32(env, ri) = value;
        break;
    default:
        g_assert_not_reached();
    }
}

#undef CPREG_FIELD32
#undef CPREG_FIELD64

static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return (char *)env + ri->fieldoffset;
}

uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Raw read of a coprocessor register (as needed for migration, etc). */
    if (ri->type & ARM_CP_CONST) {
        return ri->resetvalue;
    } else if (ri->raw_readfn) {
        return ri->raw_readfn(env, ri);
    } else if (ri->readfn) {
        return ri->readfn(env, ri);
    } else {
        return raw_read(env, ri);
    }
}

static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t v)
{
    /*
     * Raw write of a coprocessor register (as needed for migration, etc).
     * Note that constant registers are treated as write-ignored; the
     * caller should check for success by whether a readback gives the
     * value written.
     */
    if (ri->type & ARM_CP_CONST) {
        return;
    } else if (ri->raw_writefn) {
        ri->raw_writefn(env, ri, v);
    } else if (ri->writefn) {
        ri->writefn(env, ri, v);
    } else {
        raw_write(env, ri, v);
    }
}

static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
{
    /*
     * Return true if the regdef would cause an assertion if you called
     * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
     * program bug for it not to have the NO_RAW flag).
     * NB that returning false here doesn't necessarily mean that calling
     * read/write_raw_cp_reg() is safe, because we can't distinguish "has
     * read/write access functions which are safe for raw use" from "has
     * read/write access functions which have side effects but has forgotten
     * to provide raw access functions".
     * The tests here line up with the conditions in read/write_raw_cp_reg()
     * and assertions in raw_read()/raw_write().
     */
    if ((ri->type & ARM_CP_CONST) ||
        ri->fieldoffset ||
        ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
        return false;
    }
    return true;
}
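
/*
 * As a concrete illustration (hypothetical regdef, not one from the
 * tables below): an entry with only a .readfn, no .writefn, no
 * .fieldoffset and no ARM_CP_CONST would make raw_accessors_invalid()
 * return true, because a raw write would fall through to raw_write()
 * and trip its fieldoffset assertion; such a regdef needs ARM_CP_NO_RAW.
 */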

bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync)
{
    /* Write the coprocessor state from cpu->env to the (index,value) list. */
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        const ARMCPRegInfo *ri;
        uint64_t newval;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }

        newval = read_raw_cp_reg(&cpu->env, ri);
        if (kvm_sync) {
            /*
             * Only sync if the previous list->cpustate sync succeeded.
             * Rather than tracking the success/failure state for every
             * item in the list, we just recheck "does the raw write we must
             * have made in write_list_to_cpustate() read back OK" here.
             */
            uint64_t oldval = cpu->cpreg_values[i];

            if (oldval == newval) {
                continue;
            }

            write_raw_cp_reg(&cpu->env, ri, oldval);
            if (read_raw_cp_reg(&cpu->env, ri) != oldval) {
                continue;
            }

            write_raw_cp_reg(&cpu->env, ri, newval);
        }
        cpu->cpreg_values[i] = newval;
    }
    return ok;
}

bool write_list_to_cpustate(ARMCPU *cpu)
{
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        uint64_t v = cpu->cpreg_values[i];
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        /*
         * Write value and confirm it reads back as written
         * (to catch read-only registers and partially read-only
         * registers where the incoming migration value doesn't match)
         */
        write_raw_cp_reg(&cpu->env, ri, v);
        if (read_raw_cp_reg(&cpu->env, ri) != v) {
            ok = false;
        }
    }
    return ok;
}

static void add_cpreg_to_list(gpointer key, gpointer value, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint32_t regidx = (uintptr_t)key;
    const ARMCPRegInfo *ri = value;

    if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) {
        cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
        /* The value array need not be initialized at this point */
        cpu->cpreg_array_len++;
    }
}

static void count_cpreg(gpointer key, gpointer value, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    const ARMCPRegInfo *ri = value;

    if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) {
        cpu->cpreg_array_len++;
    }
}

void arm_init_cpreg_list(ARMCPU *cpu)
{
    /*
     * Initialise the cpreg_tuples[] array based on the cp_regs hash.
     * Note that we require cpreg_tuples[] to be sorted by key ID.
     */
    int arraylen;

    cpu->cpreg_array_len = 0;
    g_hash_table_foreach(cpu->cp_regs, count_cpreg, cpu);

    arraylen = cpu->cpreg_array_len;
    if (arraylen) {
        cpu->cpreg_indexes = g_new(uint64_t, arraylen);
        cpu->cpreg_values = g_new(uint64_t, arraylen);
        cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
        cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
    } else {
        cpu->cpreg_indexes = NULL;
        cpu->cpreg_values = NULL;
        cpu->cpreg_vmstate_indexes = NULL;
        cpu->cpreg_vmstate_values = NULL;
    }
    cpu->cpreg_vmstate_array_len = arraylen;
    cpu->cpreg_array_len = 0;

    g_hash_table_foreach(cpu->cp_regs, add_cpreg_to_list, cpu);

    assert(cpu->cpreg_array_len == arraylen);

    if (arraylen) {
        qsort(cpu->cpreg_indexes, arraylen, sizeof(uint64_t), compare_u64);
    }
}

bool arm_pan_enabled(CPUARMState *env)
{
    if (is_a64(env)) {
        if ((arm_hcr_el2_eff(env) & (HCR_NV | HCR_NV1)) == (HCR_NV | HCR_NV1)) {
            return false;
        }
        return env->pstate & PSTATE_PAN;
    } else {
        return env->uncached_cpsr & CPSR_PAN;
    }
}

/*
 * Some registers are not accessible from AArch32 EL3 if SCR.NS == 0.
 */
static CPAccessResult access_el3_aa32ns(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    if (!is_a64(env) && arm_current_el(env) == 3 &&
        arm_is_secure_below_el3(env)) {
        return CP_ACCESS_UNDEFINED;
    }
    return CP_ACCESS_OK;
}

/*
 * Some secure-only AArch32 registers trap to EL3 if used from
 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
 * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
 * We assume that the .access field is set to PL1_RW.
 */
static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
                                            const ARMCPRegInfo *ri,
                                            bool isread)
{
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        if (env->cp15.scr_el3 & SCR_EEL2) {
            return CP_ACCESS_TRAP_EL2;
        }
        return CP_ACCESS_TRAP_EL3;
    }
    /* This will be EL1 NS and EL2 NS, which just UNDEF */
    return CP_ACCESS_UNDEFINED;
}

/* Check for traps from EL1 due to HCR_EL2.TVM and HCR_EL2.TRVM.  */
CPAccessResult access_tvm_trvm(CPUARMState *env, const ARMCPRegInfo *ri,
                               bool isread)
{
    if (arm_current_el(env) == 1) {
        uint64_t trap = isread ? HCR_TRVM : HCR_TVM;
        if (arm_hcr_el2_eff(env) & trap) {
            return CP_ACCESS_TRAP_EL2;
        }
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TSW.  */
static CPAccessResult access_tsw(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TSW)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TACR.  */
static CPAccessResult access_tacr(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TACR)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    raw_write(env, ri, value);
    tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */
}

static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (raw_read(env, ri) != value) {
        /*
         * Unlike real hardware, the QEMU TLB uses virtual addresses,
         * not modified virtual addresses, so this causes a TLB flush.
         */
        tlb_flush(CPU(cpu));
        raw_write(env, ri, value);
    }
}

static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA)
        && !extended_addresses_enabled(env)) {
        /*
         * For VMSA (when not using the LPAE long descriptor page table
         * format) this register includes the ASID, so do a TLB flush.
         * For PMSA it is purely a process ID and no action is needed.
         */
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}

int alle1_tlbmask(CPUARMState *env)
{
    /*
     * Note that the 'ALL' scope must invalidate both stage 1 and
     * stage 2 translations, whereas most other scopes only invalidate
     * stage 1 translations.
     *
     * For AArch32 this is only used for TLBIALLNSNH and VTTBR
     * writes, so only needs to apply to NS PL1&0, not S PL1&0.
     */
    return (ARMMMUIdxBit_E10_1 |
            ARMMMUIdxBit_E10_1_PAN |
            ARMMMUIdxBit_E10_1_GCS |
            ARMMMUIdxBit_E10_0 |
            ARMMMUIdxBit_E10_0_GCS |
            ARMMMUIdxBit_Stage2 |
            ARMMMUIdxBit_Stage2_S);
}

static const ARMCPRegInfo cp_reginfo[] = {
    /*
     * Define the secure and non-secure FCSE identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3).  This allows
     * the secure register to be properly reset and migrated. There is also no
     * v8 EL1 version of the register so the non-secure instance stands alone.
     */
    { .name = "FCSEIDR",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    { .name = "FCSEIDR_S",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    /*
     * Define the secure and non-secure context identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3).  This allows
     * the secure register to be properly reset and migrated.  In the
     * non-secure case, the 32-bit register will have reset and migration
     * disabled during registration as it is handled by the 64-bit instance.
     */
    { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .fgt = FGT_CONTEXTIDR_EL1,
      .nv2_redirect_offset = 0x108 | NV2_REDIR_NV1,
      .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 13, 0, 1),
      .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 13, 0, 1),
      .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    { .name = "CONTEXTIDR_S", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
};

static const ARMCPRegInfo not_v8_cp_reginfo[] = {
    /*
     * NB: Some of these registers exist in v8 but with more precise
     * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
     */
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR",
      .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    /*
     * ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
     * For v6 and v5, these mappings are overly broad.
     */
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    /* Cache maintenance ops; some of this space may be overridden later. */
    { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
};

static const ARMCPRegInfo not_v6_cp_reginfo[] = {
    /*
     * Not all pre-v6 cores implemented this WFI, so this is slightly
     * over-broad.
     */
    { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_WFI },
};

static const ARMCPRegInfo not_v7_cp_reginfo[] = {
    /*
     * Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
     * is UNPREDICTABLE; we choose to NOP as most implementations do).
     */
    { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_WFI },
    /*
     * L1 cache lockdown. Not architectural in v6 and earlier but in practice
     * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
     * OMAPCP will override this space.
     */
    { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
      .resetvalue = 0 },
    { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
      .resetvalue = 0 },
    /* v6 doesn't have the cache ID registers but Linux reads them anyway */
    { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /*
     * We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
     * implementing it as RAZ means the "debug architecture version" bits
     * will read as a reserved value, which should cause Linux to not try
     * to use the debug hardware.
     */
    { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
};

static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    uint32_t mask = 0;

    /* In ARMv8 most bits of CPACR_EL1 are RES0. */
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /*
         * ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
         * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
         * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
         */
        if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))) {
            /* VFP coprocessor: cp10 & cp11 [23:20] */
            mask |= R_CPACR_ASEDIS_MASK |
                    R_CPACR_D32DIS_MASK |
                    R_CPACR_CP11_MASK |
                    R_CPACR_CP10_MASK;

            if (!arm_feature(env, ARM_FEATURE_NEON)) {
                /* ASEDIS [31] bit is RAO/WI */
                value |= R_CPACR_ASEDIS_MASK;
            }

            /*
             * VFPv3 and upwards with NEON implement 32 double precision
             * registers (D0-D31).
             */
            if (!cpu_isar_feature(aa32_simd_r32, env_archcpu(env))) {
                /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
                value |= R_CPACR_D32DIS_MASK;
            }
        }
        value &= mask;
    }

    /*
     * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
     * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
     */
    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        mask = R_CPACR_CP11_MASK | R_CPACR_CP10_MASK;
        value = (value & ~mask) | (env->cp15.cpacr_el1 & mask);
    }

    env->cp15.cpacr_el1 = value;
}
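
/*
 * For example, on such an AArch32 EL3 system with NSACR.CP10 == 0, a
 * Non-secure write of 0xf00000 (CP10/CP11 fully enabled) leaves the
 * stored CP10/CP11 fields [23:20] unchanged: the mask above keeps the
 * old env->cp15.cpacr_el1 bits and only the other bits take effect.
 */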

static uint64_t cpacr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
     * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
     */
    uint64_t value = env->cp15.cpacr_el1;

    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        value &= ~(R_CPACR_CP11_MASK | R_CPACR_CP10_MASK);
    }
    return value;
}

static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * Call cpacr_write() so that we reset with the correct RAO bits set
     * for our CPU features.
     */
    cpacr_write(env, ri, 0);
}

static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* Check if CPACR accesses are to be trapped to EL2 */
        if (arm_current_el(env) == 1 && arm_is_el2_enabled(env) &&
            FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TCPAC)) {
            return CP_ACCESS_TRAP_EL2;
        /* Check if CPACR accesses are to be trapped to EL3 */
        } else if (arm_current_el(env) < 3 &&
                   FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TCPAC)) {
            return CP_ACCESS_TRAP_EL3;
        }
    }

    return CP_ACCESS_OK;
}

static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    /* Check if CPTR accesses are set to trap to EL3 */
    if (arm_current_el(env) == 2 &&
        FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TCPAC)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

static const ARMCPRegInfo v6_cp_reginfo[] = {
    /* prefetch by MVA in v6, NOP in v7 */
    { .name = "MVA_prefetch",
      .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    /*
     * We need to break the TB after ISB to execute self-modifying code
     * correctly and also to take any pending interrupts immediately.
     * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag.
     */
    { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
    { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
                             offsetof(CPUARMState, cp15.ifar_ns) },
      .resetvalue = 0, },
    /*
     * Watchpoint Fault Address Register: should actually only be present
     * for 1136, 1176, 11MPCore.
     */
    { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
    { .name = "CPACR_EL1", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
      .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
      .fgt = FGT_CPACR_EL1,
      .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 1, 1, 2),
      .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 1, 0, 2),
      .nv2_redirect_offset = 0x100 | NV2_REDIR_NV1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
      .resetfn = cpacr_reset, .writefn = cpacr_write, .readfn = cpacr_read },
};

/*
 * Bits in MDCR_EL2 and MDCR_EL3 which pmu_counter_enabled() looks at.
 * We use these to decide whether we need to wrap a write to MDCR_EL2
 * or MDCR_EL3 in pmu_op_start()/pmu_op_finish() calls.
 */
#define MDCR_EL2_PMU_ENABLE_BITS \
    (MDCR_HPME | MDCR_HPMD | MDCR_HPMN | MDCR_HCCD | MDCR_HLP)
#define MDCR_EL3_PMU_ENABLE_BITS (MDCR_SPME | MDCR_SCCD)
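
/*
 * A sketch of how these masks are intended to be used (the old_mdcr and
 * new_mdcr names are illustrative; the real MDCR_EL2 write hook lives
 * elsewhere): only a write which changes one of the counter-enable bits
 * needs the pmu_op_start()/pmu_op_finish() bracket, e.g.
 *
 *     if ((old_mdcr ^ new_mdcr) & MDCR_EL2_PMU_ENABLE_BITS) {
 *         pmu_op_start(env);
 *         env->cp15.mdcr_el2 = new_mdcr;
 *         pmu_op_finish(env);
 *     } else {
 *         env->cp15.mdcr_el2 = new_mdcr;
 *     }
 */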

static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    /*
     * Note that even though the AArch64 view of this register has bits
     * [10:0] all RES0 we can only mask the bottom 5, to comply with the
     * architectural requirements for bits which are RES0 only in some
     * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
     * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
     */
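    /* For example, a write of 0xffffffff is stored as 0xffffffe0. */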
    raw_write(env, ri, value & ~0x1FULL);
}

static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    /* Begin with base v8.0 state.  */
    uint64_t valid_mask = 0x3fff;
    ARMCPU *cpu = env_archcpu(env);
    uint64_t changed;

    /*
     * Because SCR_EL3 is the "real" cpreg and SCR is the alias, reset always
     * passes the reginfo for SCR_EL3, which has type ARM_CP_STATE_AA64.
     * Instead, choose the format based on the mode of EL3.
     */
    if (arm_el_is_aa64(env, 3)) {
        value |= SCR_FW | SCR_AW;      /* RES1 */
        valid_mask &= ~SCR_NET;        /* RES0 */

        if (!cpu_isar_feature(aa64_aa32_el1, cpu) &&
            !cpu_isar_feature(aa64_aa32_el2, cpu)) {
            value |= SCR_RW;           /* RAO/WI */
        }
        if (cpu_isar_feature(aa64_ras, cpu)) {
            valid_mask |= SCR_TERR;
        }
        if (cpu_isar_feature(aa64_lor, cpu)) {
            valid_mask |= SCR_TLOR;
        }
        if (cpu_isar_feature(aa64_pauth, cpu)) {
            valid_mask |= SCR_API | SCR_APK;
        }
        if (cpu_isar_feature(aa64_sel2, cpu)) {
            valid_mask |= SCR_EEL2;
        } else if (cpu_isar_feature(aa64_rme, cpu)) {
            /* With RME and without SEL2, NS is RES1 (R_GSWWH, I_DJJQJ). */
            value |= SCR_NS;
        }
        if (cpu_isar_feature(aa64_mte, cpu)) {
            valid_mask |= SCR_ATA;
        }
        if (cpu_isar_feature(aa64_scxtnum, cpu)) {
            valid_mask |= SCR_ENSCXT;
        }
        if (cpu_isar_feature(aa64_doublefault, cpu)) {
            valid_mask |= SCR_EASE | SCR_NMEA;
        }
        if (cpu_isar_feature(aa64_sme, cpu)) {
            valid_mask |= SCR_ENTP2;
        }
        if (cpu_isar_feature(aa64_hcx, cpu)) {
            valid_mask |= SCR_HXEN;
        }
        if (cpu_isar_feature(aa64_fgt, cpu)) {
            valid_mask |= SCR_FGTEN;
        }
        if (cpu_isar_feature(aa64_rme, cpu)) {
            valid_mask |= SCR_NSE | SCR_GPF;
        }
        if (cpu_isar_feature(aa64_ecv, cpu)) {
            valid_mask |= SCR_ECVEN;
        }
        if (cpu_isar_feature(aa64_gcs, cpu)) {
            valid_mask |= SCR_GCSEN;
        }
        if (cpu_isar_feature(aa64_tcr2, cpu)) {
            valid_mask |= SCR_TCR2EN;
        }
        if (cpu_isar_feature(aa64_sctlr2, cpu)) {
            valid_mask |= SCR_SCTLR2EN;
        }
        if (cpu_isar_feature(aa64_s1pie, cpu) ||
            cpu_isar_feature(aa64_s2pie, cpu)) {
            valid_mask |= SCR_PIEN;
        }
        if (cpu_isar_feature(aa64_aie, cpu)) {
            valid_mask |= SCR_AIEN;
        }
        if (cpu_isar_feature(aa64_mec, cpu)) {
            valid_mask |= SCR_MECEN;
        }
    } else {
        valid_mask &= ~(SCR_RW | SCR_ST);
        if (cpu_isar_feature(aa32_ras, cpu)) {
            valid_mask |= SCR_TERR;
        }
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        valid_mask &= ~SCR_HCE;

        /*
         * On ARMv7, SMD (or SCD as it is called in v7) is only
         * supported if EL2 exists. The bit is UNK/SBZP when EL2 is
         * unavailable; in QEMU's ARMv7 emulation we force it to zero
         * in that case.
         * On ARMv8, this bit is always available.
         */
        if (arm_feature(env, ARM_FEATURE_V7) &&
            !arm_feature(env, ARM_FEATURE_V8)) {
            valid_mask &= ~SCR_SMD;
        }
    }

    /* Clear all-context RES0 bits.  */
    value &= valid_mask;
    changed = env->cp15.scr_el3 ^ value;
    env->cp15.scr_el3 = value;

    /*
     * If SCR_EL3.{NS,NSE} changes, i.e. change of security state,
     * we must invalidate all TLBs below EL3.
     */
    if (changed & (SCR_NS | SCR_NSE)) {
        tlb_flush_by_mmuidx(env_cpu(env), (ARMMMUIdxBit_E10_0 |
                                           ARMMMUIdxBit_E10_0_GCS |
                                           ARMMMUIdxBit_E20_0 |
                                           ARMMMUIdxBit_E20_0_GCS |
                                           ARMMMUIdxBit_E10_1 |
                                           ARMMMUIdxBit_E10_1_PAN |
                                           ARMMMUIdxBit_E10_1_GCS |
                                           ARMMMUIdxBit_E20_2 |
                                           ARMMMUIdxBit_E20_2_PAN |
                                           ARMMMUIdxBit_E20_2_GCS |
                                           ARMMMUIdxBit_E2 |
                                           ARMMMUIdxBit_E2_GCS));
    }
}

static void scr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * scr_write will set the RES1 bits on an AArch64-only CPU.
     * The reset value will be 0x30 on an AArch64-only CPU and 0 otherwise.
     */
    scr_write(env, ri, 0);
}

static CPAccessResult access_tid4(CPUARMState *env,
                                  const ARMCPRegInfo *ri,
                                  bool isread)
{
    if (arm_current_el(env) == 1 &&
        (arm_hcr_el2_eff(env) & (HCR_TID2 | HCR_TID4))) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}

static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);

    /*
     * Acquire the CSSELR index from the bank corresponding to the
     * security state of the CCSIDR access.
     */
    uint32_t index = A32_BANKED_REG_GET(env, csselr,
                                        ri->secure & ARM_CP_SECSTATE_S);

    return cpu->ccsidr[index];
}

static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    raw_write(env, ri, value & 0xf);
}

static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    CPUState *cs = env_cpu(env);
    bool el1 = arm_current_el(env) == 1;
    uint64_t hcr_el2 = el1 ? arm_hcr_el2_eff(env) : 0;
    uint64_t ret = 0;

    if (hcr_el2 & HCR_IMO) {
        if (cpu_test_interrupt(cs, CPU_INTERRUPT_VIRQ)) {
            ret |= CPSR_I;
        }
        if (cpu_test_interrupt(cs, CPU_INTERRUPT_VINMI)) {
            ret |= ISR_IS;
            ret |= CPSR_I;
        }
    } else {
        if (cpu_test_interrupt(cs, CPU_INTERRUPT_HARD)) {
            ret |= CPSR_I;
        }

        if (cpu_test_interrupt(cs, CPU_INTERRUPT_NMI)) {
            ret |= ISR_IS;
            ret |= CPSR_I;
        }
    }

    if (hcr_el2 & HCR_FMO) {
        if (cpu_test_interrupt(cs, CPU_INTERRUPT_VFIQ)) {
            ret |= CPSR_F;
        }
        if (cpu_test_interrupt(cs, CPU_INTERRUPT_VFNMI)) {
            ret |= ISR_FS;
            ret |= CPSR_F;
        }
    } else {
        if (cpu_test_interrupt(cs, CPU_INTERRUPT_FIQ)) {
            ret |= CPSR_F;
        }
    }

    if (hcr_el2 & HCR_AMO) {
        if (cpu_test_interrupt(cs, CPU_INTERRUPT_VSERR)) {
            ret |= CPSR_A;
        }
    }

    return ret;
}

static CPAccessResult access_aa64_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID1)) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult access_aa32_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        return access_aa64_tid1(env, ri, isread);
    }

    return CP_ACCESS_OK;
}

static const ARMCPRegInfo v7_cp_reginfo[] = {
    /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
    { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
      .access = PL1_R,
      .accessfn = access_tid4,
      .fgt = FGT_CCSIDR_EL1,
      .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
    { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
      .access = PL1_RW,
      .accessfn = access_tid4,
      .fgt = FGT_CSSELR_EL1,
      .writefn = csselr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
                             offsetof(CPUARMState, cp15.csselr_ns) } },
    /*
     * Auxiliary ID register: this actually has an IMPDEF value but for now
     * just RAZ for all cores:
     */
    { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
      .access = PL1_R, .type = ARM_CP_CONST,
      .accessfn = access_aa64_tid1,
      .fgt = FGT_AIDR_EL1,
      .resetvalue = 0 },
    /*
     * Auxiliary fault status registers: these also are IMPDEF, and we
     * choose to RAZ/WI for all cores.
     */
    { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .fgt = FGT_AFSR0_EL1,
      .nv2_redirect_offset = 0x128 | NV2_REDIR_NV1,
      .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 5, 1, 0),
      .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 5, 1, 0),
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .fgt = FGT_AFSR1_EL1,
      .nv2_redirect_offset = 0x130 | NV2_REDIR_NV1,
      .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 5, 1, 1),
      .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 5, 1, 1),
      .type = ARM_CP_CONST, .resetvalue = 0 },
    /*
     * MAIR can just read-as-written because we don't implement caches
     * and so don't need to care about memory attributes.
     */
    { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .fgt = FGT_MAIR_EL1,
      .nv2_redirect_offset = 0x140 | NV2_REDIR_NV1,
      .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 10, 2, 0),
      .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 10, 2, 0),
      .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
      .resetvalue = 0 },
    { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
      .resetvalue = 0 },
    /*
     * For non-long-descriptor page tables these are PRRR and NMRR;
     * regardless they still act as reads-as-written for QEMU.
     */
    /*
     * MAIR0/1 are defined separately from their 64-bit counterpart which
     * allows them to assign the correct fieldoffset based on the endianness
     * handled in the field definitions.
     */
    { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
                             offsetof(CPUARMState, cp15.mair0_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
                             offsetof(CPUARMState, cp15.mair1_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
      .fgt = FGT_ISR_EL1,
      .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
};

static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    value &= 1;
    env->teecr = value;
}

static CPAccessResult teecr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /*
     * HSTR.TTEE only exists in v7A, not v8A, but v8A doesn't have T2EE
     * at all, so we don't need to check whether we're v8A.
     */
    if (arm_current_el(env) < 2 && !arm_is_secure_below_el3(env) &&
        (env->cp15.hstr_el2 & HSTR_TTEE)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    if (arm_current_el(env) == 0 && (env->teecr & 1)) {
        return CP_ACCESS_TRAP_EL1;
    }
    return teecr_access(env, ri, isread);
}

static const ARMCPRegInfo t2ee_cp_reginfo[] = {
    { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
      .resetvalue = 0,
      .writefn = teecr_write, .accessfn = teecr_access },
    { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
      .accessfn = teehbr_access, .resetvalue = 0 },
};

static const ARMCPRegInfo v6k_cp_reginfo[] = {
    { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
      .access = PL0_RW,
      .fgt = FGT_TPIDR_EL0,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
    { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .fgt = FGT_TPIDR_EL0,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
      .access = PL0_R | PL1_W,
      .fgt = FGT_TPIDRRO_EL0,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
      .resetvalue = 0 },
    { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL0_R | PL1_W,
      .fgt = FGT_TPIDRRO_EL0,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
                             offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
      .access = PL1_RW,
      .fgt = FGT_TPIDR_EL1,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
    { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
      .access = PL1_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
      .resetvalue = 0 },
};

static void arm_gt_cntfrq_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);

    cpu->env.cp15.c14_cntfrq = cpu->gt_cntfrq_hz;
}

#ifndef CONFIG_USER_ONLY

static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    /*
     * CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
     * Writable only at the highest implemented exception level.
     */
    int el = arm_current_el(env);
    uint64_t hcr;
    uint32_t cntkctl;

    switch (el) {
    case 0:
        hcr = arm_hcr_el2_eff(env);
        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
            cntkctl = env->cp15.cnthctl_el2;
        } else {
            cntkctl = env->cp15.c14_cntkctl;
        }
        if (!extract32(cntkctl, 0, 2)) {
            return CP_ACCESS_TRAP_EL1;
        }
        break;
    case 1:
        if (!isread && ri->state == ARM_CP_STATE_AA32 &&
            arm_is_secure_below_el3(env)) {
            /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
            return CP_ACCESS_UNDEFINED;
        }
        break;
    case 2:
    case 3:
        break;
    }

    if (!isread && el < arm_highest_el(env)) {
        return CP_ACCESS_UNDEFINED;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
                                        bool isread)
{
    unsigned int cur_el = arm_current_el(env);
    bool has_el2 = arm_is_el2_enabled(env);
    uint64_t hcr = arm_hcr_el2_eff(env);

    switch (cur_el) {
    case 0:
        /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]CTEN. */
        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
            return (extract32(env->cp15.cnthctl_el2, timeridx, 1)
                    ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
        }

        /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
        if (!extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
            return CP_ACCESS_TRAP_EL1;
        }
        /* fall through */
    case 1:
        /* Check CNTHCTL_EL2.EL1PCTEN, which changes location based on E2H. */
        if (has_el2 && timeridx == GTIMER_PHYS &&
            (hcr & HCR_E2H
             ? !extract32(env->cp15.cnthctl_el2, 10, 1)
             : !extract32(env->cp15.cnthctl_el2, 0, 1))) {
            return CP_ACCESS_TRAP_EL2;
        }
        if (has_el2 && timeridx == GTIMER_VIRT) {
            if (FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, EL1TVCT)) {
                return CP_ACCESS_TRAP_EL2;
            }
        }
        break;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
                                      bool isread)
{
    unsigned int cur_el = arm_current_el(env);
    bool has_el2 = arm_is_el2_enabled(env);
    uint64_t hcr = arm_hcr_el2_eff(env);

    switch (cur_el) {
    case 0:
        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
            /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]TEN. */
            return (extract32(env->cp15.cnthctl_el2, 9 - timeridx, 1)
                    ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
        }

        /*
         * CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from
         * EL0 if EL0[PV]TEN is zero.
         */
        if (!extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
            return CP_ACCESS_TRAP_EL1;
        }
        /* fall through */

    case 1:
        if (has_el2 && timeridx == GTIMER_PHYS) {
            if (hcr & HCR_E2H) {
                /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PTEN. */
                if (!extract32(env->cp15.cnthctl_el2, 11, 1)) {
                    return CP_ACCESS_TRAP_EL2;
                }
            } else {
                /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */
                if (!extract32(env->cp15.cnthctl_el2, 1, 1)) {
                    return CP_ACCESS_TRAP_EL2;
                }
            }
        }
        if (has_el2 && timeridx == GTIMER_VIRT) {
            if (FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, EL1TVT)) {
                return CP_ACCESS_TRAP_EL2;
            }
        }
        break;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult gt_pct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    bool isread)
{
    return gt_counter_access(env, GTIMER_PHYS, isread);
}

static CPAccessResult gt_vct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    bool isread)
{
    return gt_counter_access(env, GTIMER_VIRT, isread);
}

static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    return gt_timer_access(env, GTIMER_PHYS, isread);
}

static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    return gt_timer_access(env, GTIMER_VIRT, isread);
}

static CPAccessResult gt_stimer_access(CPUARMState *env,
                                       const ARMCPRegInfo *ri,
                                       bool isread)
{
    /*
     * The AArch64 register view of the secure physical timer is
     * always accessible from EL3, and configurably accessible from
     * Secure EL1.
     */
    switch (arm_current_el(env)) {
    case 1:
        if (!arm_is_secure(env)) {
            return CP_ACCESS_UNDEFINED;
        }
        if (arm_is_el2_enabled(env)) {
            return CP_ACCESS_UNDEFINED;
        }
        if (!(env->cp15.scr_el3 & SCR_ST)) {
            return CP_ACCESS_TRAP_EL3;
        }
        return CP_ACCESS_OK;
    case 0:
    case 2:
        return CP_ACCESS_UNDEFINED;
    case 3:
        return CP_ACCESS_OK;
    default:
        g_assert_not_reached();
    }
}

static CPAccessResult gt_sel2timer_access(CPUARMState *env,
                                          const ARMCPRegInfo *ri,
                                          bool isread)
{
    /*
     * The AArch64 register views of the secure EL2 timers are mostly
     * accessible from EL3 and EL2, although they can also be trapped
     * to EL2 from EL1 depending on the nested-virt configuration.
     */
    switch (arm_current_el(env)) {
    case 0: /* UNDEFINED */
        return CP_ACCESS_UNDEFINED;
    case 1:
        if (!arm_is_secure(env)) {
            /* UNDEFINED */
            return CP_ACCESS_UNDEFINED;
        } else if (arm_hcr_el2_eff(env) & HCR_NV) {
            /* AArch64.SystemAccessTrap(EL2, 0x18) */
            return CP_ACCESS_TRAP_EL2;
        }
        /* UNDEFINED */
        return CP_ACCESS_UNDEFINED;
    case 2:
        if (!arm_is_secure(env)) {
            /* UNDEFINED */
            return CP_ACCESS_UNDEFINED;
        }
        return CP_ACCESS_OK;
    case 3:
        if (env->cp15.scr_el3 & SCR_EEL2) {
            return CP_ACCESS_OK;
        } else {
            return CP_ACCESS_UNDEFINED;
        }
    default:
        g_assert_not_reached();
    }
}

uint64_t gt_get_countervalue(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);

    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / gt_cntfrq_period_ns(cpu);
}
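
/*
 * For example, with the 62.5 MHz counter frequency QEMU has traditionally
 * used by default, gt_cntfrq_period_ns() is 1e9 / 62.5e6 = 16, so the
 * count above advances by one every 16 ns of QEMU_CLOCK_VIRTUAL time.
 */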
1354 
1355 static void gt_update_irq(ARMCPU *cpu, int timeridx)
1356 {
1357     CPUARMState *env = &cpu->env;
1358     uint64_t cnthctl = env->cp15.cnthctl_el2;
1359     ARMSecuritySpace ss = arm_security_space(env);
1360     /* ISTATUS && !IMASK */
1361     int irqstate = (env->cp15.c14_timer[timeridx].ctl & 6) == 4;
1362 
1363     /*
1364      * If bit CNTHCTL_EL2.CNT[VP]MASK is set, it overrides IMASK.
1365      * It is RES0 in Secure and NonSecure state.
1366      */
1367     if ((ss == ARMSS_Root || ss == ARMSS_Realm) &&
1368         ((timeridx == GTIMER_VIRT && (cnthctl & R_CNTHCTL_CNTVMASK_MASK)) ||
1369          (timeridx == GTIMER_PHYS && (cnthctl & R_CNTHCTL_CNTPMASK_MASK)))) {
1370         irqstate = 0;
1371     }
1372 
1373     qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
1374     trace_arm_gt_update_irq(timeridx, irqstate);
1375 }
1376 
1377 void gt_rme_post_el_change(ARMCPU *cpu, void *ignored)
1378 {
1379     /*
1380      * Changing security state between Root and Secure/NonSecure, which may
1381      * happen when switching EL, can change the effective value of CNTHCTL_EL2
1382      * mask bits. Update the IRQ state accordingly.
1383      */
1384     gt_update_irq(cpu, GTIMER_VIRT);
1385     gt_update_irq(cpu, GTIMER_PHYS);
1386 }
1387 
1388 static uint64_t gt_phys_raw_cnt_offset(CPUARMState *env)
1389 {
1390     if ((env->cp15.scr_el3 & SCR_ECVEN) &&
1391         FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, ECV) &&
1392         arm_is_el2_enabled(env) &&
1393         (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
1394         return env->cp15.cntpoff_el2;
1395     }
1396     return 0;
1397 }
1398 
1399 static uint64_t gt_indirect_access_timer_offset(CPUARMState *env, int timeridx)
1400 {
1401     /*
1402      * Return the timer offset to use for indirect accesses to the timer.
1403      * This is the Offset value as defined in D12.2.4.1 "Operation of the
1404      * CompareValue views of the timers".
1405      *
1406      * The condition here is not always the same as the condition for
1407      * whether to apply an offset register when doing a direct read of
1408      * the counter sysreg; those conditions are described in the
1409      * access pseudocode for each counter register.
1410      */
1411     switch (timeridx) {
1412     case GTIMER_PHYS:
1413         return gt_phys_raw_cnt_offset(env);
1414     case GTIMER_VIRT:
1415         return env->cp15.cntvoff_el2;
1416     case GTIMER_HYP:
1417     case GTIMER_SEC:
1418     case GTIMER_HYPVIRT:
1419     case GTIMER_S_EL2_PHYS:
1420     case GTIMER_S_EL2_VIRT:
1421         return 0;
1422     default:
1423         g_assert_not_reached();
1424     }
1425 }
1426 
1427 uint64_t gt_direct_access_timer_offset(CPUARMState *env, int timeridx)
1428 {
1429     /*
1430      * Return the timer offset to use for direct accesses to the
1431      * counter registers CNTPCT and CNTVCT, and for direct accesses
1432      * to the CNT*_TVAL registers.
1433      *
1434      * This isn't exactly the same as the indirect-access offset,
1435      * because here we also care about what EL the register access
1436      * is being made from.
1437      *
1438      * This corresponds to the access pseudocode for the registers.
1439      */
1440     uint64_t hcr;
1441 
1442     switch (timeridx) {
1443     case GTIMER_PHYS:
1444         if (arm_current_el(env) >= 2) {
1445             return 0;
1446         }
1447         return gt_phys_raw_cnt_offset(env);
1448     case GTIMER_VIRT:
1449         switch (arm_current_el(env)) {
1450         case 2:
1451             hcr = arm_hcr_el2_eff(env);
1452             if (hcr & HCR_E2H) {
1453                 return 0;
1454             }
1455             break;
1456         case 0:
1457             hcr = arm_hcr_el2_eff(env);
1458             if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
1459                 return 0;
1460             }
1461             break;
1462         }
1463         return env->cp15.cntvoff_el2;
1464     case GTIMER_HYP:
1465     case GTIMER_SEC:
1466     case GTIMER_HYPVIRT:
1467     case GTIMER_S_EL2_PHYS:
1468     case GTIMER_S_EL2_VIRT:
1469         return 0;
1470     default:
1471         g_assert_not_reached();
1472     }
1473 }
1474 
1475 static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
1476 {
1477     ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];
1478 
1479     if (gt->ctl & 1) {
1480         /*
1481          * Timer enabled: calculate and set current ISTATUS, irq, and
1482          * reset timer to when ISTATUS next has to change
1483          */
1484         uint64_t offset = gt_indirect_access_timer_offset(&cpu->env, timeridx);
1485         uint64_t count = gt_get_countervalue(&cpu->env);
1486         /* Note that this must be unsigned 64 bit arithmetic: */
1487         int istatus = count - offset >= gt->cval;
1488         uint64_t nexttick;
1489 
1490         gt->ctl = deposit32(gt->ctl, 2, 1, istatus);
1491 
1492         if (istatus) {
1493             /*
1494              * Next transition is when (count - offset) rolls back over to 0.
1495              * If offset > count then this is when count == offset;
1496              * if offset <= count then this is when count == offset + 2^64.
1497              * For the latter case we set nexttick to an "as far in the
1498              * future as possible" value and let the code below handle it.
1499              */
1500             if (offset > count) {
1501                 nexttick = offset;
1502             } else {
1503                 nexttick = UINT64_MAX;
1504             }
1505         } else {
1506             /*
1507              * Next transition is when (count - offset) == cval, i.e.
1508              * when count == (cval + offset).
1509              * If that would overflow, then again we set up the next interrupt
1510              * for "as far in the future as possible" for the code below.
1511              */
1512             if (uadd64_overflow(gt->cval, offset, &nexttick)) {
1513                 nexttick = UINT64_MAX;
1514             }
1515         }
1516         /*
1517          * Note that the desired next expiry time might be beyond the
1518          * signed-64-bit range of a QEMUTimer -- in this case we just
1519          * set the timer for as far in the future as possible. When the
1520          * timer expires we will reset the timer for any remaining period.
1521          */
1522         if (nexttick > INT64_MAX / gt_cntfrq_period_ns(cpu)) {
1523             timer_mod_ns(cpu->gt_timer[timeridx], INT64_MAX);
1524         } else {
1525             timer_mod(cpu->gt_timer[timeridx], nexttick);
1526         }
1527         trace_arm_gt_recalc(timeridx, nexttick);
1528     } else {
1529         /* Timer disabled: ISTATUS and timer output always clear */
1530         gt->ctl &= ~4;
1531         timer_del(cpu->gt_timer[timeridx]);
1532         trace_arm_gt_recalc_disabled(timeridx);
1533     }
1534     gt_update_irq(cpu, timeridx);
1535 }
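
     /*
      * Worked example of the rollover handling above, with
      * illustrative values: offset = 0x100 and
      * count = 0xffffffffffffff00 give (count - offset) =
      * 0xfffffffffffffe00, so istatus is 1 for any cval up to that
      * value. The next 0-crossing of (count - offset) would need
      * count == offset + 2^64, which a uint64_t cannot represent, so
      * nexttick is clamped to UINT64_MAX and the real deadline is
      * re-derived when the QEMUTimer fires and we recalculate.
      */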
1536 
1537 static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
1538                            int timeridx)
1539 {
1540     ARMCPU *cpu = env_archcpu(env);
1541 
1542     timer_del(cpu->gt_timer[timeridx]);
1543 }
1544 
1545 static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
1546 {
1547     uint64_t offset = gt_direct_access_timer_offset(env, GTIMER_PHYS);
1548     return gt_get_countervalue(env) - offset;
1549 }
1550 
1551 static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
1552 {
1553     uint64_t offset = gt_direct_access_timer_offset(env, GTIMER_VIRT);
1554     return gt_get_countervalue(env) - offset;
1555 }
1556 
1557 static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1558                           int timeridx,
1559                           uint64_t value)
1560 {
1561     trace_arm_gt_cval_write(timeridx, value);
1562     env->cp15.c14_timer[timeridx].cval = value;
1563     gt_recalc_timer(env_archcpu(env), timeridx);
1564 }
1565 
1566 static uint64_t do_tval_read(CPUARMState *env, int timeridx, uint64_t offset)
1567 {
1568     return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
1569                       (gt_get_countervalue(env) - offset));
1570 }
1571 
1572 static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
1573                              int timeridx)
1574 {
1575     uint64_t offset = gt_direct_access_timer_offset(env, timeridx);
1576 
1577     return do_tval_read(env, timeridx, offset);
1578 }
1579 
1580 static void do_tval_write(CPUARMState *env, int timeridx, uint64_t value,
1581                           uint64_t offset)
1582 {
1583     trace_arm_gt_tval_write(timeridx, value);
1584     env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
1585                                          sextract64(value, 0, 32);
1586     gt_recalc_timer(env_archcpu(env), timeridx);
1587 }
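
     /*
      * A sketch of the TVAL round trip implied by do_tval_read() and
      * do_tval_write(), assuming the counter does not advance in
      * between: the write sets
      * cval = (count - offset) + sextract64(value, 0, 32), so a
      * subsequent read returns
      * (uint32_t)(cval - (count - offset)) == (uint32_t)value.
      * Because the write sign-extends, a negative TVAL places cval in
      * the past, and an enabled timer fires immediately.
      */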
1588 
1589 static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1590                           int timeridx,
1591                           uint64_t value)
1592 {
1593     uint64_t offset = gt_direct_access_timer_offset(env, timeridx);
1594 
1595     do_tval_write(env, timeridx, value, offset);
1596 }
1597 
1598 static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
1599                          int timeridx,
1600                          uint64_t value)
1601 {
1602     ARMCPU *cpu = env_archcpu(env);
1603     uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;
1604 
1605     trace_arm_gt_ctl_write(timeridx, value);
1606     env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
1607     if ((oldval ^ value) & 1) {
1608         /* Enable toggled */
1609         gt_recalc_timer(cpu, timeridx);
1610     } else if ((oldval ^ value) & 2) {
1611         /*
1612          * IMASK toggled: don't need to recalculate,
1613          * just set the interrupt line based on ISTATUS
1614          */
1615         trace_arm_gt_imask_toggle(timeridx);
1616         gt_update_irq(cpu, timeridx);
1617     }
1618 }
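
     /*
      * For reference, the CNT*_CTL bits handled above are ENABLE
      * (bit 0), IMASK (bit 1) and the read-only ISTATUS (bit 2). Only
      * bits [1:0] are deposited from the guest value; ISTATUS is owned
      * by gt_recalc_timer(), which is also why an ENABLE toggle takes
      * the full recalc path while an IMASK-only toggle just re-drives
      * the interrupt line.
      */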
1619 
1620 static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1621 {
1622     gt_timer_reset(env, ri, GTIMER_PHYS);
1623 }
1624 
1625 static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1626                                uint64_t value)
1627 {
1628     gt_cval_write(env, ri, GTIMER_PHYS, value);
1629 }
1630 
1631 static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
1632 {
1633     return gt_tval_read(env, ri, GTIMER_PHYS);
1634 }
1635 
1636 static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1637                                uint64_t value)
1638 {
1639     gt_tval_write(env, ri, GTIMER_PHYS, value);
1640 }
1641 
1642 static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
1643                               uint64_t value)
1644 {
1645     gt_ctl_write(env, ri, GTIMER_PHYS, value);
1646 }
1647 
1648 static int gt_phys_redir_timeridx(CPUARMState *env)
1649 {
1650     switch (arm_mmu_idx(env)) {
1651     case ARMMMUIdx_E20_0:
1652     case ARMMMUIdx_E20_2:
1653     case ARMMMUIdx_E20_2_PAN:
1654         return GTIMER_HYP;
1655     default:
1656         return GTIMER_PHYS;
1657     }
1658 }
1659 
1660 static int gt_virt_redir_timeridx(CPUARMState *env)
1661 {
1662     switch (arm_mmu_idx(env)) {
1663     case ARMMMUIdx_E20_0:
1664     case ARMMMUIdx_E20_2:
1665     case ARMMMUIdx_E20_2_PAN:
1666         return GTIMER_HYPVIRT;
1667     default:
1668         return GTIMER_VIRT;
1669     }
1670 }
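
     /*
      * Example of the redirection implemented by the two helpers
      * above: in the E2H translation regimes (the ARMMMUIdx_E20_*
      * cases), the CNTP_* and CNTV_* encodings that normally reach the
      * EL1 timers are rerouted to the EL2 timers, so e.g. an EL0 write
      * to CNTV_CTL_EL0 under HCR_EL2.{E2H,TGE} = {1,1} lands in
      * GTIMER_HYPVIRT rather than GTIMER_VIRT.
      */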
1671 
1672 static uint64_t gt_phys_redir_cval_read(CPUARMState *env,
1673                                         const ARMCPRegInfo *ri)
1674 {
1675     int timeridx = gt_phys_redir_timeridx(env);
1676     return env->cp15.c14_timer[timeridx].cval;
1677 }
1678 
1679 static void gt_phys_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1680                                      uint64_t value)
1681 {
1682     int timeridx = gt_phys_redir_timeridx(env);
1683     gt_cval_write(env, ri, timeridx, value);
1684 }
1685 
1686 static uint64_t gt_phys_redir_tval_read(CPUARMState *env,
1687                                         const ARMCPRegInfo *ri)
1688 {
1689     int timeridx = gt_phys_redir_timeridx(env);
1690     return gt_tval_read(env, ri, timeridx);
1691 }
1692 
1693 static void gt_phys_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1694                                      uint64_t value)
1695 {
1696     int timeridx = gt_phys_redir_timeridx(env);
1697     gt_tval_write(env, ri, timeridx, value);
1698 }
1699 
1700 static uint64_t gt_phys_redir_ctl_read(CPUARMState *env,
1701                                        const ARMCPRegInfo *ri)
1702 {
1703     int timeridx = gt_phys_redir_timeridx(env);
1704     return env->cp15.c14_timer[timeridx].ctl;
1705 }
1706 
1707 static void gt_phys_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
1708                                     uint64_t value)
1709 {
1710     int timeridx = gt_phys_redir_timeridx(env);
1711     gt_ctl_write(env, ri, timeridx, value);
1712 }
1713 
1714 static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1715 {
1716     gt_timer_reset(env, ri, GTIMER_VIRT);
1717 }
1718 
1719 static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1720                                uint64_t value)
1721 {
1722     gt_cval_write(env, ri, GTIMER_VIRT, value);
1723 }
1724 
1725 static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
1726 {
1727     /*
1728      * This is CNTV_TVAL_EL02; unlike the underlying CNTV_TVAL_EL0
1729      * we always apply CNTVOFF_EL2. Special case that here rather
1730      * than going into the generic gt_tval_read() and then having
1731      * to re-detect that it's this register.
1732      * Note that the accessfn/perms mean we know we're at EL2 or EL3 here.
1733      */
1734     return do_tval_read(env, GTIMER_VIRT, env->cp15.cntvoff_el2);
1735 }
1736 
1737 static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1738                                uint64_t value)
1739 {
1740     /* Similarly for writes to CNTV_TVAL_EL02 */
1741     do_tval_write(env, GTIMER_VIRT, value, env->cp15.cntvoff_el2);
1742 }
1743 
1744 static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
1745                               uint64_t value)
1746 {
1747     gt_ctl_write(env, ri, GTIMER_VIRT, value);
1748 }
1749 
1750 static void gt_cnthctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
1751                              uint64_t value)
1752 {
1753     ARMCPU *cpu = env_archcpu(env);
1754     uint32_t oldval = env->cp15.cnthctl_el2;
1755     uint32_t valid_mask =
1756         R_CNTHCTL_EL0PCTEN_E2H1_MASK |
1757         R_CNTHCTL_EL0VCTEN_E2H1_MASK |
1758         R_CNTHCTL_EVNTEN_MASK |
1759         R_CNTHCTL_EVNTDIR_MASK |
1760         R_CNTHCTL_EVNTI_MASK |
1761         R_CNTHCTL_EL0VTEN_MASK |
1762         R_CNTHCTL_EL0PTEN_MASK |
1763         R_CNTHCTL_EL1PCTEN_E2H1_MASK |
1764         R_CNTHCTL_EL1PTEN_MASK;
1765 
1766     if (cpu_isar_feature(aa64_rme, cpu)) {
1767         valid_mask |= R_CNTHCTL_CNTVMASK_MASK | R_CNTHCTL_CNTPMASK_MASK;
1768     }
1769     if (cpu_isar_feature(aa64_ecv_traps, cpu)) {
1770         valid_mask |=
1771             R_CNTHCTL_EL1TVT_MASK |
1772             R_CNTHCTL_EL1TVCT_MASK |
1773             R_CNTHCTL_EL1NVPCT_MASK |
1774             R_CNTHCTL_EL1NVVCT_MASK |
1775             R_CNTHCTL_EVNTIS_MASK;
1776     }
1777     if (cpu_isar_feature(aa64_ecv, cpu)) {
1778         valid_mask |= R_CNTHCTL_ECV_MASK;
1779     }
1780 
1781     /* Clear RES0 bits */
1782     value &= valid_mask;
1783 
1784     raw_write(env, ri, value);
1785 
1786     if ((oldval ^ value) & R_CNTHCTL_CNTVMASK_MASK) {
1787         gt_update_irq(cpu, GTIMER_VIRT);
1788     }
         if ((oldval ^ value) & R_CNTHCTL_CNTPMASK_MASK) {
1789         gt_update_irq(cpu, GTIMER_PHYS);
1790     }
1791 }
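
     /*
      * Example of why a mask-bit toggle needs the IRQ update above:
      * with FEAT_RME, setting CNTHCTL_EL2.CNTVMASK forces the virtual
      * timer interrupt output low even while ISTATUS remains 1, so a
      * write that flips either mask bit must re-run gt_update_irq()
      * for that timer despite no timer state having changed. A single
      * write can flip both bits, which is why the two checks are
      * independent rather than chained.
      */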
1792 
1793 static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
1794                               uint64_t value)
1795 {
1796     ARMCPU *cpu = env_archcpu(env);
1797 
1798     trace_arm_gt_cntvoff_write(value);
1799     raw_write(env, ri, value);
1800     gt_recalc_timer(cpu, GTIMER_VIRT);
1801 }
1802 
1803 static uint64_t gt_virt_redir_cval_read(CPUARMState *env,
1804                                         const ARMCPRegInfo *ri)
1805 {
1806     int timeridx = gt_virt_redir_timeridx(env);
1807     return env->cp15.c14_timer[timeridx].cval;
1808 }
1809 
1810 static void gt_virt_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1811                                      uint64_t value)
1812 {
1813     int timeridx = gt_virt_redir_timeridx(env);
1814     gt_cval_write(env, ri, timeridx, value);
1815 }
1816 
1817 static uint64_t gt_virt_redir_tval_read(CPUARMState *env,
1818                                         const ARMCPRegInfo *ri)
1819 {
1820     int timeridx = gt_virt_redir_timeridx(env);
1821     return gt_tval_read(env, ri, timeridx);
1822 }
1823 
1824 static void gt_virt_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1825                                      uint64_t value)
1826 {
1827     int timeridx = gt_virt_redir_timeridx(env);
1828     gt_tval_write(env, ri, timeridx, value);
1829 }
1830 
1831 static uint64_t gt_virt_redir_ctl_read(CPUARMState *env,
1832                                        const ARMCPRegInfo *ri)
1833 {
1834     int timeridx = gt_virt_redir_timeridx(env);
1835     return env->cp15.c14_timer[timeridx].ctl;
1836 }
1837 
1838 static void gt_virt_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
1839                                     uint64_t value)
1840 {
1841     int timeridx = gt_virt_redir_timeridx(env);
1842     gt_ctl_write(env, ri, timeridx, value);
1843 }
1844 
1845 static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1846 {
1847     gt_timer_reset(env, ri, GTIMER_HYP);
1848 }
1849 
1850 static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1851                               uint64_t value)
1852 {
1853     gt_cval_write(env, ri, GTIMER_HYP, value);
1854 }
1855 
1856 static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
1857 {
1858     return gt_tval_read(env, ri, GTIMER_HYP);
1859 }
1860 
1861 static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1862                               uint64_t value)
1863 {
1864     gt_tval_write(env, ri, GTIMER_HYP, value);
1865 }
1866 
1867 static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
1868                               uint64_t value)
1869 {
1870     gt_ctl_write(env, ri, GTIMER_HYP, value);
1871 }
1872 
1873 static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1874 {
1875     gt_timer_reset(env, ri, GTIMER_SEC);
1876 }
1877 
1878 static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1879                               uint64_t value)
1880 {
1881     gt_cval_write(env, ri, GTIMER_SEC, value);
1882 }
1883 
1884 static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
1885 {
1886     return gt_tval_read(env, ri, GTIMER_SEC);
1887 }
1888 
1889 static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1890                               uint64_t value)
1891 {
1892     gt_tval_write(env, ri, GTIMER_SEC, value);
1893 }
1894 
1895 static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
1896                               uint64_t value)
1897 {
1898     gt_ctl_write(env, ri, GTIMER_SEC, value);
1899 }
1900 
1901 static void gt_sec_pel2_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1902 {
1903     gt_timer_reset(env, ri, GTIMER_S_EL2_PHYS);
1904 }
1905 
1906 static void gt_sec_pel2_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1907                                    uint64_t value)
1908 {
1909     gt_cval_write(env, ri, GTIMER_S_EL2_PHYS, value);
1910 }
1911 
1912 static uint64_t gt_sec_pel2_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
1913 {
1914     return gt_tval_read(env, ri, GTIMER_S_EL2_PHYS);
1915 }
1916 
1917 static void gt_sec_pel2_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1918                                    uint64_t value)
1919 {
1920     gt_tval_write(env, ri, GTIMER_S_EL2_PHYS, value);
1921 }
1922 
1923 static void gt_sec_pel2_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
1924                                   uint64_t value)
1925 {
1926     gt_ctl_write(env, ri, GTIMER_S_EL2_PHYS, value);
1927 }
1928 
1929 static void gt_sec_vel2_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1930 {
1931     gt_timer_reset(env, ri, GTIMER_S_EL2_VIRT);
1932 }
1933 
1934 static void gt_sec_vel2_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1935                                    uint64_t value)
1936 {
1937     gt_cval_write(env, ri, GTIMER_S_EL2_VIRT, value);
1938 }
1939 
1940 static uint64_t gt_sec_vel2_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
1941 {
1942     return gt_tval_read(env, ri, GTIMER_S_EL2_VIRT);
1943 }
1944 
1945 static void gt_sec_vel2_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1946                                    uint64_t value)
1947 {
1948     gt_tval_write(env, ri, GTIMER_S_EL2_VIRT, value);
1949 }
1950 
1951 static void gt_sec_vel2_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
1952                                   uint64_t value)
1953 {
1954     gt_ctl_write(env, ri, GTIMER_S_EL2_VIRT, value);
1955 }
1956 
1957 static void gt_hv_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1958 {
1959     gt_timer_reset(env, ri, GTIMER_HYPVIRT);
1960 }
1961 
1962 static void gt_hv_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1963                              uint64_t value)
1964 {
1965     gt_cval_write(env, ri, GTIMER_HYPVIRT, value);
1966 }
1967 
1968 static uint64_t gt_hv_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
1969 {
1970     return gt_tval_read(env, ri, GTIMER_HYPVIRT);
1971 }
1972 
1973 static void gt_hv_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1974                              uint64_t value)
1975 {
1976     gt_tval_write(env, ri, GTIMER_HYPVIRT, value);
1977 }
1978 
1979 static void gt_hv_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
1980                             uint64_t value)
1981 {
1982     gt_ctl_write(env, ri, GTIMER_HYPVIRT, value);
1983 }
1984 
1985 void arm_gt_ptimer_cb(void *opaque)
1986 {
1987     ARMCPU *cpu = opaque;
1988 
1989     gt_recalc_timer(cpu, GTIMER_PHYS);
1990 }
1991 
1992 void arm_gt_vtimer_cb(void *opaque)
1993 {
1994     ARMCPU *cpu = opaque;
1995 
1996     gt_recalc_timer(cpu, GTIMER_VIRT);
1997 }
1998 
1999 void arm_gt_htimer_cb(void *opaque)
2000 {
2001     ARMCPU *cpu = opaque;
2002 
2003     gt_recalc_timer(cpu, GTIMER_HYP);
2004 }
2005 
2006 void arm_gt_stimer_cb(void *opaque)
2007 {
2008     ARMCPU *cpu = opaque;
2009 
2010     gt_recalc_timer(cpu, GTIMER_SEC);
2011 }
2012 
2013 void arm_gt_sel2timer_cb(void *opaque)
2014 {
2015     ARMCPU *cpu = opaque;
2016 
2017     gt_recalc_timer(cpu, GTIMER_S_EL2_PHYS);
2018 }
2019 
2020 void arm_gt_sel2vtimer_cb(void *opaque)
2021 {
2022     ARMCPU *cpu = opaque;
2023 
2024     gt_recalc_timer(cpu, GTIMER_S_EL2_VIRT);
2025 }
2026 
2027 void arm_gt_hvtimer_cb(void *opaque)
2028 {
2029     ARMCPU *cpu = opaque;
2030 
2031     gt_recalc_timer(cpu, GTIMER_HYPVIRT);
2032 }
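
     /*
      * These callbacks are wired to per-timer QEMUTimers at CPU
      * realize time; a minimal sketch of that wiring (assuming the
      * timer_new() call in cpu.c) looks like:
      *
      *     cpu->gt_timer[GTIMER_PHYS] =
      *         timer_new(QEMU_CLOCK_VIRTUAL, GTIMER_SCALE,
      *                   arm_gt_ptimer_cb, cpu);
      *
      * so each expiry re-enters gt_recalc_timer(), which either raises
      * the interrupt or re-arms the timer for the remaining period.
      */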
2033 
2034 static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
2035     /*
2036      * Note that CNTFRQ is purely reads-as-written for the benefit
2037      * of software; writing it doesn't actually change the timer frequency.
2038      * Our reset value matches the fixed frequency we implement the timer at.
2039      */
2040     { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
2041       .type = ARM_CP_ALIAS,
2042       .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
2043       .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
2044     },
2045     { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
2046       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
2047       .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
2048       .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
2049       .resetfn = arm_gt_cntfrq_reset,
2050     },
2051     /* overall control: mostly access permissions */
2052     { .name = "CNTKCTL_EL1", .state = ARM_CP_STATE_BOTH,
2053       .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
2054       .access = PL1_RW,
2055       .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 14, 1, 0),
2056       .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 14, 1, 0),
2057       .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
2058       .resetvalue = 0,
2059     },
2060     /* per-timer control */
2061     { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
2062       .secure = ARM_CP_SECSTATE_NS,
2063       .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
2064       .accessfn = gt_ptimer_access,
2065       .fieldoffset = offsetoflow32(CPUARMState,
2066                                    cp15.c14_timer[GTIMER_PHYS].ctl),
2067       .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read,
2068       .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write,
2069     },
2070     { .name = "CNTP_CTL_S",
2071       .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
2072       .secure = ARM_CP_SECSTATE_S,
2073       .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
2074       .accessfn = gt_ptimer_access,
2075       .fieldoffset = offsetoflow32(CPUARMState,
2076                                    cp15.c14_timer[GTIMER_SEC].ctl),
2077       .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
2078     },
2079     { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
2080       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
2081       .type = ARM_CP_IO, .access = PL0_RW,
2082       .accessfn = gt_ptimer_access,
2083       .nv2_redirect_offset = 0x180 | NV2_REDIR_NV1,
2084       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
2085       .resetvalue = 0,
2086       .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read,
2087       .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write,
2088     },
2089     { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
2090       .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
2091       .accessfn = gt_vtimer_access,
2092       .fieldoffset = offsetoflow32(CPUARMState,
2093                                    cp15.c14_timer[GTIMER_VIRT].ctl),
2094       .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read,
2095       .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write,
2096     },
2097     { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
2098       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
2099       .type = ARM_CP_IO, .access = PL0_RW,
2100       .accessfn = gt_vtimer_access,
2101       .nv2_redirect_offset = 0x170 | NV2_REDIR_NV1,
2102       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
2103       .resetvalue = 0,
2104       .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read,
2105       .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write,
2106     },
2107     /* TimerValue views: 32-bit downcounting views of the underlying state */
2108     { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
2109       .secure = ARM_CP_SECSTATE_NS,
2110       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
2111       .accessfn = gt_ptimer_access,
2112       .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write,
2113     },
2114     { .name = "CNTP_TVAL_S",
2115       .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
2116       .secure = ARM_CP_SECSTATE_S,
2117       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
2118       .accessfn = gt_ptimer_access,
2119       .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write,
2120     },
2121     { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
2122       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
2123       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
2124       .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset,
2125       .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write,
2126     },
2127     { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
2128       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
2129       .accessfn = gt_vtimer_access,
2130       .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write,
2131     },
2132     { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
2133       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
2134       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
2135       .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset,
2136       .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write,
2137     },
2138     /* The counter itself */
2139     { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
2140       .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
2141       .accessfn = gt_pct_access,
2142       .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
2143     },
2144     { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
2145       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
2146       .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2147       .accessfn = gt_pct_access, .readfn = gt_cnt_read,
2148     },
2149     { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
2150       .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
2151       .accessfn = gt_vct_access,
2152       .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
2153     },
2154     { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
2155       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
2156       .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2157       .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
2158     },
2159     /* Comparison value, indicating when the timer goes off */
2160     { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
2161       .secure = ARM_CP_SECSTATE_NS,
2162       .access = PL0_RW,
2163       .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
2164       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
2165       .accessfn = gt_ptimer_access,
2166       .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read,
2167       .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write,
2168     },
2169     { .name = "CNTP_CVAL_S", .cp = 15, .crm = 14, .opc1 = 2,
2170       .secure = ARM_CP_SECSTATE_S,
2171       .access = PL0_RW,
2172       .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
2173       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
2174       .accessfn = gt_ptimer_access,
2175       .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
2176     },
2177     { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
2178       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
2179       .access = PL0_RW,
2180       .type = ARM_CP_IO,
2181       .nv2_redirect_offset = 0x178 | NV2_REDIR_NV1,
2182       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
2183       .resetvalue = 0, .accessfn = gt_ptimer_access,
2184       .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read,
2185       .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write,
2186     },
2187     { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
2188       .access = PL0_RW,
2189       .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
2190       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
2191       .accessfn = gt_vtimer_access,
2192       .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read,
2193       .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write,
2194     },
2195     { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
2196       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
2197       .access = PL0_RW,
2198       .type = ARM_CP_IO,
2199       .nv2_redirect_offset = 0x168 | NV2_REDIR_NV1,
2200       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
2201       .resetvalue = 0, .accessfn = gt_vtimer_access,
2202       .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read,
2203       .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write,
2204     },
2205     /*
2206      * Secure timer -- access is restricted to EL3, and configurably
2207      * to Secure EL1, via the accessfn.
2208      */
2209     { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64,
2210       .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0,
2211       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW,
2212       .accessfn = gt_stimer_access,
2213       .readfn = gt_sec_tval_read,
2214       .writefn = gt_sec_tval_write,
2215       .resetfn = gt_sec_timer_reset,
2216     },
2217     { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64,
2218       .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1,
2219       .type = ARM_CP_IO, .access = PL1_RW,
2220       .accessfn = gt_stimer_access,
2221       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl),
2222       .resetvalue = 0,
2223       .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
2224     },
2225     { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64,
2226       .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2,
2227       .type = ARM_CP_IO, .access = PL1_RW,
2228       .accessfn = gt_stimer_access,
2229       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
2230       .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
2231     },
2232 };
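
     /*
      * A minimal sketch of how a table like the one above is consumed,
      * assuming the usual registration path in
      * register_cp_regs_for_features():
      *
      *     if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
      *         define_arm_cp_regs(cpu, generic_timer_cp_reginfo);
      *     }
      *
      * define_arm_cp_regs() expands each entry into its
      * per-security-state and AArch32/AArch64 views before adding it
      * to the CPU's hash table of coprocessor registers.
      */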
2233 
2234 /*
2235  * FEAT_ECV adds extra views of CNTVCT_EL0 and CNTPCT_EL0 which
2236  * are "self-synchronizing". For QEMU all sysregs are self-synchronizing,
2237  * so our implementations here are identical to the normal registers.
2238  */
2239 static const ARMCPRegInfo gen_timer_ecv_cp_reginfo[] = {
2240     { .name = "CNTVCTSS", .cp = 15, .crm = 14, .opc1 = 9,
2241       .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
2242       .accessfn = gt_vct_access,
2243       .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
2244     },
2245     { .name = "CNTVCTSS_EL0", .state = ARM_CP_STATE_AA64,
2246       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 6,
2247       .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2248       .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
2249     },
2250     { .name = "CNTPCTSS", .cp = 15, .crm = 14, .opc1 = 8,
2251       .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
2252       .accessfn = gt_pct_access,
2253       .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
2254     },
2255     { .name = "CNTPCTSS_EL0", .state = ARM_CP_STATE_AA64,
2256       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 5,
2257       .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2258       .accessfn = gt_pct_access, .readfn = gt_cnt_read,
2259     },
2260 };
2261 
2262 static CPAccessResult gt_cntpoff_access(CPUARMState *env,
2263                                         const ARMCPRegInfo *ri,
2264                                         bool isread)
2265 {
2266     if (arm_current_el(env) == 2 && arm_feature(env, ARM_FEATURE_EL3) &&
2267         !(env->cp15.scr_el3 & SCR_ECVEN)) {
2268         return CP_ACCESS_TRAP_EL3;
2269     }
2270     return CP_ACCESS_OK;
2271 }
2272 
2273 static void gt_cntpoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
2274                               uint64_t value)
2275 {
2276     ARMCPU *cpu = env_archcpu(env);
2277 
2278     trace_arm_gt_cntpoff_write(value);
2279     raw_write(env, ri, value);
2280     gt_recalc_timer(cpu, GTIMER_PHYS);
2281 }
2282 
2283 static const ARMCPRegInfo gen_timer_cntpoff_reginfo = {
2284     .name = "CNTPOFF_EL2", .state = ARM_CP_STATE_AA64,
2285     .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 6,
2286     .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0,
2287     .accessfn = gt_cntpoff_access, .writefn = gt_cntpoff_write,
2288     .nv2_redirect_offset = 0x1a8,
2289     .fieldoffset = offsetof(CPUARMState, cp15.cntpoff_el2),
2290 };
2291 #else
2292 
2293 /*
2294  * In user-mode most of the generic timer registers are inaccessible;
2295  * however, modern kernels (4.12+) allow access to cntvct_el0.
2296  */
2297 
2298 static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
2299 {
2300     ARMCPU *cpu = env_archcpu(env);
2301 
2302     /*
2303      * Currently we have no support for QEMUTimer in linux-user so we
2304     * can't call gt_get_countervalue(env); instead we directly
2305     * call the lower-level functions.
2306      */
2307     return cpu_get_clock() / gt_cntfrq_period_ns(cpu);
2308 }
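
     /*
      * Worked example of the conversion above, assuming QEMU's default
      * CNTFRQ of 62.5 MHz (gt_cntfrq_period_ns() == 16): a
      * cpu_get_clock() reading of 1600000 ns yields a counter value of
      * 1600000 / 16 = 100000 ticks.
      */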
2309 
2310 static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
2311     { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
2312       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
2313       .access = PL0_R /* no PL1_RW in linux-user */,
2314       .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
2315       .resetfn = arm_gt_cntfrq_reset,
2316     },
2317     { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
2318       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
2319       .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2320       .readfn = gt_virt_cnt_read,
2321     },
2322 };
2323 
2324 /*
2325  * CNTVCTSS_EL0 has the same trap conditions as CNTVCT_EL0, so it also
2326  * is exposed to userspace by Linux.
2327  */
2328 static const ARMCPRegInfo gen_timer_ecv_cp_reginfo[] = {
2329     { .name = "CNTVCTSS_EL0", .state = ARM_CP_STATE_AA64,
2330       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 6,
2331       .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2332       .readfn = gt_virt_cnt_read,
2333     },
2334 };
2335 
2336 #endif
2337 
2338 static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
2339 {
2340     if (arm_feature(env, ARM_FEATURE_LPAE)) {
2341         raw_write(env, ri, value);
2342     } else if (arm_feature(env, ARM_FEATURE_V7)) {
2343         raw_write(env, ri, value & 0xfffff6ff);
2344     } else {
2345         raw_write(env, ri, value & 0xfffff1ff);
2346     }
2347 }
2348 
2349 /* Return basic MPU access permission bits.  */
2350 static uint32_t simple_mpu_ap_bits(uint32_t val)
2351 {
2352     uint32_t ret;
2353     uint32_t mask;
2354     int i;
2355     ret = 0;
2356     mask = 3;
2357     for (i = 0; i < 16; i += 2) {
2358         ret |= (val >> i) & mask;
2359         mask <<= 2;
2360     }
2361     return ret;
2362 }
2363 
2364 /* Pad basic MPU access permission bits to extended format.  */
2365 static uint32_t extended_mpu_ap_bits(uint32_t val)
2366 {
2367     uint32_t ret;
2368     uint32_t mask;
2369     int i;
2370     ret = 0;
2371     mask = 3;
2372     for (i = 0; i < 16; i += 2) {
2373         ret |= (val & mask) << i;
2374         mask <<= 2;
2375     }
2376     return ret;
2377 }
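
     /*
      * Worked example of the two packings above: the "simple" format
      * packs one 2-bit AP field per region back to back, while the
      * "extended" format gives each region its own nibble. A simple
      * value of 0x9 (region 0 AP = 1, region 1 AP = 2) pads to
      * extended 0x21, and simple_mpu_ap_bits(0x21) returns 0x9 again.
      */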
2378 
2379 static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
2380                                  uint64_t value)
2381 {
2382     env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value);
2383 }
2384 
2385 static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
2386 {
2387     return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap);
2388 }
2389 
2390 static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
2391                                  uint64_t value)
2392 {
2393     env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value);
2394 }
2395 
2396 static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
2397 {
2398     return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap);
2399 }
2400 
2401 static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri)
2402 {
2403     uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
2404 
2405     if (!u32p) {
2406         return 0;
2407     }
2408 
2409     u32p += env->pmsav7.rnr[M_REG_NS];
2410     return *u32p;
2411 }
2412 
2413 static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri,
2414                          uint64_t value)
2415 {
2416     ARMCPU *cpu = env_archcpu(env);
2417     uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
2418 
2419     if (!u32p) {
2420         return;
2421     }
2422 
2423     u32p += env->pmsav7.rnr[M_REG_NS];
2424     tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
2425     *u32p = value;
2426 }
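
     /*
      * Note the double indirection in the two helpers above: for these
      * registers, fieldoffset locates a uint32_t * in CPUARMState
      * (e.g. env->pmsav7.drbar) rather than the data itself. That
      * pointer is NULL until the per-region arrays are allocated at
      * realize time (only when pmsav7_dregion > 0), hence the NULL
      * checks, and it is then indexed by the current RGNR selection.
      */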
2427 
2428 static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2429                               uint64_t value)
2430 {
2431     ARMCPU *cpu = env_archcpu(env);
2432     uint32_t nrgs = cpu->pmsav7_dregion;
2433 
2434     if (value >= nrgs) {
2435         qemu_log_mask(LOG_GUEST_ERROR,
2436                       "PMSAv7 RGNR write >= # supported regions, %" PRIu32
2437                       " >= %" PRIu32 "\n", (uint32_t)value, nrgs);
2438         return;
2439     }
2440 
2441     raw_write(env, ri, value);
2442 }
2443 
2444 static void prbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
2445                           uint64_t value)
2446 {
2447     ARMCPU *cpu = env_archcpu(env);
2448 
2449     tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
2450     env->pmsav8.rbar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]] = value;
2451 }
2452 
2453 static uint64_t prbar_read(CPUARMState *env, const ARMCPRegInfo *ri)
2454 {
2455     return env->pmsav8.rbar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]];
2456 }
2457 
2458 static void prlar_write(CPUARMState *env, const ARMCPRegInfo *ri,
2459                           uint64_t value)
2460 {
2461     ARMCPU *cpu = env_archcpu(env);
2462 
2463     tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
2464     env->pmsav8.rlar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]] = value;
2465 }
2466 
2467 static uint64_t prlar_read(CPUARMState *env, const ARMCPRegInfo *ri)
2468 {
2469     return env->pmsav8.rlar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]];
2470 }
2471 
2472 static void prselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2473                            uint64_t value)
2474 {
2475     ARMCPU *cpu = env_archcpu(env);
2476 
2477     /*
2478      * Ignore writes that would select a not-implemented region.
2479      * This is architecturally UNPREDICTABLE.
2480      */
2481     if (value >= cpu->pmsav7_dregion) {
2482         return;
2483     }
2484 
2485     env->pmsav7.rnr[M_REG_NS] = value;
2486 }
2487 
2488 static void hprbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
2489                           uint64_t value)
2490 {
2491     ARMCPU *cpu = env_archcpu(env);
2492 
2493     tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
2494     env->pmsav8.hprbar[env->pmsav8.hprselr] = value;
2495 }
2496 
2497 static uint64_t hprbar_read(CPUARMState *env, const ARMCPRegInfo *ri)
2498 {
2499     return env->pmsav8.hprbar[env->pmsav8.hprselr];
2500 }
2501 
2502 static void hprlar_write(CPUARMState *env, const ARMCPRegInfo *ri,
2503                           uint64_t value)
2504 {
2505     ARMCPU *cpu = env_archcpu(env);
2506 
2507     tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
2508     env->pmsav8.hprlar[env->pmsav8.hprselr] = value;
2509 }
2510 
2511 static uint64_t hprlar_read(CPUARMState *env, const ARMCPRegInfo *ri)
2512 {
2513     return env->pmsav8.hprlar[env->pmsav8.hprselr];
2514 }
2515 
2516 static void hprenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2517                           uint64_t value)
2518 {
2519     uint32_t n;
2520     uint32_t bit;
2521     ARMCPU *cpu = env_archcpu(env);
2522 
2523     /* Ignore writes to unimplemented regions */
2524     int rmax = MIN(cpu->pmsav8r_hdregion, 32);
2525     value &= MAKE_64BIT_MASK(0, rmax);
2526 
2527     tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
2528 
2529     /* Register alias is only valid for first 32 indexes */
2530     for (n = 0; n < rmax; ++n) {
2531         bit = extract32(value, n, 1);
2532         env->pmsav8.hprlar[n] = deposit32(
2533                     env->pmsav8.hprlar[n], 0, 1, bit);
2534     }
2535 }
2536 
2537 static uint64_t hprenr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2538 {
2539     uint32_t n;
2540     uint32_t result = 0x0;
2541     ARMCPU *cpu = env_archcpu(env);
2542 
2543     /* Register alias is only valid for first 32 indexes */
2544     for (n = 0; n < MIN(cpu->pmsav8r_hdregion, 32); ++n) {
2545         if (env->pmsav8.hprlar[n] & 0x1) {
2546             result |= (0x1 << n);
2547         }
2548     }
2549     return result;
2550 }
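
     /*
      * Worked example of the HPRENR alias implemented above: bit n of
      * HPRENR mirrors bit 0 (the region enable) of HPRLAR<n>. With
      * hdregion = 8 and regions 0 and 3 enabled, hprenr_read() returns
      * 0x9; writing 0x2 then enables region 1 and disables regions 0
      * and 3 in a single store.
      */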
2551 
2552 static void hprselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2553                            uint64_t value)
2554 {
2555     ARMCPU *cpu = env_archcpu(env);
2556 
2557     /*
2558      * Ignore writes that would select a not-implemented region.
2559      * This is architecturally UNPREDICTABLE.
2560      */
2561     if (value >= cpu->pmsav8r_hdregion) {
2562         return;
2563     }
2564 
2565     env->pmsav8.hprselr = value;
2566 }
2567 
2568 static void pmsav8r_regn_write(CPUARMState *env, const ARMCPRegInfo *ri,
2569                           uint64_t value)
2570 {
2571     ARMCPU *cpu = env_archcpu(env);
2572     uint8_t index = (extract32(ri->opc0, 0, 1) << 4) |
2573                     (extract32(ri->crm, 0, 3) << 1) | extract32(ri->opc2, 2, 1);
2574 
2575     tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
2576 
2577     if (ri->opc1 & 4) {
2578         if (index >= cpu->pmsav8r_hdregion) {
2579             return;
2580         }
2581         if (ri->opc2 & 0x1) {
2582             env->pmsav8.hprlar[index] = value;
2583         } else {
2584             env->pmsav8.hprbar[index] = value;
2585         }
2586     } else {
2587         if (index >= cpu->pmsav7_dregion) {
2588             return;
2589         }
2590         if (ri->opc2 & 0x1) {
2591             env->pmsav8.rlar[M_REG_NS][index] = value;
2592         } else {
2593             env->pmsav8.rbar[M_REG_NS][index] = value;
2594         }
2595     }
2596 }
2597 
2598 static uint64_t pmsav8r_regn_read(CPUARMState *env, const ARMCPRegInfo *ri)
2599 {
2600     ARMCPU *cpu = env_archcpu(env);
2601     uint8_t index = (extract32(ri->opc0, 0, 1) << 4) |
2602                     (extract32(ri->crm, 0, 3) << 1) | extract32(ri->opc2, 2, 1);
2603 
2604     if (ri->opc1 & 4) {
2605         if (index >= cpu->pmsav8r_hdregion) {
2606             return 0x0;
2607         }
2608         if (ri->opc2 & 0x1) {
2609             return env->pmsav8.hprlar[index];
2610         } else {
2611             return env->pmsav8.hprbar[index];
2612         }
2613     } else {
2614         if (index >= cpu->pmsav7_dregion) {
2615             return 0x0;
2616         }
2617         if (ri->opc2 & 0x1) {
2618             return env->pmsav8.rlar[M_REG_NS][index];
2619         } else {
2620             return env->pmsav8.rbar[M_REG_NS][index];
2621         }
2622     }
2623 }
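
     /*
      * Example of the index decode shared by the two helpers above:
      * the PMSAv8-R PR{B,L}AR<n> encodings spread the region number
      * across the opcode fields as index = opc0[0]:crm[2:0]:opc2[2].
      * An encoding with opc0 bit 0 clear, crm = 5 and opc2 = 0b101
      * therefore selects (0 << 4) | (5 << 1) | 1 = 11, i.e. region 11,
      * and opc2 bit 0 = 1 picks the LAR half of the pair.
      */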
2624 
2625 static const ARMCPRegInfo pmsav8r_cp_reginfo[] = {
2626     { .name = "PRBAR",
2627       .cp = 15, .opc1 = 0, .crn = 6, .crm = 3, .opc2 = 0,
2628       .access = PL1_RW, .type = ARM_CP_NO_RAW,
2629       .accessfn = access_tvm_trvm,
2630       .readfn = prbar_read, .writefn = prbar_write },
2631     { .name = "PRLAR",
2632       .cp = 15, .opc1 = 0, .crn = 6, .crm = 3, .opc2 = 1,
2633       .access = PL1_RW, .type = ARM_CP_NO_RAW,
2634       .accessfn = access_tvm_trvm,
2635       .readfn = prlar_read, .writefn = prlar_write },
2636     { .name = "PRSELR", .resetvalue = 0,
2637       .cp = 15, .opc1 = 0, .crn = 6, .crm = 2, .opc2 = 1,
2638       .access = PL1_RW, .accessfn = access_tvm_trvm,
2639       .writefn = prselr_write,
2640       .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]) },
2641     { .name = "HPRBAR", .resetvalue = 0,
2642       .cp = 15, .opc1 = 4, .crn = 6, .crm = 3, .opc2 = 0,
2643       .access = PL2_RW, .type = ARM_CP_NO_RAW,
2644       .readfn = hprbar_read, .writefn = hprbar_write },
2645     { .name = "HPRLAR",
2646       .cp = 15, .opc1 = 4, .crn = 6, .crm = 3, .opc2 = 1,
2647       .access = PL2_RW, .type = ARM_CP_NO_RAW,
2648       .readfn = hprlar_read, .writefn = hprlar_write },
2649     { .name = "HPRSELR", .resetvalue = 0,
2650       .cp = 15, .opc1 = 4, .crn = 6, .crm = 2, .opc2 = 1,
2651       .access = PL2_RW,
2652       .writefn = hprselr_write,
2653       .fieldoffset = offsetof(CPUARMState, pmsav8.hprselr) },
2654     { .name = "HPRENR",
2655       .cp = 15, .opc1 = 4, .crn = 6, .crm = 1, .opc2 = 1,
2656       .access = PL2_RW, .type = ARM_CP_NO_RAW,
2657       .readfn = hprenr_read, .writefn = hprenr_write },
2658 };
2659 
2660 static const ARMCPRegInfo pmsav7_cp_reginfo[] = {
2661     /*
2662      * Reset for all these registers is handled in arm_cpu_reset(),
2663      * because the PMSAv7 is also used by M-profile CPUs, which do
2664      * not register cpregs but still need the state to be reset.
2665      */
2666     { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0,
2667       .access = PL1_RW, .type = ARM_CP_NO_RAW,
2668       .fieldoffset = offsetof(CPUARMState, pmsav7.drbar),
2669       .readfn = pmsav7_read, .writefn = pmsav7_write,
2670       .resetfn = arm_cp_reset_ignore },
2671     { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2,
2672       .access = PL1_RW, .type = ARM_CP_NO_RAW,
2673       .fieldoffset = offsetof(CPUARMState, pmsav7.drsr),
2674       .readfn = pmsav7_read, .writefn = pmsav7_write,
2675       .resetfn = arm_cp_reset_ignore },
2676     { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4,
2677       .access = PL1_RW, .type = ARM_CP_NO_RAW,
2678       .fieldoffset = offsetof(CPUARMState, pmsav7.dracr),
2679       .readfn = pmsav7_read, .writefn = pmsav7_write,
2680       .resetfn = arm_cp_reset_ignore },
2681     { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0,
2682       .access = PL1_RW,
2683       .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]),
2684       .writefn = pmsav7_rgnr_write,
2685       .resetfn = arm_cp_reset_ignore },
2686 };
2687 
2688 static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
2689     { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
2690       .access = PL1_RW, .type = ARM_CP_ALIAS,
2691       .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
2692       .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
2693     { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
2694       .access = PL1_RW, .type = ARM_CP_ALIAS,
2695       .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
2696       .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
2697     { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
2698       .access = PL1_RW,
2699       .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
2700       .resetvalue = 0, },
2701     { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
2702       .access = PL1_RW,
2703       .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
2704       .resetvalue = 0, },
2705     { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
2706       .access = PL1_RW,
2707       .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
2708     { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
2709       .access = PL1_RW,
2710       .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
2711     /* Protection region base and size registers */
2712     { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
2713       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2714       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) },
2715     { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
2716       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2717       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) },
2718     { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
2719       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2720       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
2721     { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
2722       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2723       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
2724     { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
2725       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2726       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
2727     { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
2728       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2729       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
2730     { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
2731       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2732       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
2733     { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
2734       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2735       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
2736 };
2737 
2738 static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2739                              uint64_t value)
2740 {
2741     ARMCPU *cpu = env_archcpu(env);
2742 
2743     if (!arm_feature(env, ARM_FEATURE_V8)) {
2744         if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
2745             /*
2746              * Pre-ARMv8, bits [21:19], [15:14] and [6:3] are UNK/SBZP when
2747              * using the Long-descriptor translation table format.
2748              */
2749             value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
2750         } else if (arm_feature(env, ARM_FEATURE_EL3)) {
2751             /*
2752              * In an implementation that includes the Security Extensions
2753              * TTBCR has additional fields PD0 [4] and PD1 [5] for
2754              * Short-descriptor translation table format.
2755              */
2756             value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N;
2757         } else {
2758             value &= TTBCR_N;
2759         }
2760     }
2761 
2762     if (arm_feature(env, ARM_FEATURE_LPAE)) {
2763         /*
2764          * With LPAE the TTBCR could result in a change of ASID
2765          * via the TTBCR.A1 bit, so do a TLB flush.
2766          */
2767         tlb_flush(CPU(cpu));
2768     }
2769     raw_write(env, ri, value);
2770 }
2771 
2772 static void vmsa_tcr_el12_write(CPUARMState *env, const ARMCPRegInfo *ri,
2773                                uint64_t value)
2774 {
2775     ARMCPU *cpu = env_archcpu(env);
2776 
2777     /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
2778     tlb_flush(CPU(cpu));
2779     raw_write(env, ri, value);
2780 }
2781 
2782 static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2783                             uint64_t value)
2784 {
2785     /* If the ASID changes (with a 64-bit write), we must flush the TLB.  */
2786     if (cpreg_field_type(ri) == MO_64 &&
2787         extract64(raw_read(env, ri) ^ value, 48, 16) != 0) {
2788         ARMCPU *cpu = env_archcpu(env);
2789         tlb_flush(CPU(cpu));
2790     }
2791     raw_write(env, ri, value);
2792 }
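
     /*
      * The ASID for these TTBRs lives in bits [63:48], so for example
      * a 64-bit TTBR0_EL1 write that only changes the translation
      * table base address skips the flush above, while one that
      * changes the top 16 bits triggers it.
      */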
2793 
2794 static void vmsa_tcr_ttbr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
2795                                     uint64_t value)
2796 {
2797     /*
2798      * If we are running with E2&0 regime, then an ASID is active.
2799      * Flush if that might be changing.  Note we're not checking
2800      * TCR_EL2.A1 to know if this is really the TTBRx_EL2 that
2801      * holds the active ASID, only checking the field that might.
2802      */
2803     if (extract64(raw_read(env, ri) ^ value, 48, 16) &&
2804         (arm_hcr_el2_eff(env) & HCR_E2H)) {
2805         uint16_t mask = ARMMMUIdxBit_E20_2 |
2806                         ARMMMUIdxBit_E20_2_PAN |
2807                         ARMMMUIdxBit_E20_2_GCS |
2808                         ARMMMUIdxBit_E20_0 |
2809                         ARMMMUIdxBit_E20_0_GCS;
2810         tlb_flush_by_mmuidx(env_cpu(env), mask);
2811     }
2812     raw_write(env, ri, value);
2813 }
2814 
2815 static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2816                         uint64_t value)
2817 {
2818     ARMCPU *cpu = env_archcpu(env);
2819     CPUState *cs = CPU(cpu);
2820 
2821     /*
2822      * A change in the VMID used by the stage 2 page table invalidates
2823      * both the stage 2 and the combined stage 1&2 TLBs (EL10_1 and EL10_0).
2824      */
2825     if (extract64(raw_read(env, ri) ^ value, 48, 16) != 0) {
2826         tlb_flush_by_mmuidx(cs, alle1_tlbmask(env));
2827     }
2828     raw_write(env, ri, value);
2829 }
2830 
2831 static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = {
2832     { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
2833       .access = PL1_RW, .accessfn = access_tvm_trvm, .type = ARM_CP_ALIAS,
2834       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s),
2835                              offsetoflow32(CPUARMState, cp15.dfsr_ns) }, },
2836     { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
2837       .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
2838       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s),
2839                              offsetoflow32(CPUARMState, cp15.ifsr_ns) } },
2840     { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0,
2841       .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
2842       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s),
2843                              offsetof(CPUARMState, cp15.dfar_ns) } },
2844     { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64,
2845       .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
2846       .access = PL1_RW, .accessfn = access_tvm_trvm,
2847       .fgt = FGT_FAR_EL1,
2848       .nv2_redirect_offset = 0x220 | NV2_REDIR_NV1,
2849       .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 6, 0, 0),
2850       .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 6, 0, 0),
2851       .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
2852       .resetvalue = 0, },
2853 };
2854 
2855 static const ARMCPRegInfo vmsa_cp_reginfo[] = {
2856     { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
2857       .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
2858       .access = PL1_RW, .accessfn = access_tvm_trvm,
2859       .fgt = FGT_ESR_EL1,
2860       .nv2_redirect_offset = 0x138 | NV2_REDIR_NV1,
2861       .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 5, 2, 0),
2862       .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 5, 2, 0),
2863       .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, },
2864     { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
2865       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0,
2866       .access = PL1_RW, .accessfn = access_tvm_trvm,
2867       .fgt = FGT_TTBR0_EL1,
2868       .nv2_redirect_offset = 0x200 | NV2_REDIR_NV1,
2869       .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 2, 0, 0),
2870       .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 2, 0, 0),
2871       .writefn = vmsa_ttbr_write, .resetvalue = 0, .raw_writefn = raw_write,
2872       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
2873                              offsetof(CPUARMState, cp15.ttbr0_ns) } },
2874     { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
2875       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1,
2876       .access = PL1_RW, .accessfn = access_tvm_trvm,
2877       .fgt = FGT_TTBR1_EL1,
2878       .nv2_redirect_offset = 0x210 | NV2_REDIR_NV1,
2879       .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 2, 0, 1),
2880       .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 2, 0, 1),
2881       .writefn = vmsa_ttbr_write, .resetvalue = 0, .raw_writefn = raw_write,
2882       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
2883                              offsetof(CPUARMState, cp15.ttbr1_ns) } },
2884     { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
2885       .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
2886       .access = PL1_RW, .accessfn = access_tvm_trvm,
2887       .fgt = FGT_TCR_EL1,
2888       .nv2_redirect_offset = 0x120 | NV2_REDIR_NV1,
2889       .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 2, 0, 2),
2890       .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 2, 0, 2),
2891       .writefn = vmsa_tcr_el12_write,
2892       .raw_writefn = raw_write,
2893       .resetvalue = 0,
2894       .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) },
2895     { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
2896       .access = PL1_RW, .accessfn = access_tvm_trvm,
2897       .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write,
2898       .raw_writefn = raw_write,
2899       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]),
2900                              offsetoflow32(CPUARMState, cp15.tcr_el[1])} },
2901 };
2902 
2903 /*
2904  * Note that unlike TTBCR, writing to TTBCR2 does not require flushing
2905  * the QEMU TLBs or adjusting cached masks.
2906  */
2907 static const ARMCPRegInfo ttbcr2_reginfo = {
2908     .name = "TTBCR2", .cp = 15, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 3,
2909     .access = PL1_RW, .accessfn = access_tvm_trvm,
2910     .type = ARM_CP_ALIAS,
2911     .bank_fieldoffsets = {
2912         offsetofhigh32(CPUARMState, cp15.tcr_el[3]),
2913         offsetofhigh32(CPUARMState, cp15.tcr_el[1]),
2914     },
2915 };
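
/*
 * Worked example (values assumed, per the offset macros above): the
 * 64-bit tcr_el[1] field backs two AArch32 views, so writing
 * 0x00000001 via TTBCR and 0x00000002 via TTBCR2 leaves
 * tcr_el[1] == 0x0000000200000001; offsetoflow32()/offsetofhigh32()
 * select the matching 32-bit half for each banked register.
 */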
2916 
2917 static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
2918                                 uint64_t value)
2919 {
2920     env->cp15.c15_ticonfig = value & 0xe7;
2921     /* The OS_TYPE bit in this register changes the reported CPUID! */
2922     env->cp15.c0_cpuid = (value & (1 << 5)) ?
2923         ARM_CPUID_TI915T : ARM_CPUID_TI925T;
2924 }
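
/*
 * Worked example (guest value assumed): a write of 0x27 stores
 * 0x27 & 0xe7 == 0x27 into c15_ticonfig; bit 5 (0x20) is set, so
 * subsequent CPUID reads report ARM_CPUID_TI915T rather than the
 * reset-time ARM_CPUID_TI925T.
 */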
2925 
2926 static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
2927                                 uint64_t value)
2928 {
2929     env->cp15.c15_threadid = value & 0xffff;
2930 }
2931 
2932 static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
2933                            uint64_t value)
2934 {
2935 #ifdef CONFIG_USER_ONLY
2936     g_assert_not_reached();
2937 #else
2938     /* Wait-for-interrupt (deprecated) */
2939     cpu_interrupt(env_cpu(env), CPU_INTERRUPT_HALT);
2940 #endif
2941 }
2942 
2943 static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
2944                                   uint64_t value)
2945 {
2946     /*
2947      * On OMAP there are registers indicating the max/min index of dcache lines
2948      * containing dirty data; cache flush operations have to reset these.
2949      */
2950     env->cp15.c15_i_max = 0x000;
2951     env->cp15.c15_i_min = 0xff0;
2952 }
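
/*
 * Note (interpretation assumed): max == 0x000 below min == 0xff0
 * encodes an empty dirty range, matching the reset values of the
 * IMAX/IMIN registers below: no dirty lines remain after a flush.
 */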
2953 
2954 static const ARMCPRegInfo omap_cp_reginfo[] = {
2955     { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
2956       .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
2957       .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
2958       .resetvalue = 0, },
2959     { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
2960       .access = PL1_RW, .type = ARM_CP_NOP },
2961     { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
2962       .access = PL1_RW,
2963       .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
2964       .writefn = omap_ticonfig_write },
2965     { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
2966       .access = PL1_RW,
2967       .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
2968     { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
2969       .access = PL1_RW, .resetvalue = 0xff0,
2970       .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
2971     { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
2972       .access = PL1_RW,
2973       .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
2974       .writefn = omap_threadid_write },
2975     { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
2976       .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
2977       .type = ARM_CP_NO_RAW,
2978       .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
2979     /*
2980      * TODO: Peripheral port remap register:
2981      * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
2982      * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
2983      * when MMU is off.
2984      */
2985     { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
2986       .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
2987       .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW,
2988       .writefn = omap_cachemaint_write },
2989     { .name = "C9", .cp = 15, .crn = 9,
2990       .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
2991       .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
2992 };
2993 
2994 static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
2995     /*
2996      * RAZ/WI the whole crn=15 space when we don't have a more specific
2997      * implementation for this IMPDEF space.
2998      * Ideally this should eventually disappear in favour of actually
2999      * implementing the correct behaviour for all cores.
3000      */
3001     { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
3002       .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
3003       .access = PL1_RW,
3004       .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
3005       .resetvalue = 0 },
3006 };
3007 
3008 static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
3009     /* Cache status: RAZ because we have no cache so it's always clean */
3010     { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
3011       .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
3012       .resetvalue = 0 },
3013 };
3014 
3015 static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
3016     /* We never have a block transfer operation in progress */
3017     { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
3018       .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
3019       .resetvalue = 0 },
3020     /* The cache ops themselves: these all NOP for QEMU */
3021     { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
3022       .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
3023     { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
3024       .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
3025     { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
3026       .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
3027     { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
3028       .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
3029     { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
3030       .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
3031     { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
3032       .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
3033 };
3034 
3035 static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
3036     /*
3037      * The cache test-and-clean instructions always return (1 << 30)
3038      * to indicate that there are no dirty cache lines.
3039      */
3040     { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
3041       .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
3042       .resetvalue = (1 << 30) },
3043     { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
3044       .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
3045       .resetvalue = (1 << 30) },
3046 };
3047 
3048 static const ARMCPRegInfo strongarm_cp_reginfo[] = {
3049     /* Ignore ReadBuffer accesses */
3050     { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
3051       .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
3052       .access = PL1_RW, .resetvalue = 0,
3053       .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW },
3054 };
3055 
3056 static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri)
3057 {
3058     unsigned int cur_el = arm_current_el(env);
3059 
3060     if (arm_is_el2_enabled(env) && cur_el == 1) {
3061         return env->cp15.vpidr_el2;
3062     }
3063     return raw_read(env, ri);
3064 }
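
/*
 * Usage sketch (hypervisor behaviour assumed): with EL2 enabled, a
 * hypervisor can present a different CPU model to its EL1 guest by
 * writing VPIDR_EL2; midr_read() then returns that value for EL1
 * reads of MIDR, while EL2 and EL3 still see the raw register.
 */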
3065 
3066 static uint64_t mpidr_read_val(CPUARMState *env)
3067 {
3068     ARMCPU *cpu = env_archcpu(env);
3069     uint64_t mpidr = cpu->mp_affinity;
3070 
3071     if (arm_feature(env, ARM_FEATURE_V7MP)) {
3072         mpidr |= (1U << 31);
3073         /*
3074          * Cores which are uniprocessor (non-coherent)
3075          * but still implement the MP extensions set
3076          * bit 30. (For instance, Cortex-R5).
3077          */
3078         if (cpu->mp_is_up) {
3079             mpidr |= (1u << 30);
3080         }
3081     }
3082     return mpidr;
3083 }
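
/*
 * Worked example (affinity value assumed): a V7MP core with
 * mp_affinity == 0x0101 reads MPIDR as 0x80000101 (bit 31 set); if it
 * is also a uniprocessor implementing the MP extensions (mp_is_up,
 * e.g. Cortex-R5), bit 30 is set as well, giving 0xc0000101.
 */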
3084 
3085 static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
3086 {
3087     unsigned int cur_el = arm_current_el(env);
3088 
3089     if (arm_is_el2_enabled(env) && cur_el == 1) {
3090         return env->cp15.vmpidr_el2;
3091     }
3092     return mpidr_read_val(env);
3093 }
3094 
3095 static const ARMCPRegInfo lpae_cp_reginfo[] = {
3096     /* AMAIR0 is mapped to AMAIR_EL1[31:0] */
3097     { .name = "AMAIR_EL1", .state = ARM_CP_STATE_BOTH,
3098       .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
3099       .access = PL1_RW, .accessfn = access_tvm_trvm,
3100       .fgt = FGT_AMAIR_EL1,
3101       .nv2_redirect_offset = 0x148 | NV2_REDIR_NV1,
3102       .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 10, 3, 0),
3103       .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 10, 3, 0),
3104       .type = ARM_CP_CONST, .resetvalue = 0 },
3105     /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
3106     { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
3107       .access = PL1_RW, .accessfn = access_tvm_trvm,
3108       .type = ARM_CP_CONST, .resetvalue = 0 },
3109     { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
3110       .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0,
3111       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s),
3112                              offsetof(CPUARMState, cp15.par_ns)} },
3113     { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
3114       .access = PL1_RW, .accessfn = access_tvm_trvm,
3115       .type = ARM_CP_64BIT | ARM_CP_ALIAS,
3116       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
3117                              offsetof(CPUARMState, cp15.ttbr0_ns) },
3118       .writefn = vmsa_ttbr_write, .raw_writefn = raw_write },
3119     { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
3120       .access = PL1_RW, .accessfn = access_tvm_trvm,
3121       .type = ARM_CP_64BIT | ARM_CP_ALIAS,
3122       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
3123                              offsetof(CPUARMState, cp15.ttbr1_ns) },
3124       .writefn = vmsa_ttbr_write, .raw_writefn = raw_write },
3125 };
3126 
3127 static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
3128 {
3129     return vfp_get_fpcr(env);
3130 }
3131 
3132 static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3133                             uint64_t value)
3134 {
3135     vfp_set_fpcr(env, value);
3136 }
3137 
3138 static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
3139 {
3140     return vfp_get_fpsr(env);
3141 }
3142 
3143 static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3144                             uint64_t value)
3145 {
3146     vfp_set_fpsr(env, value);
3147 }
3148 
3149 static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri,
3150                                        bool isread)
3151 {
3152     if (arm_current_el(env) == 0 && !(arm_sctlr(env, 0) & SCTLR_UMA)) {
3153         return CP_ACCESS_TRAP_EL1;
3154     }
3155     return CP_ACCESS_OK;
3156 }
3157 
3158 static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
3159                             uint64_t value)
3160 {
3161     env->daif = value & PSTATE_DAIF;
3162 }
3163 
3164 static uint64_t aa64_pan_read(CPUARMState *env, const ARMCPRegInfo *ri)
3165 {
3166     return env->pstate & PSTATE_PAN;
3167 }
3168 
3169 static void aa64_pan_write(CPUARMState *env, const ARMCPRegInfo *ri,
3170                            uint64_t value)
3171 {
3172     env->pstate = (env->pstate & ~PSTATE_PAN) | (value & PSTATE_PAN);
3173 }
3174 
3175 static const ARMCPRegInfo pan_reginfo = {
3176     .name = "PAN", .state = ARM_CP_STATE_AA64,
3177     .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 3,
3178     .type = ARM_CP_NO_RAW, .access = PL1_RW,
3179     .readfn = aa64_pan_read, .writefn = aa64_pan_write
3180 };
3181 
3182 static uint64_t aa64_uao_read(CPUARMState *env, const ARMCPRegInfo *ri)
3183 {
3184     return env->pstate & PSTATE_UAO;
3185 }
3186 
3187 static void aa64_uao_write(CPUARMState *env, const ARMCPRegInfo *ri,
3188                            uint64_t value)
3189 {
3190     env->pstate = (env->pstate & ~PSTATE_UAO) | (value & PSTATE_UAO);
3191 }
3192 
3193 static const ARMCPRegInfo uao_reginfo = {
3194     .name = "UAO", .state = ARM_CP_STATE_AA64,
3195     .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 4,
3196     .type = ARM_CP_NO_RAW, .access = PL1_RW,
3197     .readfn = aa64_uao_read, .writefn = aa64_uao_write
3198 };
3199 
3200 static uint64_t aa64_dit_read(CPUARMState *env, const ARMCPRegInfo *ri)
3201 {
3202     return env->pstate & PSTATE_DIT;
3203 }
3204 
3205 static void aa64_dit_write(CPUARMState *env, const ARMCPRegInfo *ri,
3206                            uint64_t value)
3207 {
3208     env->pstate = (env->pstate & ~PSTATE_DIT) | (value & PSTATE_DIT);
3209 }
3210 
3211 static const ARMCPRegInfo dit_reginfo = {
3212     .name = "DIT", .state = ARM_CP_STATE_AA64,
3213     .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 5,
3214     .type = ARM_CP_NO_RAW, .access = PL0_RW,
3215     .readfn = aa64_dit_read, .writefn = aa64_dit_write
3216 };
3217 
3218 static uint64_t aa64_ssbs_read(CPUARMState *env, const ARMCPRegInfo *ri)
3219 {
3220     return env->pstate & PSTATE_SSBS;
3221 }
3222 
3223 static void aa64_ssbs_write(CPUARMState *env, const ARMCPRegInfo *ri,
3224                            uint64_t value)
3225 {
3226     env->pstate = (env->pstate & ~PSTATE_SSBS) | (value & PSTATE_SSBS);
3227 }
3228 
3229 static const ARMCPRegInfo ssbs_reginfo = {
3230     .name = "SSBS", .state = ARM_CP_STATE_AA64,
3231     .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 6,
3232     .type = ARM_CP_NO_RAW, .access = PL0_RW,
3233     .readfn = aa64_ssbs_read, .writefn = aa64_ssbs_write
3234 };
3235 
3236 static CPAccessResult aa64_cacheop_poc_access(CPUARMState *env,
3237                                               const ARMCPRegInfo *ri,
3238                                               bool isread)
3239 {
3240     /* Cache invalidate/clean to Point of Coherency or Persistence...  */
3241     switch (arm_current_el(env)) {
3242     case 0:
3243         /* ... EL0 must trap to EL1 unless SCTLR_EL1.UCI is set.  */
3244         if (!(arm_sctlr(env, 0) & SCTLR_UCI)) {
3245             return CP_ACCESS_TRAP_EL1;
3246         }
3247         /* fall through */
3248     case 1:
3249         /* ... EL1 must trap to EL2 if HCR_EL2.TPCP is set.  */
3250         if (arm_hcr_el2_eff(env) & HCR_TPCP) {
3251             return CP_ACCESS_TRAP_EL2;
3252         }
3253         break;
3254     }
3255     return CP_ACCESS_OK;
3256 }
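
/*
 * Example trap routing (configuration assumed): an EL0 "DC CVAC" with
 * SCTLR_EL1.UCI == 0 traps to EL1; with UCI == 1 the check falls
 * through to the EL1 case, so the same instruction traps to EL2 when
 * HCR_EL2.TPCP is set, and succeeds only when neither trap applies.
 */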
3257 
3258 static CPAccessResult do_cacheop_pou_access(CPUARMState *env, uint64_t hcrflags)
3259 {
3260     /* Cache invalidate/clean to Point of Unification... */
3261     switch (arm_current_el(env)) {
3262     case 0:
3263         /* ... EL0 must trap to EL1 unless SCTLR_EL1.UCI is set.  */
3264         if (!(arm_sctlr(env, 0) & SCTLR_UCI)) {
3265             return CP_ACCESS_TRAP_EL1;
3266         }
3267         /* fall through */
3268     case 1:
3269         /* ... EL1 must trap to EL2 if relevant HCR_EL2 flags are set.  */
3270         if (arm_hcr_el2_eff(env) & hcrflags) {
3271             return CP_ACCESS_TRAP_EL2;
3272         }
3273         break;
3274     }
3275     return CP_ACCESS_OK;
3276 }
3277 
3278 static CPAccessResult access_ticab(CPUARMState *env, const ARMCPRegInfo *ri,
3279                                    bool isread)
3280 {
3281     return do_cacheop_pou_access(env, HCR_TICAB | HCR_TPU);
3282 }
3283 
3284 static CPAccessResult access_tocu(CPUARMState *env, const ARMCPRegInfo *ri,
3285                                   bool isread)
3286 {
3287     return do_cacheop_pou_access(env, HCR_TOCU | HCR_TPU);
3288 }
3289 
3290 static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
3291                                       bool isread)
3292 {
3293     int cur_el = arm_current_el(env);
3294 
3295     if (cur_el < 2) {
3296         uint64_t hcr = arm_hcr_el2_eff(env);
3297 
3298         if (cur_el == 0) {
3299             if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
3300                 if (!(env->cp15.sctlr_el[2] & SCTLR_DZE)) {
3301                     return CP_ACCESS_TRAP_EL2;
3302                 }
3303             } else {
3304                 if (!(env->cp15.sctlr_el[1] & SCTLR_DZE)) {
3305                     return CP_ACCESS_TRAP_EL1;
3306                 }
3307                 if (hcr & HCR_TDZ) {
3308                     return CP_ACCESS_TRAP_EL2;
3309                 }
3310             }
3311         } else if (hcr & HCR_TDZ) {
3312             return CP_ACCESS_TRAP_EL2;
3313         }
3314     }
3315     return CP_ACCESS_OK;
3316 }
3317 
3318 static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri)
3319 {
3320     ARMCPU *cpu = env_archcpu(env);
3321     int dzp_bit = 1 << 4;
3322 
3323     /* DZP indicates whether DC ZVA access is allowed */
3324     if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) {
3325         dzp_bit = 0;
3326     }
3327     return cpu->dcz_blocksize | dzp_bit;
3328 }
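
/*
 * Worked example (block size assumed): with dcz_blocksize == 4
 * (a 2^4-word, i.e. 64-byte, DC ZVA block), DCZID_EL0 reads as 0x4
 * while DC ZVA is permitted, or 0x14 (DZP, bit 4, set) when
 * aa64_zva_access() would deny the access.
 */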
3329 
3330 static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
3331                                     bool isread)
3332 {
3333     if (!(env->pstate & PSTATE_SP)) {
3334         /*
3335          * Access to SP_EL0 is undefined if it's being used as
3336          * the stack pointer.
3337          */
3338         return CP_ACCESS_UNDEFINED;
3339     }
3340     return CP_ACCESS_OK;
3341 }
3342 
3343 static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri)
3344 {
3345     return env->pstate & PSTATE_SP;
3346 }
3347 
3348 static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
3349 {
3350     update_spsel(env, val);
3351 }
3352 
3353 static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3354                         uint64_t value)
3355 {
3356     ARMCPU *cpu = env_archcpu(env);
3357 
3358     if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) {
3359         /* M bit is RAZ/WI for PMSA with no MPU implemented */
3360         value &= ~SCTLR_M;
3361     }
3362 
3363     /* ??? Lots of these bits are not implemented.  */
3364 
3365     if (ri->state == ARM_CP_STATE_AA64 && !cpu_isar_feature(aa64_mte, cpu)) {
3366         if (ri->opc1 == 6) { /* SCTLR_EL3 */
3367             value &= ~(SCTLR_ITFSB | SCTLR_TCF | SCTLR_ATA);
3368         } else {
3369             value &= ~(SCTLR_ITFSB | SCTLR_TCF0 | SCTLR_TCF |
3370                        SCTLR_ATA0 | SCTLR_ATA);
3371         }
3372     }
3373 
3374     if (raw_read(env, ri) == value) {
3375         /*
3376          * Skip the TLB flush if nothing actually changed; Linux likes
3377          * to do a lot of pointless SCTLR writes.
3378          */
3379         return;
3380     }
3381 
3382     raw_write(env, ri, value);
3383 
3384     /* This may enable/disable the MMU, so do a TLB flush.  */
3385     tlb_flush(CPU(cpu));
3386 }
3387 
3388 static void mdcr_el3_write(CPUARMState *env, const ARMCPRegInfo *ri,
3389                            uint64_t value)
3390 {
3391     /*
3392      * Some MDCR_EL3 bits affect whether PMU counters are running:
3393      * if we are trying to change any of those then we must
3394      * bracket this update with PMU start/finish calls.
3395      */
3396     bool pmu_op = (env->cp15.mdcr_el3 ^ value) & MDCR_EL3_PMU_ENABLE_BITS;
3397 
3398     if (pmu_op) {
3399         pmu_op_start(env);
3400     }
3401     env->cp15.mdcr_el3 = value;
3402     if (pmu_op) {
3403         pmu_op_finish(env);
3404     }
3405 }
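
/*
 * Design note (as implied by the comment above): the
 * pmu_op_start()/pmu_op_finish() bracket lets the counters accumulate
 * up to this write under the old enable bits and resume under the new
 * ones, so flipping an enable bit mid-run does not mis-attribute
 * counts.
 */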
3406 
3407 static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3408                        uint64_t value)
3409 {
3410     /* Not all bits defined for MDCR_EL3 exist in the AArch32 SDCR */
3411     mdcr_el3_write(env, ri, value & SDCR_VALID_MASK);
3412 }
3413 
3414 static void mdcr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
3415                            uint64_t value)
3416 {
3417     /*
3418      * Some MDCR_EL2 bits affect whether PMU counters are running:
3419      * if we are trying to change any of those then we must
3420      * bracket this update with PMU start/finish calls.
3421      */
3422     bool pmu_op = (env->cp15.mdcr_el2 ^ value) & MDCR_EL2_PMU_ENABLE_BITS;
3423 
3424     if (pmu_op) {
3425         pmu_op_start(env);
3426     }
3427     env->cp15.mdcr_el2 = value;
3428     if (pmu_op) {
3429         pmu_op_finish(env);
3430     }
3431 }
3432 
3433 static CPAccessResult access_nv1_with_nvx(uint64_t hcr_nv)
3434 {
3435     return hcr_nv == (HCR_NV | HCR_NV1) ? CP_ACCESS_TRAP_EL2 : CP_ACCESS_OK;
3436 }
3437 
3438 static CPAccessResult access_nv1(CPUARMState *env, const ARMCPRegInfo *ri,
3439                                  bool isread)
3440 {
3441     if (arm_current_el(env) == 1) {
3442         return access_nv1_with_nvx(arm_hcr_el2_nvx_eff(env));
3443     }
3444     return CP_ACCESS_OK;
3445 }
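
/*
 * Example (per access_nv1_with_nvx() above): an EL1 access through
 * this accessfn traps to EL2 only when the effective HCR_EL2.NV and
 * HCR_EL2.NV1 are both 1 with NV2 clear; any other combination, or
 * any other EL, is allowed through.
 */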
3446 
3447 static CPAccessResult access_nv1_or_exlock_el1(CPUARMState *env,
3448                                                const ARMCPRegInfo *ri,
3449                                                bool isread)
3450 {
3451     if (arm_current_el(env) == 1) {
3452         uint64_t nvx = arm_hcr_el2_nvx_eff(env);
3453 
3454         if (!isread &&
3455             (env->pstate & PSTATE_EXLOCK) &&
3456             (env->cp15.gcscr_el[1] & GCSCR_EXLOCKEN) &&
3457             !(nvx & HCR_NV1)) {
3458             return CP_ACCESS_EXLOCK;
3459         }
3460         return access_nv1_with_nvx(nvx);
3461     }
3462 
3463     /*
3464      * At EL2, since VHE redirection is done at translation time,
3465      * el_is_in_host is always false here, so EXLOCK does not apply.
3466      */
3467     return CP_ACCESS_OK;
3468 }
3469 
3470 static CPAccessResult access_exlock_el2(CPUARMState *env,
3471                                         const ARMCPRegInfo *ri, bool isread)
3472 {
3473     int el = arm_current_el(env);
3474 
3475     if (el == 3) {
3476         return CP_ACCESS_OK;
3477     }
3478 
3479     /*
3480      * Access to the EL2 register from EL1 means NV is set, and
3481      * EXLOCK has priority over an NV1 trap to EL2.
3482      */
3483     if (!isread &&
3484         (env->pstate & PSTATE_EXLOCK) &&
3485         (env->cp15.gcscr_el[el] & GCSCR_EXLOCKEN)) {
3486         return CP_ACCESS_EXLOCK;
3487     }
3488     return CP_ACCESS_OK;
3489 }
3490 
3491 static CPAccessResult access_exlock_el3(CPUARMState *env,
3492                                         const ARMCPRegInfo *ri, bool isread)
3493 {
3494     if (!isread &&
3495         (env->pstate & PSTATE_EXLOCK) &&
3496         (env->cp15.gcscr_el[3] & GCSCR_EXLOCKEN)) {
3497         return CP_ACCESS_EXLOCK;
3498     }
3499     return CP_ACCESS_OK;
3500 }
3501 
3502 #ifdef CONFIG_USER_ONLY
3503 /*
3504  * `IC IVAU` is handled to improve compatibility with JITs that dual-map their
3505  * code to get around W^X restrictions, where one region is writable and the
3506  * other is executable.
3507  *
3508  * Since the executable region is never written to we cannot detect code
3509  * changes when running in user mode, and rely on the emulated JIT telling us
3510  * that the code has changed by executing this instruction.
3511  */
3512 static void ic_ivau_write(CPUARMState *env, const ARMCPRegInfo *ri,
3513                           uint64_t value)
3514 {
3515     uint64_t icache_line_mask, start_address, end_address;
3516     const ARMCPU *cpu;
3517 
3518     cpu = env_archcpu(env);
3519 
3520     icache_line_mask = (4 << extract32(cpu->ctr, 0, 4)) - 1;
3521     start_address = value & ~icache_line_mask;
3522     end_address = value | icache_line_mask;
3523 
3524     mmap_lock();
3525 
3526     tb_invalidate_phys_range(env_cpu(env), start_address, end_address);
3527 
3528     mmap_unlock();
3529 }
3530 #endif
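
/*
 * Worked example (CTR value assumed): with CTR.IminLine == 4 the
 * line size is 4 << 4 == 64 bytes, so icache_line_mask == 0x3f and an
 * "IC IVAU" with value == 0x12345 invalidates translated blocks in
 * [0x12340, 0x1237f].
 */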
3531 
3532 static const ARMCPRegInfo v8_cp_reginfo[] = {
3533     /*
3534      * Minimal set of EL0-visible registers. This will need to be expanded
3535      * significantly for system emulation of AArch64 CPUs.
3536      */
3537     { .name = "NZCV", .state = ARM_CP_STATE_AA64,
3538       .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2,
3539       .access = PL0_RW, .type = ARM_CP_NZCV },
3540     { .name = "DAIF", .state = ARM_CP_STATE_AA64,
3541       .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2,
3542       .type = ARM_CP_NO_RAW,
3543       .access = PL0_RW, .accessfn = aa64_daif_access,
3544       .fieldoffset = offsetof(CPUARMState, daif),
3545       .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore },
3546     { .name = "FPCR", .state = ARM_CP_STATE_AA64,
3547       .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4,
3548       .access = PL0_RW, .type = ARM_CP_FPU,
3549       .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write },
3550     { .name = "FPSR", .state = ARM_CP_STATE_AA64,
3551       .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4,
3552       .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
3553       .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write },
3554     { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64,
3555       .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0,
3556       .access = PL0_R, .type = ARM_CP_NO_RAW,
3557       .fgt = FGT_DCZID_EL0,
3558       .readfn = aa64_dczid_read },
3559     { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64,
3560       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1,
3561       .access = PL0_W, .type = ARM_CP_DC_ZVA,
3562 #ifndef CONFIG_USER_ONLY
3563       /* Avoid overhead of an access check that always passes in user-mode */
3564       .accessfn = aa64_zva_access,
3565       .fgt = FGT_DCZVA,
3566 #endif
3567     },
3568     { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64,
3569       .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2,
3570       .access = PL1_R, .type = ARM_CP_CURRENTEL },
3571     /*
3572      * Instruction cache ops. All of these except `IC IVAU` are NOPs
3573      * because we don't emulate caches.
3574      */
3575     { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64,
3576       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
3577       .access = PL1_W, .type = ARM_CP_NOP,
3578       .fgt = FGT_ICIALLUIS,
3579       .accessfn = access_ticab },
3580     { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64,
3581       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
3582       .access = PL1_W, .type = ARM_CP_NOP,
3583       .fgt = FGT_ICIALLU,
3584       .accessfn = access_tocu },
3585     { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64,
3586       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1,
3587       .access = PL0_W,
3588       .fgt = FGT_ICIVAU,
3589       .accessfn = access_tocu,
3590 #ifdef CONFIG_USER_ONLY
3591       .type = ARM_CP_NO_RAW,
3592       .writefn = ic_ivau_write
3593 #else
3594       .type = ARM_CP_NOP
3595 #endif
3596     },
3597     /* Cache ops: all NOPs since we don't emulate caches */
3598     { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64,
3599       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
3600       .access = PL1_W, .accessfn = aa64_cacheop_poc_access,
3601       .fgt = FGT_DCIVAC,
3602       .type = ARM_CP_NOP },
3603     { .name = "DC_ISW", .state = ARM_CP_STATE_AA64,
3604       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
3605       .fgt = FGT_DCISW,
3606       .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
3607     { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64,
3608       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1,
3609       .access = PL0_W, .type = ARM_CP_NOP,
3610       .fgt = FGT_DCCVAC,
3611       .accessfn = aa64_cacheop_poc_access },
3612     { .name = "DC_CSW", .state = ARM_CP_STATE_AA64,
3613       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
3614       .fgt = FGT_DCCSW,
3615       .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
3616     { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64,
3617       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1,
3618       .access = PL0_W, .type = ARM_CP_NOP,
3619       .fgt = FGT_DCCVAU,
3620       .accessfn = access_tocu },
3621     { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64,
3622       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1,
3623       .access = PL0_W, .type = ARM_CP_NOP,
3624       .fgt = FGT_DCCIVAC,
3625       .accessfn = aa64_cacheop_poc_access },
3626     { .name = "DC_CISW", .state = ARM_CP_STATE_AA64,
3627       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
3628       .fgt = FGT_DCCISW,
3629       .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
3630     { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64,
3631       .type = ARM_CP_ALIAS,
3632       .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0,
3633       .access = PL1_RW, .resetvalue = 0,
3634       .fgt = FGT_PAR_EL1,
3635       .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]),
3636       .writefn = par_write },
3637     /* 32 bit cache operations */
3638     { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
3639       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_ticab },
3640     { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6,
3641       .type = ARM_CP_NOP, .access = PL1_W },
3642     { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
3643       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tocu },
3644     { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1,
3645       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tocu },
3646     { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6,
3647       .type = ARM_CP_NOP, .access = PL1_W },
3648     { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7,
3649       .type = ARM_CP_NOP, .access = PL1_W },
3650     { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
3651       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
3652     { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
3653       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
3654     { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1,
3655       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
3656     { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
3657       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
3658     { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1,
3659       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tocu },
3660     { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1,
3661       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
3662     { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
3663       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
3664     /* MMU Domain access control / MPU write buffer control */
3665     { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0,
3666       .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
3667       .writefn = dacr_write, .raw_writefn = raw_write,
3668       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
3669                              offsetoflow32(CPUARMState, cp15.dacr_ns) } },
3670     { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
3671       .type = ARM_CP_ALIAS,
3672       .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
3673       .access = PL1_RW, .accessfn = access_nv1_or_exlock_el1,
3674       .nv2_redirect_offset = 0x230 | NV2_REDIR_NV1,
3675       .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 4, 0, 1),
3676       .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 4, 0, 1),
3677       .fieldoffset = offsetof(CPUARMState, elr_el[1]) },
3678     { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
3679       .type = ARM_CP_ALIAS,
3680       .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
3681       .access = PL1_RW, .accessfn = access_nv1_or_exlock_el1,
3682       .nv2_redirect_offset = 0x160 | NV2_REDIR_NV1,
3683       .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 4, 0, 0),
3684       .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 4, 0, 0),
3685       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) },
3686     /*
3687      * We rely on the access checks not allowing the guest to write to the
3688      * state field when SPSel indicates that it's being used as the stack
3689      * pointer.
3690      */
3691     { .name = "SP_EL0", .state = ARM_CP_STATE_AA64,
3692       .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0,
3693       .access = PL1_RW, .accessfn = sp_el0_access,
3694       .type = ARM_CP_ALIAS,
3695       .fieldoffset = offsetof(CPUARMState, sp_el[0]) },
3696     { .name = "SP_EL1", .state = ARM_CP_STATE_AA64,
3697       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0,
3698       .nv2_redirect_offset = 0x240,
3699       .access = PL2_RW, .type = ARM_CP_ALIAS | ARM_CP_EL3_NO_EL2_KEEP,
3700       .fieldoffset = offsetof(CPUARMState, sp_el[1]) },
3701     { .name = "SPSel", .state = ARM_CP_STATE_AA64,
3702       .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0,
3703       .type = ARM_CP_NO_RAW,
3704       .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
3705     { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64,
3706       .type = ARM_CP_ALIAS,
3707       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0,
3708       .access = PL2_RW,
3709       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) },
3710     { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64,
3711       .type = ARM_CP_ALIAS,
3712       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1,
3713       .access = PL2_RW,
3714       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) },
3715     { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64,
3716       .type = ARM_CP_ALIAS,
3717       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2,
3718       .access = PL2_RW,
3719       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) },
3720     { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64,
3721       .type = ARM_CP_ALIAS,
3722       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3,
3723       .access = PL2_RW,
3724       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) },
3725     { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64,
3726       .type = ARM_CP_IO,
3727       .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1,
3728       .resetvalue = 0,
3729       .access = PL3_RW,
3730       .writefn = mdcr_el3_write,
3731       .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) },
3732     { .name = "SDCR", .type = ARM_CP_ALIAS | ARM_CP_IO,
3733       .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1,
3734       .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
3735       .writefn = sdcr_write,
3736       .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) },
3737 };
3738 
3739 /* These are present only when EL1 supports AArch32 */
3740 static const ARMCPRegInfo v8_aa32_el1_reginfo[] = {
3741     { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64,
3742       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0,
3743       .access = PL2_RW,
3744       .type = ARM_CP_ALIAS | ARM_CP_FPU | ARM_CP_EL3_NO_EL2_KEEP,
3745       .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]) },
3746     { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64,
3747       .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0,
3748       .access = PL2_RW, .resetvalue = 0, .type = ARM_CP_EL3_NO_EL2_KEEP,
3749       .writefn = dacr_write, .raw_writefn = raw_write,
3750       .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) },
3751     { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64,
3752       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1,
3753       .access = PL2_RW, .resetvalue = 0, .type = ARM_CP_EL3_NO_EL2_KEEP,
3754       .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) },
3755 };
3756 
3757 static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask)
3758 {
3759     ARMCPU *cpu = env_archcpu(env);
3760 
3761     if (arm_feature(env, ARM_FEATURE_V8)) {
3762         valid_mask |= MAKE_64BIT_MASK(0, 34);  /* ARMv8.0 */
3763     } else {
3764         valid_mask |= MAKE_64BIT_MASK(0, 28);  /* ARMv7VE */
3765     }
3766 
3767     if (arm_feature(env, ARM_FEATURE_EL3)) {
3768         valid_mask &= ~HCR_HCD;
3769     } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
3770         /*
3771          * Architecturally HCR.TSC is RES0 if EL3 is not implemented.
3772          * However, if we're using the SMC PSCI conduit then QEMU is
3773          * effectively acting like EL3 firmware and so the guest at
3774          * EL2 should retain the ability to prevent EL1 from being
3775          * able to make SMC calls into the ersatz firmware, so in
3776          * that case HCR.TSC should be read/write.
3777          */
3778         valid_mask &= ~HCR_TSC;
3779     }
3780 
3781     if (arm_feature(env, ARM_FEATURE_AARCH64)) {
3782         if (cpu_isar_feature(aa64_vh, cpu)) {
3783             valid_mask |= HCR_E2H;
3784         }
3785         if (cpu_isar_feature(aa64_ras, cpu)) {
3786             valid_mask |= HCR_TERR | HCR_TEA;
3787         }
3788         if (cpu_isar_feature(aa64_lor, cpu)) {
3789             valid_mask |= HCR_TLOR;
3790         }
3791         if (cpu_isar_feature(aa64_pauth, cpu)) {
3792             valid_mask |= HCR_API | HCR_APK;
3793         }
3794         if (cpu_isar_feature(aa64_mte, cpu)) {
3795             valid_mask |= HCR_ATA | HCR_DCT | HCR_TID5;
3796         }
3797         if (cpu_isar_feature(aa64_scxtnum, cpu)) {
3798             valid_mask |= HCR_ENSCXT;
3799         }
3800         if (cpu_isar_feature(aa64_fwb, cpu)) {
3801             valid_mask |= HCR_FWB;
3802         }
3803         if (cpu_isar_feature(aa64_rme, cpu)) {
3804             valid_mask |= HCR_GPF;
3805         }
3806         if (cpu_isar_feature(aa64_nv, cpu)) {
3807             valid_mask |= HCR_NV | HCR_NV1 | HCR_AT;
3808         }
3809         if (cpu_isar_feature(aa64_nv2, cpu)) {
3810             valid_mask |= HCR_NV2;
3811         }
3812     }
3813 
3814     if (cpu_isar_feature(any_evt, cpu)) {
3815         valid_mask |= HCR_TTLBIS | HCR_TTLBOS | HCR_TICAB | HCR_TOCU | HCR_TID4;
3816     } else if (cpu_isar_feature(any_half_evt, cpu)) {
3817         valid_mask |= HCR_TICAB | HCR_TOCU | HCR_TID4;
3818     }
3819 
3820     /* Clear RES0 bits.  */
3821     value &= valid_mask;
3822 
3823     /* RW is RAO/WI if EL1 is AArch64 only */
3824     if (arm_feature(env, ARM_FEATURE_AARCH64) &&
3825         !cpu_isar_feature(aa64_aa32_el1, cpu)) {
3826         value |= HCR_RW;
3827     }
3828 
3829     /*
3830      * These bits change the MMU setup:
3831      * HCR_VM enables stage 2 translation
3832      * HCR_PTW forbids certain page-table setups
3833      * HCR_DC disables stage1 and enables stage2 translation
3834      * HCR_DCT enables tagging on (disabled) stage1 translation
3835      * HCR_FWB changes the interpretation of stage2 descriptor bits
3836      * HCR_NV and HCR_NV1 affect interpretation of descriptor bits
3837      */
3838     if ((env->cp15.hcr_el2 ^ value) &
3839         (HCR_VM | HCR_PTW | HCR_DC | HCR_DCT | HCR_FWB | HCR_NV | HCR_NV1)) {
3840         tlb_flush(CPU(cpu));
3841     }
3842     env->cp15.hcr_el2 = value;
3843 
3844     /*
3845      * Updates to VI and VF require us to update the status of
3846      * virtual interrupts, which are the logical OR of these bits
3847      * and the state of the input lines from the GIC. (This requires
3848      * that we have the BQL, which is done by marking the
3849      * reginfo structs as ARM_CP_IO.)
3850      * Note that if a write to HCR pends a VIRQ or VFIQ or VINMI or
3851      * VFNMI, it is never possible for it to be taken immediately
3852      * because VIRQ, VFIQ, VINMI and VFNMI are masked unless running
3853      * at EL0 or EL1, and HCR can only be written at EL2.
3854      */
3855     g_assert(bql_locked());
3856     arm_cpu_update_virq(cpu);
3857     arm_cpu_update_vfiq(cpu);
3858     arm_cpu_update_vserr(cpu);
3859     if (cpu_isar_feature(aa64_nmi, cpu)) {
3860         arm_cpu_update_vinmi(cpu);
3861         arm_cpu_update_vfnmi(cpu);
3862     }
3863 }
3864 
3865 static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
3866 {
3867     do_hcr_write(env, value, 0);
3868 }
3869 
3870 static void hcr_writehigh(CPUARMState *env, const ARMCPRegInfo *ri,
3871                           uint64_t value)
3872 {
3873     /* Handle HCR2 write, i.e. write to high half of HCR_EL2 */
3874     value = deposit64(env->cp15.hcr_el2, 32, 32, value);
3875     do_hcr_write(env, value, MAKE_64BIT_MASK(0, 32));
3876 }
3877 
3878 static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri,
3879                          uint64_t value)
3880 {
3881     /* Handle HCR write, i.e. write to low half of HCR_EL2 */
3882     value = deposit64(env->cp15.hcr_el2, 0, 32, value);
3883     do_hcr_write(env, value, MAKE_64BIT_MASK(32, 32));
3884 }
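
/*
 * Worked example (register value assumed): starting from
 * hcr_el2 == 0x0000000080000000, an AArch32 write of 0x1 to HCR2
 * lands in hcr_writehigh(), where deposit64() yields
 * 0x0000000180000000; hcr_writelow() is the mirror image, replacing
 * only bits [31:0]. In each case the valid_mask passed to
 * do_hcr_write() keeps the preserved half from being cleared as RES0.
 */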
3885 
3886 static void hcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
3887 {
3888     /* hcr_write will set the RES1 bits on an AArch64-only CPU */
3889     hcr_write(env, ri, 0);
3890 }
3891 
3892 /*
3893  * Return the effective value of HCR_EL2 for the given security state.
3894  * Bits that are not included here:
3895  * RW       (read from SCR_EL3.RW as needed)
3896  */
3897 uint64_t arm_hcr_el2_eff_secstate(CPUARMState *env, ARMSecuritySpace space)
3898 {
3899     uint64_t ret = env->cp15.hcr_el2;
3900 
3901     assert(space != ARMSS_Root);
3902 
3903     if (!arm_is_el2_enabled_secstate(env, space)) {
3904         /*
3905          * "This register has no effect if EL2 is not enabled in the
3906          * current Security state".  This is ARMv8.4-SecEL2 speak for
3907          * !(SCR_EL3.NS==1 || SCR_EL3.EEL2==1).
3908          *
3909          * Prior to that, the language was "In an implementation that
3910          * includes EL3, when the value of SCR_EL3.NS is 0 the PE behaves
3911          * as if this field is 0 for all purposes other than a direct
3912          * read or write access of HCR_EL2".  With lots of enumeration
3913          * on a per-field basis.  In current QEMU, this condition
3914          * is arm_is_secure_below_el3.
3915          *
3916          * Since the v8.4 language applies to the entire register, and
3917          * appears to be backward compatible, use that.
3918          */
3919         return 0;
3920     }
3921 
3922     /*
3923      * For a cpu that supports both aarch64 and aarch32, we can set bits
3924      * in HCR_EL2 (e.g. via EL3) that are RES0 when we enter EL2 as aa32.
3925      * Ignore all of the bits in HCR+HCR2 that are not valid for aarch32.
3926      */
3927     if (!arm_el_is_aa64(env, 2)) {
3928         uint64_t aa32_valid;
3929 
3930         /*
3931          * These bits are up-to-date as of ARMv8.6.
3932          * For HCR, it's easiest to list just the 2 bits that are invalid.
3933          * For HCR2, list those that are valid.
3934          */
3935         aa32_valid = MAKE_64BIT_MASK(0, 32) & ~(HCR_RW | HCR_TDZ);
3936         aa32_valid |= (HCR_CD | HCR_ID | HCR_TERR | HCR_TEA | HCR_MIOCNCE |
3937                        HCR_TID4 | HCR_TICAB | HCR_TOCU | HCR_TTLBIS);
3938         ret &= aa32_valid;
3939     }
3940 
3941     if (ret & HCR_TGE) {
3942         /* These bits are up-to-date as of ARMv8.6.  */
3943         if (ret & HCR_E2H) {
3944             ret &= ~(HCR_VM | HCR_FMO | HCR_IMO | HCR_AMO |
3945                      HCR_BSU_MASK | HCR_DC | HCR_TWI | HCR_TWE |
3946                      HCR_TID0 | HCR_TID2 | HCR_TPCP | HCR_TPU |
3947                      HCR_TDZ | HCR_CD | HCR_ID | HCR_MIOCNCE |
3948                      HCR_TID4 | HCR_TICAB | HCR_TOCU | HCR_ENSCXT |
3949                      HCR_TTLBIS | HCR_TTLBOS | HCR_TID5);
3950         } else {
3951             ret |= HCR_FMO | HCR_IMO | HCR_AMO;
3952         }
3953         ret &= ~(HCR_SWIO | HCR_PTW | HCR_VF | HCR_VI | HCR_VSE |
3954                  HCR_FB | HCR_TID1 | HCR_TID3 | HCR_TSC | HCR_TACR |
3955                  HCR_TSW | HCR_TTLB | HCR_TVM | HCR_HCD | HCR_TRVM |
3956                  HCR_TLOR);
3957     }
3958 
3959     return ret;
3960 }
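
/*
 * Worked example (HCR_EL2 value assumed): with EL2 enabled and only
 * HCR_TGE set (E2H clear), the effective value reads back with
 * HCR_FMO | HCR_IMO | HCR_AMO forced to 1 while traps such as HCR_TVM
 * and HCR_TSW read as 0; with E2H also set, those three bits are
 * instead forced to 0.
 */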
3961 
3962 uint64_t arm_hcr_el2_eff(CPUARMState *env)
3963 {
3964     if (arm_feature(env, ARM_FEATURE_M)) {
3965         return 0;
3966     }
3967     return arm_hcr_el2_eff_secstate(env, arm_security_space_below_el3(env));
3968 }
3969 
3970 uint64_t arm_hcr_el2_nvx_eff(CPUARMState *env)
3971 {
3972     uint64_t hcr = arm_hcr_el2_eff(env);
3973 
3974     if (!(hcr & HCR_NV)) {
3975         return 0; /* CONSTRAINED UNPREDICTABLE wrt NV1 */
3976     }
3977     return hcr & (HCR_NV2 | HCR_NV1 | HCR_NV);
3978 }
3979 
3980 /*
3981  * Corresponds to ARM pseudocode function ELIsInHost().
3982  */
3983 bool el_is_in_host(CPUARMState *env, int el)
3984 {
3985     uint64_t mask;
3986 
3987     /*
3988      * Since we only care about E2H and TGE, we can skip arm_hcr_el2_eff().
3989      * Perform the simplest bit tests first, and validate EL2 afterward.
3990      */
3991     if (el & 1) {
3992         return false; /* EL1 or EL3 */
3993     }
3994 
3995     /*
3996      * Note that hcr_write() checks isar_feature_aa64_vh(),
3997      * aka HaveVirtHostExt(), in allowing HCR_E2H to be set.
3998      */
3999     mask = el ? HCR_E2H : HCR_E2H | HCR_TGE;
4000     if ((env->cp15.hcr_el2 & mask) != mask) {
4001         return false;
4002     }
4003 
4004     /* TGE and/or E2H set: double check those bits are currently legal. */
4005     return arm_is_el2_enabled(env) && arm_el_is_aa64(env, 2);
4006 }
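
/*
 * Example (per the mask selection above): for EL0 the mask is
 * HCR_E2H | HCR_TGE, so EL0 is "in host" only when both bits are set;
 * for EL2 the mask is HCR_E2H alone. In both cases EL2 must also be
 * enabled and using AArch64.
 */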
4007 
4008 static void hcrx_write(CPUARMState *env, const ARMCPRegInfo *ri,
4009                        uint64_t value)
4010 {
4011     ARMCPU *cpu = env_archcpu(env);
4012     uint64_t valid_mask = 0;
4013 
4014     if (cpu_isar_feature(aa64_mops, cpu)) {
4015         valid_mask |= HCRX_MSCEN | HCRX_MCE2;
4016     }
4017     if (cpu_isar_feature(aa64_nmi, cpu)) {
4018         valid_mask |= HCRX_TALLINT | HCRX_VINMI | HCRX_VFNMI;
4019     }
4020     if (cpu_isar_feature(aa64_cmow, cpu)) {
4021         valid_mask |= HCRX_CMOW;
4022     }
4023     if (cpu_isar_feature(aa64_xs, cpu)) {
4024         valid_mask |= HCRX_FGTNXS | HCRX_FNXS;
4025     }
4026     if (cpu_isar_feature(aa64_tcr2, cpu)) {
4027         valid_mask |= HCRX_TCR2EN;
4028     }
4029     if (cpu_isar_feature(aa64_sctlr2, cpu)) {
4030         valid_mask |= HCRX_SCTLR2EN;
4031     }
4032     if (cpu_isar_feature(aa64_gcs, cpu)) {
4033         valid_mask |= HCRX_GCSEN;
4034     }
4035 
4036     /* Clear RES0 bits.  */
4037     env->cp15.hcrx_el2 = value & valid_mask;
4038 
4039     /*
4040      * Updates to VINMI and VFNMI require us to update the status of
4041      * the virtual NMIs, which are the logical OR of these bits
4042      * and the state of the input lines from the GIC. (This requires
4043      * that we have the BQL, which is done by marking the
4044      * reginfo structs as ARM_CP_IO.)
4045      * Note that if a write to HCRX pends a VINMI or VFNMI it is never
4046      * possible for it to be taken immediately, because VINMI and
4047      * VFNMI are masked unless running at EL0 or EL1, and HCRX
4048      * can only be written at EL2.
4049      */
4050     if (cpu_isar_feature(aa64_nmi, cpu)) {
4051         g_assert(bql_locked());
4052         arm_cpu_update_vinmi(cpu);
4053         arm_cpu_update_vfnmi(cpu);
4054     }
4055 }
4056 
4057 static CPAccessResult access_hxen(CPUARMState *env, const ARMCPRegInfo *ri,
4058                                   bool isread)
4059 {
4060     if (arm_current_el(env) == 2
4061         && arm_feature(env, ARM_FEATURE_EL3)
4062         && !(env->cp15.scr_el3 & SCR_HXEN)) {
4063         return CP_ACCESS_TRAP_EL3;
4064     }
4065     return CP_ACCESS_OK;
4066 }
4067 
4068 static const ARMCPRegInfo hcrx_el2_reginfo = {
4069     .name = "HCRX_EL2", .state = ARM_CP_STATE_AA64,
4070     .type = ARM_CP_IO,
4071     .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 2,
4072     .access = PL2_RW, .writefn = hcrx_write, .accessfn = access_hxen,
4073     .nv2_redirect_offset = 0xa0,
4074     .fieldoffset = offsetof(CPUARMState, cp15.hcrx_el2),
4075 };
4076 
4077 /* Return the effective value of HCRX_EL2.  */
4078 uint64_t arm_hcrx_el2_eff(CPUARMState *env)
4079 {
4080     /*
4081      * The bits in this register behave as 0 for all purposes other than
4082      * direct reads of the register if SCR_EL3.HXEn is 0.
4083      * If EL2 is not enabled in the current security state, then each
4084      * bit may behave as if 0 or as if 1, depending on the bit.
4085      * For the moment, we treat the EL2-disabled case as taking
4086      * priority over the HXEn-disabled case. This is true for the only
4087      * bit for a feature which we implement where the answer is different
4088      * for the two cases (MSCEn for FEAT_MOPS).
4089      * This may need to be revisited for future bits.
4090      */
4091     if (!arm_is_el2_enabled(env)) {
4092         ARMCPU *cpu = env_archcpu(env);
4093         uint64_t hcrx = 0;
4094 
4095         /* Bits whose effective value is 1 if EL2 is not enabled. */
4096         if (cpu_isar_feature(aa64_mops, cpu)) {
4097             hcrx |= HCRX_MSCEN;
4098         }
4099         if (cpu_isar_feature(aa64_tcr2, cpu)) {
4100             hcrx |= HCRX_TCR2EN;
4101         }
4102         if (cpu_isar_feature(aa64_sctlr2, cpu)) {
4103             hcrx |= HCRX_SCTLR2EN;
4104         }
4105         if (cpu_isar_feature(aa64_gcs, cpu)) {
4106             hcrx |= HCRX_GCSEN;
4107         }
4108         return hcrx;
4109     }
4110     if (arm_feature(env, ARM_FEATURE_EL3) && !(env->cp15.scr_el3 & SCR_HXEN)) {
4111         return 0;
4112     }
4113     return env->cp15.hcrx_el2;
4114 }
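
/*
 * Worked example (features assumed): on a CPU with FEAT_MOPS but with
 * EL2 disabled in the current security state, the returned value
 * includes HCRX_MSCEN, so the memory-copy/set instructions remain
 * enabled; with EL2 enabled but SCR_EL3.HXEn == 0, the result is 0
 * instead.
 */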
4115 
4116 static void cptr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
4117                            uint64_t value)
4118 {
4119     /*
4120      * For A-profile AArch32 EL3, if NSACR.CP10
4121      * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
4122      */
4123     if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
4124         !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
4125         uint64_t mask = R_HCPTR_TCP11_MASK | R_HCPTR_TCP10_MASK;
4126         value = (value & ~mask) | (env->cp15.cptr_el[2] & mask);
4127     }
4128     env->cp15.cptr_el[2] = value;
4129 }
4130 
4131 static uint64_t cptr_el2_read(CPUARMState *env, const ARMCPRegInfo *ri)
4132 {
4133     /*
4134      * For A-profile AArch32 EL3, if NSACR.CP10
4135      * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
4136      */
4137     uint64_t value = env->cp15.cptr_el[2];
4138 
4139     if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
4140         !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
4141         value |= R_HCPTR_TCP11_MASK | R_HCPTR_TCP10_MASK;
4142     }
4143     return value;
4144 }
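
/*
 * Worked example (NSACR value assumed): in Non-secure state with an
 * AArch32 EL3 and NSACR.CP10 == 0, a read of HCPTR reports
 * TCP11/TCP10 as 1 regardless of the stored cptr_el[2], and
 * cptr_el2_write() ignores writes to those two bits, keeping the old
 * values.
 */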
4145 
4146 static const ARMCPRegInfo el2_cp_reginfo[] = {
4147     { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
4148       .type = ARM_CP_IO,
4149       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
4150       .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
4151       .nv2_redirect_offset = 0x78,
4152       .resetfn = hcr_reset,
4153       .writefn = hcr_write, .raw_writefn = raw_write },
4154     { .name = "HCR", .state = ARM_CP_STATE_AA32,
4155       .type = ARM_CP_ALIAS | ARM_CP_IO,
4156       .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
4157       .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
4158       .writefn = hcr_writelow },
4159     { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH,
4160       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7,
4161       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4162     { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
4163       .type = ARM_CP_ALIAS | ARM_CP_NV2_REDIRECT,
4164       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
4165       .access = PL2_RW, .accessfn = access_exlock_el2,
4166       .fieldoffset = offsetof(CPUARMState, elr_el[2]) },
4167     { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
4168       .type = ARM_CP_NV2_REDIRECT,
4169       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
4170       .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) },
4171     { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
4172       .type = ARM_CP_NV2_REDIRECT,
4173       .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
4174       .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) },
4175     { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
4176       .type = ARM_CP_ALIAS,
4177       .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
4178       .access = PL2_RW,
4179       .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el[2]) },
4180     { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
4181       .type = ARM_CP_ALIAS | ARM_CP_NV2_REDIRECT,
4182       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
4183       .access = PL2_RW, .accessfn = access_exlock_el2,
4184       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) },
4185     { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
4186       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
4187       .access = PL2_RW, .writefn = vbar_write,
4188       .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]),
4189       .resetvalue = 0 },
4190     { .name = "SP_EL2", .state = ARM_CP_STATE_AA64,
4191       .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0,
4192       .access = PL3_RW, .type = ARM_CP_ALIAS,
4193       .fieldoffset = offsetof(CPUARMState, sp_el[2]) },
4194     { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
4195       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
4196       .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0,
4197       .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]),
4198       .readfn = cptr_el2_read, .writefn = cptr_el2_write },
4199     { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
4200       .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
4201       .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]),
4202       .resetvalue = 0 },
4203     { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
4204       .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
4205       .access = PL2_RW, .type = ARM_CP_ALIAS,
4206       .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) },
4207     { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
4208       .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
4209       .access = PL2_RW, .type = ARM_CP_CONST,
4210       .resetvalue = 0 },
4211     /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */
4212     { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
4213       .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
4214       .access = PL2_RW, .type = ARM_CP_CONST,
4215       .resetvalue = 0 },
4216     { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
4217       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
4218       .access = PL2_RW, .type = ARM_CP_CONST,
4219       .resetvalue = 0 },
4220     { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
4221       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
4222       .access = PL2_RW, .type = ARM_CP_CONST,
4223       .resetvalue = 0 },
4224     { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
4225       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
4226       .access = PL2_RW, .writefn = vmsa_tcr_el12_write,
4227       .raw_writefn = raw_write,
4228       .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) },
4229     { .name = "VTCR", .state = ARM_CP_STATE_AA32,
4230       .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
4231       .type = ARM_CP_ALIAS,
4232       .access = PL2_RW, .accessfn = access_el3_aa32ns,
4233       .fieldoffset = offsetoflow32(CPUARMState, cp15.vtcr_el2) },
4234     { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64,
4235       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
4236       .access = PL2_RW,
4237       .nv2_redirect_offset = 0x40,
4238       /* no .writefn needed as this can't cause an ASID change */
4239       .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
4240     { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
4241       .cp = 15, .opc1 = 6, .crm = 2,
4242       .type = ARM_CP_64BIT | ARM_CP_ALIAS,
4243       .access = PL2_RW, .accessfn = access_el3_aa32ns,
4244       .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2),
4245       .writefn = vttbr_write, .raw_writefn = raw_write },
4246     { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
4247       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
4248       .access = PL2_RW, .writefn = vttbr_write, .raw_writefn = raw_write,
4249       .nv2_redirect_offset = 0x20,
4250       .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) },
4251     { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
4252       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
4253       .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write,
4254       .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) },
4255     { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
4256       .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
4257       .access = PL2_RW, .resetvalue = 0,
4258       .nv2_redirect_offset = 0x90,
4259       .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) },
4260     { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
4261       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
4262       .access = PL2_RW, .resetvalue = 0,
4263       .writefn = vmsa_tcr_ttbr_el2_write, .raw_writefn = raw_write,
4264       .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
4265     { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
4266       .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
4267       .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
4268 #ifndef CONFIG_USER_ONLY
4269     { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
4270       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
4271       /*
4272        * ARMv7 requires bits 0 and 1 to reset to 1. ARMv8 defines the
4273        * reset values as IMPDEF. We choose to reset to 3 to comply with
4274        * both ARMv7 and ARMv8.
4275        */
4276       .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 3,
4277       .writefn = gt_cnthctl_write, .raw_writefn = raw_write,
4278       .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) },
4279     { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
4280       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
4281       .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0,
4282       .writefn = gt_cntvoff_write,
4283       .nv2_redirect_offset = 0x60,
4284       .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
4285     { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
4286       .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO,
4287       .writefn = gt_cntvoff_write,
4288       .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
4289     { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
4290       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
4291       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
4292       .type = ARM_CP_IO, .access = PL2_RW,
4293       .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
4294     { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
4295       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
4296       .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO,
4297       .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
4298     { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
4299       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
4300       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
4301       .resetfn = gt_hyp_timer_reset,
4302       .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write },
4303     { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
4304       .type = ARM_CP_IO,
4305       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
4306       .access = PL2_RW,
4307       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl),
4308       .resetvalue = 0,
4309       .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write },
4310 #endif
4311     { .name = "HPFAR", .state = ARM_CP_STATE_AA32,
4312       .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
4313       .access = PL2_RW, .accessfn = access_el3_aa32ns,
4314       .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
4315     { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64,
4316       .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
4317       .access = PL2_RW,
4318       .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
4319     { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
4320       .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
4321       .access = PL2_RW,
4322       .nv2_redirect_offset = 0x80,
4323       .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) },
4324 };
4325 
4326 static const ARMCPRegInfo el2_v8_cp_reginfo[] = {
4327     { .name = "HCR2", .state = ARM_CP_STATE_AA32,
4328       .type = ARM_CP_ALIAS | ARM_CP_IO,
4329       .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
4330       .access = PL2_RW,
4331       .fieldoffset = offsetofhigh32(CPUARMState, cp15.hcr_el2),
4332       .writefn = hcr_writehigh },
4333 };
4334 
4335 static CPAccessResult sel2_access(CPUARMState *env, const ARMCPRegInfo *ri,
4336                                   bool isread)
4337 {
4338     if (arm_current_el(env) == 3 || arm_is_secure_below_el3(env)) {
4339         return CP_ACCESS_OK;
4340     }
4341     return CP_ACCESS_UNDEFINED;
4342 }
4343 
4344 static const ARMCPRegInfo el2_sec_cp_reginfo[] = {
4345     { .name = "VSTTBR_EL2", .state = ARM_CP_STATE_AA64,
4346       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 6, .opc2 = 0,
4347       .access = PL2_RW, .accessfn = sel2_access,
4348       .nv2_redirect_offset = 0x30,
4349       .fieldoffset = offsetof(CPUARMState, cp15.vsttbr_el2) },
4350     { .name = "VSTCR_EL2", .state = ARM_CP_STATE_AA64,
4351       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 6, .opc2 = 2,
4352       .access = PL2_RW, .accessfn = sel2_access,
4353       .nv2_redirect_offset = 0x48,
4354       .fieldoffset = offsetof(CPUARMState, cp15.vstcr_el2) },
4355 #ifndef CONFIG_USER_ONLY
4356     /* Secure EL2 Physical Timer */
4357     { .name = "CNTHPS_TVAL_EL2", .state = ARM_CP_STATE_AA64,
4358       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 5, .opc2 = 0,
4359       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
4360       .accessfn = gt_sel2timer_access,
4361       .readfn = gt_sec_pel2_tval_read,
4362       .writefn = gt_sec_pel2_tval_write,
4363       .resetfn = gt_sec_pel2_timer_reset,
4364     },
4365     { .name = "CNTHPS_CTL_EL2", .state = ARM_CP_STATE_AA64,
4366       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 5, .opc2 = 1,
4367       .type = ARM_CP_IO, .access = PL2_RW,
4368       .accessfn = gt_sel2timer_access,
4369       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_S_EL2_PHYS].ctl),
4370       .resetvalue = 0,
4371       .writefn = gt_sec_pel2_ctl_write, .raw_writefn = raw_write,
4372     },
4373     { .name = "CNTHPS_CVAL_EL2", .state = ARM_CP_STATE_AA64,
4374       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 5, .opc2 = 2,
4375       .type = ARM_CP_IO, .access = PL2_RW,
4376       .accessfn = gt_sel2timer_access,
4377       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_S_EL2_PHYS].cval),
4378       .writefn = gt_sec_pel2_cval_write, .raw_writefn = raw_write,
4379     },
4380     /* Secure EL2 Virtual Timer */
4381     { .name = "CNTHVS_TVAL_EL2", .state = ARM_CP_STATE_AA64,
4382       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 4, .opc2 = 0,
4383       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
4384       .accessfn = gt_sel2timer_access,
4385       .readfn = gt_sec_vel2_tval_read,
4386       .writefn = gt_sec_vel2_tval_write,
4387       .resetfn = gt_sec_vel2_timer_reset,
4388     },
4389     { .name = "CNTHVS_CTL_EL2", .state = ARM_CP_STATE_AA64,
4390       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 4, .opc2 = 1,
4391       .type = ARM_CP_IO, .access = PL2_RW,
4392       .accessfn = gt_sel2timer_access,
4393       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_S_EL2_VIRT].ctl),
4394       .resetvalue = 0,
4395       .writefn = gt_sec_vel2_ctl_write, .raw_writefn = raw_write,
4396     },
4397     { .name = "CNTHVS_CVAL_EL2", .state = ARM_CP_STATE_AA64,
4398       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 4, .opc2 = 2,
4399       .type = ARM_CP_IO, .access = PL2_RW,
4400       .accessfn = gt_sel2timer_access,
4401       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_S_EL2_VIRT].cval),
4402       .writefn = gt_sec_vel2_cval_write, .raw_writefn = raw_write,
4403     },
4404 #endif
4405 };
4406 
4407 static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
4408                                    bool isread)
4409 {
4410     /*
4411      * The NSACR is RW at EL3, and RO for NS EL1 and NS EL2.
4412      * At Secure EL1 it traps to EL3 or EL2.
4413      */
4414     if (arm_current_el(env) == 3) {
4415         return CP_ACCESS_OK;
4416     }
4417     if (arm_is_secure_below_el3(env)) {
4418         if (env->cp15.scr_el3 & SCR_EEL2) {
4419             return CP_ACCESS_TRAP_EL2;
4420         }
4421         return CP_ACCESS_TRAP_EL3;
4422     }
4423     /* Accesses from EL1 NS and EL2 NS are UNDEF for write but allow reads. */
4424     if (isread) {
4425         return CP_ACCESS_OK;
4426     }
4427     return CP_ACCESS_UNDEFINED;
4428 }
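
/*
 * Illustrative summary of the NSACR routing implemented above (a
 * restatement of the checks, not additional logic):
 *
 *   EL3                          read OK      write OK
 *   Secure EL1, SCR_EL3.EEL2=1   trap to EL2 for both
 *   Secure EL1, SCR_EL3.EEL2=0   trap to EL3 for both
 *   NS EL1 and NS EL2            read OK      write UNDEF
 */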
4429 
4430 static const ARMCPRegInfo el3_cp_reginfo[] = {
4431     { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64,
4432       .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
4433       .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3),
4434       .resetfn = scr_reset, .writefn = scr_write, .raw_writefn = raw_write },
4435     { .name = "SCR",  .type = ARM_CP_ALIAS | ARM_CP_NEWEL,
4436       .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0,
4437       .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
4438       .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3),
4439       .writefn = scr_write, .raw_writefn = raw_write },
4440     { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64,
4441       .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1,
4442       .access = PL3_RW, .resetvalue = 0,
4443       .fieldoffset = offsetof(CPUARMState, cp15.sder) },
4444     { .name = "SDER",
4445       .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1,
4446       .access = PL3_RW, .resetvalue = 0,
4447       .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) },
4448     { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
4449       .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
4450       .writefn = vbar_write, .resetvalue = 0,
4451       .fieldoffset = offsetof(CPUARMState, cp15.mvbar) },
4452     { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64,
4453       .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0,
4454       .access = PL3_RW, .resetvalue = 0,
4455       .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) },
4456     { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64,
4457       .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2,
4458       .access = PL3_RW,
4459       /* no .writefn needed as this can't cause an ASID change */
4460       .resetvalue = 0,
4461       .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) },
4462     { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
4463       .type = ARM_CP_ALIAS,
4464       .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1,
4465       .access = PL3_RW, .accessfn = access_exlock_el3,
4466       .fieldoffset = offsetof(CPUARMState, elr_el[3]) },
4467     { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64,
4468       .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0,
4469       .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) },
4470     { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64,
4471       .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0,
4472       .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) },
4473     { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64,
4474       .type = ARM_CP_ALIAS,
4475       .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0,
4476       .access = PL3_RW, .accessfn = access_exlock_el3,
4477       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) },
4478     { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64,
4479       .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0,
4480       .access = PL3_RW, .writefn = vbar_write,
4481       .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]),
4482       .resetvalue = 0 },
4483     { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64,
4484       .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2,
4485       .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0,
4486       .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) },
4487     { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64,
4488       .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2,
4489       .access = PL3_RW, .resetvalue = 0,
4490       .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) },
4491     { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64,
4492       .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0,
4493       .access = PL3_RW, .type = ARM_CP_CONST,
4494       .resetvalue = 0 },
4495     { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH,
4496       .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0,
4497       .access = PL3_RW, .type = ARM_CP_CONST,
4498       .resetvalue = 0 },
4499     { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH,
4500       .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1,
4501       .access = PL3_RW, .type = ARM_CP_CONST,
4502       .resetvalue = 0 },
4503 };
4504 
4505 #ifndef CONFIG_USER_ONLY
4506 
4507 static CPAccessResult e2h_access(CPUARMState *env, const ARMCPRegInfo *ri,
4508                                  bool isread)
4509 {
4510     if (arm_current_el(env) == 1) {
4511         /* This must be a FEAT_NV access */
4512         return CP_ACCESS_OK;
4513     }
4514     if (!(arm_hcr_el2_eff(env) & HCR_E2H)) {
4515         return CP_ACCESS_UNDEFINED;
4516     }
4517     return CP_ACCESS_OK;
4518 }
4519 
4520 static CPAccessResult access_el1nvpct(CPUARMState *env, const ARMCPRegInfo *ri,
4521                                       bool isread)
4522 {
4523     if (arm_current_el(env) == 1) {
4524         /* This must be a FEAT_NV access with NVx == 101 */
4525         if (FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, EL1NVPCT)) {
4526             return CP_ACCESS_TRAP_EL2;
4527         }
4528     }
4529     return e2h_access(env, ri, isread);
4530 }
4531 
4532 static CPAccessResult access_el1nvvct(CPUARMState *env, const ARMCPRegInfo *ri,
4533                                       bool isread)
4534 {
4535     if (arm_current_el(env) == 1) {
4536         /* This must be a FEAT_NV access with NVx == 101 */
4537         if (FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, EL1NVVCT)) {
4538             return CP_ACCESS_TRAP_EL2;
4539         }
4540     }
4541     return e2h_access(env, ri, isread);
4542 }
4543 
4544 #endif
4545 
4546 static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
4547                                      bool isread)
4548 {
4549     int cur_el = arm_current_el(env);
4550 
4551     if (cur_el < 2) {
4552         uint64_t hcr = arm_hcr_el2_eff(env);
4553 
4554         if (cur_el == 0) {
4555             if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
4556                 if (!(env->cp15.sctlr_el[2] & SCTLR_UCT)) {
4557                     return CP_ACCESS_TRAP_EL2;
4558                 }
4559             } else {
4560                 if (!(env->cp15.sctlr_el[1] & SCTLR_UCT)) {
4561                     return CP_ACCESS_TRAP_EL1;
4562                 }
4563                 if (hcr & HCR_TID2) {
4564                     return CP_ACCESS_TRAP_EL2;
4565                 }
4566             }
4567         } else if (hcr & HCR_TID2) {
4568             return CP_ACCESS_TRAP_EL2;
4569         }
4570     }
4575 
4576     return CP_ACCESS_OK;
4577 }
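
/*
 * Illustrative summary of the CTR_EL0 trap priority implemented above,
 * for an access from EL0:
 *
 *   HCR_EL2.{E2H,TGE} == {1,1}: trap to EL2 unless SCTLR_EL2.UCT is set
 *   otherwise: trap to EL1 unless SCTLR_EL1.UCT is set, and then trap
 *              to EL2 if HCR_EL2.TID2 is set
 *
 * From EL1 only the HCR_EL2.TID2 trap to EL2 applies; from EL2 and EL3
 * the access always succeeds.
 */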
4578 
4579 /*
4580  * Check for traps to RAS registers, which are controlled
4581  * by HCR_EL2.TERR and SCR_EL3.TERR.
4582  */
4583 static CPAccessResult access_terr(CPUARMState *env, const ARMCPRegInfo *ri,
4584                                   bool isread)
4585 {
4586     int el = arm_current_el(env);
4587 
4588     if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TERR)) {
4589         return CP_ACCESS_TRAP_EL2;
4590     }
4591     if (!arm_is_el3_or_mon(env) && (env->cp15.scr_el3 & SCR_TERR)) {
4592         return CP_ACCESS_TRAP_EL3;
4593     }
4594     return CP_ACCESS_OK;
4595 }
4596 
4597 static uint64_t disr_read(CPUARMState *env, const ARMCPRegInfo *ri)
4598 {
4599     int el = arm_current_el(env);
4600 
4601     if (el < 2 && (arm_hcr_el2_eff(env) & HCR_AMO)) {
4602         return env->cp15.vdisr_el2;
4603     }
4604     if (el < 3 && (env->cp15.scr_el3 & SCR_EA)) {
4605         return 0; /* RAZ/WI */
4606     }
4607     return env->cp15.disr_el1;
4608 }
4609 
4610 static void disr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
4611 {
4612     int el = arm_current_el(env);
4613 
4614     if (el < 2 && (arm_hcr_el2_eff(env) & HCR_AMO)) {
4615         env->cp15.vdisr_el2 = val;
4616         return;
4617     }
4618     if (el < 3 && (env->cp15.scr_el3 & SCR_EA)) {
4619         return; /* RAZ/WI */
4620     }
4621     env->cp15.disr_el1 = val;
4622 }
4623 
4624 /*
4625  * Minimal RAS implementation with no Error Records, which means
4626  * that all of the Error Record registers:
4627  *   ERXADDR_EL1
4628  *   ERXCTLR_EL1
4629  *   ERXFR_EL1
4630  *   ERXMISC0_EL1
4631  *   ERXMISC1_EL1
4632  *   ERXMISC2_EL1
4633  *   ERXMISC3_EL1
4634  *   ERXPFGCDN_EL1  (RASv1p1)
4635  *   ERXPFGCTL_EL1  (RASv1p1)
4636  *   ERXPFGF_EL1    (RASv1p1)
4637  *   ERXSTATUS_EL1
4638  * and
4639  *   ERRSELR_EL1
4640  * may generate UNDEFINED, which is the effect we get by not
4641  * listing them at all.
4642  *
4643  * These registers have fine-grained trap bits, but UNDEF-to-EL1
4644  * is higher priority than FGT-to-EL2 so we do not need to list them
4645  * in order to check for an FGT.
4646  */
4647 static const ARMCPRegInfo minimal_ras_reginfo[] = {
4648     { .name = "DISR_EL1", .state = ARM_CP_STATE_BOTH,
4649       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 1,
4650       .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.disr_el1),
4651       .readfn = disr_read, .writefn = disr_write, .raw_writefn = raw_write },
4652     { .name = "ERRIDR_EL1", .state = ARM_CP_STATE_BOTH,
4653       .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 3, .opc2 = 0,
4654       .access = PL1_R, .accessfn = access_terr,
4655       .fgt = FGT_ERRIDR_EL1,
4656       .type = ARM_CP_CONST, .resetvalue = 0 },
4657     { .name = "VDISR_EL2", .state = ARM_CP_STATE_BOTH,
4658       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 1, .opc2 = 1,
4659       .nv2_redirect_offset = 0x500,
4660       .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.vdisr_el2) },
4661     { .name = "VSESR_EL2", .state = ARM_CP_STATE_BOTH,
4662       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 3,
4663       .nv2_redirect_offset = 0x508,
4664       .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.vsesr_el2) },
4665 };
4666 
4667 /*
4668  * Return the exception level to which exceptions should be taken
4669  * via SVEAccessTrap.  This excludes the check for whether the exception
4670  * should be routed through AArch64.AdvSIMDFPAccessTrap.  That can easily
4671  * be found by testing 0 < fp_exception_el < sve_exception_el.
4672  *
4673  * C.f. the ARM pseudocode function CheckSVEEnabled.  Note that the
4674  * pseudocode does *not* separate out the FP trap checks, but has them
4675  * all in one function.
4676  */
4677 int sve_exception_el(CPUARMState *env, int el)
4678 {
4679 #ifndef CONFIG_USER_ONLY
4680     if (el <= 1 && !el_is_in_host(env, el)) {
4681         switch (FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, ZEN)) {
4682         case 1:
4683             if (el != 0) {
4684                 break;
4685             }
4686             /* fall through */
4687         case 0:
4688         case 2:
4689             return 1;
4690         }
4691     }
4692 
4693     if (el <= 2 && arm_is_el2_enabled(env)) {
4694         /* CPTR_EL2 changes format with HCR_EL2.E2H (regardless of TGE). */
4695         if (env->cp15.hcr_el2 & HCR_E2H) {
4696             switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, ZEN)) {
4697             case 1:
4698                 if (el != 0 || !(env->cp15.hcr_el2 & HCR_TGE)) {
4699                     break;
4700                 }
4701                 /* fall through */
4702             case 0:
4703             case 2:
4704                 return 2;
4705             }
4706         } else {
4707             if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TZ)) {
4708                 return 2;
4709             }
4710         }
4711     }
4712 
4713     /* CPTR_EL3.  EZ is an enable bit, so we must also check that EL3 exists.  */
4714     if (arm_feature(env, ARM_FEATURE_EL3)
4715         && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, EZ)) {
4716         return 3;
4717     }
4718 #endif
4719     return 0;
4720 }
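
/*
 * For reference, the CPACR_EL1.ZEN decode implemented by the first
 * switch above works out to (a restatement, not additional logic):
 *
 *   ZEN == 0  trap EL0 and EL1 accesses to EL1
 *   ZEN == 1  trap EL0 accesses only (case 1 falls through for el == 0)
 *   ZEN == 2  trap EL0 and EL1 accesses to EL1
 *   ZEN == 3  no trap from CPACR_EL1
 *
 * The E2H form of CPTR_EL2.ZEN follows the same pattern, with EL2 (and
 * the TGE check) in place of EL1.
 */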
4721 
4722 /*
4723  * Return the exception level to which exceptions should be taken for SME.
4724  * C.f. the ARM pseudocode function CheckSMEAccess.
4725  */
4726 int sme_exception_el(CPUARMState *env, int el)
4727 {
4728 #ifndef CONFIG_USER_ONLY
4729     if (el <= 1 && !el_is_in_host(env, el)) {
4730         switch (FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, SMEN)) {
4731         case 1:
4732             if (el != 0) {
4733                 break;
4734             }
4735             /* fall through */
4736         case 0:
4737         case 2:
4738             return 1;
4739         }
4740     }
4741 
4742     if (el <= 2 && arm_is_el2_enabled(env)) {
4743         /* CPTR_EL2 changes format with HCR_EL2.E2H (regardless of TGE). */
4744         if (env->cp15.hcr_el2 & HCR_E2H) {
4745             switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, SMEN)) {
4746             case 1:
4747                 if (el != 0 || !(env->cp15.hcr_el2 & HCR_TGE)) {
4748                     break;
4749                 }
4750                 /* fall through */
4751             case 0:
4752             case 2:
4753                 return 2;
4754             }
4755         } else {
4756             if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TSM)) {
4757                 return 2;
4758             }
4759         }
4760     }
4761 
4762     /* CPTR_EL3.  ESM is an enable bit, so we must also check that EL3 exists.  */
4763     if (arm_feature(env, ARM_FEATURE_EL3)
4764         && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, ESM)) {
4765         return 3;
4766     }
4767 #endif
4768     return 0;
4769 }
4770 
4771 /*
4772  * Given that SVE is enabled, return the vector length for EL.
4773  */
4774 uint32_t sve_vqm1_for_el_sm(CPUARMState *env, int el, bool sm)
4775 {
4776     ARMCPU *cpu = env_archcpu(env);
4777     uint64_t *cr = env->vfp.zcr_el;
4778     uint32_t map = cpu->sve_vq.map;
4779     uint32_t len = ARM_MAX_VQ - 1;
4780 
4781     if (sm) {
4782         cr = env->vfp.smcr_el;
4783         map = cpu->sme_vq.map;
4784     }
4785 
4786     if (el <= 1 && !el_is_in_host(env, el)) {
4787         len = MIN(len, 0xf & (uint32_t)cr[1]);
4788     }
4789     if (el <= 2 && arm_is_el2_enabled(env)) {
4790         len = MIN(len, 0xf & (uint32_t)cr[2]);
4791     }
4792     if (arm_feature(env, ARM_FEATURE_EL3)) {
4793         len = MIN(len, 0xf & (uint32_t)cr[3]);
4794     }
4795 
4796     map &= MAKE_64BIT_MASK(0, len + 1);
4797     if (map != 0) {
4798         return 31 - clz32(map);
4799     }
4800 
4801     /* Bit 0 is always set for Normal SVE -- not so for Streaming SVE. */
4802     assert(sm);
4803     return ctz32(cpu->sme_vq.map);
4804 }
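
/*
 * Worked example for the above, with invented values: assume
 * ARM_MAX_VQ == 16, ZCR_EL1 == 5, ZCR_EL2 == 3, no EL3, and
 * sve_vq.map == 0b1000000010001111 (VQ 1-4, 8 and 16 supported):
 *
 *   len = MIN(15, 5, 3) = 3              requested VQ is len + 1 = 4
 *   map &= MAKE_64BIT_MASK(0, 4) = 0b1111
 *   31 - clz32(0b1111) = 3               effective vqm1 = 3, i.e. VQ 4
 *
 * Had map been 0b0100 (only VQ 3 supported), the result would be
 * vqm1 = 2: the largest supported VQ not exceeding the request.
 */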
4805 
4806 uint32_t sve_vqm1_for_el(CPUARMState *env, int el)
4807 {
4808     return sve_vqm1_for_el_sm(env, el, FIELD_EX64(env->svcr, SVCR, SM));
4809 }
4810 
4811 static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4812                       uint64_t value)
4813 {
4814     int cur_el = arm_current_el(env);
4815     int old_len = sve_vqm1_for_el(env, cur_el);
4816     int new_len;
4817 
4818     /* Bits other than [3:0] are RAZ/WI.  */
4819     QEMU_BUILD_BUG_ON(ARM_MAX_VQ > 16);
4820     raw_write(env, ri, value & 0xf);
4821 
4822     /*
4823      * Because we arrived here, we know both FP and SVE are enabled;
4824      * otherwise we would have trapped access to the ZCR_ELn register.
4825      */
4826     new_len = sve_vqm1_for_el(env, cur_el);
4827     if (new_len < old_len) {
4828         aarch64_sve_narrow_vq(env, new_len + 1);
4829     }
4830 }
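
/*
 * Example with invented values: a guest write of 0x105 to ZCR_EL1
 * stores only 0x5, since bits [63:4] are RAZ/WI.  If that lowers the
 * effective vector length, aarch64_sve_narrow_vq zeroes the now
 * out-of-range parts of the Z and P registers, so QEMU never holds
 * stale wider-vector state.
 */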
4831 
4832 static const ARMCPRegInfo zcr_reginfo[] = {
4833     { .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64,
4834       .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0,
4835       .nv2_redirect_offset = 0x1e0 | NV2_REDIR_NV1,
4836       .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 1, 2, 0),
4837       .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 1, 2, 0),
4838       .access = PL1_RW, .type = ARM_CP_SVE,
4839       .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]),
4840       .writefn = zcr_write, .raw_writefn = raw_write },
4841     { .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
4842       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
4843       .access = PL2_RW, .type = ARM_CP_SVE,
4844       .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]),
4845       .writefn = zcr_write, .raw_writefn = raw_write },
4846     { .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64,
4847       .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0,
4848       .access = PL3_RW, .type = ARM_CP_SVE,
4849       .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]),
4850       .writefn = zcr_write, .raw_writefn = raw_write },
4851 };
4852 
4853 static CPAccessResult access_tpidr2(CPUARMState *env, const ARMCPRegInfo *ri,
4854                                     bool isread)
4855 {
4856     int el = arm_current_el(env);
4857 
4858     if (el == 0) {
4859         uint64_t sctlr = arm_sctlr(env, el);
4860         if (!(sctlr & SCTLR_EnTP2)) {
4861             return CP_ACCESS_TRAP_EL1;
4862         }
4863     }
4864     /* TODO: FEAT_FGT */
4865     if (el < 3
4866         && arm_feature(env, ARM_FEATURE_EL3)
4867         && !(env->cp15.scr_el3 & SCR_ENTP2)) {
4868         return CP_ACCESS_TRAP_EL3;
4869     }
4870     return CP_ACCESS_OK;
4871 }
4872 
4873 static CPAccessResult access_smprimap(CPUARMState *env, const ARMCPRegInfo *ri,
4874                                       bool isread)
4875 {
4876     /* If EL1 this is a FEAT_NV access and CPTR_EL3.ESM doesn't apply */
4877     if (arm_current_el(env) == 2
4878         && arm_feature(env, ARM_FEATURE_EL3)
4879         && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, ESM)) {
4880         return CP_ACCESS_TRAP_EL3;
4881     }
4882     return CP_ACCESS_OK;
4883 }
4884 
4885 static CPAccessResult access_smpri(CPUARMState *env, const ARMCPRegInfo *ri,
4886                                    bool isread)
4887 {
4888     if (arm_current_el(env) < 3
4889         && arm_feature(env, ARM_FEATURE_EL3)
4890         && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, ESM)) {
4891         return CP_ACCESS_TRAP_EL3;
4892     }
4893     return CP_ACCESS_OK;
4894 }
4895 
4896 /* ResetSVEState */
4897 static void arm_reset_sve_state(CPUARMState *env)
4898 {
4899     memset(env->vfp.zregs, 0, sizeof(env->vfp.zregs));
4900     /* Recall that FFR is stored as pregs[16]. */
4901     memset(env->vfp.pregs, 0, sizeof(env->vfp.pregs));
4902     vfp_set_fpsr(env, 0x0800009f);
4903 }
4904 
4905 void aarch64_set_svcr(CPUARMState *env, uint64_t new, uint64_t mask)
4906 {
4907     uint64_t change = (env->svcr ^ new) & mask;
4908 
4909     if (change == 0) {
4910         return;
4911     }
4912     env->svcr ^= change;
4913 
4914     if (change & R_SVCR_SM_MASK) {
4915         arm_reset_sve_state(env);
4916     }
4917 
4918     /*
4919      * ResetSMEState.
4920      *
4921      * SetPSTATE_ZA zeros on enable and disable.  We can zero this only
4922      * on enable: while disabled, the storage is inaccessible and the
4923      * value does not matter.  We're not saving the storage in vmstate
4924      * when disabled either.
4925      */
4926     if (change & new & R_SVCR_ZA_MASK) {
4927         memset(&env->za_state, 0, sizeof(env->za_state));
4928     }
4929 
4930     if (tcg_enabled()) {
4931         arm_rebuild_hflags(env);
4932     }
4933 }
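
/*
 * Callers pass a mask selecting which of SVCR.{SM,ZA} to change.
 * Illustrative uses:
 *
 *   aarch64_set_svcr(env, R_SVCR_SM_MASK, R_SVCR_SM_MASK);
 *       enter Streaming SVE mode; SVE state is reset, hflags rebuilt
 *   aarch64_set_svcr(env, 0, R_SVCR_ZA_MASK);
 *       disable ZA; the storage is left alone, as it is inaccessible
 *
 * svcr_write below passes mask == -1 to update both bits at once.
 */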
4934 
4935 static void svcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4936                        uint64_t value)
4937 {
4938     aarch64_set_svcr(env, value, -1);
4939 }
4940 
4941 static void smcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4942                        uint64_t value)
4943 {
4944     int cur_el = arm_current_el(env);
4945     int old_len = sve_vqm1_for_el(env, cur_el);
4946     uint64_t valid_mask = R_SMCR_LEN_MASK | R_SMCR_FA64_MASK;
4947     int new_len;
4948 
4949     QEMU_BUILD_BUG_ON(ARM_MAX_VQ > R_SMCR_LEN_MASK + 1);
4950     if (cpu_isar_feature(aa64_sme2, env_archcpu(env))) {
4951         valid_mask |= R_SMCR_EZT0_MASK;
4952     }
4953     value &= valid_mask;
4954     raw_write(env, ri, value);
4955 
4956     /*
4957      * Note that it is CONSTRAINED UNPREDICTABLE what happens to ZA storage
4958      * when SVL is widened (old values kept, or zeros).  Choose to keep the
4959      * current values for simplicity.  But for QEMU internals, we must still
4960      * apply the narrower SVL to the Zregs and Pregs -- see the comment
4961      * above aarch64_sve_narrow_vq.
4962      */
4963     new_len = sve_vqm1_for_el(env, cur_el);
4964     if (new_len < old_len) {
4965         aarch64_sve_narrow_vq(env, new_len + 1);
4966     }
4967 }
4968 
4969 static const ARMCPRegInfo sme_reginfo[] = {
4970     { .name = "TPIDR2_EL0", .state = ARM_CP_STATE_AA64,
4971       .opc0 = 3, .opc1 = 3, .crn = 13, .crm = 0, .opc2 = 5,
4972       .access = PL0_RW, .accessfn = access_tpidr2,
4973       .fgt = FGT_NTPIDR2_EL0,
4974       .fieldoffset = offsetof(CPUARMState, cp15.tpidr2_el0) },
4975     { .name = "SVCR", .state = ARM_CP_STATE_AA64,
4976       .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 2,
4977       .access = PL0_RW, .type = ARM_CP_SME,
4978       .fieldoffset = offsetof(CPUARMState, svcr),
4979       .writefn = svcr_write, .raw_writefn = raw_write },
4980     { .name = "SMCR_EL1", .state = ARM_CP_STATE_AA64,
4981       .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 6,
4982       .nv2_redirect_offset = 0x1f0 | NV2_REDIR_NV1,
4983       .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 1, 2, 6),
4984       .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 1, 2, 6),
4985       .access = PL1_RW, .type = ARM_CP_SME,
4986       .fieldoffset = offsetof(CPUARMState, vfp.smcr_el[1]),
4987       .writefn = smcr_write, .raw_writefn = raw_write },
4988     { .name = "SMCR_EL2", .state = ARM_CP_STATE_AA64,
4989       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 6,
4990       .access = PL2_RW, .type = ARM_CP_SME,
4991       .fieldoffset = offsetof(CPUARMState, vfp.smcr_el[2]),
4992       .writefn = smcr_write, .raw_writefn = raw_write },
4993     { .name = "SMCR_EL3", .state = ARM_CP_STATE_AA64,
4994       .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 6,
4995       .access = PL3_RW, .type = ARM_CP_SME,
4996       .fieldoffset = offsetof(CPUARMState, vfp.smcr_el[3]),
4997       .writefn = smcr_write, .raw_writefn = raw_write },
4998     { .name = "SMIDR_EL1", .state = ARM_CP_STATE_AA64,
4999       .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 6,
5000       .access = PL1_R, .accessfn = access_aa64_tid1,
5001       /*
5002        * IMPLEMENTOR = 0 (software)
5003        * REVISION    = 0 (implementation defined)
5004        * SMPS        = 0 (no streaming execution priority in QEMU)
5005        * AFFINITY    = 0 (streaming sve mode not shared with other PEs)
5006        */
5007       .type = ARM_CP_CONST, .resetvalue = 0, },
5008     /*
5009      * Because SMIDR_EL1.SMPS is 0, SMPRI_EL1 and SMPRIMAP_EL2 are RES0.
5010      */
5011     { .name = "SMPRI_EL1", .state = ARM_CP_STATE_AA64,
5012       .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 4,
5013       .access = PL1_RW, .accessfn = access_smpri,
5014       .fgt = FGT_NSMPRI_EL1,
5015       .type = ARM_CP_CONST, .resetvalue = 0 },
5016     { .name = "SMPRIMAP_EL2", .state = ARM_CP_STATE_AA64,
5017       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 5,
5018       .nv2_redirect_offset = 0x1f8,
5019       .access = PL2_RW, .accessfn = access_smprimap,
5020       .type = ARM_CP_CONST, .resetvalue = 0 },
5021 };
5022 
5023 static void gpccr_write(CPUARMState *env, const ARMCPRegInfo *ri,
5024                         uint64_t value)
5025 {
5026     /* L0GPTSZ is RO; other bits not mentioned are RES0. */
5027     uint64_t rw_mask = R_GPCCR_PPS_MASK | R_GPCCR_IRGN_MASK |
5028         R_GPCCR_ORGN_MASK | R_GPCCR_SH_MASK | R_GPCCR_PGS_MASK |
5029         R_GPCCR_GPC_MASK | R_GPCCR_GPCP_MASK;
5030 
5031     if (cpu_isar_feature(aa64_rme_gpc2, env_archcpu(env))) {
5032         rw_mask |= R_GPCCR_APPSAA_MASK | R_GPCCR_NSO_MASK |
5033                    R_GPCCR_SPAD_MASK | R_GPCCR_NSPAD_MASK | R_GPCCR_RLPAD_MASK;
5034     }
5035 
5036     env->cp15.gpccr_el3 = (value & rw_mask) | (env->cp15.gpccr_el3 & ~rw_mask);
5037 }
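
/*
 * The read-modify-write above means that, for example, a guest write
 * of ~0ULL changes only the fields in rw_mask: L0GPTSZ and all RES0
 * bits keep their previous values.
 */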
5038 
5039 static void gpccr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
5040 {
5041     env->cp15.gpccr_el3 = FIELD_DP64(0, GPCCR, L0GPTSZ,
5042                                      env_archcpu(env)->reset_l0gptsz);
5043 }
5044 
5045 static const ARMCPRegInfo rme_reginfo[] = {
5046     { .name = "GPCCR_EL3", .state = ARM_CP_STATE_AA64,
5047       .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 1, .opc2 = 6,
5048       .access = PL3_RW, .writefn = gpccr_write, .resetfn = gpccr_reset,
5049       .fieldoffset = offsetof(CPUARMState, cp15.gpccr_el3) },
5050     { .name = "GPTBR_EL3", .state = ARM_CP_STATE_AA64,
5051       .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 1, .opc2 = 4,
5052       .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.gptbr_el3) },
5053     { .name = "MFAR_EL3", .state = ARM_CP_STATE_AA64,
5054       .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 5,
5055       .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mfar_el3) },
5056     { .name = "DC_CIPAPA", .state = ARM_CP_STATE_AA64,
5057       .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 14, .opc2 = 1,
5058       .access = PL3_W, .type = ARM_CP_NOP },
5059 };
5060 
5061 static const ARMCPRegInfo rme_mte_reginfo[] = {
5062     { .name = "DC_CIGDPAPA", .state = ARM_CP_STATE_AA64,
5063       .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 14, .opc2 = 5,
5064       .access = PL3_W, .type = ARM_CP_NOP },
5065 };
5066 
5067 static void aa64_allint_write(CPUARMState *env, const ARMCPRegInfo *ri,
5068                               uint64_t value)
5069 {
5070     env->pstate = (env->pstate & ~PSTATE_ALLINT) | (value & PSTATE_ALLINT);
5071 }
5072 
5073 static uint64_t aa64_allint_read(CPUARMState *env, const ARMCPRegInfo *ri)
5074 {
5075     return env->pstate & PSTATE_ALLINT;
5076 }
5077 
5078 static CPAccessResult aa64_allint_access(CPUARMState *env,
5079                                          const ARMCPRegInfo *ri, bool isread)
5080 {
5081     if (!isread && arm_current_el(env) == 1 &&
5082         (arm_hcrx_el2_eff(env) & HCRX_TALLINT)) {
5083         return CP_ACCESS_TRAP_EL2;
5084     }
5085     return CP_ACCESS_OK;
5086 }
5087 
5088 static const ARMCPRegInfo nmi_reginfo[] = {
5089     { .name = "ALLINT", .state = ARM_CP_STATE_AA64,
5090       .opc0 = 3, .opc1 = 0, .opc2 = 0, .crn = 4, .crm = 3,
5091       .type = ARM_CP_NO_RAW,
5092       .access = PL1_RW, .accessfn = aa64_allint_access,
5093       .fieldoffset = offsetof(CPUARMState, pstate),
5094       .writefn = aa64_allint_write, .readfn = aa64_allint_read,
5095       .resetfn = arm_cp_reset_ignore },
5096 };
5097 
5098 static CPAccessResult mecid_access(CPUARMState *env,
5099                                    const ARMCPRegInfo *ri, bool isread)
5100 {
5101     int el = arm_current_el(env);
5102 
5103     if (el == 2) {
5104         if (arm_security_space(env) != ARMSS_Realm) {
5105             return CP_ACCESS_UNDEFINED;
5106         }
5107 
5108         if (!(env->cp15.scr_el3 & SCR_MECEN)) {
5109             return CP_ACCESS_TRAP_EL3;
5110         }
5111     }
5112 
5113     return CP_ACCESS_OK;
5114 }
5115 
5116 static void mecid_write(CPUARMState *env, const ARMCPRegInfo *ri,
5117                         uint64_t value)
5118 {
5119     value = extract64(value, 0, MECID_WIDTH);
5120     raw_write(env, ri, value);
5121 }
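
/*
 * Example, assuming MECID_WIDTH == 16: a write of 0x12345678 stores
 * 0x5678.  Bits above the implemented MECID width are discarded,
 * consistent with the MECIDR_EL2.MECIDWidthm1 value of
 * MECID_WIDTH - 1 reported below.
 */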
5122 
5123 static CPAccessResult cipae_access(CPUARMState *env, const ARMCPRegInfo *ri,
5124                                    bool isread)
5125 {
5126     switch (arm_security_space(env)) {
5127     case ARMSS_Root:  /* EL3 */
5128     case ARMSS_Realm: /* Realm EL2 */
5129         return CP_ACCESS_OK;
5130     default:
5131         return CP_ACCESS_UNDEFINED;
5132     }
5133 }
5134 
5135 static const ARMCPRegInfo mec_reginfo[] = {
5136     { .name = "MECIDR_EL2", .state = ARM_CP_STATE_AA64,
5137       .opc0 = 3, .opc1 = 4, .opc2 = 7, .crn = 10, .crm = 8,
5138       .access = PL2_R, .type = ARM_CP_CONST | ARM_CP_NV_NO_TRAP,
5139       .resetvalue = MECID_WIDTH - 1 },
5140     { .name = "MECID_P0_EL2", .state = ARM_CP_STATE_AA64,
5141       .opc0 = 3, .opc1 = 4, .opc2 = 0, .crn = 10, .crm = 8,
5142       .access = PL2_RW, .type = ARM_CP_NV_NO_TRAP,
5143       .accessfn = mecid_access, .writefn = mecid_write,
5144       .fieldoffset = offsetof(CPUARMState, cp15.mecid_p0_el2) },
5145     { .name = "MECID_A0_EL2", .state = ARM_CP_STATE_AA64,
5146       .opc0 = 3, .opc1 = 4, .opc2 = 1, .crn = 10, .crm = 8,
5147       .access = PL2_RW, .type = ARM_CP_NV_NO_TRAP,
5148       .accessfn = mecid_access, .writefn = mecid_write,
5149       .fieldoffset = offsetof(CPUARMState, cp15.mecid_a0_el2) },
5150     { .name = "MECID_P1_EL2", .state = ARM_CP_STATE_AA64,
5151       .opc0 = 3, .opc1 = 4, .opc2 = 2, .crn = 10, .crm = 8,
5152       .access = PL2_RW, .type = ARM_CP_NV_NO_TRAP,
5153       .accessfn = mecid_access, .writefn = mecid_write,
5154       .fieldoffset = offsetof(CPUARMState, cp15.mecid_p1_el2) },
5155     { .name = "MECID_A1_EL2", .state = ARM_CP_STATE_AA64,
5156       .opc0 = 3, .opc1 = 4, .opc2 = 3, .crn = 10, .crm = 8,
5157       .access = PL2_RW, .type = ARM_CP_NV_NO_TRAP,
5158       .accessfn = mecid_access, .writefn = mecid_write,
5159       .fieldoffset = offsetof(CPUARMState, cp15.mecid_a1_el2) },
5160     { .name = "MECID_RL_A_EL3", .state = ARM_CP_STATE_AA64,
5161       .opc0 = 3, .opc1 = 6, .opc2 = 1, .crn = 10, .crm = 10,
5162       .access = PL3_RW, .accessfn = mecid_access,
5163       .writefn = mecid_write,
5164       .fieldoffset = offsetof(CPUARMState, cp15.mecid_rl_a_el3) },
5165     { .name = "VMECID_P_EL2", .state = ARM_CP_STATE_AA64,
5166       .opc0 = 3, .opc1 = 4, .opc2 = 0, .crn = 10, .crm = 9,
5167       .access = PL2_RW, .type = ARM_CP_NV_NO_TRAP,
5168       .accessfn = mecid_access, .writefn = mecid_write,
5169       .fieldoffset = offsetof(CPUARMState, cp15.vmecid_p_el2) },
5170     { .name = "VMECID_A_EL2", .state = ARM_CP_STATE_AA64,
5171       .opc0 = 3, .opc1 = 4, .opc2 = 1, .crn = 10, .crm = 9,
5172       .access = PL2_RW, .type = ARM_CP_NV_NO_TRAP,
5173       .accessfn = mecid_access, .writefn = mecid_write,
5174       .fieldoffset = offsetof(CPUARMState, cp15.vmecid_a_el2) },
5175     { .name = "DC_CIPAE", .state = ARM_CP_STATE_AA64,
5176       .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 14, .opc2 = 0,
5177       .access = PL2_W, .type = ARM_CP_NOP | ARM_CP_NV_NO_TRAP,
5178       .accessfn = cipae_access },
5179 };
5180 
5181 static const ARMCPRegInfo mec_mte_reginfo[] = {
5182     { .name = "DC_CIGDPAE", .state = ARM_CP_STATE_AA64,
5183       .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 14, .opc2 = 7,
5184       .access = PL2_W, .type = ARM_CP_NOP | ARM_CP_NV_NO_TRAP,
5185       .accessfn = cipae_access },
5186 };
5187 
5188 #ifndef CONFIG_USER_ONLY
5189 /*
5190  * We don't know until after realize whether there's a GICv3
5191  * attached, and that is what registers the gicv3 sysregs.
5192  * So we have to fill in the GIC fields in ID_PFR1 and ID_AA64PFR0_EL1
5193  * at runtime.
5194  */
5195 static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri)
5196 {
5197     ARMCPU *cpu = env_archcpu(env);
5198     uint64_t pfr1 = GET_IDREG(&cpu->isar, ID_PFR1);
5199 
5200     if (env->gicv3state) {
5201         pfr1 = FIELD_DP64(pfr1, ID_PFR1, GIC, 1);
5202     }
5203     return pfr1;
5204 }
5205 
5206 static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri)
5207 {
5208     ARMCPU *cpu = env_archcpu(env);
5209     uint64_t pfr0 = GET_IDREG(&cpu->isar, ID_AA64PFR0);
5210 
5211     if (env->gicv3state) {
5212         pfr0 = FIELD_DP64(pfr0, ID_AA64PFR0, GIC, 1);
5213     }
5214     return pfr0;
5215 }
5216 #endif
5217 
5218 /*
5219  * Shared logic between LORID and the rest of the LOR* registers.
5220  * Secure state exclusion has already been dealt with.
5221  */
5222 static CPAccessResult access_lor_ns(CPUARMState *env,
5223                                     const ARMCPRegInfo *ri, bool isread)
5224 {
5225     int el = arm_current_el(env);
5226 
5227     if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TLOR)) {
5228         return CP_ACCESS_TRAP_EL2;
5229     }
5230     if (el < 3 && (env->cp15.scr_el3 & SCR_TLOR)) {
5231         return CP_ACCESS_TRAP_EL3;
5232     }
5233     return CP_ACCESS_OK;
5234 }
5235 
5236 static CPAccessResult access_lor_other(CPUARMState *env,
5237                                        const ARMCPRegInfo *ri, bool isread)
5238 {
5239     if (arm_is_secure_below_el3(env)) {
5240         /* UNDEF if SCR_EL3.NS == 0 */
5241         return CP_ACCESS_UNDEFINED;
5242     }
5243     return access_lor_ns(env, ri, isread);
5244 }
5245 
5246 /*
5247  * A trivial implementation of ARMv8.1-LOR leaves all of these
5248  * registers fixed at 0, which indicates that there are zero
5249  * supported Limited Ordering regions.
5250  */
5251 static const ARMCPRegInfo lor_reginfo[] = {
5252     { .name = "LORSA_EL1", .state = ARM_CP_STATE_AA64,
5253       .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 0,
5254       .access = PL1_RW, .accessfn = access_lor_other,
5255       .fgt = FGT_LORSA_EL1,
5256       .type = ARM_CP_CONST, .resetvalue = 0 },
5257     { .name = "LOREA_EL1", .state = ARM_CP_STATE_AA64,
5258       .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 1,
5259       .access = PL1_RW, .accessfn = access_lor_other,
5260       .fgt = FGT_LOREA_EL1,
5261       .type = ARM_CP_CONST, .resetvalue = 0 },
5262     { .name = "LORN_EL1", .state = ARM_CP_STATE_AA64,
5263       .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 2,
5264       .access = PL1_RW, .accessfn = access_lor_other,
5265       .fgt = FGT_LORN_EL1,
5266       .type = ARM_CP_CONST, .resetvalue = 0 },
5267     { .name = "LORC_EL1", .state = ARM_CP_STATE_AA64,
5268       .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 3,
5269       .access = PL1_RW, .accessfn = access_lor_other,
5270       .fgt = FGT_LORC_EL1,
5271       .type = ARM_CP_CONST, .resetvalue = 0 },
5272     { .name = "LORID_EL1", .state = ARM_CP_STATE_AA64,
5273       .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 7,
5274       .access = PL1_R, .accessfn = access_lor_ns,
5275       .fgt = FGT_LORID_EL1,
5276       .type = ARM_CP_CONST, .resetvalue = 0 },
5277 };
5278 
5279 static CPAccessResult access_pauth(CPUARMState *env, const ARMCPRegInfo *ri,
5280                                    bool isread)
5281 {
5282     int el = arm_current_el(env);
5283 
5284     if (el < 2 &&
5285         arm_is_el2_enabled(env) &&
5286         !(arm_hcr_el2_eff(env) & HCR_APK)) {
5287         return CP_ACCESS_TRAP_EL2;
5288     }
5289     if (el < 3 &&
5290         arm_feature(env, ARM_FEATURE_EL3) &&
5291         !(env->cp15.scr_el3 & SCR_APK)) {
5292         return CP_ACCESS_TRAP_EL3;
5293     }
5294     return CP_ACCESS_OK;
5295 }
5296 
5297 static const ARMCPRegInfo pauth_reginfo[] = {
5298     { .name = "APDAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
5299       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 0,
5300       .access = PL1_RW, .accessfn = access_pauth,
5301       .fgt = FGT_APDAKEY,
5302       .fieldoffset = offsetof(CPUARMState, keys.apda.lo) },
5303     { .name = "APDAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
5304       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 1,
5305       .access = PL1_RW, .accessfn = access_pauth,
5306       .fgt = FGT_APDAKEY,
5307       .fieldoffset = offsetof(CPUARMState, keys.apda.hi) },
5308     { .name = "APDBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
5309       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 2,
5310       .access = PL1_RW, .accessfn = access_pauth,
5311       .fgt = FGT_APDBKEY,
5312       .fieldoffset = offsetof(CPUARMState, keys.apdb.lo) },
5313     { .name = "APDBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
5314       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 3,
5315       .access = PL1_RW, .accessfn = access_pauth,
5316       .fgt = FGT_APDBKEY,
5317       .fieldoffset = offsetof(CPUARMState, keys.apdb.hi) },
5318     { .name = "APGAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
5319       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 0,
5320       .access = PL1_RW, .accessfn = access_pauth,
5321       .fgt = FGT_APGAKEY,
5322       .fieldoffset = offsetof(CPUARMState, keys.apga.lo) },
5323     { .name = "APGAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
5324       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 1,
5325       .access = PL1_RW, .accessfn = access_pauth,
5326       .fgt = FGT_APGAKEY,
5327       .fieldoffset = offsetof(CPUARMState, keys.apga.hi) },
5328     { .name = "APIAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
5329       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 0,
5330       .access = PL1_RW, .accessfn = access_pauth,
5331       .fgt = FGT_APIAKEY,
5332       .fieldoffset = offsetof(CPUARMState, keys.apia.lo) },
5333     { .name = "APIAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
5334       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 1,
5335       .access = PL1_RW, .accessfn = access_pauth,
5336       .fgt = FGT_APIAKEY,
5337       .fieldoffset = offsetof(CPUARMState, keys.apia.hi) },
5338     { .name = "APIBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
5339       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 2,
5340       .access = PL1_RW, .accessfn = access_pauth,
5341       .fgt = FGT_APIBKEY,
5342       .fieldoffset = offsetof(CPUARMState, keys.apib.lo) },
5343     { .name = "APIBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
5344       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 3,
5345       .access = PL1_RW, .accessfn = access_pauth,
5346       .fgt = FGT_APIBKEY,
5347       .fieldoffset = offsetof(CPUARMState, keys.apib.hi) },
5348 };
5349 
5350 static uint64_t rndr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
5351 {
5352     Error *err = NULL;
5353     uint64_t ret;
5354 
5355     /* Success sets NZCV = 0000.  */
5356     env->NF = env->CF = env->VF = 0, env->ZF = 1;
5357 
5358     if (qemu_guest_getrandom(&ret, sizeof(ret), &err) < 0) {
5359         /*
5360          * ??? Failed, for unknown reasons in the crypto subsystem.
5361          * The best we can do is log the reason and return the
5362          * timed-out indication to the guest.  There is no reason
5363          * we know to expect this failure to be transitory, so the
5364          * guest may well hang retrying the operation.
5365          */
5366         qemu_log_mask(LOG_UNIMP, "%s: Crypto failure: %s\n",
5367                       ri->name, error_get_pretty(err));
5368         error_free(err);
5369 
5370         env->ZF = 0; /* NZCV = 0100 */
5371         return 0;
5372     }
5373     return ret;
5374 }
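
/*
 * Note on the flag encoding above: QEMU stores the Z flag inverted,
 * so the architectural Z bit is set exactly when env->ZF == 0.  Thus
 * "env->ZF = 1" yields NZCV = 0000 (success) and "env->ZF = 0" yields
 * NZCV = 0100, the timeout indication defined for RNDR.
 */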
5375 
5376 /* We do not support re-seeding, so the two registers operate the same.  */
5377 static const ARMCPRegInfo rndr_reginfo[] = {
5378     { .name = "RNDR", .state = ARM_CP_STATE_AA64,
5379       .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO,
5380       .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 0,
5381       .access = PL0_R, .readfn = rndr_readfn },
5382     { .name = "RNDRRS", .state = ARM_CP_STATE_AA64,
5383       .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO,
5384       .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 1,
5385       .access = PL0_R, .readfn = rndr_readfn },
5386 };
5387 
5388 static void dccvap_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
5389                           uint64_t value)
5390 {
5391 #ifdef CONFIG_TCG
5392     ARMCPU *cpu = env_archcpu(env);
5393     /* CTR_EL0 System register -> DminLine, bits [19:16] */
5394     uint64_t dline_size = 4 << ((cpu->ctr >> 16) & 0xF);
5395     uint64_t vaddr_in = (uint64_t) value;
5396     uint64_t vaddr = vaddr_in & ~(dline_size - 1);
5397     void *haddr;
5398     int mem_idx = arm_env_mmu_index(env);
5399 
5400     /* This won't be crossing page boundaries */
5401     haddr = probe_read(env, vaddr, dline_size, mem_idx, GETPC());
5402     if (haddr) {
5403 #ifndef CONFIG_USER_ONLY
5404 
5405         ram_addr_t offset;
5406         MemoryRegion *mr;
5407 
5408         /* RCU lock is already being held */
5409         mr = memory_region_from_host(haddr, &offset);
5410 
5411         if (mr) {
5412             memory_region_writeback(mr, offset, dline_size);
5413         }
5414 #endif /*CONFIG_USER_ONLY*/
5415     }
5416 #else
5417     /* Handled by hardware accelerator. */
5418     g_assert_not_reached();
5419 #endif /* CONFIG_TCG */
5420 }
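
/*
 * Worked example for the line-size computation above: CTR_EL0.DminLine
 * (bits [19:16]) is log2 of the line size in words, so dline_size =
 * 4 << DminLine is the size in bytes.  With DminLine == 4 the line is
 * 64 bytes, and a write of vaddr_in == 0x1003f cleans the line at
 * vaddr == 0x10000.
 */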
5421 
5422 static const ARMCPRegInfo dcpop_reg[] = {
5423     { .name = "DC_CVAP", .state = ARM_CP_STATE_AA64,
5424       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 1,
5425       .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END,
5426       .fgt = FGT_DCCVAP,
5427       .accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn },
5428 };
5429 
5430 static const ARMCPRegInfo dcpodp_reg[] = {
5431     { .name = "DC_CVADP", .state = ARM_CP_STATE_AA64,
5432       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 1,
5433       .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END,
5434       .fgt = FGT_DCCVADP,
5435       .accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn },
5436 };
5437 
5438 static CPAccessResult access_aa64_tid5(CPUARMState *env, const ARMCPRegInfo *ri,
5439                                        bool isread)
5440 {
5441     if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID5)) {
5442         return CP_ACCESS_TRAP_EL2;
5443     }
5444 
5445     return CP_ACCESS_OK;
5446 }
5447 
5448 static CPAccessResult access_mte(CPUARMState *env, const ARMCPRegInfo *ri,
5449                                  bool isread)
5450 {
5451     int el = arm_current_el(env);
5452     if (el < 2 && arm_is_el2_enabled(env)) {
5453         uint64_t hcr = arm_hcr_el2_eff(env);
5454         if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
5455             return CP_ACCESS_TRAP_EL2;
5456         }
5457     }
5458     if (el < 3 &&
5459         arm_feature(env, ARM_FEATURE_EL3) &&
5460         !(env->cp15.scr_el3 & SCR_ATA)) {
5461         return CP_ACCESS_TRAP_EL3;
5462     }
5463     return CP_ACCESS_OK;
5464 }
5465 
5466 static CPAccessResult access_tfsr_el1(CPUARMState *env, const ARMCPRegInfo *ri,
5467                                       bool isread)
5468 {
5469     CPAccessResult nv1 = access_nv1(env, ri, isread);
5470 
5471     if (nv1 != CP_ACCESS_OK) {
5472         return nv1;
5473     }
5474     return access_mte(env, ri, isread);
5475 }
5476 
5477 static CPAccessResult access_tfsr_el2(CPUARMState *env, const ARMCPRegInfo *ri,
5478                                       bool isread)
5479 {
5480     /*
5481      * TFSR_EL2: similar to generic access_mte(), but we need to
5482      * account for FEAT_NV. At EL1 this must be a FEAT_NV access;
5483      * if NV2 is enabled then we will redirect this to TFSR_EL1
5484      * after doing the HCR and SCR ATA traps; otherwise this will
5485      * be a trap to EL2 and the HCR/SCR traps do not apply.
5486      */
5487     int el = arm_current_el(env);
5488 
5489     if (el == 1 && (arm_hcr_el2_eff(env) & HCR_NV2)) {
5490         return CP_ACCESS_OK;
5491     }
5492     if (el < 2 && arm_is_el2_enabled(env)) {
5493         uint64_t hcr = arm_hcr_el2_eff(env);
5494         if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
5495             return CP_ACCESS_TRAP_EL2;
5496         }
5497     }
5498     if (el < 3 &&
5499         arm_feature(env, ARM_FEATURE_EL3) &&
5500         !(env->cp15.scr_el3 & SCR_ATA)) {
5501         return CP_ACCESS_TRAP_EL3;
5502     }
5503     return CP_ACCESS_OK;
5504 }
5505 
5506 static uint64_t tco_read(CPUARMState *env, const ARMCPRegInfo *ri)
5507 {
5508     return env->pstate & PSTATE_TCO;
5509 }
5510 
5511 static void tco_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
5512 {
5513     env->pstate = (env->pstate & ~PSTATE_TCO) | (val & PSTATE_TCO);
5514 }
5515 
5516 static const ARMCPRegInfo mte_reginfo[] = {
5517     { .name = "TFSRE0_EL1", .state = ARM_CP_STATE_AA64,
5518       .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 6, .opc2 = 1,
5519       .access = PL1_RW, .accessfn = access_mte,
5520       .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[0]) },
5521     { .name = "TFSR_EL1", .state = ARM_CP_STATE_AA64,
5522       .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 6, .opc2 = 0,
5523       .access = PL1_RW, .accessfn = access_tfsr_el1,
5524       .nv2_redirect_offset = 0x190 | NV2_REDIR_NV1,
5525       .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 5, 6, 0),
5526       .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 5, 6, 0),
5527       .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[1]) },
5528     { .name = "TFSR_EL2", .state = ARM_CP_STATE_AA64,
5529       .type = ARM_CP_NV2_REDIRECT,
5530       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 6, .opc2 = 0,
5531       .access = PL2_RW, .accessfn = access_tfsr_el2,
5532       .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[2]) },
5533     { .name = "TFSR_EL3", .state = ARM_CP_STATE_AA64,
5534       .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 6, .opc2 = 0,
5535       .access = PL3_RW,
5536       .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[3]) },
5537     { .name = "RGSR_EL1", .state = ARM_CP_STATE_AA64,
5538       .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 5,
5539       .access = PL1_RW, .accessfn = access_mte,
5540       .fieldoffset = offsetof(CPUARMState, cp15.rgsr_el1) },
5541     { .name = "GCR_EL1", .state = ARM_CP_STATE_AA64,
5542       .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 6,
5543       .access = PL1_RW, .accessfn = access_mte,
5544       .fieldoffset = offsetof(CPUARMState, cp15.gcr_el1) },
5545     { .name = "TCO", .state = ARM_CP_STATE_AA64,
5546       .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 7,
5547       .type = ARM_CP_NO_RAW,
5548       .access = PL0_RW, .readfn = tco_read, .writefn = tco_write },
5549     { .name = "DC_IGVAC", .state = ARM_CP_STATE_AA64,
5550       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 3,
5551       .type = ARM_CP_NOP, .access = PL1_W,
5552       .fgt = FGT_DCIVAC,
5553       .accessfn = aa64_cacheop_poc_access },
5554     { .name = "DC_IGSW", .state = ARM_CP_STATE_AA64,
5555       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 4,
5556       .fgt = FGT_DCISW,
5557       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
5558     { .name = "DC_IGDVAC", .state = ARM_CP_STATE_AA64,
5559       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 5,
5560       .type = ARM_CP_NOP, .access = PL1_W,
5561       .fgt = FGT_DCIVAC,
5562       .accessfn = aa64_cacheop_poc_access },
5563     { .name = "DC_IGDSW", .state = ARM_CP_STATE_AA64,
5564       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 6,
5565       .fgt = FGT_DCISW,
5566       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
5567     { .name = "DC_CGSW", .state = ARM_CP_STATE_AA64,
5568       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 4,
5569       .fgt = FGT_DCCSW,
5570       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
5571     { .name = "DC_CGDSW", .state = ARM_CP_STATE_AA64,
5572       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 6,
5573       .fgt = FGT_DCCSW,
5574       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
5575     { .name = "DC_CIGSW", .state = ARM_CP_STATE_AA64,
5576       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 4,
5577       .fgt = FGT_DCCISW,
5578       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
5579     { .name = "DC_CIGDSW", .state = ARM_CP_STATE_AA64,
5580       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 6,
5581       .fgt = FGT_DCCISW,
5582       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
5583 };
5584 
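/*
 * When a CPU implements only the MTE instructions and not the full
 * tag-checking machinery, TCO still has to be accessible without
 * UNDEFing, so it is exposed as a constant (RAZ/WI) register instead.
 */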
5585 static const ARMCPRegInfo mte_tco_ro_reginfo[] = {
5586     { .name = "TCO", .state = ARM_CP_STATE_AA64,
5587       .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 7,
5588       .type = ARM_CP_CONST, .access = PL0_RW, },
5589 };
5590 
5591 static const ARMCPRegInfo mte_el0_cacheop_reginfo[] = {
5592     { .name = "DC_CGVAC", .state = ARM_CP_STATE_AA64,
5593       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 3,
5594       .type = ARM_CP_NOP, .access = PL0_W,
5595       .fgt = FGT_DCCVAC,
5596       .accessfn = aa64_cacheop_poc_access },
5597     { .name = "DC_CGDVAC", .state = ARM_CP_STATE_AA64,
5598       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 5,
5599       .type = ARM_CP_NOP, .access = PL0_W,
5600       .fgt = FGT_DCCVAC,
5601       .accessfn = aa64_cacheop_poc_access },
5602     { .name = "DC_CGVAP", .state = ARM_CP_STATE_AA64,
5603       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 3,
5604       .type = ARM_CP_NOP, .access = PL0_W,
5605       .fgt = FGT_DCCVAP,
5606       .accessfn = aa64_cacheop_poc_access },
5607     { .name = "DC_CGDVAP", .state = ARM_CP_STATE_AA64,
5608       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 5,
5609       .type = ARM_CP_NOP, .access = PL0_W,
5610       .fgt = FGT_DCCVAP,
5611       .accessfn = aa64_cacheop_poc_access },
5612     { .name = "DC_CGVADP", .state = ARM_CP_STATE_AA64,
5613       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 3,
5614       .type = ARM_CP_NOP, .access = PL0_W,
5615       .fgt = FGT_DCCVADP,
5616       .accessfn = aa64_cacheop_poc_access },
5617     { .name = "DC_CGDVADP", .state = ARM_CP_STATE_AA64,
5618       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 5,
5619       .type = ARM_CP_NOP, .access = PL0_W,
5620       .fgt = FGT_DCCVADP,
5621       .accessfn = aa64_cacheop_poc_access },
5622     { .name = "DC_CIGVAC", .state = ARM_CP_STATE_AA64,
5623       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 3,
5624       .type = ARM_CP_NOP, .access = PL0_W,
5625       .fgt = FGT_DCCIVAC,
5626       .accessfn = aa64_cacheop_poc_access },
5627     { .name = "DC_CIGDVAC", .state = ARM_CP_STATE_AA64,
5628       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 5,
5629       .type = ARM_CP_NOP, .access = PL0_W,
5630       .fgt = FGT_DCCIVAC,
5631       .accessfn = aa64_cacheop_poc_access },
5632     { .name = "DC_GVA", .state = ARM_CP_STATE_AA64,
5633       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 3,
5634       .access = PL0_W, .type = ARM_CP_DC_GVA,
5635 #ifndef CONFIG_USER_ONLY
5636       /* Avoid overhead of an access check that always passes in user-mode */
5637       .accessfn = aa64_zva_access,
5638       .fgt = FGT_DCZVA,
5639 #endif
5640     },
5641     { .name = "DC_GZVA", .state = ARM_CP_STATE_AA64,
5642       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 4,
5643       .access = PL0_W, .type = ARM_CP_DC_GZVA,
5644 #ifndef CONFIG_USER_ONLY
5645       /* Avoid overhead of an access check that always passes in user-mode */
5646       .accessfn = aa64_zva_access,
5647       .fgt = FGT_DCZVA,
5648 #endif
5649     },
5650 };
5651 
5652 static CPAccessResult access_scxtnum(CPUARMState *env, const ARMCPRegInfo *ri,
5653                                      bool isread)
5654 {
5655     uint64_t hcr = arm_hcr_el2_eff(env);
5656     int el = arm_current_el(env);
5657 
5658     if (el == 0 && !((hcr & HCR_E2H) && (hcr & HCR_TGE))) {
5659         if (env->cp15.sctlr_el[1] & SCTLR_TSCXT) {
5660             if (hcr & HCR_TGE) {
5661                 return CP_ACCESS_TRAP_EL2;
5662             }
5663             return CP_ACCESS_TRAP_EL1;
5664         }
5665     } else if (el < 2 && (env->cp15.sctlr_el[2] & SCTLR_TSCXT)) {
5666         return CP_ACCESS_TRAP_EL2;
5667     }
5668     if (el < 2 && arm_is_el2_enabled(env) && !(hcr & HCR_ENSCXT)) {
5669         return CP_ACCESS_TRAP_EL2;
5670     }
5671     if (el < 3
5672         && arm_feature(env, ARM_FEATURE_EL3)
5673         && !(env->cp15.scr_el3 & SCR_ENSCXT)) {
5674         return CP_ACCESS_TRAP_EL3;
5675     }
5676     return CP_ACCESS_OK;
5677 }
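/*
 * Worked example of the priority above: an EL0 access with
 * SCTLR_EL1.TSCXT set (and EL2 not in E2H+TGE host mode) traps to
 * EL1, or to EL2 if HCR_EL2.TGE is set; the EnSCXT enable checks for
 * EL2 and EL3 are only reached when no TSCXT trap applies.
 */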
5678 
5679 static CPAccessResult access_scxtnum_el1(CPUARMState *env,
5680                                          const ARMCPRegInfo *ri,
5681                                          bool isread)
5682 {
5683     CPAccessResult nv1 = access_nv1(env, ri, isread);
5684 
5685     if (nv1 != CP_ACCESS_OK) {
5686         return nv1;
5687     }
5688     return access_scxtnum(env, ri, isread);
5689 }
5690 
5691 static const ARMCPRegInfo scxtnum_reginfo[] = {
5692     { .name = "SCXTNUM_EL0", .state = ARM_CP_STATE_AA64,
5693       .opc0 = 3, .opc1 = 3, .crn = 13, .crm = 0, .opc2 = 7,
5694       .access = PL0_RW, .accessfn = access_scxtnum,
5695       .fgt = FGT_SCXTNUM_EL0,
5696       .fieldoffset = offsetof(CPUARMState, scxtnum_el[0]) },
5697     { .name = "SCXTNUM_EL1", .state = ARM_CP_STATE_AA64,
5698       .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 7,
5699       .access = PL1_RW, .accessfn = access_scxtnum_el1,
5700       .fgt = FGT_SCXTNUM_EL1,
5701       .nv2_redirect_offset = 0x188 | NV2_REDIR_NV1,
5702       .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 13, 0, 7),
5703       .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 13, 0, 7),
5704       .fieldoffset = offsetof(CPUARMState, scxtnum_el[1]) },
5705     { .name = "SCXTNUM_EL2", .state = ARM_CP_STATE_AA64,
5706       .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 7,
5707       .access = PL2_RW, .accessfn = access_scxtnum,
5708       .fieldoffset = offsetof(CPUARMState, scxtnum_el[2]) },
5709     { .name = "SCXTNUM_EL3", .state = ARM_CP_STATE_AA64,
5710       .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 7,
5711       .access = PL3_RW,
5712       .fieldoffset = offsetof(CPUARMState, scxtnum_el[3]) },
5713 };
5714 
5715 static CPAccessResult access_fgt(CPUARMState *env, const ARMCPRegInfo *ri,
5716                                  bool isread)
5717 {
5718     if (arm_current_el(env) == 2 &&
5719         arm_feature(env, ARM_FEATURE_EL3) && !(env->cp15.scr_el3 & SCR_FGTEN)) {
5720         return CP_ACCESS_TRAP_EL3;
5721     }
5722     return CP_ACCESS_OK;
5723 }
5724 
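/*
 * The FEAT_FGT trap configuration registers. These are plain state
 * here; the individual trap bits take effect via the .fgt markings on
 * the trapped registers. When EL3 is present, EL2 accesses to these
 * registers are themselves gated by SCR_EL3.FGTEN (access_fgt above).
 */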
5725 static const ARMCPRegInfo fgt_reginfo[] = {
5726     { .name = "HFGRTR_EL2", .state = ARM_CP_STATE_AA64,
5727       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
5728       .nv2_redirect_offset = 0x1b8,
5729       .access = PL2_RW, .accessfn = access_fgt,
5730       .fieldoffset = offsetof(CPUARMState, cp15.fgt_read[FGTREG_HFGRTR]) },
5731     { .name = "HFGWTR_EL2", .state = ARM_CP_STATE_AA64,
5732       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 5,
5733       .nv2_redirect_offset = 0x1c0,
5734       .access = PL2_RW, .accessfn = access_fgt,
5735       .fieldoffset = offsetof(CPUARMState, cp15.fgt_write[FGTREG_HFGWTR]) },
5736     { .name = "HDFGRTR_EL2", .state = ARM_CP_STATE_AA64,
5737       .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 1, .opc2 = 4,
5738       .nv2_redirect_offset = 0x1d0,
5739       .access = PL2_RW, .accessfn = access_fgt,
5740       .fieldoffset = offsetof(CPUARMState, cp15.fgt_read[FGTREG_HDFGRTR]) },
5741     { .name = "HDFGWTR_EL2", .state = ARM_CP_STATE_AA64,
5742       .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 1, .opc2 = 5,
5743       .nv2_redirect_offset = 0x1d8,
5744       .access = PL2_RW, .accessfn = access_fgt,
5745       .fieldoffset = offsetof(CPUARMState, cp15.fgt_write[FGTREG_HDFGWTR]) },
5746     { .name = "HFGITR_EL2", .state = ARM_CP_STATE_AA64,
5747       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 6,
5748       .nv2_redirect_offset = 0x1c8,
5749       .access = PL2_RW, .accessfn = access_fgt,
5750       .fieldoffset = offsetof(CPUARMState, cp15.fgt_exec[FGTREG_HFGITR]) },
5751 };
5752 
5753 static void vncr_write(CPUARMState *env, const ARMCPRegInfo *ri,
5754                        uint64_t value)
5755 {
5756     /*
5757      * Clear the RES0 bottom 12 bits; this means at runtime we can guarantee
5758      * that VNCR_EL2 + offset is 64-bit aligned. We don't need to do anything
5759      * about the RESS bits at the top -- we choose the "generate an EL2
5760      * translation abort on use" CONSTRAINED UNPREDICTABLE option (i.e. let
5761      * the ptw.c code detect the resulting invalid address).
5762      */
5763     env->cp15.vncr_el2 = value & ~0xfffULL;
5764 }
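/*
 * Example of the masking: a guest write of 0x8000123456789fff is
 * stored as 0x8000123456789000, so any VNCR_EL2 + offset access
 * computed from it is 64-bit aligned.
 */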
5765 
5766 static const ARMCPRegInfo nv2_reginfo[] = {
5767     { .name = "VNCR_EL2", .state = ARM_CP_STATE_AA64,
5768       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 2, .opc2 = 0,
5769       .access = PL2_RW,
5770       .writefn = vncr_write,
5771       .nv2_redirect_offset = 0xb0,
5772       .fieldoffset = offsetof(CPUARMState, cp15.vncr_el2) },
5773 };
5774 
5775 static CPAccessResult access_predinv(CPUARMState *env, const ARMCPRegInfo *ri,
5776                                      bool isread)
5777 {
5778     int el = arm_current_el(env);
5779 
5780     if (el == 0) {
5781         uint64_t sctlr = arm_sctlr(env, el);
5782         if (!(sctlr & SCTLR_EnRCTX)) {
5783             return CP_ACCESS_TRAP_EL1;
5784         }
5785     } else if (el == 1) {
5786         uint64_t hcr = arm_hcr_el2_eff(env);
5787         if (hcr & HCR_NV) {
5788             return CP_ACCESS_TRAP_EL2;
5789         }
5790     }
5791     return CP_ACCESS_OK;
5792 }
5793 
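/*
 * The restriction-by-context (RCTX) operations are NOPs here: TCG
 * keeps no prediction state that would need to be invalidated, so
 * only the access checks above are emulated.
 */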
5794 static const ARMCPRegInfo predinv_reginfo[] = {
5795     { .name = "CFP_RCTX", .state = ARM_CP_STATE_AA64,
5796       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 4,
5797       .fgt = FGT_CFPRCTX,
5798       .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
5799     { .name = "DVP_RCTX", .state = ARM_CP_STATE_AA64,
5800       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 5,
5801       .fgt = FGT_DVPRCTX,
5802       .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
5803     { .name = "CPP_RCTX", .state = ARM_CP_STATE_AA64,
5804       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 7,
5805       .fgt = FGT_CPPRCTX,
5806       .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
5807     /*
5808      * Note the AArch32 opcodes have a different OPC1.
5809      */
5810     { .name = "CFPRCTX", .state = ARM_CP_STATE_AA32,
5811       .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 4,
5812       .fgt = FGT_CFPRCTX,
5813       .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
5814     { .name = "DVPRCTX", .state = ARM_CP_STATE_AA32,
5815       .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 5,
5816       .fgt = FGT_DVPRCTX,
5817       .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
5818     { .name = "CPPRCTX", .state = ARM_CP_STATE_AA32,
5819       .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 7,
5820       .fgt = FGT_CPPRCTX,
5821       .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
5822 };
5823 
5824 static uint64_t ccsidr2_read(CPUARMState *env, const ARMCPRegInfo *ri)
5825 {
5826     /* Read the high 32 bits of the current CCSIDR */
5827     return extract64(ccsidr_read(env, ri), 32, 32);
5828 }
5829 
5830 static const ARMCPRegInfo ccsidr2_reginfo[] = {
5831     { .name = "CCSIDR2", .state = ARM_CP_STATE_BOTH,
5832       .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 2,
5833       .access = PL1_R,
5834       .accessfn = access_tid4,
5835       .readfn = ccsidr2_read, .type = ARM_CP_NO_RAW },
5836 };
5837 
5838 static CPAccessResult access_aa64_tid3(CPUARMState *env, const ARMCPRegInfo *ri,
5839                                        bool isread)
5840 {
5841     if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID3)) {
5842         return CP_ACCESS_TRAP_EL2;
5843     }
5844 
5845     return CP_ACCESS_OK;
5846 }
5847 
5848 static CPAccessResult access_aa32_tid3(CPUARMState *env, const ARMCPRegInfo *ri,
5849                                        bool isread)
5850 {
5851     if (arm_feature(env, ARM_FEATURE_V8)) {
5852         return access_aa64_tid3(env, ri, isread);
5853     }
5854 
5855     return CP_ACCESS_OK;
5856 }
5857 
5858 static CPAccessResult access_jazelle(CPUARMState *env, const ARMCPRegInfo *ri,
5859                                      bool isread)
5860 {
5861     if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID0)) {
5862         return CP_ACCESS_TRAP_EL2;
5863     }
5864 
5865     return CP_ACCESS_OK;
5866 }
5867 
5868 static CPAccessResult access_joscr_jmcr(CPUARMState *env,
5869                                         const ARMCPRegInfo *ri, bool isread)
5870 {
5871     /*
5872      * HSTR.TJDBX traps JOSCR and JMCR accesses, but it exists only
5873      * in v7A, not in v8A.
5874      */
5875     if (!arm_feature(env, ARM_FEATURE_V8) &&
5876         arm_current_el(env) < 2 && !arm_is_secure_below_el3(env) &&
5877         (env->cp15.hstr_el2 & HSTR_TJDBX)) {
5878         return CP_ACCESS_TRAP_EL2;
5879     }
5880     return CP_ACCESS_OK;
5881 }
5882 
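/*
 * This is the architecture's "trivial" Jazelle implementation: JIDR
 * reads as zero and JOSCR/JMCR are constant zero, which is all that a
 * core without real Jazelle hardware needs to provide.
 */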
5883 static const ARMCPRegInfo jazelle_regs[] = {
5884     { .name = "JIDR",
5885       .cp = 14, .crn = 0, .crm = 0, .opc1 = 7, .opc2 = 0,
5886       .access = PL1_R, .accessfn = access_jazelle,
5887       .type = ARM_CP_CONST, .resetvalue = 0 },
5888     { .name = "JOSCR",
5889       .cp = 14, .crn = 1, .crm = 0, .opc1 = 7, .opc2 = 0,
5890       .accessfn = access_joscr_jmcr,
5891       .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5892     { .name = "JMCR",
5893       .cp = 14, .crn = 2, .crm = 0, .opc1 = 7, .opc2 = 0,
5894       .accessfn = access_joscr_jmcr,
5895       .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5896 };
5897 
5898 static const ARMCPRegInfo contextidr_el2 = {
5899     .name = "CONTEXTIDR_EL2", .state = ARM_CP_STATE_AA64,
5900     .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 1,
5901     .access = PL2_RW,
5902     .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[2])
5903 };
5904 
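/*
 * Registers which only exist with FEAT_VHE: TTBR1 for the EL2&0
 * regime, the EL2 virtual timer (CNTHV), and the *_EL02 aliases
 * through which EL2 reaches the EL1 timer registers while
 * HCR_EL2.E2H == 1. The aliases are ARM_CP_ALIAS because the
 * underlying state is already migrated via the EL1 encodings.
 */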
5905 static const ARMCPRegInfo vhe_reginfo[] = {
5906     { .name = "TTBR1_EL2", .state = ARM_CP_STATE_AA64,
5907       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 1,
5908       .access = PL2_RW, .writefn = vmsa_tcr_ttbr_el2_write,
5909       .raw_writefn = raw_write,
5910       .fieldoffset = offsetof(CPUARMState, cp15.ttbr1_el[2]) },
5911 #ifndef CONFIG_USER_ONLY
5912     { .name = "CNTHV_CVAL_EL2", .state = ARM_CP_STATE_AA64,
5913       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 2,
5914       .fieldoffset =
5915         offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].cval),
5916       .type = ARM_CP_IO, .access = PL2_RW,
5917       .writefn = gt_hv_cval_write, .raw_writefn = raw_write },
5918     { .name = "CNTHV_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
5919       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 0,
5920       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
5921       .resetfn = gt_hv_timer_reset,
5922       .readfn = gt_hv_tval_read, .writefn = gt_hv_tval_write },
5923     { .name = "CNTHV_CTL_EL2", .state = ARM_CP_STATE_BOTH,
5924       .type = ARM_CP_IO,
5925       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 1,
5926       .access = PL2_RW,
5927       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].ctl),
5928       .writefn = gt_hv_ctl_write, .raw_writefn = raw_write },
5929     { .name = "CNTP_CTL_EL02", .state = ARM_CP_STATE_AA64,
5930       .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 1,
5931       .type = ARM_CP_IO | ARM_CP_ALIAS,
5932       .access = PL2_RW, .accessfn = access_el1nvpct,
5933       .nv2_redirect_offset = 0x180 | NV2_REDIR_NO_NV1,
5934       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
5935       .writefn = gt_phys_ctl_write, .raw_writefn = raw_write },
5936     { .name = "CNTV_CTL_EL02", .state = ARM_CP_STATE_AA64,
5937       .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 1,
5938       .type = ARM_CP_IO | ARM_CP_ALIAS,
5939       .access = PL2_RW, .accessfn = access_el1nvvct,
5940       .nv2_redirect_offset = 0x170 | NV2_REDIR_NO_NV1,
5941       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
5942       .writefn = gt_virt_ctl_write, .raw_writefn = raw_write },
5943     { .name = "CNTP_TVAL_EL02", .state = ARM_CP_STATE_AA64,
5944       .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 0,
5945       .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS,
5946       .access = PL2_RW, .accessfn = e2h_access,
5947       .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write },
5948     { .name = "CNTV_TVAL_EL02", .state = ARM_CP_STATE_AA64,
5949       .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 0,
5950       .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS,
5951       .access = PL2_RW, .accessfn = e2h_access,
5952       .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write },
5953     { .name = "CNTP_CVAL_EL02", .state = ARM_CP_STATE_AA64,
5954       .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 2,
5955       .type = ARM_CP_IO | ARM_CP_ALIAS,
5956       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
5957       .nv2_redirect_offset = 0x178 | NV2_REDIR_NO_NV1,
5958       .access = PL2_RW, .accessfn = access_el1nvpct,
5959       .writefn = gt_phys_cval_write, .raw_writefn = raw_write },
5960     { .name = "CNTV_CVAL_EL02", .state = ARM_CP_STATE_AA64,
5961       .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 2,
5962       .type = ARM_CP_IO | ARM_CP_ALIAS,
5963       .nv2_redirect_offset = 0x168 | NV2_REDIR_NO_NV1,
5964       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
5965       .access = PL2_RW, .accessfn = access_el1nvvct,
5966       .writefn = gt_virt_cval_write, .raw_writefn = raw_write },
5967 #endif
5968 };
5969 
5970 /*
5971  * ACTLR2 and HACTLR2 map to ACTLR_EL1[63:32] and
5972  * ACTLR_EL2[63:32]. They exist only if the ID_MMFR4.AC2 field
 * is non-zero, which is never the case for ARMv7, optional in ARMv8,
 * and mandatory for ARMv8.2 and up.
5975  * ACTLR2 is banked for S and NS if EL3 is AArch32. Since QEMU's
5976  * implementation is RAZ/WI we can ignore this detail, as we
5977  * do for ACTLR.
5978  */
5979 static const ARMCPRegInfo actlr2_hactlr2_reginfo[] = {
5980     { .name = "ACTLR2", .state = ARM_CP_STATE_AA32,
5981       .cp = 15, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 3,
5982       .access = PL1_RW, .accessfn = access_tacr,
5983       .type = ARM_CP_CONST, .resetvalue = 0 },
5984     { .name = "HACTLR2", .state = ARM_CP_STATE_AA32,
5985       .cp = 15, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 3,
5986       .access = PL2_RW, .type = ARM_CP_CONST,
5987       .resetvalue = 0 },
5988 };
5989 
5990 static CPAccessResult sctlr2_el2_access(CPUARMState *env,
5991                                         const ARMCPRegInfo *ri,
5992                                         bool isread)
5993 {
5994     if (arm_current_el(env) < 3
5995         && arm_feature(env, ARM_FEATURE_EL3)
5996         && !(env->cp15.scr_el3 & SCR_SCTLR2EN)) {
5997         return CP_ACCESS_TRAP_EL3;
5998     }
5999     return CP_ACCESS_OK;
6000 }
6001 
6002 static CPAccessResult sctlr2_el1_access(CPUARMState *env,
6003                                         const ARMCPRegInfo *ri,
6004                                         bool isread)
6005 {
6006     CPAccessResult ret = access_tvm_trvm(env, ri, isread);
6007     if (ret != CP_ACCESS_OK) {
6008         return ret;
6009     }
6010     if (arm_current_el(env) < 2 && !(arm_hcrx_el2_eff(env) & HCRX_SCTLR2EN)) {
6011         return CP_ACCESS_TRAP_EL2;
6012     }
6013     return sctlr2_el2_access(env, ri, isread);
6014 }
6015 
6016 static void sctlr2_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
6017                              uint64_t value)
6018 {
6019     uint64_t valid_mask = 0;
6020 
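    /* No SCTLR2_EL1 bits are implemented yet, so writes store zero. */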
6021     value &= valid_mask;
6022     raw_write(env, ri, value);
6023 }
6024 
6025 static void sctlr2_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
6026                              uint64_t value)
6027 {
6028     uint64_t valid_mask = 0;
6029 
6030     if (cpu_isar_feature(aa64_mec, env_archcpu(env))) {
6031         valid_mask |= SCTLR2_EMEC;
6032     }
6033     value &= valid_mask;
6034     raw_write(env, ri, value);
6035 }
6036 
6037 static void sctlr2_el3_write(CPUARMState *env, const ARMCPRegInfo *ri,
6038                              uint64_t value)
6039 {
6040     uint64_t valid_mask = 0;
6041 
6042     if (cpu_isar_feature(aa64_mec, env_archcpu(env))) {
6043         valid_mask |= SCTLR2_EMEC;
6044     }
6045     value &= valid_mask;
6046     raw_write(env, ri, value);
6047 }
6048 
6049 static const ARMCPRegInfo sctlr2_reginfo[] = {
6050     { .name = "SCTLR2_EL1", .state = ARM_CP_STATE_AA64,
6051       .opc0 = 3, .opc1 = 0, .opc2 = 3, .crn = 1, .crm = 0,
6052       .access = PL1_RW, .accessfn = sctlr2_el1_access,
6053       .writefn = sctlr2_el1_write, .fgt = FGT_SCTLR_EL1,
6054       .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 1, 0, 3),
6055       .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 1, 0, 3),
6056       .nv2_redirect_offset = 0x278 | NV2_REDIR_NV1,
6057       .fieldoffset = offsetof(CPUARMState, cp15.sctlr2_el[1]) },
6058     { .name = "SCTLR2_EL2", .state = ARM_CP_STATE_AA64,
6059       .opc0 = 3, .opc1 = 4, .opc2 = 3, .crn = 1, .crm = 0,
6060       .access = PL2_RW, .accessfn = sctlr2_el2_access,
6061       .writefn = sctlr2_el2_write,
6062       .fieldoffset = offsetof(CPUARMState, cp15.sctlr2_el[2]) },
6063     { .name = "SCTLR2_EL3", .state = ARM_CP_STATE_AA64,
6064       .opc0 = 3, .opc1 = 6, .opc2 = 3, .crn = 1, .crm = 0,
6065       .access = PL3_RW, .writefn = sctlr2_el3_write,
6066       .fieldoffset = offsetof(CPUARMState, cp15.sctlr2_el[3]) },
6067 };
6068 
6069 static CPAccessResult tcr2_el2_access(CPUARMState *env, const ARMCPRegInfo *ri,
6070                                       bool isread)
6071 {
6072     if (arm_current_el(env) < 3
6073         && arm_feature(env, ARM_FEATURE_EL3)
6074         && !(env->cp15.scr_el3 & SCR_TCR2EN)) {
6075         return CP_ACCESS_TRAP_EL3;
6076     }
6077     return CP_ACCESS_OK;
6078 }
6079 
6080 static CPAccessResult tcr2_el1_access(CPUARMState *env, const ARMCPRegInfo *ri,
6081                                       bool isread)
6082 {
6083     CPAccessResult ret = access_tvm_trvm(env, ri, isread);
6084     if (ret != CP_ACCESS_OK) {
6085         return ret;
6086     }
6087     if (arm_current_el(env) < 2 && !(arm_hcrx_el2_eff(env) & HCRX_TCR2EN)) {
6088         return CP_ACCESS_TRAP_EL2;
6089     }
6090     return tcr2_el2_access(env, ri, isread);
6091 }
6092 
6093 static void tcr2_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
6094                            uint64_t value)
6095 {
6096     ARMCPU *cpu = env_archcpu(env);
6097     uint64_t valid_mask = 0;
6098 
6099     if (cpu_isar_feature(aa64_s1pie, cpu)) {
6100         valid_mask |= TCR2_PIE;
6101     }
6102     if (cpu_isar_feature(aa64_aie, cpu)) {
6103         valid_mask |= TCR2_AIE;
6104     }
6105     value &= valid_mask;
6106     raw_write(env, ri, value);
6107 }
6108 
6109 static void tcr2_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
6110                            uint64_t value)
6111 {
6112     ARMCPU *cpu = env_archcpu(env);
6113     uint64_t valid_mask = 0;
6114 
6115     if (cpu_isar_feature(aa64_s1pie, cpu)) {
6116         valid_mask |= TCR2_PIE;
6117     }
6118     if (cpu_isar_feature(aa64_aie, cpu)) {
6119         valid_mask |= TCR2_AIE;
6120     }
6121     if (cpu_isar_feature(aa64_mec, cpu)) {
6122         valid_mask |= TCR2_AMEC0 | TCR2_AMEC1;
6123     }
6124     value &= valid_mask;
6125     raw_write(env, ri, value);
6126 }
6127 
6128 static const ARMCPRegInfo tcr2_reginfo[] = {
6129     { .name = "TCR2_EL1", .state = ARM_CP_STATE_AA64,
6130       .opc0 = 3, .opc1 = 0, .opc2 = 3, .crn = 2, .crm = 0,
6131       .access = PL1_RW, .accessfn = tcr2_el1_access,
6132       .writefn = tcr2_el1_write, .fgt = FGT_TCR_EL1,
6133       .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 2, 0, 3),
6134       .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 2, 0, 3),
6135       .nv2_redirect_offset = 0x270 | NV2_REDIR_NV1,
6136       .fieldoffset = offsetof(CPUARMState, cp15.tcr2_el[1]) },
6137     { .name = "TCR2_EL2", .state = ARM_CP_STATE_AA64,
6138       .opc0 = 3, .opc1 = 4, .opc2 = 3, .crn = 2, .crm = 0,
6139       .access = PL2_RW, .accessfn = tcr2_el2_access,
6140       .writefn = tcr2_el2_write,
6141       .fieldoffset = offsetof(CPUARMState, cp15.tcr2_el[2]) },
6142 };
6143 
6144 static CPAccessResult pien_access(CPUARMState *env, const ARMCPRegInfo *ri,
6145                                   bool isread)
6146 {
6147     if (arm_feature(env, ARM_FEATURE_EL3)
6148         && !(env->cp15.scr_el3 & SCR_PIEN)
6149         && arm_current_el(env) < 3) {
6150         return CP_ACCESS_TRAP_EL3;
6151     }
6152     return CP_ACCESS_OK;
6153 }
6154 
6155 static CPAccessResult pien_el1_access(CPUARMState *env, const ARMCPRegInfo *ri,
6156                                       bool isread)
6157 {
6158     CPAccessResult ret = access_tvm_trvm(env, ri, isread);
6159     if (ret == CP_ACCESS_OK) {
6160         ret = pien_access(env, ri, isread);
6161     }
6162     return ret;
6163 }
6164 
6165 static const ARMCPRegInfo s1pie_reginfo[] = {
6166     { .name = "PIR_EL1", .state = ARM_CP_STATE_AA64,
6167       .opc0 = 3, .opc1 = 0, .opc2 = 3, .crn = 10, .crm = 2,
6168       .access = PL1_RW, .accessfn = pien_el1_access,
6169       .fgt = FGT_NPIR_EL1, .nv2_redirect_offset = 0x2a0 | NV2_REDIR_NV1,
6170       .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 10, 2, 3),
6171       .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 10, 2, 3),
6172       .fieldoffset = offsetof(CPUARMState, cp15.pir_el[1]) },
6173     { .name = "PIR_EL2", .state = ARM_CP_STATE_AA64,
6174       .opc0 = 3, .opc1 = 4, .opc2 = 3, .crn = 10, .crm = 2,
6175       .access = PL2_RW, .accessfn = pien_access,
6176       .fieldoffset = offsetof(CPUARMState, cp15.pir_el[2]) },
6177     { .name = "PIR_EL3", .state = ARM_CP_STATE_AA64,
6178       .opc0 = 3, .opc1 = 6, .opc2 = 3, .crn = 10, .crm = 2,
6179       .access = PL3_RW,
6180       .fieldoffset = offsetof(CPUARMState, cp15.pir_el[3]) },
6181     { .name = "PIRE0_EL1", .state = ARM_CP_STATE_AA64,
6182       .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 10, .crm = 2,
6183       .access = PL1_RW, .accessfn = pien_el1_access,
6184       .fgt = FGT_NPIRE0_EL1, .nv2_redirect_offset = 0x290 | NV2_REDIR_NV1,
6185       .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 10, 2, 2),
6186       .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 10, 2, 2),
6187       .fieldoffset = offsetof(CPUARMState, cp15.pir_el[0]) },
6188     { .name = "PIRE0_EL2", .state = ARM_CP_STATE_AA64,
6189       .opc0 = 3, .opc1 = 4, .opc2 = 2, .crn = 10, .crm = 2,
6190       .access = PL2_RW, .accessfn = pien_access,
6191       .fieldoffset = offsetof(CPUARMState, cp15.pire0_el2) },
6192 };
6193 
6194 static const ARMCPRegInfo s2pie_reginfo[] = {
6195     { .name = "S2PIR_EL2", .state = ARM_CP_STATE_AA64,
6196       .opc0 = 3, .opc1 = 4, .opc2 = 5, .crn = 10, .crm = 2,
6197       .access = PL2_RW, .accessfn = pien_access,
6198       .nv2_redirect_offset = 0x2b0,
6199       .fieldoffset = offsetof(CPUARMState, cp15.s2pir_el2) },
6200 };
6201 
6202 static CPAccessResult aien_access(CPUARMState *env, const ARMCPRegInfo *ri,
6203                                   bool isread)
6204 {
6205     if (arm_feature(env, ARM_FEATURE_EL3)
6206         && !(env->cp15.scr_el3 & SCR_AIEN)
6207         && arm_current_el(env) < 3) {
6208         return CP_ACCESS_TRAP_EL3;
6209     }
6210     return CP_ACCESS_OK;
6211 }
6212 
6213 static CPAccessResult aien_el1_access(CPUARMState *env, const ARMCPRegInfo *ri,
6214                                       bool isread)
6215 {
6216     CPAccessResult ret = access_tvm_trvm(env, ri, isread);
6217     if (ret == CP_ACCESS_OK) {
6218         ret = aien_access(env, ri, isread);
6219     }
6220     return ret;
6221 }
6222 
6223 static const ARMCPRegInfo aie_reginfo[] = {
6224     { .name = "MAIR2_EL1", .state = ARM_CP_STATE_AA64,
6225       .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1,
6226       .access = PL1_RW, .accessfn = aien_el1_access,
6227       .fgt = FGT_NMAIR2_EL1, .nv2_redirect_offset = 0x280 | NV2_REDIR_NV1,
6228       .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 10, 1, 1),
6229       .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 10, 2, 1),
6230       .fieldoffset = offsetof(CPUARMState, cp15.mair2_el[1]) },
6231     { .name = "MAIR2_EL2", .state = ARM_CP_STATE_AA64,
6232       .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 1, .opc2 = 1,
6233       .access = PL2_RW, .accessfn = aien_access,
6234       .fieldoffset = offsetof(CPUARMState, cp15.mair2_el[2]) },
6235     { .name = "MAIR2_EL3", .state = ARM_CP_STATE_AA64,
6236       .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 1, .opc2 = 1,
6237       .access = PL3_RW,
6238       .fieldoffset = offsetof(CPUARMState, cp15.mair2_el[3]) },
6239 
6240     { .name = "AMAIR2_EL1", .state = ARM_CP_STATE_AA64,
6241       .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 3, .opc2 = 1,
6242       .access = PL1_RW, .accessfn = aien_el1_access,
6243       .fgt = FGT_NAMAIR2_EL1, .nv2_redirect_offset = 0x288 | NV2_REDIR_NV1,
6244       .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 10, 3, 1),
6245       .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 10, 3, 1),
6246       .type = ARM_CP_CONST, .resetvalue = 0 },
6247     { .name = "AMAIR2_EL2", .state = ARM_CP_STATE_AA64,
6248       .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
6249       .access = PL2_RW, .accessfn = aien_access,
6250       .type = ARM_CP_CONST, .resetvalue = 0 },
6251     { .name = "AMAIR2_EL3", .state = ARM_CP_STATE_AA64,
6252       .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 1,
6253       .access = PL3_RW,
6254       .type = ARM_CP_CONST, .resetvalue = 0 },
6255 };
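/*
 * Note the asymmetry above: the MAIR2_ELx registers have real backing
 * state, while the AMAIR2_ELx registers are constant zero, matching
 * the existing RAZ/WI treatment of AMAIR.
 */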
6256 
6257 void register_cp_regs_for_features(ARMCPU *cpu)
6258 {
6259     /* Register all the coprocessor registers based on feature bits */
6260     CPUARMState *env = &cpu->env;
6261     ARMISARegisters *isar = &cpu->isar;
6262 
6263     if (arm_feature(env, ARM_FEATURE_M)) {
6264         /* M profile has no coprocessor registers */
6265         return;
6266     }
6267 
6268     define_arm_cp_regs(cpu, cp_reginfo);
6269     if (!arm_feature(env, ARM_FEATURE_V8)) {
6270         /*
6271          * Must go early as it is full of wildcards that may be
6272          * overridden by later definitions.
6273          */
6274         define_arm_cp_regs(cpu, not_v8_cp_reginfo);
6275     }
6276 
6277 #ifndef CONFIG_USER_ONLY
6278     if (tcg_enabled()) {
6279         define_tlb_insn_regs(cpu);
6280         define_at_insn_regs(cpu);
6281     }
6282 #endif
6283 
6284     if (arm_feature(env, ARM_FEATURE_V6)) {
6285         /* The ID registers all have impdef reset values */
6286         ARMCPRegInfo v6_idregs[] = {
6287             { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH,
6288               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
6289               .access = PL1_R, .type = ARM_CP_CONST,
6290               .accessfn = access_aa32_tid3,
6291               .resetvalue = GET_IDREG(isar, ID_PFR0)},
6292             /*
             * ID_PFR1 is not a plain ARM_CP_CONST in system emulation
             * because we don't know the right value for the GIC field
             * until after we define these regs.
6295              */
6296             { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH,
6297               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1,
              .access = PL1_R,
6300 #ifdef CONFIG_USER_ONLY
6301               .type = ARM_CP_CONST,
6302               .resetvalue = GET_IDREG(isar, ID_PFR1),
6303 #else
6304               .type = ARM_CP_NO_RAW,
6305               .accessfn = access_aa32_tid3,
6306               .readfn = id_pfr1_read,
6307               .writefn = arm_cp_write_ignore
6308 #endif
6309             },
6310             { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH,
6311               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2,
6312               .access = PL1_R, .type = ARM_CP_CONST,
6313               .accessfn = access_aa32_tid3,
6314               .resetvalue = GET_IDREG(isar, ID_DFR0)},
6315             { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH,
6316               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3,
6317               .access = PL1_R, .type = ARM_CP_CONST,
6318               .accessfn = access_aa32_tid3,
6319               .resetvalue = GET_IDREG(isar, ID_AFR0)},
6320             { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH,
6321               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4,
6322               .access = PL1_R, .type = ARM_CP_CONST,
6323               .accessfn = access_aa32_tid3,
6324               .resetvalue = GET_IDREG(isar, ID_MMFR0)},
6325             { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH,
6326               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5,
6327               .access = PL1_R, .type = ARM_CP_CONST,
6328               .accessfn = access_aa32_tid3,
6329               .resetvalue = GET_IDREG(isar, ID_MMFR1)},
6330             { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH,
6331               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6,
6332               .access = PL1_R, .type = ARM_CP_CONST,
6333               .accessfn = access_aa32_tid3,
6334               .resetvalue = GET_IDREG(isar, ID_MMFR2)},
6335             { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH,
6336               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7,
6337               .access = PL1_R, .type = ARM_CP_CONST,
6338               .accessfn = access_aa32_tid3,
6339               .resetvalue = GET_IDREG(isar, ID_MMFR3)},
6340             { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH,
6341               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
6342               .access = PL1_R, .type = ARM_CP_CONST,
6343               .accessfn = access_aa32_tid3,
6344               .resetvalue = GET_IDREG(isar, ID_ISAR0)},
6345             { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH,
6346               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1,
6347               .access = PL1_R, .type = ARM_CP_CONST,
6348               .accessfn = access_aa32_tid3,
6349               .resetvalue = GET_IDREG(isar, ID_ISAR1)},
6350             { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH,
6351               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
6352               .access = PL1_R, .type = ARM_CP_CONST,
6353               .accessfn = access_aa32_tid3,
6354               .resetvalue = GET_IDREG(isar, ID_ISAR2)},
6355             { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH,
6356               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3,
6357               .access = PL1_R, .type = ARM_CP_CONST,
6358               .accessfn = access_aa32_tid3,
6359               .resetvalue = GET_IDREG(isar, ID_ISAR3) },
6360             { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH,
6361               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4,
6362               .access = PL1_R, .type = ARM_CP_CONST,
6363               .accessfn = access_aa32_tid3,
6364               .resetvalue = GET_IDREG(isar, ID_ISAR4) },
6365             { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH,
6366               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5,
6367               .access = PL1_R, .type = ARM_CP_CONST,
6368               .accessfn = access_aa32_tid3,
6369               .resetvalue = GET_IDREG(isar, ID_ISAR5) },
6370             { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH,
6371               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6,
6372               .access = PL1_R, .type = ARM_CP_CONST,
6373               .accessfn = access_aa32_tid3,
6374               .resetvalue = GET_IDREG(isar, ID_MMFR4)},
6375             { .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH,
6376               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7,
6377               .access = PL1_R, .type = ARM_CP_CONST,
6378               .accessfn = access_aa32_tid3,
6379               .resetvalue = GET_IDREG(isar, ID_ISAR6) },
6380         };
6381         define_arm_cp_regs(cpu, v6_idregs);
6382         define_arm_cp_regs(cpu, v6_cp_reginfo);
6383     } else {
6384         define_arm_cp_regs(cpu, not_v6_cp_reginfo);
6385     }
6386     if (arm_feature(env, ARM_FEATURE_V6K)) {
6387         define_arm_cp_regs(cpu, v6k_cp_reginfo);
6388     }
6389     if (arm_feature(env, ARM_FEATURE_V7)) {
6390         ARMCPRegInfo clidr = {
6391             .name = "CLIDR", .state = ARM_CP_STATE_BOTH,
6392             .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
6393             .access = PL1_R, .type = ARM_CP_CONST,
6394             .accessfn = access_tid4,
6395             .fgt = FGT_CLIDR_EL1,
6396             .resetvalue = GET_IDREG(isar, CLIDR)
6397         };
6398         define_one_arm_cp_reg(cpu, &clidr);
6399         define_arm_cp_regs(cpu, v7_cp_reginfo);
6400         define_debug_regs(cpu);
6401     } else {
6402         define_arm_cp_regs(cpu, not_v7_cp_reginfo);
6403     }
6404     if (arm_feature(env, ARM_FEATURE_V8)) {
6405         /*
6406          * v8 ID registers, which all have impdef reset values.
6407          * Note that within the ID register ranges the unused slots
         * must all be RAZ, not UNDEF; future architecture versions may
6409          * define new registers here.
6410          * ID registers which are AArch64 views of the AArch32 ID registers
6411          * which already existed in v6 and v7 are handled elsewhere,
6412          * in v6_idregs[].
6413          */
6414         int i;
6415         ARMCPRegInfo v8_idregs[] = {
6416             /*
6417              * ID_AA64PFR0_EL1 is not a plain ARM_CP_CONST in system
6418              * emulation because we don't know the right value for the
6419              * GIC field until after we define these regs.
6420              */
6421             { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64,
6422               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0,
6423               .access = PL1_R,
6424 #ifdef CONFIG_USER_ONLY
6425               .type = ARM_CP_CONST,
6426               .resetvalue = GET_IDREG(isar, ID_AA64PFR0)
6427 #else
6428               .type = ARM_CP_NO_RAW,
6429               .accessfn = access_aa64_tid3,
6430               .readfn = id_aa64pfr0_read,
6431               .writefn = arm_cp_write_ignore
6432 #endif
6433             },
6434             { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64,
6435               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
6436               .access = PL1_R, .type = ARM_CP_CONST,
6437               .accessfn = access_aa64_tid3,
6438               .resetvalue = GET_IDREG(isar, ID_AA64PFR1)},
6439             { .name = "ID_AA64PFR2_EL1", .state = ARM_CP_STATE_AA64,
6440               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2,
6441               .access = PL1_R, .type = ARM_CP_CONST,
6442               .accessfn = access_aa64_tid3,
6443               .resetvalue = GET_IDREG(isar, ID_AA64PFR2)},
6444             { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6445               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3,
6446               .access = PL1_R, .type = ARM_CP_CONST,
6447               .accessfn = access_aa64_tid3,
6448               .resetvalue = 0 },
6449             { .name = "ID_AA64ZFR0_EL1", .state = ARM_CP_STATE_AA64,
6450               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4,
6451               .access = PL1_R, .type = ARM_CP_CONST,
6452               .accessfn = access_aa64_tid3,
6453               .resetvalue = GET_IDREG(isar, ID_AA64ZFR0)},
6454             { .name = "ID_AA64SMFR0_EL1", .state = ARM_CP_STATE_AA64,
6455               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5,
6456               .access = PL1_R, .type = ARM_CP_CONST,
6457               .accessfn = access_aa64_tid3,
6458               .resetvalue = GET_IDREG(isar, ID_AA64SMFR0)},
6459             { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6460               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6,
6461               .access = PL1_R, .type = ARM_CP_CONST,
6462               .accessfn = access_aa64_tid3,
6463               .resetvalue = 0 },
6464             { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6465               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7,
6466               .access = PL1_R, .type = ARM_CP_CONST,
6467               .accessfn = access_aa64_tid3,
6468               .resetvalue = 0 },
6469             { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64,
6470               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0,
6471               .access = PL1_R, .type = ARM_CP_CONST,
6472               .accessfn = access_aa64_tid3,
6473               .resetvalue = GET_IDREG(isar, ID_AA64DFR0) },
6474             { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64,
6475               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1,
6476               .access = PL1_R, .type = ARM_CP_CONST,
6477               .accessfn = access_aa64_tid3,
6478               .resetvalue = GET_IDREG(isar, ID_AA64DFR1) },
6479             { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6480               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2,
6481               .access = PL1_R, .type = ARM_CP_CONST,
6482               .accessfn = access_aa64_tid3,
6483               .resetvalue = 0 },
6484             { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6485               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3,
6486               .access = PL1_R, .type = ARM_CP_CONST,
6487               .accessfn = access_aa64_tid3,
6488               .resetvalue = 0 },
6489             { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64,
6490               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4,
6491               .access = PL1_R, .type = ARM_CP_CONST,
6492               .accessfn = access_aa64_tid3,
6493               .resetvalue = GET_IDREG(isar, ID_AA64AFR0) },
6494             { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64,
6495               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5,
6496               .access = PL1_R, .type = ARM_CP_CONST,
6497               .accessfn = access_aa64_tid3,
6498               .resetvalue = GET_IDREG(isar, ID_AA64AFR1) },
6499             { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6500               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6,
6501               .access = PL1_R, .type = ARM_CP_CONST,
6502               .accessfn = access_aa64_tid3,
6503               .resetvalue = 0 },
6504             { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6505               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7,
6506               .access = PL1_R, .type = ARM_CP_CONST,
6507               .accessfn = access_aa64_tid3,
6508               .resetvalue = 0 },
6509             { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64,
6510               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
6511               .access = PL1_R, .type = ARM_CP_CONST,
6512               .accessfn = access_aa64_tid3,
6513               .resetvalue = GET_IDREG(isar, ID_AA64ISAR0)},
6514             { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
6515               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
6516               .access = PL1_R, .type = ARM_CP_CONST,
6517               .accessfn = access_aa64_tid3,
6518               .resetvalue = GET_IDREG(isar, ID_AA64ISAR1)},
6519             { .name = "ID_AA64ISAR2_EL1", .state = ARM_CP_STATE_AA64,
6520               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
6521               .access = PL1_R, .type = ARM_CP_CONST,
6522               .accessfn = access_aa64_tid3,
6523               .resetvalue = GET_IDREG(isar, ID_AA64ISAR2)},
6524             { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6525               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3,
6526               .access = PL1_R, .type = ARM_CP_CONST,
6527               .accessfn = access_aa64_tid3,
6528               .resetvalue = 0 },
6529             { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6530               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4,
6531               .access = PL1_R, .type = ARM_CP_CONST,
6532               .accessfn = access_aa64_tid3,
6533               .resetvalue = 0 },
6534             { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6535               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5,
6536               .access = PL1_R, .type = ARM_CP_CONST,
6537               .accessfn = access_aa64_tid3,
6538               .resetvalue = 0 },
6539             { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6540               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6,
6541               .access = PL1_R, .type = ARM_CP_CONST,
6542               .accessfn = access_aa64_tid3,
6543               .resetvalue = 0 },
6544             { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6545               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7,
6546               .access = PL1_R, .type = ARM_CP_CONST,
6547               .accessfn = access_aa64_tid3,
6548               .resetvalue = 0 },
6549             { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64,
6550               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
6551               .access = PL1_R, .type = ARM_CP_CONST,
6552               .accessfn = access_aa64_tid3,
6553               .resetvalue = GET_IDREG(isar, ID_AA64MMFR0)},
6554             { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64,
6555               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
6556               .access = PL1_R, .type = ARM_CP_CONST,
6557               .accessfn = access_aa64_tid3,
6558               .resetvalue = GET_IDREG(isar, ID_AA64MMFR1) },
6559             { .name = "ID_AA64MMFR2_EL1", .state = ARM_CP_STATE_AA64,
6560               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2,
6561               .access = PL1_R, .type = ARM_CP_CONST,
6562               .accessfn = access_aa64_tid3,
6563               .resetvalue = GET_IDREG(isar, ID_AA64MMFR2) },
6564             { .name = "ID_AA64MMFR3_EL1", .state = ARM_CP_STATE_AA64,
6565               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3,
6566               .access = PL1_R, .type = ARM_CP_CONST,
6567               .accessfn = access_aa64_tid3,
6568               .resetvalue = GET_IDREG(isar, ID_AA64MMFR3) },
6569             { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6570               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4,
6571               .access = PL1_R, .type = ARM_CP_CONST,
6572               .accessfn = access_aa64_tid3,
6573               .resetvalue = 0 },
6574             { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6575               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5,
6576               .access = PL1_R, .type = ARM_CP_CONST,
6577               .accessfn = access_aa64_tid3,
6578               .resetvalue = 0 },
6579             { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6580               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6,
6581               .access = PL1_R, .type = ARM_CP_CONST,
6582               .accessfn = access_aa64_tid3,
6583               .resetvalue = 0 },
6584             { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6585               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7,
6586               .access = PL1_R, .type = ARM_CP_CONST,
6587               .accessfn = access_aa64_tid3,
6588               .resetvalue = 0 },
6589             { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64,
6590               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
6591               .access = PL1_R, .type = ARM_CP_CONST,
6592               .accessfn = access_aa64_tid3,
6593               .resetvalue = cpu->isar.mvfr0 },
6594             { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64,
6595               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
6596               .access = PL1_R, .type = ARM_CP_CONST,
6597               .accessfn = access_aa64_tid3,
6598               .resetvalue = cpu->isar.mvfr1 },
6599             { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64,
6600               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
6601               .access = PL1_R, .type = ARM_CP_CONST,
6602               .accessfn = access_aa64_tid3,
6603               .resetvalue = cpu->isar.mvfr2 },
6604             /*
6605              * "0, c0, c3, {0,1,2}" are the encodings corresponding to
6606              * AArch64 MVFR[012]_EL1. Define the STATE_AA32 encoding
6607              * as RAZ, since it is in the "reserved for future ID
6608              * registers, RAZ" part of the AArch32 encoding space.
6609              */
6610             { .name = "RES_0_C0_C3_0", .state = ARM_CP_STATE_AA32,
6611               .cp = 15, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
6612               .access = PL1_R, .type = ARM_CP_CONST,
6613               .accessfn = access_aa64_tid3,
6614               .resetvalue = 0 },
6615             { .name = "RES_0_C0_C3_1", .state = ARM_CP_STATE_AA32,
6616               .cp = 15, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
6617               .access = PL1_R, .type = ARM_CP_CONST,
6618               .accessfn = access_aa64_tid3,
6619               .resetvalue = 0 },
6620             { .name = "RES_0_C0_C3_2", .state = ARM_CP_STATE_AA32,
6621               .cp = 15, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
6622               .access = PL1_R, .type = ARM_CP_CONST,
6623               .accessfn = access_aa64_tid3,
6624               .resetvalue = 0 },
6625             /*
6626              * Other encodings in "0, c0, c3, ..." are STATE_BOTH because
             * they're also RAZ for AArch64, and in v8 these slots are
             * gradually being filled with the AArch64 views of new
             * AArch32 ID registers.
6630              */
6631             { .name = "RES_0_C0_C3_3", .state = ARM_CP_STATE_BOTH,
6632               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3,
6633               .access = PL1_R, .type = ARM_CP_CONST,
6634               .accessfn = access_aa64_tid3,
6635               .resetvalue = 0 },
6636             { .name = "ID_PFR2", .state = ARM_CP_STATE_BOTH,
6637               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4,
6638               .access = PL1_R, .type = ARM_CP_CONST,
6639               .accessfn = access_aa64_tid3,
6640               .resetvalue = GET_IDREG(isar, ID_PFR2)},
6641             { .name = "ID_DFR1", .state = ARM_CP_STATE_BOTH,
6642               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5,
6643               .access = PL1_R, .type = ARM_CP_CONST,
6644               .accessfn = access_aa64_tid3,
6645               .resetvalue = GET_IDREG(isar, ID_DFR1)},
6646             { .name = "ID_MMFR5", .state = ARM_CP_STATE_BOTH,
6647               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6,
6648               .access = PL1_R, .type = ARM_CP_CONST,
6649               .accessfn = access_aa64_tid3,
6650               .resetvalue = GET_IDREG(isar, ID_MMFR5)},
6651             { .name = "RES_0_C0_C3_7", .state = ARM_CP_STATE_BOTH,
6652               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7,
6653               .access = PL1_R, .type = ARM_CP_CONST,
6654               .accessfn = access_aa64_tid3,
6655               .resetvalue = 0 },
6656         };
6657 #ifdef CONFIG_USER_ONLY
6658         static const ARMCPRegUserSpaceInfo v8_user_idregs[] = {
6659             { .name = "ID_AA64PFR0_EL1",
6660               .exported_bits = R_ID_AA64PFR0_FP_MASK |
6661                                R_ID_AA64PFR0_ADVSIMD_MASK |
6662                                R_ID_AA64PFR0_SVE_MASK |
6663                                R_ID_AA64PFR0_DIT_MASK,
6664               .fixed_bits = (0x1u << R_ID_AA64PFR0_EL0_SHIFT) |
6665                             (0x1u << R_ID_AA64PFR0_EL1_SHIFT) },
6666             { .name = "ID_AA64PFR1_EL1",
6667               .exported_bits = R_ID_AA64PFR1_BT_MASK |
6668                                R_ID_AA64PFR1_SSBS_MASK |
6669                                R_ID_AA64PFR1_MTE_MASK |
6670                                R_ID_AA64PFR1_SME_MASK },
6671             { .name = "ID_AA64PFR2_EL1",
6672               .exported_bits = 0 },
6673             { .name = "ID_AA64PFR*_EL1_RESERVED",
6674               .is_glob = true },
6675             { .name = "ID_AA64ZFR0_EL1",
6676               .exported_bits = R_ID_AA64ZFR0_SVEVER_MASK |
6677                                R_ID_AA64ZFR0_AES_MASK |
6678                                R_ID_AA64ZFR0_BITPERM_MASK |
6679                                R_ID_AA64ZFR0_BFLOAT16_MASK |
6680                                R_ID_AA64ZFR0_B16B16_MASK |
6681                                R_ID_AA64ZFR0_SHA3_MASK |
6682                                R_ID_AA64ZFR0_SM4_MASK |
6683                                R_ID_AA64ZFR0_I8MM_MASK |
6684                                R_ID_AA64ZFR0_F32MM_MASK |
6685                                R_ID_AA64ZFR0_F64MM_MASK },
6686             { .name = "ID_AA64SMFR0_EL1",
6687               .exported_bits = R_ID_AA64SMFR0_F32F32_MASK |
6688                                R_ID_AA64SMFR0_BI32I32_MASK |
6689                                R_ID_AA64SMFR0_B16F32_MASK |
6690                                R_ID_AA64SMFR0_F16F32_MASK |
6691                                R_ID_AA64SMFR0_I8I32_MASK |
6692                                R_ID_AA64SMFR0_F16F16_MASK |
6693                                R_ID_AA64SMFR0_B16B16_MASK |
6694                                R_ID_AA64SMFR0_I16I32_MASK |
6695                                R_ID_AA64SMFR0_F64F64_MASK |
6696                                R_ID_AA64SMFR0_I16I64_MASK |
6697                                R_ID_AA64SMFR0_SMEVER_MASK |
6698                                R_ID_AA64SMFR0_FA64_MASK },
6699             { .name = "ID_AA64MMFR0_EL1",
6700               .exported_bits = R_ID_AA64MMFR0_ECV_MASK,
6701               .fixed_bits = (0xfu << R_ID_AA64MMFR0_TGRAN64_SHIFT) |
6702                             (0xfu << R_ID_AA64MMFR0_TGRAN4_SHIFT) },
6703             { .name = "ID_AA64MMFR1_EL1",
6704               .exported_bits = R_ID_AA64MMFR1_AFP_MASK },
6705             { .name = "ID_AA64MMFR2_EL1",
6706               .exported_bits = R_ID_AA64MMFR2_AT_MASK },
6707             { .name = "ID_AA64MMFR3_EL1",
6708               .exported_bits = 0 },
6709             { .name = "ID_AA64MMFR*_EL1_RESERVED",
6710               .is_glob = true },
6711             { .name = "ID_AA64DFR0_EL1",
6712               .fixed_bits = (0x6u << R_ID_AA64DFR0_DEBUGVER_SHIFT) },
6713             { .name = "ID_AA64DFR1_EL1" },
6714             { .name = "ID_AA64DFR*_EL1_RESERVED",
6715               .is_glob = true },
6716             { .name = "ID_AA64AFR*",
6717               .is_glob = true },
6718             { .name = "ID_AA64ISAR0_EL1",
6719               .exported_bits = R_ID_AA64ISAR0_AES_MASK |
6720                                R_ID_AA64ISAR0_SHA1_MASK |
6721                                R_ID_AA64ISAR0_SHA2_MASK |
6722                                R_ID_AA64ISAR0_CRC32_MASK |
6723                                R_ID_AA64ISAR0_ATOMIC_MASK |
6724                                R_ID_AA64ISAR0_RDM_MASK |
6725                                R_ID_AA64ISAR0_SHA3_MASK |
6726                                R_ID_AA64ISAR0_SM3_MASK |
6727                                R_ID_AA64ISAR0_SM4_MASK |
6728                                R_ID_AA64ISAR0_DP_MASK |
6729                                R_ID_AA64ISAR0_FHM_MASK |
6730                                R_ID_AA64ISAR0_TS_MASK |
6731                                R_ID_AA64ISAR0_RNDR_MASK },
6732             { .name = "ID_AA64ISAR1_EL1",
6733               .exported_bits = R_ID_AA64ISAR1_DPB_MASK |
6734                                R_ID_AA64ISAR1_APA_MASK |
6735                                R_ID_AA64ISAR1_API_MASK |
6736                                R_ID_AA64ISAR1_JSCVT_MASK |
6737                                R_ID_AA64ISAR1_FCMA_MASK |
6738                                R_ID_AA64ISAR1_LRCPC_MASK |
6739                                R_ID_AA64ISAR1_GPA_MASK |
6740                                R_ID_AA64ISAR1_GPI_MASK |
6741                                R_ID_AA64ISAR1_FRINTTS_MASK |
6742                                R_ID_AA64ISAR1_SB_MASK |
6743                                R_ID_AA64ISAR1_BF16_MASK |
6744                                R_ID_AA64ISAR1_DGH_MASK |
6745                                R_ID_AA64ISAR1_I8MM_MASK },
6746             { .name = "ID_AA64ISAR2_EL1",
6747               .exported_bits = R_ID_AA64ISAR2_WFXT_MASK |
6748                                R_ID_AA64ISAR2_RPRES_MASK |
6749                                R_ID_AA64ISAR2_GPA3_MASK |
6750                                R_ID_AA64ISAR2_APA3_MASK |
6751                                R_ID_AA64ISAR2_MOPS_MASK |
6752                                R_ID_AA64ISAR2_BC_MASK |
6753                                R_ID_AA64ISAR2_RPRFM_MASK |
6754                                R_ID_AA64ISAR2_CSSC_MASK },
6755             { .name = "ID_AA64ISAR*_EL1_RESERVED",
6756               .is_glob = true },
6757         };
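        /*
         * modify_arm_cp_regs() (see modify_arm_cp_regs_with_len() later
         * in this file) masks each named register's resetvalue down to
         * .exported_bits and then ORs in .fixed_bits: e.g.
         * ID_AA64PFR2_EL1 (exported_bits == 0) reads as zero from user
         * space, while ID_AA64MMFR0_EL1 always advertises 0xf in its
         * TGRAN4 and TGRAN64 fields.  Glob entries are forced to RAZ.
         */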
6758         modify_arm_cp_regs(v8_idregs, v8_user_idregs);
6759 #endif
6760         /*
6761          * RVBAR_EL1 and RMR_EL1 only implemented if EL1 is the highest EL.
6762          * TODO: For RMR, a write with bit 1 set should do something with
6763          * cpu_reset(). In the meantime, "the bit is strictly a request",
6764          * so we remain within spec by simply ignoring writes.
6765          */
6766         if (!arm_feature(env, ARM_FEATURE_EL3) &&
6767             !arm_feature(env, ARM_FEATURE_EL2)) {
6768             ARMCPRegInfo el1_reset_regs[] = {
6769                 { .name = "RVBAR_EL1", .state = ARM_CP_STATE_BOTH,
6770                   .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
6771                   .access = PL1_R,
6772                   .fieldoffset = offsetof(CPUARMState, cp15.rvbar) },
6773                 { .name = "RMR_EL1", .state = ARM_CP_STATE_BOTH,
6774                   .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 2,
6775                   .access = PL1_RW, .type = ARM_CP_CONST,
6776                   .resetvalue = arm_feature(env, ARM_FEATURE_AARCH64) }
6777             };
6778             define_arm_cp_regs(cpu, el1_reset_regs);
6779         }
6780         define_arm_cp_regs(cpu, v8_idregs);
6781         define_arm_cp_regs(cpu, v8_cp_reginfo);
6782         if (cpu_isar_feature(aa64_aa32_el1, cpu)) {
6783             define_arm_cp_regs(cpu, v8_aa32_el1_reginfo);
6784         }
6785 
6786         for (i = 4; i < 16; i++) {
6787             /*
6788              * Encodings in "0, c0, {c4-c7}, {0-7}" are RAZ for AArch32.
6789              * For pre-v8 cores there are RAZ patterns for these in
6790              * id_pre_v8_midr_cp_reginfo[]; for v8 we do that here.
6791              * v8 extends the "must RAZ" part of the ID register space
6792              * to also cover c0, 0, c{8-15}, {0-7}.
6793              * These are STATE_AA32 because in the AArch64 sysreg space
6794              * c4-c7 is where the AArch64 ID registers live (and we've
6795              * already defined those in v8_idregs[]), and c8-c15 are not
6796              * "must RAZ" for AArch64.
6797              */
6798             g_autofree char *name = g_strdup_printf("RES_0_C0_C%d_X", i);
6799             ARMCPRegInfo v8_aa32_raz_idregs = {
6800                 .name = name,
6801                 .state = ARM_CP_STATE_AA32,
6802                 .cp = 15, .opc1 = 0, .crn = 0, .crm = i, .opc2 = CP_ANY,
6803                 .access = PL1_R, .type = ARM_CP_CONST,
6804                 .accessfn = access_aa64_tid3,
6805                 .resetvalue = 0 };
6806             define_one_arm_cp_reg(cpu, &v8_aa32_raz_idregs);
6807         }
6808     }
6809 
6810     /*
6811      * Register the base EL2 cpregs.
6812      * Pre v8, these registers are implemented only as part of the
6813      * Virtualization Extensions (EL2 present).  Beginning with v8,
6814      * if EL2 is missing but EL3 is enabled, most of these become
6815      * RES0 from EL3, with some specific exceptions.
6816      */
6817     if (arm_feature(env, ARM_FEATURE_EL2)
6818         || (arm_feature(env, ARM_FEATURE_EL3)
6819             && arm_feature(env, ARM_FEATURE_V8))) {
6820         uint64_t vmpidr_def = mpidr_read_val(env);
6821         ARMCPRegInfo vpidr_regs[] = {
6822             { .name = "VPIDR", .state = ARM_CP_STATE_AA32,
6823               .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
6824               .access = PL2_RW, .accessfn = access_el3_aa32ns,
6825               .resetvalue = cpu->midr,
6826               .type = ARM_CP_ALIAS | ARM_CP_EL3_NO_EL2_C_NZ,
6827               .fieldoffset = offsetoflow32(CPUARMState, cp15.vpidr_el2) },
6828             { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64,
6829               .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
6830               .access = PL2_RW, .resetvalue = cpu->midr,
6831               .type = ARM_CP_EL3_NO_EL2_C_NZ,
6832               .nv2_redirect_offset = 0x88,
6833               .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
6834             { .name = "VMPIDR", .state = ARM_CP_STATE_AA32,
6835               .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
6836               .access = PL2_RW, .accessfn = access_el3_aa32ns,
6837               .resetvalue = vmpidr_def,
6838               .type = ARM_CP_ALIAS | ARM_CP_EL3_NO_EL2_C_NZ,
6839               .fieldoffset = offsetoflow32(CPUARMState, cp15.vmpidr_el2) },
6840             { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64,
6841               .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
6842               .access = PL2_RW, .resetvalue = vmpidr_def,
6843               .type = ARM_CP_EL3_NO_EL2_C_NZ,
6844               .nv2_redirect_offset = 0x50,
6845               .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) },
6846         };
6847         /*
6848          * The only field of MDCR_EL2 that has a defined architectural reset
6849          * value is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N.
6850          */
6851         ARMCPRegInfo mdcr_el2 = {
6852             .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH, .type = ARM_CP_IO,
6853             .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
6854             .writefn = mdcr_el2_write,
6855             .access = PL2_RW, .resetvalue = pmu_num_counters(env),
6856             .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2),
6857         };
6858         define_one_arm_cp_reg(cpu, &mdcr_el2);
6859         define_arm_cp_regs(cpu, vpidr_regs);
6860         define_arm_cp_regs(cpu, el2_cp_reginfo);
6861         if (arm_feature(env, ARM_FEATURE_V8)) {
6862             define_arm_cp_regs(cpu, el2_v8_cp_reginfo);
6863         }
6864         if (cpu_isar_feature(aa64_sel2, cpu)) {
6865             define_arm_cp_regs(cpu, el2_sec_cp_reginfo);
6866         }
6867         /*
6868          * RVBAR_EL2 and RMR_EL2 only implemented if EL2 is the highest EL.
6869          * See commentary near RMR_EL1.
6870          */
6871         if (!arm_feature(env, ARM_FEATURE_EL3)) {
6872             static const ARMCPRegInfo el2_reset_regs[] = {
6873                 { .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64,
6874                   .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1,
6875                   .access = PL2_R,
6876                   .fieldoffset = offsetof(CPUARMState, cp15.rvbar) },
6877                 { .name = "RVBAR", .type = ARM_CP_ALIAS,
6878                   .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
6879                   .access = PL2_R,
6880                   .fieldoffset = offsetof(CPUARMState, cp15.rvbar) },
6881                 { .name = "RMR_EL2", .state = ARM_CP_STATE_AA64,
6882                   .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 2,
6883                   .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 1 },
6884             };
6885             define_arm_cp_regs(cpu, el2_reset_regs);
6886         }
6887     }
6888 
6889     /* Register the base EL3 cpregs. */
6890     if (arm_feature(env, ARM_FEATURE_EL3)) {
6891         define_arm_cp_regs(cpu, el3_cp_reginfo);
6892         ARMCPRegInfo el3_regs[] = {
6893             { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64,
6894               .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1,
6895               .access = PL3_R,
6896               .fieldoffset = offsetof(CPUARMState, cp15.rvbar), },
6897             { .name = "RMR_EL3", .state = ARM_CP_STATE_AA64,
6898               .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 2,
6899               .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 1 },
6900             { .name = "RMR", .state = ARM_CP_STATE_AA32,
6901               .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 2,
6902               .access = PL3_RW, .type = ARM_CP_CONST,
6903               .resetvalue = arm_feature(env, ARM_FEATURE_AARCH64) },
6904             { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64,
6905               .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0,
6906               .access = PL3_RW,
6907               .raw_writefn = raw_write, .writefn = sctlr_write,
6908               .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]),
6909               .resetvalue = cpu->reset_sctlr },
6910         };
6911 
6912         define_arm_cp_regs(cpu, el3_regs);
6913     }
6914     /*
6915      * The behaviour of NSACR is sufficiently various that we don't
6916      * try to describe it in a single reginfo:
6917      *  if EL3 is 64 bit, then trap to EL3 from S EL1,
6918      *     reads as constant 0xc00 from NS EL1 and NS EL2
6919      *  if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2
6920      *  if v7 without EL3, register doesn't exist
6921      *  if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2
6922      */
6923     if (arm_feature(env, ARM_FEATURE_EL3)) {
6924         if (arm_feature(env, ARM_FEATURE_AARCH64)) {
6925             static const ARMCPRegInfo nsacr = {
6926                 .name = "NSACR", .type = ARM_CP_CONST,
6927                 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
6928                 .access = PL1_RW, .accessfn = nsacr_access,
6929                 .resetvalue = 0xc00
6930             };
6931             define_one_arm_cp_reg(cpu, &nsacr);
6932         } else {
6933             static const ARMCPRegInfo nsacr = {
6934                 .name = "NSACR",
6935                 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
6936                 .access = PL3_RW | PL1_R,
6937                 .resetvalue = 0,
6938                 .fieldoffset = offsetof(CPUARMState, cp15.nsacr)
6939             };
6940             define_one_arm_cp_reg(cpu, &nsacr);
6941         }
6942     } else {
6943         if (arm_feature(env, ARM_FEATURE_V8)) {
6944             static const ARMCPRegInfo nsacr = {
6945                 .name = "NSACR", .type = ARM_CP_CONST,
6946                 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
6947                 .access = PL1_R,
6948                 .resetvalue = 0xc00
6949             };
6950             define_one_arm_cp_reg(cpu, &nsacr);
6951         }
6952     }
6953 
6954     if (arm_feature(env, ARM_FEATURE_PMSA)) {
6955         if (arm_feature(env, ARM_FEATURE_V6)) {
6956             /* PMSAv6 not implemented */
6957             assert(arm_feature(env, ARM_FEATURE_V7));
6958             define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
6959             define_arm_cp_regs(cpu, pmsav7_cp_reginfo);
6960         } else {
6961             define_arm_cp_regs(cpu, pmsav5_cp_reginfo);
6962         }
6963     } else {
6964         define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
6965         define_arm_cp_regs(cpu, vmsa_cp_reginfo);
6966         /* TTBCR2 is introduced with ARMv8.2-AA32HPD.  */
6967         if (cpu_isar_feature(aa32_hpd, cpu)) {
6968             define_one_arm_cp_reg(cpu, &ttbcr2_reginfo);
6969         }
6970     }
6971     if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
6972         define_arm_cp_regs(cpu, t2ee_cp_reginfo);
6973     }
6974     if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
6975         define_arm_cp_regs(cpu, generic_timer_cp_reginfo);
6976     }
6977     if (cpu_isar_feature(aa64_ecv_traps, cpu)) {
6978         define_arm_cp_regs(cpu, gen_timer_ecv_cp_reginfo);
6979     }
6980 #ifndef CONFIG_USER_ONLY
6981     if (cpu_isar_feature(aa64_ecv, cpu)) {
6982         define_one_arm_cp_reg(cpu, &gen_timer_cntpoff_reginfo);
6983     }
6984 #endif
6985     if (arm_feature(env, ARM_FEATURE_VAPA)) {
6986         ARMCPRegInfo vapa_cp_reginfo[] = {
6987             { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
6988               .access = PL1_RW, .resetvalue = 0,
6989               .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s),
6990                                      offsetoflow32(CPUARMState, cp15.par_ns) },
6991               .writefn = par_write},
6992         };
6993 
6994         /*
6995          * When LPAE exists this 32-bit PAR register is an alias of the
6996          * 64-bit AArch32 PAR register defined in lpae_cp_reginfo[]
6997          */
6998         if (arm_feature(env, ARM_FEATURE_LPAE)) {
6999             vapa_cp_reginfo[0].type = ARM_CP_ALIAS | ARM_CP_NO_GDB;
7000         }
7001         define_arm_cp_regs(cpu, vapa_cp_reginfo);
7002     }
7003     if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) {
7004         define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo);
7005     }
7006     if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) {
7007         define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo);
7008     }
7009     if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) {
7010         define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo);
7011     }
7012     if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
7013         define_arm_cp_regs(cpu, omap_cp_reginfo);
7014     }
7015     if (arm_feature(env, ARM_FEATURE_STRONGARM)) {
7016         define_arm_cp_regs(cpu, strongarm_cp_reginfo);
7017     }
7018     if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) {
7019         define_arm_cp_regs(cpu, dummy_c15_cp_reginfo);
7020     }
7021     if (arm_feature(env, ARM_FEATURE_LPAE)) {
7022         define_arm_cp_regs(cpu, lpae_cp_reginfo);
7023     }
7024     if (cpu_isar_feature(aa32_jazelle, cpu)) {
7025         define_arm_cp_regs(cpu, jazelle_regs);
7026     }
7027     /*
7028      * Slightly awkwardly, the OMAP and StrongARM cores need all of
7029      * cp15 crn=0 to be writes-ignored, whereas for other cores they should
7030      * be read-only (i.e. a write causes an UNDEF exception).
7031      */
7032     {
7033         ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = {
7034             /*
7035              * Pre-v8 MIDR space.
7036              * Note that the MIDR isn't a simple constant register because
7037              * of the TI925 behaviour where writes to another register can
7038              * cause the MIDR value to change.
7039              *
7040              * Unimplemented registers in the c15 0 0 0 space default to
7041              * MIDR. We define MIDR first to cover this entire space, and
7042              * then CTR, TCMTR and friends override it where appropriate.
7043              */
7044             { .name = "MIDR",
7045               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY,
7046               .access = PL1_R, .resetvalue = cpu->midr,
7047               .writefn = arm_cp_write_ignore, .raw_writefn = raw_write,
7048               .readfn = midr_read,
7049               .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
7050               .type = ARM_CP_OVERRIDE },
7051             /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */
7052             { .name = "DUMMY",
7053               .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY,
7054               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
7055             { .name = "DUMMY",
7056               .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY,
7057               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
7058             { .name = "DUMMY",
7059               .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY,
7060               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
7061             { .name = "DUMMY",
7062               .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY,
7063               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
7064             { .name = "DUMMY",
7065               .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY,
7066               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
7067         };
7068         ARMCPRegInfo id_v8_midr_cp_reginfo[] = {
7069             { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH,
7070               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0,
7071               .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr,
7072               .fgt = FGT_MIDR_EL1,
7073               .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
7074               .readfn = midr_read },
7075             /* crn = 0 op1 = 0 crm = 0 op2 = 7 : AArch32 aliases of MIDR */
7076             { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
7077               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7,
7078               .access = PL1_R, .resetvalue = cpu->midr },
7079             { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH,
7080               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6,
7081               .access = PL1_R,
7082               .accessfn = access_aa64_tid1,
7083               .fgt = FGT_REVIDR_EL1,
7084               .type = ARM_CP_CONST, .resetvalue = cpu->revidr },
7085         };
7086         ARMCPRegInfo id_v8_midr_alias_cp_reginfo = {
7087             .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST | ARM_CP_NO_GDB,
7088             .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
7089             .access = PL1_R, .resetvalue = cpu->midr
7090         };
7091         ARMCPRegInfo id_cp_reginfo[] = {
7092             /* These are common to v8 and pre-v8 */
7093             { .name = "CTR",
7094               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1,
7095               .access = PL1_R, .accessfn = ctr_el0_access,
7096               .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
7097             { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64,
7098               .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0,
7099               .access = PL0_R, .accessfn = ctr_el0_access,
7100               .fgt = FGT_CTR_EL0,
7101               .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
7102             /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */
7103             { .name = "TCMTR",
7104               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2,
7105               .access = PL1_R,
7106               .accessfn = access_aa32_tid1,
7107               .type = ARM_CP_CONST, .resetvalue = 0 },
7108         };
7109         /* TLBTR is specific to VMSA */
7110         ARMCPRegInfo id_tlbtr_reginfo = {
7111               .name = "TLBTR",
7112               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
7113               .access = PL1_R,
7114               .accessfn = access_aa32_tid1,
7115               .type = ARM_CP_CONST, .resetvalue = 0,
7116         };
7117         /* MPUIR is specific to PMSA V6+ */
7118         ARMCPRegInfo id_mpuir_reginfo = {
7119               .name = "MPUIR",
7120               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
7121               .access = PL1_R, .type = ARM_CP_CONST,
7122               .resetvalue = cpu->pmsav7_dregion << 8
7123         };
7124         /* HMPUIR is specific to PMSA V8 */
7125         ARMCPRegInfo id_hmpuir_reginfo = {
7126             .name = "HMPUIR",
7127             .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 4,
7128             .access = PL2_R, .type = ARM_CP_CONST,
7129             .resetvalue = cpu->pmsav8r_hdregion
7130         };
7131         static const ARMCPRegInfo crn0_wi_reginfo = {
7132             .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY,
7133             .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
7134             .type = ARM_CP_NOP | ARM_CP_OVERRIDE
7135         };
7136 #ifdef CONFIG_USER_ONLY
7137         static const ARMCPRegUserSpaceInfo id_v8_user_midr_cp_reginfo[] = {
7138             { .name = "MIDR_EL1",
7139               .exported_bits = R_MIDR_EL1_REVISION_MASK |
7140                                R_MIDR_EL1_PARTNUM_MASK |
7141                                R_MIDR_EL1_ARCHITECTURE_MASK |
7142                                R_MIDR_EL1_VARIANT_MASK |
7143                                R_MIDR_EL1_IMPLEMENTER_MASK },
7144             { .name = "REVIDR_EL1" },
7145         };
7146         modify_arm_cp_regs(id_v8_midr_cp_reginfo, id_v8_user_midr_cp_reginfo);
7147 #endif
7148         if (arm_feature(env, ARM_FEATURE_OMAPCP) ||
7149             arm_feature(env, ARM_FEATURE_STRONGARM)) {
7150             size_t i;
7151             /*
7152              * Register the blanket "writes ignored" value first to cover the
7153              * whole space. Then update the specific ID registers to allow write
7154              * access, so that they ignore writes rather than causing them to
7155              * UNDEF.
7156              */
7157             define_one_arm_cp_reg(cpu, &crn0_wi_reginfo);
7158             for (i = 0; i < ARRAY_SIZE(id_pre_v8_midr_cp_reginfo); ++i) {
7159                 id_pre_v8_midr_cp_reginfo[i].access = PL1_RW;
7160             }
7161             for (i = 0; i < ARRAY_SIZE(id_cp_reginfo); ++i) {
7162                 id_cp_reginfo[i].access = PL1_RW;
7163             }
7164             id_mpuir_reginfo.access = PL1_RW;
7165             id_tlbtr_reginfo.access = PL1_RW;
7166         }
7167         if (arm_feature(env, ARM_FEATURE_V8)) {
7168             define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo);
7169             if (!arm_feature(env, ARM_FEATURE_PMSA)) {
7170                 define_one_arm_cp_reg(cpu, &id_v8_midr_alias_cp_reginfo);
7171             }
7172         } else {
7173             define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo);
7174         }
7175         define_arm_cp_regs(cpu, id_cp_reginfo);
7176         if (!arm_feature(env, ARM_FEATURE_PMSA)) {
7177             define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo);
7178         } else if (arm_feature(env, ARM_FEATURE_PMSA) &&
7179                    arm_feature(env, ARM_FEATURE_V8)) {
7180             uint32_t i = 0;
7181             char *tmp_string;
7182 
7183             define_one_arm_cp_reg(cpu, &id_mpuir_reginfo);
7184             define_one_arm_cp_reg(cpu, &id_hmpuir_reginfo);
7185             define_arm_cp_regs(cpu, pmsav8r_cp_reginfo);
7186 
7187             /* Register alias is only valid for first 32 indexes */
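            /*
             * A sketch of the encoding used below: MPU region n maps to
             * crm = 0b1000 | n[3:1], opc1 = n[4], opc2 = n[0]:0:0, with
             * opc2 bit 0 additionally set for the PRLAR alias.  So e.g.
             * region 5 yields PRBAR5 at (cp15, opc1=0, crn=6,
             * crm=0b1010, opc2=4) and PRLAR5 at the same encoding with
             * opc2=5.  The HPRBAR/HPRLAR loop below uses the same
             * scheme with opc1 = 0b100 | n[4].
             */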
7188             for (i = 0; i < MIN(cpu->pmsav7_dregion, 32); ++i) {
7189                 uint8_t crm = 0b1000 | extract32(i, 1, 3);
7190                 uint8_t opc1 = extract32(i, 4, 1);
7191                 uint8_t opc2 = extract32(i, 0, 1) << 2;
7192 
7193                 tmp_string = g_strdup_printf("PRBAR%u", i);
7194                 ARMCPRegInfo tmp_prbarn_reginfo = {
7195                     .name = tmp_string, .type = ARM_CP_ALIAS | ARM_CP_NO_RAW,
7196                     .cp = 15, .opc1 = opc1, .crn = 6, .crm = crm, .opc2 = opc2,
7197                     .access = PL1_RW, .resetvalue = 0,
7198                     .accessfn = access_tvm_trvm,
7199                     .writefn = pmsav8r_regn_write, .readfn = pmsav8r_regn_read
7200                 };
7201                 define_one_arm_cp_reg(cpu, &tmp_prbarn_reginfo);
7202                 g_free(tmp_string);
7203 
7204                 opc2 = extract32(i, 0, 1) << 2 | 0x1;
7205                 tmp_string = g_strdup_printf("PRLAR%u", i);
7206                 ARMCPRegInfo tmp_prlarn_reginfo = {
7207                     .name = tmp_string, .type = ARM_CP_ALIAS | ARM_CP_NO_RAW,
7208                     .cp = 15, .opc1 = opc1, .crn = 6, .crm = crm, .opc2 = opc2,
7209                     .access = PL1_RW, .resetvalue = 0,
7210                     .accessfn = access_tvm_trvm,
7211                     .writefn = pmsav8r_regn_write, .readfn = pmsav8r_regn_read
7212                 };
7213                 define_one_arm_cp_reg(cpu, &tmp_prlarn_reginfo);
7214                 g_free(tmp_string);
7215             }
7216 
7217             /* Register alias is only valid for first 32 indexes */
7218             for (i = 0; i < MIN(cpu->pmsav8r_hdregion, 32); ++i) {
7219                 uint8_t crm = 0b1000 | extract32(i, 1, 3);
7220                 uint8_t opc1 = 0b100 | extract32(i, 4, 1);
7221                 uint8_t opc2 = extract32(i, 0, 1) << 2;
7222 
7223                 tmp_string = g_strdup_printf("HPRBAR%u", i);
7224                 ARMCPRegInfo tmp_hprbarn_reginfo = {
7225                     .name = tmp_string,
7226                     .type = ARM_CP_NO_RAW,
7227                     .cp = 15, .opc1 = opc1, .crn = 6, .crm = crm, .opc2 = opc2,
7228                     .access = PL2_RW, .resetvalue = 0,
7229                     .writefn = pmsav8r_regn_write, .readfn = pmsav8r_regn_read
7230                 };
7231                 define_one_arm_cp_reg(cpu, &tmp_hprbarn_reginfo);
7232                 g_free(tmp_string);
7233 
7234                 opc2 = extract32(i, 0, 1) << 2 | 0x1;
7235                 tmp_string = g_strdup_printf("HPRLAR%u", i);
7236                 ARMCPRegInfo tmp_hprlarn_reginfo = {
7237                     .name = tmp_string,
7238                     .type = ARM_CP_NO_RAW,
7239                     .cp = 15, .opc1 = opc1, .crn = 6, .crm = crm, .opc2 = opc2,
7240                     .access = PL2_RW, .resetvalue = 0,
7241                     .writefn = pmsav8r_regn_write, .readfn = pmsav8r_regn_read
7242                 };
7243                 define_one_arm_cp_reg(cpu, &tmp_hprlarn_reginfo);
7244                 g_free(tmp_string);
7245             }
7246         } else if (arm_feature(env, ARM_FEATURE_V7)) {
7247             define_one_arm_cp_reg(cpu, &id_mpuir_reginfo);
7248         }
7249     }
7250 
7251     if (arm_feature(env, ARM_FEATURE_MPIDR)) {
7252         ARMCPRegInfo mpidr_cp_reginfo[] = {
7253             { .name = "MPIDR_EL1", .state = ARM_CP_STATE_BOTH,
7254               .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
7255               .fgt = FGT_MPIDR_EL1,
7256               .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW },
7257         };
7258 #ifdef CONFIG_USER_ONLY
7259         static const ARMCPRegUserSpaceInfo mpidr_user_cp_reginfo[] = {
7260             { .name = "MPIDR_EL1",
7261               .fixed_bits = 0x0000000080000000 },
7262         };
7263         modify_arm_cp_regs(mpidr_cp_reginfo, mpidr_user_cp_reginfo);
7264 #endif
7265         define_arm_cp_regs(cpu, mpidr_cp_reginfo);
7266     }
7267 
7268     if (arm_feature(env, ARM_FEATURE_AUXCR)) {
7269         ARMCPRegInfo auxcr_reginfo[] = {
7270             { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH,
7271               .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1,
7272               .access = PL1_RW, .accessfn = access_tacr,
7273               .nv2_redirect_offset = 0x118,
7274               .type = ARM_CP_CONST, .resetvalue = cpu->reset_auxcr },
7275             { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH,
7276               .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1,
7277               .access = PL2_RW, .type = ARM_CP_CONST,
7278               .resetvalue = 0 },
7279             { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64,
7280               .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1,
7281               .access = PL3_RW, .type = ARM_CP_CONST,
7282               .resetvalue = 0 },
7283         };
7284         define_arm_cp_regs(cpu, auxcr_reginfo);
7285         if (cpu_isar_feature(aa32_ac2, cpu)) {
7286             define_arm_cp_regs(cpu, actlr2_hactlr2_reginfo);
7287         }
7288     }
7289 
7290     if (arm_feature(env, ARM_FEATURE_CBAR)) {
7291         /*
7292          * CBAR is IMPDEF, but common on Arm Cortex-A implementations.
7293          * There are two flavours:
7294          *  (1) older 32-bit only cores have a simple 32-bit CBAR
7295          *  (2) 64-bit cores have a 64-bit CBAR visible to AArch64, plus a
7296          *      32-bit register visible to AArch32 at a different encoding
7297          *      to the "flavour 1" register and with the bits rearranged to
7298          *      be able to squash a 64-bit address into the 32-bit view.
7299          * We distinguish the two via the ARM_FEATURE_AARCH64 flag, but
7300          * in future if we support AArch32-only configs of some of the
7301          * AArch64 cores we might need to add a specific feature flag
7302          * to indicate cores with "flavour 2" CBAR.
7303          */
7304         if (arm_feature(env, ARM_FEATURE_V8)) {
7305             /* 32 bit view is [31:18] 0...0 [43:32]. */
7306             uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18)
7307                 | extract64(cpu->reset_cbar, 32, 12);
7308             ARMCPRegInfo cbar_reginfo[] = {
7309                 { .name = "CBAR",
7310                   .type = ARM_CP_CONST,
7311                   .cp = 15, .crn = 15, .crm = 3, .opc1 = 1, .opc2 = 0,
7312                   .access = PL1_R, .resetvalue = cbar32 },
7313                 { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64,
7314                   .type = ARM_CP_CONST,
7315                   .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0,
7316                   .access = PL1_R, .resetvalue = cpu->reset_cbar },
7317             };
7318             /* We don't currently implement an r/w 64-bit CBAR */
7319             assert(arm_feature(env, ARM_FEATURE_CBAR_RO));
7320             define_arm_cp_regs(cpu, cbar_reginfo);
7321         } else {
7322             ARMCPRegInfo cbar = {
7323                 .name = "CBAR",
7324                 .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
7325                 .access = PL1_R | PL3_W, .resetvalue = cpu->reset_cbar,
7326                 .fieldoffset = offsetof(CPUARMState,
7327                                         cp15.c15_config_base_address)
7328             };
7329             if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
7330                 cbar.access = PL1_R;
7331                 cbar.fieldoffset = 0;
7332                 cbar.type = ARM_CP_CONST;
7333             }
7334             define_one_arm_cp_reg(cpu, &cbar);
7335         }
7336     }
7337 
7338     if (arm_feature(env, ARM_FEATURE_VBAR)) {
7339         static const ARMCPRegInfo vbar_cp_reginfo[] = {
7340             { .name = "VBAR_EL1", .state = ARM_CP_STATE_BOTH,
7341               .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
7342               .access = PL1_RW, .writefn = vbar_write,
7343               .accessfn = access_nv1,
7344               .fgt = FGT_VBAR_EL1,
7345               .nv2_redirect_offset = 0x250 | NV2_REDIR_NV1,
7346               .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 12, 0, 0),
7347               .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 12, 0, 0),
7348               .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s),
7349                                      offsetof(CPUARMState, cp15.vbar_ns) },
7350               .resetvalue = 0 },
7351         };
7352         define_arm_cp_regs(cpu, vbar_cp_reginfo);
7353     }
7354 
7355     /* Generic registers whose values depend on the implementation */
7356     {
7357         ARMCPRegInfo sctlr = {
7358             .name = "SCTLR_EL1", .state = ARM_CP_STATE_BOTH,
7359             .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
7360             .access = PL1_RW, .accessfn = access_tvm_trvm,
7361             .fgt = FGT_SCTLR_EL1,
7362             .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 1, 0, 0),
7363             .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 1, 0, 0),
7364             .nv2_redirect_offset = 0x110 | NV2_REDIR_NV1,
7365             .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s),
7366                                    offsetof(CPUARMState, cp15.sctlr_ns) },
7367             .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
7368             .raw_writefn = raw_write,
7369         };
7370         define_one_arm_cp_reg(cpu, &sctlr);
7371 
7372         if (arm_feature(env, ARM_FEATURE_PMSA) &&
7373             arm_feature(env, ARM_FEATURE_V8)) {
7374             ARMCPRegInfo vsctlr = {
7375                 .name = "VSCTLR", .state = ARM_CP_STATE_AA32,
7376                 .cp = 15, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
7377                 .access = PL2_RW, .resetvalue = 0x0,
7378                 .fieldoffset = offsetoflow32(CPUARMState, cp15.vsctlr),
7379             };
7380             define_one_arm_cp_reg(cpu, &vsctlr);
7381         }
7382     }
7383 
7384     if (cpu_isar_feature(aa64_lor, cpu)) {
7385         define_arm_cp_regs(cpu, lor_reginfo);
7386     }
7387     if (cpu_isar_feature(aa64_pan, cpu)) {
7388         define_one_arm_cp_reg(cpu, &pan_reginfo);
7389     }
7390     if (cpu_isar_feature(aa64_uao, cpu)) {
7391         define_one_arm_cp_reg(cpu, &uao_reginfo);
7392     }
7393 
7394     if (cpu_isar_feature(aa64_dit, cpu)) {
7395         define_one_arm_cp_reg(cpu, &dit_reginfo);
7396     }
7397     if (cpu_isar_feature(aa64_ssbs, cpu)) {
7398         define_one_arm_cp_reg(cpu, &ssbs_reginfo);
7399     }
7400     if (cpu_isar_feature(any_ras, cpu)) {
7401         define_arm_cp_regs(cpu, minimal_ras_reginfo);
7402     }
7403 
7404     if (cpu_isar_feature(aa64_vh, cpu) ||
7405         cpu_isar_feature(aa64_debugv8p2, cpu)) {
7406         define_one_arm_cp_reg(cpu, &contextidr_el2);
7407     }
7408     if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) {
7409         define_arm_cp_regs(cpu, vhe_reginfo);
7410     }
7411 
7412     if (cpu_isar_feature(aa64_sve, cpu)) {
7413         define_arm_cp_regs(cpu, zcr_reginfo);
7414     }
7415 
7416     if (cpu_isar_feature(aa64_hcx, cpu)) {
7417         define_one_arm_cp_reg(cpu, &hcrx_el2_reginfo);
7418     }
7419 
7420     if (cpu_isar_feature(aa64_sme, cpu)) {
7421         define_arm_cp_regs(cpu, sme_reginfo);
7422     }
7423     if (cpu_isar_feature(aa64_pauth, cpu)) {
7424         define_arm_cp_regs(cpu, pauth_reginfo);
7425     }
7426     if (cpu_isar_feature(aa64_rndr, cpu)) {
7427         define_arm_cp_regs(cpu, rndr_reginfo);
7428     }
7429     /* Data Cache clean instructions up to PoP */
7430     if (cpu_isar_feature(aa64_dcpop, cpu)) {
7431         define_one_arm_cp_reg(cpu, dcpop_reg);
7432 
7433         if (cpu_isar_feature(aa64_dcpodp, cpu)) {
7434             define_one_arm_cp_reg(cpu, dcpodp_reg);
7435         }
7436     }
7437 
7438     /*
7439      * If full MTE is enabled, add all of the system registers.
7440      * If only "instructions available at EL0" are enabled,
7441      * then define only a RAZ/WI version of PSTATE.TCO.
7442      */
7443     if (cpu_isar_feature(aa64_mte, cpu)) {
7444         ARMCPRegInfo gmid_reginfo = {
7445             .name = "GMID_EL1", .state = ARM_CP_STATE_AA64,
7446             .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 4,
7447             .access = PL1_R, .accessfn = access_aa64_tid5,
7448             .type = ARM_CP_CONST, .resetvalue = cpu->gm_blocksize,
7449         };
7450         define_one_arm_cp_reg(cpu, &gmid_reginfo);
7451         define_arm_cp_regs(cpu, mte_reginfo);
7452         define_arm_cp_regs(cpu, mte_el0_cacheop_reginfo);
7453     } else if (cpu_isar_feature(aa64_mte_insn_reg, cpu)) {
7454         define_arm_cp_regs(cpu, mte_tco_ro_reginfo);
7455         define_arm_cp_regs(cpu, mte_el0_cacheop_reginfo);
7456     }
7457 
7458     if (cpu_isar_feature(aa64_scxtnum, cpu)) {
7459         define_arm_cp_regs(cpu, scxtnum_reginfo);
7460     }
7461 
7462     if (cpu_isar_feature(aa64_fgt, cpu)) {
7463         define_arm_cp_regs(cpu, fgt_reginfo);
7464     }
7465 
7466     if (cpu_isar_feature(aa64_rme, cpu)) {
7467         define_arm_cp_regs(cpu, rme_reginfo);
7468         if (cpu_isar_feature(aa64_mte, cpu)) {
7469             define_arm_cp_regs(cpu, rme_mte_reginfo);
7470         }
7471     }
7472 
7473     if (cpu_isar_feature(aa64_nv2, cpu)) {
7474         define_arm_cp_regs(cpu, nv2_reginfo);
7475     }
7476 
7477     if (cpu_isar_feature(aa64_nmi, cpu)) {
7478         define_arm_cp_regs(cpu, nmi_reginfo);
7479     }
7480 
7481     if (cpu_isar_feature(aa64_sctlr2, cpu)) {
7482         define_arm_cp_regs(cpu, sctlr2_reginfo);
7483     }
7484 
7485     if (cpu_isar_feature(aa64_tcr2, cpu)) {
7486         define_arm_cp_regs(cpu, tcr2_reginfo);
7487     }
7488 
7489     if (cpu_isar_feature(aa64_s1pie, cpu)) {
7490         define_arm_cp_regs(cpu, s1pie_reginfo);
7491     }
7492     if (cpu_isar_feature(aa64_s2pie, cpu)) {
7493         define_arm_cp_regs(cpu, s2pie_reginfo);
7494     }
7495     if (cpu_isar_feature(aa64_mec, cpu)) {
7496         define_arm_cp_regs(cpu, mec_reginfo);
7497         if (cpu_isar_feature(aa64_mte, cpu)) {
7498             define_arm_cp_regs(cpu, mec_mte_reginfo);
7499         }
7500     }
7501 
7502     if (cpu_isar_feature(aa64_aie, cpu)) {
7503         define_arm_cp_regs(cpu, aie_reginfo);
7504     }
7505 
7506     if (cpu_isar_feature(any_predinv, cpu)) {
7507         define_arm_cp_regs(cpu, predinv_reginfo);
7508     }
7509 
7510     if (cpu_isar_feature(any_ccidx, cpu)) {
7511         define_arm_cp_regs(cpu, ccsidr2_reginfo);
7512     }
7513 
7514     define_pm_cpregs(cpu);
7515     define_gcs_cpregs(cpu);
7516 }
7517 
7518 /*
7519  * Copy an ARMCPRegInfo structure, allocating it in one block along
7520  * with its name and an optional suffix appended to that name.
7521  */
7522 static ARMCPRegInfo *alloc_cpreg(const ARMCPRegInfo *in, const char *suffix)
7523 {
7524     const char *name = in->name;
7525     size_t name_len = strlen(name);
7526     size_t suff_len = suffix ? strlen(suffix) : 0;
7527     ARMCPRegInfo *out = g_malloc(sizeof(*in) + name_len + suff_len + 1);
7528     char *p = (char *)(out + 1);
7529 
7530     *out = *in;
7531     out->name = p;
7532 
7533     memcpy(p, name, name_len + 1);
7534     if (suffix) {
7535         memcpy(p + name_len, suffix, suff_len + 1);
7536     }
7537     return out;
7538 }
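
/*
 * The copy and its name share a single allocation:
 *
 *     [ ARMCPRegInfo ][ name bytes ... suffix ... NUL ]
 *                      ^ out->name points here
 *
 * so, for example, alloc_cpreg(&ri, "_S") applied to a reg named "FOO"
 * yields a copy named "FOO_S" whose struct and name can later be
 * released together with a single g_free().
 */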
7539 
7540 /*
7541  * Private utility function for define_one_arm_cp_reg():
7542  * add a single reginfo struct to the hash table.
7543  */
7544 static void add_cpreg_to_hashtable(ARMCPU *cpu, ARMCPRegInfo *r,
7545                                    CPState state, CPSecureState secstate,
7546                                    uint32_t key)
7547 {
7548     CPUARMState *env = &cpu->env;
7549     bool ns = secstate & ARM_CP_SECSTATE_NS;
7550 
7551     /* Overriding of an existing definition must be explicitly requested. */
7552     if (!(r->type & ARM_CP_OVERRIDE)) {
7553         const ARMCPRegInfo *oldreg = get_arm_cp_reginfo(cpu->cp_regs, key);
7554         if (oldreg) {
7555             assert(oldreg->type & ARM_CP_OVERRIDE);
7556         }
7557     }
7558 
7559     {
7560         bool isbanked = r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1];
7561 
7562         if (isbanked) {
7563             /*
7564              * Register is banked (using both entries in the array).
7565              * Overwrite fieldoffset: the array is only used to define
7566              * banked registers; from here on, only fieldoffset is used.
7567              */
7568             r->fieldoffset = r->bank_fieldoffsets[ns];
7569         }
7570         if (state == ARM_CP_STATE_AA32) {
7571             if (isbanked) {
7572                 /*
7573                  * If the register is banked then we don't need to migrate or
7574                  * reset the 32-bit instance in certain cases:
7575                  *
7576                  * 1) If the register has both 32-bit and 64-bit instances
7577                  *    then we can count on the 64-bit instance taking care
7578                  *    of the non-secure bank.
7579                  * 2) If ARMv8 is enabled then we can count on a 64-bit
7580                  *    version taking care of the secure bank.  This requires
7581                  *    that separate 32 and 64-bit definitions are provided.
7582                  */
7583                 if ((r->state == ARM_CP_STATE_BOTH && ns) ||
7584                     (arm_feature(env, ARM_FEATURE_V8) && !ns)) {
7585                     r->type |= ARM_CP_ALIAS;
7586                 }
7587             } else if ((secstate != r->secure) && !ns) {
7588                 /*
7589                  * The register is not banked so we only want to allow
7590                  * migration of the non-secure instance.
7591                  */
7592                 r->type |= ARM_CP_ALIAS;
7593             }
7594         }
7595     }
7596 
7597     /*
7598      * For 32-bit AArch32 regs shared with 64-bit AArch64 regs,
7599      * adjust the field offset for endianness.  This had to be
7600      * delayed until banked registers were resolved.
7601      */
7602     if (HOST_BIG_ENDIAN &&
7603         state == ARM_CP_STATE_AA32 &&
7604         r->state == ARM_CP_STATE_BOTH &&
7605         r->fieldoffset) {
7606         r->fieldoffset += sizeof(uint32_t);
7607     }
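    /*
     * Concretely: a STATE_BOTH register backed by a uint64_t field at,
     * say, offset 0x10 keeps fieldoffset 0x10 for its AArch64 view,
     * while the synthesised AArch32 view on a big-endian host must read
     * the low 32 bits at offset 0x14.  (Pure AArch32 definitions use
     * offsetoflow32(), which accounts for host endianness itself, so
     * only shared STATE_BOTH offsets need this adjustment.)
     */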
7608 
7609     /*
7610      * Special registers (i.e. NOP/WFI) are never migratable and
7611      * are not even raw-accessible.
7612      */
7613     if (r->type & ARM_CP_SPECIAL_MASK) {
7614         r->type |= ARM_CP_NO_RAW;
7615     }
7616 
7617     /*
7618      * Update fields to match the instantiation, overwriting wildcards
7619      * such as ARM_CP_STATE_BOTH or ARM_CP_SECSTATE_BOTH.
7620      */
7621     r->state = state;
7622     r->secure = secstate;
7623 
7624     /*
7625      * Check that raw accesses are either forbidden or handled. Note that
7626      * we can't assert this earlier because the setup of fieldoffset for
7627      * banked registers has to be done first.
7628      */
7629     if (!(r->type & ARM_CP_NO_RAW)) {
7630         assert(!raw_accessors_invalid(r));
7631     }
7632 
7633     g_hash_table_insert(cpu->cp_regs, (gpointer)(uintptr_t)key, r);
7634 }
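
/*
 * Note that g_hash_table_insert() replaces any existing value bound to
 * the key, which is what gives the "second definition wins" behaviour
 * described in define_one_arm_cp_reg()'s comment below when
 * ARM_CP_OVERRIDE is used.
 */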
7635 
7636 static void add_cpreg_to_hashtable_aa32(ARMCPU *cpu, ARMCPRegInfo *r)
7637 {
7638     /*
7639      * Under AArch32 CP registers can be common
7640      * (same for secure and non-secure world) or banked.
7641      */
7642     ARMCPRegInfo *r_s;
7643     bool is64 = r->type & ARM_CP_64BIT;
7644     uint32_t key = ENCODE_CP_REG(r->cp, is64, 0, r->crn,
7645                                  r->crm, r->opc1, r->opc2);
7646 
7647     assert(!(r->type & ARM_CP_ADD_TLBI_NXS)); /* aa64 only */
7648     r->vhe_redir_to_el2 = 0;
7649     r->vhe_redir_to_el01 = 0;
7650 
7651     switch (r->secure) {
7652     case ARM_CP_SECSTATE_NS:
7653         key |= CP_REG_AA32_NS_MASK;
7654         /* fall through */
7655     case ARM_CP_SECSTATE_S:
7656         add_cpreg_to_hashtable(cpu, r, ARM_CP_STATE_AA32, r->secure, key);
7657         break;
7658     case ARM_CP_SECSTATE_BOTH:
7659         r_s = alloc_cpreg(r, "_S");
7660         add_cpreg_to_hashtable(cpu, r_s, ARM_CP_STATE_AA32,
7661                                ARM_CP_SECSTATE_S, key);
7662 
7663         key |= CP_REG_AA32_NS_MASK;
7664         add_cpreg_to_hashtable(cpu, r, ARM_CP_STATE_AA32,
7665                                ARM_CP_SECSTATE_NS, key);
7666         break;
7667     default:
7668         g_assert_not_reached();
7669     }
7670 }
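
/*
 * For example, a SECSTATE_BOTH definition of a register "FOO" (a
 * hypothetical name) ends up as two hash entries: a secure-bank copy
 * named "FOO_S" under the plain key, and the original non-secure "FOO"
 * under key | CP_REG_AA32_NS_MASK.
 */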
7671 
7672 static void add_cpreg_to_hashtable_aa64(ARMCPU *cpu, ARMCPRegInfo *r)
7673 {
7674     uint32_t key = ENCODE_AA64_CP_REG(r->opc0, r->opc1,
7675                                       r->crn, r->crm, r->opc2);
7676 
7677     if ((r->type & ARM_CP_ADD_TLBI_NXS) &&
7678         cpu_isar_feature(aa64_xs, cpu)) {
7679         /*
7680          * This is a TLBI insn which has an NXS variant. The
7681          * NXS variant is at the same encoding except that
7682          * crn is +1, and has the same behaviour except for
7683          * fine-grained trapping. Add the NXS insn here and
7684          * then fall through to add the normal register.
7685              * alloc_cpreg() gives the NXS variant its own copy of
7686              * the struct, with "NXS" appended to the name, so the
7687              * reginfo we were passed is left untouched.
7688          */
7689         ARMCPRegInfo *nxs_ri = alloc_cpreg(r, "NXS");
7690         uint32_t nxs_key;
7691 
7692         assert(nxs_ri->crn < 0xf);
7693         nxs_ri->crn++;
7694         /* Also increment the CRN field inside the key value */
7695         nxs_key = key + (1 << CP_REG_ARM64_SYSREG_CRN_SHIFT);
7696         if (nxs_ri->fgt) {
7697             nxs_ri->fgt |= R_FGT_NXS_MASK;
7698         }
7699 
7700         add_cpreg_to_hashtable(cpu, nxs_ri, ARM_CP_STATE_AA64,
7701                                ARM_CP_SECSTATE_NS, nxs_key);
7702     }
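    /*
     * For example, a TLBI insn encoded at (op0=1, op1=0, crn=8, ...)
     * gains an NXS twin at crn=9: alloc_cpreg() appends "NXS" to the
     * name, crn is bumped by one, and the hash key is bumped by
     * 1 << CP_REG_ARM64_SYSREG_CRN_SHIFT to match.
     */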
7703 
7704     if (!r->vhe_redir_to_el01) {
7705         assert(!r->vhe_redir_to_el2);
7706     } else if (!arm_feature(&cpu->env, ARM_FEATURE_EL2) ||
7707                !cpu_isar_feature(aa64_vh, cpu)) {
7708         r->vhe_redir_to_el2 = 0;
7709         r->vhe_redir_to_el01 = 0;
7710     } else {
7711         /* Create the FOO_EL12 alias. */
7712         ARMCPRegInfo *r2 = alloc_cpreg(r, "2");
7713         uint32_t key2 = r->vhe_redir_to_el01;
7714 
7715         /*
7716          * Clear EL1 redirection on the FOO_EL1 reg;
7717          * Clear EL2 redirection on the FOO_EL12 reg;
7718          * Install redirection from FOO_EL12 back to FOO_EL1.
7719          */
7720         r->vhe_redir_to_el01 = 0;
7721         r2->vhe_redir_to_el2 = 0;
7722         r2->vhe_redir_to_el01 = key;
7723 
7724         r2->type |= ARM_CP_ALIAS | ARM_CP_NO_RAW;
7725         /* Remove PL1/PL0 access, leaving PL2/PL3 R/W in place.  */
7726         r2->access &= PL2_RW | PL3_RW;
7727         /* The new_reg op fields are as per new_key, not the target reg */
7728         r2->crn = (key2 & CP_REG_ARM64_SYSREG_CRN_MASK)
7729             >> CP_REG_ARM64_SYSREG_CRN_SHIFT;
7730         r2->crm = (key2 & CP_REG_ARM64_SYSREG_CRM_MASK)
7731             >> CP_REG_ARM64_SYSREG_CRM_SHIFT;
7732         r2->opc0 = (key2 & CP_REG_ARM64_SYSREG_OP0_MASK)
7733             >> CP_REG_ARM64_SYSREG_OP0_SHIFT;
7734         r2->opc1 = (key2 & CP_REG_ARM64_SYSREG_OP1_MASK)
7735             >> CP_REG_ARM64_SYSREG_OP1_SHIFT;
7736         r2->opc2 = (key2 & CP_REG_ARM64_SYSREG_OP2_MASK)
7737             >> CP_REG_ARM64_SYSREG_OP2_SHIFT;
7738 
7739         /* Non-redirected access to this register will abort. */
7740         r2->readfn = NULL;
7741         r2->writefn = NULL;
7742         r2->raw_readfn = NULL;
7743         r2->raw_writefn = NULL;
7744         r2->accessfn = NULL;
7745         r2->fieldoffset = 0;
7746 
7747         /*
7748          * If the _EL1 register is redirected to memory by FEAT_NV2,
7749          * then it shares the offset with the _EL12 register,
7750          * and which one is redirected depends on HCR_EL2.NV1.
7751          */
7752         if (r2->nv2_redirect_offset) {
7753             assert(r2->nv2_redirect_offset & NV2_REDIR_NV1);
7754             r2->nv2_redirect_offset &= ~NV2_REDIR_NV1;
7755             r2->nv2_redirect_offset |= NV2_REDIR_NO_NV1;
7756         }
7757         add_cpreg_to_hashtable(cpu, r2, ARM_CP_STATE_AA64,
7758                                ARM_CP_SECSTATE_NS, key2);
7759     }
7760 
7761     add_cpreg_to_hashtable(cpu, r, ARM_CP_STATE_AA64,
7762                            ARM_CP_SECSTATE_NS, key);
7763 }
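
/*
 * As a concrete instance of the VHE aliasing above: SCTLR_EL1
 * (op0=3, op1=0, crn=1, crm=0, op2=0) is defined earlier in this file
 * with vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 1, 0, 0), so here
 * we synthesise an "SCTLR_EL12" alias at op1=5, strip its PL1/PL0
 * access rights, and redirect its accesses back to the EL1 register.
 */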
7764 
7765 void define_one_arm_cp_reg(ARMCPU *cpu, const ARMCPRegInfo *r)
7766 {
7767     /*
7768      * Define implementations of coprocessor registers.
7769      * We store these in a hashtable because typically
7770      * there are fewer than 150 registers in a space which
7771      * is 16*16*16*8*8 = 262144 in size.
7772      * Wildcarding is supported for the crm, opc1 and opc2 fields.
7773      * If a register is defined twice then the second definition is
7774      * used, so this can be used to define some generic registers and
7775      * then override them with implementation specific variations.
7776      * At least one of the original and the second definition should
7777      * include ARM_CP_OVERRIDE in its type bits -- this is just a guard
7778      * against accidental use.
7779      *
7780      * The state field defines whether the register is to be
7781      * visible in the AArch32 or AArch64 execution state. If the
7782      * state is set to ARM_CP_STATE_BOTH then we synthesise a
7783      * reginfo structure for the AArch32 view, which sees the lower
7784      * 32 bits of the 64 bit register.
7785      *
7786      * Only registers visible in AArch64 may set r->opc0; opc0 cannot
7787      * be wildcarded. AArch64 registers are always considered to be 64
7788      * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of
7789      * the register, if any.
7790      */
7791     int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
7792     int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
7793     int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
7794     int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
7795     int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
7796     int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
7797     int cp = r->cp;
7798     ARMCPRegInfo r_const;
7799     CPUARMState *env = &cpu->env;
7800 
7801     /*
7802      * AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless.
7803      * Moreover, the encoding test that follows generally prevents a
7804      * shared encoding, so ARM_CP_STATE_BOTH won't work either.
7805      */
7806     assert(r->state == ARM_CP_STATE_AA32 || !(r->type & ARM_CP_64BIT));
7807     /* AArch32 64-bit registers have only CRm and Opc1 fields. */
7808     assert(!(r->type & ARM_CP_64BIT) || !(r->opc2 || r->crn));
7809     /* op0 only exists in the AArch64 encodings */
7810     assert(r->state != ARM_CP_STATE_AA32 || r->opc0 == 0);
7811 
7812     /*
7813      * This API is only for Arm's system coprocessors (14 and 15) or
7814      * (M-profile or v7A-and-earlier only) for implementation defined
7815      * coprocessors in the range 0..7.  Our decode assumes this, since
7816      * 8..13 can be used for other insns including VFP and Neon. See
7817      * valid_cp() in translate.c.  Assert here that we haven't tried
7818      * to use an invalid coprocessor number.
7819      */
7820     switch (r->state) {
7821     case ARM_CP_STATE_BOTH:
7822         /*
7823          * If the cp field is left unset, assume cp15.
7824          * Otherwise apply the same rules as AA32.
7825          */
7826         if (cp == 0) {
7827             cp = 15;
7828             break;
7829         }
7830         /* fall through */
7831     case ARM_CP_STATE_AA32:
7832         if (arm_feature(&cpu->env, ARM_FEATURE_V8) &&
7833             !arm_feature(&cpu->env, ARM_FEATURE_M)) {
7834             assert(cp >= 14 && cp <= 15);
7835         } else {
7836             assert(cp < 8 || (cp >= 14 && cp <= 15));
7837         }
7838         break;
7839     case ARM_CP_STATE_AA64:
7840         assert(cp == 0);
7841         break;
7842     default:
7843         g_assert_not_reached();
7844     }
7845     /*
7846      * The AArch64 pseudocode CheckSystemAccess() specifies that op1
7847      * encodes a minimum access level for the register. We roll this
7848      * runtime check into our general permission check code, so check
7849      * here that the reginfo's specified permissions are strict enough
7850      * to encompass the generic architectural permission check.
7851      */
7852     if (r->state != ARM_CP_STATE_AA32) {
7853         CPAccessRights mask;
7854         switch (r->opc1) {
7855         case 0:
7856             /* min_EL EL1, but some accessible to EL0 via kernel ABI */
7857             mask = PL0U_R | PL1_RW;
7858             break;
7859         case 1: case 2:
7860             /* min_EL EL1 */
7861             mask = PL1_RW;
7862             break;
7863         case 3:
7864             /* min_EL EL0 */
7865             mask = PL0_RW;
7866             break;
7867         case 4:
7868         case 5:
7869             /* min_EL EL2 */
7870             mask = PL2_RW;
7871             break;
7872         case 6:
7873             /* min_EL EL3 */
7874             mask = PL3_RW;
7875             break;
7876         case 7:
7877             /* min_EL EL1, secure mode only (we don't check the latter) */
7878             mask = PL1_RW;
7879             break;
7880         default:
7881             /* broken reginfo with out-of-range opc1 */
7882             g_assert_not_reached();
7883         }
7884         /* assert our permissions are not too lax (stricter is fine) */
7885         assert((r->access & ~mask) == 0);
7886     }
7887 
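    /*
     * For instance, CTR_EL0 above is encoded with opc1 == 3 (min_EL
     * EL0), so its declared rights must fit within PL0_RW; its actual
     * PL0_R is stricter than the mask, which the assert allows.
     */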
7888     /*
7889      * Check that the register definition has enough info to handle
7890      * reads and writes if they are permitted.
7891      */
7892     if (!(r->type & (ARM_CP_SPECIAL_MASK | ARM_CP_CONST))) {
7893         if (r->access & PL3_R) {
7894             assert((r->fieldoffset ||
7895                    (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
7896                    r->readfn);
7897         }
7898         if (r->access & PL3_W) {
7899             assert((r->fieldoffset ||
7900                    (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
7901                    r->writefn);
7902         }
7903     }
7904 
7905     /*
7906      * Eliminate registers that are not present because the EL is missing.
7907      * Doing this here makes it easier to put all registers for a given
7908      * feature into the same ARMCPRegInfo array and define them all at once.
7909      */
7910     if (arm_feature(env, ARM_FEATURE_EL3)) {
7911         /*
7912          * An EL2 register without EL2 but with EL3 is (usually) RES0.
7913          * See rule RJFFP in section D1.1.3 of DDI0487H.a.
7914          */
7915         int min_el = ctz32(r->access) / 2;
7916         if (min_el == 2 && !arm_feature(env, ARM_FEATURE_EL2)) {
7917             if (r->type & ARM_CP_EL3_NO_EL2_UNDEF) {
7918                 return;
7919             }
7920             if (!(r->type & ARM_CP_EL3_NO_EL2_KEEP)) {
7921                 /* This should not have been a very special register. */
7922                 int old_special = r->type & ARM_CP_SPECIAL_MASK;
7923                 assert(old_special == 0 || old_special == ARM_CP_NOP);
7924 
7925                 r_const = *r;
7926 
7927                 /*
7928                  * Set the special function to CONST, retaining the other flags.
7929                  * This is important for e.g. ARM_CP_SVE so that we still
7930                  * take the SVE trap if CPTR_EL3.EZ == 0.
7931                  */
7932                 r_const.type = (r->type & ~ARM_CP_SPECIAL_MASK) | ARM_CP_CONST;
7933                 /*
7934                  * Usually, these registers become RES0, but there are a few
7935                  * special cases like VPIDR_EL2 which have a constant non-zero
7936                  * value with writes ignored.
7937                  */
7938                 if (!(r->type & ARM_CP_EL3_NO_EL2_C_NZ)) {
7939                     r_const.resetvalue = 0;
7940                 }
7941                 /*
7942                  * ARM_CP_CONST has precedence, so removing the callbacks and
7943                  * offsets is not strictly necessary, but it is potentially
7944                  * less confusing to debug later.
7945                  */
7946                 r_const.readfn = NULL;
7947                 r_const.writefn = NULL;
7948                 r_const.raw_readfn = NULL;
7949                 r_const.raw_writefn = NULL;
7950                 r_const.resetfn = NULL;
7951                 r_const.fieldoffset = 0;
7952                 r_const.bank_fieldoffsets[0] = 0;
7953                 r_const.bank_fieldoffsets[1] = 0;
7954 
7955                 r = &r_const;
7956             }
7957         }
7958     } else {
7959         CPAccessRights max_el = (arm_feature(env, ARM_FEATURE_EL2)
7960                                  ? PL2_RW : PL1_RW);
7961         if ((r->access & max_el) == 0) {
7962             return;
7963         }
7964     }
7965 
7966     for (int crm = crmmin; crm <= crmmax; crm++) {
7967         for (int opc1 = opc1min; opc1 <= opc1max; opc1++) {
7968             for (int opc2 = opc2min; opc2 <= opc2max; opc2++) {
7969                 ARMCPRegInfo *r2 = alloc_cpreg(r, NULL);
7970                 ARMCPRegInfo *r3;
7971 
7972                 /*
7973                  * By convention, for wildcarded registers only the first
7974                  * entry is used for migration; the others are marked as
7975                  * ALIAS so we don't try to transfer the register
7976                  * multiple times.
7977                  */
7978                 if (crm != crmmin || opc1 != opc1min || opc2 != opc2min) {
7979                     r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB;
7980                 }
7981 
7982                 /* Overwrite CP_ANY with the instantiation. */
7983                 r2->crm = crm;
7984                 r2->opc1 = opc1;
7985                 r2->opc2 = opc2;
7986 
7987                 switch (r->state) {
7988                 case ARM_CP_STATE_AA32:
7989                     add_cpreg_to_hashtable_aa32(cpu, r2);
7990                     break;
7991                 case ARM_CP_STATE_AA64:
7992                     add_cpreg_to_hashtable_aa64(cpu, r2);
7993                     break;
7994                 case ARM_CP_STATE_BOTH:
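                    /*
                     * Register both views: the AArch32 one under the
                     * requested coprocessor number, and the AArch64 one
                     * with cp == 0, as the code below sets up.
                     */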
7995                     r3 = alloc_cpreg(r2, NULL);
7996                     r2->cp = cp;
7997                     add_cpreg_to_hashtable_aa32(cpu, r2);
7998                     r3->cp = 0;
7999                     add_cpreg_to_hashtable_aa64(cpu, r3);
8000                     break;
8001                 default:
8002                     g_assert_not_reached();
8003                 }
8004             }
8005         }
8006     }
8007 }
8008 
8009 /* Define a whole list of registers */
8010 void define_arm_cp_regs_len(ARMCPU *cpu, const ARMCPRegInfo *regs, size_t len)
8011 {
8012     for (size_t i = 0; i < len; ++i) {
8013         define_one_arm_cp_reg(cpu, regs + i);
8014     }
8015 }
8016 
8017 /*
8018  * Modify ARMCPRegInfo for access from userspace.
8019  *
8020  * This is a data driven modification directed by
8021  * ARMCPRegUserSpaceInfo. All registers become ARM_CP_CONST as
8022  * user-space cannot alter any values and dynamic values pertaining to
8023  * execution state are hidden from user space view anyway.
8024  */
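/*
 * A minimal sketch of a mods table, with hypothetical register names
 * purely for illustration:
 *
 *     static const ARMCPRegUserSpaceInfo user_mods[] = {
 *         { .name = "SOME_ID_EL1", .exported_bits = 0xf },
 *         { .name = "SOME_ID_*_RESERVED", .is_glob = true },
 *     };
 *
 * The exact-match entry leaves only bits [3:0] of SOME_ID_EL1's reset
 * value visible to user space, while the glob entry turns every
 * register matching the pattern into a constant that reads as zero.
 */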
8025 void modify_arm_cp_regs_with_len(ARMCPRegInfo *regs, size_t regs_len,
8026                                  const ARMCPRegUserSpaceInfo *mods,
8027                                  size_t mods_len)
8028 {
8029     for (size_t mi = 0; mi < mods_len; ++mi) {
8030         const ARMCPRegUserSpaceInfo *m = mods + mi;
8031         GPatternSpec *pat = NULL;
8032 
8033         if (m->is_glob) {
8034             pat = g_pattern_spec_new(m->name);
8035         }
8036         for (size_t ri = 0; ri < regs_len; ++ri) {
8037             ARMCPRegInfo *r = regs + ri;
8038 
8039             if (pat && g_pattern_match_string(pat, r->name)) {
8040                 r->type = ARM_CP_CONST;
8041                 r->access = PL0U_R;
8042                 r->resetvalue = 0;
8043                 /* continue */
8044             } else if (strcmp(r->name, m->name) == 0) {
8045                 r->type = ARM_CP_CONST;
8046                 r->access = PL0U_R;
8047                 r->resetvalue &= m->exported_bits;
8048                 r->resetvalue |= m->fixed_bits;
8049                 break;
8050             }
8051         }
8052         if (pat) {
8053             g_pattern_spec_free(pat);
8054         }
8055     }
8056 }
8057 
8058 const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
8059 {
8060     return g_hash_table_lookup(cpregs, (gpointer)(uintptr_t)encoded_cp);
8061 }
8062 
8063 void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
8064                          uint64_t value)
8065 {
8066     /* Helper coprocessor write function for write-ignore registers */
8067 }
8068 
8069 uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
8070 {
8071     /* Helper coprocessor read function for read-as-zero registers */
8072     return 0;
8073 }
8074 
8075 void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *ri)
8076 {
8077     /* Helper coprocessor reset function for do-nothing-on-reset registers */
8078 }
8079 
8080 static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type)
8081 {
8082     /*
8083      * Return true if it is not valid for us to switch to
8084      * this CPU mode (ie all the UNPREDICTABLE cases in
8085      * the ARM ARM CPSRWriteByInstr pseudocode).
8086      */
8087 
8088     /* Changes to or from Hyp via MSR and CPS are illegal. */
8089     if (write_type == CPSRWriteByInstr &&
8090         ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP ||
8091          mode == ARM_CPU_MODE_HYP)) {
8092         return 1;
8093     }
8094 
8095     switch (mode) {
8096     case ARM_CPU_MODE_USR:
8097         return 0;
8098     case ARM_CPU_MODE_SYS:
8099     case ARM_CPU_MODE_SVC:
8100     case ARM_CPU_MODE_ABT:
8101     case ARM_CPU_MODE_UND:
8102     case ARM_CPU_MODE_IRQ:
8103     case ARM_CPU_MODE_FIQ:
8104         /*
8105          * Note that we don't implement the IMPDEF NSACR.RFR which in v7
8106          * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.)
8107          */
8108         /*
8109          * If HCR.TGE is set then changes from Monitor to NS PL1 via MSR
8110          * and CPS are treated as illegal mode changes.
8111          */
8112         if (write_type == CPSRWriteByInstr &&
8113             (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON &&
8114             (arm_hcr_el2_eff(env) & HCR_TGE)) {
8115             return 1;
8116         }
8117         return 0;
8118     case ARM_CPU_MODE_HYP:
8119         return !arm_is_el2_enabled(env) || arm_current_el(env) < 2;
8120     case ARM_CPU_MODE_MON:
8121         return arm_current_el(env) < 3;
8122     default:
8123         return 1;
8124     }
8125 }
8126 
8127 uint32_t cpsr_read(CPUARMState *env)
8128 {
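    /*
     * The NZCV flags are cached in separate fields: Z is set iff
     * env->ZF == 0, N and V live in bit 31 of NF and VF, and CF is a
     * plain 0/1; fold them back into their architectural positions.
     */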
8129     int ZF;
8130     ZF = (env->ZF == 0);
8131     return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
8132         (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
8133         | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
8134         | ((env->condexec_bits & 0xfc) << 8)
8135         | (env->GE << 16) | (env->daif & CPSR_AIF);
8136 }
8137 
8138 void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
8139                 CPSRWriteType write_type)
8140 {
8141     uint32_t changed_daif;
8142     bool rebuild_hflags = (write_type != CPSRWriteRaw) &&
8143         (mask & (CPSR_M | CPSR_E | CPSR_IL));
8144 
8145     if (mask & CPSR_NZCV) {
8146         env->ZF = (~val) & CPSR_Z;
8147         env->NF = val;
8148         env->CF = (val >> 29) & 1;
8149         env->VF = (val << 3) & 0x80000000;
8150     }
8151     if (mask & CPSR_Q) {
8152         env->QF = ((val & CPSR_Q) != 0);
8153     }
8154     if (mask & CPSR_T) {
8155         env->thumb = ((val & CPSR_T) != 0);
8156     }
8157     if (mask & CPSR_IT_0_1) {
8158         env->condexec_bits &= ~3;
8159         env->condexec_bits |= (val >> 25) & 3;
8160     }
8161     if (mask & CPSR_IT_2_7) {
8162         env->condexec_bits &= 3;
8163         env->condexec_bits |= (val >> 8) & 0xfc;
8164     }
8165     if (mask & CPSR_GE) {
8166         env->GE = (val >> 16) & 0xf;
8167     }
8168 
8169     /*
8170      * In a V7 implementation that includes the security extensions but does
8171      * not include Virtualization Extensions the SCR.FW and SCR.AW bits control
8172      * whether non-secure software is allowed to change the CPSR_F and CPSR_A
8173      * bits respectively.
8174      *
8175      * In a V8 implementation, it is permitted for privileged software to
8176      * change the CPSR A/F bits regardless of the SCR.AW/FW bits.
8177      */
8178     if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) &&
8179         arm_feature(env, ARM_FEATURE_EL3) &&
8180         !arm_feature(env, ARM_FEATURE_EL2) &&
8181         !arm_is_secure(env)) {
8182 
8183         changed_daif = (env->daif ^ val) & mask;
8184 
8185         if (changed_daif & CPSR_A) {
8186             /*
8187              * Check to see if we are allowed to change the masking of async
8188              * abort exceptions from a non-secure state.
8189              */
8190             if (!(env->cp15.scr_el3 & SCR_AW)) {
8191                 qemu_log_mask(LOG_GUEST_ERROR,
8192                               "Ignoring attempt to switch CPSR_A flag from "
8193                               "non-secure world with SCR.AW bit clear\n");
8194                 mask &= ~CPSR_A;
8195             }
8196         }
8197 
8198         if (changed_daif & CPSR_F) {
8199             /*
8200              * Check to see if we are allowed to change the masking of FIQ
8201              * exceptions from a non-secure state.
8202              */
8203             if (!(env->cp15.scr_el3 & SCR_FW)) {
8204                 qemu_log_mask(LOG_GUEST_ERROR,
8205                               "Ignoring attempt to switch CPSR_F flag from "
8206                               "non-secure world with SCR.FW bit clear\n");
8207                 mask &= ~CPSR_F;
8208             }
8209 
8210             /*
8211              * Check whether non-maskable FIQ (NMFI) support is enabled.
8212      * If this bit is set, software is not allowed to mask
8213              * FIQs, but is allowed to set CPSR_F to 0.
8214              */
8215             if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) &&
8216                 (val & CPSR_F)) {
8217                 qemu_log_mask(LOG_GUEST_ERROR,
8218                               "Ignoring attempt to enable CPSR_F flag "
8219                               "(non-maskable FIQ [NMFI] support enabled)\n");
8220                 mask &= ~CPSR_F;
8221             }
8222         }
8223     }
8224 
8225     env->daif &= ~(CPSR_AIF & mask);
8226     env->daif |= val & CPSR_AIF & mask;
8227 
8228     if (write_type != CPSRWriteRaw &&
8229         ((env->uncached_cpsr ^ val) & mask & CPSR_M)) {
8230         if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) {
8231             /*
8232              * Note that we can only get here in USR mode if this is a
8233              * gdb stub write; for this case we follow the architectural
8234              * behaviour for guest writes in USR mode of ignoring an attempt
8235              * to switch mode. (Those are caught by translate.c for writes
8236              * triggered by guest instructions.)
8237              */
8238             mask &= ~CPSR_M;
8239         } else if (bad_mode_switch(env, val & CPSR_M, write_type)) {
8240             /*
8241              * Attempt to switch to an invalid mode: this is UNPREDICTABLE in
8242              * v7, and has defined behaviour in v8:
8243              *  + leave CPSR.M untouched
8244              *  + allow changes to the other CPSR fields
8245              *  + set PSTATE.IL
8246              * For user changes via the GDB stub, we don't set PSTATE.IL,
8247              * as this would be unnecessarily harsh for a user error.
8248              */
8249             mask &= ~CPSR_M;
8250             if (write_type != CPSRWriteByGDBStub &&
8251                 arm_feature(env, ARM_FEATURE_V8)) {
8252                 mask |= CPSR_IL;
8253                 val |= CPSR_IL;
8254             }
8255             qemu_log_mask(LOG_GUEST_ERROR,
8256                           "Illegal AArch32 mode switch attempt from %s to %s\n",
8257                           aarch32_mode_name(env->uncached_cpsr),
8258                           aarch32_mode_name(val));
8259         } else {
8260             qemu_log_mask(CPU_LOG_INT, "%s %s to %s PC 0x%" PRIx32 "\n",
8261                           write_type == CPSRWriteExceptionReturn ?
8262                           "Exception return from AArch32" :
8263                           "AArch32 mode switch from",
8264                           aarch32_mode_name(env->uncached_cpsr),
8265                           aarch32_mode_name(val), env->regs[15]);
8266             switch_mode(env, val & CPSR_M);
8267         }
8268     }
8269     mask &= ~CACHED_CPSR_BITS;
8270     env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
8271     if (tcg_enabled() && rebuild_hflags) {
8272         arm_rebuild_hflags(env);
8273     }
8274 }
8275 
8276 #ifdef CONFIG_USER_ONLY
8277 
8278 static void switch_mode(CPUARMState *env, int mode)
8279 {
8280     ARMCPU *cpu = env_archcpu(env);
8281 
8282     if (mode != ARM_CPU_MODE_USR) {
8283         cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
8284     }
8285 }
8286 
8287 uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
8288                                  uint32_t cur_el, bool secure)
8289 {
8290     return 1;
8291 }
8292 
8293 void aarch64_sync_64_to_32(CPUARMState *env)
8294 {
8295     g_assert_not_reached();
8296 }
8297 
8298 #else
8299 
8300 static void switch_mode(CPUARMState *env, int mode)
8301 {
8302     int old_mode;
8303     int i;
8304 
8305     old_mode = env->uncached_cpsr & CPSR_M;
8306     if (mode == old_mode) {
8307         return;
8308     }
8309 
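    /*
     * FIQ mode banks r8-r12 in addition to r13/r14, so moving into or
     * out of FIQ swaps those five registers through usr_regs/fiq_regs.
     */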
8310     if (old_mode == ARM_CPU_MODE_FIQ) {
8311         memcpy(env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
8312         memcpy(env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
8313     } else if (mode == ARM_CPU_MODE_FIQ) {
8314         memcpy(env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
8315         memcpy(env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
8316     }
8317 
8318     i = bank_number(old_mode);
8319     env->banked_r13[i] = env->regs[13];
8320     env->banked_spsr[i] = env->spsr;
8321 
8322     i = bank_number(mode);
8323     env->regs[13] = env->banked_r13[i];
8324     env->spsr = env->banked_spsr[i];
8325 
8326     env->banked_r14[r14_bank_number(old_mode)] = env->regs[14];
8327     env->regs[14] = env->banked_r14[r14_bank_number(mode)];
8328 }
8329 
8330 /*
8331  * Physical Interrupt Target EL Lookup Table
8332  *
8333  * [ From ARM ARM section G1.13.4 (Table G1-15) ]
8334  *
8335  * The below multi-dimensional table is used for looking up the target
8336  * exception level given numerous condition criteria.  Specifically, the
8337  * target EL is based on SCR and HCR routing controls as well as the
8338  * currently executing EL and secure state.
8339  *
8340  *    Dimensions:
8341  *    target_el_table[2][2][2][2][2][4]
8342  *                    |  |  |  |  |  +--- Current EL
8343  *                    |  |  |  |  +------ Non-secure(0)/Secure(1)
8344  *                    |  |  |  +--------- HCR mask override
8345  *                    |  |  +------------ SCR exec state control
8346  *                    |  +--------------- SCR mask override
8347  *                    +------------------ 32-bit(0)/64-bit(1) EL3
8348  *
8349  *    The table values are as such:
8350  *    0-3 = EL0-EL3
8351  *     -1 = Cannot occur
8352  *
8353  * The ARM ARM target EL table includes entries indicating that an "exception
8354  * is not taken".  The two cases where this is applicable are:
8355  *    1) An exception is taken from EL3 but the SCR does not have the exception
8356  *    routed to EL3.
8357  *    2) An exception is taken from EL2 but the HCR does not have the exception
8358  *    routed to EL2.
8359  * In these two cases, the below table contains a target of EL1.  This value is
8360  * returned as it is expected that the consumer of the table data will check
8361  * for "target EL >= current EL" to ensure the exception is not taken.
8362  *
8363  *            SCR     HCR
8364  *         64  EA     AMO                 From
8365  *        BIT IRQ     IMO      Non-secure         Secure
8366  *        EL3 FIQ  RW FMO   EL0 EL1 EL2 EL3   EL0 EL1 EL2 EL3
8367  */
8368 static const int8_t target_el_table[2][2][2][2][2][4] = {
8369     {{{{/* 0   0   0   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
8370        {/* 0   0   0   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},
8371       {{/* 0   0   1   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
8372        {/* 0   0   1   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},},
8373      {{{/* 0   1   0   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
8374        {/* 0   1   0   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},
8375       {{/* 0   1   1   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
8376        {/* 0   1   1   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},},},
8377     {{{{/* 1   0   0   0 */{ 1,  1,  2, -1 },{ 1,  1, -1,  1 },},
8378        {/* 1   0   0   1 */{ 2,  2,  2, -1 },{ 2,  2, -1,  1 },},},
8379       {{/* 1   0   1   0 */{ 1,  1,  1, -1 },{ 1,  1,  1,  1 },},
8380        {/* 1   0   1   1 */{ 2,  2,  2, -1 },{ 2,  2,  2,  1 },},},},
8381      {{{/* 1   1   0   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
8382        {/* 1   1   0   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},
8383       {{/* 1   1   1   0 */{ 3,  3,  3, -1 },{ 3,  3,  3,  3 },},
8384        {/* 1   1   1   1 */{ 3,  3,  3, -1 },{ 3,  3,  3,  3 },},},},},
8385 };
8386 
8387 /*
8388  * Determine the target EL for physical exceptions
8389  */
8390 uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
8391                                  uint32_t cur_el, bool secure)
8392 {
8393     CPUARMState *env = cpu_env(cs);
8394     bool rw;
8395     bool scr;
8396     bool hcr;
8397     int target_el;
8398     /* Is the highest EL AArch64? */
8399     bool is64 = arm_feature(env, ARM_FEATURE_AARCH64);
8400     uint64_t hcr_el2;
8401 
8402     if (arm_feature(env, ARM_FEATURE_EL3)) {
8403         rw = arm_scr_rw_eff(env);
8404     } else {
8405         /*
8406          * Either EL2 is the highest EL (and so the EL2 register width
8407          * is given by is64); or there is no EL2 or EL3, in which case
8408          * the value of 'rw' does not affect the table lookup anyway.
8409          */
8410         rw = is64;
8411     }
8412 
8413     hcr_el2 = arm_hcr_el2_eff(env);
8414     switch (excp_idx) {
8415     case EXCP_IRQ:
8416     case EXCP_NMI:
8417         scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ);
8418         hcr = hcr_el2 & HCR_IMO;
8419         break;
8420     case EXCP_FIQ:
8421         scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ);
8422         hcr = hcr_el2 & HCR_FMO;
8423         break;
8424     default:
8425         scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA);
8426         hcr = hcr_el2 & HCR_AMO;
8427         break;
8428     }
8429 
8430     /*
8431      * For these purposes, TGE and AMO/IMO/FMO both force the
8432      * interrupt to EL2.  Fold TGE into the bit extracted above.
8433      */
8434     hcr |= (hcr_el2 & HCR_TGE) != 0;
8435 
8436     /* Perform a table-lookup for the target EL given the current state */
8437     target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el];
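    /*
     * For example, an IRQ from non-secure EL0 with an AArch64 EL3
     * (is64 = 1), SCR_EL3.IRQ = 0, AArch64 lower ELs (rw = 1) and
     * HCR_EL2.IMO = 1 looks up target_el_table[1][0][1][1][0][0],
     * which routes the interrupt to EL2.
     */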
8438 
8439     assert(target_el > 0);
8440 
8441     return target_el;
8442 }
8443 
8444 void arm_log_exception(CPUState *cs)
8445 {
8446     int idx = cs->exception_index;
8447 
8448     if (qemu_loglevel_mask(CPU_LOG_INT)) {
8449         const char *exc = NULL;
8450         static const char * const excnames[] = {
8451             [EXCP_UDEF] = "Undefined Instruction",
8452             [EXCP_SWI] = "SVC",
8453             [EXCP_PREFETCH_ABORT] = "Prefetch Abort",
8454             [EXCP_DATA_ABORT] = "Data Abort",
8455             [EXCP_IRQ] = "IRQ",
8456             [EXCP_FIQ] = "FIQ",
8457             [EXCP_BKPT] = "Breakpoint",
8458             [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
8459             [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
8460             [EXCP_HVC] = "Hypervisor Call",
8461             [EXCP_HYP_TRAP] = "Hypervisor Trap",
8462             [EXCP_SMC] = "Secure Monitor Call",
8463             [EXCP_VIRQ] = "Virtual IRQ",
8464             [EXCP_VFIQ] = "Virtual FIQ",
8465             [EXCP_SEMIHOST] = "Semihosting call",
8466             [EXCP_NOCP] = "v7M NOCP UsageFault",
8467             [EXCP_INVSTATE] = "v7M INVSTATE UsageFault",
8468             [EXCP_STKOF] = "v8M STKOF UsageFault",
8469             [EXCP_LAZYFP] = "v7M exception during lazy FP stacking",
8470             [EXCP_LSERR] = "v8M LSERR UsageFault",
8471             [EXCP_UNALIGNED] = "v7M UNALIGNED UsageFault",
8472             [EXCP_DIVBYZERO] = "v7M DIVBYZERO UsageFault",
8473             [EXCP_VSERR] = "Virtual SERR",
8474             [EXCP_GPC] = "Granule Protection Check",
8475             [EXCP_NMI] = "NMI",
8476             [EXCP_VINMI] = "Virtual IRQ NMI",
8477             [EXCP_VFNMI] = "Virtual FIQ NMI",
8478             [EXCP_MON_TRAP] = "Monitor Trap",
8479         };
8480 
8481         if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
8482             exc = excnames[idx];
8483         }
8484         if (!exc) {
8485             exc = "unknown";
8486         }
8487         qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s] on CPU %d\n",
8488                       idx, exc, cs->cpu_index);
8489     }
8490 }
8491 
8492 /*
8493  * Function used to synchronize QEMU's AArch64 register set with AArch32
8494  * register set.  This is necessary when switching between AArch32 and AArch64
8495  * execution state.
8496  */
8497 void aarch64_sync_32_to_64(CPUARMState *env)
8498 {
8499     int i;
8500     uint32_t mode = env->uncached_cpsr & CPSR_M;
8501 
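    /*
     * Overall mapping between the AArch64 view and the AArch32 banked
     * registers (mirrored by aarch64_regnum() below):
     *   x0-x7   r0-r7           x16/x17 lr_irq/sp_irq
     *   x8-x12  r8-r12 (usr)    x18/x19 lr_svc/sp_svc
     *   x13/x14 sp_usr/lr_usr   x20/x21 lr_abt/sp_abt
     *   x15     sp_hyp          x22/x23 lr_und/sp_und
     *                           x24-x30 r8_fiq-r12_fiq/sp_fiq/lr_fiq
     */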
8502     /* We can blanket copy R[0:7] to X[0:7] */
8503     for (i = 0; i < 8; i++) {
8504         env->xregs[i] = env->regs[i];
8505     }
8506 
8507     /*
8508      * Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12.
8509      * Otherwise, they come from the banked user regs.
8510      */
8511     if (mode == ARM_CPU_MODE_FIQ) {
8512         for (i = 8; i < 13; i++) {
8513             env->xregs[i] = env->usr_regs[i - 8];
8514         }
8515     } else {
8516         for (i = 8; i < 13; i++) {
8517             env->xregs[i] = env->regs[i];
8518         }
8519     }
8520 
8521     /*
8522      * Registers x13-x23 are the various mode SP and LR registers. Registers
8523      * r13 and r14 are only copied if we are in that mode, otherwise we copy
8524      * from the mode banked register.
8525      */
8526     if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
8527         env->xregs[13] = env->regs[13];
8528         env->xregs[14] = env->regs[14];
8529     } else {
8530         env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)];
8531         /* HYP is an exception: having no banked r14, x14 comes from the live r14 */
8532         if (mode == ARM_CPU_MODE_HYP) {
8533             env->xregs[14] = env->regs[14];
8534         } else {
8535             env->xregs[14] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)];
8536         }
8537     }
8538 
8539     if (mode == ARM_CPU_MODE_HYP) {
8540         env->xregs[15] = env->regs[13];
8541     } else {
8542         env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)];
8543     }
8544 
8545     if (mode == ARM_CPU_MODE_IRQ) {
8546         env->xregs[16] = env->regs[14];
8547         env->xregs[17] = env->regs[13];
8548     } else {
8549         env->xregs[16] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)];
8550         env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)];
8551     }
8552 
8553     if (mode == ARM_CPU_MODE_SVC) {
8554         env->xregs[18] = env->regs[14];
8555         env->xregs[19] = env->regs[13];
8556     } else {
8557         env->xregs[18] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)];
8558         env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)];
8559     }
8560 
8561     if (mode == ARM_CPU_MODE_ABT) {
8562         env->xregs[20] = env->regs[14];
8563         env->xregs[21] = env->regs[13];
8564     } else {
8565         env->xregs[20] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)];
8566         env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)];
8567     }
8568 
8569     if (mode == ARM_CPU_MODE_UND) {
8570         env->xregs[22] = env->regs[14];
8571         env->xregs[23] = env->regs[13];
8572     } else {
8573         env->xregs[22] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)];
8574         env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)];
8575     }
8576 
8577     /*
8578      * Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
8579      * mode, then we can copy from r8-r14.  Otherwise, we copy from the
8580      * FIQ bank for r8-r14.
8581      */
8582     if (mode == ARM_CPU_MODE_FIQ) {
8583         for (i = 24; i < 31; i++) {
8584             env->xregs[i] = env->regs[i - 16];   /* X[24:30] <- R[8:14] */
8585         }
8586     } else {
8587         for (i = 24; i < 29; i++) {
8588             env->xregs[i] = env->fiq_regs[i - 24];
8589         }
8590         env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)];
8591         env->xregs[30] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)];
8592     }
8593 
8594     env->pc = env->regs[15];
8595 }
8596 
8597 /*
8598  * Function used to synchronize QEMU's AArch32 register set with AArch64
8599  * register set.  This is necessary when switching between AArch32 and AArch64
8600  * execution state.
8601  */
8602 void aarch64_sync_64_to_32(CPUARMState *env)
8603 {
8604     int i;
8605     uint32_t mode = env->uncached_cpsr & CPSR_M;
8606 
8607     /* We can blanket copy X[0:7] to R[0:7] */
8608     for (i = 0; i < 8; i++) {
8609         env->regs[i] = env->xregs[i];
8610     }
8611 
8612     /*
8613      * Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12.
8614      * Otherwise, we copy x8-x12 into the banked user regs.
8615      */
8616     if (mode == ARM_CPU_MODE_FIQ) {
8617         for (i = 8; i < 13; i++) {
8618             env->usr_regs[i - 8] = env->xregs[i];
8619         }
8620     } else {
8621         for (i = 8; i < 13; i++) {
8622             env->regs[i] = env->xregs[i];
8623         }
8624     }
8625 
8626     /*
8627      * Registers r13 & r14 depend on the current mode.
8628      * If we are in a given mode, we copy the corresponding x registers to r13
8629      * and r14.  Otherwise, we copy the x register to the banked r13 and r14
8630      * for the mode.
8631      */
8632     if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
8633         env->regs[13] = env->xregs[13];
8634         env->regs[14] = env->xregs[14];
8635     } else {
8636         env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13];
8637 
8638         /*
8639          * HYP is an exception in that it does not have its own banked r14 but
8640          * shares the USR r14
8641          */
8642         if (mode == ARM_CPU_MODE_HYP) {
8643             env->regs[14] = env->xregs[14];
8644         } else {
8645             env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)] = env->xregs[14];
8646         }
8647     }
8648 
8649     if (mode == ARM_CPU_MODE_HYP) {
8650         env->regs[13] = env->xregs[15];
8651     } else {
8652         env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15];
8653     }
8654 
8655     if (mode == ARM_CPU_MODE_IRQ) {
8656         env->regs[14] = env->xregs[16];
8657         env->regs[13] = env->xregs[17];
8658     } else {
8659         env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16];
8660         env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17];
8661     }
8662 
8663     if (mode == ARM_CPU_MODE_SVC) {
8664         env->regs[14] = env->xregs[18];
8665         env->regs[13] = env->xregs[19];
8666     } else {
8667         env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18];
8668         env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19];
8669     }
8670 
8671     if (mode == ARM_CPU_MODE_ABT) {
8672         env->regs[14] = env->xregs[20];
8673         env->regs[13] = env->xregs[21];
8674     } else {
8675         env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20];
8676         env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21];
8677     }
8678 
8679     if (mode == ARM_CPU_MODE_UND) {
8680         env->regs[14] = env->xregs[22];
8681         env->regs[13] = env->xregs[23];
8682     } else {
8683         env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)] = env->xregs[22];
8684         env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23];
8685     }
8686 
8687     /*
8688      * Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
8689      * mode, then we can copy to r8-r14.  Otherwise, we copy to the
8690      * FIQ bank for r8-r14.
8691      */
8692     if (mode == ARM_CPU_MODE_FIQ) {
8693         for (i = 24; i < 31; i++) {
8694             env->regs[i - 16] = env->xregs[i];   /* X[24:30] -> R[8:14] */
8695         }
8696     } else {
8697         for (i = 24; i < 29; i++) {
8698             env->fiq_regs[i - 24] = env->xregs[i];
8699         }
8700         env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29];
8701         env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30];
8702     }
8703 
8704     env->regs[15] = env->pc;
8705 }
8706 
8707 static void take_aarch32_exception(CPUARMState *env, int new_mode,
8708                                    uint32_t mask, uint32_t offset,
8709                                    uint32_t newpc)
8710 {
8711     int new_el;
8712 
8713     /* Change the CPU state so as to actually take the exception. */
8714     switch_mode(env, new_mode);
8715 
8716     /*
8717      * For exceptions taken to AArch32 we must clear the SS bit in both
8718      * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now.
8719      */
8720     env->pstate &= ~PSTATE_SS;
8721     env->spsr = cpsr_read(env);
8722     /* Clear IT bits.  */
8723     env->condexec_bits = 0;
8724     /* Switch to the new mode, and to the correct instruction set.  */
8725     env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
8726 
8727     /* This must be after mode switching. */
8728     new_el = arm_current_el(env);
8729 
8730     /* Set new mode endianness */
8731     env->uncached_cpsr &= ~CPSR_E;
8732     if (env->cp15.sctlr_el[new_el] & SCTLR_EE) {
8733         env->uncached_cpsr |= CPSR_E;
8734     }
8735     /* J and IL must always be cleared for exception entry */
8736     env->uncached_cpsr &= ~(CPSR_IL | CPSR_J);
8737     env->daif |= mask;
8738 
8739     if (cpu_isar_feature(aa32_ssbs, env_archcpu(env))) {
8740         if (env->cp15.sctlr_el[new_el] & SCTLR_DSSBS_32) {
8741             env->uncached_cpsr |= CPSR_SSBS;
8742         } else {
8743             env->uncached_cpsr &= ~CPSR_SSBS;
8744         }
8745     }
8746 
8747     if (new_mode == ARM_CPU_MODE_HYP) {
8748         env->thumb = (env->cp15.sctlr_el[2] & SCTLR_TE) != 0;
8749         env->elr_el[2] = env->regs[15];
8750     } else {
8751         /* CPSR.PAN is normally preserved unless...  */
8752         if (cpu_isar_feature(aa32_pan, env_archcpu(env))) {
8753             switch (new_el) {
8754             case 3:
8755                 if (!arm_is_secure_below_el3(env)) {
8756                     /* ... the target is EL3, from non-secure state.  */
8757                     env->uncached_cpsr &= ~CPSR_PAN;
8758                     break;
8759                 }
8760                 /* ... the target is EL3, from secure state ... */
8761                 /* fall through */
8762             case 1:
8763                 /* ... the target is EL1 and SCTLR.SPAN is 0.  */
8764                 if (!(env->cp15.sctlr_el[new_el] & SCTLR_SPAN)) {
8765                     env->uncached_cpsr |= CPSR_PAN;
8766                 }
8767                 break;
8768             }
8769         }
8770         /*
8771          * This is a lie, as there was no c1_sys on V4T/V5; but who cares,
8772          * we should just guard the Thumb mode on V4.
8773          */
8774         if (arm_feature(env, ARM_FEATURE_V4T)) {
8775             env->thumb =
8776                 (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0;
8777         }
8778         env->regs[14] = env->regs[15] + offset;
8779     }
8780     env->regs[15] = newpc;
8781 
8782     if (tcg_enabled()) {
8783         arm_rebuild_hflags(env);
8784     }
8785 }
8786 
8787 void arm_do_plugin_vcpu_discon_cb(CPUState *cs, uint64_t from)
8788 {
8789     switch (cs->exception_index) {
8790     case EXCP_IRQ:
8791     case EXCP_VIRQ:
8792     case EXCP_NMI:
8793     case EXCP_VINMI:
8794     case EXCP_FIQ:
8795     case EXCP_VFIQ:
8796     case EXCP_VFNMI:
8797     case EXCP_VSERR:
8798         qemu_plugin_vcpu_interrupt_cb(cs, from);
8799         break;
8800     default:
8801         qemu_plugin_vcpu_exception_cb(cs, from);
8802     }
8803 }
8804 
8805 static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs)
8806 {
8807     /*
8808      * Handle exception entry to Hyp mode; this is sufficiently
8809      * different to entry to other AArch32 modes that we handle it
8810      * separately here.
8811      *
8812      * The vector table entry used is always the 0x14 Hyp mode entry point,
8813      * unless this is an UNDEF/SVC/HVC/abort taken from Hyp to Hyp.
8814      * The offset applied to the preferred return address is always zero
8815      * (see DDI0487C.a section G1.12.3).
8816      * PSTATE A/I/F masks are set based only on the SCR.EA/IRQ/FIQ values.
8817      */
8818     uint32_t addr, mask;
8819     ARMCPU *cpu = ARM_CPU(cs);
8820     CPUARMState *env = &cpu->env;
8821 
8822     switch (cs->exception_index) {
8823     case EXCP_UDEF:
8824         addr = 0x04;
8825         break;
8826     case EXCP_SWI:
8827         addr = 0x08;
8828         break;
8829     case EXCP_BKPT:
8830         /* Fall through to prefetch abort.  */
8831     case EXCP_PREFETCH_ABORT:
8832         env->cp15.ifar_s = env->exception.vaddress;
8833         qemu_log_mask(CPU_LOG_INT, "...with HIFAR 0x%x\n",
8834                       (uint32_t)env->exception.vaddress);
8835         addr = 0x0c;
8836         break;
8837     case EXCP_DATA_ABORT:
8838         env->cp15.dfar_s = env->exception.vaddress;
8839         qemu_log_mask(CPU_LOG_INT, "...with HDFAR 0x%x\n",
8840                       (uint32_t)env->exception.vaddress);
8841         addr = 0x10;
8842         break;
8843     case EXCP_IRQ:
8844         addr = 0x18;
8845         break;
8846     case EXCP_FIQ:
8847         addr = 0x1c;
8848         break;
8849     case EXCP_HVC:
8850         addr = 0x08;
8851         break;
8852     case EXCP_HYP_TRAP:
8853         addr = 0x14;
8854         break;
8855     default:
8856         cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
8857     }
8858 
8859     if (cs->exception_index != EXCP_IRQ && cs->exception_index != EXCP_FIQ) {
8860         if (!arm_feature(env, ARM_FEATURE_V8)) {
8861             /*
8862              * QEMU syndrome values are v8-style. v7 has the IL bit
8863              * UNK/SBZP for "field not valid" cases, whereas v8 uses RES1.
8864              * If this is a v7 CPU, squash the IL bit in those cases.
8865              */
8866             if (cs->exception_index == EXCP_PREFETCH_ABORT ||
8867                 (cs->exception_index == EXCP_DATA_ABORT &&
8868                  !(env->exception.syndrome & ARM_EL_ISV)) ||
8869                 syn_get_ec(env->exception.syndrome) == EC_UNCATEGORIZED) {
8870                 env->exception.syndrome &= ~ARM_EL_IL;
8871             }
8872         }
8873         env->cp15.esr_el[2] = env->exception.syndrome;
8874     }
8875 
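    /*
     * Only exceptions taken from Hyp mode itself use the per-type
     * vector slots; anything trapped from a lower EL enters via the
     * 0x14 Hyp trap entry instead.
     */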
8876     if (arm_current_el(env) != 2 && addr < 0x14) {
8877         addr = 0x14;
8878     }
8879 
8880     mask = 0;
8881     if (!(env->cp15.scr_el3 & SCR_EA)) {
8882         mask |= CPSR_A;
8883     }
8884     if (!(env->cp15.scr_el3 & SCR_IRQ)) {
8885         mask |= CPSR_I;
8886     }
8887     if (!(env->cp15.scr_el3 & SCR_FIQ)) {
8888         mask |= CPSR_F;
8889     }
8890 
8891     addr += env->cp15.hvbar;
8892 
8893     take_aarch32_exception(env, ARM_CPU_MODE_HYP, mask, 0, addr);
8894 }
8895 
8896 static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
8897 {
8898     ARMCPU *cpu = ARM_CPU(cs);
8899     CPUARMState *env = &cpu->env;
8900     uint32_t addr;
8901     uint32_t mask;
8902     int new_mode;
8903     uint32_t offset;
8904     uint32_t moe;
8905 
8906     /* If this is a debug exception we must update the DBGDSCR.MOE bits */
8907     switch (syn_get_ec(env->exception.syndrome)) {
8908     case EC_BREAKPOINT:
8909     case EC_BREAKPOINT_SAME_EL:
8910         moe = 1;
8911         break;
8912     case EC_WATCHPOINT:
8913     case EC_WATCHPOINT_SAME_EL:
8914         moe = 10;
8915         break;
8916     case EC_AA32_BKPT:
8917         moe = 3;
8918         break;
8919     case EC_VECTORCATCH:
8920         moe = 5;
8921         break;
8922     default:
8923         moe = 0;
8924         break;
8925     }
8926 
8927     if (moe) {
8928         env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe);
8929     }
8930 
8931     if (env->exception.target_el == 2) {
8932         /* Debug exceptions are reported differently on AArch32 */
8933         switch (syn_get_ec(env->exception.syndrome)) {
8934         case EC_BREAKPOINT:
8935         case EC_BREAKPOINT_SAME_EL:
8936         case EC_AA32_BKPT:
8937         case EC_VECTORCATCH:
8938             env->exception.syndrome = syn_insn_abort(arm_current_el(env) == 2,
8939                                                      0, 0, 0x22);
8940             break;
8941         case EC_WATCHPOINT:
8942             env->exception.syndrome = syn_set_ec(env->exception.syndrome,
8943                                                  EC_DATAABORT);
8944             break;
8945         case EC_WATCHPOINT_SAME_EL:
8946             env->exception.syndrome = syn_set_ec(env->exception.syndrome,
8947                                                  EC_DATAABORT_SAME_EL);
8948             break;
8949         }
8950         arm_cpu_do_interrupt_aarch32_hyp(cs);
8951         return;
8952     }
8953 
8954     switch (cs->exception_index) {
8955     case EXCP_UDEF:
8956         new_mode = ARM_CPU_MODE_UND;
8957         addr = 0x04;
8958         mask = CPSR_I;
8959         if (env->thumb) {
8960             offset = 2;
8961         } else {
8962             offset = 4;
8963         }
8964         break;
8965     case EXCP_SWI:
8966         new_mode = ARM_CPU_MODE_SVC;
8967         addr = 0x08;
8968         mask = CPSR_I;
8969         /* The PC already points to the next instruction.  */
8970         offset = 0;
8971         break;
8972     case EXCP_BKPT:
8973         /* Fall through to prefetch abort.  */
8974     case EXCP_PREFETCH_ABORT:
8975         A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr);
8976         A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress);
8977         qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n",
8978                       env->exception.fsr, (uint32_t)env->exception.vaddress);
8979         new_mode = ARM_CPU_MODE_ABT;
8980         addr = 0x0c;
8981         mask = CPSR_A | CPSR_I;
8982         offset = 4;
8983         break;
8984     case EXCP_DATA_ABORT:
8985         A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
8986         A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress);
8987         qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
8988                       env->exception.fsr,
8989                       (uint32_t)env->exception.vaddress);
8990         new_mode = ARM_CPU_MODE_ABT;
8991         addr = 0x10;
8992         mask = CPSR_A | CPSR_I;
8993         offset = 8;
8994         break;
8995     case EXCP_IRQ:
8996         new_mode = ARM_CPU_MODE_IRQ;
8997         addr = 0x18;
8998         /* Disable IRQ and imprecise data aborts.  */
8999         mask = CPSR_A | CPSR_I;
9000         offset = 4;
9001         if (env->cp15.scr_el3 & SCR_IRQ) {
9002             /* IRQ routed to monitor mode */
9003             new_mode = ARM_CPU_MODE_MON;
9004             mask |= CPSR_F;
9005         }
9006         break;
9007     case EXCP_FIQ:
9008         new_mode = ARM_CPU_MODE_FIQ;
9009         addr = 0x1c;
9010         /* Disable FIQ, IRQ and imprecise data aborts.  */
9011         mask = CPSR_A | CPSR_I | CPSR_F;
9012         if (env->cp15.scr_el3 & SCR_FIQ) {
9013             /* FIQ routed to monitor mode */
9014             new_mode = ARM_CPU_MODE_MON;
9015         }
9016         offset = 4;
9017         break;
9018     case EXCP_VIRQ:
9019         new_mode = ARM_CPU_MODE_IRQ;
9020         addr = 0x18;
9021         /* Disable IRQ and imprecise data aborts.  */
9022         mask = CPSR_A | CPSR_I;
9023         offset = 4;
9024         break;
9025     case EXCP_VFIQ:
9026         new_mode = ARM_CPU_MODE_FIQ;
9027         addr = 0x1c;
9028         /* Disable FIQ, IRQ and imprecise data aborts.  */
9029         mask = CPSR_A | CPSR_I | CPSR_F;
9030         offset = 4;
9031         break;
9032     case EXCP_VSERR:
9033         {
9034             /*
9035              * Note that this is reported as a data abort, but the DFAR
9036              * has an UNKNOWN value.  Construct the SError syndrome from
9037              * AET and ExT fields.
9038              */
9039             ARMMMUFaultInfo fi = { .type = ARMFault_AsyncExternal, };
9040 
9041             if (extended_addresses_enabled(env)) {
9042                 env->exception.fsr = arm_fi_to_lfsc(&fi);
9043             } else {
9044                 env->exception.fsr = arm_fi_to_sfsc(&fi);
9045             }
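            /*
             * VSESR_EL2 bits [15:14] (AET) and [12] (ExT) map directly
             * into the reported fault status register.
             */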
9046             env->exception.fsr |= env->cp15.vsesr_el2 & 0xd000;
9047             A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
9048             qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x\n",
9049                           env->exception.fsr);
9050 
9051             new_mode = ARM_CPU_MODE_ABT;
9052             addr = 0x10;
9053             mask = CPSR_A | CPSR_I;
9054             offset = 8;
9055         }
9056         break;
9057     case EXCP_SMC:
9058         new_mode = ARM_CPU_MODE_MON;
9059         addr = 0x08;
9060         mask = CPSR_A | CPSR_I | CPSR_F;
9061         offset = 0;
9062         break;
9063     case EXCP_MON_TRAP:
9064         new_mode = ARM_CPU_MODE_MON;
9065         addr = 0x04;
9066         mask = CPSR_A | CPSR_I | CPSR_F;
9067         if (env->thumb) {
9068             offset = 2;
9069         } else {
9070             offset = 4;
9071         }
9072         break;
9073     default:
9074         cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
9075         return; /* Never happens.  Keep compiler happy.  */
9076     }
9077 
9078     if (new_mode == ARM_CPU_MODE_MON) {
9079         addr += env->cp15.mvbar;
9080     } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
9081         /* High vectors. When enabled, base address cannot be remapped. */
9082         addr += 0xffff0000;
9083     } else {
9084         /*
9085          * ARM v7 architectures provide a vector base address register to remap
9086          * the interrupt vector table.
9087      * This register is only honoured in non-monitor mode, and is banked.
9088          * Note: only bits 31:5 are valid.
9089          */
9090         addr += A32_BANKED_CURRENT_REG_GET(env, vbar);
9091     }
9092 
9093     if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
9094         env->cp15.scr_el3 &= ~SCR_NS;
9095     }
9096 
9097     take_aarch32_exception(env, new_mode, mask, offset, addr);
9098 }
9099 
9100 static int aarch64_regnum(CPUARMState *env, int aarch32_reg)
9101 {
9102     /*
9103      * Return the register number of the AArch64 view of the AArch32
9104      * register @aarch32_reg. The CPUARMState CPSR is assumed to still
9105      * be that of the AArch32 mode the exception came from.
9106      */
9107     int mode = env->uncached_cpsr & CPSR_M;
9108 
9109     switch (aarch32_reg) {
9110     case 0 ... 7:
9111         return aarch32_reg;
9112     case 8 ... 12:
9113         return mode == ARM_CPU_MODE_FIQ ? aarch32_reg + 16 : aarch32_reg;
9114     case 13:
9115         switch (mode) {
9116         case ARM_CPU_MODE_USR:
9117         case ARM_CPU_MODE_SYS:
9118             return 13;
9119         case ARM_CPU_MODE_HYP:
9120             return 15;
9121         case ARM_CPU_MODE_IRQ:
9122             return 17;
9123         case ARM_CPU_MODE_SVC:
9124             return 19;
9125         case ARM_CPU_MODE_ABT:
9126             return 21;
9127         case ARM_CPU_MODE_UND:
9128             return 23;
9129         case ARM_CPU_MODE_FIQ:
9130             return 29;
9131         default:
9132             g_assert_not_reached();
9133         }
9134     case 14:
9135         switch (mode) {
9136         case ARM_CPU_MODE_USR:
9137         case ARM_CPU_MODE_SYS:
9138         case ARM_CPU_MODE_HYP:
9139             return 14;
9140         case ARM_CPU_MODE_IRQ:
9141             return 16;
9142         case ARM_CPU_MODE_SVC:
9143             return 18;
9144         case ARM_CPU_MODE_ABT:
9145             return 20;
9146         case ARM_CPU_MODE_UND:
9147             return 22;
9148         case ARM_CPU_MODE_FIQ:
9149             return 30;
9150         default:
9151             g_assert_not_reached();
9152         }
9153     case 15:
9154         return 31;
9155     default:
9156         g_assert_not_reached();
9157     }
9158 }
9159 
9160 uint32_t cpsr_read_for_spsr_elx(CPUARMState *env)
9161 {
9162     uint32_t ret = cpsr_read(env);
9163 
9164     /* Move DIT from CPSR bit 21 to its SPSR_ELx location, bit 24 */
9165     if (ret & CPSR_DIT) {
9166         ret &= ~CPSR_DIT;
9167         ret |= PSTATE_DIT;
9168     }
9169     /* Merge PSTATE.SS into SPSR_ELx */
9170     ret |= env->pstate & PSTATE_SS;
9171 
9172     return ret;
9173 }
9174 
9175 void cpsr_write_from_spsr_elx(CPUARMState *env, uint32_t val)
9176 {
9177     uint32_t mask;
9178 
9179     /* Save SPSR_ELx.SS into PSTATE. */
9180     env->pstate = (env->pstate & ~PSTATE_SS) | (val & PSTATE_SS);
9181     val &= ~PSTATE_SS;
9182 
9183     /* Move DIT from its SPSR_ELx location, bit 24, back to CPSR bit 21 */
9184     if (val & PSTATE_DIT) {
9185         val &= ~PSTATE_DIT;
9186         val |= CPSR_DIT;
9187     }
9188 
9189     mask = aarch32_cpsr_valid_mask(env->features, &env_archcpu(env)->isar);
9190     cpsr_write(env, val, mask, CPSRWriteRaw);
9191 }
9192 
9193 static bool syndrome_is_sync_extabt(uint32_t syndrome)
9194 {
9195     /* Return true if this syndrome value is a synchronous external abort */
9196     switch (syn_get_ec(syndrome)) {
9197     case EC_INSNABORT:
9198     case EC_INSNABORT_SAME_EL:
9199     case EC_DATAABORT:
9200     case EC_DATAABORT_SAME_EL:
9201         /* Look at fault status code for all the synchronous ext abort cases */
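        /*
         * (0x10 is a plain synchronous external abort; 0x13-0x17 are
         *  aborts on a translation table walk, levels -1 through 3.)
         */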
9202         switch (syndrome & 0x3f) {
9203         case 0x10:
9204         case 0x13:
9205         case 0x14:
9206         case 0x15:
9207         case 0x16:
9208         case 0x17:
9209             return true;
9210         default:
9211             return false;
9212         }
9213     default:
9214         return false;
9215     }
9216 }
9217 
9218 /* Handle exception entry to a target EL which is using AArch64 */
9219 static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
9220 {
9221     ARMCPU *cpu = ARM_CPU(cs);
9222     CPUARMState *env = &cpu->env;
9223     unsigned int new_el = env->exception.target_el;
9224     vaddr addr = env->cp15.vbar_el[new_el];
9225     uint64_t new_mode = aarch64_pstate_mode(new_el, true);
9226     uint64_t old_mode;
9227     unsigned int cur_el = arm_current_el(env);
9228     int rt;
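    /*
     * The vector table at VBAR_ELx has four 0x200-byte groups: +0x0 for
     * the current EL with SP_EL0, +0x200 for the current EL with SP_ELx,
     * +0x400 for a lower EL using AArch64 and +0x600 for a lower EL
     * using AArch32.  Within a group: +0x0 synchronous, +0x80 IRQ,
     * +0x100 FIQ, +0x180 SError.
     */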
9229 
9230     if (tcg_enabled()) {
9231         /*
9232          * Note that new_el can never be 0.  If cur_el is 0, then
9233          * el0_a64 is is_a64(), else el0_a64 is ignored.
9234          */
9235         aarch64_sve_change_el(env, cur_el, new_el, is_a64(env));
9236     }
9237 
9238     if (cur_el < new_el) {
9239         /*
9240          * Entry vector offset depends on whether the implemented EL
9241          * immediately lower than the target level is using AArch32 or AArch64
9242          */
9243         bool is_aa64;
9244         uint64_t hcr;
9245 
9246         switch (new_el) {
9247         case 3:
9248             is_aa64 = arm_scr_rw_eff(env);
9249             break;
9250         case 2:
9251             hcr = arm_hcr_el2_eff(env);
9252             if ((hcr & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
9253                 is_aa64 = (hcr & HCR_RW) != 0;
9254                 break;
9255             }
9256             /* fall through */
9257         case 1:
9258             is_aa64 = is_a64(env);
9259             break;
9260         default:
9261             g_assert_not_reached();
9262         }
9263 
9264         if (is_aa64) {
9265             addr += 0x400;
9266         } else {
9267             addr += 0x600;
9268         }
9269     } else {
9270         if (pstate_read(env) & PSTATE_SP) {
9271             addr += 0x200;
9272         }
9273         if (is_a64(env) && (env->cp15.gcscr_el[new_el] & GCSCR_EXLOCKEN)) {
9274             new_mode |= PSTATE_EXLOCK;
9275         }
9276     }
9277 
9278     switch (cs->exception_index) {
9279     case EXCP_GPC:
9280         qemu_log_mask(CPU_LOG_INT, "...with MFAR 0x%" PRIx64 "\n",
9281                       env->cp15.mfar_el3);
9282         /* fall through */
9283     case EXCP_PREFETCH_ABORT:
9284     case EXCP_DATA_ABORT:
9285         /*
9286          * FEAT_DoubleFault allows synchronous external aborts taken to EL3
9287          * to be taken to the SError vector entrypoint.
9288          */
9289         if (new_el == 3 && (env->cp15.scr_el3 & SCR_EASE) &&
9290             syndrome_is_sync_extabt(env->exception.syndrome)) {
9291             addr += 0x180;
9292         }
9293         env->cp15.far_el[new_el] = env->exception.vaddress;
9294         qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
9295                       env->cp15.far_el[new_el]);
9296         /* fall through */
9297     case EXCP_BKPT:
9298     case EXCP_UDEF:
9299     case EXCP_SWI:
9300     case EXCP_HVC:
9301     case EXCP_HYP_TRAP:
9302     case EXCP_SMC:
9303         switch (syn_get_ec(env->exception.syndrome)) {
9304         case EC_ADVSIMDFPACCESSTRAP:
9305             /*
9306              * QEMU internal FP/SIMD syndromes from AArch32 include the
9307              * TA and coproc fields which are only exposed if the exception
9308              * is taken to AArch32 Hyp mode. Mask them out to get a valid
9309              * AArch64 format syndrome.
9310              */
9311             env->exception.syndrome &= ~MAKE_64BIT_MASK(0, 20);
9312             break;
9313         case EC_CP14RTTRAP:
9314         case EC_CP15RTTRAP:
9315         case EC_CP14DTTRAP:
9316             /*
9317              * For a trap on AArch32 MRC/MCR/LDC/STC the Rt field is currently
9318              * the raw register field from the insn; when taking this to
9319              * AArch64 we must convert it to the AArch64 view of the register
9320              * number. Notice that we read a 4-bit AArch32 register number and
9321              * write back a 5-bit AArch64 one.
9322              */
9323             rt = extract32(env->exception.syndrome, 5, 4);
9324             rt = aarch64_regnum(env, rt);
9325             env->exception.syndrome = deposit32(env->exception.syndrome,
9326                                                 5, 5, rt);
9327             break;
9328         case EC_CP15RRTTRAP:
9329         case EC_CP14RRTTRAP:
9330             /* Similarly for MRRC/MCRR traps for Rt and Rt2 fields */
9331             rt = extract32(env->exception.syndrome, 5, 4);
9332             rt = aarch64_regnum(env, rt);
9333             env->exception.syndrome = deposit32(env->exception.syndrome,
9334                                                 5, 5, rt);
9335             rt = extract32(env->exception.syndrome, 10, 4);
9336             rt = aarch64_regnum(env, rt);
9337             env->exception.syndrome = deposit32(env->exception.syndrome,
9338                                                 10, 5, rt);
9339             break;
9340         }
9341         env->cp15.esr_el[new_el] = env->exception.syndrome;
9342         break;
9343     case EXCP_IRQ:
9344     case EXCP_VIRQ:
9345     case EXCP_NMI:
9346     case EXCP_VINMI:
9347         addr += 0x80;
9348         break;
9349     case EXCP_FIQ:
9350     case EXCP_VFIQ:
9351     case EXCP_VFNMI:
9352         addr += 0x100;
9353         break;
9354     case EXCP_VSERR:
9355         addr += 0x180;
9356         /* Construct the SError syndrome from IDS and ISS fields. */
9357         env->exception.syndrome = syn_serror(env->cp15.vsesr_el2 & 0x1ffffff);
9358         env->cp15.esr_el[new_el] = env->exception.syndrome;
9359         break;
9360     default:
9361         cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
9362     }
9363 
9364     if (is_a64(env)) {
9365         old_mode = pstate_read(env);
9366         aarch64_save_sp(env, arm_current_el(env));
9367         env->elr_el[new_el] = env->pc;
9368 
9369         if (cur_el == 1 && new_el == 1) {
9370             uint64_t hcr = arm_hcr_el2_eff(env);
9371             if ((hcr & (HCR_NV | HCR_NV1 | HCR_NV2)) == HCR_NV ||
9372                 (hcr & (HCR_NV | HCR_NV2)) == (HCR_NV | HCR_NV2)) {
9373                 /*
9374                  * FEAT_NV, FEAT_NV2 may need to report EL2 in the SPSR
9375                  * by setting M[3:2] to 0b10.
9376                  * If NV2 is disabled, change SPSR when NV,NV1 == 1,0 (I_ZJRNN)
9377                  * If NV2 is enabled, change SPSR when NV is 1 (I_DBTLM)
9378                  */
9379                 old_mode = deposit64(old_mode, 2, 2, 2);
9380             }
9381         }
9382     } else {
9383         old_mode = cpsr_read_for_spsr_elx(env);
9384         env->elr_el[new_el] = env->regs[15];
9385 
9386         aarch64_sync_32_to_64(env);
9387 
9388         env->condexec_bits = 0;
9389     }
9390     env->banked_spsr[aarch64_banked_spsr_index(new_el)] = old_mode;
9391 
9392     qemu_log_mask(CPU_LOG_INT, "...with SPSR 0x%" PRIx64 "\n", old_mode);
9393     qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n",
9394                   env->elr_el[new_el]);
9395 
9396     if (cpu_isar_feature(aa64_pan, cpu)) {
9397         /* The value of PSTATE.PAN is normally preserved, except when ... */
9398         new_mode |= old_mode & PSTATE_PAN;
9399         switch (new_el) {
9400         case 2:
9401             /* ... the target is EL2 with HCR_EL2.{E2H,TGE} == '11' ...  */
9402             if ((arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE))
9403                 != (HCR_E2H | HCR_TGE)) {
9404                 break;
9405             }
9406             /* fall through */
9407         case 1:
9408             /* ... the target is EL1 ... */
9409             /* ... and SCTLR_ELx.SPAN == 0, then set to 1.  */
9410             if ((env->cp15.sctlr_el[new_el] & SCTLR_SPAN) == 0) {
9411                 new_mode |= PSTATE_PAN;
9412             }
9413             break;
9414         }
9415     }
9416     if (cpu_isar_feature(aa64_mte, cpu)) {
9417         new_mode |= PSTATE_TCO;
9418     }
9419 
9420     if (cpu_isar_feature(aa64_ssbs, cpu)) {
9421         if (env->cp15.sctlr_el[new_el] & SCTLR_DSSBS_64) {
9422             new_mode |= PSTATE_SSBS;
9423         } else {
9424             new_mode &= ~PSTATE_SSBS;
9425         }
9426     }
9427 
9428     if (cpu_isar_feature(aa64_nmi, cpu)) {
9429         if (!(env->cp15.sctlr_el[new_el] & SCTLR_SPINTMASK)) {
9430             new_mode |= PSTATE_ALLINT;
9431         } else {
9432             new_mode &= ~PSTATE_ALLINT;
9433         }
9434     }
9435 
9436     pstate_write(env, PSTATE_DAIF | new_mode);
9437     env->aarch64 = true;
9438     aarch64_restore_sp(env, new_el);
9439 
9440     if (tcg_enabled()) {
9441         helper_rebuild_hflags_a64(env, new_el);
9442     }
9443 
9444     env->pc = addr;
9445 
9446     qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64
9447                   " PSTATE 0x%" PRIx64 "\n",
9448                   new_el, env->pc, pstate_read(env));
9449 }
9450 
9451 /*
9452  * Do semihosting call and set the appropriate return value. All the
9453  * permission and validity checks have been done at translate time.
9454  *
9455  * We only see semihosting exceptions in TCG, as they are not
9456  * trapped to the hypervisor in KVM.
9457  */
9458 #ifdef CONFIG_TCG
9459 static void tcg_handle_semihosting(CPUState *cs)
9460 {
9461     ARMCPU *cpu = ARM_CPU(cs);
9462     CPUARMState *env = &cpu->env;
9463 
9464     if (is_a64(env)) {
9465         qemu_log_mask(CPU_LOG_INT,
9466                       "...handling as semihosting call 0x%" PRIx64 "\n",
9467                       env->xregs[0]);
9468         do_common_semihosting(cs);
9469         env->pc += 4;
9470     } else {
9471         qemu_log_mask(CPU_LOG_INT,
9472                       "...handling as semihosting call 0x%x\n",
9473                       env->regs[0]);
9474         do_common_semihosting(cs);
9475         env->regs[15] += env->thumb ? 2 : 4;
9476     }
9477 }
9478 #endif
9479 
9480 /*
9481  * Handle a CPU exception for A and R profile CPUs.
9482  * Do any appropriate logging, handle PSCI calls, and then hand off
9483  * to the AArch64-entry or AArch32-entry function depending on the
9484  * target exception level's register width.
9485  *
9486  * Note: this is used both by TCG (as the do_interrupt tcg op)
9487  *       and by KVM, to re-inject guest debug exceptions and to
9488  *       inject a Synchronous External Abort.
9489  */
9490 void arm_cpu_do_interrupt(CPUState *cs)
9491 {
9492     ARMCPU *cpu = ARM_CPU(cs);
9493     CPUARMState *env = &cpu->env;
9494     unsigned int new_el = env->exception.target_el;
9495     uint64_t last_pc = cs->cc->get_pc(cs);
9496 
9497     assert(!arm_feature(env, ARM_FEATURE_M));
9498 
9499     arm_log_exception(cs);
9500     qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env),
9501                   new_el);
9502     if (qemu_loglevel_mask(CPU_LOG_INT)
9503         && !excp_is_internal(cs->exception_index)) {
9504         qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx64 "\n",
9505                       syn_get_ec(env->exception.syndrome),
9506                       env->exception.syndrome);
9507     }
9508 
9509     if (tcg_enabled() && arm_is_psci_call(cpu, cs->exception_index)) {
9510         arm_handle_psci_call(cpu);
9511         qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
9512         qemu_plugin_vcpu_hostcall_cb(cs, last_pc);
9513         return;
9514     }
9515 
9516     /*
9517      * Semihosting semantics depend on the register width of the code
9518      * that caused the exception, not the target exception level, so
9519      * must be handled here.
9520      */
9521 #ifdef CONFIG_TCG
9522     if (cs->exception_index == EXCP_SEMIHOST) {
9523         tcg_handle_semihosting(cs);
9524         qemu_plugin_vcpu_hostcall_cb(cs, last_pc);
9525         return;
9526     }
9527 #endif
9528 
9529     /*
9530      * Hooks may change global state, so the BQL should be held; it
9531      * is also required for any modification of
9532      * cs->interrupt_request.
9533      */
9534     g_assert(bql_locked());
9535 
9536     arm_call_pre_el_change_hook(cpu);
9537 
9538     assert(!excp_is_internal(cs->exception_index));
9539     if (arm_el_is_aa64(env, new_el)) {
9540         arm_cpu_do_interrupt_aarch64(cs);
9541     } else {
9542         arm_cpu_do_interrupt_aarch32(cs);
9543     }
9544 
9545     arm_call_el_change_hook(cpu);
9546 
9547     if (!kvm_enabled()) {
9548         cpu_set_interrupt(cs, CPU_INTERRUPT_EXITTB);
9549     }
9550 
9551     arm_do_plugin_vcpu_discon_cb(cs, last_pc);
9552 }
9553 #endif /* !CONFIG_USER_ONLY */
9554 
9555 uint64_t arm_sctlr(CPUARMState *env, int el)
9556 {
9557     /* Only EL0 needs to be adjusted for EL1&0 or EL2&0 or EL3&0 */
9558     if (el == 0) {
9559         ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, 0);
9560         switch (mmu_idx) {
9561         case ARMMMUIdx_E20_0:
9562             el = 2;
9563             break;
9564         case ARMMMUIdx_E30_0:
9565             el = 3;
9566             break;
9567         default:
9568             el = 1;
9569             break;
9570         }
9571     }
9572     return env->cp15.sctlr_el[el];
9573 }
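
/*
 * For example, with HCR_EL2.{E2H,TGE} == '11' an EL0 access resolves
 * to ARMMMUIdx_E20_0 above, so the effective control register is
 * SCTLR_EL2 rather than SCTLR_EL1.
 */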
9574 
9575 int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx)
9576 {
9577     if (regime_has_2_ranges(mmu_idx)) {
9578         return extract64(tcr, 37, 2);
9579     } else if (regime_is_stage2(mmu_idx)) {
9580         return 0; /* VTCR_EL2 */
9581     } else {
9582         /* Replicate the single TBI bit so we always have 2 bits.  */
9583         return extract32(tcr, 20, 1) * 3;
9584     }
9585 }
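
/*
 * The "* 3" above replicates the single control bit into 0b00 or 0b11,
 * so that callers can uniformly extract the relevant bit with
 * "(tbi >> select) & 1" whether the regime has one VA range or two.
 * TBID and TCMA below use the same trick.
 */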
9586 
9587 int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx)
9588 {
9589     if (regime_has_2_ranges(mmu_idx)) {
9590         return extract64(tcr, 51, 2);
9591     } else if (regime_is_stage2(mmu_idx)) {
9592         return 0; /* VTCR_EL2 */
9593     } else {
9594         /* Replicate the single TBID bit so we always have 2 bits.  */
9595         return extract32(tcr, 29, 1) * 3;
9596     }
9597 }
9598 
9599 int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx)
9600 {
9601     if (regime_has_2_ranges(mmu_idx)) {
9602         return extract64(tcr, 57, 2);
9603     } else {
9604         /* Replicate the single TCMA bit so we always have 2 bits.  */
9605         return extract32(tcr, 30, 1) * 3;
9606     }
9607 }
9608 
9609 static ARMGranuleSize tg0_to_gran_size(int tg)
9610 {
9611     switch (tg) {
9612     case 0:
9613         return Gran4K;
9614     case 1:
9615         return Gran64K;
9616     case 2:
9617         return Gran16K;
9618     default:
9619         return GranInvalid;
9620     }
9621 }
9622 
9623 static ARMGranuleSize tg1_to_gran_size(int tg)
9624 {
9625     switch (tg) {
9626     case 1:
9627         return Gran16K;
9628     case 2:
9629         return Gran4K;
9630     case 3:
9631         return Gran64K;
9632     default:
9633         return GranInvalid;
9634     }
9635 }
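
/*
 * Summary of the two encodings, which the architecture defines
 * differently for TG0 and TG1:
 *   value:    0        1      2        3
 *   TG0:     4K      64K    16K    invalid
 *   TG1:  invalid    16K     4K      64K
 */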
9636 
9637 static inline bool have4k(ARMCPU *cpu, bool stage2)
9638 {
9639     return stage2 ? cpu_isar_feature(aa64_tgran4_2, cpu)
9640         : cpu_isar_feature(aa64_tgran4, cpu);
9641 }
9642 
9643 static inline bool have16k(ARMCPU *cpu, bool stage2)
9644 {
9645     return stage2 ? cpu_isar_feature(aa64_tgran16_2, cpu)
9646         : cpu_isar_feature(aa64_tgran16, cpu);
9647 }
9648 
9649 static inline bool have64k(ARMCPU *cpu, bool stage2)
9650 {
9651     return stage2 ? cpu_isar_feature(aa64_tgran64_2, cpu)
9652         : cpu_isar_feature(aa64_tgran64, cpu);
9653 }
9654 
9655 static ARMGranuleSize sanitize_gran_size(ARMCPU *cpu, ARMGranuleSize gran,
9656                                          bool stage2)
9657 {
9658     switch (gran) {
9659     case Gran4K:
9660         if (have4k(cpu, stage2)) {
9661             return gran;
9662         }
9663         break;
9664     case Gran16K:
9665         if (have16k(cpu, stage2)) {
9666             return gran;
9667         }
9668         break;
9669     case Gran64K:
9670         if (have64k(cpu, stage2)) {
9671             return gran;
9672         }
9673         break;
9674     case GranInvalid:
9675         break;
9676     }
9677     /*
9678      * If the guest selects a granule size that isn't implemented,
9679      * the architecture requires that we behave as if it selected one
9680      * that is (with an IMPDEF choice of which one to pick). We choose
9681      * to implement the smallest supported granule size.
9682      */
9683     if (have4k(cpu, stage2)) {
9684         return Gran4K;
9685     }
9686     if (have16k(cpu, stage2)) {
9687         return Gran16K;
9688     }
9689     assert(have64k(cpu, stage2));
9690     return Gran64K;
9691 }
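
/*
 * For example, a guest that programs TG0 = 2 (16K pages) on a CPU
 * without 16K support is treated as if it had selected 4K, assuming
 * 4K is implemented.
 */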
9692 
9693 ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
9694                                    ARMMMUIdx mmu_idx, bool data,
9695                                    bool el1_is_aa32)
9696 {
9697     uint64_t tcr = regime_tcr(env, mmu_idx);
9698     bool epd, hpd, tsz_oob, ds, ha, hd, pie = false;
9699     bool aie = false;
9700     int select, tsz, tbi, max_tsz, min_tsz, ps, sh;
9701     ARMGranuleSize gran;
9702     ARMCPU *cpu = env_archcpu(env);
9703     bool stage2 = regime_is_stage2(mmu_idx);
9704     int r_el = regime_el(mmu_idx);
9705 
9706     if (!regime_has_2_ranges(mmu_idx)) {
9707         select = 0;
9708         tsz = extract32(tcr, 0, 6);
9709         gran = tg0_to_gran_size(extract32(tcr, 14, 2));
9710         if (stage2) {
9711             /*
9712              * Stage2 does not have hierarchical permissions.
9713              * Thus disabling them makes things easier during ptw.
9714              */
9715             hpd = true;
9716             pie = extract64(tcr, 36, 1) && cpu_isar_feature(aa64_s2pie, cpu);
9717         } else {
9718             hpd = extract32(tcr, 24, 1);
9719             if (r_el == 3) {
9720                 pie = (extract64(tcr, 35, 1)
9721                        && cpu_isar_feature(aa64_s1pie, cpu));
9722                 aie = (extract64(tcr, 37, 1)
9723                        && cpu_isar_feature(aa64_aie, cpu));
9724             } else if (!arm_feature(env, ARM_FEATURE_EL3)
9725                        || (env->cp15.scr_el3 & SCR_TCR2EN)) {
9726                 pie = env->cp15.tcr2_el[2] & TCR2_PIE;
9727                 aie = env->cp15.tcr2_el[2] & TCR2_AIE;
9728             }
9729         }
9730         epd = false;
9731         sh = extract32(tcr, 12, 2);
9732         ps = extract32(tcr, 16, 3);
9733         ha = extract32(tcr, 21, 1) && cpu_isar_feature(aa64_hafs, cpu);
9734         hd = extract32(tcr, 22, 1) && cpu_isar_feature(aa64_hdbs, cpu);
9735         ds = extract64(tcr, 32, 1);
9736     } else {
9737         bool e0pd;
9738 
9739         /*
9740          * Bit 55 is always between the two regions, and is canonical for
9741          * determining if address tagging is enabled.
9742          */
9743         select = extract64(va, 55, 1);
9744         if (!select) {
9745             tsz = extract32(tcr, 0, 6);
9746             gran = tg0_to_gran_size(extract32(tcr, 14, 2));
9747             epd = extract32(tcr, 7, 1);
9748             sh = extract32(tcr, 12, 2);
9749             hpd = extract64(tcr, 41, 1);
9750             e0pd = extract64(tcr, 55, 1);
9751         } else {
9752             tsz = extract32(tcr, 16, 6);
9753             gran = tg1_to_gran_size(extract32(tcr, 30, 2));
9754             epd = extract32(tcr, 23, 1);
9755             sh = extract32(tcr, 28, 2);
9756             hpd = extract64(tcr, 42, 1);
9757             e0pd = extract64(tcr, 56, 1);
9758         }
9759         ps = extract64(tcr, 32, 3);
9760         ha = extract64(tcr, 39, 1) && cpu_isar_feature(aa64_hafs, cpu);
9761         hd = extract64(tcr, 40, 1) && cpu_isar_feature(aa64_hdbs, cpu);
9762         ds = extract64(tcr, 59, 1);
9763 
9764         if (e0pd && cpu_isar_feature(aa64_e0pd, cpu) &&
9765             regime_is_user(mmu_idx)) {
9766             epd = true;
9767         }
9768 
9769         if ((!arm_feature(env, ARM_FEATURE_EL3)
9770              || (env->cp15.scr_el3 & SCR_TCR2EN))
9771             && (r_el == 2 || (arm_hcrx_el2_eff(env) & HCRX_TCR2EN))) {
9772             pie = env->cp15.tcr2_el[r_el] & TCR2_PIE;
9773             aie = env->cp15.tcr2_el[r_el] & TCR2_AIE;
9774         }
9775     }
9776     hpd |= pie;
9777 
9778     gran = sanitize_gran_size(cpu, gran, stage2);
9779 
9780     if (cpu_isar_feature(aa64_st, cpu)) {
9781         max_tsz = 48 - (gran == Gran64K);
9782     } else {
9783         max_tsz = 39;
9784     }
9785 
9786     /*
9787      * DS is RES0 unless FEAT_LPA2 is supported for the given page size;
9788      * adjust the effective value of DS, as documented.
9789      */
9790     min_tsz = 16;
9791     if (gran == Gran64K) {
9792         if (cpu_isar_feature(aa64_lva, cpu)) {
9793             min_tsz = 12;
9794         }
9795         ds = false;
9796     } else if (ds) {
9797         if (regime_is_stage2(mmu_idx)) {
9798             if (gran == Gran16K) {
9799                 ds = cpu_isar_feature(aa64_tgran16_2_lpa2, cpu);
9800             } else {
9801                 ds = cpu_isar_feature(aa64_tgran4_2_lpa2, cpu);
9802             }
9803         } else {
9804             if (gran == Gran16K) {
9805                 ds = cpu_isar_feature(aa64_tgran16_lpa2, cpu);
9806             } else {
9807                 ds = cpu_isar_feature(aa64_tgran4_lpa2, cpu);
9808             }
9809         }
9810         if (ds) {
9811             min_tsz = 12;
9812         }
9813     }
9814 
9815     if (stage2 && el1_is_aa32) {
9816         /*
9817          * For AArch32 EL1 the min txsz (and thus max IPA size) requirements
9818          * are loosened: a configured IPA of 40 bits is permitted even if
9819          * the implemented PA is less than that (and so a 40 bit IPA would
9820          * fault for an AArch64 EL1). See R_DTLMN.
9821          */
9822         min_tsz = MIN(min_tsz, 24);
9823     }
9824 
9825     if (tsz > max_tsz) {
9826         tsz = max_tsz;
9827         tsz_oob = true;
9828     } else if (tsz < min_tsz) {
9829         tsz = min_tsz;
9830         tsz_oob = true;
9831     } else {
9832         tsz_oob = false;
9833     }
9834 
9835     /* Present TBI as a composite with TBID.  */
9836     tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
9837     if (!data) {
9838         tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
9839     }
9840     tbi = (tbi >> select) & 1;
9841 
9842     return (ARMVAParameters) {
9843         .tsz = tsz,
9844         .ps = ps,
9845         .sh = sh,
9846         .select = select,
9847         .tbi = tbi,
9848         .epd = epd,
9849         .hpd = hpd,
9850         .tsz_oob = tsz_oob,
9851         .ds = ds,
9852         .ha = ha,
9853         .hd = ha && hd,
9854         .gran = gran,
9855         .pie = pie,
9856         .aie = aie,
9857     };
9858 }
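
/*
 * Worked example: a typical 48-bit setup with a 4K granule and
 * TCR_ELx.T0SZ = 16 yields tsz = 16, i.e. a 2^(64 - 16) = 2^48 byte
 * lower VA range, selected (select = 0) for addresses with bit 55
 * clear.
 */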
9859 
9860 
9861 /*
9862  * Return the exception level to which FP-disabled exceptions should
9863  * be taken, or 0 if FP is enabled.
9864  */
9865 int fp_exception_el(CPUARMState *env, int cur_el)
9866 {
9867 #ifndef CONFIG_USER_ONLY
9868     uint64_t hcr_el2;
9869 
9870     /*
9871      * CPACR and the CPTR registers don't exist before v6, so FP is
9872      * always accessible
9873      */
9874     if (!arm_feature(env, ARM_FEATURE_V6)) {
9875         return 0;
9876     }
9877 
9878     if (arm_feature(env, ARM_FEATURE_M)) {
9879         /* CPACR can cause a NOCP UsageFault taken to current security state */
9880         if (!v7m_cpacr_pass(env, env->v7m.secure, cur_el != 0)) {
9881             return 1;
9882         }
9883 
9884         if (arm_feature(env, ARM_FEATURE_M_SECURITY) && !env->v7m.secure) {
9885             if (!extract32(env->v7m.nsacr, 10, 1)) {
9886                 /* FP insns cause a NOCP UsageFault taken to Secure */
9887                 return 3;
9888             }
9889         }
9890 
9891         return 0;
9892     }
9893 
9894     hcr_el2 = arm_hcr_el2_eff(env);
9895 
9896     /*
9897      * The CPACR controls traps to EL1, or PL1 if we're 32 bit:
9898      * 0, 2 : trap EL0 and EL1/PL1 accesses
9899      * 1    : trap only EL0 accesses
9900      * 3    : trap no accesses
9901      * This register is ignored if E2H+TGE are both set.
9902      */
9903     if ((hcr_el2 & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
9904         int fpen = FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, FPEN);
9905 
9906         switch (fpen) {
9907         case 1:
9908             if (cur_el != 0) {
9909                 break;
9910             }
9911             /* fall through */
9912         case 0:
9913         case 2:
9914             /* Trap from Secure PL0 or PL1 to Secure PL1. */
9915             if (!arm_el_is_aa64(env, 3)
9916                 && (cur_el == 3 || arm_is_secure_below_el3(env))) {
9917                 return 3;
9918             }
9919             if (cur_el <= 1) {
9920                 return 1;
9921             }
9922             break;
9923         }
9924     }
9925 
9926     /*
9927      * The NSACR allows A-profile AArch32 EL3 and M-profile secure mode
9928      * to control non-secure access to the FPU. It doesn't have any
9929      * effect if EL3 is AArch64 or if EL3 doesn't exist at all.
9930      */
9931     if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
9932         cur_el <= 2 && !arm_is_secure_below_el3(env)) {
9933         if (!extract32(env->cp15.nsacr, 10, 1)) {
9934             /* FP insns act as UNDEF */
9935             return cur_el == 2 ? 2 : 1;
9936         }
9937     }
9938 
9939     /*
9940      * CPTR_EL2 is present in v7VE or v8, and changes format
9941      * with HCR_EL2.E2H (regardless of TGE).
9942      */
9943     if (cur_el <= 2) {
9944         if (hcr_el2 & HCR_E2H) {
9945             switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, FPEN)) {
9946             case 1:
9947                 if (cur_el != 0 || !(hcr_el2 & HCR_TGE)) {
9948                     break;
9949                 }
9950                 /* fall through */
9951             case 0:
9952             case 2:
9953                 return 2;
9954             }
9955         } else if (arm_is_el2_enabled(env)) {
9956             if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TFP)) {
9957                 return 2;
9958             }
9959         }
9960     }
9961 
9962     /* CPTR_EL3 : present in v8 */
9963     if (FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TFP)) {
9964         /* Trap all FP ops to EL3 */
9965         return 3;
9966     }
9967 #endif
9968     return 0;
9969 }
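
/*
 * Note the ordering of the checks above: CPACR_EL1, then NSACR, then
 * CPTR_EL2, then CPTR_EL3; the first control that traps determines
 * the exception level returned.
 */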
9970 
9971 #ifndef CONFIG_TCG
9972 ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
9973 {
9974     g_assert_not_reached();
9975 }
9976 #endif
9977 
9978 ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el)
9979 {
9980     ARMMMUIdx idx;
9981     uint64_t hcr;
9982 
9983     if (arm_feature(env, ARM_FEATURE_M)) {
9984         return arm_v7m_mmu_idx_for_secstate(env, env->v7m.secure);
9985     }
9986 
9987     /* See ARM pseudo-function ELIsInHost.  */
9988     switch (el) {
9989     case 0:
9990         hcr = arm_hcr_el2_eff(env);
9991         if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
9992             idx = ARMMMUIdx_E20_0;
9993         } else if (arm_is_secure_below_el3(env) &&
9994                    !arm_el_is_aa64(env, 3)) {
9995             idx = ARMMMUIdx_E30_0;
9996         } else {
9997             idx = ARMMMUIdx_E10_0;
9998         }
9999         break;
10000     case 1:
10001         if (arm_pan_enabled(env)) {
10002             idx = ARMMMUIdx_E10_1_PAN;
10003         } else {
10004             idx = ARMMMUIdx_E10_1;
10005         }
10006         break;
10007     case 2:
10008         /* Note that TGE does not apply at EL2.  */
10009         if (arm_hcr_el2_eff(env) & HCR_E2H) {
10010             if (arm_pan_enabled(env)) {
10011                 idx = ARMMMUIdx_E20_2_PAN;
10012             } else {
10013                 idx = ARMMMUIdx_E20_2;
10014             }
10015         } else {
10016             idx = ARMMMUIdx_E2;
10017         }
10018         break;
10019     case 3:
10020         if (!arm_el_is_aa64(env, 3) && arm_pan_enabled(env)) {
10021             return ARMMMUIdx_E30_3_PAN;
10022         }
10023         return ARMMMUIdx_E3;
10024     default:
10025         g_assert_not_reached();
10026     }
10027 
10028     return idx;
10029 }
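
/*
 * For example, EL1 with PAN enabled maps to ARMMMUIdx_E10_1_PAN, so
 * PAN-restricted translations live in a different softmmu TLB from
 * the plain ARMMMUIdx_E10_1 ones.
 */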
10030 
10031 ARMMMUIdx arm_mmu_idx(CPUARMState *env)
10032 {
10033     return arm_mmu_idx_el(env, arm_current_el(env));
10034 }
10035 
10036 /*
10037  * The manual says that when SVE is enabled and VQ is widened the
10038  * implementation is allowed to zero the previously inaccessible
10039  * portion of the registers.  The corollary to that is that when
10040  * SVE is enabled and VQ is narrowed we are also allowed to zero
10041  * the now inaccessible portion of the registers.
10042  *
10043  * The intent of this is that no predicate bit beyond VQ is ever set,
10044  * which means that some operations on predicate registers themselves
10045  * may operate on full uint64_t or even unrolled across the maximum
10046  * uint64_t[4].  Performing 4 uint64_t's of host arithmetic unconditionally
10047  * may well be cheaper than conditionals to restrict the operation
10048  * to the relevant portion of a uint16_t[16].
10049  */
10050 void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq)
10051 {
10052     int i, j;
10053     uint64_t pmask;
10054 
10055     assert(vq >= 1 && vq <= ARM_MAX_VQ);
10056     assert(vq <= env_archcpu(env)->sve_max_vq);
10057 
10058     /* Zap the high bits of the zregs.  */
10059     for (i = 0; i < 32; i++) {
10060         memset(&env->vfp.zregs[i].d[2 * vq], 0, 16 * (ARM_MAX_VQ - vq));
10061     }
10062 
10063     /* Zap the high bits of the pregs and ffr.  */
10064     pmask = 0;
10065     if (vq & 3) {
10066         pmask = ~(-1ULL << (16 * (vq & 3)));
10067     }
10068     for (j = vq / 4; j < ARM_MAX_VQ / 4; j++) {
10069         for (i = 0; i < 17; ++i) {
10070             env->vfp.pregs[i].p[j] &= pmask;
10071         }
10072         pmask = 0;
10073     }
10074 }
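
/*
 * Worked example: narrowing to vq = 3 (384-bit vectors) zeroes
 * zregs[i].d[6] and up.  Each predicate register holds one bit per
 * vector byte, i.e. 16 * vq = 48 bits here, so pmask becomes
 * ~(-1ULL << 48): p[0] keeps its low 48 bits and p[1..3] are cleared.
 */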
10075 
10076 static uint32_t sve_vqm1_for_el_sm_ena(CPUARMState *env, int el, bool sm)
10077 {
10078     int exc_el;
10079 
10080     if (sm) {
10081         exc_el = sme_exception_el(env, el);
10082     } else {
10083         exc_el = sve_exception_el(env, el);
10084     }
10085     if (exc_el) {
10086         return 0; /* disabled */
10087     }
10088     return sve_vqm1_for_el_sm(env, el, sm);
10089 }
10090 
10091 /*
10092  * Notice a change in SVE vector size when changing EL.
10093  */
10094 void aarch64_sve_change_el(CPUARMState *env, int old_el,
10095                            int new_el, bool el0_a64)
10096 {
10097     ARMCPU *cpu = env_archcpu(env);
10098     int old_len, new_len;
10099     bool old_a64, new_a64, sm;
10100 
10101     /* Nothing to do if no SVE.  */
10102     if (!cpu_isar_feature(aa64_sve, cpu)) {
10103         return;
10104     }
10105 
10106     /* Nothing to do if FP is disabled in either EL.  */
10107     if (fp_exception_el(env, old_el) || fp_exception_el(env, new_el)) {
10108         return;
10109     }
10110 
10111     old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64;
10112     new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64;
10113 
10114     /*
10115      * Both AArch64.TakeException and AArch64.ExceptionReturn
10116      * invoke ResetSVEState when taking an exception from, or
10117      * returning to, AArch32 state when PSTATE.SM is enabled.
10118      */
10119     sm = FIELD_EX64(env->svcr, SVCR, SM);
10120     if (old_a64 != new_a64 && sm) {
10121         arm_reset_sve_state(env);
10122         return;
10123     }
10124 
10125     /*
10126      * DDI0584A.d sec 3.2: "If SVE instructions are disabled or trapped
10127      * at ELx, or not available because the EL is in AArch32 state, then
10128      * for all purposes other than a direct read, the ZCR_ELx.LEN field
10129      * has an effective value of 0".
10130      *
10131      * Consider EL2 (aa64, vq=4) -> EL0 (aa32) -> EL1 (aa64, vq=0).
10132      * If we ignore aa32 state, we would fail to see the vq4->vq0 transition
10133      * from EL2->EL1.  Thus we go ahead and narrow when entering aa32 so that
10134      * we already have the correct register contents when encountering the
10135      * vq0->vq0 transition between EL0->EL1.
10136      */
10137     old_len = new_len = 0;
10138     if (old_a64) {
10139         old_len = sve_vqm1_for_el_sm_ena(env, old_el, sm);
10140     }
10141     if (new_a64) {
10142         new_len = sve_vqm1_for_el_sm_ena(env, new_el, sm);
10143     }
10144 
10145     /* When changing vector length, clear inaccessible state.  */
10146     if (new_len < old_len) {
10147         aarch64_sve_narrow_vq(env, new_len + 1);
10148     }
10149 }
10150 
10151 #ifndef CONFIG_USER_ONLY
10152 ARMSecuritySpace arm_security_space(CPUARMState *env)
10153 {
10154     if (arm_feature(env, ARM_FEATURE_M)) {
10155         return arm_secure_to_space(env->v7m.secure);
10156     }
10157 
10158     /*
10159      * If EL3 is not supported then the secure state is implementation
10160      * defined, in which case QEMU defaults to non-secure.
10161      */
10162     if (!arm_feature(env, ARM_FEATURE_EL3)) {
10163         return ARMSS_NonSecure;
10164     }
10165 
10166     /* Check for AArch64 EL3 or AArch32 Mon. */
10167     if (is_a64(env)) {
10168         if (extract32(env->pstate, 2, 2) == 3) {
10169             if (cpu_isar_feature(aa64_rme, env_archcpu(env))) {
10170                 return ARMSS_Root;
10171             } else {
10172                 return ARMSS_Secure;
10173             }
10174         }
10175     } else {
10176         if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
10177             return ARMSS_Secure;
10178         }
10179     }
10180 
10181     return arm_security_space_below_el3(env);
10182 }
10183 
10184 ARMSecuritySpace arm_security_space_below_el3(CPUARMState *env)
10185 {
10186     assert(!arm_feature(env, ARM_FEATURE_M));
10187 
10188     /*
10189      * If EL3 is not supported then the secure state is implementation
10190      * defined, in which case QEMU defaults to non-secure.
10191      */
10192     if (!arm_feature(env, ARM_FEATURE_EL3)) {
10193         return ARMSS_NonSecure;
10194     }
10195 
10196     /*
10197      * Note NSE cannot be set without RME, and NSE & !NS is Reserved.
10198      * Ignoring NSE when !NS retains consistency without having to
10199      * modify other predicates.
10200      */
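    /*
     * The resulting mapping of SCR_EL3.{NSE,NS} is thus: 00 -> Secure,
     * 01 -> NonSecure, 11 -> Realm, with the reserved 10 encoding
     * handled as Secure.
     */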
10201     if (!(env->cp15.scr_el3 & SCR_NS)) {
10202         return ARMSS_Secure;
10203     } else if (env->cp15.scr_el3 & SCR_NSE) {
10204         return ARMSS_Realm;
10205     } else {
10206         return ARMSS_NonSecure;
10207     }
10208 }
10209 #endif /* !CONFIG_USER_ONLY */
10210