/*
 * ARM generic helpers.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "trace.h"
#include "cpu.h"
#include "internals.h"
#include "cpu-features.h"
#include "exec/helper-proto.h"
#include "qemu/main-loop.h"
#include "qemu/timer.h"
#include "qemu/bitops.h"
#include "qemu/crc32c.h"
#include "qemu/qemu-print.h"
#include "exec/exec-all.h"
#include <zlib.h> /* For crc32 */
#include "hw/irq.h"
#include "sysemu/cpu-timers.h"
#include "sysemu/kvm.h"
#include "sysemu/tcg.h"
#include "qapi/error.h"
#include "qemu/guest-random.h"
#ifdef CONFIG_TCG
#include "semihosting/common-semi.h"
#endif
#include "cpregs.h"
#include "target/arm/gtimer.h"

#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */

static void switch_mode(CPUARMState *env, int mode);

static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        return CPREG_FIELD64(env, ri);
    } else {
        return CPREG_FIELD32(env, ri);
    }
}

void raw_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        CPREG_FIELD64(env, ri) = value;
    } else {
        CPREG_FIELD32(env, ri) = value;
    }
}

static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return (char *)env + ri->fieldoffset;
}

uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Raw read of a coprocessor register (as needed for migration, etc). */
    if (ri->type & ARM_CP_CONST) {
        return ri->resetvalue;
    } else if (ri->raw_readfn) {
        return ri->raw_readfn(env, ri);
    } else if (ri->readfn) {
        return ri->readfn(env, ri);
    } else {
        return raw_read(env, ri);
    }
}

static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t v)
{
    /*
     * Raw write of a coprocessor register (as needed for migration, etc).
     * Note that constant registers are treated as write-ignored; the
     * caller should check for success by whether a readback gives the
     * value written.
     */
    if (ri->type & ARM_CP_CONST) {
        return;
    } else if (ri->raw_writefn) {
        ri->raw_writefn(env, ri, v);
    } else if (ri->writefn) {
        ri->writefn(env, ri, v);
    } else {
        raw_write(env, ri, v);
    }
}
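
/*
 * Illustrative sketch (not part of the original file): the comment above
 * says a caller must detect a write-ignored constant register by reading
 * the value back. A hypothetical checked-write helper would look like:
 *
 *   static bool raw_write_checked(CPUARMState *env, const ARMCPRegInfo *ri,
 *                                 uint64_t v)
 *   {
 *       write_raw_cp_reg(env, ri, v);          // may be silently ignored
 *       return read_raw_cp_reg(env, ri) == v;  // success iff the value stuck
 *   }
 *
 * write_list_to_cpustate() below applies exactly this pattern inline.
 */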

static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
{
    /*
     * Return true if the regdef would cause an assertion if you called
     * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
     * program bug for it not to have the NO_RAW flag).
     * NB that returning false here doesn't necessarily mean that calling
     * read/write_raw_cp_reg() is safe, because we can't distinguish "has
     * read/write access functions which are safe for raw use" from "has
     * read/write access functions which have side effects but has forgotten
     * to provide raw access functions".
     * The tests here line up with the conditions in read/write_raw_cp_reg()
     * and assertions in raw_read()/raw_write().
     */
    if ((ri->type & ARM_CP_CONST) ||
        ri->fieldoffset ||
        ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
        return false;
    }
    return true;
}

bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync)
{
    /* Write the coprocessor state from cpu->env to the (index,value) list. */
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        const ARMCPRegInfo *ri;
        uint64_t newval;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }

        newval = read_raw_cp_reg(&cpu->env, ri);
        if (kvm_sync) {
            /*
             * Only sync if the previous list->cpustate sync succeeded.
             * Rather than tracking the success/failure state for every
             * item in the list, we just recheck "does the raw write we must
             * have made in write_list_to_cpustate() read back OK" here.
             */
            uint64_t oldval = cpu->cpreg_values[i];

            if (oldval == newval) {
                continue;
            }

            write_raw_cp_reg(&cpu->env, ri, oldval);
            if (read_raw_cp_reg(&cpu->env, ri) != oldval) {
                continue;
            }

            write_raw_cp_reg(&cpu->env, ri, newval);
        }
        cpu->cpreg_values[i] = newval;
    }
    return ok;
}

bool write_list_to_cpustate(ARMCPU *cpu)
{
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        uint64_t v = cpu->cpreg_values[i];
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        /*
         * Write value and confirm it reads back as written
         * (to catch read-only registers and partially read-only
         * registers where the incoming migration value doesn't match)
         */
        write_raw_cp_reg(&cpu->env, ri, v);
        if (read_raw_cp_reg(&cpu->env, ri) != v) {
            ok = false;
        }
    }
    return ok;
}
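
/*
 * Usage sketch (illustrative, not part of the original file): QEMU's cpreg
 * migration code brackets save/restore with the two helpers above, roughly:
 *
 *   // before saving the VM state: env -> cpreg_values[]
 *   if (!write_cpustate_to_list(cpu, false)) {
 *       // a register's raw state could not be captured: fail the migration
 *   }
 *   // after loading the VM state: cpreg_values[] -> env
 *   if (!write_list_to_cpustate(cpu)) {
 *       // an incoming value didn't read back as written: fail the migration
 *   }
 */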

static void add_cpreg_to_list(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint32_t regidx = (uintptr_t)key;
    const ARMCPRegInfo *ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) {
        cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
        /* The value array need not be initialized at this point */
        cpu->cpreg_array_len++;
    }
}

static void count_cpreg(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    const ARMCPRegInfo *ri;

    ri = g_hash_table_lookup(cpu->cp_regs, key);

    if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) {
        cpu->cpreg_array_len++;
    }
}

static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
{
    uint64_t aidx = cpreg_to_kvm_id((uintptr_t)a);
    uint64_t bidx = cpreg_to_kvm_id((uintptr_t)b);

    if (aidx > bidx) {
        return 1;
    }
    if (aidx < bidx) {
        return -1;
    }
    return 0;
}

void init_cpreg_list(ARMCPU *cpu)
{
    /*
     * Initialise the cpreg_tuples[] array based on the cp_regs hash.
     * Note that we require cpreg_tuples[] to be sorted by key ID.
     */
    GList *keys;
    int arraylen;

    keys = g_hash_table_get_keys(cpu->cp_regs);
    keys = g_list_sort(keys, cpreg_key_compare);

    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, count_cpreg, cpu);

    arraylen = cpu->cpreg_array_len;
    cpu->cpreg_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, add_cpreg_to_list, cpu);

    assert(cpu->cpreg_array_len == arraylen);

    g_list_free(keys);
}

static bool arm_pan_enabled(CPUARMState *env)
{
    if (is_a64(env)) {
        if ((arm_hcr_el2_eff(env) & (HCR_NV | HCR_NV1)) == (HCR_NV | HCR_NV1)) {
            return false;
        }
        return env->pstate & PSTATE_PAN;
    } else {
        return env->uncached_cpsr & CPSR_PAN;
    }
}

/*
 * Some registers are not accessible from AArch32 EL3 if SCR.NS == 0.
 */
static CPAccessResult access_el3_aa32ns(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    if (!is_a64(env) && arm_current_el(env) == 3 &&
        arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}

/*
 * Some secure-only AArch32 registers trap to EL3 if used from
 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
 * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
 * We assume that the .access field is set to PL1_RW.
 */
static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
                                            const ARMCPRegInfo *ri,
                                            bool isread)
{
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        if (env->cp15.scr_el3 & SCR_EEL2) {
            return CP_ACCESS_TRAP_EL2;
        }
        return CP_ACCESS_TRAP_EL3;
    }
    /* This will be EL1 NS and EL2 NS, which just UNDEF */
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}

/*
 * Check for traps to performance monitor registers, which are controlled
 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
 */
static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);

    if (el < 2 && (mdcr_el2 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TVM and HCR_EL2.TRVM.  */
CPAccessResult access_tvm_trvm(CPUARMState *env, const ARMCPRegInfo *ri,
                               bool isread)
{
    if (arm_current_el(env) == 1) {
        uint64_t trap = isread ? HCR_TRVM : HCR_TVM;
        if (arm_hcr_el2_eff(env) & trap) {
            return CP_ACCESS_TRAP_EL2;
        }
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TSW.  */
static CPAccessResult access_tsw(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TSW)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TACR.  */
static CPAccessResult access_tacr(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TACR)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TTLB. */
static CPAccessResult access_ttlb(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TTLB)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TTLB or TTLBIS. */
static CPAccessResult access_ttlbis(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    if (arm_current_el(env) == 1 &&
        (arm_hcr_el2_eff(env) & (HCR_TTLB | HCR_TTLBIS))) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

#ifdef TARGET_AARCH64
/* Check for traps from EL1 due to HCR_EL2.TTLB or TTLBOS. */
static CPAccessResult access_ttlbos(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    if (arm_current_el(env) == 1 &&
        (arm_hcr_el2_eff(env) & (HCR_TTLB | HCR_TTLBOS))) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}
#endif

static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    raw_write(env, ri, value);
    tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */
}

static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (raw_read(env, ri) != value) {
        /*
         * Unlike real hardware the qemu TLB uses virtual addresses,
         * not modified virtual addresses, so this causes a TLB flush.
         */
        tlb_flush(CPU(cpu));
        raw_write(env, ri, value);
    }
}

static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA)
        && !extended_addresses_enabled(env)) {
        /*
         * For VMSA (when not using the LPAE long descriptor page table
         * format) this register includes the ASID, so do a TLB flush.
         * For PMSA it is purely a process ID and no action is needed.
         */
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}

static int alle1_tlbmask(CPUARMState *env)
{
    /*
     * Note that the 'ALL' scope must invalidate both stage 1 and
     * stage 2 translations, whereas most other scopes only invalidate
     * stage 1 translations.
     */
    return (ARMMMUIdxBit_E10_1 |
            ARMMMUIdxBit_E10_1_PAN |
            ARMMMUIdxBit_E10_0 |
            ARMMMUIdxBit_Stage2 |
            ARMMMUIdxBit_Stage2_S);
}

/* IS variants of TLB operations must affect all cores */
static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}

static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}

/*
 * Non-IS variants of TLB operations are upgraded to
 * IS versions if we are at EL1 and HCR_EL2.FB is effectively set to
 * force broadcast of these operations.
 */
static bool tlb_force_broadcast(CPUARMState *env)
{
    return arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_FB);
}

static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate all (TLBIALL) */
    CPUState *cs = env_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlb_flush_all_cpus_synced(cs);
    } else {
        tlb_flush(cs);
    }
}

static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
    CPUState *cs = env_cpu(env);

    value &= TARGET_PAGE_MASK;
    if (tlb_force_broadcast(env)) {
        tlb_flush_page_all_cpus_synced(cs, value);
    } else {
        tlb_flush_page(cs, value);
    }
}

static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate by ASID (TLBIASID) */
    CPUState *cs = env_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlb_flush_all_cpus_synced(cs);
    } else {
        tlb_flush(cs);
    }
}

static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
    CPUState *cs = env_cpu(env);

    value &= TARGET_PAGE_MASK;
    if (tlb_force_broadcast(env)) {
        tlb_flush_page_all_cpus_synced(cs, value);
    } else {
        tlb_flush_page(cs, value);
    }
}

static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx(cs, alle1_tlbmask(env));
}

static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, alle1_tlbmask(env));
}

static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E2);
}

static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E2);
}

static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E2);
}

static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_E2);
}

static void tlbiipas2_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = (value & MAKE_64BIT_MASK(0, 28)) << 12;

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_Stage2);
}

static void tlbiipas2is_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = (value & MAKE_64BIT_MASK(0, 28)) << 12;

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, ARMMMUIdxBit_Stage2);
}

static const ARMCPRegInfo cp_reginfo[] = {
    /*
     * Define the secure and non-secure FCSE identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3).  This allows
     * the secure register to be properly reset and migrated. There is also no
     * v8 EL1 version of the register so the non-secure instance stands alone.
     */
    { .name = "FCSEIDR",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    { .name = "FCSEIDR_S",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    /*
     * Define the secure and non-secure context identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3).  This allows
     * the secure register to be properly reset and migrated.  In the
     * non-secure case, the 32-bit register will have reset and migration
     * disabled during registration as it is handled by the 64-bit instance.
     */
    { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .fgt = FGT_CONTEXTIDR_EL1,
      .nv2_redirect_offset = 0x108 | NV2_REDIR_NV1,
      .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    { .name = "CONTEXTIDR_S", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
};

static const ARMCPRegInfo not_v8_cp_reginfo[] = {
    /*
     * NB: Some of these registers exist in v8 but with more precise
     * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
     */
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR",
      .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    /*
     * ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
     * For v6 and v5, these mappings are overly broad.
     */
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    /* Cache maintenance ops; some of this space may be overridden later. */
    { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
};

static const ARMCPRegInfo not_v6_cp_reginfo[] = {
    /*
     * Not all pre-v6 cores implemented this WFI, so this is slightly
     * over-broad.
     */
    { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_WFI },
};

static const ARMCPRegInfo not_v7_cp_reginfo[] = {
    /*
     * Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
     * is UNPREDICTABLE; we choose to NOP as most implementations do).
     */
    { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_WFI },
    /*
     * L1 cache lockdown. Not architectural in v6 and earlier but in practice
     * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
     * OMAPCP will override this space.
     */
    { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
      .resetvalue = 0 },
    { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
      .resetvalue = 0 },
    /* v6 doesn't have the cache ID registers but Linux reads them anyway */
    { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /*
     * We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
     * implementing it as RAZ means the "debug architecture version" bits
     * will read as a reserved value, which should cause Linux to not try
     * to use the debug hardware.
     */
    { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /*
     * MMU TLB control. Note that the wildcarding means we cover not just
     * the unified TLB ops but also the dside/iside/inner-shareable variants.
     */
    { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
      .type = ARM_CP_NO_RAW },
    { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
};

static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    uint32_t mask = 0;

    /* In ARMv8 most bits of CPACR_EL1 are RES0. */
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /*
         * ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
         * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
         * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
         */
        if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))) {
            /* VFP coprocessor: cp10 & cp11 [23:20] */
            mask |= R_CPACR_ASEDIS_MASK |
                    R_CPACR_D32DIS_MASK |
                    R_CPACR_CP11_MASK |
                    R_CPACR_CP10_MASK;

            if (!arm_feature(env, ARM_FEATURE_NEON)) {
                /* ASEDIS [31] bit is RAO/WI */
                value |= R_CPACR_ASEDIS_MASK;
            }

            /*
             * VFPv3 and upwards with NEON implement 32 double precision
             * registers (D0-D31).
             */
            if (!cpu_isar_feature(aa32_simd_r32, env_archcpu(env))) {
                /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
                value |= R_CPACR_D32DIS_MASK;
            }
        }
        value &= mask;
    }

    /*
     * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
     * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
     */
    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        mask = R_CPACR_CP11_MASK | R_CPACR_CP10_MASK;
        value = (value & ~mask) | (env->cp15.cpacr_el1 & mask);
    }

    env->cp15.cpacr_el1 = value;
}

static uint64_t cpacr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
     * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
     */
    uint64_t value = env->cp15.cpacr_el1;

    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        value &= ~(R_CPACR_CP11_MASK | R_CPACR_CP10_MASK);
    }
    return value;
}

static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * Call cpacr_write() so that we reset with the correct RAO bits set
     * for our CPU features.
     */
    cpacr_write(env, ri, 0);
}

static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* Check if CPACR accesses are to be trapped to EL2 */
        if (arm_current_el(env) == 1 && arm_is_el2_enabled(env) &&
            FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TCPAC)) {
            return CP_ACCESS_TRAP_EL2;
        /* Check if CPACR accesses are to be trapped to EL3 */
        } else if (arm_current_el(env) < 3 &&
                   FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TCPAC)) {
            return CP_ACCESS_TRAP_EL3;
        }
    }

    return CP_ACCESS_OK;
}

static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    /* Check if CPTR accesses are set to trap to EL3 */
    if (arm_current_el(env) == 2 &&
        FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TCPAC)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

static const ARMCPRegInfo v6_cp_reginfo[] = {
    /* prefetch by MVA in v6, NOP in v7 */
    { .name = "MVA_prefetch",
      .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    /*
     * We need to break the TB after ISB to execute self-modifying code
     * correctly and also to take any pending interrupts immediately.
     * So use the arm_cp_write_ignore() function instead of the ARM_CP_NOP flag.
     */
    { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
    { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
                             offsetof(CPUARMState, cp15.ifar_ns) },
      .resetvalue = 0, },
    /*
     * Watchpoint Fault Address Register: should actually only be present
     * for 1136, 1176, 11MPCore.
     */
    { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
    { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
      .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
      .fgt = FGT_CPACR_EL1,
      .nv2_redirect_offset = 0x100 | NV2_REDIR_NV1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
      .resetfn = cpacr_reset, .writefn = cpacr_write, .readfn = cpacr_read },
};

typedef struct pm_event {
    uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */
    /* If the event is supported on this CPU (used to generate PMCEID[01]) */
    bool (*supported)(CPUARMState *);
    /*
     * Retrieve the current count of the underlying event. The programmed
     * counters hold a difference from the return value of this function.
     */
    uint64_t (*get_count)(CPUARMState *);
    /*
     * Return how many nanoseconds it will take (at a minimum) for count events
     * to occur. A negative value indicates the counter will never overflow, or
     * that the counter has otherwise arranged for the overflow bit to be set
     * and the PMU interrupt to be raised on overflow.
     */
    int64_t (*ns_per_count)(uint64_t);
} pm_event;
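
/*
 * Worked example (illustrative): a programmed counter is stored as a delta
 * against get_count(). If the guest writes 100 to a counter while the
 * underlying count is 1000, we record delta = 1000 - 100 = 900; when the
 * underlying count later reads 1250, the guest-visible value is
 * 1250 - 900 = 350. pmevcntr_op_start() below applies this arithmetic.
 */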

static bool event_always_supported(CPUARMState *env)
{
    return true;
}

static uint64_t swinc_get_count(CPUARMState *env)
{
    /*
     * SW_INCR events are written directly to the pmevcntrs by writes to
     * PMSWINC, so there is no underlying count maintained by the PMU itself
     */
    return 0;
}

static int64_t swinc_ns_per(uint64_t ignored)
{
    return -1;
}

/*
 * Return the underlying cycle count for the PMU cycle counters. If we're
 * in usermode, return the host tick count instead.
 */
static uint64_t cycles_get_count(CPUARMState *env)
{
#ifndef CONFIG_USER_ONLY
    return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                   ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
#else
    return cpu_get_host_ticks();
#endif
}

#ifndef CONFIG_USER_ONLY
static int64_t cycles_ns_per(uint64_t cycles)
{
    /*
     * With ARM_CPU_FREQ fixed at 1 GHz, ARM_CPU_FREQ and
     * NANOSECONDS_PER_SECOND cancel out and one cycle takes
     * exactly one nanosecond.
     */
    return (ARM_CPU_FREQ / NANOSECONDS_PER_SECOND) * cycles;
}

static bool instructions_supported(CPUARMState *env)
{
    /* Precise instruction counting */
    return icount_enabled() == ICOUNT_PRECISE;
}

static uint64_t instructions_get_count(CPUARMState *env)
{
    assert(icount_enabled() == ICOUNT_PRECISE);
    return (uint64_t)icount_get_raw();
}

static int64_t instructions_ns_per(uint64_t icount)
{
    assert(icount_enabled() == ICOUNT_PRECISE);
    return icount_to_ns((int64_t)icount);
}
#endif

static bool pmuv3p1_events_supported(CPUARMState *env)
{
    /* For events which are supported in any v8.1 PMU */
    return cpu_isar_feature(any_pmuv3p1, env_archcpu(env));
}

static bool pmuv3p4_events_supported(CPUARMState *env)
{
    /* For events which are supported in any v8.4 PMU */
    return cpu_isar_feature(any_pmuv3p4, env_archcpu(env));
}

static uint64_t zero_event_get_count(CPUARMState *env)
{
    /* For events which on QEMU never fire, so their count is always zero */
    return 0;
}

static int64_t zero_event_ns_per(uint64_t cycles)
{
    /* An event which never fires can never overflow */
    return -1;
}

static const pm_event pm_events[] = {
    { .number = 0x000, /* SW_INCR */
      .supported = event_always_supported,
      .get_count = swinc_get_count,
      .ns_per_count = swinc_ns_per,
    },
#ifndef CONFIG_USER_ONLY
    { .number = 0x008, /* INST_RETIRED, Instruction architecturally executed */
      .supported = instructions_supported,
      .get_count = instructions_get_count,
      .ns_per_count = instructions_ns_per,
    },
    { .number = 0x011, /* CPU_CYCLES, Cycle */
      .supported = event_always_supported,
      .get_count = cycles_get_count,
      .ns_per_count = cycles_ns_per,
    },
#endif
    { .number = 0x023, /* STALL_FRONTEND */
      .supported = pmuv3p1_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
    { .number = 0x024, /* STALL_BACKEND */
      .supported = pmuv3p1_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
    { .number = 0x03c, /* STALL */
      .supported = pmuv3p4_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
};

/*
 * Note: Before increasing MAX_EVENT_ID beyond 0x3f into the 0x40xx range of
 * events (i.e. the statistical profiling extension), this implementation
 * should first be updated to something sparse instead of the current
 * supported_event_map[] array.
 */
#define MAX_EVENT_ID 0x3c
#define UNSUPPORTED_EVENT UINT16_MAX
static uint16_t supported_event_map[MAX_EVENT_ID + 1];

/*
 * Called upon CPU initialization to initialize PMCEID[01]_EL0 and build a map
 * of ARM event numbers to indices in our pm_events array.
 *
 * Note: Events in the 0x40XX range are not currently supported.
 */
void pmu_init(ARMCPU *cpu)
{
    unsigned int i;

    /*
     * Empty supported_event_map and cpu->pmceid[01] before adding supported
     * events to them
     */
    for (i = 0; i < ARRAY_SIZE(supported_event_map); i++) {
        supported_event_map[i] = UNSUPPORTED_EVENT;
    }
    cpu->pmceid0 = 0;
    cpu->pmceid1 = 0;

    for (i = 0; i < ARRAY_SIZE(pm_events); i++) {
        const pm_event *cnt = &pm_events[i];
        assert(cnt->number <= MAX_EVENT_ID);
        /* We do not currently support events in the 0x40xx range */
        assert(cnt->number <= 0x3f);

        if (cnt->supported(&cpu->env)) {
            supported_event_map[cnt->number] = i;
            uint64_t event_mask = 1ULL << (cnt->number & 0x1f);
            if (cnt->number & 0x20) {
                cpu->pmceid1 |= event_mask;
            } else {
                cpu->pmceid0 |= event_mask;
            }
        }
    }
}

/*
 * Check at runtime whether a PMU event is supported for the current machine
 */
static bool event_supported(uint16_t number)
{
    if (number > MAX_EVENT_ID) {
        return false;
    }
    return supported_event_map[number] != UNSUPPORTED_EVENT;
}
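
/*
 * Illustrative sketch (not part of the original file): reading the current
 * underlying count for an event number chains the two tables above, just
 * as pmevcntr_op_start() does later:
 *
 *   uint16_t event = 0x011;    // e.g. CPU_CYCLES
 *   uint64_t count = 0;
 *   if (event_supported(event)) {
 *       uint16_t event_idx = supported_event_map[event];
 *       count = pm_events[event_idx].get_count(env);
 *   }
 */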

static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /*
     * User accessibility of the performance monitor registers is controlled
     * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
     * trapping to EL2 or EL3 for other accesses.
     */
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);

    if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
        return CP_ACCESS_TRAP;
    }
    if (el < 2 && (mdcr_el2 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult pmreg_access_xevcntr(CPUARMState *env,
                                           const ARMCPRegInfo *ri,
                                           bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_swinc(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* SW: software increment write trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
        && !isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_selr(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* CR: cycle counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

/*
 * Bits in MDCR_EL2 and MDCR_EL3 which pmu_counter_enabled() looks at.
 * We use these to decide whether we need to wrap a write to MDCR_EL2
 * or MDCR_EL3 in pmu_op_start()/pmu_op_finish() calls.
 */
#define MDCR_EL2_PMU_ENABLE_BITS \
    (MDCR_HPME | MDCR_HPMD | MDCR_HPMN | MDCR_HCCD | MDCR_HLP)
#define MDCR_EL3_PMU_ENABLE_BITS (MDCR_SPME | MDCR_SCCD)
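
/*
 * Illustrative sketch (hypothetical code, not part of the original file):
 * a write that may flip any of the bits above must bring the counters up
 * to date under the old configuration and recompute deltas under the new
 * one, roughly:
 *
 *   if ((env->cp15.mdcr_el2 ^ value) & MDCR_EL2_PMU_ENABLE_BITS) {
 *       pmu_op_start(env);
 *       env->cp15.mdcr_el2 = value;
 *       pmu_op_finish(env);
 *   } else {
 *       env->cp15.mdcr_el2 = value;
 *   }
 */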

/*
 * Returns true if the counter (pass 31 for PMCCNTR) should count events using
 * the current EL, security state, and register configuration.
 */
static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
{
    uint64_t filter;
    bool e, p, u, nsk, nsu, nsh, m;
    bool enabled, prohibited = false, filtered;
    bool secure = arm_is_secure(env);
    int el = arm_current_el(env);
    uint64_t mdcr_el2;
    uint8_t hpmn;

    /*
     * We might be called for M-profile cores where MDCR_EL2 doesn't
     * exist and arm_mdcr_el2_eff() will assert, so this early-exit check
     * must be before we read that value.
     */
    if (!arm_feature(env, ARM_FEATURE_PMU)) {
        return false;
    }

    mdcr_el2 = arm_mdcr_el2_eff(env);
    hpmn = mdcr_el2 & MDCR_HPMN;

    if (!arm_feature(env, ARM_FEATURE_EL2) ||
            (counter < hpmn || counter == 31)) {
        e = env->cp15.c9_pmcr & PMCRE;
    } else {
        e = mdcr_el2 & MDCR_HPME;
    }
    enabled = e && (env->cp15.c9_pmcnten & (1 << counter));

    /* Is event counting prohibited? */
    if (el == 2 && (counter < hpmn || counter == 31)) {
        prohibited = mdcr_el2 & MDCR_HPMD;
    }
    if (secure) {
        prohibited = prohibited || !(env->cp15.mdcr_el3 & MDCR_SPME);
    }

    if (counter == 31) {
        /*
         * The cycle counter defaults to running. PMCR.DP says "disable
         * the cycle counter when event counting is prohibited".
         * Some MDCR bits disable the cycle counter specifically.
         */
        prohibited = prohibited && env->cp15.c9_pmcr & PMCRDP;
        if (cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
            if (secure) {
                prohibited = prohibited || (env->cp15.mdcr_el3 & MDCR_SCCD);
            }
            if (el == 2) {
                prohibited = prohibited || (mdcr_el2 & MDCR_HCCD);
            }
        }
    }

    if (counter == 31) {
        filter = env->cp15.pmccfiltr_el0;
    } else {
        filter = env->cp15.c14_pmevtyper[counter];
    }

    p   = filter & PMXEVTYPER_P;
    u   = filter & PMXEVTYPER_U;
    nsk = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSK);
    nsu = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSU);
    nsh = arm_feature(env, ARM_FEATURE_EL2) && (filter & PMXEVTYPER_NSH);
    m   = arm_el_is_aa64(env, 1) &&
              arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_M);

    if (el == 0) {
        filtered = secure ? u : u != nsu;
    } else if (el == 1) {
        filtered = secure ? p : p != nsk;
    } else if (el == 2) {
        filtered = !nsh;
    } else { /* EL3 */
        filtered = m != p;
    }

    if (counter != 31) {
        /*
         * If not checking PMCCNTR, ensure the counter is set up to an event
         * we support
         */
        uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
        if (!event_supported(event)) {
            return false;
        }
    }

    return enabled && !prohibited && !filtered;
}

static void pmu_update_irq(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
            (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
}

static bool pmccntr_clockdiv_enabled(CPUARMState *env)
{
    /*
     * Return true if the clock divider is enabled and the cycle counter
     * is supposed to tick only once every 64 clock cycles. This is
     * controlled by PMCR.D, but if PMCR.LC is set to enable the long
     * (64-bit) cycle counter PMCR.D has no effect.
     */
    return (env->cp15.c9_pmcr & (PMCRD | PMCRLC)) == PMCRD;
}
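
/*
 * Worked example (illustrative): with PMCR.D set and PMCR.LC clear, 6400
 * elapsed clock cycles advance PMCCNTR by only 6400 / 64 = 100; setting
 * PMCR.LC makes PMCR.D ineffective and all 6400 cycles are counted.
 */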

static bool pmevcntr_is_64_bit(CPUARMState *env, int counter)
{
    /* Return true if the specified event counter is configured to be 64 bit */

    /* This isn't intended to be used with the cycle counter */
    assert(counter < 31);

    if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
        return false;
    }

    if (arm_feature(env, ARM_FEATURE_EL2)) {
        /*
         * MDCR_EL2.HLP still applies even when EL2 is disabled in the
         * current security state, so we don't use arm_mdcr_el2_eff() here.
         */
        bool hlp = env->cp15.mdcr_el2 & MDCR_HLP;
        int hpmn = env->cp15.mdcr_el2 & MDCR_HPMN;

        if (counter >= hpmn) {
            return hlp;
        }
    }
    return env->cp15.c9_pmcr & PMCRLP;
}

/*
 * Ensure c15_ccnt is the guest-visible count so that operations such as
 * enabling/disabling the counter or filtering, modifying the count itself,
 * etc. can be done logically. This is essentially a no-op if the counter is
 * not enabled at the time of the call.
 */
static void pmccntr_op_start(CPUARMState *env)
{
    uint64_t cycles = cycles_get_count(env);

    if (pmu_counter_enabled(env, 31)) {
        uint64_t eff_cycles = cycles;
        if (pmccntr_clockdiv_enabled(env)) {
            eff_cycles /= 64;
        }

        uint64_t new_pmccntr = eff_cycles - env->cp15.c15_ccnt_delta;

        uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ?
                                 1ull << 63 : 1ull << 31;
        if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) {
            env->cp15.c9_pmovsr |= (1ULL << 31);
            pmu_update_irq(env);
        }

        env->cp15.c15_ccnt = new_pmccntr;
    }
    env->cp15.c15_ccnt_delta = cycles;
}

/*
 * If PMCCNTR is enabled, recalculate the delta between the clock and the
 * guest-visible count. A call to pmccntr_op_finish should follow every call to
 * pmccntr_op_start.
 */
static void pmccntr_op_finish(CPUARMState *env)
{
    if (pmu_counter_enabled(env, 31)) {
#ifndef CONFIG_USER_ONLY
        /* Calculate when the counter will next overflow */
        uint64_t remaining_cycles = -env->cp15.c15_ccnt;
        if (!(env->cp15.c9_pmcr & PMCRLC)) {
            remaining_cycles = (uint32_t)remaining_cycles;
        }
        int64_t overflow_in = cycles_ns_per(remaining_cycles);

        if (overflow_in > 0) {
            int64_t overflow_at;

            if (!sadd64_overflow(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                                 overflow_in, &overflow_at)) {
                ARMCPU *cpu = env_archcpu(env);
                timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
            }
        }
#endif

        uint64_t prev_cycles = env->cp15.c15_ccnt_delta;
        if (pmccntr_clockdiv_enabled(env)) {
            prev_cycles /= 64;
        }
        env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt;
    }
}

static void pmevcntr_op_start(CPUARMState *env, uint8_t counter)
{
    uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
    uint64_t count = 0;
    if (event_supported(event)) {
        uint16_t event_idx = supported_event_map[event];
        count = pm_events[event_idx].get_count(env);
    }

    if (pmu_counter_enabled(env, counter)) {
        uint64_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter];
        uint64_t overflow_mask = pmevcntr_is_64_bit(env, counter) ?
            1ULL << 63 : 1ULL << 31;

        if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & overflow_mask) {
            env->cp15.c9_pmovsr |= (1 << counter);
            pmu_update_irq(env);
        }
        env->cp15.c14_pmevcntr[counter] = new_pmevcntr;
    }
    env->cp15.c14_pmevcntr_delta[counter] = count;
}

static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter)
{
    if (pmu_counter_enabled(env, counter)) {
#ifndef CONFIG_USER_ONLY
        uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
        uint16_t event_idx = supported_event_map[event];
        uint64_t delta = -(env->cp15.c14_pmevcntr[counter] + 1);
        int64_t overflow_in;

        if (!pmevcntr_is_64_bit(env, counter)) {
            delta = (uint32_t)delta;
        }
        overflow_in = pm_events[event_idx].ns_per_count(delta);

        if (overflow_in > 0) {
            int64_t overflow_at;

            if (!sadd64_overflow(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                                 overflow_in, &overflow_at)) {
                ARMCPU *cpu = env_archcpu(env);
                timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
            }
        }
#endif

        env->cp15.c14_pmevcntr_delta[counter] -=
            env->cp15.c14_pmevcntr[counter];
    }
}

void pmu_op_start(CPUARMState *env)
{
    unsigned int i;
    pmccntr_op_start(env);
    for (i = 0; i < pmu_num_counters(env); i++) {
        pmevcntr_op_start(env, i);
    }
}

void pmu_op_finish(CPUARMState *env)
{
    unsigned int i;
    pmccntr_op_finish(env);
    for (i = 0; i < pmu_num_counters(env); i++) {
        pmevcntr_op_finish(env, i);
    }
}

void pmu_pre_el_change(ARMCPU *cpu, void *ignored)
{
    pmu_op_start(&cpu->env);
}

void pmu_post_el_change(ARMCPU *cpu, void *ignored)
{
    pmu_op_finish(&cpu->env);
}

void arm_pmu_timer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    /*
     * Update all the counter values based on the current underlying counts,
     * triggering interrupts to be raised, if necessary. pmu_op_finish() also
     * has the effect of setting the cpu->pmu_timer to the next earliest time a
     * counter may expire.
     */
    pmu_op_start(&cpu->env);
    pmu_op_finish(&cpu->env);
}

static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    pmu_op_start(env);

    if (value & PMCRC) {
        /* The counter has been reset */
        env->cp15.c15_ccnt = 0;
    }

    if (value & PMCRP) {
        unsigned int i;
        for (i = 0; i < pmu_num_counters(env); i++) {
            env->cp15.c14_pmevcntr[i] = 0;
        }
    }

    env->cp15.c9_pmcr &= ~PMCR_WRITABLE_MASK;
    env->cp15.c9_pmcr |= (value & PMCR_WRITABLE_MASK);

    pmu_op_finish(env);
}

static uint64_t pmcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint64_t pmcr = env->cp15.c9_pmcr;

    /*
     * If EL2 is implemented and enabled for the current security state, reads
     * of PMCR.N from EL1 or EL0 return the value of MDCR_EL2.HPMN or HDCR.HPMN.
     */
    if (arm_current_el(env) <= 1 && arm_is_el2_enabled(env)) {
        pmcr &= ~PMCRN_MASK;
        pmcr |= (env->cp15.mdcr_el2 & MDCR_HPMN) << PMCRN_SHIFT;
    }

    return pmcr;
}

static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    unsigned int i;
    uint64_t overflow_mask, new_pmswinc;

    for (i = 0; i < pmu_num_counters(env); i++) {
        /* Increment a counter's count iff: */
        if ((value & (1 << i)) && /* counter's bit is set */
                /* counter is enabled and not filtered */
                pmu_counter_enabled(env, i) &&
                /* counter is SW_INCR */
                (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
            pmevcntr_op_start(env, i);

            /*
             * Detect if this write causes an overflow since we can't predict
             * PMSWINC overflows like we can for other events
             */
            new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;

            overflow_mask = pmevcntr_is_64_bit(env, i) ?
                1ULL << 63 : 1ULL << 31;

            if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & overflow_mask) {
                env->cp15.c9_pmovsr |= (1 << i);
                pmu_update_irq(env);
            }

            env->cp15.c14_pmevcntr[i] = new_pmswinc;

            pmevcntr_op_finish(env, i);
        }
    }
}

static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint64_t ret;
    pmccntr_op_start(env);
    ret = env->cp15.c15_ccnt;
    pmccntr_op_finish(env);
    return ret;
}

static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
1568     /*
1569      * The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
1570      * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; the
1571      * SEL value is instead checked when PMXEVTYPER and PMXEVCNTR are
1572      * accessed.
1573      */
1574     env->cp15.c9_pmselr = value & 0x1f;
1575 }
1576 
1577 static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1578                         uint64_t value)
1579 {
1580     pmccntr_op_start(env);
1581     env->cp15.c15_ccnt = value;
1582     pmccntr_op_finish(env);
1583 }
1584 
1585 static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
1586                             uint64_t value)
1587 {
1588     uint64_t cur_val = pmccntr_read(env, NULL);
1589 
1590     pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
1591 }
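
/*
 * Illustrative sketch (not part of the original source): deposit64(cur,
 * 0, 32, value) keeps the upper half of the 64-bit cycle counter and
 * replaces only the low 32 bits, which is all the AArch32 view of
 * PMCCNTR may modify. The hypothetical helper below is equivalent:
 */
static inline uint64_t example_write_low32(uint64_t cur, uint32_t value)
{
    /* deposit64(cur, 0, 32, value) == (cur & top-32 mask) | value */
    return (cur & ~(uint64_t)UINT32_MAX) | value;
}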
1592 
1593 static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1594                             uint64_t value)
1595 {
1596     pmccntr_op_start(env);
1597     env->cp15.pmccfiltr_el0 = value & PMCCFILTR_EL0;
1598     pmccntr_op_finish(env);
1599 }
1600 
1601 static void pmccfiltr_write_a32(CPUARMState *env, const ARMCPRegInfo *ri,
1602                             uint64_t value)
1603 {
1604     pmccntr_op_start(env);
1605     /* M is not accessible from AArch32 */
1606     env->cp15.pmccfiltr_el0 = (env->cp15.pmccfiltr_el0 & PMCCFILTR_M) |
1607         (value & PMCCFILTR);
1608     pmccntr_op_finish(env);
1609 }
1610 
1611 static uint64_t pmccfiltr_read_a32(CPUARMState *env, const ARMCPRegInfo *ri)
1612 {
1613     /* M is not visible in AArch32 */
1614     return env->cp15.pmccfiltr_el0 & PMCCFILTR;
1615 }
1616 
1617 static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1618                             uint64_t value)
1619 {
1620     pmu_op_start(env);
1621     value &= pmu_counter_mask(env);
1622     env->cp15.c9_pmcnten |= value;
1623     pmu_op_finish(env);
1624 }
1625 
1626 static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1627                              uint64_t value)
1628 {
1629     pmu_op_start(env);
1630     value &= pmu_counter_mask(env);
1631     env->cp15.c9_pmcnten &= ~value;
1632     pmu_op_finish(env);
1633 }
1634 
1635 static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1636                          uint64_t value)
1637 {
1638     value &= pmu_counter_mask(env);
1639     env->cp15.c9_pmovsr &= ~value;
1640     pmu_update_irq(env);
1641 }
1642 
1643 static void pmovsset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1644                          uint64_t value)
1645 {
1646     value &= pmu_counter_mask(env);
1647     env->cp15.c9_pmovsr |= value;
1648     pmu_update_irq(env);
1649 }
1650 
1651 static void pmevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
1652                              uint64_t value, const uint8_t counter)
1653 {
1654     if (counter == 31) {
1655         pmccfiltr_write(env, ri, value);
1656     } else if (counter < pmu_num_counters(env)) {
1657         pmevcntr_op_start(env, counter);
1658 
1659         /*
1660          * If this counter's event type is changing, store the current
1661          * underlying count for the new type in c14_pmevcntr_delta[counter] so
1662          * pmevcntr_op_finish has the correct baseline when it converts back to
1663          * a delta.
1664          */
1665         uint16_t old_event = env->cp15.c14_pmevtyper[counter] &
1666             PMXEVTYPER_EVTCOUNT;
1667         uint16_t new_event = value & PMXEVTYPER_EVTCOUNT;
1668         if (old_event != new_event) {
1669             uint64_t count = 0;
1670             if (event_supported(new_event)) {
1671                 uint16_t event_idx = supported_event_map[new_event];
1672                 count = pm_events[event_idx].get_count(env);
1673             }
1674             env->cp15.c14_pmevcntr_delta[counter] = count;
1675         }
1676 
1677         env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK;
1678         pmevcntr_op_finish(env, counter);
1679     }
1680     /*
1681      * Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
1682      * PMSELR value is equal to or greater than the number of implemented
1683      * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
1684      */
1685 }
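
/*
 * Sketch of the delta bookkeeping above (illustrative only). Between a
 * pmevcntr_op_start()/pmevcntr_op_finish() pair, the delta slot holds
 * the underlying count sampled at op_start, and op_finish stores back
 *
 *     delta = underlying_count - register_value
 *
 * so that the next op_start can reconstruct
 *
 *     register_value = underlying_count - delta
 *
 * When the event type changes mid-pair, delta still holds the old
 * event's sample, which is why it is re-seeded from the new event's
 * count above.
 */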
1686 
1687 static uint64_t pmevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri,
1688                                const uint8_t counter)
1689 {
1690     if (counter == 31) {
1691         return env->cp15.pmccfiltr_el0;
1692     } else if (counter < pmu_num_counters(env)) {
1693         return env->cp15.c14_pmevtyper[counter];
1694     } else {
1695         /*
1696          * We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
1697          * are CONSTRAINED UNPREDICTABLE. See comments in pmevtyper_write().
1698          */
1699         return 0;
1700     }
1701 }
1702 
1703 static void pmevtyper_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
1704                               uint64_t value)
1705 {
1706     uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1707     pmevtyper_write(env, ri, value, counter);
1708 }
1709 
1710 static void pmevtyper_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
1711                                uint64_t value)
1712 {
1713     uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1714     env->cp15.c14_pmevtyper[counter] = value;
1715 
1716     /*
1717      * pmevtyper_rawwrite is called between a pair of pmu_op_start and
1718      * pmu_op_finish calls when loading saved state for a migration. Because
1719      * we're potentially updating the type of event here, the value written to
1720      * c14_pmevcntr_delta by the preceding pmu_op_start call may be for a
1721      * different counter type. Therefore, we need to set this value to the
1722      * current count for the counter type we're writing so that pmu_op_finish
1723      * has the correct count for its calculation.
1724      */
1725     uint16_t event = value & PMXEVTYPER_EVTCOUNT;
1726     if (event_supported(event)) {
1727         uint16_t event_idx = supported_event_map[event];
1728         env->cp15.c14_pmevcntr_delta[counter] =
1729             pm_events[event_idx].get_count(env);
1730     }
1731 }
1732 
1733 static uint64_t pmevtyper_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
1734 {
1735     uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1736     return pmevtyper_read(env, ri, counter);
1737 }
1738 
1739 static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
1740                              uint64_t value)
1741 {
1742     pmevtyper_write(env, ri, value, env->cp15.c9_pmselr & 31);
1743 }
1744 
1745 static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
1746 {
1747     return pmevtyper_read(env, ri, env->cp15.c9_pmselr & 31);
1748 }
1749 
1750 static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1751                              uint64_t value, uint8_t counter)
1752 {
1753     if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
1754         /* Before FEAT_PMUv3p5, top 32 bits of event counters are RES0 */
1755         value &= MAKE_64BIT_MASK(0, 32);
1756     }
1757     if (counter < pmu_num_counters(env)) {
1758         pmevcntr_op_start(env, counter);
1759         env->cp15.c14_pmevcntr[counter] = value;
1760         pmevcntr_op_finish(env, counter);
1761     }
1762     /*
1763      * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
1764      * are CONSTRAINED UNPREDICTABLE.
1765      */
1766 }
1767 
1768 static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri,
1769                               uint8_t counter)
1770 {
1771     if (counter < pmu_num_counters(env)) {
1772         uint64_t ret;
1773         pmevcntr_op_start(env, counter);
1774         ret = env->cp15.c14_pmevcntr[counter];
1775         pmevcntr_op_finish(env, counter);
1776         if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
1777             /* Before FEAT_PMUv3p5, top 32 bits of event counters are RES0 */
1778             ret &= MAKE_64BIT_MASK(0, 32);
1779         }
1780         return ret;
1781     } else {
1782         /*
1783          * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
1784          * are CONSTRAINED UNPREDICTABLE.
1785          */
1786         return 0;
1787     }
1788 }
1789 
1790 static void pmevcntr_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
1791                              uint64_t value)
1792 {
1793     uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1794     pmevcntr_write(env, ri, value, counter);
1795 }
1796 
1797 static uint64_t pmevcntr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
1798 {
1799     uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1800     return pmevcntr_read(env, ri, counter);
1801 }
1802 
1803 static void pmevcntr_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
1804                              uint64_t value)
1805 {
1806     uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1807     assert(counter < pmu_num_counters(env));
1808     env->cp15.c14_pmevcntr[counter] = value;
1809     pmevcntr_write(env, ri, value, counter);
1810 }
1811 
1812 static uint64_t pmevcntr_rawread(CPUARMState *env, const ARMCPRegInfo *ri)
1813 {
1814     uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1815     assert(counter < pmu_num_counters(env));
1816     return env->cp15.c14_pmevcntr[counter];
1817 }
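
/*
 * Illustrative sketch (helper name hypothetical): the PMEVCNTR<n> and
 * PMEVTYPER<n> encodings pack the counter index n into
 * CRm[1:0]:opc2[2:0], which is what the expression above unpacks.
 */
static inline unsigned example_pmev_index(unsigned crm, unsigned opc2)
{
    /* e.g. PMEVCNTR13_EL0: CRm == 0b1001, opc2 == 0b101  ->  n == 13 */
    return ((crm & 3) << 3) | (opc2 & 7);
}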
1818 
1819 static void pmxevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1820                              uint64_t value)
1821 {
1822     pmevcntr_write(env, ri, value, env->cp15.c9_pmselr & 31);
1823 }
1824 
1825 static uint64_t pmxevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1826 {
1827     return pmevcntr_read(env, ri, env->cp15.c9_pmselr & 31);
1828 }
1829 
1830 static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1831                             uint64_t value)
1832 {
1833     if (arm_feature(env, ARM_FEATURE_V8)) {
1834         env->cp15.c9_pmuserenr = value & 0xf;
1835     } else {
1836         env->cp15.c9_pmuserenr = value & 1;
1837     }
1838 }
1839 
1840 static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1841                              uint64_t value)
1842 {
1843     /* Only the cycle counter and implemented event counter bits are writable */
1844     value &= pmu_counter_mask(env);
1845     env->cp15.c9_pminten |= value;
1846     pmu_update_irq(env);
1847 }
1848 
1849 static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1850                              uint64_t value)
1851 {
1852     value &= pmu_counter_mask(env);
1853     env->cp15.c9_pminten &= ~value;
1854     pmu_update_irq(env);
1855 }
1856 
1857 static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
1858                        uint64_t value)
1859 {
1860     /*
1861      * Note that even though the AArch64 view of this register has bits
1862      * [10:0] all RES0 we can only mask the bottom 5, to comply with the
1863      * architectural requirements for bits which are RES0 only in some
1864      * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
1865      * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
1866      */
1867     raw_write(env, ri, value & ~0x1FULL);
1868 }
1869 
1870 static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
1871 {
1872     /* Begin with base v8.0 state.  */
1873     uint64_t valid_mask = 0x3fff;
1874     ARMCPU *cpu = env_archcpu(env);
1875     uint64_t changed;
1876 
1877     /*
1878      * Because SCR_EL3 is the "real" cpreg and SCR is the alias, reset always
1879      * passes the reginfo for SCR_EL3, which has type ARM_CP_STATE_AA64.
1880      * Instead, choose the format based on the mode of EL3.
1881      */
1882     if (arm_el_is_aa64(env, 3)) {
1883         value |= SCR_FW | SCR_AW;      /* RES1 */
1884         valid_mask &= ~SCR_NET;        /* RES0 */
1885 
1886         if (!cpu_isar_feature(aa64_aa32_el1, cpu) &&
1887             !cpu_isar_feature(aa64_aa32_el2, cpu)) {
1888             value |= SCR_RW;           /* RAO/WI */
1889         }
1890         if (cpu_isar_feature(aa64_ras, cpu)) {
1891             valid_mask |= SCR_TERR;
1892         }
1893         if (cpu_isar_feature(aa64_lor, cpu)) {
1894             valid_mask |= SCR_TLOR;
1895         }
1896         if (cpu_isar_feature(aa64_pauth, cpu)) {
1897             valid_mask |= SCR_API | SCR_APK;
1898         }
1899         if (cpu_isar_feature(aa64_sel2, cpu)) {
1900             valid_mask |= SCR_EEL2;
1901         } else if (cpu_isar_feature(aa64_rme, cpu)) {
1902             /* With RME and without SEL2, NS is RES1 (R_GSWWH, I_DJJQJ). */
1903             value |= SCR_NS;
1904         }
1905         if (cpu_isar_feature(aa64_mte, cpu)) {
1906             valid_mask |= SCR_ATA;
1907         }
1908         if (cpu_isar_feature(aa64_scxtnum, cpu)) {
1909             valid_mask |= SCR_ENSCXT;
1910         }
1911         if (cpu_isar_feature(aa64_doublefault, cpu)) {
1912             valid_mask |= SCR_EASE | SCR_NMEA;
1913         }
1914         if (cpu_isar_feature(aa64_sme, cpu)) {
1915             valid_mask |= SCR_ENTP2;
1916         }
1917         if (cpu_isar_feature(aa64_hcx, cpu)) {
1918             valid_mask |= SCR_HXEN;
1919         }
1920         if (cpu_isar_feature(aa64_fgt, cpu)) {
1921             valid_mask |= SCR_FGTEN;
1922         }
1923         if (cpu_isar_feature(aa64_rme, cpu)) {
1924             valid_mask |= SCR_NSE | SCR_GPF;
1925         }
1926         if (cpu_isar_feature(aa64_ecv, cpu)) {
1927             valid_mask |= SCR_ECVEN;
1928         }
1929     } else {
1930         valid_mask &= ~(SCR_RW | SCR_ST);
1931         if (cpu_isar_feature(aa32_ras, cpu)) {
1932             valid_mask |= SCR_TERR;
1933         }
1934     }
1935 
1936     if (!arm_feature(env, ARM_FEATURE_EL2)) {
1937         valid_mask &= ~SCR_HCE;
1938 
1939         /*
1940          * On ARMv7, SMD (or SCD as it is called in v7) is only
1941          * supported if EL2 exists. The bit is UNK/SBZP when EL2 is
1942          * unavailable, and in QEMU's ARMv7 implementation we force
1943          * it to zero in that case.
1944          * On ARMv8, this bit is always available.
1945          */
1946         if (arm_feature(env, ARM_FEATURE_V7) &&
1947             !arm_feature(env, ARM_FEATURE_V8)) {
1948             valid_mask &= ~SCR_SMD;
1949         }
1950     }
1951 
1952     /* Clear all-context RES0 bits.  */
1953     value &= valid_mask;
1954     changed = env->cp15.scr_el3 ^ value;
1955     env->cp15.scr_el3 = value;
1956 
1957     /*
1958      * If SCR_EL3.{NS,NSE} changes, i.e. change of security state,
1959      * we must invalidate all TLBs below EL3.
1960      */
1961     if (changed & (SCR_NS | SCR_NSE)) {
1962         tlb_flush_by_mmuidx(env_cpu(env), (ARMMMUIdxBit_E10_0 |
1963                                            ARMMMUIdxBit_E20_0 |
1964                                            ARMMMUIdxBit_E10_1 |
1965                                            ARMMMUIdxBit_E20_2 |
1966                                            ARMMMUIdxBit_E10_1_PAN |
1967                                            ARMMMUIdxBit_E20_2_PAN |
1968                                            ARMMMUIdxBit_E2));
1969     }
1970 }
1971 
1972 static void scr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1973 {
1974     /*
1975      * scr_write will set the RES1 bits on an AArch64-only CPU.
1976      * The reset value will be 0x30 on an AArch64-only CPU and 0 otherwise.
1977      */
1978     scr_write(env, ri, 0);
1979 }
1980 
1981 static CPAccessResult access_tid4(CPUARMState *env,
1982                                   const ARMCPRegInfo *ri,
1983                                   bool isread)
1984 {
1985     if (arm_current_el(env) == 1 &&
1986         (arm_hcr_el2_eff(env) & (HCR_TID2 | HCR_TID4))) {
1987         return CP_ACCESS_TRAP_EL2;
1988     }
1989 
1990     return CP_ACCESS_OK;
1991 }
1992 
1993 static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1994 {
1995     ARMCPU *cpu = env_archcpu(env);
1996 
1997     /*
1998      * Acquire the CSSELR index from the register bank that matches the
1999      * security state of this CCSIDR access
2000      */
2001     uint32_t index = A32_BANKED_REG_GET(env, csselr,
2002                                         ri->secure & ARM_CP_SECSTATE_S);
2003 
2004     return cpu->ccsidr[index];
2005 }
2006 
2007 static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2008                          uint64_t value)
2009 {
2010     raw_write(env, ri, value & 0xf);
2011 }
2012 
2013 static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2014 {
2015     CPUState *cs = env_cpu(env);
2016     bool el1 = arm_current_el(env) == 1;
2017     uint64_t hcr_el2 = el1 ? arm_hcr_el2_eff(env) : 0;
2018     uint64_t ret = 0;
2019 
2020     if (hcr_el2 & HCR_IMO) {
2021         if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
2022             ret |= CPSR_I;
2023         }
2024     } else {
2025         if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
2026             ret |= CPSR_I;
2027         }
2028     }
2029 
2030     if (hcr_el2 & HCR_FMO) {
2031         if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) {
2032             ret |= CPSR_F;
2033         }
2034     } else {
2035         if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
2036             ret |= CPSR_F;
2037         }
2038     }
2039 
2040     if (hcr_el2 & HCR_AMO) {
2041         if (cs->interrupt_request & CPU_INTERRUPT_VSERR) {
2042             ret |= CPSR_A;
2043         }
2044     }
2045 
2046     return ret;
2047 }
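
/*
 * Summary of the routing above (illustrative only): when HCR_EL2
 * redirects an interrupt class to EL2, ISR_EL1 as seen from EL1 reports
 * the virtual pending state instead of the physical one:
 *
 *     IMO set -> ISR.I follows VIRQ,  otherwise the physical IRQ
 *     FMO set -> ISR.F follows VFIQ,  otherwise the physical FIQ
 *     AMO set -> ISR.A follows VSERR (no physical SError reported here)
 */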
2048 
2049 static CPAccessResult access_aa64_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
2050                                        bool isread)
2051 {
2052     if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID1)) {
2053         return CP_ACCESS_TRAP_EL2;
2054     }
2055 
2056     return CP_ACCESS_OK;
2057 }
2058 
2059 static CPAccessResult access_aa32_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
2060                                        bool isread)
2061 {
2062     if (arm_feature(env, ARM_FEATURE_V8)) {
2063         return access_aa64_tid1(env, ri, isread);
2064     }
2065 
2066     return CP_ACCESS_OK;
2067 }
2068 
2069 static const ARMCPRegInfo v7_cp_reginfo[] = {
2070     /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
2071     { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
2072       .access = PL1_W, .type = ARM_CP_NOP },
2073     /*
2074      * Performance monitors are implementation defined in v7,
2075      * but with an ARM recommended set of registers, which we
2076      * follow.
2077      *
2078      * Performance registers fall into three categories:
2079      *  (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
2080      *  (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
2081      *  (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
2082      * For the cases controlled by PMUSERENR we must set .access to PL0_RW
2083      * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
2084      */
2085     { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
2086       .access = PL0_RW, .type = ARM_CP_ALIAS | ARM_CP_IO,
2087       .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
2088       .writefn = pmcntenset_write,
2089       .accessfn = pmreg_access,
2090       .fgt = FGT_PMCNTEN,
2091       .raw_writefn = raw_write },
2092     { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64, .type = ARM_CP_IO,
2093       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
2094       .access = PL0_RW, .accessfn = pmreg_access,
2095       .fgt = FGT_PMCNTEN,
2096       .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
2097       .writefn = pmcntenset_write, .raw_writefn = raw_write },
2098     { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
2099       .access = PL0_RW,
2100       .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
2101       .accessfn = pmreg_access,
2102       .fgt = FGT_PMCNTEN,
2103       .writefn = pmcntenclr_write,
2104       .type = ARM_CP_ALIAS | ARM_CP_IO },
2105     { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
2106       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
2107       .access = PL0_RW, .accessfn = pmreg_access,
2108       .fgt = FGT_PMCNTEN,
2109       .type = ARM_CP_ALIAS | ARM_CP_IO,
2110       .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
2111       .writefn = pmcntenclr_write },
2112     { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
2113       .access = PL0_RW, .type = ARM_CP_IO,
2114       .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
2115       .accessfn = pmreg_access,
2116       .fgt = FGT_PMOVS,
2117       .writefn = pmovsr_write,
2118       .raw_writefn = raw_write },
2119     { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
2120       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
2121       .access = PL0_RW, .accessfn = pmreg_access,
2122       .fgt = FGT_PMOVS,
2123       .type = ARM_CP_ALIAS | ARM_CP_IO,
2124       .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
2125       .writefn = pmovsr_write,
2126       .raw_writefn = raw_write },
2127     { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
2128       .access = PL0_W, .accessfn = pmreg_access_swinc,
2129       .fgt = FGT_PMSWINC_EL0,
2130       .type = ARM_CP_NO_RAW | ARM_CP_IO,
2131       .writefn = pmswinc_write },
2132     { .name = "PMSWINC_EL0", .state = ARM_CP_STATE_AA64,
2133       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 4,
2134       .access = PL0_W, .accessfn = pmreg_access_swinc,
2135       .fgt = FGT_PMSWINC_EL0,
2136       .type = ARM_CP_NO_RAW | ARM_CP_IO,
2137       .writefn = pmswinc_write },
2138     { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
2139       .access = PL0_RW, .type = ARM_CP_ALIAS,
2140       .fgt = FGT_PMSELR_EL0,
2141       .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr),
2142       .accessfn = pmreg_access_selr, .writefn = pmselr_write,
2143       .raw_writefn = raw_write },
2144     { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64,
2145       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5,
2146       .access = PL0_RW, .accessfn = pmreg_access_selr,
2147       .fgt = FGT_PMSELR_EL0,
2148       .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr),
2149       .writefn = pmselr_write, .raw_writefn = raw_write, },
2150     { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
2151       .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO,
2152       .fgt = FGT_PMCCNTR_EL0,
2153       .readfn = pmccntr_read, .writefn = pmccntr_write32,
2154       .accessfn = pmreg_access_ccntr },
2155     { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
2156       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
2157       .access = PL0_RW, .accessfn = pmreg_access_ccntr,
2158       .fgt = FGT_PMCCNTR_EL0,
2159       .type = ARM_CP_IO,
2160       .fieldoffset = offsetof(CPUARMState, cp15.c15_ccnt),
2161       .readfn = pmccntr_read, .writefn = pmccntr_write,
2162       .raw_readfn = raw_read, .raw_writefn = raw_write, },
2163     { .name = "PMCCFILTR", .cp = 15, .opc1 = 0, .crn = 14, .crm = 15, .opc2 = 7,
2164       .writefn = pmccfiltr_write_a32, .readfn = pmccfiltr_read_a32,
2165       .access = PL0_RW, .accessfn = pmreg_access,
2166       .fgt = FGT_PMCCFILTR_EL0,
2167       .type = ARM_CP_ALIAS | ARM_CP_IO,
2168       .resetvalue = 0, },
2169     { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
2170       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
2171       .writefn = pmccfiltr_write, .raw_writefn = raw_write,
2172       .access = PL0_RW, .accessfn = pmreg_access,
2173       .fgt = FGT_PMCCFILTR_EL0,
2174       .type = ARM_CP_IO,
2175       .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
2176       .resetvalue = 0, },
2177     { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
2178       .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2179       .accessfn = pmreg_access,
2180       .fgt = FGT_PMEVTYPERN_EL0,
2181       .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
2182     { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
2183       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1,
2184       .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2185       .accessfn = pmreg_access,
2186       .fgt = FGT_PMEVTYPERN_EL0,
2187       .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
2188     { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
2189       .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2190       .accessfn = pmreg_access_xevcntr,
2191       .fgt = FGT_PMEVCNTRN_EL0,
2192       .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
2193     { .name = "PMXEVCNTR_EL0", .state = ARM_CP_STATE_AA64,
2194       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 2,
2195       .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2196       .accessfn = pmreg_access_xevcntr,
2197       .fgt = FGT_PMEVCNTRN_EL0,
2198       .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
2199     { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
2200       .access = PL0_R | PL1_RW, .accessfn = access_tpm,
2201       .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr),
2202       .resetvalue = 0,
2203       .writefn = pmuserenr_write, .raw_writefn = raw_write },
2204     { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
2205       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
2206       .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
2207       .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
2208       .resetvalue = 0,
2209       .writefn = pmuserenr_write, .raw_writefn = raw_write },
2210     { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
2211       .access = PL1_RW, .accessfn = access_tpm,
2212       .fgt = FGT_PMINTEN,
2213       .type = ARM_CP_ALIAS | ARM_CP_IO,
2214       .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten),
2215       .resetvalue = 0,
2216       .writefn = pmintenset_write, .raw_writefn = raw_write },
2217     { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64,
2218       .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1,
2219       .access = PL1_RW, .accessfn = access_tpm,
2220       .fgt = FGT_PMINTEN,
2221       .type = ARM_CP_IO,
2222       .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
2223       .writefn = pmintenset_write, .raw_writefn = raw_write,
2224       .resetvalue = 0x0 },
2225     { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
2226       .access = PL1_RW, .accessfn = access_tpm,
2227       .fgt = FGT_PMINTEN,
2228       .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_NO_RAW,
2229       .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
2230       .writefn = pmintenclr_write, },
2231     { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
2232       .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
2233       .access = PL1_RW, .accessfn = access_tpm,
2234       .fgt = FGT_PMINTEN,
2235       .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_NO_RAW,
2236       .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
2237       .writefn = pmintenclr_write },
2238     { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
2239       .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
2240       .access = PL1_R,
2241       .accessfn = access_tid4,
2242       .fgt = FGT_CCSIDR_EL1,
2243       .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
2244     { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
2245       .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
2246       .access = PL1_RW,
2247       .accessfn = access_tid4,
2248       .fgt = FGT_CSSELR_EL1,
2249       .writefn = csselr_write, .resetvalue = 0,
2250       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
2251                              offsetof(CPUARMState, cp15.csselr_ns) } },
2252     /*
2253      * Auxiliary ID register: this actually has an IMPDEF value but for now
2254      * just RAZ for all cores.
2255      */
2256     { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
2257       .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
2258       .access = PL1_R, .type = ARM_CP_CONST,
2259       .accessfn = access_aa64_tid1,
2260       .fgt = FGT_AIDR_EL1,
2261       .resetvalue = 0 },
2262     /*
2263      * Auxiliary fault status registers: these also are IMPDEF, and we
2264      * choose to RAZ/WI for all cores.
2265      */
2266     { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
2267       .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
2268       .access = PL1_RW, .accessfn = access_tvm_trvm,
2269       .fgt = FGT_AFSR0_EL1,
2270       .nv2_redirect_offset = 0x128 | NV2_REDIR_NV1,
2271       .type = ARM_CP_CONST, .resetvalue = 0 },
2272     { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
2273       .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
2274       .access = PL1_RW, .accessfn = access_tvm_trvm,
2275       .fgt = FGT_AFSR1_EL1,
2276       .nv2_redirect_offset = 0x130 | NV2_REDIR_NV1,
2277       .type = ARM_CP_CONST, .resetvalue = 0 },
2278     /*
2279      * MAIR can just read-as-written because we don't implement caches
2280      * and so don't need to care about memory attributes.
2281      */
2282     { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
2283       .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
2284       .access = PL1_RW, .accessfn = access_tvm_trvm,
2285       .fgt = FGT_MAIR_EL1,
2286       .nv2_redirect_offset = 0x140 | NV2_REDIR_NV1,
2287       .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
2288       .resetvalue = 0 },
2289     { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
2290       .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
2291       .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
2292       .resetvalue = 0 },
2293     /*
2294      * For non-long-descriptor page tables these are PRRR and NMRR;
2295      * regardless they still act as reads-as-written for QEMU.
2296      */
2297     /*
2298      * MAIR0/1 are defined separately from their 64-bit counterpart so
2299      * that each can use the correct fieldoffset for the endianness
2300      * handled in the field definitions.
2301      */
2302     { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
2303       .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
2304       .access = PL1_RW, .accessfn = access_tvm_trvm,
2305       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
2306                              offsetof(CPUARMState, cp15.mair0_ns) },
2307       .resetfn = arm_cp_reset_ignore },
2308     { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
2309       .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1,
2310       .access = PL1_RW, .accessfn = access_tvm_trvm,
2311       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
2312                              offsetof(CPUARMState, cp15.mair1_ns) },
2313       .resetfn = arm_cp_reset_ignore },
2314     { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
2315       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
2316       .fgt = FGT_ISR_EL1,
2317       .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
2318     /* 32 bit ITLB invalidates */
2319     { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
2320       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2321       .writefn = tlbiall_write },
2322     { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
2323       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2324       .writefn = tlbimva_write },
2325     { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
2326       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2327       .writefn = tlbiasid_write },
2328     /* 32 bit DTLB invalidates */
2329     { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
2330       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2331       .writefn = tlbiall_write },
2332     { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
2333       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2334       .writefn = tlbimva_write },
2335     { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
2336       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2337       .writefn = tlbiasid_write },
2338     /* 32 bit TLB invalidates */
2339     { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
2340       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2341       .writefn = tlbiall_write },
2342     { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
2343       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2344       .writefn = tlbimva_write },
2345     { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
2346       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2347       .writefn = tlbiasid_write },
2348     { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
2349       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2350       .writefn = tlbimvaa_write },
2351 };
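
/*
 * Illustrative note (not part of the original source): each entry above
 * binds one coprocessor encoding to its handlers. The PMCNTENSET entry
 * (.cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1), for example,
 * is reached by the AArch32 instruction:
 *
 *     mcr p15, 0, <Rt>, c9, c12, 1    @ write PMCNTENSET
 */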
2352 
2353 static const ARMCPRegInfo v7mp_cp_reginfo[] = {
2354     /* 32 bit TLB invalidates, Inner Shareable */
2355     { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
2356       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis,
2357       .writefn = tlbiall_is_write },
2358     { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
2359       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis,
2360       .writefn = tlbimva_is_write },
2361     { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
2362       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis,
2363       .writefn = tlbiasid_is_write },
2364     { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
2365       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis,
2366       .writefn = tlbimvaa_is_write },
2367 };
2368 
2369 static const ARMCPRegInfo pmovsset_cp_reginfo[] = {
2370     /* PMOVSSET is not implemented in v7 before v7ve */
2371     { .name = "PMOVSSET", .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 3,
2372       .access = PL0_RW, .accessfn = pmreg_access,
2373       .fgt = FGT_PMOVS,
2374       .type = ARM_CP_ALIAS | ARM_CP_IO,
2375       .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
2376       .writefn = pmovsset_write,
2377       .raw_writefn = raw_write },
2378     { .name = "PMOVSSET_EL0", .state = ARM_CP_STATE_AA64,
2379       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 3,
2380       .access = PL0_RW, .accessfn = pmreg_access,
2381       .fgt = FGT_PMOVS,
2382       .type = ARM_CP_ALIAS | ARM_CP_IO,
2383       .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
2384       .writefn = pmovsset_write,
2385       .raw_writefn = raw_write },
2386 };
2387 
2388 static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2389                         uint64_t value)
2390 {
2391     value &= 1;
2392     env->teecr = value;
2393 }
2394 
2395 static CPAccessResult teecr_access(CPUARMState *env, const ARMCPRegInfo *ri,
2396                                    bool isread)
2397 {
2398     /*
2399      * HSTR.TTEE only exists in v7A, not v8A, but v8A doesn't have T2EE
2400      * at all, so we don't need to check whether we're v8A.
2401      */
2402     if (arm_current_el(env) < 2 && !arm_is_secure_below_el3(env) &&
2403         (env->cp15.hstr_el2 & HSTR_TTEE)) {
2404         return CP_ACCESS_TRAP_EL2;
2405     }
2406     return CP_ACCESS_OK;
2407 }
2408 
2409 static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
2410                                     bool isread)
2411 {
2412     if (arm_current_el(env) == 0 && (env->teecr & 1)) {
2413         return CP_ACCESS_TRAP;
2414     }
2415     return teecr_access(env, ri, isread);
2416 }
2417 
2418 static const ARMCPRegInfo t2ee_cp_reginfo[] = {
2419     { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
2420       .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
2421       .resetvalue = 0,
2422       .writefn = teecr_write, .accessfn = teecr_access },
2423     { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
2424       .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
2425       .accessfn = teehbr_access, .resetvalue = 0 },
2426 };
2427 
2428 static const ARMCPRegInfo v6k_cp_reginfo[] = {
2429     { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
2430       .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
2431       .access = PL0_RW,
2432       .fgt = FGT_TPIDR_EL0,
2433       .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
2434     { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
2435       .access = PL0_RW,
2436       .fgt = FGT_TPIDR_EL0,
2437       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
2438                              offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
2439       .resetfn = arm_cp_reset_ignore },
2440     { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
2441       .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
2442       .access = PL0_R | PL1_W,
2443       .fgt = FGT_TPIDRRO_EL0,
2444       .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
2445       .resetvalue = 0 },
2446     { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
2447       .access = PL0_R | PL1_W,
2448       .fgt = FGT_TPIDRRO_EL0,
2449       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
2450                              offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
2451       .resetfn = arm_cp_reset_ignore },
2452     { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
2453       .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
2454       .access = PL1_RW,
2455       .fgt = FGT_TPIDR_EL1,
2456       .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
2457     { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
2458       .access = PL1_RW,
2459       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
2460                              offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
2461       .resetvalue = 0 },
2462 };
2463 
2464 #ifndef CONFIG_USER_ONLY
2465 
2466 static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
2467                                        bool isread)
2468 {
2469     /*
2470      * CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
2471      * Writable only at the highest implemented exception level.
2472      */
2473     int el = arm_current_el(env);
2474     uint64_t hcr;
2475     uint32_t cntkctl;
2476 
2477     switch (el) {
2478     case 0:
2479         hcr = arm_hcr_el2_eff(env);
2480         if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
2481             cntkctl = env->cp15.cnthctl_el2;
2482         } else {
2483             cntkctl = env->cp15.c14_cntkctl;
2484         }
2485         if (!extract32(cntkctl, 0, 2)) {
2486             return CP_ACCESS_TRAP;
2487         }
2488         break;
2489     case 1:
2490         if (!isread && ri->state == ARM_CP_STATE_AA32 &&
2491             arm_is_secure_below_el3(env)) {
2492             /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
2493             return CP_ACCESS_TRAP_UNCATEGORIZED;
2494         }
2495         break;
2496     case 2:
2497     case 3:
2498         break;
2499     }
2500 
2501     if (!isread && el < arm_highest_el(env)) {
2502         return CP_ACCESS_TRAP_UNCATEGORIZED;
2503     }
2504 
2505     return CP_ACCESS_OK;
2506 }
2507 
2508 static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
2509                                         bool isread)
2510 {
2511     unsigned int cur_el = arm_current_el(env);
2512     bool has_el2 = arm_is_el2_enabled(env);
2513     uint64_t hcr = arm_hcr_el2_eff(env);
2514 
2515     switch (cur_el) {
2516     case 0:
2517         /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]CTEN. */
2518         if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
2519             return (extract32(env->cp15.cnthctl_el2, timeridx, 1)
2520                     ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
2521         }
2522 
2523         /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
2524         if (!extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
2525             return CP_ACCESS_TRAP;
2526         }
2527         /* fall through */
2528     case 1:
2529         /* Check CNTHCTL_EL2.EL1PCTEN, which changes location based on E2H. */
2530         if (has_el2 && timeridx == GTIMER_PHYS &&
2531             (hcr & HCR_E2H
2532              ? !extract32(env->cp15.cnthctl_el2, 10, 1)
2533              : !extract32(env->cp15.cnthctl_el2, 0, 1))) {
2534             return CP_ACCESS_TRAP_EL2;
2535         }
2536         if (has_el2 && timeridx == GTIMER_VIRT) {
2537             if (FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, EL1TVCT)) {
2538                 return CP_ACCESS_TRAP_EL2;
2539             }
2540         }
2541         break;
2542     }
2543     return CP_ACCESS_OK;
2544 }
2545 
2546 static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
2547                                       bool isread)
2548 {
2549     unsigned int cur_el = arm_current_el(env);
2550     bool has_el2 = arm_is_el2_enabled(env);
2551     uint64_t hcr = arm_hcr_el2_eff(env);
2552 
2553     switch (cur_el) {
2554     case 0:
2555         if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
2556             /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]TEN. */
2557             return (extract32(env->cp15.cnthctl_el2, 9 - timeridx, 1)
2558                     ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
2559         }
2560 
2561         /*
2562          * CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from
2563          * EL0 if EL0[PV]TEN is zero.
2564          */
2565         if (!extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
2566             return CP_ACCESS_TRAP;
2567         }
2568         /* fall through */
2569 
2570     case 1:
2571         if (has_el2 && timeridx == GTIMER_PHYS) {
2572             if (hcr & HCR_E2H) {
2573                 /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PTEN. */
2574                 if (!extract32(env->cp15.cnthctl_el2, 11, 1)) {
2575                     return CP_ACCESS_TRAP_EL2;
2576                 }
2577             } else {
2578                 /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */
2579                 if (!extract32(env->cp15.cnthctl_el2, 1, 1)) {
2580                     return CP_ACCESS_TRAP_EL2;
2581                 }
2582             }
2583         }
2584         if (has_el2 && timeridx == GTIMER_VIRT) {
2585             if (FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, EL1TVT)) {
2586                 return CP_ACCESS_TRAP_EL2;
2587             }
2588         }
2589         break;
2590     }
2591     return CP_ACCESS_OK;
2592 }
2593 
2594 static CPAccessResult gt_pct_access(CPUARMState *env,
2595                                     const ARMCPRegInfo *ri,
2596                                     bool isread)
2597 {
2598     return gt_counter_access(env, GTIMER_PHYS, isread);
2599 }
2600 
2601 static CPAccessResult gt_vct_access(CPUARMState *env,
2602                                     const ARMCPRegInfo *ri,
2603                                     bool isread)
2604 {
2605     return gt_counter_access(env, GTIMER_VIRT, isread);
2606 }
2607 
2608 static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
2609                                        bool isread)
2610 {
2611     return gt_timer_access(env, GTIMER_PHYS, isread);
2612 }
2613 
2614 static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
2615                                        bool isread)
2616 {
2617     return gt_timer_access(env, GTIMER_VIRT, isread);
2618 }
2619 
2620 static CPAccessResult gt_stimer_access(CPUARMState *env,
2621                                        const ARMCPRegInfo *ri,
2622                                        bool isread)
2623 {
2624     /*
2625      * The AArch64 register view of the secure physical timer is
2626      * always accessible from EL3, and configurably accessible from
2627      * Secure EL1.
2628      */
2629     switch (arm_current_el(env)) {
2630     case 1:
2631         if (!arm_is_secure(env)) {
2632             return CP_ACCESS_TRAP;
2633         }
2634         if (!(env->cp15.scr_el3 & SCR_ST)) {
2635             return CP_ACCESS_TRAP_EL3;
2636         }
2637         return CP_ACCESS_OK;
2638     case 0:
2639     case 2:
2640         return CP_ACCESS_TRAP;
2641     case 3:
2642         return CP_ACCESS_OK;
2643     default:
2644         g_assert_not_reached();
2645     }
2646 }
2647 
2648 static uint64_t gt_get_countervalue(CPUARMState *env)
2649 {
2650     ARMCPU *cpu = env_archcpu(env);
2651 
2652     return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / gt_cntfrq_period_ns(cpu);
2653 }
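
/*
 * Worked example (illustrative only): gt_cntfrq_period_ns() is the tick
 * period in nanoseconds, so the counter is elapsed virtual time divided
 * by the period. Assuming QEMU's usual default CNTFRQ of 62.5 MHz
 * (a 16 ns tick):
 *
 *     qemu_clock_get_ns() == 1000000 ns  ->  count == 62500 ticks
 */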
2654 
2655 static void gt_update_irq(ARMCPU *cpu, int timeridx)
2656 {
2657     CPUARMState *env = &cpu->env;
2658     uint64_t cnthctl = env->cp15.cnthctl_el2;
2659     ARMSecuritySpace ss = arm_security_space(env);
2660     /* ISTATUS && !IMASK */
2661     int irqstate = (env->cp15.c14_timer[timeridx].ctl & 6) == 4;
2662 
2663     /*
2664      * If bit CNTHCTL_EL2.CNT[VP]MASK is set, it overrides IMASK.
2665      * It is RES0 in Secure and NonSecure state.
2666      */
2667     if ((ss == ARMSS_Root || ss == ARMSS_Realm) &&
2668         ((timeridx == GTIMER_VIRT && (cnthctl & R_CNTHCTL_CNTVMASK_MASK)) ||
2669          (timeridx == GTIMER_PHYS && (cnthctl & R_CNTHCTL_CNTPMASK_MASK)))) {
2670         irqstate = 0;
2671     }
2672 
2673     qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
2674     trace_arm_gt_update_irq(timeridx, irqstate);
2675 }
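
/*
 * Sketch of the CTL decode above (illustrative only; the helper name is
 * hypothetical): CNT*_CTL keeps ENABLE in bit 0, IMASK in bit 1 and the
 * read-only ISTATUS in bit 2, so "(ctl & 6) == 4" is precisely
 * "ISTATUS && !IMASK".
 */
static inline bool example_gt_irq_line(uint32_t ctl)
{
    bool istatus = ctl & (1 << 2);    /* timer condition met */
    bool imask = ctl & (1 << 1);      /* interrupt masked */
    return istatus && !imask;         /* same as (ctl & 6) == 4 */
}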
2676 
2677 void gt_rme_post_el_change(ARMCPU *cpu, void *ignored)
2678 {
2679     /*
2680      * Changing security state between Root and Secure/NonSecure, which may
2681      * happen when switching EL, can change the effective value of CNTHCTL_EL2
2682      * mask bits. Update the IRQ state accordingly.
2683      */
2684     gt_update_irq(cpu, GTIMER_VIRT);
2685     gt_update_irq(cpu, GTIMER_PHYS);
2686 }
2687 
2688 static uint64_t gt_phys_raw_cnt_offset(CPUARMState *env)
2689 {
2690     if ((env->cp15.scr_el3 & SCR_ECVEN) &&
2691         FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, ECV) &&
2692         arm_is_el2_enabled(env) &&
2693         (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
2694         return env->cp15.cntpoff_el2;
2695     }
2696     return 0;
2697 }
2698 
2699 static uint64_t gt_phys_cnt_offset(CPUARMState *env)
2700 {
2701     if (arm_current_el(env) >= 2) {
2702         return 0;
2703     }
2704     return gt_phys_raw_cnt_offset(env);
2705 }
2706 
2707 static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
2708 {
2709     ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];
2710 
2711     if (gt->ctl & 1) {
2712         /*
2713          * Timer enabled: calculate and set current ISTATUS, irq, and
2714          * reset timer to when ISTATUS next has to change
2715          */
2716         uint64_t offset = timeridx == GTIMER_VIRT ?
2717             cpu->env.cp15.cntvoff_el2 : gt_phys_raw_cnt_offset(&cpu->env);
2718         uint64_t count = gt_get_countervalue(&cpu->env);
2719         /* Note that this must be unsigned 64 bit arithmetic: */
2720         int istatus = count - offset >= gt->cval;
2721         uint64_t nexttick;
2722 
2723         gt->ctl = deposit32(gt->ctl, 2, 1, istatus);
2724 
2725         if (istatus) {
2726             /*
2727              * Next transition is when (count - offset) rolls back over to 0.
2728              * If offset > count then this is when count == offset;
2729              * if offset <= count then this is when count == offset + 2^64.
2730              * For the latter case we set nexttick to an "as far in the
2731              * future as possible" value and let the code below handle it.
2732              */
2733             if (offset > count) {
2734                 nexttick = offset;
2735             } else {
2736                 nexttick = UINT64_MAX;
2737             }
2738         } else {
2739             /*
2740              * Next transition is when (count - offset) == cval, i.e.
2741              * when count == (cval + offset).
2742              * If that would overflow, then again we set up the next interrupt
2743              * for "as far in the future as possible" for the code below.
2744              */
2745             if (uadd64_overflow(gt->cval, offset, &nexttick)) {
2746                 nexttick = UINT64_MAX;
2747             }
2748         }
2749         /*
2750          * Note that the desired next expiry time might be beyond the
2751          * signed-64-bit range of a QEMUTimer -- in this case we just
2752          * set the timer for as far in the future as possible. When the
2753          * timer expires we will reset the timer for any remaining period.
2754          */
2755         if (nexttick > INT64_MAX / gt_cntfrq_period_ns(cpu)) {
2756             timer_mod_ns(cpu->gt_timer[timeridx], INT64_MAX);
2757         } else {
2758             timer_mod(cpu->gt_timer[timeridx], nexttick);
2759         }
2760         trace_arm_gt_recalc(timeridx, nexttick);
2761     } else {
2762         /* Timer disabled: ISTATUS and timer output always clear */
2763         gt->ctl &= ~4;
2764         timer_del(cpu->gt_timer[timeridx]);
2765         trace_arm_gt_recalc_disabled(timeridx);
2766     }
2767     gt_update_irq(cpu, timeridx);
2768 }
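
/*
 * Worked example for the istatus == 0 branch above (illustrative only):
 * with count == 100, offset == 0 and cval == 150, the condition
 * (count - offset >= cval) next becomes true at count == 150, so
 * nexttick = cval + offset = 150 ticks. timer_mod() then scales ticks
 * into a QEMU_CLOCK_VIRTUAL deadline, the timer having been created
 * with the tick period as its scale; only when cval + offset wraps past
 * 2^64, or the deadline exceeds the signed nanosecond range, does the
 * code fall back to the INT64_MAX "as far in the future as possible"
 * deadline.
 */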
2769 
2770 static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
2771                            int timeridx)
2772 {
2773     ARMCPU *cpu = env_archcpu(env);
2774 
2775     timer_del(cpu->gt_timer[timeridx]);
2776 }
2777 
2778 static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
2779 {
2780     return gt_get_countervalue(env) - gt_phys_cnt_offset(env);
2781 }
2782 
2783 static uint64_t gt_virt_cnt_offset(CPUARMState *env)
2784 {
2785     uint64_t hcr;
2786 
2787     switch (arm_current_el(env)) {
2788     case 2:
2789         hcr = arm_hcr_el2_eff(env);
2790         if (hcr & HCR_E2H) {
2791             return 0;
2792         }
2793         break;
2794     case 0:
2795         hcr = arm_hcr_el2_eff(env);
2796         if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
2797             return 0;
2798         }
2799         break;
2800     }
2801 
2802     return env->cp15.cntvoff_el2;
2803 }
2804 
2805 static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
2806 {
2807     return gt_get_countervalue(env) - gt_virt_cnt_offset(env);
2808 }
2809 
2810 static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2811                           int timeridx,
2812                           uint64_t value)
2813 {
2814     trace_arm_gt_cval_write(timeridx, value);
2815     env->cp15.c14_timer[timeridx].cval = value;
2816     gt_recalc_timer(env_archcpu(env), timeridx);
2817 }
2818 
2819 static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
2820                              int timeridx)
2821 {
2822     uint64_t offset = 0;
2823 
2824     switch (timeridx) {
2825     case GTIMER_VIRT:
2826     case GTIMER_HYPVIRT:
2827         offset = gt_virt_cnt_offset(env);
2828         break;
2829     case GTIMER_PHYS:
2830         offset = gt_phys_cnt_offset(env);
2831         break;
2832     }
2833 
2834     return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
2835                       (gt_get_countervalue(env) - offset));
2836 }
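
/*
 * Illustrative sketch (not part of the original source): TVAL is a
 * signed 32-bit downcounter view of CVAL,
 *
 *     TVAL = CVAL - (count - offset)
 *
 * e.g. with cval == 1000 and an adjusted count of 990 the guest reads
 * TVAL == 10; once the adjusted count passes cval the value goes
 * negative (ISTATUS is then set), truncated to 32 bits by the cast
 * above.
 */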
2837 
2838 static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2839                           int timeridx,
2840                           uint64_t value)
2841 {
2842     uint64_t offset = 0;
2843 
2844     switch (timeridx) {
2845     case GTIMER_VIRT:
2846     case GTIMER_HYPVIRT:
2847         offset = gt_virt_cnt_offset(env);
2848         break;
2849     case GTIMER_PHYS:
2850         offset = gt_phys_cnt_offset(env);
2851         break;
2852     }
2853 
2854     trace_arm_gt_tval_write(timeridx, value);
2855     env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
2856                                          sextract64(value, 0, 32);
2857     gt_recalc_timer(env_archcpu(env), timeridx);
2858 }
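
/*
 * Sketch of the TVAL write above (illustrative only; the helper name is
 * hypothetical): the guest writes a signed 32-bit downcount, which must
 * be sign-extended before being added to the adjusted counter.
 */
static inline uint64_t example_tval_to_cval(uint64_t adjusted_count,
                                            uint32_t tval)
{
    /* sextract64(tval, 0, 32): e.g. 0xffffffff -> -1, fires immediately */
    return adjusted_count + (int32_t)tval;
}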
2859 
2860 static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2861                          int timeridx,
2862                          uint64_t value)
2863 {
2864     ARMCPU *cpu = env_archcpu(env);
2865     uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;
2866 
2867     trace_arm_gt_ctl_write(timeridx, value);
2868     env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
2869     if ((oldval ^ value) & 1) {
2870         /* Enable toggled */
2871         gt_recalc_timer(cpu, timeridx);
2872     } else if ((oldval ^ value) & 2) {
2873         /*
2874          * IMASK toggled: don't need to recalculate,
2875          * just set the interrupt line based on ISTATUS
2876          */
2877         trace_arm_gt_imask_toggle(timeridx);
2878         gt_update_irq(cpu, timeridx);
2879     }
2880 }
2881 
2882 static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2883 {
2884     gt_timer_reset(env, ri, GTIMER_PHYS);
2885 }
2886 
2887 static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2888                                uint64_t value)
2889 {
2890     gt_cval_write(env, ri, GTIMER_PHYS, value);
2891 }
2892 
2893 static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
2894 {
2895     return gt_tval_read(env, ri, GTIMER_PHYS);
2896 }
2897 
2898 static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2899                                uint64_t value)
2900 {
2901     gt_tval_write(env, ri, GTIMER_PHYS, value);
2902 }
2903 
2904 static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2905                               uint64_t value)
2906 {
2907     gt_ctl_write(env, ri, GTIMER_PHYS, value);
2908 }
2909 
2910 static int gt_phys_redir_timeridx(CPUARMState *env)
2911 {
2912     switch (arm_mmu_idx(env)) {
2913     case ARMMMUIdx_E20_0:
2914     case ARMMMUIdx_E20_2:
2915     case ARMMMUIdx_E20_2_PAN:
2916         return GTIMER_HYP;
2917     default:
2918         return GTIMER_PHYS;
2919     }
2920 }
2921 
2922 static int gt_virt_redir_timeridx(CPUARMState *env)
2923 {
2924     switch (arm_mmu_idx(env)) {
2925     case ARMMMUIdx_E20_0:
2926     case ARMMMUIdx_E20_2:
2927     case ARMMMUIdx_E20_2_PAN:
2928         return GTIMER_HYPVIRT;
2929     default:
2930         return GTIMER_VIRT;
2931     }
2932 }
2933 
2934 static uint64_t gt_phys_redir_cval_read(CPUARMState *env,
2935                                         const ARMCPRegInfo *ri)
2936 {
2937     int timeridx = gt_phys_redir_timeridx(env);
2938     return env->cp15.c14_timer[timeridx].cval;
2939 }
2940 
2941 static void gt_phys_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2942                                      uint64_t value)
2943 {
2944     int timeridx = gt_phys_redir_timeridx(env);
2945     gt_cval_write(env, ri, timeridx, value);
2946 }
2947 
2948 static uint64_t gt_phys_redir_tval_read(CPUARMState *env,
2949                                         const ARMCPRegInfo *ri)
2950 {
2951     int timeridx = gt_phys_redir_timeridx(env);
2952     return gt_tval_read(env, ri, timeridx);
2953 }
2954 
2955 static void gt_phys_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2956                                      uint64_t value)
2957 {
2958     int timeridx = gt_phys_redir_timeridx(env);
2959     gt_tval_write(env, ri, timeridx, value);
2960 }
2961 
2962 static uint64_t gt_phys_redir_ctl_read(CPUARMState *env,
2963                                        const ARMCPRegInfo *ri)
2964 {
2965     int timeridx = gt_phys_redir_timeridx(env);
2966     return env->cp15.c14_timer[timeridx].ctl;
2967 }
2968 
2969 static void gt_phys_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2970                                     uint64_t value)
2971 {
2972     int timeridx = gt_phys_redir_timeridx(env);
2973     gt_ctl_write(env, ri, timeridx, value);
2974 }
2975 
2976 static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2977 {
2978     gt_timer_reset(env, ri, GTIMER_VIRT);
2979 }
2980 
2981 static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2982                                uint64_t value)
2983 {
2984     gt_cval_write(env, ri, GTIMER_VIRT, value);
2985 }
2986 
2987 static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
2988 {
2989     return gt_tval_read(env, ri, GTIMER_VIRT);
2990 }
2991 
2992 static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2993                                uint64_t value)
2994 {
2995     gt_tval_write(env, ri, GTIMER_VIRT, value);
2996 }
2997 
2998 static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2999                               uint64_t value)
3000 {
3001     gt_ctl_write(env, ri, GTIMER_VIRT, value);
3002 }
3003 
3004 static void gt_cnthctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
3005                              uint64_t value)
3006 {
3007     ARMCPU *cpu = env_archcpu(env);
3008     uint32_t oldval = env->cp15.cnthctl_el2;
3009     uint32_t valid_mask =
3010         R_CNTHCTL_EL0PCTEN_E2H1_MASK |
3011         R_CNTHCTL_EL0VCTEN_E2H1_MASK |
3012         R_CNTHCTL_EVNTEN_MASK |
3013         R_CNTHCTL_EVNTDIR_MASK |
3014         R_CNTHCTL_EVNTI_MASK |
3015         R_CNTHCTL_EL0VTEN_MASK |
3016         R_CNTHCTL_EL0PTEN_MASK |
3017         R_CNTHCTL_EL1PCTEN_E2H1_MASK |
3018         R_CNTHCTL_EL1PTEN_MASK;
3019 
3020     if (cpu_isar_feature(aa64_rme, cpu)) {
3021         valid_mask |= R_CNTHCTL_CNTVMASK_MASK | R_CNTHCTL_CNTPMASK_MASK;
3022     }
3023     if (cpu_isar_feature(aa64_ecv_traps, cpu)) {
3024         valid_mask |=
3025             R_CNTHCTL_EL1TVT_MASK |
3026             R_CNTHCTL_EL1TVCT_MASK |
3027             R_CNTHCTL_EL1NVPCT_MASK |
3028             R_CNTHCTL_EL1NVVCT_MASK |
3029             R_CNTHCTL_EVNTIS_MASK;
3030     }
3031     if (cpu_isar_feature(aa64_ecv, cpu)) {
3032         valid_mask |= R_CNTHCTL_ECV_MASK;
3033     }
3034 
3035     /* Clear RES0 bits */
3036     value &= valid_mask;
3037 
3038     raw_write(env, ri, value);
3039 
3040     if ((oldval ^ value) & R_CNTHCTL_CNTVMASK_MASK) {
3041         gt_update_irq(cpu, GTIMER_VIRT);
3042     } else if ((oldval ^ value) & R_CNTHCTL_CNTPMASK_MASK) {
3043         gt_update_irq(cpu, GTIMER_PHYS);
3044     }
3045 }
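/*
 * Example of the masking above (hypothetical CPU): on a core without
 * FEAT_RME, a guest write with R_CNTHCTL_CNTVMASK_MASK set is silently
 * dropped, because the bit stays outside valid_mask and is cleared by
 * "value &= valid_mask" before raw_write(); the subsequent IRQ update
 * then sees no change in that bit.
 */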
3046 
3047 static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
3048                               uint64_t value)
3049 {
3050     ARMCPU *cpu = env_archcpu(env);
3051 
3052     trace_arm_gt_cntvoff_write(value);
3053     raw_write(env, ri, value);
3054     gt_recalc_timer(cpu, GTIMER_VIRT);
3055 }
3056 
3057 static uint64_t gt_virt_redir_cval_read(CPUARMState *env,
3058                                         const ARMCPRegInfo *ri)
3059 {
3060     int timeridx = gt_virt_redir_timeridx(env);
3061     return env->cp15.c14_timer[timeridx].cval;
3062 }
3063 
3064 static void gt_virt_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
3065                                      uint64_t value)
3066 {
3067     int timeridx = gt_virt_redir_timeridx(env);
3068     gt_cval_write(env, ri, timeridx, value);
3069 }
3070 
3071 static uint64_t gt_virt_redir_tval_read(CPUARMState *env,
3072                                         const ARMCPRegInfo *ri)
3073 {
3074     int timeridx = gt_virt_redir_timeridx(env);
3075     return gt_tval_read(env, ri, timeridx);
3076 }
3077 
3078 static void gt_virt_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
3079                                      uint64_t value)
3080 {
3081     int timeridx = gt_virt_redir_timeridx(env);
3082     gt_tval_write(env, ri, timeridx, value);
3083 }
3084 
3085 static uint64_t gt_virt_redir_ctl_read(CPUARMState *env,
3086                                        const ARMCPRegInfo *ri)
3087 {
3088     int timeridx = gt_virt_redir_timeridx(env);
3089     return env->cp15.c14_timer[timeridx].ctl;
3090 }
3091 
3092 static void gt_virt_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
3093                                     uint64_t value)
3094 {
3095     int timeridx = gt_virt_redir_timeridx(env);
3096     gt_ctl_write(env, ri, timeridx, value);
3097 }
3098 
3099 static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
3100 {
3101     gt_timer_reset(env, ri, GTIMER_HYP);
3102 }
3103 
3104 static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
3105                               uint64_t value)
3106 {
3107     gt_cval_write(env, ri, GTIMER_HYP, value);
3108 }
3109 
3110 static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
3111 {
3112     return gt_tval_read(env, ri, GTIMER_HYP);
3113 }
3114 
3115 static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
3116                               uint64_t value)
3117 {
3118     gt_tval_write(env, ri, GTIMER_HYP, value);
3119 }
3120 
3121 static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
3122                               uint64_t value)
3123 {
3124     gt_ctl_write(env, ri, GTIMER_HYP, value);
3125 }
3126 
3127 static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
3128 {
3129     gt_timer_reset(env, ri, GTIMER_SEC);
3130 }
3131 
3132 static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
3133                               uint64_t value)
3134 {
3135     gt_cval_write(env, ri, GTIMER_SEC, value);
3136 }
3137 
3138 static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
3139 {
3140     return gt_tval_read(env, ri, GTIMER_SEC);
3141 }
3142 
3143 static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
3144                               uint64_t value)
3145 {
3146     gt_tval_write(env, ri, GTIMER_SEC, value);
3147 }
3148 
3149 static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
3150                               uint64_t value)
3151 {
3152     gt_ctl_write(env, ri, GTIMER_SEC, value);
3153 }
3154 
3155 static void gt_hv_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
3156 {
3157     gt_timer_reset(env, ri, GTIMER_HYPVIRT);
3158 }
3159 
3160 static void gt_hv_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
3161                              uint64_t value)
3162 {
3163     gt_cval_write(env, ri, GTIMER_HYPVIRT, value);
3164 }
3165 
3166 static uint64_t gt_hv_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
3167 {
3168     return gt_tval_read(env, ri, GTIMER_HYPVIRT);
3169 }
3170 
3171 static void gt_hv_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
3172                              uint64_t value)
3173 {
3174     gt_tval_write(env, ri, GTIMER_HYPVIRT, value);
3175 }
3176 
3177 static void gt_hv_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
3178                             uint64_t value)
3179 {
3180     gt_ctl_write(env, ri, GTIMER_HYPVIRT, value);
3181 }
3182 
3183 void arm_gt_ptimer_cb(void *opaque)
3184 {
3185     ARMCPU *cpu = opaque;
3186 
3187     gt_recalc_timer(cpu, GTIMER_PHYS);
3188 }
3189 
3190 void arm_gt_vtimer_cb(void *opaque)
3191 {
3192     ARMCPU *cpu = opaque;
3193 
3194     gt_recalc_timer(cpu, GTIMER_VIRT);
3195 }
3196 
3197 void arm_gt_htimer_cb(void *opaque)
3198 {
3199     ARMCPU *cpu = opaque;
3200 
3201     gt_recalc_timer(cpu, GTIMER_HYP);
3202 }
3203 
3204 void arm_gt_stimer_cb(void *opaque)
3205 {
3206     ARMCPU *cpu = opaque;
3207 
3208     gt_recalc_timer(cpu, GTIMER_SEC);
3209 }
3210 
3211 void arm_gt_hvtimer_cb(void *opaque)
3212 {
3213     ARMCPU *cpu = opaque;
3214 
3215     gt_recalc_timer(cpu, GTIMER_HYPVIRT);
3216 }
3217 
3218 static void arm_gt_cntfrq_reset(CPUARMState *env, const ARMCPRegInfo *opaque)
3219 {
3220     ARMCPU *cpu = env_archcpu(env);
3221 
3222     cpu->env.cp15.c14_cntfrq = cpu->gt_cntfrq_hz;
3223 }
3224 
3225 static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
3226     /*
3227      * Note that CNTFRQ is purely reads-as-written for the benefit
3228      * of software; writing it doesn't actually change the timer frequency.
3229      * Our reset value matches the fixed frequency we implement the timer at.
3230      */
3231     { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
3232       .type = ARM_CP_ALIAS,
3233       .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
3234       .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
3235     },
3236     { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
3237       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
3238       .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
3239       .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
3240       .resetfn = arm_gt_cntfrq_reset,
3241     },
3242     /* overall control: mostly access permissions */
3243     { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
3244       .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
3245       .access = PL1_RW,
3246       .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
3247       .resetvalue = 0,
3248     },
3249     /* per-timer control */
3250     { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
3251       .secure = ARM_CP_SECSTATE_NS,
3252       .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
3253       .accessfn = gt_ptimer_access,
3254       .fieldoffset = offsetoflow32(CPUARMState,
3255                                    cp15.c14_timer[GTIMER_PHYS].ctl),
3256       .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read,
3257       .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write,
3258     },
3259     { .name = "CNTP_CTL_S",
3260       .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
3261       .secure = ARM_CP_SECSTATE_S,
3262       .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
3263       .accessfn = gt_ptimer_access,
3264       .fieldoffset = offsetoflow32(CPUARMState,
3265                                    cp15.c14_timer[GTIMER_SEC].ctl),
3266       .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
3267     },
3268     { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
3269       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
3270       .type = ARM_CP_IO, .access = PL0_RW,
3271       .accessfn = gt_ptimer_access,
3272       .nv2_redirect_offset = 0x180 | NV2_REDIR_NV1,
3273       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
3274       .resetvalue = 0,
3275       .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read,
3276       .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write,
3277     },
3278     { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
3279       .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
3280       .accessfn = gt_vtimer_access,
3281       .fieldoffset = offsetoflow32(CPUARMState,
3282                                    cp15.c14_timer[GTIMER_VIRT].ctl),
3283       .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read,
3284       .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write,
3285     },
3286     { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
3287       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
3288       .type = ARM_CP_IO, .access = PL0_RW,
3289       .accessfn = gt_vtimer_access,
3290       .nv2_redirect_offset = 0x170 | NV2_REDIR_NV1,
3291       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
3292       .resetvalue = 0,
3293       .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read,
3294       .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write,
3295     },
3296     /* TimerValue views: a 32-bit downcounting view of the underlying state */
3297     { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
3298       .secure = ARM_CP_SECSTATE_NS,
3299       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
3300       .accessfn = gt_ptimer_access,
3301       .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write,
3302     },
3303     { .name = "CNTP_TVAL_S",
3304       .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
3305       .secure = ARM_CP_SECSTATE_S,
3306       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
3307       .accessfn = gt_ptimer_access,
3308       .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write,
3309     },
3310     { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
3311       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
3312       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
3313       .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset,
3314       .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write,
3315     },
3316     { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
3317       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
3318       .accessfn = gt_vtimer_access,
3319       .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write,
3320     },
3321     { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
3322       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
3323       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
3324       .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset,
3325       .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write,
3326     },
3327     /* The counter itself */
3328     { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
3329       .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
3330       .accessfn = gt_pct_access,
3331       .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
3332     },
3333     { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
3334       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
3335       .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
3336       .accessfn = gt_pct_access, .readfn = gt_cnt_read,
3337     },
3338     { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
3339       .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
3340       .accessfn = gt_vct_access,
3341       .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
3342     },
3343     { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
3344       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
3345       .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
3346       .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
3347     },
3348     /* Comparison value, indicating when the timer goes off */
3349     { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
3350       .secure = ARM_CP_SECSTATE_NS,
3351       .access = PL0_RW,
3352       .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
3353       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
3354       .accessfn = gt_ptimer_access,
3355       .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read,
3356       .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write,
3357     },
3358     { .name = "CNTP_CVAL_S", .cp = 15, .crm = 14, .opc1 = 2,
3359       .secure = ARM_CP_SECSTATE_S,
3360       .access = PL0_RW,
3361       .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
3362       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
3363       .accessfn = gt_ptimer_access,
3364       .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
3365     },
3366     { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
3367       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
3368       .access = PL0_RW,
3369       .type = ARM_CP_IO,
3370       .nv2_redirect_offset = 0x178 | NV2_REDIR_NV1,
3371       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
3372       .resetvalue = 0, .accessfn = gt_ptimer_access,
3373       .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read,
3374       .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write,
3375     },
3376     { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
3377       .access = PL0_RW,
3378       .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
3379       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
3380       .accessfn = gt_vtimer_access,
3381       .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read,
3382       .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write,
3383     },
3384     { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
3385       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
3386       .access = PL0_RW,
3387       .type = ARM_CP_IO,
3388       .nv2_redirect_offset = 0x168 | NV2_REDIR_NV1,
3389       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
3390       .resetvalue = 0, .accessfn = gt_vtimer_access,
3391       .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read,
3392       .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write,
3393     },
3394     /*
3395      * Secure timer -- this is actually restricted to EL3 only, and
3396      * configurably to Secure EL1, via the accessfn.
3397      */
3398     { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64,
3399       .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0,
3400       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW,
3401       .accessfn = gt_stimer_access,
3402       .readfn = gt_sec_tval_read,
3403       .writefn = gt_sec_tval_write,
3404       .resetfn = gt_sec_timer_reset,
3405     },
3406     { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64,
3407       .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1,
3408       .type = ARM_CP_IO, .access = PL1_RW,
3409       .accessfn = gt_stimer_access,
3410       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl),
3411       .resetvalue = 0,
3412       .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
3413     },
3414     { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64,
3415       .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2,
3416       .type = ARM_CP_IO, .access = PL1_RW,
3417       .accessfn = gt_stimer_access,
3418       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
3419       .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
3420     },
3421 };
3422 
3423 /*
3424  * FEAT_ECV adds extra views of CNTVCT_EL0 and CNTPCT_EL0 which
3425  * are "self-synchronizing". For QEMU all sysregs are self-synchronizing,
3426  * so our implementations here are identical to the normal registers.
3427  */
3428 static const ARMCPRegInfo gen_timer_ecv_cp_reginfo[] = {
3429     { .name = "CNTVCTSS", .cp = 15, .crm = 14, .opc1 = 9,
3430       .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
3431       .accessfn = gt_vct_access,
3432       .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
3433     },
3434     { .name = "CNTVCTSS_EL0", .state = ARM_CP_STATE_AA64,
3435       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 6,
3436       .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
3437       .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
3438     },
3439     { .name = "CNTPCTSS", .cp = 15, .crm = 14, .opc1 = 8,
3440       .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
3441       .accessfn = gt_pct_access,
3442       .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
3443     },
3444     { .name = "CNTPCTSS_EL0", .state = ARM_CP_STATE_AA64,
3445       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 5,
3446       .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
3447       .accessfn = gt_pct_access, .readfn = gt_cnt_read,
3448     },
3449 };
3450 
3451 static CPAccessResult gt_cntpoff_access(CPUARMState *env,
3452                                         const ARMCPRegInfo *ri,
3453                                         bool isread)
3454 {
3455     if (arm_current_el(env) == 2 && arm_feature(env, ARM_FEATURE_EL3) &&
3456         !(env->cp15.scr_el3 & SCR_ECVEN)) {
3457         return CP_ACCESS_TRAP_EL3;
3458     }
3459     return CP_ACCESS_OK;
3460 }
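/*
 * Illustration of the check above: an access to CNTPOFF_EL2 from EL2
 * on a CPU with EL3 implemented and SCR_EL3.ECVEN == 0 traps to EL3,
 * while the same access at EL3, or on a CPU without EL3, returns
 * CP_ACCESS_OK.
 */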
3461 
3462 static void gt_cntpoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
3463                               uint64_t value)
3464 {
3465     ARMCPU *cpu = env_archcpu(env);
3466 
3467     trace_arm_gt_cntpoff_write(value);
3468     raw_write(env, ri, value);
3469     gt_recalc_timer(cpu, GTIMER_PHYS);
3470 }
3471 
3472 static const ARMCPRegInfo gen_timer_cntpoff_reginfo = {
3473     .name = "CNTPOFF_EL2", .state = ARM_CP_STATE_AA64,
3474     .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 6,
3475     .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0,
3476     .accessfn = gt_cntpoff_access, .writefn = gt_cntpoff_write,
3477     .nv2_redirect_offset = 0x1a8,
3478     .fieldoffset = offsetof(CPUARMState, cp15.cntpoff_el2),
3479 };
3480 #else
3481 
3482 /*
3483  * In user-mode most of the generic timer registers are inaccessible;
3484  * however, modern kernels (4.12+) allow access to cntvct_el0.
3485  */
3486 
3487 static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
3488 {
3489     ARMCPU *cpu = env_archcpu(env);
3490 
3491     /*
3492      * Currently we have no support for QEMUTimer in linux-user, so we
3493      * can't call gt_get_countervalue(env); instead we call the
3494      * lower-level functions directly.
3495      */
3496     return cpu_get_clock() / gt_cntfrq_period_ns(cpu);
3497 }
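/*
 * Worked example (assuming the usual GTIMER_SCALE of 16, i.e. a
 * 62.5 MHz counter): gt_cntfrq_period_ns(cpu) is 16, so after one
 * second of host clock (cpu_get_clock() == 1000000000) the guest
 * reads a CNTVCT_EL0 value of 62500000.
 */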
3498 
3499 static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
3500     { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
3501       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
3502       .type = ARM_CP_CONST, .access = PL0_R /* no PL1_RW in linux-user */,
3503       .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
3504       .resetvalue = NANOSECONDS_PER_SECOND / GTIMER_SCALE,
3505     },
3506     { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
3507       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
3508       .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
3509       .readfn = gt_virt_cnt_read,
3510     },
3511 };
3512 
3513 /*
3514  * CNTVCTSS_EL0 has the same trap conditions as CNTVCT_EL0, so it is
3515  * also exposed to userspace by Linux.
3516  */
3517 static const ARMCPRegInfo gen_timer_ecv_cp_reginfo[] = {
3518     { .name = "CNTVCTSS_EL0", .state = ARM_CP_STATE_AA64,
3519       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 6,
3520       .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
3521       .readfn = gt_virt_cnt_read,
3522     },
3523 };
3524 
3525 #endif
3526 
3527 static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
3528 {
3529     if (arm_feature(env, ARM_FEATURE_LPAE)) {
3530         raw_write(env, ri, value);
3531     } else if (arm_feature(env, ARM_FEATURE_V7)) {
3532         raw_write(env, ri, value & 0xfffff6ff);
3533     } else {
3534         raw_write(env, ri, value & 0xfffff1ff);
3535     }
3536 }
3537 
3538 #ifndef CONFIG_USER_ONLY
3539 /* get_phys_addr() isn't present for user-mode-only targets */
3540 
3541 static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
3542                                  bool isread)
3543 {
3544     if (ri->opc2 & 4) {
3545         /*
3546          * The ATS12NSO* operations must trap to EL3 or EL2 if executed in
3547          * Secure EL1 (which can only happen if EL3 is AArch64).
3548          * They are simply UNDEF if executed from NS EL1.
3549          * They function normally from EL2 or EL3.
3550          */
3551         if (arm_current_el(env) == 1) {
3552             if (arm_is_secure_below_el3(env)) {
3553                 if (env->cp15.scr_el3 & SCR_EEL2) {
3554                     return CP_ACCESS_TRAP_EL2;
3555                 }
3556                 return CP_ACCESS_TRAP_EL3;
3557             }
3558             return CP_ACCESS_TRAP_UNCATEGORIZED;
3559         }
3560     }
3561     return CP_ACCESS_OK;
3562 }
3563 
3564 #ifdef CONFIG_TCG
3565 static int par_el1_shareability(GetPhysAddrResult *res)
3566 {
3567     /*
3568      * The PAR_EL1.SH field must be 0b10 for Device or Normal-NC
3569      * memory -- see pseudocode PAREncodeShareability().
3570      */
3571     if (((res->cacheattrs.attrs & 0xf0) == 0) ||
3572         res->cacheattrs.attrs == 0x44 || res->cacheattrs.attrs == 0x40) {
3573         return 2;
3574     }
3575     return res->cacheattrs.shareability;
3576 }
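/*
 * Example encodings (MAIR-style attrs): attrs 0x04 (Device-nGnRE, high
 * nibble 0) or 0x44 (Normal Inner/Outer Non-cacheable) force SH to
 * 0b10 here; a cacheable attrs value such as 0xff keeps whatever
 * shareability the translation returned.
 */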
3577 
3578 static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
3579                              MMUAccessType access_type, ARMMMUIdx mmu_idx,
3580                              ARMSecuritySpace ss)
3581 {
3582     bool ret;
3583     uint64_t par64;
3584     bool format64 = false;
3585     ARMMMUFaultInfo fi = {};
3586     GetPhysAddrResult res = {};
3587 
3588     /*
3589      * I_MXTJT: Granule protection checks are not performed on the final address
3590      * of a successful translation.
3591      */
3592     ret = get_phys_addr_with_space_nogpc(env, value, access_type, mmu_idx, ss,
3593                                          &res, &fi);
3594 
3595     /*
3596      * ATS operations only do S1 or S1+S2 translations, so we never
3597      * have to deal with the ARMCacheAttrs format for S2 only.
3598      */
3599     assert(!res.cacheattrs.is_s2_format);
3600 
3601     if (ret) {
3602         /*
3603          * Some kinds of translation fault must cause exceptions rather
3604          * than being reported in the PAR.
3605          */
3606         int current_el = arm_current_el(env);
3607         int target_el;
3608         uint32_t syn, fsr, fsc;
3609         bool take_exc = false;
3610 
3611         if (fi.s1ptw && current_el == 1
3612             && arm_mmu_idx_is_stage1_of_2(mmu_idx)) {
3613             /*
3614              * Synchronous stage 2 fault on an access made as part of the
3615              * translation table walk for AT S1E0* or AT S1E1* insn
3616              * executed from NS EL1. If this is a synchronous external abort
3617              * and SCR_EL3.EA == 1, then we take a synchronous external abort
3618              * to EL3. Otherwise the fault is taken as an exception to EL2,
3619              * and HPFAR_EL2 holds the faulting IPA.
3620              */
3621             if (fi.type == ARMFault_SyncExternalOnWalk &&
3622                 (env->cp15.scr_el3 & SCR_EA)) {
3623                 target_el = 3;
3624             } else {
3625                 env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4;
3626                 if (arm_is_secure_below_el3(env) && fi.s1ns) {
3627                     env->cp15.hpfar_el2 |= HPFAR_NS;
3628                 }
3629                 target_el = 2;
3630             }
3631             take_exc = true;
3632         } else if (fi.type == ARMFault_SyncExternalOnWalk) {
3633             /*
3634              * Synchronous external aborts during a translation table walk
3635              * are taken as Data Abort exceptions.
3636              */
3637             if (fi.stage2) {
3638                 if (current_el == 3) {
3639                     target_el = 3;
3640                 } else {
3641                     target_el = 2;
3642                 }
3643             } else {
3644                 target_el = exception_target_el(env);
3645             }
3646             take_exc = true;
3647         }
3648 
3649         if (take_exc) {
3650             /* Construct FSR and FSC using same logic as arm_deliver_fault() */
3651             if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
3652                 arm_s1_regime_using_lpae_format(env, mmu_idx)) {
3653                 fsr = arm_fi_to_lfsc(&fi);
3654                 fsc = extract32(fsr, 0, 6);
3655             } else {
3656                 fsr = arm_fi_to_sfsc(&fi);
3657                 fsc = 0x3f;
3658             }
3659             /*
3660              * Report exception with ESR indicating a fault due to a
3661              * translation table walk for a cache maintenance instruction.
3662              */
3663             syn = syn_data_abort_no_iss(current_el == target_el, 0,
3664                                         fi.ea, 1, fi.s1ptw, 1, fsc);
3665             env->exception.vaddress = value;
3666             env->exception.fsr = fsr;
3667             raise_exception(env, EXCP_DATA_ABORT, syn, target_el);
3668         }
3669     }
3670 
3671     if (is_a64(env)) {
3672         format64 = true;
3673     } else if (arm_feature(env, ARM_FEATURE_LPAE)) {
3674         /*
3675          * ATS1Cxx:
3676          * * TTBCR.EAE determines whether the result is returned using the
3677          *   32-bit or the 64-bit PAR format
3678          * * Instructions executed in Hyp mode always use the 64-bit format
3679          *
3680          * ATS1S2NSOxx uses the 64-bit format if any of the following is true:
3681          * * The Non-secure TTBCR.EAE bit is set to 1
3682          * * The implementation includes EL2, and the value of HCR.VM is 1
3683          *
3684          * (Note that HCR.DC makes HCR.VM behave as if it is 1.)
3685          *
3686          * ATS1Hx always uses the 64-bit format.
3687          */
3688         format64 = arm_s1_regime_using_lpae_format(env, mmu_idx);
3689 
3690         if (arm_feature(env, ARM_FEATURE_EL2)) {
3691             if (mmu_idx == ARMMMUIdx_E10_0 ||
3692                 mmu_idx == ARMMMUIdx_E10_1 ||
3693                 mmu_idx == ARMMMUIdx_E10_1_PAN) {
3694                 format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC);
3695             } else {
3696                 format64 |= arm_current_el(env) == 2;
3697             }
3698         }
3699     }
3700 
3701     if (format64) {
3702         /* Create a 64-bit PAR */
3703         par64 = (1 << 11); /* LPAE bit always set */
3704         if (!ret) {
3705             par64 |= res.f.phys_addr & ~0xfffULL;
3706             if (!res.f.attrs.secure) {
3707                 par64 |= (1 << 9); /* NS */
3708             }
3709             par64 |= (uint64_t)res.cacheattrs.attrs << 56; /* ATTR */
3710             par64 |= par_el1_shareability(&res) << 7; /* SH */
3711         } else {
3712             uint32_t fsr = arm_fi_to_lfsc(&fi);
3713 
3714             par64 |= 1; /* F */
3715             par64 |= (fsr & 0x3f) << 1; /* FS */
3716             if (fi.stage2) {
3717                 par64 |= (1 << 9); /* S */
3718             }
3719             if (fi.s1ptw) {
3720                 par64 |= (1 << 8); /* PTW */
3721             }
3722         }
3723     } else {
3724         /*
3725          * fsr is a DFSR/IFSR value for the short descriptor
3726          * translation table format (with WnR always clear).
3727          * Convert it to a 32-bit PAR.
3728          */
3729         if (!ret) {
3730             /* We do not set any attribute bits in the PAR */
3731             if (res.f.lg_page_size == 24
3732                 && arm_feature(env, ARM_FEATURE_V7)) {
3733                 par64 = (res.f.phys_addr & 0xff000000) | (1 << 1);
3734             } else {
3735                 par64 = res.f.phys_addr & 0xfffff000;
3736             }
3737             if (!res.f.attrs.secure) {
3738                 par64 |= (1 << 9); /* NS */
3739             }
3740         } else {
3741             uint32_t fsr = arm_fi_to_sfsc(&fi);
3742 
3743             par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) |
3744                     ((fsr & 0xf) << 1) | 1;
3745         }
3746     }
3747     return par64;
3748 }
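/*
 * Worked example of the 64-bit PAR built above (hypothetical values):
 * a successful Non-secure translation to PA 0x80004000 with
 * attrs == 0xff and shareability 0b11 yields
 *   par64 = (0xffULL << 56) | 0x80004000 | (1 << 11) | (1 << 9)
 *           | (3 << 7)
 *         = 0xff00000080004b80
 * i.e. F == 0, so the guest reads it as a valid translation result.
 */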
3749 #endif /* CONFIG_TCG */
3750 
3751 static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
3752 {
3753 #ifdef CONFIG_TCG
3754     MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
3755     uint64_t par64;
3756     ARMMMUIdx mmu_idx;
3757     int el = arm_current_el(env);
3758     ARMSecuritySpace ss = arm_security_space(env);
3759 
3760     switch (ri->opc2 & 6) {
3761     case 0:
3762         /* stage 1 current state PL1: ATS1CPR, ATS1CPW, ATS1CPRP, ATS1CPWP */
3763         switch (el) {
3764         case 3:
3765             mmu_idx = ARMMMUIdx_E3;
3766             break;
3767         case 2:
3768             g_assert(ss != ARMSS_Secure);  /* ARMv8.4-SecEL2 is 64-bit only */
3769             /* fall through */
3770         case 1:
3771             if (ri->crm == 9 && arm_pan_enabled(env)) {
3772                 mmu_idx = ARMMMUIdx_Stage1_E1_PAN;
3773             } else {
3774                 mmu_idx = ARMMMUIdx_Stage1_E1;
3775             }
3776             break;
3777         default:
3778             g_assert_not_reached();
3779         }
3780         break;
3781     case 2:
3782         /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
3783         switch (el) {
3784         case 3:
3785             mmu_idx = ARMMMUIdx_E10_0;
3786             break;
3787         case 2:
3788             g_assert(ss != ARMSS_Secure);  /* ARMv8.4-SecEL2 is 64-bit only */
3789             mmu_idx = ARMMMUIdx_Stage1_E0;
3790             break;
3791         case 1:
3792             mmu_idx = ARMMMUIdx_Stage1_E0;
3793             break;
3794         default:
3795             g_assert_not_reached();
3796         }
3797         break;
3798     case 4:
3799         /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
3800         mmu_idx = ARMMMUIdx_E10_1;
3801         ss = ARMSS_NonSecure;
3802         break;
3803     case 6:
3804         /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
3805         mmu_idx = ARMMMUIdx_E10_0;
3806         ss = ARMSS_NonSecure;
3807         break;
3808     default:
3809         g_assert_not_reached();
3810     }
3811 
3812     par64 = do_ats_write(env, value, access_type, mmu_idx, ss);
3813 
3814     A32_BANKED_CURRENT_REG_SET(env, par, par64);
3815 #else
3816     /* Handled by hardware accelerator. */
3817     g_assert_not_reached();
3818 #endif /* CONFIG_TCG */
3819 }
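/*
 * Decode example for the switch above: ATS1CPW has opc2 == 1, so
 * "ri->opc2 & 1" selects MMU_DATA_STORE and "ri->opc2 & 6" == 0 picks
 * the stage 1 current-state PL1 case; ATS12NSOUR (opc2 == 6) lands in
 * the stage 1+2 Non-secure PL0 case with MMU_DATA_LOAD.
 */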
3820 
3821 static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
3822                         uint64_t value)
3823 {
3824 #ifdef CONFIG_TCG
3825     MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
3826     uint64_t par64;
3827 
3828     /* There is no SecureEL2 for AArch32. */
3829     par64 = do_ats_write(env, value, access_type, ARMMMUIdx_E2,
3830                          ARMSS_NonSecure);
3831 
3832     A32_BANKED_CURRENT_REG_SET(env, par, par64);
3833 #else
3834     /* Handled by hardware accelerator. */
3835     g_assert_not_reached();
3836 #endif /* CONFIG_TCG */
3837 }
3838 
3839 static CPAccessResult at_e012_access(CPUARMState *env, const ARMCPRegInfo *ri,
3840                                      bool isread)
3841 {
3842     /*
3843      * R_NYXTL: instruction is UNDEFINED if it applies to an Exception level
3844      * lower than EL3 and the combination SCR_EL3.{NSE,NS} is reserved. This can
3845      * only happen when executing at EL3 because that combination also causes an
3846      * illegal exception return. We don't need to check FEAT_RME either, because
3847      * scr_write() ensures that the NSE bit is not set otherwise.
3848      */
3849     if ((env->cp15.scr_el3 & (SCR_NSE | SCR_NS)) == SCR_NSE) {
3850         return CP_ACCESS_TRAP;
3851     }
3852     return CP_ACCESS_OK;
3853 }
3854 
3855 static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
3856                                      bool isread)
3857 {
3858     if (arm_current_el(env) == 3 &&
3859         !(env->cp15.scr_el3 & (SCR_NS | SCR_EEL2))) {
3860         return CP_ACCESS_TRAP;
3861     }
3862     return at_e012_access(env, ri, isread);
3863 }
3864 
3865 static CPAccessResult at_s1e01_access(CPUARMState *env, const ARMCPRegInfo *ri,
3866                                       bool isread)
3867 {
3868     if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_AT)) {
3869         return CP_ACCESS_TRAP_EL2;
3870     }
3871     return at_e012_access(env, ri, isread);
3872 }
3873 
3874 static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
3875                         uint64_t value)
3876 {
3877 #ifdef CONFIG_TCG
3878     MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
3879     ARMMMUIdx mmu_idx;
3880     uint64_t hcr_el2 = arm_hcr_el2_eff(env);
3881     bool regime_e20 = (hcr_el2 & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE);
3882     bool for_el3 = false;
3883     ARMSecuritySpace ss;
3884 
3885     switch (ri->opc2 & 6) {
3886     case 0:
3887         switch (ri->opc1) {
3888         case 0: /* AT S1E1R, AT S1E1W, AT S1E1RP, AT S1E1WP */
3889             if (ri->crm == 9 && arm_pan_enabled(env)) {
3890                 mmu_idx = regime_e20 ?
3891                           ARMMMUIdx_E20_2_PAN : ARMMMUIdx_Stage1_E1_PAN;
3892             } else {
3893                 mmu_idx = regime_e20 ? ARMMMUIdx_E20_2 : ARMMMUIdx_Stage1_E1;
3894             }
3895             break;
3896         case 4: /* AT S1E2R, AT S1E2W */
3897             mmu_idx = hcr_el2 & HCR_E2H ? ARMMMUIdx_E20_2 : ARMMMUIdx_E2;
3898             break;
3899         case 6: /* AT S1E3R, AT S1E3W */
3900             mmu_idx = ARMMMUIdx_E3;
3901             for_el3 = true;
3902             break;
3903         default:
3904             g_assert_not_reached();
3905         }
3906         break;
3907     case 2: /* AT S1E0R, AT S1E0W */
3908         mmu_idx = regime_e20 ? ARMMMUIdx_E20_0 : ARMMMUIdx_Stage1_E0;
3909         break;
3910     case 4: /* AT S12E1R, AT S12E1W */
3911         mmu_idx = regime_e20 ? ARMMMUIdx_E20_2 : ARMMMUIdx_E10_1;
3912         break;
3913     case 6: /* AT S12E0R, AT S12E0W */
3914         mmu_idx = regime_e20 ? ARMMMUIdx_E20_0 : ARMMMUIdx_E10_0;
3915         break;
3916     default:
3917         g_assert_not_reached();
3918     }
3919 
3920     ss = for_el3 ? arm_security_space(env) : arm_security_space_below_el3(env);
3921     env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx, ss);
3922 #else
3923     /* Handled by hardware accelerator. */
3924     g_assert_not_reached();
3925 #endif /* CONFIG_TCG */
3926 }
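/*
 * Decode example for the AArch64 AT ops above: AT S1E2R has opc1 == 4
 * and opc2 == 0, so it selects ARMMMUIdx_E20_2 when HCR_EL2.E2H is set
 * and ARMMMUIdx_E2 otherwise; AT S12E0W (opc2 == 7) takes the
 * "opc2 & 6" == 6 case with MMU_DATA_STORE.
 */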
3927 #endif
3928 
3929 /* Return basic MPU access permission bits.  */
3930 static uint32_t simple_mpu_ap_bits(uint32_t val)
3931 {
3932     uint32_t ret;
3933     uint32_t mask;
3934     int i;
3935     ret = 0;
3936     mask = 3;
3937     for (i = 0; i < 16; i += 2) {
3938         ret |= (val >> i) & mask;
3939         mask <<= 2;
3940     }
3941     return ret;
3942 }
3943 
3944 /* Pad basic MPU access permission bits to extended format.  */
3945 static uint32_t extended_mpu_ap_bits(uint32_t val)
3946 {
3947     uint32_t ret;
3948     uint32_t mask;
3949     int i;
3950     ret = 0;
3951     mask = 3;
3952     for (i = 0; i < 16; i += 2) {
3953         ret |= (val & mask) << i;
3954         mask <<= 2;
3955     }
3956     return ret;
3957 }
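/*
 * Worked example of the two conversions (hypothetical value): a
 * simple-format AP value of 0x6 (region 0 = 0b10, region 1 = 0b01)
 * pads to the extended format as 0x12, and simple_mpu_ap_bits(0x12)
 * compresses it back to 0x6 -- the functions are inverses for the
 * 2-bit fields they carry.
 */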
3958 
3959 static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
3960                                  uint64_t value)
3961 {
3962     env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value);
3963 }
3964 
3965 static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
3966 {
3967     return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap);
3968 }
3969 
3970 static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
3971                                  uint64_t value)
3972 {
3973     env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value);
3974 }
3975 
3976 static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
3977 {
3978     return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap);
3979 }
3980 
3981 static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri)
3982 {
3983     uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
3984 
3985     if (!u32p) {
3986         return 0;
3987     }
3988 
3989     u32p += env->pmsav7.rnr[M_REG_NS];
3990     return *u32p;
3991 }
3992 
3993 static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri,
3994                          uint64_t value)
3995 {
3996     ARMCPU *cpu = env_archcpu(env);
3997     uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
3998 
3999     if (!u32p) {
4000         return;
4001     }
4002 
4003     u32p += env->pmsav7.rnr[M_REG_NS];
4004     tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
4005     *u32p = value;
4006 }
4007 
4008 static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4009                               uint64_t value)
4010 {
4011     ARMCPU *cpu = env_archcpu(env);
4012     uint32_t nrgs = cpu->pmsav7_dregion;
4013 
4014     if (value >= nrgs) {
4015         qemu_log_mask(LOG_GUEST_ERROR,
4016                       "PMSAv7 RGNR write >= # supported regions, %" PRIu32
4017                       " >= %" PRIu32 "\n", (uint32_t)value, nrgs);
4018         return;
4019     }
4020 
4021     raw_write(env, ri, value);
4022 }
4023 
4024 static void prbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
4025                           uint64_t value)
4026 {
4027     ARMCPU *cpu = env_archcpu(env);
4028 
4029     tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
4030     env->pmsav8.rbar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]] = value;
4031 }
4032 
4033 static uint64_t prbar_read(CPUARMState *env, const ARMCPRegInfo *ri)
4034 {
4035     return env->pmsav8.rbar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]];
4036 }
4037 
4038 static void prlar_write(CPUARMState *env, const ARMCPRegInfo *ri,
4039                           uint64_t value)
4040 {
4041     ARMCPU *cpu = env_archcpu(env);
4042 
4043     tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
4044     env->pmsav8.rlar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]] = value;
4045 }
4046 
4047 static uint64_t prlar_read(CPUARMState *env, const ARMCPRegInfo *ri)
4048 {
4049     return env->pmsav8.rlar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]];
4050 }
4051 
4052 static void prselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4053                            uint64_t value)
4054 {
4055     ARMCPU *cpu = env_archcpu(env);
4056 
4057     /*
4058      * Ignore writes that would select a region that is not implemented.
4059      * This is architecturally UNPREDICTABLE.
4060      */
4061     if (value >= cpu->pmsav7_dregion) {
4062         return;
4063     }
4064 
4065     env->pmsav7.rnr[M_REG_NS] = value;
4066 }
4067 
4068 static void hprbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
4069                           uint64_t value)
4070 {
4071     ARMCPU *cpu = env_archcpu(env);
4072 
4073     tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
4074     env->pmsav8.hprbar[env->pmsav8.hprselr] = value;
4075 }
4076 
4077 static uint64_t hprbar_read(CPUARMState *env, const ARMCPRegInfo *ri)
4078 {
4079     return env->pmsav8.hprbar[env->pmsav8.hprselr];
4080 }
4081 
4082 static void hprlar_write(CPUARMState *env, const ARMCPRegInfo *ri,
4083                           uint64_t value)
4084 {
4085     ARMCPU *cpu = env_archcpu(env);
4086 
4087     tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
4088     env->pmsav8.hprlar[env->pmsav8.hprselr] = value;
4089 }
4090 
4091 static uint64_t hprlar_read(CPUARMState *env, const ARMCPRegInfo *ri)
4092 {
4093     return env->pmsav8.hprlar[env->pmsav8.hprselr];
4094 }
4095 
4096 static void hprenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4097                           uint64_t value)
4098 {
4099     uint32_t n;
4100     uint32_t bit;
4101     ARMCPU *cpu = env_archcpu(env);
4102 
4103     /* Ignore writes to unimplemented regions */
4104     int rmax = MIN(cpu->pmsav8r_hdregion, 32);
4105     value &= MAKE_64BIT_MASK(0, rmax);
4106 
4107     tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
4108 
4109     /* Register alias is only valid for the first 32 indexes */
4110     for (n = 0; n < rmax; ++n) {
4111         bit = extract32(value, n, 1);
4112         env->pmsav8.hprlar[n] = deposit32(
4113                     env->pmsav8.hprlar[n], 0, 1, bit);
4114     }
4115 }
4116 
4117 static uint64_t hprenr_read(CPUARMState *env, const ARMCPRegInfo *ri)
4118 {
4119     uint32_t n;
4120     uint32_t result = 0x0;
4121     ARMCPU *cpu = env_archcpu(env);
4122 
4123     /* Register alias is only valid for the first 32 indexes */
4124     for (n = 0; n < MIN(cpu->pmsav8r_hdregion, 32); ++n) {
4125         if (env->pmsav8.hprlar[n] & 0x1) {
4126             result |= (0x1 << n);
4127         }
4128     }
4129     return result;
4130 }
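/*
 * Example of the HPRENR alias (hypothetical value): with four
 * implemented hyp regions, writing 0x5 sets HPRLAR<0>.EN and
 * HPRLAR<2>.EN and clears the EN bit of regions 1 and 3; a read
 * then returns 0x5 by gathering those same EN bits.
 */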
4131 
4132 static void hprselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4133                            uint64_t value)
4134 {
4135     ARMCPU *cpu = env_archcpu(env);
4136 
4137     /*
4138      * Ignore writes that would select a region that is not implemented.
4139      * This is architecturally UNPREDICTABLE.
4140      */
4141     if (value >= cpu->pmsav8r_hdregion) {
4142         return;
4143     }
4144 
4145     env->pmsav8.hprselr = value;
4146 }
4147 
4148 static void pmsav8r_regn_write(CPUARMState *env, const ARMCPRegInfo *ri,
4149                           uint64_t value)
4150 {
4151     ARMCPU *cpu = env_archcpu(env);
4152     uint8_t index = (extract32(ri->opc0, 0, 1) << 4) |
4153                     (extract32(ri->crm, 0, 3) << 1) | extract32(ri->opc2, 2, 1);
4154 
4155     tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
4156 
4157     if (ri->opc1 & 4) {
4158         if (index >= cpu->pmsav8r_hdregion) {
4159             return;
4160         }
4161         if (ri->opc2 & 0x1) {
4162             env->pmsav8.hprlar[index] = value;
4163         } else {
4164             env->pmsav8.hprbar[index] = value;
4165         }
4166     } else {
4167         if (index >= cpu->pmsav7_dregion) {
4168             return;
4169         }
4170         if (ri->opc2 & 0x1) {
4171             env->pmsav8.rlar[M_REG_NS][index] = value;
4172         } else {
4173             env->pmsav8.rbar[M_REG_NS][index] = value;
4174         }
4175     }
4176 }
4177 
4178 static uint64_t pmsav8r_regn_read(CPUARMState *env, const ARMCPRegInfo *ri)
4179 {
4180     ARMCPU *cpu = env_archcpu(env);
4181     uint8_t index = (extract32(ri->opc0, 0, 1) << 4) |
4182                     (extract32(ri->crm, 0, 3) << 1) | extract32(ri->opc2, 2, 1);
4183 
4184     if (ri->opc1 & 4) {
4185         if (index >= cpu->pmsav8r_hdregion) {
4186             return 0x0;
4187         }
4188         if (ri->opc2 & 0x1) {
4189             return env->pmsav8.hprlar[index];
4190         } else {
4191             return env->pmsav8.hprbar[index];
4192         }
4193     } else {
4194         if (index >= cpu->pmsav7_dregion) {
4195             return 0x0;
4196         }
4197         if (ri->opc2 & 0x1) {
4198             return env->pmsav8.rlar[M_REG_NS][index];
4199         } else {
4200             return env->pmsav8.rbar[M_REG_NS][index];
4201         }
4202     }
4203 }
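/*
 * Index decode example for the two helpers above (pure arithmetic on
 * the encoding fields): with opc0 bit 0 == 0, crm[2:0] == 3 and
 * opc2 bit 2 == 1, index = (3 << 1) | 1 == 7, so the access targets
 * region 7 of the selected (hyp or EL1) bank.
 */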
4204 
4205 static const ARMCPRegInfo pmsav8r_cp_reginfo[] = {
4206     { .name = "PRBAR",
4207       .cp = 15, .opc1 = 0, .crn = 6, .crm = 3, .opc2 = 0,
4208       .access = PL1_RW, .type = ARM_CP_NO_RAW,
4209       .accessfn = access_tvm_trvm,
4210       .readfn = prbar_read, .writefn = prbar_write },
4211     { .name = "PRLAR",
4212       .cp = 15, .opc1 = 0, .crn = 6, .crm = 3, .opc2 = 1,
4213       .access = PL1_RW, .type = ARM_CP_NO_RAW,
4214       .accessfn = access_tvm_trvm,
4215       .readfn = prlar_read, .writefn = prlar_write },
4216     { .name = "PRSELR", .resetvalue = 0,
4217       .cp = 15, .opc1 = 0, .crn = 6, .crm = 2, .opc2 = 1,
4218       .access = PL1_RW, .accessfn = access_tvm_trvm,
4219       .writefn = prselr_write,
4220       .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]) },
4221     { .name = "HPRBAR", .resetvalue = 0,
4222       .cp = 15, .opc1 = 4, .crn = 6, .crm = 3, .opc2 = 0,
4223       .access = PL2_RW, .type = ARM_CP_NO_RAW,
4224       .readfn = hprbar_read, .writefn = hprbar_write },
4225     { .name = "HPRLAR",
4226       .cp = 15, .opc1 = 4, .crn = 6, .crm = 3, .opc2 = 1,
4227       .access = PL2_RW, .type = ARM_CP_NO_RAW,
4228       .readfn = hprlar_read, .writefn = hprlar_write },
4229     { .name = "HPRSELR", .resetvalue = 0,
4230       .cp = 15, .opc1 = 4, .crn = 6, .crm = 2, .opc2 = 1,
4231       .access = PL2_RW,
4232       .writefn = hprselr_write,
4233       .fieldoffset = offsetof(CPUARMState, pmsav8.hprselr) },
4234     { .name = "HPRENR",
4235       .cp = 15, .opc1 = 4, .crn = 6, .crm = 1, .opc2 = 1,
4236       .access = PL2_RW, .type = ARM_CP_NO_RAW,
4237       .readfn = hprenr_read, .writefn = hprenr_write },
4238 };
4239 
4240 static const ARMCPRegInfo pmsav7_cp_reginfo[] = {
4241     /*
4242      * Reset for all these registers is handled in arm_cpu_reset(),
4243      * because the PMSAv7 is also used by M-profile CPUs, which do
4244      * not register cpregs but still need the state to be reset.
4245      */
4246     { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0,
4247       .access = PL1_RW, .type = ARM_CP_NO_RAW,
4248       .fieldoffset = offsetof(CPUARMState, pmsav7.drbar),
4249       .readfn = pmsav7_read, .writefn = pmsav7_write,
4250       .resetfn = arm_cp_reset_ignore },
4251     { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2,
4252       .access = PL1_RW, .type = ARM_CP_NO_RAW,
4253       .fieldoffset = offsetof(CPUARMState, pmsav7.drsr),
4254       .readfn = pmsav7_read, .writefn = pmsav7_write,
4255       .resetfn = arm_cp_reset_ignore },
4256     { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4,
4257       .access = PL1_RW, .type = ARM_CP_NO_RAW,
4258       .fieldoffset = offsetof(CPUARMState, pmsav7.dracr),
4259       .readfn = pmsav7_read, .writefn = pmsav7_write,
4260       .resetfn = arm_cp_reset_ignore },
4261     { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0,
4262       .access = PL1_RW,
4263       .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]),
4264       .writefn = pmsav7_rgnr_write,
4265       .resetfn = arm_cp_reset_ignore },
4266 };
4267 
4268 static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
4269     { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
4270       .access = PL1_RW, .type = ARM_CP_ALIAS,
4271       .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
4272       .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
4273     { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
4274       .access = PL1_RW, .type = ARM_CP_ALIAS,
4275       .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
4276       .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
4277     { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
4278       .access = PL1_RW,
4279       .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
4280       .resetvalue = 0, },
4281     { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
4282       .access = PL1_RW,
4283       .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
4284       .resetvalue = 0, },
4285     { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
4286       .access = PL1_RW,
4287       .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
4288     { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
4289       .access = PL1_RW,
4290       .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
4291     /* Protection region base and size registers */
4292     { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
4293       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
4294       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) },
4295     { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
4296       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
4297       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) },
4298     { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
4299       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
4300       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
4301     { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
4302       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
4303       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
4304     { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
4305       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
4306       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
4307     { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
4308       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
4309       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
4310     { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
4311       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
4312       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
4313     { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
4314       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
4315       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
4316 };
4317 
4318 static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4319                              uint64_t value)
4320 {
4321     ARMCPU *cpu = env_archcpu(env);
4322 
4323     if (!arm_feature(env, ARM_FEATURE_V8)) {
4324         if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
4325             /*
4326              * Pre-ARMv8, bits [21:19], [15:14] and [6:3] are UNK/SBZP when
4327              * using the Long-descriptor translation table format.
4328              */
4329             value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
4330         } else if (arm_feature(env, ARM_FEATURE_EL3)) {
4331             /*
4332              * In an implementation that includes the Security Extensions
4333              * TTBCR has additional fields PD0 [4] and PD1 [5] for
4334              * Short-descriptor translation table format.
4335              */
4336             value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N;
4337         } else {
4338             value &= TTBCR_N;
4339         }
4340     }
4341 
4342     if (arm_feature(env, ARM_FEATURE_LPAE)) {
4343         /*
4344          * With LPAE the TTBCR could result in a change of ASID
4345          * via the TTBCR.A1 bit, so do a TLB flush.
4346          */
4347         tlb_flush(CPU(cpu));
4348     }
4349     raw_write(env, ri, value);
4350 }
4351 
4352 static void vmsa_tcr_el12_write(CPUARMState *env, const ARMCPRegInfo *ri,
4353                                uint64_t value)
4354 {
4355     ARMCPU *cpu = env_archcpu(env);
4356 
4357     /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
4358     tlb_flush(CPU(cpu));
4359     raw_write(env, ri, value);
4360 }
4361 
4362 static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4363                             uint64_t value)
4364 {
4365     /* If the ASID changes (with a 64-bit write), we must flush the TLB.  */
4366     if (cpreg_field_is_64bit(ri) &&
4367         extract64(raw_read(env, ri) ^ value, 48, 16) != 0) {
4368         ARMCPU *cpu = env_archcpu(env);
4369         tlb_flush(CPU(cpu));
4370     }
4371     raw_write(env, ri, value);
4372 }
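/*
 * Example (hypothetical values): a write to TTBR0_EL1 that changes
 * bits [63:48] from ASID 0x0001 to 0x0002 triggers the tlb_flush();
 * a write that only changes the translation table base address leaves
 * that field unchanged and skips the flush.
 */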
4373 
4374 static void vmsa_tcr_ttbr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
4375                                     uint64_t value)
4376 {
4377     /*
4378      * If we are running with E2&0 regime, then an ASID is active.
4379      * Flush if that might be changing.  Note we're not checking
4380      * TCR_EL2.A1 to know if this is really the TTBRx_EL2 that
4381      * holds the active ASID, only checking the field that might.
4382      */
4383     if (extract64(raw_read(env, ri) ^ value, 48, 16) &&
4384         (arm_hcr_el2_eff(env) & HCR_E2H)) {
4385         uint16_t mask = ARMMMUIdxBit_E20_2 |
4386                         ARMMMUIdxBit_E20_2_PAN |
4387                         ARMMMUIdxBit_E20_0;
4388         tlb_flush_by_mmuidx(env_cpu(env), mask);
4389     }
4390     raw_write(env, ri, value);
4391 }
4392 
4393 static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4394                         uint64_t value)
4395 {
4396     ARMCPU *cpu = env_archcpu(env);
4397     CPUState *cs = CPU(cpu);
4398 
4399     /*
4400      * A change in the VMID used by the stage 2 page table invalidates
4401      * the stage 2 and combined stage 1&2 TLBs (EL10_1 and EL10_0).
4402      */
4403     if (extract64(raw_read(env, ri) ^ value, 48, 16) != 0) {
4404         tlb_flush_by_mmuidx(cs, alle1_tlbmask(env));
4405     }
4406     raw_write(env, ri, value);
4407 }
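
/*
 * VTTBR_EL2 keeps the VMID in bits [63:48], so the same change-detection
 * idiom as vmsa_ttbr_write() applies here; alle1_tlbmask() collects the
 * stage 2 indexes as well as the combined stage 1&2 ones, since entries
 * in either may be tagged with the VMID.
 */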
4408 
4409 static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = {
4410     { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
4411       .access = PL1_RW, .accessfn = access_tvm_trvm, .type = ARM_CP_ALIAS,
4412       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s),
4413                              offsetoflow32(CPUARMState, cp15.dfsr_ns) }, },
4414     { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
4415       .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
4416       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s),
4417                              offsetoflow32(CPUARMState, cp15.ifsr_ns) } },
4418     { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0,
4419       .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
4420       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s),
4421                              offsetof(CPUARMState, cp15.dfar_ns) } },
4422     { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64,
4423       .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
4424       .access = PL1_RW, .accessfn = access_tvm_trvm,
4425       .fgt = FGT_FAR_EL1,
4426       .nv2_redirect_offset = 0x220 | NV2_REDIR_NV1,
4427       .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
4428       .resetvalue = 0, },
4429 };
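
/*
 * The two-element bank_fieldoffsets arrays above name the Secure and
 * Non-secure banked copies of each register, in that order; the common
 * cpreg code selects the right offset for the current security state,
 * so these definitions need no explicit banking logic.
 */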
4430 
4431 static const ARMCPRegInfo vmsa_cp_reginfo[] = {
4432     { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
4433       .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
4434       .access = PL1_RW, .accessfn = access_tvm_trvm,
4435       .fgt = FGT_ESR_EL1,
4436       .nv2_redirect_offset = 0x138 | NV2_REDIR_NV1,
4437       .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, },
4438     { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
4439       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0,
4440       .access = PL1_RW, .accessfn = access_tvm_trvm,
4441       .fgt = FGT_TTBR0_EL1,
4442       .nv2_redirect_offset = 0x200 | NV2_REDIR_NV1,
4443       .writefn = vmsa_ttbr_write, .resetvalue = 0, .raw_writefn = raw_write,
4444       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
4445                              offsetof(CPUARMState, cp15.ttbr0_ns) } },
4446     { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
4447       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1,
4448       .access = PL1_RW, .accessfn = access_tvm_trvm,
4449       .fgt = FGT_TTBR1_EL1,
4450       .nv2_redirect_offset = 0x210 | NV2_REDIR_NV1,
4451       .writefn = vmsa_ttbr_write, .resetvalue = 0, .raw_writefn = raw_write,
4452       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
4453                              offsetof(CPUARMState, cp15.ttbr1_ns) } },
4454     { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
4455       .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
4456       .access = PL1_RW, .accessfn = access_tvm_trvm,
4457       .fgt = FGT_TCR_EL1,
4458       .nv2_redirect_offset = 0x120 | NV2_REDIR_NV1,
4459       .writefn = vmsa_tcr_el12_write,
4460       .raw_writefn = raw_write,
4461       .resetvalue = 0,
4462       .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) },
4463     { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
4464       .access = PL1_RW, .accessfn = access_tvm_trvm,
4465       .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write,
4466       .raw_writefn = raw_write,
4467       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]),
4468                              offsetoflow32(CPUARMState, cp15.tcr_el[1])} },
4469 };
4470 
4471 /*
4472  * Note that unlike TTBCR, writing to TTBCR2 does not require flushing
4473  * the QEMU TLBs or adjusting cached masks.
4474  */
4475 static const ARMCPRegInfo ttbcr2_reginfo = {
4476     .name = "TTBCR2", .cp = 15, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 3,
4477     .access = PL1_RW, .accessfn = access_tvm_trvm,
4478     .type = ARM_CP_ALIAS,
4479     .bank_fieldoffsets = {
4480         offsetofhigh32(CPUARMState, cp15.tcr_el[3]),
4481         offsetofhigh32(CPUARMState, cp15.tcr_el[1]),
4482     },
4483 };
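
/*
 * Sketch of the aliasing: TTBCR covers TCR_ELx[31:0] via offsetoflow32()
 * and TTBCR2 covers TCR_ELx[63:32] via offsetofhigh32(), so an AArch32
 * guest sees two 32-bit registers backed by one 64-bit field and (both
 * being ARM_CP_ALIAS) no extra state needs to be migrated.
 */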
4484 
4485 static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
4486                                 uint64_t value)
4487 {
4488     env->cp15.c15_ticonfig = value & 0xe7;
4489     /* The OS_TYPE bit in this register changes the reported CPUID! */
4490     env->cp15.c0_cpuid = (value & (1 << 5)) ?
4491         ARM_CPUID_TI915T : ARM_CPUID_TI925T;
4492 }
4493 
4494 static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
4495                                 uint64_t value)
4496 {
4497     env->cp15.c15_threadid = value & 0xffff;
4498 }
4499 
4500 static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
4501                            uint64_t value)
4502 {
4503     /* Wait-for-interrupt (deprecated) */
4504     cpu_interrupt(env_cpu(env), CPU_INTERRUPT_HALT);
4505 }
4506 
4507 static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
4508                                   uint64_t value)
4509 {
4510     /*
4511      * On OMAP there are registers indicating the max/min index of dcache lines
4512      * containing a dirty line; cache flush operations have to reset these.
4513      */
4514     env->cp15.c15_i_max = 0x000;
4515     env->cp15.c15_i_min = 0xff0;
4516 }
4517 
4518 static const ARMCPRegInfo omap_cp_reginfo[] = {
4519     { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
4520       .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
4521       .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
4522       .resetvalue = 0, },
4523     { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
4524       .access = PL1_RW, .type = ARM_CP_NOP },
4525     { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
4526       .access = PL1_RW,
4527       .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
4528       .writefn = omap_ticonfig_write },
4529     { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
4530       .access = PL1_RW,
4531       .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
4532     { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
4533       .access = PL1_RW, .resetvalue = 0xff0,
4534       .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
4535     { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
4536       .access = PL1_RW,
4537       .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
4538       .writefn = omap_threadid_write },
4539     { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
4540       .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
4541       .type = ARM_CP_NO_RAW,
4542       .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
4543     /*
4544      * TODO: Peripheral port remap register:
4545      * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
4546      * base address at $rn & ~0xfff and a map size of 0x200 << ($rn & 0xfff)
4547      * when the MMU is off.
4548      */
4549     { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
4550       .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
4551       .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW,
4552       .writefn = omap_cachemaint_write },
4553     { .name = "C9", .cp = 15, .crn = 9,
4554       .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
4555       .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
4556 };
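
/*
 * Roughly, CP_ANY in a crn/crm/opc1/opc2 slot makes an entry match every
 * encoding of that slot, and ARM_CP_OVERRIDE marks it as deliberately
 * clashing with (and replacing) an earlier definition of the same
 * encoding, which is how these OMAP entries shadow the generic ones.
 */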
4557 
4558 static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
4559                               uint64_t value)
4560 {
4561     env->cp15.c15_cpar = value & 0x3fff;
4562 }
4563 
4564 static const ARMCPRegInfo xscale_cp_reginfo[] = {
4565     { .name = "XSCALE_CPAR",
4566       .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
4567       .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
4568       .writefn = xscale_cpar_write, },
4569     { .name = "XSCALE_AUXCR",
4570       .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
4571       .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
4572       .resetvalue = 0, },
4573     /*
4574      * XScale-specific cache lockdown: since we have no cache we NOP these
4575      * and hope the guest does not really rely on cache behaviour.
4576      */
4577     { .name = "XSCALE_LOCK_ICACHE_LINE",
4578       .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
4579       .access = PL1_W, .type = ARM_CP_NOP },
4580     { .name = "XSCALE_UNLOCK_ICACHE",
4581       .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
4582       .access = PL1_W, .type = ARM_CP_NOP },
4583     { .name = "XSCALE_DCACHE_LOCK",
4584       .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0,
4585       .access = PL1_RW, .type = ARM_CP_NOP },
4586     { .name = "XSCALE_UNLOCK_DCACHE",
4587       .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1,
4588       .access = PL1_W, .type = ARM_CP_NOP },
4589 };
4590 
4591 static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
4592     /*
4593      * RAZ/WI the whole crn=15 space when we don't have a more specific
4594      * implementation of this implementation-defined space.
4595      * Ideally this should eventually disappear in favour of actually
4596      * implementing the correct behaviour for all cores.
4597      */
4598     { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
4599       .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
4600       .access = PL1_RW,
4601       .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
4602       .resetvalue = 0 },
4603 };
4604 
4605 static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
4606     /* Cache status: RAZ because we have no cache so it's always clean */
4607     { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
4608       .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
4609       .resetvalue = 0 },
4610 };
4611 
4612 static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
4613     /* We never have a block transfer operation in progress */
4614     { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
4615       .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
4616       .resetvalue = 0 },
4617     /* The cache ops themselves: these all NOP for QEMU */
4618     { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
4619       .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
4620     { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
4621       .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
4622     { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
4623       .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
4624     { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
4625       .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
4626     { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
4627       .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
4628     { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
4629       .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
4630 };
4631 
4632 static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
4633     /*
4634      * The cache test-and-clean instructions always return (1 << 30)
4635      * to indicate that there are no dirty cache lines.
4636      */
4637     { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
4638       .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
4639       .resetvalue = (1 << 30) },
4640     { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
4641       .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
4642       .resetvalue = (1 << 30) },
4643 };
4644 
4645 static const ARMCPRegInfo strongarm_cp_reginfo[] = {
4646     /* Ignore ReadBuffer accesses */
4647     { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
4648       .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
4649       .access = PL1_RW, .resetvalue = 0,
4650       .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW },
4651 };
4652 
4653 static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri)
4654 {
4655     unsigned int cur_el = arm_current_el(env);
4656 
4657     if (arm_is_el2_enabled(env) && cur_el == 1) {
4658         return env->cp15.vpidr_el2;
4659     }
4660     return raw_read(env, ri);
4661 }
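
/*
 * With EL2 enabled, EL1 reads of MIDR are satisfied from VPIDR_EL2 so
 * that a hypervisor can present a different CPU identity to its guest;
 * every other exception level sees the real (raw) register value.
 */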
4662 
4663 static uint64_t mpidr_read_val(CPUARMState *env)
4664 {
4665     ARMCPU *cpu = env_archcpu(env);
4666     uint64_t mpidr = cpu->mp_affinity;
4667 
4668     if (arm_feature(env, ARM_FEATURE_V7MP)) {
4669         mpidr |= (1U << 31);
4670         /*
4671          * Cores which are uniprocessor (non-coherent)
4672          * but still implement the MP extensions set
4673          * bit 30. (For instance, Cortex-R5).
4674          */
4675         if (cpu->mp_is_up) {
4676             mpidr |= (1u << 30);
4677         }
4678     }
4679     return mpidr;
4680 }
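
/*
 * Example layout (illustrative): a Cortex-R5-like uniprocessor with the
 * MP extensions and mp_affinity == 0 reads back MPIDR == 0xc0000000,
 * i.e. bit 31 (the "new MPIDR format" bit) plus bit 30 (U, uniprocessor).
 */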
4681 
4682 static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
4683 {
4684     unsigned int cur_el = arm_current_el(env);
4685 
4686     if (arm_is_el2_enabled(env) && cur_el == 1) {
4687         return env->cp15.vmpidr_el2;
4688     }
4689     return mpidr_read_val(env);
4690 }
4691 
4692 static const ARMCPRegInfo lpae_cp_reginfo[] = {
4693     /* NOP AMAIR0/1 */
4694     { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
4695       .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
4696       .access = PL1_RW, .accessfn = access_tvm_trvm,
4697       .fgt = FGT_AMAIR_EL1,
4698       .nv2_redirect_offset = 0x148 | NV2_REDIR_NV1,
4699       .type = ARM_CP_CONST, .resetvalue = 0 },
4700     /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
4701     { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
4702       .access = PL1_RW, .accessfn = access_tvm_trvm,
4703       .type = ARM_CP_CONST, .resetvalue = 0 },
4704     { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
4705       .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0,
4706       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s),
4707                              offsetof(CPUARMState, cp15.par_ns)} },
4708     { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
4709       .access = PL1_RW, .accessfn = access_tvm_trvm,
4710       .type = ARM_CP_64BIT | ARM_CP_ALIAS,
4711       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
4712                              offsetof(CPUARMState, cp15.ttbr0_ns) },
4713       .writefn = vmsa_ttbr_write, .raw_writefn = raw_write },
4714     { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
4715       .access = PL1_RW, .accessfn = access_tvm_trvm,
4716       .type = ARM_CP_64BIT | ARM_CP_ALIAS,
4717       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
4718                              offsetof(CPUARMState, cp15.ttbr1_ns) },
4719       .writefn = vmsa_ttbr_write, .raw_writefn = raw_write },
4720 };
4721 
4722 static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
4723 {
4724     return vfp_get_fpcr(env);
4725 }
4726 
4727 static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4728                             uint64_t value)
4729 {
4730     vfp_set_fpcr(env, value);
4731 }
4732 
4733 static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
4734 {
4735     return vfp_get_fpsr(env);
4736 }
4737 
4738 static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4739                             uint64_t value)
4740 {
4741     vfp_set_fpsr(env, value);
4742 }
4743 
4744 static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri,
4745                                        bool isread)
4746 {
4747     if (arm_current_el(env) == 0 && !(arm_sctlr(env, 0) & SCTLR_UMA)) {
4748         return CP_ACCESS_TRAP;
4749     }
4750     return CP_ACCESS_OK;
4751 }
4752 
4753 static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
4754                             uint64_t value)
4755 {
4756     env->daif = value & PSTATE_DAIF;
4757 }
4758 
4759 static uint64_t aa64_pan_read(CPUARMState *env, const ARMCPRegInfo *ri)
4760 {
4761     return env->pstate & PSTATE_PAN;
4762 }
4763 
4764 static void aa64_pan_write(CPUARMState *env, const ARMCPRegInfo *ri,
4765                            uint64_t value)
4766 {
4767     env->pstate = (env->pstate & ~PSTATE_PAN) | (value & PSTATE_PAN);
4768 }
4769 
4770 static const ARMCPRegInfo pan_reginfo = {
4771     .name = "PAN", .state = ARM_CP_STATE_AA64,
4772     .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 3,
4773     .type = ARM_CP_NO_RAW, .access = PL1_RW,
4774     .readfn = aa64_pan_read, .writefn = aa64_pan_write
4775 };
4776 
4777 static uint64_t aa64_uao_read(CPUARMState *env, const ARMCPRegInfo *ri)
4778 {
4779     return env->pstate & PSTATE_UAO;
4780 }
4781 
4782 static void aa64_uao_write(CPUARMState *env, const ARMCPRegInfo *ri,
4783                            uint64_t value)
4784 {
4785     env->pstate = (env->pstate & ~PSTATE_UAO) | (value & PSTATE_UAO);
4786 }
4787 
4788 static const ARMCPRegInfo uao_reginfo = {
4789     .name = "UAO", .state = ARM_CP_STATE_AA64,
4790     .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 4,
4791     .type = ARM_CP_NO_RAW, .access = PL1_RW,
4792     .readfn = aa64_uao_read, .writefn = aa64_uao_write
4793 };
4794 
4795 static uint64_t aa64_dit_read(CPUARMState *env, const ARMCPRegInfo *ri)
4796 {
4797     return env->pstate & PSTATE_DIT;
4798 }
4799 
4800 static void aa64_dit_write(CPUARMState *env, const ARMCPRegInfo *ri,
4801                            uint64_t value)
4802 {
4803     env->pstate = (env->pstate & ~PSTATE_DIT) | (value & PSTATE_DIT);
4804 }
4805 
4806 static const ARMCPRegInfo dit_reginfo = {
4807     .name = "DIT", .state = ARM_CP_STATE_AA64,
4808     .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 5,
4809     .type = ARM_CP_NO_RAW, .access = PL0_RW,
4810     .readfn = aa64_dit_read, .writefn = aa64_dit_write
4811 };
4812 
4813 static uint64_t aa64_ssbs_read(CPUARMState *env, const ARMCPRegInfo *ri)
4814 {
4815     return env->pstate & PSTATE_SSBS;
4816 }
4817 
4818 static void aa64_ssbs_write(CPUARMState *env, const ARMCPRegInfo *ri,
4819                            uint64_t value)
4820 {
4821     env->pstate = (env->pstate & ~PSTATE_SSBS) | (value & PSTATE_SSBS);
4822 }
4823 
4824 static const ARMCPRegInfo ssbs_reginfo = {
4825     .name = "SSBS", .state = ARM_CP_STATE_AA64,
4826     .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 6,
4827     .type = ARM_CP_NO_RAW, .access = PL0_RW,
4828     .readfn = aa64_ssbs_read, .writefn = aa64_ssbs_write
4829 };
4830 
4831 static CPAccessResult aa64_cacheop_poc_access(CPUARMState *env,
4832                                               const ARMCPRegInfo *ri,
4833                                               bool isread)
4834 {
4835     /* Cache invalidate/clean to Point of Coherency or Persistence...  */
4836     switch (arm_current_el(env)) {
4837     case 0:
4838         /* ... EL0 must UNDEF unless SCTLR_EL1.UCI is set.  */
4839         if (!(arm_sctlr(env, 0) & SCTLR_UCI)) {
4840             return CP_ACCESS_TRAP;
4841         }
4842         /* fall through */
4843     case 1:
4844         /* ... EL1 must trap to EL2 if HCR_EL2.TPCP is set.  */
4845         if (arm_hcr_el2_eff(env) & HCR_TPCP) {
4846             return CP_ACCESS_TRAP_EL2;
4847         }
4848         break;
4849     }
4850     return CP_ACCESS_OK;
4851 }
4852 
4853 static CPAccessResult do_cacheop_pou_access(CPUARMState *env, uint64_t hcrflags)
4854 {
4855     /* Cache invalidate/clean to Point of Unification... */
4856     switch (arm_current_el(env)) {
4857     case 0:
4858         /* ... EL0 must UNDEF unless SCTLR_EL1.UCI is set.  */
4859         if (!(arm_sctlr(env, 0) & SCTLR_UCI)) {
4860             return CP_ACCESS_TRAP;
4861         }
4862         /* fall through */
4863     case 1:
4864         /* ... EL1 must trap to EL2 if relevant HCR_EL2 flags are set.  */
4865         if (arm_hcr_el2_eff(env) & hcrflags) {
4866             return CP_ACCESS_TRAP_EL2;
4867         }
4868         break;
4869     }
4870     return CP_ACCESS_OK;
4871 }
4872 
4873 static CPAccessResult access_ticab(CPUARMState *env, const ARMCPRegInfo *ri,
4874                                    bool isread)
4875 {
4876     return do_cacheop_pou_access(env, HCR_TICAB | HCR_TPU);
4877 }
4878 
4879 static CPAccessResult access_tocu(CPUARMState *env, const ARMCPRegInfo *ri,
4880                                   bool isread)
4881 {
4882     return do_cacheop_pou_access(env, HCR_TOCU | HCR_TPU);
4883 }
4884 
4885 /*
4886  * See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions
4887  * Page D4-1736 (DDI0487A.b)
4888  */
4889 
4890 static int vae1_tlbmask(CPUARMState *env)
4891 {
4892     uint64_t hcr = arm_hcr_el2_eff(env);
4893     uint16_t mask;
4894 
4895     if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
4896         mask = ARMMMUIdxBit_E20_2 |
4897                ARMMMUIdxBit_E20_2_PAN |
4898                ARMMMUIdxBit_E20_0;
4899     } else {
4900         mask = ARMMMUIdxBit_E10_1 |
4901                ARMMMUIdxBit_E10_1_PAN |
4902                ARMMMUIdxBit_E10_0;
4903     }
4904     return mask;
4905 }
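
/*
 * The two cases mirror which regime EL0/EL1 traffic currently uses:
 * with HCR_EL2.{E2H,TGE} == {1,1} the EL2&0 regime is in force,
 * otherwise the EL1&0 regime.  The PAN variants are included because
 * they are distinct QEMU mmu indexes over the same translation tables.
 */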
4906 
4907 static int vae2_tlbmask(CPUARMState *env)
4908 {
4909     uint64_t hcr = arm_hcr_el2_eff(env);
4910     uint16_t mask;
4911 
4912     if (hcr & HCR_E2H) {
4913         mask = ARMMMUIdxBit_E20_2 |
4914                ARMMMUIdxBit_E20_2_PAN |
4915                ARMMMUIdxBit_E20_0;
4916     } else {
4917         mask = ARMMMUIdxBit_E2;
4918     }
4919     return mask;
4920 }
4921 
4922 /* Return 56 if TBI is enabled, 64 otherwise. */
4923 static int tlbbits_for_regime(CPUARMState *env, ARMMMUIdx mmu_idx,
4924                               uint64_t addr)
4925 {
4926     uint64_t tcr = regime_tcr(env, mmu_idx);
4927     int tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
4928     int select = extract64(addr, 55, 1);
4929 
4930     return (tbi >> select) & 1 ? 56 : 64;
4931 }
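
/*
 * Example (illustrative): for a VA with bit 55 set, "select" is 1 and
 * the TBI1 bit of the tbi pair is consulted; if it is set, only the low
 * 56 bits of the address are significant for TLB matching and the flush
 * helpers can ignore the tag byte.
 */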
4932 
4933 static int vae1_tlbbits(CPUARMState *env, uint64_t addr)
4934 {
4935     uint64_t hcr = arm_hcr_el2_eff(env);
4936     ARMMMUIdx mmu_idx;
4937 
4938     /* Only the regime of the mmu_idx below is significant. */
4939     if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
4940         mmu_idx = ARMMMUIdx_E20_0;
4941     } else {
4942         mmu_idx = ARMMMUIdx_E10_0;
4943     }
4944 
4945     return tlbbits_for_regime(env, mmu_idx, addr);
4946 }
4947 
4948 static int vae2_tlbbits(CPUARMState *env, uint64_t addr)
4949 {
4950     uint64_t hcr = arm_hcr_el2_eff(env);
4951     ARMMMUIdx mmu_idx;
4952 
4953     /*
4954      * Only the regime of the mmu_idx below is significant.
4955      * Regime EL2&0 has two ranges with separate TBI configuration, while EL2
4956      * only has one.
4957      */
4958     if (hcr & HCR_E2H) {
4959         mmu_idx = ARMMMUIdx_E20_2;
4960     } else {
4961         mmu_idx = ARMMMUIdx_E2;
4962     }
4963 
4964     return tlbbits_for_regime(env, mmu_idx, addr);
4965 }
4966 
4967 static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4968                                       uint64_t value)
4969 {
4970     CPUState *cs = env_cpu(env);
4971     int mask = vae1_tlbmask(env);
4972 
4973     tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
4974 }
4975 
4976 static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
4977                                     uint64_t value)
4978 {
4979     CPUState *cs = env_cpu(env);
4980     int mask = vae1_tlbmask(env);
4981 
4982     if (tlb_force_broadcast(env)) {
4983         tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
4984     } else {
4985         tlb_flush_by_mmuidx(cs, mask);
4986     }
4987 }
4988 
4989 static int e2_tlbmask(CPUARMState *env)
4990 {
4991     return (ARMMMUIdxBit_E20_0 |
4992             ARMMMUIdxBit_E20_2 |
4993             ARMMMUIdxBit_E20_2_PAN |
4994             ARMMMUIdxBit_E2);
4995 }
4996 
4997 static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
4998                                   uint64_t value)
4999 {
5000     CPUState *cs = env_cpu(env);
5001     int mask = alle1_tlbmask(env);
5002 
5003     tlb_flush_by_mmuidx(cs, mask);
5004 }
5005 
5006 static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
5007                                   uint64_t value)
5008 {
5009     CPUState *cs = env_cpu(env);
5010     int mask = e2_tlbmask(env);
5011 
5012     tlb_flush_by_mmuidx(cs, mask);
5013 }
5014 
5015 static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
5016                                   uint64_t value)
5017 {
5018     ARMCPU *cpu = env_archcpu(env);
5019     CPUState *cs = CPU(cpu);
5020 
5021     tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E3);
5022 }
5023 
5024 static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
5025                                     uint64_t value)
5026 {
5027     CPUState *cs = env_cpu(env);
5028     int mask = alle1_tlbmask(env);
5029 
5030     tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
5031 }
5032 
5033 static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
5034                                     uint64_t value)
5035 {
5036     CPUState *cs = env_cpu(env);
5037     int mask = e2_tlbmask(env);
5038 
5039     tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
5040 }
5041 
5042 static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
5043                                     uint64_t value)
5044 {
5045     CPUState *cs = env_cpu(env);
5046 
5047     tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E3);
5048 }
5049 
5050 static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
5051                                  uint64_t value)
5052 {
5053     /*
5054      * Invalidate by VA, EL2
5055      * Currently handles both VAE2 and VALE2, since we don't support
5056      * flush-last-level-only.
5057      */
5058     CPUState *cs = env_cpu(env);
5059     int mask = vae2_tlbmask(env);
5060     uint64_t pageaddr = sextract64(value << 12, 0, 56);
5061     int bits = vae2_tlbbits(env, pageaddr);
5062 
5063     tlb_flush_page_bits_by_mmuidx(cs, pageaddr, mask, bits);
5064 }
5065 
5066 static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
5067                                  uint64_t value)
5068 {
5069     /*
5070      * Invalidate by VA, EL3
5071      * Currently handles both VAE3 and VALE3, since we don't support
5072      * flush-last-level-only.
5073      */
5074     ARMCPU *cpu = env_archcpu(env);
5075     CPUState *cs = CPU(cpu);
5076     uint64_t pageaddr = sextract64(value << 12, 0, 56);
5077 
5078     tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E3);
5079 }
5080 
5081 static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
5082                                    uint64_t value)
5083 {
5084     CPUState *cs = env_cpu(env);
5085     int mask = vae1_tlbmask(env);
5086     uint64_t pageaddr = sextract64(value << 12, 0, 56);
5087     int bits = vae1_tlbbits(env, pageaddr);
5088 
5089     tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits);
5090 }
5091 
5092 static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
5093                                  uint64_t value)
5094 {
5095     /*
5096      * Invalidate by VA, EL1&0 (AArch64 version).
5097      * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1,
5098      * since we don't support flush-for-specific-ASID-only or
5099      * flush-last-level-only.
5100      */
5101     CPUState *cs = env_cpu(env);
5102     int mask = vae1_tlbmask(env);
5103     uint64_t pageaddr = sextract64(value << 12, 0, 56);
5104     int bits = vae1_tlbbits(env, pageaddr);
5105 
5106     if (tlb_force_broadcast(env)) {
5107         tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits);
5108     } else {
5109         tlb_flush_page_bits_by_mmuidx(cs, pageaddr, mask, bits);
5110     }
5111 }
5112 
5113 static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
5114                                    uint64_t value)
5115 {
5116     CPUState *cs = env_cpu(env);
5117     int mask = vae2_tlbmask(env);
5118     uint64_t pageaddr = sextract64(value << 12, 0, 56);
5119     int bits = vae2_tlbbits(env, pageaddr);
5120 
5121     tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits);
5122 }
5123 
5124 static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
5125                                    uint64_t value)
5126 {
5127     CPUState *cs = env_cpu(env);
5128     uint64_t pageaddr = sextract64(value << 12, 0, 56);
5129     int bits = tlbbits_for_regime(env, ARMMMUIdx_E3, pageaddr);
5130 
5131     tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr,
5132                                                   ARMMMUIdxBit_E3, bits);
5133 }
5134 
5135 static int ipas2e1_tlbmask(CPUARMState *env, int64_t value)
5136 {
5137     /*
5138      * The MSB of value is the NS field, which only applies if SEL2
5139      * is implemented and SCR_EL3.NS is not set (i.e. in secure mode).
5140      */
5141     return (value >= 0
5142             && cpu_isar_feature(aa64_sel2, env_archcpu(env))
5143             && arm_is_secure_below_el3(env)
5144             ? ARMMMUIdxBit_Stage2_S
5145             : ARMMMUIdxBit_Stage2);
5146 }
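
/*
 * "value >= 0" works because the parameter is int64_t: a clear bit 63
 * (the NS field of the TLBI payload) leaves the value non-negative.
 * E.g. (illustrative) value = 0x8000000000001000 is negative, so even a
 * Secure-EL2-capable CPU in Secure state targets the Non-secure stage 2
 * index for that invalidation.
 */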
5147 
5148 static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
5149                                     uint64_t value)
5150 {
5151     CPUState *cs = env_cpu(env);
5152     int mask = ipas2e1_tlbmask(env, value);
5153     uint64_t pageaddr = sextract64(value << 12, 0, 56);
5154 
5155     if (tlb_force_broadcast(env)) {
5156         tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, mask);
5157     } else {
5158         tlb_flush_page_by_mmuidx(cs, pageaddr, mask);
5159     }
5160 }
5161 
5162 static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
5163                                       uint64_t value)
5164 {
5165     CPUState *cs = env_cpu(env);
5166     int mask = ipas2e1_tlbmask(env, value);
5167     uint64_t pageaddr = sextract64(value << 12, 0, 56);
5168 
5169     tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, mask);
5170 }
5171 
5172 #ifdef TARGET_AARCH64
5173 typedef struct {
5174     uint64_t base;
5175     uint64_t length;
5176 } TLBIRange;
5177 
5178 static ARMGranuleSize tlbi_range_tg_to_gran_size(int tg)
5179 {
5180     /*
5181      * Note that the TLBI range TG field encoding differs from both
5182      * TG0 and TG1 encodings.
5183      */
5184     switch (tg) {
5185     case 1:
5186         return Gran4K;
5187     case 2:
5188         return Gran16K;
5189     case 3:
5190         return Gran64K;
5191     default:
5192         return GranInvalid;
5193     }
5194 }
5195 
5196 static TLBIRange tlbi_aa64_get_range(CPUARMState *env, ARMMMUIdx mmuidx,
5197                                      uint64_t value)
5198 {
5199     unsigned int page_size_granule, page_shift, num, scale, exponent;
5200     /* Extract one bit to represent the va selector in use. */
5201     uint64_t select = sextract64(value, 36, 1);
5202     ARMVAParameters param = aa64_va_parameters(env, select, mmuidx, true, false);
5203     TLBIRange ret = { };
5204     ARMGranuleSize gran;
5205 
5206     page_size_granule = extract64(value, 46, 2);
5207     gran = tlbi_range_tg_to_gran_size(page_size_granule);
5208 
5209     /* The granule encoded in value must match the granule in use. */
5210     if (gran != param.gran) {
5211         qemu_log_mask(LOG_GUEST_ERROR, "Invalid tlbi page size granule %d\n",
5212                       page_size_granule);
5213         return ret;
5214     }
5215 
5216     page_shift = arm_granule_bits(gran);
5217     num = extract64(value, 39, 5);
5218     scale = extract64(value, 44, 2);
5219     exponent = (5 * scale) + 1;
5220 
5221     ret.length = (num + 1) << (exponent + page_shift);
5222 
5223     if (param.select) {
5224         ret.base = sextract64(value, 0, 37);
5225     } else {
5226         ret.base = extract64(value, 0, 37);
5227     }
5228     if (param.ds) {
5229         /*
5230          * With DS=1, BaseADDR is always shifted left by 16 so that it can
5231          * address all 52 VA bits.  The input address is necessarily
5232          * aligned on a 64k boundary regardless of the translation granule.
5233          */
5234         page_shift = 16;
5235     }
5236     ret.base <<= page_shift;
5237 
5238     return ret;
5239 }
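
/*
 * Worked decode (illustrative): with a 4K granule (page_shift == 12),
 * NUM == 3 and SCALE == 1 give exponent == 5 * 1 + 1 == 6, so
 * ret.length == (3 + 1) << (6 + 12) == 0x100000 bytes, i.e. 256 pages
 * starting at BaseADDR << 12.
 */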
5240 
5241 static void do_rvae_write(CPUARMState *env, uint64_t value,
5242                           int idxmap, bool synced)
5243 {
5244     ARMMMUIdx one_idx = ARM_MMU_IDX_A | ctz32(idxmap);
5245     TLBIRange range;
5246     int bits;
5247 
5248     range = tlbi_aa64_get_range(env, one_idx, value);
5249     bits = tlbbits_for_regime(env, one_idx, range.base);
5250 
5251     if (synced) {
5252         tlb_flush_range_by_mmuidx_all_cpus_synced(env_cpu(env),
5253                                                   range.base,
5254                                                   range.length,
5255                                                   idxmap,
5256                                                   bits);
5257     } else {
5258         tlb_flush_range_by_mmuidx(env_cpu(env), range.base,
5259                                   range.length, idxmap, bits);
5260     }
5261 }
5262 
5263 static void tlbi_aa64_rvae1_write(CPUARMState *env,
5264                                   const ARMCPRegInfo *ri,
5265                                   uint64_t value)
5266 {
5267     /*
5268      * Invalidate by VA range, EL1&0.
5269      * Currently handles all of RVAE1, RVAAE1, RVAALE1 and RVALE1,
5270      * since we don't support flush-for-specific-ASID-only or
5271      * flush-last-level-only.
5272      */
5273 
5274     do_rvae_write(env, value, vae1_tlbmask(env),
5275                   tlb_force_broadcast(env));
5276 }
5277 
5278 static void tlbi_aa64_rvae1is_write(CPUARMState *env,
5279                                     const ARMCPRegInfo *ri,
5280                                     uint64_t value)
5281 {
5282     /*
5283      * Invalidate by VA range, Inner/Outer Shareable EL1&0.
5284      * Currently handles all of RVAE1IS, RVAE1OS, RVAAE1IS, RVAAE1OS,
5285      * RVAALE1IS, RVAALE1OS, RVALE1IS and RVALE1OS, since we don't support
5286      * flush-for-specific-ASID-only, flush-last-level-only or inner/outer
5287      * shareable specific flushes.
5288      */
5289 
5290     do_rvae_write(env, value, vae1_tlbmask(env), true);
5291 }
5292 
5293 static void tlbi_aa64_rvae2_write(CPUARMState *env,
5294                                   const ARMCPRegInfo *ri,
5295                                   uint64_t value)
5296 {
5297     /*
5298      * Invalidate by VA range, EL2.
5299      * Currently handles all of RVAE2 and RVALE2,
5300      * since we don't support flush-for-specific-ASID-only or
5301      * flush-last-level-only.
5302      */
5303 
5304     do_rvae_write(env, value, vae2_tlbmask(env),
5305                   tlb_force_broadcast(env));
5308 }
5309 
5310 static void tlbi_aa64_rvae2is_write(CPUARMState *env,
5311                                     const ARMCPRegInfo *ri,
5312                                     uint64_t value)
5313 {
5314     /*
5315      * Invalidate by VA range, Inner/Outer Shareable, EL2.
5316      * Currently handles all of RVAE2IS, RVAE2OS, RVALE2IS and RVALE2OS,
5317      * since we don't support flush-for-specific-ASID-only,
5318      * flush-last-level-only or inner/outer shareable specific flushes.
5319      */
5320 
5321     do_rvae_write(env, value, vae2_tlbmask(env), true);
5323 }
5324 
5325 static void tlbi_aa64_rvae3_write(CPUARMState *env,
5326                                   const ARMCPRegInfo *ri,
5327                                   uint64_t value)
5328 {
5329     /*
5330      * Invalidate by VA range, EL3.
5331      * Currently handles all of RVAE3 and RVALE3,
5332      * since we don't support flush-for-specific-ASID-only or
5333      * flush-last-level-only.
5334      */
5335 
5336     do_rvae_write(env, value, ARMMMUIdxBit_E3, tlb_force_broadcast(env));
5337 }
5338 
5339 static void tlbi_aa64_rvae3is_write(CPUARMState *env,
5340                                     const ARMCPRegInfo *ri,
5341                                     uint64_t value)
5342 {
5343     /*
5344      * Invalidate by VA range, EL3, Inner/Outer Shareable.
5345      * Currently handles all of RVAE3IS, RVAE3OS, RVALE3IS and RVALE3OS,
5346      * since we don't support flush-for-specific-ASID-only,
5347      * flush-last-level-only or inner/outer shareable specific flushes.
5348      */
5349 
5350     do_rvae_write(env, value, ARMMMUIdxBit_E3, true);
5351 }
5352 
5353 static void tlbi_aa64_ripas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
5354                                      uint64_t value)
5355 {
5356     do_rvae_write(env, value, ipas2e1_tlbmask(env, value),
5357                   tlb_force_broadcast(env));
5358 }
5359 
5360 static void tlbi_aa64_ripas2e1is_write(CPUARMState *env,
5361                                        const ARMCPRegInfo *ri,
5362                                        uint64_t value)
5363 {
5364     do_rvae_write(env, value, ipas2e1_tlbmask(env, value), true);
5365 }
5366 #endif
5367 
5368 static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
5369                                       bool isread)
5370 {
5371     int cur_el = arm_current_el(env);
5372 
5373     if (cur_el < 2) {
5374         uint64_t hcr = arm_hcr_el2_eff(env);
5375 
5376         if (cur_el == 0) {
5377             if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
5378                 if (!(env->cp15.sctlr_el[2] & SCTLR_DZE)) {
5379                     return CP_ACCESS_TRAP_EL2;
5380                 }
5381             } else {
5382                 if (!(env->cp15.sctlr_el[1] & SCTLR_DZE)) {
5383                     return CP_ACCESS_TRAP;
5384                 }
5385                 if (hcr & HCR_TDZ) {
5386                     return CP_ACCESS_TRAP_EL2;
5387                 }
5388             }
5389         } else if (hcr & HCR_TDZ) {
5390             return CP_ACCESS_TRAP_EL2;
5391         }
5392     }
5393     return CP_ACCESS_OK;
5394 }
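
/*
 * In summary: at EL0 the check depends on the regime in force
 * (SCTLR_EL2.DZE under HCR_EL2.{E2H,TGE} == {1,1}, otherwise
 * SCTLR_EL1.DZE plus a possible HCR_EL2.TDZ trap); at EL1 only
 * HCR_EL2.TDZ applies; accesses from EL2 and EL3 always succeed.
 */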
5395 
5396 static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri)
5397 {
5398     ARMCPU *cpu = env_archcpu(env);
5399     int dzp_bit = 1 << 4;
5400 
5401     /* DZP indicates whether DC ZVA access is allowed */
5402     if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) {
5403         dzp_bit = 0;
5404     }
5405     return cpu->dcz_blocksize | dzp_bit;
5406 }
5407 
5408 static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
5409                                     bool isread)
5410 {
5411     if (!(env->pstate & PSTATE_SP)) {
5412         /*
5413          * Access to SP_EL0 is undefined if it's being used as
5414          * the stack pointer.
5415          */
5416         return CP_ACCESS_TRAP_UNCATEGORIZED;
5417     }
5418     return CP_ACCESS_OK;
5419 }
5420 
5421 static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri)
5422 {
5423     return env->pstate & PSTATE_SP;
5424 }
5425 
5426 static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
5427 {
5428     update_spsel(env, val);
5429 }
5430 
5431 static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
5432                         uint64_t value)
5433 {
5434     ARMCPU *cpu = env_archcpu(env);
5435 
5436     if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) {
5437         /* M bit is RAZ/WI for PMSA with no MPU implemented */
5438         value &= ~SCTLR_M;
5439     }
5440 
5441     /* ??? Lots of these bits are not implemented.  */
5442 
5443     if (ri->state == ARM_CP_STATE_AA64 && !cpu_isar_feature(aa64_mte, cpu)) {
5444         if (ri->opc1 == 6) { /* SCTLR_EL3 */
5445             value &= ~(SCTLR_ITFSB | SCTLR_TCF | SCTLR_ATA);
5446         } else {
5447             value &= ~(SCTLR_ITFSB | SCTLR_TCF0 | SCTLR_TCF |
5448                        SCTLR_ATA0 | SCTLR_ATA);
5449         }
5450     }
5451 
5452     if (raw_read(env, ri) == value) {
5453         /*
5454          * Skip the TLB flush if nothing actually changed; Linux likes
5455          * to do a lot of pointless SCTLR writes.
5456          */
5457         return;
5458     }
5459 
5460     raw_write(env, ri, value);
5461 
5462     /* This may enable/disable the MMU, so do a TLB flush.  */
5463     tlb_flush(CPU(cpu));
5464 
5465     if (tcg_enabled() && ri->type & ARM_CP_SUPPRESS_TB_END) {
5466         /*
5467          * Normally we would always end the TB on an SCTLR write; see the
5468          * comment in ARMCPRegInfo sctlr initialization below for why Xscale
5469          * comment in ARMCPRegInfo sctlr initialization below for why XScale
5470          * of hflags from the translator, so do it here.
5471          */
5472         arm_rebuild_hflags(env);
5473     }
5474 }
5475 
5476 static void mdcr_el3_write(CPUARMState *env, const ARMCPRegInfo *ri,
5477                            uint64_t value)
5478 {
5479     /*
5480      * Some MDCR_EL3 bits affect whether PMU counters are running:
5481      * if we are trying to change any of those then we must
5482      * bracket this update with PMU start/finish calls.
5483      */
5484     bool pmu_op = (env->cp15.mdcr_el3 ^ value) & MDCR_EL3_PMU_ENABLE_BITS;
5485 
5486     if (pmu_op) {
5487         pmu_op_start(env);
5488     }
5489     env->cp15.mdcr_el3 = value;
5490     if (pmu_op) {
5491         pmu_op_finish(env);
5492     }
5493 }
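
/*
 * Roughly, the bracketing works like a read-modify-write on the
 * counters themselves: pmu_op_start() brings them up to date under the
 * old MDCR_EL3 configuration and pmu_op_finish() re-arms them under the
 * new one, so no cycles are attributed under the wrong settings.
 */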
5494 
5495 static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
5496                        uint64_t value)
5497 {
5498     /* Not all bits defined for MDCR_EL3 exist in the AArch32 SDCR */
5499     mdcr_el3_write(env, ri, value & SDCR_VALID_MASK);
5500 }
5501 
5502 static void mdcr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
5503                            uint64_t value)
5504 {
5505     /*
5506      * Some MDCR_EL2 bits affect whether PMU counters are running:
5507      * if we are trying to change any of those then we must
5508      * bracket this update with PMU start/finish calls.
5509      */
5510     bool pmu_op = (env->cp15.mdcr_el2 ^ value) & MDCR_EL2_PMU_ENABLE_BITS;
5511 
5512     if (pmu_op) {
5513         pmu_op_start(env);
5514     }
5515     env->cp15.mdcr_el2 = value;
5516     if (pmu_op) {
5517         pmu_op_finish(env);
5518     }
5519 }
5520 
5521 static CPAccessResult access_nv1(CPUARMState *env, const ARMCPRegInfo *ri,
5522                                  bool isread)
5523 {
5524     if (arm_current_el(env) == 1) {
5525         uint64_t hcr_nv = arm_hcr_el2_eff(env) & (HCR_NV | HCR_NV1 | HCR_NV2);
5526 
5527         if (hcr_nv == (HCR_NV | HCR_NV1)) {
5528             return CP_ACCESS_TRAP_EL2;
5529         }
5530     }
5531     return CP_ACCESS_OK;
5532 }
5533 
5534 #ifdef CONFIG_USER_ONLY
5535 /*
5536  * `IC IVAU` is handled to improve compatibility with JITs that dual-map their
5537  * code to get around W^X restrictions, where one region is writable and the
5538  * other is executable.
5539  *
5540  * Since the executable region is never written to we cannot detect code
5541  * changes when running in user mode, and rely on the emulated JIT telling us
5542  * that the code has changed by executing this instruction.
5543  */
5544 static void ic_ivau_write(CPUARMState *env, const ARMCPRegInfo *ri,
5545                           uint64_t value)
5546 {
5547     uint64_t icache_line_mask, start_address, end_address;
5548     const ARMCPU *cpu;
5549 
5550     cpu = env_archcpu(env);
5551 
5552     icache_line_mask = (4 << extract32(cpu->ctr, 0, 4)) - 1;
5553     start_address = value & ~icache_line_mask;
5554     end_address = value | icache_line_mask;
5555 
5556     mmap_lock();
5557 
5558     tb_invalidate_phys_range(start_address, end_address);
5559 
5560     mmap_unlock();
5561 }
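
/*
 * CTR_EL0.IminLine (bits [3:0]) is the log2 of the line size in words,
 * so "4 << IminLine" is the line size in bytes.  E.g. (illustrative)
 * IminLine == 4 gives 64-byte lines, icache_line_mask == 63, and a
 * write of VA 0x1234 invalidates translations for [0x1200, 0x123f].
 */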
5562 #endif
5563 
5564 static const ARMCPRegInfo v8_cp_reginfo[] = {
5565     /*
5566      * Minimal set of EL0-visible registers. This will need to be expanded
5567      * significantly for system emulation of AArch64 CPUs.
5568      */
5569     { .name = "NZCV", .state = ARM_CP_STATE_AA64,
5570       .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2,
5571       .access = PL0_RW, .type = ARM_CP_NZCV },
5572     { .name = "DAIF", .state = ARM_CP_STATE_AA64,
5573       .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2,
5574       .type = ARM_CP_NO_RAW,
5575       .access = PL0_RW, .accessfn = aa64_daif_access,
5576       .fieldoffset = offsetof(CPUARMState, daif),
5577       .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore },
5578     { .name = "FPCR", .state = ARM_CP_STATE_AA64,
5579       .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4,
5580       .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
5581       .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write },
5582     { .name = "FPSR", .state = ARM_CP_STATE_AA64,
5583       .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4,
5584       .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
5585       .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write },
5586     { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64,
5587       .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0,
5588       .access = PL0_R, .type = ARM_CP_NO_RAW,
5589       .fgt = FGT_DCZID_EL0,
5590       .readfn = aa64_dczid_read },
5591     { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64,
5592       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1,
5593       .access = PL0_W, .type = ARM_CP_DC_ZVA,
5594 #ifndef CONFIG_USER_ONLY
5595       /* Avoid overhead of an access check that always passes in user-mode */
5596       .accessfn = aa64_zva_access,
5597       .fgt = FGT_DCZVA,
5598 #endif
5599     },
5600     { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64,
5601       .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2,
5602       .access = PL1_R, .type = ARM_CP_CURRENTEL },
5603     /*
5604      * Instruction cache ops. All of these except `IC IVAU` are NOPs
5605      * because we don't emulate caches.
5606      */
5607     { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64,
5608       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
5609       .access = PL1_W, .type = ARM_CP_NOP,
5610       .fgt = FGT_ICIALLUIS,
5611       .accessfn = access_ticab },
5612     { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64,
5613       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
5614       .access = PL1_W, .type = ARM_CP_NOP,
5615       .fgt = FGT_ICIALLU,
5616       .accessfn = access_tocu },
5617     { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64,
5618       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1,
5619       .access = PL0_W,
5620       .fgt = FGT_ICIVAU,
5621       .accessfn = access_tocu,
5622 #ifdef CONFIG_USER_ONLY
5623       .type = ARM_CP_NO_RAW,
5624       .writefn = ic_ivau_write
5625 #else
5626       .type = ARM_CP_NOP
5627 #endif
5628     },
5629     /* Cache ops: all NOPs since we don't emulate caches */
5630     { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64,
5631       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
5632       .access = PL1_W, .accessfn = aa64_cacheop_poc_access,
5633       .fgt = FGT_DCIVAC,
5634       .type = ARM_CP_NOP },
5635     { .name = "DC_ISW", .state = ARM_CP_STATE_AA64,
5636       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
5637       .fgt = FGT_DCISW,
5638       .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
5639     { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64,
5640       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1,
5641       .access = PL0_W, .type = ARM_CP_NOP,
5642       .fgt = FGT_DCCVAC,
5643       .accessfn = aa64_cacheop_poc_access },
5644     { .name = "DC_CSW", .state = ARM_CP_STATE_AA64,
5645       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
5646       .fgt = FGT_DCCSW,
5647       .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
5648     { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64,
5649       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1,
5650       .access = PL0_W, .type = ARM_CP_NOP,
5651       .fgt = FGT_DCCVAU,
5652       .accessfn = access_tocu },
5653     { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64,
5654       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1,
5655       .access = PL0_W, .type = ARM_CP_NOP,
5656       .fgt = FGT_DCCIVAC,
5657       .accessfn = aa64_cacheop_poc_access },
5658     { .name = "DC_CISW", .state = ARM_CP_STATE_AA64,
5659       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
5660       .fgt = FGT_DCCISW,
5661       .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
5662     /* TLBI operations */
5663     { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64,
5664       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
5665       .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW,
5666       .fgt = FGT_TLBIVMALLE1IS,
5667       .writefn = tlbi_aa64_vmalle1is_write },
5668     { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64,
5669       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
5670       .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW,
5671       .fgt = FGT_TLBIVAE1IS,
5672       .writefn = tlbi_aa64_vae1is_write },
5673     { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64,
5674       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
5675       .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW,
5676       .fgt = FGT_TLBIASIDE1IS,
5677       .writefn = tlbi_aa64_vmalle1is_write },
5678     { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64,
5679       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
5680       .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW,
5681       .fgt = FGT_TLBIVAAE1IS,
5682       .writefn = tlbi_aa64_vae1is_write },
5683     { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64,
5684       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
5685       .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW,
5686       .fgt = FGT_TLBIVALE1IS,
5687       .writefn = tlbi_aa64_vae1is_write },
5688     { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64,
5689       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
5690       .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW,
5691       .fgt = FGT_TLBIVAALE1IS,
5692       .writefn = tlbi_aa64_vae1is_write },
5693     { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64,
5694       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
5695       .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
5696       .fgt = FGT_TLBIVMALLE1,
5697       .writefn = tlbi_aa64_vmalle1_write },
5698     { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64,
5699       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
5700       .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
5701       .fgt = FGT_TLBIVAE1,
5702       .writefn = tlbi_aa64_vae1_write },
5703     { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64,
5704       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
5705       .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
5706       .fgt = FGT_TLBIASIDE1,
5707       .writefn = tlbi_aa64_vmalle1_write },
5708     { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64,
5709       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
5710       .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
5711       .fgt = FGT_TLBIVAAE1,
5712       .writefn = tlbi_aa64_vae1_write },
5713     { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64,
5714       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
5715       .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
5716       .fgt = FGT_TLBIVALE1,
5717       .writefn = tlbi_aa64_vae1_write },
5718     { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64,
5719       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
5720       .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
5721       .fgt = FGT_TLBIVAALE1,
5722       .writefn = tlbi_aa64_vae1_write },
5723     { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64,
5724       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
5725       .access = PL2_W, .type = ARM_CP_NO_RAW,
5726       .writefn = tlbi_aa64_ipas2e1is_write },
5727     { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64,
5728       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
5729       .access = PL2_W, .type = ARM_CP_NO_RAW,
5730       .writefn = tlbi_aa64_ipas2e1is_write },
5731     { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64,
5732       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
5733       .access = PL2_W, .type = ARM_CP_NO_RAW,
5734       .writefn = tlbi_aa64_alle1is_write },
5735     { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64,
5736       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6,
5737       .access = PL2_W, .type = ARM_CP_NO_RAW,
5738       .writefn = tlbi_aa64_alle1is_write },
5739     { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64,
5740       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
5741       .access = PL2_W, .type = ARM_CP_NO_RAW,
5742       .writefn = tlbi_aa64_ipas2e1_write },
5743     { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64,
5744       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
5745       .access = PL2_W, .type = ARM_CP_NO_RAW,
5746       .writefn = tlbi_aa64_ipas2e1_write },
5747     { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64,
5748       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
5749       .access = PL2_W, .type = ARM_CP_NO_RAW,
5750       .writefn = tlbi_aa64_alle1_write },
5751     { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64,
5752       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6,
5753       .access = PL2_W, .type = ARM_CP_NO_RAW,
5754       .writefn = tlbi_aa64_alle1is_write },
5755 #ifndef CONFIG_USER_ONLY
5756     /* 64 bit address translation operations */
5757     { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
5758       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0,
5759       .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
5760       .fgt = FGT_ATS1E1R,
5761       .accessfn = at_s1e01_access, .writefn = ats_write64 },
5762     { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
5763       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1,
5764       .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
5765       .fgt = FGT_ATS1E1W,
5766       .accessfn = at_s1e01_access, .writefn = ats_write64 },
5767     { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64,
5768       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2,
5769       .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
5770       .fgt = FGT_ATS1E0R,
5771       .accessfn = at_s1e01_access, .writefn = ats_write64 },
5772     { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
5773       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
5774       .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
5775       .fgt = FGT_ATS1E0W,
5776       .accessfn = at_s1e01_access, .writefn = ats_write64 },
5777     { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64,
5778       .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4,
5779       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
5780       .accessfn = at_e012_access, .writefn = ats_write64 },
5781     { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64,
5782       .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5,
5783       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
5784       .accessfn = at_e012_access, .writefn = ats_write64 },
5785     { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64,
5786       .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6,
5787       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
5788       .accessfn = at_e012_access, .writefn = ats_write64 },
5789     { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64,
5790       .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7,
5791       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
5792       .accessfn = at_e012_access, .writefn = ats_write64 },
5793     /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */
5794     { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64,
5795       .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0,
5796       .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
5797       .writefn = ats_write64 },
5798     { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64,
5799       .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1,
5800       .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
5801       .writefn = ats_write64 },
5802     { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64,
5803       .type = ARM_CP_ALIAS,
5804       .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0,
5805       .access = PL1_RW, .resetvalue = 0,
5806       .fgt = FGT_PAR_EL1,
5807       .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]),
5808       .writefn = par_write },
5809 #endif
5810     /* TLB invalidate last level of translation table walk */
5811     { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
5812       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis,
5813       .writefn = tlbimva_is_write },
5814     { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
5815       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis,
5816       .writefn = tlbimvaa_is_write },
5817     { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
5818       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
5819       .writefn = tlbimva_write },
5820     { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
5821       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
5822       .writefn = tlbimvaa_write },
5823     { .name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
5824       .type = ARM_CP_NO_RAW, .access = PL2_W,
5825       .writefn = tlbimva_hyp_write },
5826     { .name = "TLBIMVALHIS",
5827       .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
5828       .type = ARM_CP_NO_RAW, .access = PL2_W,
5829       .writefn = tlbimva_hyp_is_write },
5830     { .name = "TLBIIPAS2",
5831       .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
5832       .type = ARM_CP_NO_RAW, .access = PL2_W,
5833       .writefn = tlbiipas2_hyp_write },
5834     { .name = "TLBIIPAS2IS",
5835       .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
5836       .type = ARM_CP_NO_RAW, .access = PL2_W,
5837       .writefn = tlbiipas2is_hyp_write },
5838     { .name = "TLBIIPAS2L",
5839       .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
5840       .type = ARM_CP_NO_RAW, .access = PL2_W,
5841       .writefn = tlbiipas2_hyp_write },
5842     { .name = "TLBIIPAS2LIS",
5843       .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
5844       .type = ARM_CP_NO_RAW, .access = PL2_W,
5845       .writefn = tlbiipas2is_hyp_write },
5846     /* 32 bit cache operations */
5847     { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
5848       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_ticab },
5849     { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6,
5850       .type = ARM_CP_NOP, .access = PL1_W },
5851     { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
5852       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tocu },
5853     { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1,
5854       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tocu },
5855     { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6,
5856       .type = ARM_CP_NOP, .access = PL1_W },
5857     { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7,
5858       .type = ARM_CP_NOP, .access = PL1_W },
5859     { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
5860       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
5861     { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
5862       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
5863     { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1,
5864       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
5865     { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
5866       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
5867     { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1,
5868       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tocu },
5869     { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1,
5870       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
5871     { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
5872       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
5873     /* MMU Domain access control / MPU write buffer control */
5874     { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0,
5875       .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
5876       .writefn = dacr_write, .raw_writefn = raw_write,
5877       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
5878                              offsetoflow32(CPUARMState, cp15.dacr_ns) } },
5879     { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
5880       .type = ARM_CP_ALIAS,
5881       .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
5882       .access = PL1_RW, .accessfn = access_nv1,
5883       .nv2_redirect_offset = 0x230 | NV2_REDIR_NV1,
5884       .fieldoffset = offsetof(CPUARMState, elr_el[1]) },
5885     { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
5886       .type = ARM_CP_ALIAS,
5887       .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
5888       .access = PL1_RW, .accessfn = access_nv1,
5889       .nv2_redirect_offset = 0x160 | NV2_REDIR_NV1,
5890       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) },
5891     /*
5892      * We rely on the access checks not allowing the guest to write to the
5893      * state field when SPSel indicates that it's being used as the stack
5894      * pointer.
5895      */
5896     { .name = "SP_EL0", .state = ARM_CP_STATE_AA64,
5897       .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0,
5898       .access = PL1_RW, .accessfn = sp_el0_access,
5899       .type = ARM_CP_ALIAS,
5900       .fieldoffset = offsetof(CPUARMState, sp_el[0]) },
5901     { .name = "SP_EL1", .state = ARM_CP_STATE_AA64,
5902       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0,
5903       .nv2_redirect_offset = 0x240,
5904       .access = PL2_RW, .type = ARM_CP_ALIAS | ARM_CP_EL3_NO_EL2_KEEP,
5905       .fieldoffset = offsetof(CPUARMState, sp_el[1]) },
5906     { .name = "SPSel", .state = ARM_CP_STATE_AA64,
5907       .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0,
5908       .type = ARM_CP_NO_RAW,
5909       .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
5910     { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64,
5911       .type = ARM_CP_ALIAS,
5912       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0,
5913       .access = PL2_RW,
5914       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) },
5915     { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64,
5916       .type = ARM_CP_ALIAS,
5917       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1,
5918       .access = PL2_RW,
5919       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) },
5920     { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64,
5921       .type = ARM_CP_ALIAS,
5922       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2,
5923       .access = PL2_RW,
5924       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) },
5925     { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64,
5926       .type = ARM_CP_ALIAS,
5927       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3,
5928       .access = PL2_RW,
5929       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) },
5930     { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64,
5931       .type = ARM_CP_IO,
5932       .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1,
5933       .resetvalue = 0,
5934       .access = PL3_RW,
5935       .writefn = mdcr_el3_write,
5936       .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) },
5937     { .name = "SDCR", .type = ARM_CP_ALIAS | ARM_CP_IO,
5938       .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1,
5939       .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
5940       .writefn = sdcr_write,
5941       .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) },
5942 };
5943 
5944 /* These are present only when EL1 supports AArch32 */
5945 static const ARMCPRegInfo v8_aa32_el1_reginfo[] = {
5946     { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64,
5947       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0,
5948       .access = PL2_RW,
5949       .type = ARM_CP_ALIAS | ARM_CP_FPU | ARM_CP_EL3_NO_EL2_KEEP,
5950       .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]) },
5951     { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64,
5952       .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0,
5953       .access = PL2_RW, .resetvalue = 0, .type = ARM_CP_EL3_NO_EL2_KEEP,
5954       .writefn = dacr_write, .raw_writefn = raw_write,
5955       .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) },
5956     { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64,
5957       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1,
5958       .access = PL2_RW, .resetvalue = 0, .type = ARM_CP_EL3_NO_EL2_KEEP,
5959       .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) },
5960 };
5961 
5962 static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask)
5963 {
5964     ARMCPU *cpu = env_archcpu(env);
5965 
5966     if (arm_feature(env, ARM_FEATURE_V8)) {
5967         valid_mask |= MAKE_64BIT_MASK(0, 34);  /* ARMv8.0 */
5968     } else {
5969         valid_mask |= MAKE_64BIT_MASK(0, 28);  /* ARMv7VE */
5970     }
5971 
5972     if (arm_feature(env, ARM_FEATURE_EL3)) {
5973         valid_mask &= ~HCR_HCD;
5974     } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
5975         /*
5976          * Architecturally HCR.TSC is RES0 if EL3 is not implemented.
5977          * However, if we're using the SMC PSCI conduit then QEMU is
5978          * effectively acting like EL3 firmware and so the guest at
5979          * EL2 should retain the ability to prevent EL1 from being
5980          * able to make SMC calls into the ersatz firmware, so in
5981          * that case HCR.TSC should be read/write.
5982          */
5983         valid_mask &= ~HCR_TSC;
5984     }
5985 
5986     if (arm_feature(env, ARM_FEATURE_AARCH64)) {
5987         if (cpu_isar_feature(aa64_vh, cpu)) {
5988             valid_mask |= HCR_E2H;
5989         }
5990         if (cpu_isar_feature(aa64_ras, cpu)) {
5991             valid_mask |= HCR_TERR | HCR_TEA;
5992         }
5993         if (cpu_isar_feature(aa64_lor, cpu)) {
5994             valid_mask |= HCR_TLOR;
5995         }
5996         if (cpu_isar_feature(aa64_pauth, cpu)) {
5997             valid_mask |= HCR_API | HCR_APK;
5998         }
5999         if (cpu_isar_feature(aa64_mte, cpu)) {
6000             valid_mask |= HCR_ATA | HCR_DCT | HCR_TID5;
6001         }
6002         if (cpu_isar_feature(aa64_scxtnum, cpu)) {
6003             valid_mask |= HCR_ENSCXT;
6004         }
6005         if (cpu_isar_feature(aa64_fwb, cpu)) {
6006             valid_mask |= HCR_FWB;
6007         }
6008         if (cpu_isar_feature(aa64_rme, cpu)) {
6009             valid_mask |= HCR_GPF;
6010         }
6011         if (cpu_isar_feature(aa64_nv, cpu)) {
6012             valid_mask |= HCR_NV | HCR_NV1 | HCR_AT;
6013         }
6014         if (cpu_isar_feature(aa64_nv2, cpu)) {
6015             valid_mask |= HCR_NV2;
6016         }
6017     }
6018 
6019     if (cpu_isar_feature(any_evt, cpu)) {
6020         valid_mask |= HCR_TTLBIS | HCR_TTLBOS | HCR_TICAB | HCR_TOCU | HCR_TID4;
6021     } else if (cpu_isar_feature(any_half_evt, cpu)) {
6022         valid_mask |= HCR_TICAB | HCR_TOCU | HCR_TID4;
6023     }
6024 
6025     /* Clear RES0 bits.  */
6026     value &= valid_mask;
6027 
6028     /*
6029      * These bits change the MMU setup:
6030      * HCR_VM enables stage 2 translation
6031      * HCR_PTW forbids certain page-table setups
6032      * HCR_DC disables stage1 and enables stage2 translation
6033      * HCR_DCT enables tagging on (disabled) stage1 translation
6034      * HCR_FWB changes the interpretation of stage2 descriptor bits
6035      * HCR_NV and HCR_NV1 affect interpretation of descriptor bits
6036      */
6037     if ((env->cp15.hcr_el2 ^ value) &
6038         (HCR_VM | HCR_PTW | HCR_DC | HCR_DCT | HCR_FWB | HCR_NV | HCR_NV1)) {
6039         tlb_flush(CPU(cpu));
6040     }
6041     env->cp15.hcr_el2 = value;
6042 
6043     /*
6044      * Updates to VI and VF require us to update the status of
6045      * virtual interrupts, which are the logical OR of these bits
6046      * and the state of the input lines from the GIC. (This requires
6047      * that we have the BQL, which is done by marking the
6048      * reginfo structs as ARM_CP_IO.)
6049      * Note that if a write to HCR pends a VIRQ or VFIQ it is never
6050      * possible for it to be taken immediately, because VIRQ and
6051      * VFIQ are masked unless running at EL0 or EL1, and HCR
6052      * can only be written at EL2.
6053      */
6054     g_assert(bql_locked());
6055     arm_cpu_update_virq(cpu);
6056     arm_cpu_update_vfiq(cpu);
6057     arm_cpu_update_vserr(cpu);
6058 }
6059 
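/*
 * Illustrative sketch (hypothetical helper, not used by the emulation):
 * the flush test in do_hcr_write() only fires when one of the bits that
 * change the MMU view actually toggles.  For example, flipping HCR_TSC
 * alone must not flush the TLB, while flipping HCR_VM must.
 */
static bool G_GNUC_UNUSED hcr_change_needs_tlb_flush(uint64_t old_hcr,
                                                     uint64_t new_hcr)
{
    return ((old_hcr ^ new_hcr) &
            (HCR_VM | HCR_PTW | HCR_DC | HCR_DCT | HCR_FWB |
             HCR_NV | HCR_NV1)) != 0;
}
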
6060 static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
6061 {
6062     do_hcr_write(env, value, 0);
6063 }
6064 
6065 static void hcr_writehigh(CPUARMState *env, const ARMCPRegInfo *ri,
6066                           uint64_t value)
6067 {
6068     /* Handle HCR2 write, i.e. write to high half of HCR_EL2 */
6069     value = deposit64(env->cp15.hcr_el2, 32, 32, value);
6070     do_hcr_write(env, value, MAKE_64BIT_MASK(0, 32));
6071 }
6072 
6073 static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri,
6074                          uint64_t value)
6075 {
6076     /* Handle HCR write, i.e. write to low half of HCR_EL2 */
6077     value = deposit64(env->cp15.hcr_el2, 0, 32, value);
6078     do_hcr_write(env, value, MAKE_64BIT_MASK(32, 32));
6079 }
6080 
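/*
 * Illustrative sketch (hypothetical helper, not part of the emulation):
 * the AArch32 HCR and HCR2 registers are simply the two 32-bit halves
 * of HCR_EL2, spliced in with deposit64() before the common
 * do_hcr_write() path.  E.g. merging 0x5 into the low half of
 * 0xffffffff00000000 yields 0xffffffff00000005.
 */
static uint64_t G_GNUC_UNUSED hcr_merge_low_half(uint64_t old_hcr,
                                                 uint32_t aa32_value)
{
    /* Same merge step as hcr_writelow(): replace bits [31:0] only. */
    return deposit64(old_hcr, 0, 32, aa32_value);
}
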
6081 /*
6082  * Return the effective value of HCR_EL2, at the given security state.
6083  * Bits that are not included here:
6084  * RW       (read from SCR_EL3.RW as needed)
6085  */
6086 uint64_t arm_hcr_el2_eff_secstate(CPUARMState *env, ARMSecuritySpace space)
6087 {
6088     uint64_t ret = env->cp15.hcr_el2;
6089 
6090     assert(space != ARMSS_Root);
6091 
6092     if (!arm_is_el2_enabled_secstate(env, space)) {
6093         /*
6094          * "This register has no effect if EL2 is not enabled in the
6095          * current Security state".  This is ARMv8.4-SecEL2 speak for
6096          * !(SCR_EL3.NS==1 || SCR_EL3.EEL2==1).
6097          *
6098          * Prior to that, the language was "In an implementation that
6099          * includes EL3, when the value of SCR_EL3.NS is 0 the PE behaves
6100          * as if this field is 0 for all purposes other than a direct
6101          * read or write access of HCR_EL2".  With lots of enumeration
6102          * on a per-field basis.  In current QEMU, this condition
6103          * is arm_is_secure_below_el3.
6104          *
6105          * Since the v8.4 language applies to the entire register, and
6106          * appears to be backward compatible, use that.
6107          */
6108         return 0;
6109     }
6110 
6111     /*
6112      * For a cpu that supports both aarch64 and aarch32, we can set bits
6113      * in HCR_EL2 (e.g. via EL3) that are RES0 when we enter EL2 as aa32.
6114      * Ignore all of the bits in HCR+HCR2 that are not valid for aarch32.
6115      */
6116     if (!arm_el_is_aa64(env, 2)) {
6117         uint64_t aa32_valid;
6118 
6119         /*
6120          * These bits are up-to-date as of ARMv8.6.
6121          * For HCR, it's easiest to list just the 2 bits that are invalid.
6122          * For HCR2, list those that are valid.
6123          */
6124         aa32_valid = MAKE_64BIT_MASK(0, 32) & ~(HCR_RW | HCR_TDZ);
6125         aa32_valid |= (HCR_CD | HCR_ID | HCR_TERR | HCR_TEA | HCR_MIOCNCE |
6126                        HCR_TID4 | HCR_TICAB | HCR_TOCU | HCR_TTLBIS);
6127         ret &= aa32_valid;
6128     }
6129 
6130     if (ret & HCR_TGE) {
6131         /* These bits are up-to-date as of ARMv8.6.  */
6132         if (ret & HCR_E2H) {
6133             ret &= ~(HCR_VM | HCR_FMO | HCR_IMO | HCR_AMO |
6134                      HCR_BSU_MASK | HCR_DC | HCR_TWI | HCR_TWE |
6135                      HCR_TID0 | HCR_TID2 | HCR_TPCP | HCR_TPU |
6136                      HCR_TDZ | HCR_CD | HCR_ID | HCR_MIOCNCE |
6137                      HCR_TID4 | HCR_TICAB | HCR_TOCU | HCR_ENSCXT |
6138                      HCR_TTLBIS | HCR_TTLBOS | HCR_TID5);
6139         } else {
6140             ret |= HCR_FMO | HCR_IMO | HCR_AMO;
6141         }
6142         ret &= ~(HCR_SWIO | HCR_PTW | HCR_VF | HCR_VI | HCR_VSE |
6143                  HCR_FB | HCR_TID1 | HCR_TID3 | HCR_TSC | HCR_TACR |
6144                  HCR_TSW | HCR_TTLB | HCR_TVM | HCR_HCD | HCR_TRVM |
6145                  HCR_TLOR);
6146     }
6147 
6148     return ret;
6149 }
6150 
6151 uint64_t arm_hcr_el2_eff(CPUARMState *env)
6152 {
6153     if (arm_feature(env, ARM_FEATURE_M)) {
6154         return 0;
6155     }
6156     return arm_hcr_el2_eff_secstate(env, arm_security_space_below_el3(env));
6157 }
6158 
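/*
 * Illustrative usage sketch (hypothetical helper, not used): because
 * arm_hcr_el2_eff() folds in the TGE overrides above, a caller asking
 * "are physical IRQs routed to EL2?" sees IMO as 1 whenever TGE is set
 * with E2H clear, regardless of the raw HCR_EL2.IMO value.
 */
static bool G_GNUC_UNUSED irqs_routed_to_el2(CPUARMState *env)
{
    return (arm_hcr_el2_eff(env) & HCR_IMO) != 0;
}
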
6159 /*
6160  * Corresponds to ARM pseudocode function ELIsInHost().
6161  */
6162 bool el_is_in_host(CPUARMState *env, int el)
6163 {
6164     uint64_t mask;
6165 
6166     /*
6167      * Since we only care about E2H and TGE, we can skip arm_hcr_el2_eff().
6168      * Perform the simplest bit tests first, and validate EL2 afterward.
6169      */
6170     if (el & 1) {
6171         return false; /* EL1 or EL3 */
6172     }
6173 
6174     /*
6175      * Note that hcr_write() checks isar_feature_aa64_vh(),
6176      * aka HaveVirtHostExt(), in allowing HCR_E2H to be set.
6177      */
6178     mask = el ? HCR_E2H : HCR_E2H | HCR_TGE;
6179     if ((env->cp15.hcr_el2 & mask) != mask) {
6180         return false;
6181     }
6182 
6183     /* TGE and/or E2H set: double check those bits are currently legal. */
6184     return arm_is_el2_enabled(env) && arm_el_is_aa64(env, 2);
6185 }
6186 
6187 static void hcrx_write(CPUARMState *env, const ARMCPRegInfo *ri,
6188                        uint64_t value)
6189 {
6190     uint64_t valid_mask = 0;
6191 
6192     /* FEAT_MOPS adds MSCEn and MCE2 */
6193     if (cpu_isar_feature(aa64_mops, env_archcpu(env))) {
6194         valid_mask |= HCRX_MSCEN | HCRX_MCE2;
6195     }
6196 
6197     /* Clear RES0 bits.  */
6198     env->cp15.hcrx_el2 = value & valid_mask;
6199 }
6200 
6201 static CPAccessResult access_hxen(CPUARMState *env, const ARMCPRegInfo *ri,
6202                                   bool isread)
6203 {
6204     if (arm_current_el(env) == 2
6205         && arm_feature(env, ARM_FEATURE_EL3)
6206         && !(env->cp15.scr_el3 & SCR_HXEN)) {
6207         return CP_ACCESS_TRAP_EL3;
6208     }
6209     return CP_ACCESS_OK;
6210 }
6211 
6212 static const ARMCPRegInfo hcrx_el2_reginfo = {
6213     .name = "HCRX_EL2", .state = ARM_CP_STATE_AA64,
6214     .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 2,
6215     .access = PL2_RW, .writefn = hcrx_write, .accessfn = access_hxen,
6216     .nv2_redirect_offset = 0xa0,
6217     .fieldoffset = offsetof(CPUARMState, cp15.hcrx_el2),
6218 };
6219 
6220 /* Return the effective value of HCRX_EL2.  */
6221 uint64_t arm_hcrx_el2_eff(CPUARMState *env)
6222 {
6223     /*
6224      * The bits in this register behave as 0 for all purposes other than
6225      * direct reads of the register if SCR_EL3.HXEn is 0.
6226      * If EL2 is not enabled in the current security state, then the
6227      * bit may behave as if 0, or as if 1, depending on the bit.
6228      * For the moment, we treat the EL2-disabled case as taking
6229      * priority over the HXEn-disabled case. This matches the only
6230      * bit we currently implement where the answer differs between
6231      * the two cases (MSCEn for FEAT_MOPS).
6232      * This may need to be revisited for future bits.
6233      */
6234     if (!arm_is_el2_enabled(env)) {
6235         uint64_t hcrx = 0;
6236         if (cpu_isar_feature(aa64_mops, env_archcpu(env))) {
6237             /* MSCEn behaves as 1 if EL2 is not enabled */
6238             hcrx |= HCRX_MSCEN;
6239         }
6240         return hcrx;
6241     }
6242     if (arm_feature(env, ARM_FEATURE_EL3) && !(env->cp15.scr_el3 & SCR_HXEN)) {
6243         return 0;
6244     }
6245     return env->cp15.hcrx_el2;
6246 }
6247 
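/*
 * Illustrative usage sketch (hypothetical helper, not used): FEAT_MOPS
 * code asks for the effective MSCEn bit through arm_hcrx_el2_eff(), so
 * it automatically picks up all three cases above: EL2 disabled
 * (behaves as 1), SCR_EL3.HXEn clear (behaves as 0), or the real
 * HCRX_EL2.MSCEN value.
 */
static bool G_GNUC_UNUSED mops_mscen_effective(CPUARMState *env)
{
    return (arm_hcrx_el2_eff(env) & HCRX_MSCEN) != 0;
}
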
6248 static void cptr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
6249                            uint64_t value)
6250 {
6251     /*
6252      * For A-profile AArch32 EL3, if NSACR.CP10
6253      * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
6254      */
6255     if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
6256         !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
6257         uint64_t mask = R_HCPTR_TCP11_MASK | R_HCPTR_TCP10_MASK;
6258         value = (value & ~mask) | (env->cp15.cptr_el[2] & mask);
6259     }
6260     env->cp15.cptr_el[2] = value;
6261 }
6262 
6263 static uint64_t cptr_el2_read(CPUARMState *env, const ARMCPRegInfo *ri)
6264 {
6265     /*
6266      * For A-profile AArch32 EL3, if NSACR.CP10
6267      * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
6268      */
6269     uint64_t value = env->cp15.cptr_el[2];
6270 
6271     if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
6272         !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
6273         value |= R_HCPTR_TCP11_MASK | R_HCPTR_TCP10_MASK;
6274     }
6275     return value;
6276 }
6277 
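/*
 * Illustrative sketch (hypothetical helper, not used): in the AArch32
 * EL3, NSACR.CP10 == 0 case described above, TCP10/TCP11 still read
 * back as 1 even after an attempt to clear the whole register.
 */
static bool G_GNUC_UNUSED hcptr_tcp_bits_stuck(CPUARMState *env,
                                               const ARMCPRegInfo *ri)
{
    cptr_el2_write(env, ri, 0);
    return (cptr_el2_read(env, ri) &
            (R_HCPTR_TCP11_MASK | R_HCPTR_TCP10_MASK)) != 0;
}
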
6278 static const ARMCPRegInfo el2_cp_reginfo[] = {
6279     { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
6280       .type = ARM_CP_IO,
6281       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
6282       .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
6283       .nv2_redirect_offset = 0x78,
6284       .writefn = hcr_write, .raw_writefn = raw_write },
6285     { .name = "HCR", .state = ARM_CP_STATE_AA32,
6286       .type = ARM_CP_ALIAS | ARM_CP_IO,
6287       .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
6288       .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
6289       .writefn = hcr_writelow },
6290     { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH,
6291       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7,
6292       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
6293     { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
6294       .type = ARM_CP_ALIAS | ARM_CP_NV2_REDIRECT,
6295       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
6296       .access = PL2_RW,
6297       .fieldoffset = offsetof(CPUARMState, elr_el[2]) },
6298     { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
6299       .type = ARM_CP_NV2_REDIRECT,
6300       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
6301       .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) },
6302     { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
6303       .type = ARM_CP_NV2_REDIRECT,
6304       .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
6305       .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) },
6306     { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
6307       .type = ARM_CP_ALIAS,
6308       .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
6309       .access = PL2_RW,
6310       .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el[2]) },
6311     { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
6312       .type = ARM_CP_ALIAS | ARM_CP_NV2_REDIRECT,
6313       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
6314       .access = PL2_RW,
6315       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) },
6316     { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
6317       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
6318       .access = PL2_RW, .writefn = vbar_write,
6319       .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]),
6320       .resetvalue = 0 },
6321     { .name = "SP_EL2", .state = ARM_CP_STATE_AA64,
6322       .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0,
6323       .access = PL3_RW, .type = ARM_CP_ALIAS,
6324       .fieldoffset = offsetof(CPUARMState, sp_el[2]) },
6325     { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
6326       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
6327       .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0,
6328       .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]),
6329       .readfn = cptr_el2_read, .writefn = cptr_el2_write },
6330     { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
6331       .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
6332       .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]),
6333       .resetvalue = 0 },
6334     { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
6335       .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
6336       .access = PL2_RW, .type = ARM_CP_ALIAS,
6337       .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) },
6338     { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
6339       .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
6340       .access = PL2_RW, .type = ARM_CP_CONST,
6341       .resetvalue = 0 },
6342     /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */
6343     { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
6344       .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
6345       .access = PL2_RW, .type = ARM_CP_CONST,
6346       .resetvalue = 0 },
6347     { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
6348       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
6349       .access = PL2_RW, .type = ARM_CP_CONST,
6350       .resetvalue = 0 },
6351     { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
6352       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
6353       .access = PL2_RW, .type = ARM_CP_CONST,
6354       .resetvalue = 0 },
6355     { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
6356       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
6357       .access = PL2_RW, .writefn = vmsa_tcr_el12_write,
6358       .raw_writefn = raw_write,
6359       .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) },
6360     { .name = "VTCR", .state = ARM_CP_STATE_AA32,
6361       .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
6362       .type = ARM_CP_ALIAS,
6363       .access = PL2_RW, .accessfn = access_el3_aa32ns,
6364       .fieldoffset = offsetoflow32(CPUARMState, cp15.vtcr_el2) },
6365     { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64,
6366       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
6367       .access = PL2_RW,
6368       .nv2_redirect_offset = 0x40,
6369       /* no .writefn needed as this can't cause an ASID change */
6370       .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
6371     { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
6372       .cp = 15, .opc1 = 6, .crm = 2,
6373       .type = ARM_CP_64BIT | ARM_CP_ALIAS,
6374       .access = PL2_RW, .accessfn = access_el3_aa32ns,
6375       .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2),
6376       .writefn = vttbr_write, .raw_writefn = raw_write },
6377     { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
6378       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
6379       .access = PL2_RW, .writefn = vttbr_write, .raw_writefn = raw_write,
6380       .nv2_redirect_offset = 0x20,
6381       .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) },
6382     { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
6383       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
6384       .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write,
6385       .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) },
6386     { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
6387       .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
6388       .access = PL2_RW, .resetvalue = 0,
6389       .nv2_redirect_offset = 0x90,
6390       .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) },
6391     { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
6392       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
6393       .access = PL2_RW, .resetvalue = 0,
6394       .writefn = vmsa_tcr_ttbr_el2_write, .raw_writefn = raw_write,
6395       .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
6396     { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
6397       .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
6398       .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
6399     { .name = "TLBIALLNSNH",
6400       .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
6401       .type = ARM_CP_NO_RAW, .access = PL2_W,
6402       .writefn = tlbiall_nsnh_write },
6403     { .name = "TLBIALLNSNHIS",
6404       .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
6405       .type = ARM_CP_NO_RAW, .access = PL2_W,
6406       .writefn = tlbiall_nsnh_is_write },
6407     { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
6408       .type = ARM_CP_NO_RAW, .access = PL2_W,
6409       .writefn = tlbiall_hyp_write },
6410     { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
6411       .type = ARM_CP_NO_RAW, .access = PL2_W,
6412       .writefn = tlbiall_hyp_is_write },
6413     { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
6414       .type = ARM_CP_NO_RAW, .access = PL2_W,
6415       .writefn = tlbimva_hyp_write },
6416     { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
6417       .type = ARM_CP_NO_RAW, .access = PL2_W,
6418       .writefn = tlbimva_hyp_is_write },
6419     { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64,
6420       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
6421       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
6422       .writefn = tlbi_aa64_alle2_write },
6423     { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64,
6424       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
6425       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
6426       .writefn = tlbi_aa64_vae2_write },
6427     { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64,
6428       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
6429       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
6430       .writefn = tlbi_aa64_vae2_write },
6431     { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64,
6432       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
6433       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
6434       .writefn = tlbi_aa64_alle2is_write },
6435     { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64,
6436       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
6437       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
6438       .writefn = tlbi_aa64_vae2is_write },
6439     { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64,
6440       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
6441       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
6442       .writefn = tlbi_aa64_vae2is_write },
6443 #ifndef CONFIG_USER_ONLY
6444     /*
6445      * Unlike the other EL2-related AT operations, these must
6446      * UNDEF from EL3 if EL2 is not implemented, which is why we
6447      * define them here rather than with the rest of the AT ops.
6448      */
6449     { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64,
6450       .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
6451       .access = PL2_W, .accessfn = at_s1e2_access,
6452       .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC | ARM_CP_EL3_NO_EL2_UNDEF,
6453       .writefn = ats_write64 },
6454     { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64,
6455       .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
6456       .access = PL2_W, .accessfn = at_s1e2_access,
6457       .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC | ARM_CP_EL3_NO_EL2_UNDEF,
6458       .writefn = ats_write64 },
6459     /*
6460      * The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE
6461      * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3
6462      * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose
6463      * to behave as if SCR.NS was 1.
6464      */
6465     { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
6466       .access = PL2_W,
6467       .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
6468     { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
6469       .access = PL2_W,
6470       .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
6471     { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
6472       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
6473       /*
6474        * ARMv7 requires bits 0 and 1 to reset to 1. ARMv8 defines the
6475        * reset values as IMPDEF. We choose to reset to 3 to comply with
6476        * both ARMv7 and ARMv8.
6477        */
6478       .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 3,
6479       .writefn = gt_cnthctl_write, .raw_writefn = raw_write,
6480       .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) },
6481     { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
6482       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
6483       .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0,
6484       .writefn = gt_cntvoff_write,
6485       .nv2_redirect_offset = 0x60,
6486       .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
6487     { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
6488       .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO,
6489       .writefn = gt_cntvoff_write,
6490       .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
6491     { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
6492       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
6493       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
6494       .type = ARM_CP_IO, .access = PL2_RW,
6495       .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
6496     { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
6497       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
6498       .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO,
6499       .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
6500     { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
6501       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
6502       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
6503       .resetfn = gt_hyp_timer_reset,
6504       .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write },
6505     { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
6506       .type = ARM_CP_IO,
6507       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
6508       .access = PL2_RW,
6509       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl),
6510       .resetvalue = 0,
6511       .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write },
6512 #endif
6513     { .name = "HPFAR", .state = ARM_CP_STATE_AA32,
6514       .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
6515       .access = PL2_RW, .accessfn = access_el3_aa32ns,
6516       .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
6517     { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64,
6518       .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
6519       .access = PL2_RW,
6520       .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
6521     { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
6522       .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
6523       .access = PL2_RW,
6524       .nv2_redirect_offset = 0x80,
6525       .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) },
6526 };
6527 
6528 static const ARMCPRegInfo el2_v8_cp_reginfo[] = {
6529     { .name = "HCR2", .state = ARM_CP_STATE_AA32,
6530       .type = ARM_CP_ALIAS | ARM_CP_IO,
6531       .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
6532       .access = PL2_RW,
6533       .fieldoffset = offsetofhigh32(CPUARMState, cp15.hcr_el2),
6534       .writefn = hcr_writehigh },
6535 };
6536 
6537 static CPAccessResult sel2_access(CPUARMState *env, const ARMCPRegInfo *ri,
6538                                   bool isread)
6539 {
6540     if (arm_current_el(env) == 3 || arm_is_secure_below_el3(env)) {
6541         return CP_ACCESS_OK;
6542     }
6543     return CP_ACCESS_TRAP_UNCATEGORIZED;
6544 }
6545 
6546 static const ARMCPRegInfo el2_sec_cp_reginfo[] = {
6547     { .name = "VSTTBR_EL2", .state = ARM_CP_STATE_AA64,
6548       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 6, .opc2 = 0,
6549       .access = PL2_RW, .accessfn = sel2_access,
6550       .nv2_redirect_offset = 0x30,
6551       .fieldoffset = offsetof(CPUARMState, cp15.vsttbr_el2) },
6552     { .name = "VSTCR_EL2", .state = ARM_CP_STATE_AA64,
6553       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 6, .opc2 = 2,
6554       .access = PL2_RW, .accessfn = sel2_access,
6555       .nv2_redirect_offset = 0x48,
6556       .fieldoffset = offsetof(CPUARMState, cp15.vstcr_el2) },
6557 };
6558 
6559 static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
6560                                    bool isread)
6561 {
6562     /*
6563      * The NSACR is RW at EL3, and RO for NS EL1 and NS EL2.
6564      * At Secure EL1 it traps to EL3 or EL2.
6565      */
6566     if (arm_current_el(env) == 3) {
6567         return CP_ACCESS_OK;
6568     }
6569     if (arm_is_secure_below_el3(env)) {
6570         if (env->cp15.scr_el3 & SCR_EEL2) {
6571             return CP_ACCESS_TRAP_EL2;
6572         }
6573         return CP_ACCESS_TRAP_EL3;
6574     }
6575     /* Accesses from EL1 NS and EL2 NS are UNDEF for write but allow reads. */
6576     if (isread) {
6577         return CP_ACCESS_OK;
6578     }
6579     return CP_ACCESS_TRAP_UNCATEGORIZED;
6580 }
6581 
6582 static const ARMCPRegInfo el3_cp_reginfo[] = {
6583     { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64,
6584       .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
6585       .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3),
6586       .resetfn = scr_reset, .writefn = scr_write, .raw_writefn = raw_write },
6587     { .name = "SCR",  .type = ARM_CP_ALIAS | ARM_CP_NEWEL,
6588       .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0,
6589       .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
6590       .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3),
6591       .writefn = scr_write, .raw_writefn = raw_write },
6592     { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64,
6593       .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1,
6594       .access = PL3_RW, .resetvalue = 0,
6595       .fieldoffset = offsetof(CPUARMState, cp15.sder) },
6596     { .name = "SDER",
6597       .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1,
6598       .access = PL3_RW, .resetvalue = 0,
6599       .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) },
6600     { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
6601       .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
6602       .writefn = vbar_write, .resetvalue = 0,
6603       .fieldoffset = offsetof(CPUARMState, cp15.mvbar) },
6604     { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64,
6605       .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0,
6606       .access = PL3_RW, .resetvalue = 0,
6607       .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) },
6608     { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64,
6609       .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2,
6610       .access = PL3_RW,
6611       /* no .writefn needed as this can't cause an ASID change */
6612       .resetvalue = 0,
6613       .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) },
6614     { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
6615       .type = ARM_CP_ALIAS,
6616       .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1,
6617       .access = PL3_RW,
6618       .fieldoffset = offsetof(CPUARMState, elr_el[3]) },
6619     { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64,
6620       .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0,
6621       .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) },
6622     { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64,
6623       .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0,
6624       .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) },
6625     { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64,
6626       .type = ARM_CP_ALIAS,
6627       .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0,
6628       .access = PL3_RW,
6629       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) },
6630     { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64,
6631       .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0,
6632       .access = PL3_RW, .writefn = vbar_write,
6633       .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]),
6634       .resetvalue = 0 },
6635     { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64,
6636       .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2,
6637       .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0,
6638       .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) },
6639     { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64,
6640       .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2,
6641       .access = PL3_RW, .resetvalue = 0,
6642       .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) },
6643     { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64,
6644       .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0,
6645       .access = PL3_RW, .type = ARM_CP_CONST,
6646       .resetvalue = 0 },
6647     { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH,
6648       .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0,
6649       .access = PL3_RW, .type = ARM_CP_CONST,
6650       .resetvalue = 0 },
6651     { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH,
6652       .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1,
6653       .access = PL3_RW, .type = ARM_CP_CONST,
6654       .resetvalue = 0 },
6655     { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64,
6656       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0,
6657       .access = PL3_W, .type = ARM_CP_NO_RAW,
6658       .writefn = tlbi_aa64_alle3is_write },
6659     { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64,
6660       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1,
6661       .access = PL3_W, .type = ARM_CP_NO_RAW,
6662       .writefn = tlbi_aa64_vae3is_write },
6663     { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64,
6664       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5,
6665       .access = PL3_W, .type = ARM_CP_NO_RAW,
6666       .writefn = tlbi_aa64_vae3is_write },
6667     { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64,
6668       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0,
6669       .access = PL3_W, .type = ARM_CP_NO_RAW,
6670       .writefn = tlbi_aa64_alle3_write },
6671     { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64,
6672       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1,
6673       .access = PL3_W, .type = ARM_CP_NO_RAW,
6674       .writefn = tlbi_aa64_vae3_write },
6675     { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64,
6676       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5,
6677       .access = PL3_W, .type = ARM_CP_NO_RAW,
6678       .writefn = tlbi_aa64_vae3_write },
6679 };
6680 
6681 #ifndef CONFIG_USER_ONLY
6682 
6683 static CPAccessResult e2h_access(CPUARMState *env, const ARMCPRegInfo *ri,
6684                                  bool isread)
6685 {
6686     if (arm_current_el(env) == 1) {
6687         /* This must be a FEAT_NV access */
6688         return CP_ACCESS_OK;
6689     }
6690     if (!(arm_hcr_el2_eff(env) & HCR_E2H)) {
6691         return CP_ACCESS_TRAP_UNCATEGORIZED;
6692     }
6693     return CP_ACCESS_OK;
6694 }
6695 
6696 static CPAccessResult access_el1nvpct(CPUARMState *env, const ARMCPRegInfo *ri,
6697                                       bool isread)
6698 {
6699     if (arm_current_el(env) == 1) {
6700         /* This must be a FEAT_NV access with NVx == 101 */
6701         if (FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, EL1NVPCT)) {
6702             return CP_ACCESS_TRAP_EL2;
6703         }
6704     }
6705     return e2h_access(env, ri, isread);
6706 }
6707 
6708 static CPAccessResult access_el1nvvct(CPUARMState *env, const ARMCPRegInfo *ri,
6709                                       bool isread)
6710 {
6711     if (arm_current_el(env) == 1) {
6712         /* This must be a FEAT_NV access with NVx == 101 */
6713         if (FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, EL1NVVCT)) {
6714             return CP_ACCESS_TRAP_EL2;
6715         }
6716     }
6717     return e2h_access(env, ri, isread);
6718 }
6719 
6720 /* Test if system register redirection is to occur in the current state.  */
6721 static bool redirect_for_e2h(CPUARMState *env)
6722 {
6723     return arm_current_el(env) == 2 && (arm_hcr_el2_eff(env) & HCR_E2H);
6724 }
6725 
6726 static uint64_t el2_e2h_read(CPUARMState *env, const ARMCPRegInfo *ri)
6727 {
6728     CPReadFn *readfn;
6729 
6730     if (redirect_for_e2h(env)) {
6731         /* Switch to the saved EL2 version of the register.  */
6732         ri = ri->opaque;
6733         readfn = ri->readfn;
6734     } else {
6735         readfn = ri->orig_readfn;
6736     }
6737     if (readfn == NULL) {
6738         readfn = raw_read;
6739     }
6740     return readfn(env, ri);
6741 }
6742 
6743 static void el2_e2h_write(CPUARMState *env, const ARMCPRegInfo *ri,
6744                           uint64_t value)
6745 {
6746     CPWriteFn *writefn;
6747 
6748     if (redirect_for_e2h(env)) {
6749         /* Switch to the saved EL2 version of the register.  */
6750         ri = ri->opaque;
6751         writefn = ri->writefn;
6752     } else {
6753         writefn = ri->orig_writefn;
6754     }
6755     if (writefn == NULL) {
6756         writefn = raw_write;
6757     }
6758     writefn(env, ri, value);
6759 }
6760 
6761 static uint64_t el2_e2h_e12_read(CPUARMState *env, const ARMCPRegInfo *ri)
6762 {
6763     /* Pass the EL1 register accessor its ri, not the EL12 alias ri */
6764     return ri->orig_readfn(env, ri->opaque);
6765 }
6766 
6767 static void el2_e2h_e12_write(CPUARMState *env, const ARMCPRegInfo *ri,
6768                               uint64_t value)
6769 {
6770     /* Pass the EL1 register accessor its ri, not the EL12 alias ri */
6771     return ri->orig_writefn(env, ri->opaque, value);
6772 }
6773 
6774 static CPAccessResult el2_e2h_e12_access(CPUARMState *env,
6775                                          const ARMCPRegInfo *ri,
6776                                          bool isread)
6777 {
6778     if (arm_current_el(env) == 1) {
6779         /*
6780          * This must be a FEAT_NV access (will either trap or redirect
6781          * to memory). None of the registers with _EL12 aliases want to
6782          * apply their trap controls for this kind of access, so don't
6783          * call the orig_accessfn or do the "UNDEF when E2H is 0" check.
6784          */
6785         return CP_ACCESS_OK;
6786     }
6787     /* FOO_EL12 aliases only exist when E2H is 1; otherwise they UNDEF */
6788     if (!(arm_hcr_el2_eff(env) & HCR_E2H)) {
6789         return CP_ACCESS_TRAP_UNCATEGORIZED;
6790     }
6791     if (ri->orig_accessfn) {
6792         return ri->orig_accessfn(env, ri->opaque, isread);
6793     }
6794     return CP_ACCESS_OK;
6795 }
6796 
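/*
 * Illustrative sketch (hypothetical helper, not used): the gate that
 * el2_e2h_e12_access() applies -- _EL12 names are reachable either via
 * a FEAT_NV access from EL1 or, normally, only while HCR_EL2.E2H is 1.
 */
static bool G_GNUC_UNUSED el12_alias_reachable(CPUARMState *env)
{
    return arm_current_el(env) == 1 /* FEAT_NV trap-or-redirect case */
           || (arm_hcr_el2_eff(env) & HCR_E2H) != 0;
}
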
6797 static void define_arm_vh_e2h_redirects_aliases(ARMCPU *cpu)
6798 {
6799     struct E2HAlias {
6800         uint32_t src_key, dst_key, new_key;
6801         const char *src_name, *dst_name, *new_name;
6802         bool (*feature)(const ARMISARegisters *id);
6803     };
6804 
6805 #define K(op0, op1, crn, crm, op2) \
6806     ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, crn, crm, op0, op1, op2)
6807 
6808     static const struct E2HAlias aliases[] = {
6809         { K(3, 0,  1, 0, 0), K(3, 4,  1, 0, 0), K(3, 5, 1, 0, 0),
6810           "SCTLR", "SCTLR_EL2", "SCTLR_EL12" },
6811         { K(3, 0,  1, 0, 2), K(3, 4,  1, 1, 2), K(3, 5, 1, 0, 2),
6812           "CPACR", "CPTR_EL2", "CPACR_EL12" },
6813         { K(3, 0,  2, 0, 0), K(3, 4,  2, 0, 0), K(3, 5, 2, 0, 0),
6814           "TTBR0_EL1", "TTBR0_EL2", "TTBR0_EL12" },
6815         { K(3, 0,  2, 0, 1), K(3, 4,  2, 0, 1), K(3, 5, 2, 0, 1),
6816           "TTBR1_EL1", "TTBR1_EL2", "TTBR1_EL12" },
6817         { K(3, 0,  2, 0, 2), K(3, 4,  2, 0, 2), K(3, 5, 2, 0, 2),
6818           "TCR_EL1", "TCR_EL2", "TCR_EL12" },
6819         { K(3, 0,  4, 0, 0), K(3, 4,  4, 0, 0), K(3, 5, 4, 0, 0),
6820           "SPSR_EL1", "SPSR_EL2", "SPSR_EL12" },
6821         { K(3, 0,  4, 0, 1), K(3, 4,  4, 0, 1), K(3, 5, 4, 0, 1),
6822           "ELR_EL1", "ELR_EL2", "ELR_EL12" },
6823         { K(3, 0,  5, 1, 0), K(3, 4,  5, 1, 0), K(3, 5, 5, 1, 0),
6824           "AFSR0_EL1", "AFSR0_EL2", "AFSR0_EL12" },
6825         { K(3, 0,  5, 1, 1), K(3, 4,  5, 1, 1), K(3, 5, 5, 1, 1),
6826           "AFSR1_EL1", "AFSR1_EL2", "AFSR1_EL12" },
6827         { K(3, 0,  5, 2, 0), K(3, 4,  5, 2, 0), K(3, 5, 5, 2, 0),
6828           "ESR_EL1", "ESR_EL2", "ESR_EL12" },
6829         { K(3, 0,  6, 0, 0), K(3, 4,  6, 0, 0), K(3, 5, 6, 0, 0),
6830           "FAR_EL1", "FAR_EL2", "FAR_EL12" },
6831         { K(3, 0, 10, 2, 0), K(3, 4, 10, 2, 0), K(3, 5, 10, 2, 0),
6832           "MAIR_EL1", "MAIR_EL2", "MAIR_EL12" },
6833         { K(3, 0, 10, 3, 0), K(3, 4, 10, 3, 0), K(3, 5, 10, 3, 0),
6834           "AMAIR0", "AMAIR_EL2", "AMAIR_EL12" },
6835         { K(3, 0, 12, 0, 0), K(3, 4, 12, 0, 0), K(3, 5, 12, 0, 0),
6836           "VBAR", "VBAR_EL2", "VBAR_EL12" },
6837         { K(3, 0, 13, 0, 1), K(3, 4, 13, 0, 1), K(3, 5, 13, 0, 1),
6838           "CONTEXTIDR_EL1", "CONTEXTIDR_EL2", "CONTEXTIDR_EL12" },
6839         { K(3, 0, 14, 1, 0), K(3, 4, 14, 1, 0), K(3, 5, 14, 1, 0),
6840           "CNTKCTL", "CNTHCTL_EL2", "CNTKCTL_EL12" },
6841 
6842         /*
6843          * Note that redirection of ZCR is mentioned in the description
6844          * of ZCR_EL2, and aliasing in the description of ZCR_EL1, but
6845          * not in the summary table.
6846          */
6847         { K(3, 0,  1, 2, 0), K(3, 4,  1, 2, 0), K(3, 5, 1, 2, 0),
6848           "ZCR_EL1", "ZCR_EL2", "ZCR_EL12", isar_feature_aa64_sve },
6849         { K(3, 0,  1, 2, 6), K(3, 4,  1, 2, 6), K(3, 5, 1, 2, 6),
6850           "SMCR_EL1", "SMCR_EL2", "SMCR_EL12", isar_feature_aa64_sme },
6851 
6852         { K(3, 0,  5, 6, 0), K(3, 4,  5, 6, 0), K(3, 5, 5, 6, 0),
6853           "TFSR_EL1", "TFSR_EL2", "TFSR_EL12", isar_feature_aa64_mte },
6854 
6855         { K(3, 0, 13, 0, 7), K(3, 4, 13, 0, 7), K(3, 5, 13, 0, 7),
6856           "SCXTNUM_EL1", "SCXTNUM_EL2", "SCXTNUM_EL12",
6857           isar_feature_aa64_scxtnum },
6858 
6859         /* TODO: ARMv8.2-SPE -- PMSCR_EL2 */
6860         /* TODO: ARMv8.4-Trace -- TRFCR_EL2 */
6861     };
6862 #undef K
6863 
6864     size_t i;
6865 
6866     for (i = 0; i < ARRAY_SIZE(aliases); i++) {
6867         const struct E2HAlias *a = &aliases[i];
6868         ARMCPRegInfo *src_reg, *dst_reg, *new_reg;
6869         bool ok;
6870 
6871         if (a->feature && !a->feature(&cpu->isar)) {
6872             continue;
6873         }
6874 
6875         src_reg = g_hash_table_lookup(cpu->cp_regs,
6876                                       (gpointer)(uintptr_t)a->src_key);
6877         dst_reg = g_hash_table_lookup(cpu->cp_regs,
6878                                       (gpointer)(uintptr_t)a->dst_key);
6879         g_assert(src_reg != NULL);
6880         g_assert(dst_reg != NULL);
6881 
6882         /* Cross-compare names to detect typos in the keys.  */
6883         g_assert(strcmp(src_reg->name, a->src_name) == 0);
6884         g_assert(strcmp(dst_reg->name, a->dst_name) == 0);
6885 
6886         /* None of the core system registers use opaque; we will.  */
6887         g_assert(src_reg->opaque == NULL);
6888 
6889         /* Create alias before redirection so we dup the right data. */
6890         new_reg = g_memdup(src_reg, sizeof(ARMCPRegInfo));
6891 
6892         new_reg->name = a->new_name;
6893         new_reg->type |= ARM_CP_ALIAS;
6894         /* Remove PL1/PL0 access, leaving PL2/PL3 R/W in place.  */
6895         new_reg->access &= PL2_RW | PL3_RW;
6896         /* The new_reg op fields are as per new_key, not the target reg */
6897         new_reg->crn = (a->new_key & CP_REG_ARM64_SYSREG_CRN_MASK)
6898             >> CP_REG_ARM64_SYSREG_CRN_SHIFT;
6899         new_reg->crm = (a->new_key & CP_REG_ARM64_SYSREG_CRM_MASK)
6900             >> CP_REG_ARM64_SYSREG_CRM_SHIFT;
6901         new_reg->opc0 = (a->new_key & CP_REG_ARM64_SYSREG_OP0_MASK)
6902             >> CP_REG_ARM64_SYSREG_OP0_SHIFT;
6903         new_reg->opc1 = (a->new_key & CP_REG_ARM64_SYSREG_OP1_MASK)
6904             >> CP_REG_ARM64_SYSREG_OP1_SHIFT;
6905         new_reg->opc2 = (a->new_key & CP_REG_ARM64_SYSREG_OP2_MASK)
6906             >> CP_REG_ARM64_SYSREG_OP2_SHIFT;
6907         new_reg->opaque = src_reg;
6908         new_reg->orig_readfn = src_reg->readfn ?: raw_read;
6909         new_reg->orig_writefn = src_reg->writefn ?: raw_write;
6910         new_reg->orig_accessfn = src_reg->accessfn;
6911         if (!new_reg->raw_readfn) {
6912             new_reg->raw_readfn = raw_read;
6913         }
6914         if (!new_reg->raw_writefn) {
6915             new_reg->raw_writefn = raw_write;
6916         }
6917         new_reg->readfn = el2_e2h_e12_read;
6918         new_reg->writefn = el2_e2h_e12_write;
6919         new_reg->accessfn = el2_e2h_e12_access;
6920 
6921         /*
6922          * If the _EL1 register is redirected to memory by FEAT_NV2,
6923          * then it shares the offset with the _EL12 register,
6924          * and which one is redirected depends on HCR_EL2.NV1.
6925          */
6926         if (new_reg->nv2_redirect_offset) {
6927             assert(new_reg->nv2_redirect_offset & NV2_REDIR_NV1);
6928             new_reg->nv2_redirect_offset &= ~NV2_REDIR_NV1;
6929             new_reg->nv2_redirect_offset |= NV2_REDIR_NO_NV1;
6930         }
6931 
6932         ok = g_hash_table_insert(cpu->cp_regs,
6933                                  (gpointer)(uintptr_t)a->new_key, new_reg);
6934         g_assert(ok);
6935 
6936         src_reg->opaque = dst_reg;
6937         src_reg->orig_readfn = src_reg->readfn ?: raw_read;
6938         src_reg->orig_writefn = src_reg->writefn ?: raw_write;
6939         if (!src_reg->raw_readfn) {
6940             src_reg->raw_readfn = raw_read;
6941         }
6942         if (!src_reg->raw_writefn) {
6943             src_reg->raw_writefn = raw_write;
6944         }
6945         src_reg->readfn = el2_e2h_read;
6946         src_reg->writefn = el2_e2h_write;
6947     }
6948 }
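
/*
 * Illustrative sketch (hypothetical helper, not used): each E2HAlias
 * row's three keys differ only in op1 -- 0 for the EL1 register, 4 for
 * its EL2 counterpart and 5 for the new _EL12 alias.  For SCTLR this
 * yields the (3,0,1,0,0) / (3,4,1,0,0) / (3,5,1,0,0) triple above.
 */
static uint32_t G_GNUC_UNUSED sctlr_alias_key(uint32_t op1)
{
    return ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, 1, 0, 3, op1, 0);
}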
6949 #endif
6950 
6951 static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
6952                                      bool isread)
6953 {
6954     int cur_el = arm_current_el(env);
6955 
6956     if (cur_el < 2) {
6957         uint64_t hcr = arm_hcr_el2_eff(env);
6958 
6959         if (cur_el == 0) {
6960             if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
6961                 if (!(env->cp15.sctlr_el[2] & SCTLR_UCT)) {
6962                     return CP_ACCESS_TRAP_EL2;
6963                 }
6964             } else {
6965                 if (!(env->cp15.sctlr_el[1] & SCTLR_UCT)) {
6966                     return CP_ACCESS_TRAP;
6967                 }
6968                 if (hcr & HCR_TID2) {
6969                     return CP_ACCESS_TRAP_EL2;
6970                 }
6971             }
6972         } else if (hcr & HCR_TID2) {
6973             return CP_ACCESS_TRAP_EL2;
6974         }
6975     }
6976 
6981     return CP_ACCESS_OK;
6982 }
6983 
6984 /*
6985  * Check for traps to RAS registers, which are controlled
6986  * by HCR_EL2.TERR and SCR_EL3.TERR.
6987  */
6988 static CPAccessResult access_terr(CPUARMState *env, const ARMCPRegInfo *ri,
6989                                   bool isread)
6990 {
6991     int el = arm_current_el(env);
6992 
6993     if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TERR)) {
6994         return CP_ACCESS_TRAP_EL2;
6995     }
6996     if (el < 3 && (env->cp15.scr_el3 & SCR_TERR)) {
6997         return CP_ACCESS_TRAP_EL3;
6998     }
6999     return CP_ACCESS_OK;
7000 }
7001 
7002 static uint64_t disr_read(CPUARMState *env, const ARMCPRegInfo *ri)
7003 {
7004     int el = arm_current_el(env);
7005 
7006     if (el < 2 && (arm_hcr_el2_eff(env) & HCR_AMO)) {
7007         return env->cp15.vdisr_el2;
7008     }
7009     if (el < 3 && (env->cp15.scr_el3 & SCR_EA)) {
7010         return 0; /* RAZ/WI */
7011     }
7012     return env->cp15.disr_el1;
7013 }
7014 
7015 static void disr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
7016 {
7017     int el = arm_current_el(env);
7018 
7019     if (el < 2 && (arm_hcr_el2_eff(env) & HCR_AMO)) {
7020         env->cp15.vdisr_el2 = val;
7021         return;
7022     }
7023     if (el < 3 && (env->cp15.scr_el3 & SCR_EA)) {
7024         return; /* RAZ/WI */
7025     }
7026     env->cp15.disr_el1 = val;
7027 }
7028 
7029 /*
7030  * Minimal RAS implementation with no Error Records, which means
7031  * that all of the Error Record registers:
7032  *   ERXADDR_EL1
7033  *   ERXCTLR_EL1
7034  *   ERXFR_EL1
7035  *   ERXMISC0_EL1
7036  *   ERXMISC1_EL1
7037  *   ERXMISC2_EL1
7038  *   ERXMISC3_EL1
7039  *   ERXPFGCDN_EL1  (RASv1p1)
7040  *   ERXPFGCTL_EL1  (RASv1p1)
7041  *   ERXPFGF_EL1    (RASv1p1)
7042  *   ERXSTATUS_EL1
7043  * and
7044  *   ERRSELR_EL1
7045  * may generate UNDEFINED, which is the effect we get by not
7046  * listing them at all.
7047  *
7048  * These registers have fine-grained trap bits, but UNDEF-to-EL1
7049  * is higher priority than FGT-to-EL2 so we do not need to list them
7050  * in order to check for an FGT.
7051  */
7052 static const ARMCPRegInfo minimal_ras_reginfo[] = {
7053     { .name = "DISR_EL1", .state = ARM_CP_STATE_BOTH,
7054       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 1,
7055       .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.disr_el1),
7056       .readfn = disr_read, .writefn = disr_write, .raw_writefn = raw_write },
7057     { .name = "ERRIDR_EL1", .state = ARM_CP_STATE_BOTH,
7058       .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 3, .opc2 = 0,
7059       .access = PL1_R, .accessfn = access_terr,
7060       .fgt = FGT_ERRIDR_EL1,
7061       .type = ARM_CP_CONST, .resetvalue = 0 },
7062     { .name = "VDISR_EL2", .state = ARM_CP_STATE_BOTH,
7063       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 1, .opc2 = 1,
7064       .nv2_redirect_offset = 0x500,
7065       .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.vdisr_el2) },
7066     { .name = "VSESR_EL2", .state = ARM_CP_STATE_BOTH,
7067       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 3,
7068       .nv2_redirect_offset = 0x508,
7069       .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.vsesr_el2) },
7070 };
7071 
7072 /*
7073  * Return the exception level to which exceptions should be taken
7074  * via SVEAccessTrap.  This excludes the check for whether the exception
7075  * should be routed through AArch64.AdvSIMDFPAccessTrap.  That can easily
7076  * be found by testing 0 < fp_exception_el < sve_exception_el.
7077  *
7078  * C.f. the ARM pseudocode function CheckSVEEnabled.  Note that the
7079  * pseudocode does *not* separate out the FP trap checks, but has them
7080  * all in one function.
7081  */
7082 int sve_exception_el(CPUARMState *env, int el)
7083 {
7084 #ifndef CONFIG_USER_ONLY
7085     if (el <= 1 && !el_is_in_host(env, el)) {
7086         switch (FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, ZEN)) {
7087         case 1:
7088             if (el != 0) {
7089                 break;
7090             }
7091             /* fall through */
7092         case 0:
7093         case 2:
7094             return 1;
7095         }
7096     }
7097 
7098     if (el <= 2 && arm_is_el2_enabled(env)) {
7099         /* CPTR_EL2 changes format with HCR_EL2.E2H (regardless of TGE). */
7100         if (env->cp15.hcr_el2 & HCR_E2H) {
7101             switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, ZEN)) {
7102             case 1:
7103                 if (el != 0 || !(env->cp15.hcr_el2 & HCR_TGE)) {
7104                     break;
7105                 }
7106                 /* fall through */
7107             case 0:
7108             case 2:
7109                 return 2;
7110             }
7111         } else {
7112             if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TZ)) {
7113                 return 2;
7114             }
7115         }
7116     }
7117 
7118     /* CPTR_EL3.  Since EZ is negative we must check for EL3.  */
7119     if (arm_feature(env, ARM_FEATURE_EL3)
7120         && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, EZ)) {
7121         return 3;
7122     }
7123 #endif
7124     return 0;
7125 }
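
/*
 * Illustrative sketch, compiled out: the 2-bit CPACR/CPTR xEN encoding
 * that the switch above implements via fallthrough.  0b01 traps EL0
 * only, 0b11 traps neither, and 0b00/0b10 trap both EL0 and EL1.
 */
#if 0
static bool xen_field_traps(int xen, int el)
{
    switch (xen) {
    case 1:
        return el == 0;     /* 0b01: trap EL0 only */
    case 3:
        return false;       /* 0b11: no trap */
    default:
        return true;        /* 0b00, 0b10: trap EL0 and EL1 */
    }
}
#endif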
7126 
7127 /*
7128  * Return the exception level to which exceptions should be taken for SME.
7129  * C.f. the ARM pseudocode function CheckSMEAccess.
7130  */
7131 int sme_exception_el(CPUARMState *env, int el)
7132 {
7133 #ifndef CONFIG_USER_ONLY
7134     if (el <= 1 && !el_is_in_host(env, el)) {
7135         switch (FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, SMEN)) {
7136         case 1:
7137             if (el != 0) {
7138                 break;
7139             }
7140             /* fall through */
7141         case 0:
7142         case 2:
7143             return 1;
7144         }
7145     }
7146 
7147     if (el <= 2 && arm_is_el2_enabled(env)) {
7148         /* CPTR_EL2 changes format with HCR_EL2.E2H (regardless of TGE). */
7149         if (env->cp15.hcr_el2 & HCR_E2H) {
7150             switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, SMEN)) {
7151             case 1:
7152                 if (el != 0 || !(env->cp15.hcr_el2 & HCR_TGE)) {
7153                     break;
7154                 }
7155                 /* fall through */
7156             case 0:
7157             case 2:
7158                 return 2;
7159             }
7160         } else {
7161             if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TSM)) {
7162                 return 2;
7163             }
7164         }
7165     }
7166 
7167     /* CPTR_EL3.  Since ESM is negative we must check for EL3.  */
7168     if (arm_feature(env, ARM_FEATURE_EL3)
7169         && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, ESM)) {
7170         return 3;
7171     }
7172 #endif
7173     return 0;
7174 }
7175 
7176 /*
7177  * Given that SVE is enabled, return the vector length for EL.
7178  */
7179 uint32_t sve_vqm1_for_el_sm(CPUARMState *env, int el, bool sm)
7180 {
7181     ARMCPU *cpu = env_archcpu(env);
7182     uint64_t *cr = env->vfp.zcr_el;
7183     uint32_t map = cpu->sve_vq.map;
7184     uint32_t len = ARM_MAX_VQ - 1;
7185 
7186     if (sm) {
7187         cr = env->vfp.smcr_el;
7188         map = cpu->sme_vq.map;
7189     }
7190 
7191     if (el <= 1 && !el_is_in_host(env, el)) {
7192         len = MIN(len, 0xf & (uint32_t)cr[1]);
7193     }
7194     if (el <= 2 && arm_feature(env, ARM_FEATURE_EL2)) {
7195         len = MIN(len, 0xf & (uint32_t)cr[2]);
7196     }
7197     if (arm_feature(env, ARM_FEATURE_EL3)) {
7198         len = MIN(len, 0xf & (uint32_t)cr[3]);
7199     }
7200 
7201     map &= MAKE_64BIT_MASK(0, len + 1);
7202     if (map != 0) {
7203         return 31 - clz32(map);
7204     }
7205 
7206     /* Bit 0 is always set for Normal SVE -- not so for Streaming SVE. */
7207     assert(sm);
7208     return ctz32(cpu->sme_vq.map);
7209 }
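
/*
 * Illustrative sketch, compiled out: a worked example of the vq
 * selection above.  Suppose the CPU supports vq 1, 2 and 4
 * (map = 0b1011, where bit N set means vq N+1 is supported) and the
 * ZCR/SMCR constraints give len = 2 (i.e. vq <= 3).  Then
 * map & MAKE_64BIT_MASK(0, 3) == 0b0011 and 31 - clz32(0b0011) == 1,
 * i.e. vq = 2: the largest supported vector length not exceeding the
 * request.
 */
#if 0
static uint32_t pick_vqm1(uint32_t map, uint32_t len)
{
    map &= MAKE_64BIT_MASK(0, len + 1);
    return 31 - clz32(map);     /* caller guarantees map != 0 */
}
#endif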
7210 
7211 uint32_t sve_vqm1_for_el(CPUARMState *env, int el)
7212 {
7213     return sve_vqm1_for_el_sm(env, el, FIELD_EX64(env->svcr, SVCR, SM));
7214 }
7215 
7216 static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
7217                       uint64_t value)
7218 {
7219     int cur_el = arm_current_el(env);
7220     int old_len = sve_vqm1_for_el(env, cur_el);
7221     int new_len;
7222 
7223     /* Bits other than [3:0] are RAZ/WI.  */
7224     QEMU_BUILD_BUG_ON(ARM_MAX_VQ > 16);
7225     raw_write(env, ri, value & 0xf);
7226 
7227     /*
7228      * Because we arrived here, we know both FP and SVE are enabled;
7229      * otherwise we would have trapped access to the ZCR_ELn register.
7230      */
7231     new_len = sve_vqm1_for_el(env, cur_el);
7232     if (new_len < old_len) {
7233         aarch64_sve_narrow_vq(env, new_len + 1);
7234     }
7235 }
7236 
7237 static const ARMCPRegInfo zcr_reginfo[] = {
7238     { .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64,
7239       .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0,
7240       .nv2_redirect_offset = 0x1e0 | NV2_REDIR_NV1,
7241       .access = PL1_RW, .type = ARM_CP_SVE,
7242       .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]),
7243       .writefn = zcr_write, .raw_writefn = raw_write },
7244     { .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
7245       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
7246       .access = PL2_RW, .type = ARM_CP_SVE,
7247       .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]),
7248       .writefn = zcr_write, .raw_writefn = raw_write },
7249     { .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64,
7250       .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0,
7251       .access = PL3_RW, .type = ARM_CP_SVE,
7252       .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]),
7253       .writefn = zcr_write, .raw_writefn = raw_write },
7254 };
7255 
7256 #ifdef TARGET_AARCH64
7257 static CPAccessResult access_tpidr2(CPUARMState *env, const ARMCPRegInfo *ri,
7258                                     bool isread)
7259 {
7260     int el = arm_current_el(env);
7261 
7262     if (el == 0) {
7263         uint64_t sctlr = arm_sctlr(env, el);
7264         if (!(sctlr & SCTLR_EnTP2)) {
7265             return CP_ACCESS_TRAP;
7266         }
7267     }
7268     /* TODO: FEAT_FGT */
7269     if (el < 3
7270         && arm_feature(env, ARM_FEATURE_EL3)
7271         && !(env->cp15.scr_el3 & SCR_ENTP2)) {
7272         return CP_ACCESS_TRAP_EL3;
7273     }
7274     return CP_ACCESS_OK;
7275 }
7276 
7277 static CPAccessResult access_smprimap(CPUARMState *env, const ARMCPRegInfo *ri,
7278                                       bool isread)
7279 {
7280     /* If at EL1, this is a FEAT_NV access and CPTR_EL3.ESM doesn't apply */
7281     if (arm_current_el(env) == 2
7282         && arm_feature(env, ARM_FEATURE_EL3)
7283         && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, ESM)) {
7284         return CP_ACCESS_TRAP_EL3;
7285     }
7286     return CP_ACCESS_OK;
7287 }
7288 
7289 static CPAccessResult access_smpri(CPUARMState *env, const ARMCPRegInfo *ri,
7290                                    bool isread)
7291 {
7292     if (arm_current_el(env) < 3
7293         && arm_feature(env, ARM_FEATURE_EL3)
7294         && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, ESM)) {
7295         return CP_ACCESS_TRAP_EL3;
7296     }
7297     return CP_ACCESS_OK;
7298 }
7299 
7300 /* ResetSVEState */
7301 static void arm_reset_sve_state(CPUARMState *env)
7302 {
7303     memset(env->vfp.zregs, 0, sizeof(env->vfp.zregs));
7304     /* Recall that FFR is stored as pregs[16]. */
7305     memset(env->vfp.pregs, 0, sizeof(env->vfp.pregs));
7306     vfp_set_fpsr(env, 0x0800009f);  /* pseudocode resets FPSR, not FPCR */
7307 }
7308 
7309 void aarch64_set_svcr(CPUARMState *env, uint64_t new, uint64_t mask)
7310 {
7311     uint64_t change = (env->svcr ^ new) & mask;
7312 
7313     if (change == 0) {
7314         return;
7315     }
7316     env->svcr ^= change;
7317 
7318     if (change & R_SVCR_SM_MASK) {
7319         arm_reset_sve_state(env);
7320     }
7321 
7322     /*
7323      * ResetSMEState.
7324      *
7325      * SetPSTATE_ZA zeros on enable and disable.  We can zero this only
7326      * on enable: while disabled, the storage is inaccessible and the
7327      * value does not matter.  We're not saving the storage in vmstate
7328      * when disabled either.
7329      */
7330     if (change & new & R_SVCR_ZA_MASK) {
7331         memset(env->zarray, 0, sizeof(env->zarray));
7332     }
7333 
7334     if (tcg_enabled()) {
7335         arm_rebuild_hflags(env);
7336     }
7337 }
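
/*
 * Illustrative sketch, compiled out: the bit algebra used above.
 * "change" holds the writable bits that actually flip, and
 * "change & new" keeps only the 0->1 edges, which is why ZA storage
 * is zeroed on enable but not on disable.
 */
#if 0
static uint64_t rising_edges(uint64_t old, uint64_t new, uint64_t mask)
{
    uint64_t change = (old ^ new) & mask;   /* bits that flip */
    return change & new;                    /* ...and end up set */
}
#endif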
7338 
7339 static void svcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
7340                        uint64_t value)
7341 {
7342     aarch64_set_svcr(env, value, -1);
7343 }
7344 
7345 static void smcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
7346                        uint64_t value)
7347 {
7348     int cur_el = arm_current_el(env);
7349     int old_len = sve_vqm1_for_el(env, cur_el);
7350     int new_len;
7351 
7352     QEMU_BUILD_BUG_ON(ARM_MAX_VQ > R_SMCR_LEN_MASK + 1);
7353     value &= R_SMCR_LEN_MASK | R_SMCR_FA64_MASK;
7354     raw_write(env, ri, value);
7355 
7356     /*
7357      * Note that it is CONSTRAINED UNPREDICTABLE what happens to ZA storage
7358      * when SVL is widened (old values kept, or zeros).  Choose to keep the
7359      * current values for simplicity.  But for QEMU internals, we must still
7360      * apply the narrower SVL to the Zregs and Pregs -- see the comment
7361      * above aarch64_sve_narrow_vq.
7362      */
7363     new_len = sve_vqm1_for_el(env, cur_el);
7364     if (new_len < old_len) {
7365         aarch64_sve_narrow_vq(env, new_len + 1);
7366     }
7367 }
7368 
7369 static const ARMCPRegInfo sme_reginfo[] = {
7370     { .name = "TPIDR2_EL0", .state = ARM_CP_STATE_AA64,
7371       .opc0 = 3, .opc1 = 3, .crn = 13, .crm = 0, .opc2 = 5,
7372       .access = PL0_RW, .accessfn = access_tpidr2,
7373       .fgt = FGT_NTPIDR2_EL0,
7374       .fieldoffset = offsetof(CPUARMState, cp15.tpidr2_el0) },
7375     { .name = "SVCR", .state = ARM_CP_STATE_AA64,
7376       .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 2,
7377       .access = PL0_RW, .type = ARM_CP_SME,
7378       .fieldoffset = offsetof(CPUARMState, svcr),
7379       .writefn = svcr_write, .raw_writefn = raw_write },
7380     { .name = "SMCR_EL1", .state = ARM_CP_STATE_AA64,
7381       .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 6,
7382       .nv2_redirect_offset = 0x1f0 | NV2_REDIR_NV1,
7383       .access = PL1_RW, .type = ARM_CP_SME,
7384       .fieldoffset = offsetof(CPUARMState, vfp.smcr_el[1]),
7385       .writefn = smcr_write, .raw_writefn = raw_write },
7386     { .name = "SMCR_EL2", .state = ARM_CP_STATE_AA64,
7387       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 6,
7388       .access = PL2_RW, .type = ARM_CP_SME,
7389       .fieldoffset = offsetof(CPUARMState, vfp.smcr_el[2]),
7390       .writefn = smcr_write, .raw_writefn = raw_write },
7391     { .name = "SMCR_EL3", .state = ARM_CP_STATE_AA64,
7392       .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 6,
7393       .access = PL3_RW, .type = ARM_CP_SME,
7394       .fieldoffset = offsetof(CPUARMState, vfp.smcr_el[3]),
7395       .writefn = smcr_write, .raw_writefn = raw_write },
7396     { .name = "SMIDR_EL1", .state = ARM_CP_STATE_AA64,
7397       .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 6,
7398       .access = PL1_R, .accessfn = access_aa64_tid1,
7399       /*
7400        * IMPLEMENTOR = 0 (software)
7401        * REVISION    = 0 (implementation defined)
7402        * SMPS        = 0 (no streaming execution priority in QEMU)
7403       * AFFINITY    = 0 (streaming SVE mode not shared with other PEs)
7404        */
7405       .type = ARM_CP_CONST, .resetvalue = 0, },
7406     /*
7407      * Because SMIDR_EL1.SMPS is 0, SMPRI_EL1 and SMPRIMAP_EL2 are RES 0.
7408      */
7409     { .name = "SMPRI_EL1", .state = ARM_CP_STATE_AA64,
7410       .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 4,
7411       .access = PL1_RW, .accessfn = access_smpri,
7412       .fgt = FGT_NSMPRI_EL1,
7413       .type = ARM_CP_CONST, .resetvalue = 0 },
7414     { .name = "SMPRIMAP_EL2", .state = ARM_CP_STATE_AA64,
7415       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 5,
7416       .nv2_redirect_offset = 0x1f8,
7417       .access = PL2_RW, .accessfn = access_smprimap,
7418       .type = ARM_CP_CONST, .resetvalue = 0 },
7419 };
7420 
7421 static void tlbi_aa64_paall_write(CPUARMState *env, const ARMCPRegInfo *ri,
7422                                   uint64_t value)
7423 {
7424     CPUState *cs = env_cpu(env);
7425 
7426     tlb_flush(cs);
7427 }
7428 
7429 static void gpccr_write(CPUARMState *env, const ARMCPRegInfo *ri,
7430                         uint64_t value)
7431 {
7432     /* L0GPTSZ is RO; other bits not mentioned are RES0. */
7433     uint64_t rw_mask = R_GPCCR_PPS_MASK | R_GPCCR_IRGN_MASK |
7434         R_GPCCR_ORGN_MASK | R_GPCCR_SH_MASK | R_GPCCR_PGS_MASK |
7435         R_GPCCR_GPC_MASK | R_GPCCR_GPCP_MASK;
7436 
7437     env->cp15.gpccr_el3 = (value & rw_mask) | (env->cp15.gpccr_el3 & ~rw_mask);
7438 }
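
/*
 * Illustrative sketch, compiled out: the read-modify-write idiom used
 * above.  With old = 0xA5, value = 0xFF and rw_mask = 0x0F, the result
 * is 0xAF: writable bits come from the new value, while the read-only
 * and RES0 bits survive from the old value.
 */
#if 0
static uint64_t rmw_masked(uint64_t old, uint64_t value, uint64_t rw_mask)
{
    return (value & rw_mask) | (old & ~rw_mask);
}
#endif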
7439 
7440 static void gpccr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
7441 {
7442     env->cp15.gpccr_el3 = FIELD_DP64(0, GPCCR, L0GPTSZ,
7443                                      env_archcpu(env)->reset_l0gptsz);
7444 }
7445 
7446 static void tlbi_aa64_paallos_write(CPUARMState *env, const ARMCPRegInfo *ri,
7447                                     uint64_t value)
7448 {
7449     CPUState *cs = env_cpu(env);
7450 
7451     tlb_flush_all_cpus_synced(cs);
7452 }
7453 
7454 static const ARMCPRegInfo rme_reginfo[] = {
7455     { .name = "GPCCR_EL3", .state = ARM_CP_STATE_AA64,
7456       .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 1, .opc2 = 6,
7457       .access = PL3_RW, .writefn = gpccr_write, .resetfn = gpccr_reset,
7458       .fieldoffset = offsetof(CPUARMState, cp15.gpccr_el3) },
7459     { .name = "GPTBR_EL3", .state = ARM_CP_STATE_AA64,
7460       .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 1, .opc2 = 4,
7461       .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.gptbr_el3) },
7462     { .name = "MFAR_EL3", .state = ARM_CP_STATE_AA64,
7463       .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 5,
7464       .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mfar_el3) },
7465     { .name = "TLBI_PAALL", .state = ARM_CP_STATE_AA64,
7466       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 4,
7467       .access = PL3_W, .type = ARM_CP_NO_RAW,
7468       .writefn = tlbi_aa64_paall_write },
7469     { .name = "TLBI_PAALLOS", .state = ARM_CP_STATE_AA64,
7470       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 4,
7471       .access = PL3_W, .type = ARM_CP_NO_RAW,
7472       .writefn = tlbi_aa64_paallos_write },
7473     /*
7474      * QEMU does not have a way to invalidate by physical address, so
7475      * invalidating a range of physical addresses is accomplished by
7476      * flushing all TLB entries in the outer shareable domain,
7477      * just like PAALLOS.
7478      */
7479     { .name = "TLBI_RPALOS", .state = ARM_CP_STATE_AA64,
7480       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 4, .opc2 = 7,
7481       .access = PL3_W, .type = ARM_CP_NO_RAW,
7482       .writefn = tlbi_aa64_paallos_write },
7483     { .name = "TLBI_RPAOS", .state = ARM_CP_STATE_AA64,
7484       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 4, .opc2 = 3,
7485       .access = PL3_W, .type = ARM_CP_NO_RAW,
7486       .writefn = tlbi_aa64_paallos_write },
7487     { .name = "DC_CIPAPA", .state = ARM_CP_STATE_AA64,
7488       .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 14, .opc2 = 1,
7489       .access = PL3_W, .type = ARM_CP_NOP },
7490 };
7491 
7492 static const ARMCPRegInfo rme_mte_reginfo[] = {
7493     { .name = "DC_CIGDPAPA", .state = ARM_CP_STATE_AA64,
7494       .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 14, .opc2 = 5,
7495       .access = PL3_W, .type = ARM_CP_NOP },
7496 };
7497 #endif /* TARGET_AARCH64 */
7498 
7499 static void define_pmu_regs(ARMCPU *cpu)
7500 {
7501     /*
7502      * v7 performance monitor control register: same implementor
7503      * field as the main ID register, plus pmu_num_counters() event
7504      * counters in addition to the cycle count register.
7505      */
7506     unsigned int i, pmcrn = pmu_num_counters(&cpu->env);
7507     ARMCPRegInfo pmcr = {
7508         .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
7509         .access = PL0_RW,
7510         .fgt = FGT_PMCR_EL0,
7511         .type = ARM_CP_IO | ARM_CP_ALIAS,
7512         .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr),
7513         .accessfn = pmreg_access,
7514         .readfn = pmcr_read, .raw_readfn = raw_read,
7515         .writefn = pmcr_write, .raw_writefn = raw_write,
7516     };
7517     ARMCPRegInfo pmcr64 = {
7518         .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64,
7519         .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0,
7520         .access = PL0_RW, .accessfn = pmreg_access,
7521         .fgt = FGT_PMCR_EL0,
7522         .type = ARM_CP_IO,
7523         .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
7524         .resetvalue = cpu->isar.reset_pmcr_el0,
7525         .readfn = pmcr_read, .raw_readfn = raw_read,
7526         .writefn = pmcr_write, .raw_writefn = raw_write,
7527     };
7528 
7529     define_one_arm_cp_reg(cpu, &pmcr);
7530     define_one_arm_cp_reg(cpu, &pmcr64);
7531     for (i = 0; i < pmcrn; i++) {
7532         char *pmevcntr_name = g_strdup_printf("PMEVCNTR%d", i);
7533         char *pmevcntr_el0_name = g_strdup_printf("PMEVCNTR%d_EL0", i);
7534         char *pmevtyper_name = g_strdup_printf("PMEVTYPER%d", i);
7535         char *pmevtyper_el0_name = g_strdup_printf("PMEVTYPER%d_EL0", i);
7536         ARMCPRegInfo pmev_regs[] = {
7537             { .name = pmevcntr_name, .cp = 15, .crn = 14,
7538               .crm = 8 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
7539               .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
7540               .fgt = FGT_PMEVCNTRN_EL0,
7541               .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
7542               .accessfn = pmreg_access_xevcntr },
7543             { .name = pmevcntr_el0_name, .state = ARM_CP_STATE_AA64,
7544               .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 8 | (3 & (i >> 3)),
7545               .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access_xevcntr,
7546               .type = ARM_CP_IO,
7547               .fgt = FGT_PMEVCNTRN_EL0,
7548               .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
7549               .raw_readfn = pmevcntr_rawread,
7550               .raw_writefn = pmevcntr_rawwrite },
7551             { .name = pmevtyper_name, .cp = 15, .crn = 14,
7552               .crm = 12 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
7553               .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
7554               .fgt = FGT_PMEVTYPERN_EL0,
7555               .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
7556               .accessfn = pmreg_access },
7557             { .name = pmevtyper_el0_name, .state = ARM_CP_STATE_AA64,
7558               .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 12 | (3 & (i >> 3)),
7559               .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
7560               .fgt = FGT_PMEVTYPERN_EL0,
7561               .type = ARM_CP_IO,
7562               .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
7563               .raw_writefn = pmevtyper_rawwrite },
7564         };
7565         define_arm_cp_regs(cpu, pmev_regs);
7566         g_free(pmevcntr_name);
7567         g_free(pmevcntr_el0_name);
7568         g_free(pmevtyper_name);
7569         g_free(pmevtyper_el0_name);
7570     }
7571     if (cpu_isar_feature(aa32_pmuv3p1, cpu)) {
7572         ARMCPRegInfo v81_pmu_regs[] = {
7573             { .name = "PMCEID2", .state = ARM_CP_STATE_AA32,
7574               .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 4,
7575               .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
7576               .fgt = FGT_PMCEIDN_EL0,
7577               .resetvalue = extract64(cpu->pmceid0, 32, 32) },
7578             { .name = "PMCEID3", .state = ARM_CP_STATE_AA32,
7579               .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 5,
7580               .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
7581               .fgt = FGT_PMCEIDN_EL0,
7582               .resetvalue = extract64(cpu->pmceid1, 32, 32) },
7583         };
7584         define_arm_cp_regs(cpu, v81_pmu_regs);
7585     }
7586     if (cpu_isar_feature(any_pmuv3p4, cpu)) {
7587         static const ARMCPRegInfo v84_pmmir = {
7588             .name = "PMMIR_EL1", .state = ARM_CP_STATE_BOTH,
7589             .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 6,
7590             .access = PL1_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
7591             .fgt = FGT_PMMIR_EL1,
7592             .resetvalue = 0
7593         };
7594         define_one_arm_cp_reg(cpu, &v84_pmmir);
7595     }
7596 }
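
/*
 * Illustrative sketch, compiled out: how the loop above packs a counter
 * index into the PMEVCNTR<n>/PMEVTYPER<n> encoding.  CRm carries n[4:3]
 * (base 8 for counters, 12 for typers) and opc2 carries n[2:0]; e.g.
 * n = 13 gives crm = 8 | (3 & (13 >> 3)) = 9 and opc2 = 13 & 7 = 5.
 */
#if 0
static void pmevcntr_encoding(unsigned n, int *crm, int *opc2)
{
    *crm = 8 | (3 & (n >> 3));      /* use 12 | ... for PMEVTYPER<n> */
    *opc2 = n & 7;
}
#endif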
7597 
7598 #ifndef CONFIG_USER_ONLY
7599 /*
7600  * We don't know until after realize whether there's a GICv3
7601  * attached, and that is what registers the gicv3 sysregs.
7602  * So we have to fill in the GIC fields in ID_PFR1/ID_PFR1_EL1/ID_AA64PFR0_EL1
7603  * at runtime.
7604  */
7605 static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri)
7606 {
7607     ARMCPU *cpu = env_archcpu(env);
7608     uint64_t pfr1 = cpu->isar.id_pfr1;
7609 
7610     if (env->gicv3state) {
7611         pfr1 |= 1 << 28;
7612     }
7613     return pfr1;
7614 }
7615 
7616 static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri)
7617 {
7618     ARMCPU *cpu = env_archcpu(env);
7619     uint64_t pfr0 = cpu->isar.id_aa64pfr0;
7620 
7621     if (env->gicv3state) {
7622         pfr0 |= 1 << 24;
7623     }
7624     return pfr0;
7625 }
7626 #endif
7627 
7628 /*
7629  * Shared logic between LORID and the rest of the LOR* registers.
7630  * Secure state exclusion has already been dealt with.
7631  */
7632 static CPAccessResult access_lor_ns(CPUARMState *env,
7633                                     const ARMCPRegInfo *ri, bool isread)
7634 {
7635     int el = arm_current_el(env);
7636 
7637     if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TLOR)) {
7638         return CP_ACCESS_TRAP_EL2;
7639     }
7640     if (el < 3 && (env->cp15.scr_el3 & SCR_TLOR)) {
7641         return CP_ACCESS_TRAP_EL3;
7642     }
7643     return CP_ACCESS_OK;
7644 }
7645 
7646 static CPAccessResult access_lor_other(CPUARMState *env,
7647                                        const ARMCPRegInfo *ri, bool isread)
7648 {
7649     if (arm_is_secure_below_el3(env)) {
7650         /* Access denied in secure mode.  */
7651         return CP_ACCESS_TRAP;
7652     }
7653     return access_lor_ns(env, ri, isread);
7654 }
7655 
7656 /*
7657  * A trivial implementation of ARMv8.1-LOR leaves all of these
7658  * registers fixed at 0, which indicates that there are zero
7659  * supported Limited Ordering regions.
7660  */
7661 static const ARMCPRegInfo lor_reginfo[] = {
7662     { .name = "LORSA_EL1", .state = ARM_CP_STATE_AA64,
7663       .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 0,
7664       .access = PL1_RW, .accessfn = access_lor_other,
7665       .fgt = FGT_LORSA_EL1,
7666       .type = ARM_CP_CONST, .resetvalue = 0 },
7667     { .name = "LOREA_EL1", .state = ARM_CP_STATE_AA64,
7668       .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 1,
7669       .access = PL1_RW, .accessfn = access_lor_other,
7670       .fgt = FGT_LOREA_EL1,
7671       .type = ARM_CP_CONST, .resetvalue = 0 },
7672     { .name = "LORN_EL1", .state = ARM_CP_STATE_AA64,
7673       .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 2,
7674       .access = PL1_RW, .accessfn = access_lor_other,
7675       .fgt = FGT_LORN_EL1,
7676       .type = ARM_CP_CONST, .resetvalue = 0 },
7677     { .name = "LORC_EL1", .state = ARM_CP_STATE_AA64,
7678       .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 3,
7679       .access = PL1_RW, .accessfn = access_lor_other,
7680       .fgt = FGT_LORC_EL1,
7681       .type = ARM_CP_CONST, .resetvalue = 0 },
7682     { .name = "LORID_EL1", .state = ARM_CP_STATE_AA64,
7683       .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 7,
7684       .access = PL1_R, .accessfn = access_lor_ns,
7685       .fgt = FGT_LORID_EL1,
7686       .type = ARM_CP_CONST, .resetvalue = 0 },
7687 };
7688 
7689 #ifdef TARGET_AARCH64
7690 static CPAccessResult access_pauth(CPUARMState *env, const ARMCPRegInfo *ri,
7691                                    bool isread)
7692 {
7693     int el = arm_current_el(env);
7694 
7695     if (el < 2 &&
7696         arm_is_el2_enabled(env) &&
7697         !(arm_hcr_el2_eff(env) & HCR_APK)) {
7698         return CP_ACCESS_TRAP_EL2;
7699     }
7700     if (el < 3 &&
7701         arm_feature(env, ARM_FEATURE_EL3) &&
7702         !(env->cp15.scr_el3 & SCR_APK)) {
7703         return CP_ACCESS_TRAP_EL3;
7704     }
7705     return CP_ACCESS_OK;
7706 }
7707 
7708 static const ARMCPRegInfo pauth_reginfo[] = {
7709     { .name = "APDAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
7710       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 0,
7711       .access = PL1_RW, .accessfn = access_pauth,
7712       .fgt = FGT_APDAKEY,
7713       .fieldoffset = offsetof(CPUARMState, keys.apda.lo) },
7714     { .name = "APDAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
7715       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 1,
7716       .access = PL1_RW, .accessfn = access_pauth,
7717       .fgt = FGT_APDAKEY,
7718       .fieldoffset = offsetof(CPUARMState, keys.apda.hi) },
7719     { .name = "APDBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
7720       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 2,
7721       .access = PL1_RW, .accessfn = access_pauth,
7722       .fgt = FGT_APDBKEY,
7723       .fieldoffset = offsetof(CPUARMState, keys.apdb.lo) },
7724     { .name = "APDBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
7725       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 3,
7726       .access = PL1_RW, .accessfn = access_pauth,
7727       .fgt = FGT_APDBKEY,
7728       .fieldoffset = offsetof(CPUARMState, keys.apdb.hi) },
7729     { .name = "APGAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
7730       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 0,
7731       .access = PL1_RW, .accessfn = access_pauth,
7732       .fgt = FGT_APGAKEY,
7733       .fieldoffset = offsetof(CPUARMState, keys.apga.lo) },
7734     { .name = "APGAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
7735       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 1,
7736       .access = PL1_RW, .accessfn = access_pauth,
7737       .fgt = FGT_APGAKEY,
7738       .fieldoffset = offsetof(CPUARMState, keys.apga.hi) },
7739     { .name = "APIAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
7740       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 0,
7741       .access = PL1_RW, .accessfn = access_pauth,
7742       .fgt = FGT_APIAKEY,
7743       .fieldoffset = offsetof(CPUARMState, keys.apia.lo) },
7744     { .name = "APIAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
7745       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 1,
7746       .access = PL1_RW, .accessfn = access_pauth,
7747       .fgt = FGT_APIAKEY,
7748       .fieldoffset = offsetof(CPUARMState, keys.apia.hi) },
7749     { .name = "APIBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
7750       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 2,
7751       .access = PL1_RW, .accessfn = access_pauth,
7752       .fgt = FGT_APIBKEY,
7753       .fieldoffset = offsetof(CPUARMState, keys.apib.lo) },
7754     { .name = "APIBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
7755       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 3,
7756       .access = PL1_RW, .accessfn = access_pauth,
7757       .fgt = FGT_APIBKEY,
7758       .fieldoffset = offsetof(CPUARMState, keys.apib.hi) },
7759 };
7760 
7761 static const ARMCPRegInfo tlbirange_reginfo[] = {
7762     { .name = "TLBI_RVAE1IS", .state = ARM_CP_STATE_AA64,
7763       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 1,
7764       .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW,
7765       .fgt = FGT_TLBIRVAE1IS,
7766       .writefn = tlbi_aa64_rvae1is_write },
7767     { .name = "TLBI_RVAAE1IS", .state = ARM_CP_STATE_AA64,
7768       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 3,
7769       .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW,
7770       .fgt = FGT_TLBIRVAAE1IS,
7771       .writefn = tlbi_aa64_rvae1is_write },
7772     { .name = "TLBI_RVALE1IS", .state = ARM_CP_STATE_AA64,
7773       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 5,
7774       .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW,
7775       .fgt = FGT_TLBIRVALE1IS,
7776       .writefn = tlbi_aa64_rvae1is_write },
7777     { .name = "TLBI_RVAALE1IS", .state = ARM_CP_STATE_AA64,
7778       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 7,
7779       .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW,
7780       .fgt = FGT_TLBIRVAALE1IS,
7781       .writefn = tlbi_aa64_rvae1is_write },
7782     { .name = "TLBI_RVAE1OS", .state = ARM_CP_STATE_AA64,
7783       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
7784       .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW,
7785       .fgt = FGT_TLBIRVAE1OS,
7786       .writefn = tlbi_aa64_rvae1is_write },
7787     { .name = "TLBI_RVAAE1OS", .state = ARM_CP_STATE_AA64,
7788       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 3,
7789       .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW,
7790       .fgt = FGT_TLBIRVAAE1OS,
7791       .writefn = tlbi_aa64_rvae1is_write },
7792     { .name = "TLBI_RVALE1OS", .state = ARM_CP_STATE_AA64,
7793       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 5,
7794       .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW,
7795       .fgt = FGT_TLBIRVALE1OS,
7796       .writefn = tlbi_aa64_rvae1is_write },
7797     { .name = "TLBI_RVAALE1OS", .state = ARM_CP_STATE_AA64,
7798       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 7,
7799       .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW,
7800       .fgt = FGT_TLBIRVAALE1OS,
7801       .writefn = tlbi_aa64_rvae1is_write },
7802     { .name = "TLBI_RVAE1", .state = ARM_CP_STATE_AA64,
7803       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
7804       .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
7805       .fgt = FGT_TLBIRVAE1,
7806       .writefn = tlbi_aa64_rvae1_write },
7807     { .name = "TLBI_RVAAE1", .state = ARM_CP_STATE_AA64,
7808       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 3,
7809       .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
7810       .fgt = FGT_TLBIRVAAE1,
7811       .writefn = tlbi_aa64_rvae1_write },
7812     { .name = "TLBI_RVALE1", .state = ARM_CP_STATE_AA64,
7813       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 5,
7814       .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
7815       .fgt = FGT_TLBIRVALE1,
7816       .writefn = tlbi_aa64_rvae1_write },
7817     { .name = "TLBI_RVAALE1", .state = ARM_CP_STATE_AA64,
7818       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 7,
7819       .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
7820       .fgt = FGT_TLBIRVAALE1,
7821       .writefn = tlbi_aa64_rvae1_write },
7822     { .name = "TLBI_RIPAS2E1IS", .state = ARM_CP_STATE_AA64,
7823       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 2,
7824       .access = PL2_W, .type = ARM_CP_NO_RAW,
7825       .writefn = tlbi_aa64_ripas2e1is_write },
7826     { .name = "TLBI_RIPAS2LE1IS", .state = ARM_CP_STATE_AA64,
7827       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 6,
7828       .access = PL2_W, .type = ARM_CP_NO_RAW,
7829       .writefn = tlbi_aa64_ripas2e1is_write },
7830     { .name = "TLBI_RVAE2IS", .state = ARM_CP_STATE_AA64,
7831       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 2, .opc2 = 1,
7832       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
7833       .writefn = tlbi_aa64_rvae2is_write },
7834     { .name = "TLBI_RVALE2IS", .state = ARM_CP_STATE_AA64,
7835       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 2, .opc2 = 5,
7836       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
7837       .writefn = tlbi_aa64_rvae2is_write },
7838     { .name = "TLBI_RIPAS2E1", .state = ARM_CP_STATE_AA64,
7839       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 2,
7840       .access = PL2_W, .type = ARM_CP_NO_RAW,
7841       .writefn = tlbi_aa64_ripas2e1_write },
7842     { .name = "TLBI_RIPAS2LE1", .state = ARM_CP_STATE_AA64,
7843       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 6,
7844       .access = PL2_W, .type = ARM_CP_NO_RAW,
7845       .writefn = tlbi_aa64_ripas2e1_write },
7846     { .name = "TLBI_RVAE2OS", .state = ARM_CP_STATE_AA64,
7847       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 5, .opc2 = 1,
7848       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
7849       .writefn = tlbi_aa64_rvae2is_write },
7850     { .name = "TLBI_RVALE2OS", .state = ARM_CP_STATE_AA64,
7851       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 5, .opc2 = 5,
7852       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
7853       .writefn = tlbi_aa64_rvae2is_write },
7854     { .name = "TLBI_RVAE2", .state = ARM_CP_STATE_AA64,
7855       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 6, .opc2 = 1,
7856       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
7857       .writefn = tlbi_aa64_rvae2_write },
7858     { .name = "TLBI_RVALE2", .state = ARM_CP_STATE_AA64,
7859       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 6, .opc2 = 5,
7860       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
7861       .writefn = tlbi_aa64_rvae2_write },
7862     { .name = "TLBI_RVAE3IS", .state = ARM_CP_STATE_AA64,
7863       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 2, .opc2 = 1,
7864       .access = PL3_W, .type = ARM_CP_NO_RAW,
7865       .writefn = tlbi_aa64_rvae3is_write },
7866     { .name = "TLBI_RVALE3IS", .state = ARM_CP_STATE_AA64,
7867       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 2, .opc2 = 5,
7868       .access = PL3_W, .type = ARM_CP_NO_RAW,
7869       .writefn = tlbi_aa64_rvae3is_write },
7870     { .name = "TLBI_RVAE3OS", .state = ARM_CP_STATE_AA64,
7871       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 5, .opc2 = 1,
7872       .access = PL3_W, .type = ARM_CP_NO_RAW,
7873       .writefn = tlbi_aa64_rvae3is_write },
7874     { .name = "TLBI_RVALE3OS", .state = ARM_CP_STATE_AA64,
7875       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 5, .opc2 = 5,
7876       .access = PL3_W, .type = ARM_CP_NO_RAW,
7877       .writefn = tlbi_aa64_rvae3is_write },
7878     { .name = "TLBI_RVAE3", .state = ARM_CP_STATE_AA64,
7879       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 6, .opc2 = 1,
7880       .access = PL3_W, .type = ARM_CP_NO_RAW,
7881       .writefn = tlbi_aa64_rvae3_write },
7882     { .name = "TLBI_RVALE3", .state = ARM_CP_STATE_AA64,
7883       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 6, .opc2 = 5,
7884       .access = PL3_W, .type = ARM_CP_NO_RAW,
7885       .writefn = tlbi_aa64_rvae3_write },
7886 };
7887 
7888 static const ARMCPRegInfo tlbios_reginfo[] = {
7889     { .name = "TLBI_VMALLE1OS", .state = ARM_CP_STATE_AA64,
7890       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 0,
7891       .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW,
7892       .fgt = FGT_TLBIVMALLE1OS,
7893       .writefn = tlbi_aa64_vmalle1is_write },
7894     { .name = "TLBI_VAE1OS", .state = ARM_CP_STATE_AA64,
7895       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 1,
7896       .fgt = FGT_TLBIVAE1OS,
7897       .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW,
7898       .writefn = tlbi_aa64_vae1is_write },
7899     { .name = "TLBI_ASIDE1OS", .state = ARM_CP_STATE_AA64,
7900       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 2,
7901       .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW,
7902       .fgt = FGT_TLBIASIDE1OS,
7903       .writefn = tlbi_aa64_vmalle1is_write },
7904     { .name = "TLBI_VAAE1OS", .state = ARM_CP_STATE_AA64,
7905       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 3,
7906       .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW,
7907       .fgt = FGT_TLBIVAAE1OS,
7908       .writefn = tlbi_aa64_vae1is_write },
7909     { .name = "TLBI_VALE1OS", .state = ARM_CP_STATE_AA64,
7910       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 5,
7911       .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW,
7912       .fgt = FGT_TLBIVALE1OS,
7913       .writefn = tlbi_aa64_vae1is_write },
7914     { .name = "TLBI_VAALE1OS", .state = ARM_CP_STATE_AA64,
7915       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 7,
7916       .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW,
7917       .fgt = FGT_TLBIVAALE1OS,
7918       .writefn = tlbi_aa64_vae1is_write },
7919     { .name = "TLBI_ALLE2OS", .state = ARM_CP_STATE_AA64,
7920       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 0,
7921       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
7922       .writefn = tlbi_aa64_alle2is_write },
7923     { .name = "TLBI_VAE2OS", .state = ARM_CP_STATE_AA64,
7924       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 1,
7925       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
7926       .writefn = tlbi_aa64_vae2is_write },
7927     { .name = "TLBI_ALLE1OS", .state = ARM_CP_STATE_AA64,
7928       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 4,
7929       .access = PL2_W, .type = ARM_CP_NO_RAW,
7930       .writefn = tlbi_aa64_alle1is_write },
7931     { .name = "TLBI_VALE2OS", .state = ARM_CP_STATE_AA64,
7932       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 5,
7933       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
7934       .writefn = tlbi_aa64_vae2is_write },
7935     { .name = "TLBI_VMALLS12E1OS", .state = ARM_CP_STATE_AA64,
7936       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 6,
7937       .access = PL2_W, .type = ARM_CP_NO_RAW,
7938       .writefn = tlbi_aa64_alle1is_write },
7939     { .name = "TLBI_IPAS2E1OS", .state = ARM_CP_STATE_AA64,
7940       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 0,
7941       .access = PL2_W, .type = ARM_CP_NOP },
7942     { .name = "TLBI_RIPAS2E1OS", .state = ARM_CP_STATE_AA64,
7943       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 3,
7944       .access = PL2_W, .type = ARM_CP_NOP },
7945     { .name = "TLBI_IPAS2LE1OS", .state = ARM_CP_STATE_AA64,
7946       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 4,
7947       .access = PL2_W, .type = ARM_CP_NOP },
7948     { .name = "TLBI_RIPAS2LE1OS", .state = ARM_CP_STATE_AA64,
7949       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 7,
7950       .access = PL2_W, .type = ARM_CP_NOP },
7951     { .name = "TLBI_ALLE3OS", .state = ARM_CP_STATE_AA64,
7952       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 0,
7953       .access = PL3_W, .type = ARM_CP_NO_RAW,
7954       .writefn = tlbi_aa64_alle3is_write },
7955     { .name = "TLBI_VAE3OS", .state = ARM_CP_STATE_AA64,
7956       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 1,
7957       .access = PL3_W, .type = ARM_CP_NO_RAW,
7958       .writefn = tlbi_aa64_vae3is_write },
7959     { .name = "TLBI_VALE3OS", .state = ARM_CP_STATE_AA64,
7960       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 5,
7961       .access = PL3_W, .type = ARM_CP_NO_RAW,
7962       .writefn = tlbi_aa64_vae3is_write },
7963 };
7964 
7965 static uint64_t rndr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
7966 {
7967     Error *err = NULL;
7968     uint64_t ret;
7969 
7970     /* Success sets NZCV = 0000.  */
7971     env->NF = env->CF = env->VF = 0, env->ZF = 1;
7972 
7973     if (qemu_guest_getrandom(&ret, sizeof(ret), &err) < 0) {
7974         /*
7975          * ??? Failed, for unknown reasons in the crypto subsystem.
7976          * The best we can do is log the reason and return the
7977          * timed-out indication to the guest.  There is no reason
7978          * we know to expect this failure to be transitory, so the
7979          * guest may well hang retrying the operation.
7980          */
7981         qemu_log_mask(LOG_UNIMP, "%s: Crypto failure: %s\n",
7982                       ri->name, error_get_pretty(err));
7983         error_free(err);
7984 
7985         env->ZF = 0; /* NZCV = 0100 */
7986         return 0;
7987     }
7988     return ret;
7989 }
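
/*
 * Illustrative sketch, compiled out: how the split flags written above
 * map back to NZCV.  N and V live in bit 31 of NF/VF, C is 0 or 1, and
 * the Z flag reads as set exactly when ZF == 0 -- hence ZF = 1 for
 * "NZCV = 0000" on success and ZF = 0 for "NZCV = 0100" on failure.
 */
#if 0
static uint32_t nzcv_from_env_flags(CPUARMState *env)
{
    return ((env->NF >> 31) << 3)       /* N */
         | ((env->ZF == 0) << 2)        /* Z: inverted storage */
         | ((env->CF & 1) << 1)         /* C */
         | (env->VF >> 31);             /* V */
}
#endif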
7990 
7991 /* We do not support re-seeding, so the two registers operate the same.  */
7992 static const ARMCPRegInfo rndr_reginfo[] = {
7993     { .name = "RNDR", .state = ARM_CP_STATE_AA64,
7994       .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO,
7995       .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 0,
7996       .access = PL0_R, .readfn = rndr_readfn },
7997     { .name = "RNDRRS", .state = ARM_CP_STATE_AA64,
7998       .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO,
7999       .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 1,
8000       .access = PL0_R, .readfn = rndr_readfn },
8001 };
8002 
8003 static void dccvap_writefn(CPUARMState *env, const ARMCPRegInfo *opaque,
8004                           uint64_t value)
8005 {
8006 #ifdef CONFIG_TCG
8007     ARMCPU *cpu = env_archcpu(env);
8008     /* CTR_EL0 System register -> DminLine, bits [19:16] */
8009     uint64_t dline_size = 4 << ((cpu->ctr >> 16) & 0xF);
8010     uint64_t vaddr_in = (uint64_t) value;
8011     uint64_t vaddr = vaddr_in & ~(dline_size - 1);
8012     void *haddr;
8013     int mem_idx = arm_env_mmu_index(env);
8014 
8015     /* This won't be crossing page boundaries */
8016     haddr = probe_read(env, vaddr, dline_size, mem_idx, GETPC());
8017     if (haddr) {
8018 #ifndef CONFIG_USER_ONLY
8019 
8020         ram_addr_t offset;
8021         MemoryRegion *mr;
8022 
8023         /* RCU lock is already being held */
8024         mr = memory_region_from_host(haddr, &offset);
8025 
8026         if (mr) {
8027             memory_region_writeback(mr, offset, dline_size);
8028         }
8029 #endif /* CONFIG_USER_ONLY */
8030     }
8031 #else
8032     /* Handled by hardware accelerator. */
8033     g_assert_not_reached();
8034 #endif /* CONFIG_TCG */
8035 }
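
/*
 * Illustrative sketch, compiled out: the line-size arithmetic used
 * above.  CTR_EL0.DminLine (bits [19:16]) is log2 of the smallest data
 * cache line in words, so 4 << DminLine converts it to bytes; e.g.
 * DminLine = 4 gives 64-byte lines, and 0x1234 aligns down to 0x1200.
 */
#if 0
static uint64_t dcache_line_base(uint64_t ctr, uint64_t vaddr)
{
    uint64_t dline_size = 4 << ((ctr >> 16) & 0xF);     /* in bytes */
    return vaddr & ~(dline_size - 1);
}
#endif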
8036 
8037 static const ARMCPRegInfo dcpop_reg[] = {
8038     { .name = "DC_CVAP", .state = ARM_CP_STATE_AA64,
8039       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 1,
8040       .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END,
8041       .fgt = FGT_DCCVAP,
8042       .accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn },
8043 };
8044 
8045 static const ARMCPRegInfo dcpodp_reg[] = {
8046     { .name = "DC_CVADP", .state = ARM_CP_STATE_AA64,
8047       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 1,
8048       .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END,
8049       .fgt = FGT_DCCVADP,
8050       .accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn },
8051 };
8052 
8053 static CPAccessResult access_aa64_tid5(CPUARMState *env, const ARMCPRegInfo *ri,
8054                                        bool isread)
8055 {
8056     if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID5)) {
8057         return CP_ACCESS_TRAP_EL2;
8058     }
8059 
8060     return CP_ACCESS_OK;
8061 }
8062 
8063 static CPAccessResult access_mte(CPUARMState *env, const ARMCPRegInfo *ri,
8064                                  bool isread)
8065 {
8066     int el = arm_current_el(env);
8067     if (el < 2 && arm_is_el2_enabled(env)) {
8068         uint64_t hcr = arm_hcr_el2_eff(env);
8069         if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
8070             return CP_ACCESS_TRAP_EL2;
8071         }
8072     }
8073     if (el < 3 &&
8074         arm_feature(env, ARM_FEATURE_EL3) &&
8075         !(env->cp15.scr_el3 & SCR_ATA)) {
8076         return CP_ACCESS_TRAP_EL3;
8077     }
8078     return CP_ACCESS_OK;
8079 }
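
/*
 * Illustrative sketch, compiled out: a truth table for the EL2 trap
 * condition above.  With HCR_EL2.ATA set there is never a trap; with
 * ATA clear, the access traps to EL2 unless both E2H and TGE are set
 * (in which case the EL2&0 regime owns these registers).
 */
#if 0
static bool mte_traps_to_el2(uint64_t hcr)
{
    return !(hcr & HCR_ATA) &&
           (hcr & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE);
}
#endif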
8080 
8081 static CPAccessResult access_tfsr_el1(CPUARMState *env, const ARMCPRegInfo *ri,
8082                                       bool isread)
8083 {
8084     CPAccessResult nv1 = access_nv1(env, ri, isread);
8085 
8086     if (nv1 != CP_ACCESS_OK) {
8087         return nv1;
8088     }
8089     return access_mte(env, ri, isread);
8090 }
8091 
8092 static CPAccessResult access_tfsr_el2(CPUARMState *env, const ARMCPRegInfo *ri,
8093                                       bool isread)
8094 {
8095     /*
8096      * TFSR_EL2: similar to generic access_mte(), but we need to
8097      * account for FEAT_NV. At EL1 this must be a FEAT_NV access;
8098      * if NV2 is enabled then we will redirect this to TFSR_EL1
8099      * after doing the HCR and SCR ATA traps; otherwise this will
8100      * be a trap to EL2 and the HCR/SCR traps do not apply.
8101      */
8102     int el = arm_current_el(env);
8103 
8104     if (el == 1 && (arm_hcr_el2_eff(env) & HCR_NV2)) {
8105         return CP_ACCESS_OK;
8106     }
8107     if (el < 2 && arm_is_el2_enabled(env)) {
8108         uint64_t hcr = arm_hcr_el2_eff(env);
8109         if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
8110             return CP_ACCESS_TRAP_EL2;
8111         }
8112     }
8113     if (el < 3 &&
8114         arm_feature(env, ARM_FEATURE_EL3) &&
8115         !(env->cp15.scr_el3 & SCR_ATA)) {
8116         return CP_ACCESS_TRAP_EL3;
8117     }
8118     return CP_ACCESS_OK;
8119 }
8120 
8121 static uint64_t tco_read(CPUARMState *env, const ARMCPRegInfo *ri)
8122 {
8123     return env->pstate & PSTATE_TCO;
8124 }
8125 
8126 static void tco_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
8127 {
8128     env->pstate = (env->pstate & ~PSTATE_TCO) | (val & PSTATE_TCO);
8129 }
8130 
8131 static const ARMCPRegInfo mte_reginfo[] = {
8132     { .name = "TFSRE0_EL1", .state = ARM_CP_STATE_AA64,
8133       .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 6, .opc2 = 1,
8134       .access = PL1_RW, .accessfn = access_mte,
8135       .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[0]) },
8136     { .name = "TFSR_EL1", .state = ARM_CP_STATE_AA64,
8137       .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 6, .opc2 = 0,
8138       .access = PL1_RW, .accessfn = access_tfsr_el1,
8139       .nv2_redirect_offset = 0x190 | NV2_REDIR_NV1,
8140       .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[1]) },
8141     { .name = "TFSR_EL2", .state = ARM_CP_STATE_AA64,
8142       .type = ARM_CP_NV2_REDIRECT,
8143       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 6, .opc2 = 0,
8144       .access = PL2_RW, .accessfn = access_tfsr_el2,
8145       .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[2]) },
8146     { .name = "TFSR_EL3", .state = ARM_CP_STATE_AA64,
8147       .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 6, .opc2 = 0,
8148       .access = PL3_RW,
8149       .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[3]) },
8150     { .name = "RGSR_EL1", .state = ARM_CP_STATE_AA64,
8151       .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 5,
8152       .access = PL1_RW, .accessfn = access_mte,
8153       .fieldoffset = offsetof(CPUARMState, cp15.rgsr_el1) },
8154     { .name = "GCR_EL1", .state = ARM_CP_STATE_AA64,
8155       .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 6,
8156       .access = PL1_RW, .accessfn = access_mte,
8157       .fieldoffset = offsetof(CPUARMState, cp15.gcr_el1) },
8158     { .name = "TCO", .state = ARM_CP_STATE_AA64,
8159       .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 7,
8160       .type = ARM_CP_NO_RAW,
8161       .access = PL0_RW, .readfn = tco_read, .writefn = tco_write },
8162     { .name = "DC_IGVAC", .state = ARM_CP_STATE_AA64,
8163       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 3,
8164       .type = ARM_CP_NOP, .access = PL1_W,
8165       .fgt = FGT_DCIVAC,
8166       .accessfn = aa64_cacheop_poc_access },
8167     { .name = "DC_IGSW", .state = ARM_CP_STATE_AA64,
8168       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 4,
8169       .fgt = FGT_DCISW,
8170       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
8171     { .name = "DC_IGDVAC", .state = ARM_CP_STATE_AA64,
8172       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 5,
8173       .type = ARM_CP_NOP, .access = PL1_W,
8174       .fgt = FGT_DCIVAC,
8175       .accessfn = aa64_cacheop_poc_access },
8176     { .name = "DC_IGDSW", .state = ARM_CP_STATE_AA64,
8177       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 6,
8178       .fgt = FGT_DCISW,
8179       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
8180     { .name = "DC_CGSW", .state = ARM_CP_STATE_AA64,
8181       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 4,
8182       .fgt = FGT_DCCSW,
8183       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
8184     { .name = "DC_CGDSW", .state = ARM_CP_STATE_AA64,
8185       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 6,
8186       .fgt = FGT_DCCSW,
8187       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
8188     { .name = "DC_CIGSW", .state = ARM_CP_STATE_AA64,
8189       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 4,
8190       .fgt = FGT_DCCISW,
8191       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
8192     { .name = "DC_CIGDSW", .state = ARM_CP_STATE_AA64,
8193       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 6,
8194       .fgt = FGT_DCCISW,
8195       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
8196 };
8197 
8198 static const ARMCPRegInfo mte_tco_ro_reginfo[] = {
8199     { .name = "TCO", .state = ARM_CP_STATE_AA64,
8200       .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 7,
8201       .type = ARM_CP_CONST, .access = PL0_RW, },
8202 };
8203 
8204 static const ARMCPRegInfo mte_el0_cacheop_reginfo[] = {
8205     { .name = "DC_CGVAC", .state = ARM_CP_STATE_AA64,
8206       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 3,
8207       .type = ARM_CP_NOP, .access = PL0_W,
8208       .fgt = FGT_DCCVAC,
8209       .accessfn = aa64_cacheop_poc_access },
8210     { .name = "DC_CGDVAC", .state = ARM_CP_STATE_AA64,
8211       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 5,
8212       .type = ARM_CP_NOP, .access = PL0_W,
8213       .fgt = FGT_DCCVAC,
8214       .accessfn = aa64_cacheop_poc_access },
8215     { .name = "DC_CGVAP", .state = ARM_CP_STATE_AA64,
8216       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 3,
8217       .type = ARM_CP_NOP, .access = PL0_W,
8218       .fgt = FGT_DCCVAP,
8219       .accessfn = aa64_cacheop_poc_access },
8220     { .name = "DC_CGDVAP", .state = ARM_CP_STATE_AA64,
8221       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 5,
8222       .type = ARM_CP_NOP, .access = PL0_W,
8223       .fgt = FGT_DCCVAP,
8224       .accessfn = aa64_cacheop_poc_access },
8225     { .name = "DC_CGVADP", .state = ARM_CP_STATE_AA64,
8226       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 3,
8227       .type = ARM_CP_NOP, .access = PL0_W,
8228       .fgt = FGT_DCCVADP,
8229       .accessfn = aa64_cacheop_poc_access },
8230     { .name = "DC_CGDVADP", .state = ARM_CP_STATE_AA64,
8231       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 5,
8232       .type = ARM_CP_NOP, .access = PL0_W,
8233       .fgt = FGT_DCCVADP,
8234       .accessfn = aa64_cacheop_poc_access },
8235     { .name = "DC_CIGVAC", .state = ARM_CP_STATE_AA64,
8236       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 3,
8237       .type = ARM_CP_NOP, .access = PL0_W,
8238       .fgt = FGT_DCCIVAC,
8239       .accessfn = aa64_cacheop_poc_access },
8240     { .name = "DC_CIGDVAC", .state = ARM_CP_STATE_AA64,
8241       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 5,
8242       .type = ARM_CP_NOP, .access = PL0_W,
8243       .fgt = FGT_DCCIVAC,
8244       .accessfn = aa64_cacheop_poc_access },
8245     { .name = "DC_GVA", .state = ARM_CP_STATE_AA64,
8246       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 3,
8247       .access = PL0_W, .type = ARM_CP_DC_GVA,
8248 #ifndef CONFIG_USER_ONLY
8249       /* Avoid overhead of an access check that always passes in user-mode */
8250       .accessfn = aa64_zva_access,
8251       .fgt = FGT_DCZVA,
8252 #endif
8253     },
8254     { .name = "DC_GZVA", .state = ARM_CP_STATE_AA64,
8255       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 4,
8256       .access = PL0_W, .type = ARM_CP_DC_GZVA,
8257 #ifndef CONFIG_USER_ONLY
8258       /* Avoid overhead of an access check that always passes in user-mode */
8259       .accessfn = aa64_zva_access,
8260       .fgt = FGT_DCZVA,
8261 #endif
8262     },
8263 };
8264 
8265 static CPAccessResult access_scxtnum(CPUARMState *env, const ARMCPRegInfo *ri,
8266                                      bool isread)
8267 {
8268     uint64_t hcr = arm_hcr_el2_eff(env);
8269     int el = arm_current_el(env);
8270 
8271     if (el == 0 && !((hcr & HCR_E2H) && (hcr & HCR_TGE))) {
8272         if (env->cp15.sctlr_el[1] & SCTLR_TSCXT) {
8273             if (hcr & HCR_TGE) {
8274                 return CP_ACCESS_TRAP_EL2;
8275             }
8276             return CP_ACCESS_TRAP;
8277         }
8278     } else if (el < 2 && (env->cp15.sctlr_el[2] & SCTLR_TSCXT)) {
8279         return CP_ACCESS_TRAP_EL2;
8280     }
8281     if (el < 2 && arm_is_el2_enabled(env) && !(hcr & HCR_ENSCXT)) {
8282         return CP_ACCESS_TRAP_EL2;
8283     }
8284     if (el < 3
8285         && arm_feature(env, ARM_FEATURE_EL3)
8286         && !(env->cp15.scr_el3 & SCR_ENSCXT)) {
8287         return CP_ACCESS_TRAP_EL3;
8288     }
8289     return CP_ACCESS_OK;
8290 }
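/*
 * Reading the checks above in order: for an EL0 access, SCTLR_EL1.TSCXT
 * traps to EL1 (or to EL2 if HCR_EL2.TGE is set without E2H), while with
 * E2H and TGE both set it is SCTLR_EL2.TSCXT that applies. Past the
 * TSCXT checks, a clear HCR_EL2.EnSCXT is reported before a clear
 * SCR_EL3.EnSCXT, i.e. the EL2 trap takes priority over the EL3 one.
 */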
8291 
8292 static CPAccessResult access_scxtnum_el1(CPUARMState *env,
8293                                          const ARMCPRegInfo *ri,
8294                                          bool isread)
8295 {
8296     CPAccessResult nv1 = access_nv1(env, ri, isread);
8297 
8298     if (nv1 != CP_ACCESS_OK) {
8299         return nv1;
8300     }
8301     return access_scxtnum(env, ri, isread);
8302 }
8303 
8304 static const ARMCPRegInfo scxtnum_reginfo[] = {
8305     { .name = "SCXTNUM_EL0", .state = ARM_CP_STATE_AA64,
8306       .opc0 = 3, .opc1 = 3, .crn = 13, .crm = 0, .opc2 = 7,
8307       .access = PL0_RW, .accessfn = access_scxtnum,
8308       .fgt = FGT_SCXTNUM_EL0,
8309       .fieldoffset = offsetof(CPUARMState, scxtnum_el[0]) },
8310     { .name = "SCXTNUM_EL1", .state = ARM_CP_STATE_AA64,
8311       .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 7,
8312       .access = PL1_RW, .accessfn = access_scxtnum_el1,
8313       .fgt = FGT_SCXTNUM_EL1,
8314       .nv2_redirect_offset = 0x188 | NV2_REDIR_NV1,
8315       .fieldoffset = offsetof(CPUARMState, scxtnum_el[1]) },
8316     { .name = "SCXTNUM_EL2", .state = ARM_CP_STATE_AA64,
8317       .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 7,
8318       .access = PL2_RW, .accessfn = access_scxtnum,
8319       .fieldoffset = offsetof(CPUARMState, scxtnum_el[2]) },
8320     { .name = "SCXTNUM_EL3", .state = ARM_CP_STATE_AA64,
8321       .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 7,
8322       .access = PL3_RW,
8323       .fieldoffset = offsetof(CPUARMState, scxtnum_el[3]) },
8324 };
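/*
 * The nv2_redirect_offset fields give the offset from VNCR_EL2 at which
 * FEAT_NV2 redirects the register access into memory; the NV2_REDIR_NV1
 * flag on SCXTNUM_EL1 restricts that redirect to configurations with
 * HCR_EL2.NV1 set.
 */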
8325 
8326 static CPAccessResult access_fgt(CPUARMState *env, const ARMCPRegInfo *ri,
8327                                  bool isread)
8328 {
8329     if (arm_current_el(env) == 2 &&
8330         arm_feature(env, ARM_FEATURE_EL3) && !(env->cp15.scr_el3 & SCR_FGTEN)) {
8331         return CP_ACCESS_TRAP_EL3;
8332     }
8333     return CP_ACCESS_OK;
8334 }
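/*
 * i.e. EL2 accesses to the fine-grained-trap registers themselves trap
 * to EL3 while SCR_EL3.FGTEN is clear; with no EL3, or from EL3 itself,
 * they are always accessible.
 */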
8335 
8336 static const ARMCPRegInfo fgt_reginfo[] = {
8337     { .name = "HFGRTR_EL2", .state = ARM_CP_STATE_AA64,
8338       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
8339       .nv2_redirect_offset = 0x1b8,
8340       .access = PL2_RW, .accessfn = access_fgt,
8341       .fieldoffset = offsetof(CPUARMState, cp15.fgt_read[FGTREG_HFGRTR]) },
8342     { .name = "HFGWTR_EL2", .state = ARM_CP_STATE_AA64,
8343       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 5,
8344       .nv2_redirect_offset = 0x1c0,
8345       .access = PL2_RW, .accessfn = access_fgt,
8346       .fieldoffset = offsetof(CPUARMState, cp15.fgt_write[FGTREG_HFGWTR]) },
8347     { .name = "HDFGRTR_EL2", .state = ARM_CP_STATE_AA64,
8348       .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 1, .opc2 = 4,
8349       .nv2_redirect_offset = 0x1d0,
8350       .access = PL2_RW, .accessfn = access_fgt,
8351       .fieldoffset = offsetof(CPUARMState, cp15.fgt_read[FGTREG_HDFGRTR]) },
8352     { .name = "HDFGWTR_EL2", .state = ARM_CP_STATE_AA64,
8353       .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 1, .opc2 = 5,
8354       .nv2_redirect_offset = 0x1d8,
8355       .access = PL2_RW, .accessfn = access_fgt,
8356       .fieldoffset = offsetof(CPUARMState, cp15.fgt_write[FGTREG_HDFGWTR]) },
8357     { .name = "HFGITR_EL2", .state = ARM_CP_STATE_AA64,
8358       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 6,
8359       .nv2_redirect_offset = 0x1c8,
8360       .access = PL2_RW, .accessfn = access_fgt,
8361       .fieldoffset = offsetof(CPUARMState, cp15.fgt_exec[FGTREG_HFGITR]) },
8362 };
8363 
8364 static void vncr_write(CPUARMState *env, const ARMCPRegInfo *ri,
8365                        uint64_t value)
8366 {
8367     /*
8368      * Clear the RES0 bottom 12 bits; this means at runtime we can guarantee
8369      * that VNCR_EL2 + offset is 64-bit aligned. We don't need to do anything
8370      * about the RESS bits at the top -- we choose the "generate an EL2
8371      * translation abort on use" CONSTRAINED UNPREDICTABLE option (i.e. let
8372      * the ptw.c code detect the resulting invalid address).
8373      */
8374     env->cp15.vncr_el2 = value & ~0xfffULL;
8375 }
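#if 0   /* Not built: a minimal sketch of the RES0 masking above. */
    uint64_t stored = 0x0123456789abcdefULL & ~0xfffULL;
    /*
     * stored == 0x0123456789abc000ULL: the low 12 bits are discarded, so
     * the stored VNCR_EL2 is 4KiB-aligned and VNCR_EL2 plus any
     * multiple-of-8 redirect offset stays 64-bit aligned, as promised.
     */
#endif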
8376 
8377 static const ARMCPRegInfo nv2_reginfo[] = {
8378     { .name = "VNCR_EL2", .state = ARM_CP_STATE_AA64,
8379       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 2, .opc2 = 0,
8380       .access = PL2_RW,
8381       .writefn = vncr_write,
8382       .nv2_redirect_offset = 0xb0,
8383       .fieldoffset = offsetof(CPUARMState, cp15.vncr_el2) },
8384 };
8385 
8386 #endif /* TARGET_AARCH64 */
8387 
8388 static CPAccessResult access_predinv(CPUARMState *env, const ARMCPRegInfo *ri,
8389                                      bool isread)
8390 {
8391     int el = arm_current_el(env);
8392 
8393     if (el == 0) {
8394         uint64_t sctlr = arm_sctlr(env, el);
8395         if (!(sctlr & SCTLR_EnRCTX)) {
8396             return CP_ACCESS_TRAP;
8397         }
8398     } else if (el == 1) {
8399         uint64_t hcr = arm_hcr_el2_eff(env);
8400         if (hcr & HCR_NV) {
8401             return CP_ACCESS_TRAP_EL2;
8402         }
8403     }
8404     return CP_ACCESS_OK;
8405 }
8406 
8407 static const ARMCPRegInfo predinv_reginfo[] = {
8408     { .name = "CFP_RCTX", .state = ARM_CP_STATE_AA64,
8409       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 4,
8410       .fgt = FGT_CFPRCTX,
8411       .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
8412     { .name = "DVP_RCTX", .state = ARM_CP_STATE_AA64,
8413       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 5,
8414       .fgt = FGT_DVPRCTX,
8415       .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
8416     { .name = "CPP_RCTX", .state = ARM_CP_STATE_AA64,
8417       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 7,
8418       .fgt = FGT_CPPRCTX,
8419       .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
8420     /*
8421      * Note the AArch32 opcodes have a different OPC1.
8422      */
8423     { .name = "CFPRCTX", .state = ARM_CP_STATE_AA32,
8424       .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 4,
8425       .fgt = FGT_CFPRCTX,
8426       .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
8427     { .name = "DVPRCTX", .state = ARM_CP_STATE_AA32,
8428       .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 5,
8429       .fgt = FGT_DVPRCTX,
8430       .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
8431     { .name = "CPPRCTX", .state = ARM_CP_STATE_AA32,
8432       .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 7,
8433       .fgt = FGT_CPPRCTX,
8434       .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
8435 };
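/*
 * All of these are ARM_CP_NOP because TCG has no branch-prediction or
 * other speculative state to restrict; only the access/trap behaviour
 * of the restriction-by-context instructions needs modelling.
 */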
8436 
8437 static uint64_t ccsidr2_read(CPUARMState *env, const ARMCPRegInfo *ri)
8438 {
8439     /* Read the high 32 bits of the current CCSIDR */
8440     return extract64(ccsidr_read(env, ri), 32, 32);
8441 }
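/*
 * extract64(x, start, len) returns bits [start, start+len) of x shifted
 * down to bit 0, so e.g. extract64(0x1122334455667788ULL, 32, 32) is
 * 0x11223344 -- the AArch32 CCSIDR2 view of a 64-bit CCSIDR value.
 */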
8442 
8443 static const ARMCPRegInfo ccsidr2_reginfo[] = {
8444     { .name = "CCSIDR2", .state = ARM_CP_STATE_BOTH,
8445       .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 2,
8446       .access = PL1_R,
8447       .accessfn = access_tid4,
8448       .readfn = ccsidr2_read, .type = ARM_CP_NO_RAW },
8449 };
8450 
8451 static CPAccessResult access_aa64_tid3(CPUARMState *env, const ARMCPRegInfo *ri,
8452                                        bool isread)
8453 {
8454     if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID3)) {
8455         return CP_ACCESS_TRAP_EL2;
8456     }
8457 
8458     return CP_ACCESS_OK;
8459 }
8460 
8461 static CPAccessResult access_aa32_tid3(CPUARMState *env, const ARMCPRegInfo *ri,
8462                                        bool isread)
8463 {
8464     if (arm_feature(env, ARM_FEATURE_V8)) {
8465         return access_aa64_tid3(env, ri, isread);
8466     }
8467 
8468     return CP_ACCESS_OK;
8469 }
8470 
8471 static CPAccessResult access_jazelle(CPUARMState *env, const ARMCPRegInfo *ri,
8472                                      bool isread)
8473 {
8474     if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID0)) {
8475         return CP_ACCESS_TRAP_EL2;
8476     }
8477 
8478     return CP_ACCESS_OK;
8479 }
8480 
8481 static CPAccessResult access_joscr_jmcr(CPUARMState *env,
8482                                         const ARMCPRegInfo *ri, bool isread)
8483 {
8484     /*
8485      * HSTR.TJDBX traps JOSCR and JMCR accesses, but it exists only
8486      * in v7A, not in v8A.
8487      */
8488     if (!arm_feature(env, ARM_FEATURE_V8) &&
8489         arm_current_el(env) < 2 && !arm_is_secure_below_el3(env) &&
8490         (env->cp15.hstr_el2 & HSTR_TJDBX)) {
8491         return CP_ACCESS_TRAP_EL2;
8492     }
8493     return CP_ACCESS_OK;
8494 }
8495 
8496 static const ARMCPRegInfo jazelle_regs[] = {
8497     { .name = "JIDR",
8498       .cp = 14, .crn = 0, .crm = 0, .opc1 = 7, .opc2 = 0,
8499       .access = PL1_R, .accessfn = access_jazelle,
8500       .type = ARM_CP_CONST, .resetvalue = 0 },
8501     { .name = "JOSCR",
8502       .cp = 14, .crn = 1, .crm = 0, .opc1 = 7, .opc2 = 0,
8503       .accessfn = access_joscr_jmcr,
8504       .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
8505     { .name = "JMCR",
8506       .cp = 14, .crn = 2, .crm = 0, .opc1 = 7, .opc2 = 0,
8507       .accessfn = access_joscr_jmcr,
8508       .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
8509 };
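/*
 * These implement only the trivial Jazelle level of support: JIDR reads
 * as zero and JOSCR/JMCR are RAZ/WI, since writes to ARM_CP_CONST
 * registers are ignored.
 */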
8510 
8511 static const ARMCPRegInfo contextidr_el2 = {
8512     .name = "CONTEXTIDR_EL2", .state = ARM_CP_STATE_AA64,
8513     .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 1,
8514     .access = PL2_RW,
8515     .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[2])
8516 };
8517 
8518 static const ARMCPRegInfo vhe_reginfo[] = {
8519     { .name = "TTBR1_EL2", .state = ARM_CP_STATE_AA64,
8520       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 1,
8521       .access = PL2_RW, .writefn = vmsa_tcr_ttbr_el2_write,
8522       .raw_writefn = raw_write,
8523       .fieldoffset = offsetof(CPUARMState, cp15.ttbr1_el[2]) },
8524 #ifndef CONFIG_USER_ONLY
8525     { .name = "CNTHV_CVAL_EL2", .state = ARM_CP_STATE_AA64,
8526       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 2,
8527       .fieldoffset =
8528         offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].cval),
8529       .type = ARM_CP_IO, .access = PL2_RW,
8530       .writefn = gt_hv_cval_write, .raw_writefn = raw_write },
8531     { .name = "CNTHV_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
8532       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 0,
8533       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
8534       .resetfn = gt_hv_timer_reset,
8535       .readfn = gt_hv_tval_read, .writefn = gt_hv_tval_write },
8536     { .name = "CNTHV_CTL_EL2", .state = ARM_CP_STATE_BOTH,
8537       .type = ARM_CP_IO,
8538       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 1,
8539       .access = PL2_RW,
8540       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].ctl),
8541       .writefn = gt_hv_ctl_write, .raw_writefn = raw_write },
8542     { .name = "CNTP_CTL_EL02", .state = ARM_CP_STATE_AA64,
8543       .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 1,
8544       .type = ARM_CP_IO | ARM_CP_ALIAS,
8545       .access = PL2_RW, .accessfn = access_el1nvpct,
8546       .nv2_redirect_offset = 0x180 | NV2_REDIR_NO_NV1,
8547       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
8548       .writefn = gt_phys_ctl_write, .raw_writefn = raw_write },
8549     { .name = "CNTV_CTL_EL02", .state = ARM_CP_STATE_AA64,
8550       .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 1,
8551       .type = ARM_CP_IO | ARM_CP_ALIAS,
8552       .access = PL2_RW, .accessfn = access_el1nvvct,
8553       .nv2_redirect_offset = 0x170 | NV2_REDIR_NO_NV1,
8554       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
8555       .writefn = gt_virt_ctl_write, .raw_writefn = raw_write },
8556     { .name = "CNTP_TVAL_EL02", .state = ARM_CP_STATE_AA64,
8557       .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 0,
8558       .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS,
8559       .access = PL2_RW, .accessfn = e2h_access,
8560       .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write },
8561     { .name = "CNTV_TVAL_EL02", .state = ARM_CP_STATE_AA64,
8562       .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 0,
8563       .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS,
8564       .access = PL2_RW, .accessfn = e2h_access,
8565       .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write },
8566     { .name = "CNTP_CVAL_EL02", .state = ARM_CP_STATE_AA64,
8567       .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 2,
8568       .type = ARM_CP_IO | ARM_CP_ALIAS,
8569       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
8570       .nv2_redirect_offset = 0x178 | NV2_REDIR_NO_NV1,
8571       .access = PL2_RW, .accessfn = access_el1nvpct,
8572       .writefn = gt_phys_cval_write, .raw_writefn = raw_write },
8573     { .name = "CNTV_CVAL_EL02", .state = ARM_CP_STATE_AA64,
8574       .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 2,
8575       .type = ARM_CP_IO | ARM_CP_ALIAS,
8576       .nv2_redirect_offset = 0x168 | NV2_REDIR_NO_NV1,
8577       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
8578       .access = PL2_RW, .accessfn = access_el1nvvct,
8579       .writefn = gt_virt_cval_write, .raw_writefn = raw_write },
8580 #endif
8581 };
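/*
 * The CNT*_EL02 entries above are the FEAT_VHE (E2H) aliases of the EL0
 * timer registers: ARM_CP_ALIAS plus a fieldoffset shared with the
 * CNTP_*_EL0 and CNTV_*_EL0 definitions means they expose the same state
 * without duplicating it for migration.
 */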
8582 
8583 #ifndef CONFIG_USER_ONLY
8584 static const ARMCPRegInfo ats1e1_reginfo[] = {
8585     { .name = "AT_S1E1RP", .state = ARM_CP_STATE_AA64,
8586       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0,
8587       .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
8588       .fgt = FGT_ATS1E1RP,
8589       .accessfn = at_s1e01_access, .writefn = ats_write64 },
8590     { .name = "AT_S1E1WP", .state = ARM_CP_STATE_AA64,
8591       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1,
8592       .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
8593       .fgt = FGT_ATS1E1WP,
8594       .accessfn = at_s1e01_access, .writefn = ats_write64 },
8595 };
8596 
8597 static const ARMCPRegInfo ats1cp_reginfo[] = {
8598     { .name = "ATS1CPRP",
8599       .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0,
8600       .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
8601       .writefn = ats_write },
8602     { .name = "ATS1CPWP",
8603       .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1,
8604       .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
8605       .writefn = ats_write },
8606 };
8607 #endif
8608 
8609 /*
8610  * ACTLR2 and HACTLR2 map to ACTLR_EL1[63:32] and
8611  * ACTLR_EL2[63:32]. They exist only if the ID_MMFR4.AC2 field
8612  * is non-zero: never the case for ARMv7, optional for ARMv8,
8613  * and mandatory for ARMv8.2 and up.
8614  * ACTLR2 is banked for S and NS if EL3 is AArch32. Since QEMU's
8615  * implementation is RAZ/WI we can ignore this detail, as we
8616  * do for ACTLR.
8617  */
8618 static const ARMCPRegInfo actlr2_hactlr2_reginfo[] = {
8619     { .name = "ACTLR2", .state = ARM_CP_STATE_AA32,
8620       .cp = 15, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 3,
8621       .access = PL1_RW, .accessfn = access_tacr,
8622       .type = ARM_CP_CONST, .resetvalue = 0 },
8623     { .name = "HACTLR2", .state = ARM_CP_STATE_AA32,
8624       .cp = 15, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 3,
8625       .access = PL2_RW, .type = ARM_CP_CONST,
8626       .resetvalue = 0 },
8627 };
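/*
 * A minimal sketch of the registration gate implied by the comment
 * above (illustrative only; the helper name is hypothetical and the
 * real check is a cpu_isar_feature() test):
 *
 *   static bool have_actlr2(const ARMCPU *cpu)
 *   {
 *       return FIELD_EX32(cpu->isar.id_mmfr4, ID_MMFR4, AC2) != 0;
 *   }
 */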
8628 
8629 void register_cp_regs_for_features(ARMCPU *cpu)
8630 {
8631     /* Register all the coprocessor registers based on feature bits */
8632     CPUARMState *env = &cpu->env;
8633     if (arm_feature(env, ARM_FEATURE_M)) {
8634         /* M profile has no coprocessor registers */
8635         return;
8636     }
8637 
8638     define_arm_cp_regs(cpu, cp_reginfo);
8639     if (!arm_feature(env, ARM_FEATURE_V8)) {
8640         /*
8641          * Must go early as it is full of wildcards that may be
8642          * overridden by later definitions.
8643          */
8644         define_arm_cp_regs(cpu, not_v8_cp_reginfo);
8645     }
8646 
8647     if (arm_feature(env, ARM_FEATURE_V6)) {
8648         /* The ID registers all have impdef reset values */
8649         ARMCPRegInfo v6_idregs[] = {
8650             { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH,
8651               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
8652               .access = PL1_R, .type = ARM_CP_CONST,
8653               .accessfn = access_aa32_tid3,
8654               .resetvalue = cpu->isar.id_pfr0 },
8655             /*
8656              * ID_PFR1 is not a plain ARM_CP_CONST because we don't know
8657              * the value of the GIC field until after we define these regs.
8658              */
8659             { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH,
8660               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1,
8661               .access = PL1_R,
8662               /* .type and .accessfn are set per-configuration below */
8663 #ifdef CONFIG_USER_ONLY
8664               .type = ARM_CP_CONST,
8665               .resetvalue = cpu->isar.id_pfr1,
8666 #else
8667               .type = ARM_CP_NO_RAW,
8668               .accessfn = access_aa32_tid3,
8669               .readfn = id_pfr1_read,
8670               .writefn = arm_cp_write_ignore
8671 #endif
8672             },
8673             { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH,
8674               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2,
8675               .access = PL1_R, .type = ARM_CP_CONST,
8676               .accessfn = access_aa32_tid3,
8677               .resetvalue = cpu->isar.id_dfr0 },
8678             { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH,
8679               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3,
8680               .access = PL1_R, .type = ARM_CP_CONST,
8681               .accessfn = access_aa32_tid3,
8682               .resetvalue = cpu->id_afr0 },
8683             { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH,
8684               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4,
8685               .access = PL1_R, .type = ARM_CP_CONST,
8686               .accessfn = access_aa32_tid3,
8687               .resetvalue = cpu->isar.id_mmfr0 },
8688             { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH,
8689               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5,
8690               .access = PL1_R, .type = ARM_CP_CONST,
8691               .accessfn = access_aa32_tid3,
8692               .resetvalue = cpu->isar.id_mmfr1 },
8693             { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH,
8694               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6,
8695               .access = PL1_R, .type = ARM_CP_CONST,
8696               .accessfn = access_aa32_tid3,
8697               .resetvalue = cpu->isar.id_mmfr2 },
8698             { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH,
8699               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7,
8700               .access = PL1_R, .type = ARM_CP_CONST,
8701               .accessfn = access_aa32_tid3,
8702               .resetvalue = cpu->isar.id_mmfr3 },
8703             { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH,
8704               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
8705               .access = PL1_R, .type = ARM_CP_CONST,
8706               .accessfn = access_aa32_tid3,
8707               .resetvalue = cpu->isar.id_isar0 },
8708             { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH,
8709               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1,
8710               .access = PL1_R, .type = ARM_CP_CONST,
8711               .accessfn = access_aa32_tid3,
8712               .resetvalue = cpu->isar.id_isar1 },
8713             { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH,
8714               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
8715               .access = PL1_R, .type = ARM_CP_CONST,
8716               .accessfn = access_aa32_tid3,
8717               .resetvalue = cpu->isar.id_isar2 },
8718             { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH,
8719               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3,
8720               .access = PL1_R, .type = ARM_CP_CONST,
8721               .accessfn = access_aa32_tid3,
8722               .resetvalue = cpu->isar.id_isar3 },
8723             { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH,
8724               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4,
8725               .access = PL1_R, .type = ARM_CP_CONST,
8726               .accessfn = access_aa32_tid3,
8727               .resetvalue = cpu->isar.id_isar4 },
8728             { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH,
8729               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5,
8730               .access = PL1_R, .type = ARM_CP_CONST,
8731               .accessfn = access_aa32_tid3,
8732               .resetvalue = cpu->isar.id_isar5 },
8733             { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH,
8734               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6,
8735               .access = PL1_R, .type = ARM_CP_CONST,
8736               .accessfn = access_aa32_tid3,
8737               .resetvalue = cpu->isar.id_mmfr4 },
8738             { .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH,
8739               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7,
8740               .access = PL1_R, .type = ARM_CP_CONST,
8741               .accessfn = access_aa32_tid3,
8742               .resetvalue = cpu->isar.id_isar6 },
8743         };
8744         define_arm_cp_regs(cpu, v6_idregs);
8745         define_arm_cp_regs(cpu, v6_cp_reginfo);
8746     } else {
8747         define_arm_cp_regs(cpu, not_v6_cp_reginfo);
8748     }
8749     if (arm_feature(env, ARM_FEATURE_V6K)) {
8750         define_arm_cp_regs(cpu, v6k_cp_reginfo);
8751     }
8752     if (arm_feature(env, ARM_FEATURE_V7MP) &&
8753         !arm_feature(env, ARM_FEATURE_PMSA)) {
8754         define_arm_cp_regs(cpu, v7mp_cp_reginfo);
8755     }
8756     if (arm_feature(env, ARM_FEATURE_V7VE)) {
8757         define_arm_cp_regs(cpu, pmovsset_cp_reginfo);
8758     }
8759     if (arm_feature(env, ARM_FEATURE_V7)) {
8760         ARMCPRegInfo clidr = {
8761             .name = "CLIDR", .state = ARM_CP_STATE_BOTH,
8762             .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
8763             .access = PL1_R, .type = ARM_CP_CONST,
8764             .accessfn = access_tid4,
8765             .fgt = FGT_CLIDR_EL1,
8766             .resetvalue = cpu->clidr
8767         };
8768         define_one_arm_cp_reg(cpu, &clidr);
8769         define_arm_cp_regs(cpu, v7_cp_reginfo);
8770         define_debug_regs(cpu);
8771         define_pmu_regs(cpu);
8772     } else {
8773         define_arm_cp_regs(cpu, not_v7_cp_reginfo);
8774     }
8775     if (arm_feature(env, ARM_FEATURE_V8)) {
8776         /*
8777          * v8 ID registers, which all have impdef reset values.
8778          * Note that within the ID register ranges the unused slots
8779          * must all RAZ, not UNDEF; future architecture versions may
8780          * define new registers here.
8781          * ID registers which are AArch64 views of the AArch32 ID registers
8782          * which already existed in v6 and v7 are handled elsewhere,
8783          * in v6_idregs[].
8784          */
8785         int i;
8786         ARMCPRegInfo v8_idregs[] = {
8787             /*
8788              * ID_AA64PFR0_EL1 is not a plain ARM_CP_CONST in system
8789              * emulation because we don't know the right value for the
8790              * GIC field until after we define these regs.
8791              */
8792             { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64,
8793               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0,
8794               .access = PL1_R,
8795 #ifdef CONFIG_USER_ONLY
8796               .type = ARM_CP_CONST,
8797               .resetvalue = cpu->isar.id_aa64pfr0
8798 #else
8799               .type = ARM_CP_NO_RAW,
8800               .accessfn = access_aa64_tid3,
8801               .readfn = id_aa64pfr0_read,
8802               .writefn = arm_cp_write_ignore
8803 #endif
8804             },
8805             { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64,
8806               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
8807               .access = PL1_R, .type = ARM_CP_CONST,
8808               .accessfn = access_aa64_tid3,
8809               .resetvalue = cpu->isar.id_aa64pfr1},
8810             { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
8811               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2,
8812               .access = PL1_R, .type = ARM_CP_CONST,
8813               .accessfn = access_aa64_tid3,
8814               .resetvalue = 0 },
8815             { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
8816               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3,
8817               .access = PL1_R, .type = ARM_CP_CONST,
8818               .accessfn = access_aa64_tid3,
8819               .resetvalue = 0 },
8820             { .name = "ID_AA64ZFR0_EL1", .state = ARM_CP_STATE_AA64,
8821               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4,
8822               .access = PL1_R, .type = ARM_CP_CONST,
8823               .accessfn = access_aa64_tid3,
8824               .resetvalue = cpu->isar.id_aa64zfr0 },
8825             { .name = "ID_AA64SMFR0_EL1", .state = ARM_CP_STATE_AA64,
8826               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5,
8827               .access = PL1_R, .type = ARM_CP_CONST,
8828               .accessfn = access_aa64_tid3,
8829               .resetvalue = cpu->isar.id_aa64smfr0 },
8830             { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
8831               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6,
8832               .access = PL1_R, .type = ARM_CP_CONST,
8833               .accessfn = access_aa64_tid3,
8834               .resetvalue = 0 },
8835             { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
8836               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7,
8837               .access = PL1_R, .type = ARM_CP_CONST,
8838               .accessfn = access_aa64_tid3,
8839               .resetvalue = 0 },
8840             { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64,
8841               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0,
8842               .access = PL1_R, .type = ARM_CP_CONST,
8843               .accessfn = access_aa64_tid3,
8844               .resetvalue = cpu->isar.id_aa64dfr0 },
8845             { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64,
8846               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1,
8847               .access = PL1_R, .type = ARM_CP_CONST,
8848               .accessfn = access_aa64_tid3,
8849               .resetvalue = cpu->isar.id_aa64dfr1 },
8850             { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
8851               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2,
8852               .access = PL1_R, .type = ARM_CP_CONST,
8853               .accessfn = access_aa64_tid3,
8854               .resetvalue = 0 },
8855             { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
8856               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3,
8857               .access = PL1_R, .type = ARM_CP_CONST,
8858               .accessfn = access_aa64_tid3,
8859               .resetvalue = 0 },
8860             { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64,
8861               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4,
8862               .access = PL1_R, .type = ARM_CP_CONST,
8863               .accessfn = access_aa64_tid3,
8864               .resetvalue = cpu->id_aa64afr0 },
8865             { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64,
8866               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5,
8867               .access = PL1_R, .type = ARM_CP_CONST,
8868               .accessfn = access_aa64_tid3,
8869               .resetvalue = cpu->id_aa64afr1 },
8870             { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
8871               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6,
8872               .access = PL1_R, .type = ARM_CP_CONST,
8873               .accessfn = access_aa64_tid3,
8874               .resetvalue = 0 },
8875             { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
8876               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7,
8877               .access = PL1_R, .type = ARM_CP_CONST,
8878               .accessfn = access_aa64_tid3,
8879               .resetvalue = 0 },
8880             { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64,
8881               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
8882               .access = PL1_R, .type = ARM_CP_CONST,
8883               .accessfn = access_aa64_tid3,
8884               .resetvalue = cpu->isar.id_aa64isar0 },
8885             { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
8886               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
8887               .access = PL1_R, .type = ARM_CP_CONST,
8888               .accessfn = access_aa64_tid3,
8889               .resetvalue = cpu->isar.id_aa64isar1 },
8890             { .name = "ID_AA64ISAR2_EL1", .state = ARM_CP_STATE_AA64,
8891               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
8892               .access = PL1_R, .type = ARM_CP_CONST,
8893               .accessfn = access_aa64_tid3,
8894               .resetvalue = cpu->isar.id_aa64isar2 },
8895             { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
8896               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3,
8897               .access = PL1_R, .type = ARM_CP_CONST,
8898               .accessfn = access_aa64_tid3,
8899               .resetvalue = 0 },
8900             { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
8901               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4,
8902               .access = PL1_R, .type = ARM_CP_CONST,
8903               .accessfn = access_aa64_tid3,
8904               .resetvalue = 0 },
8905             { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
8906               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5,
8907               .access = PL1_R, .type = ARM_CP_CONST,
8908               .accessfn = access_aa64_tid3,
8909               .resetvalue = 0 },
8910             { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
8911               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6,
8912               .access = PL1_R, .type = ARM_CP_CONST,
8913               .accessfn = access_aa64_tid3,
8914               .resetvalue = 0 },
8915             { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
8916               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7,
8917               .access = PL1_R, .type = ARM_CP_CONST,
8918               .accessfn = access_aa64_tid3,
8919               .resetvalue = 0 },
8920             { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64,
8921               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
8922               .access = PL1_R, .type = ARM_CP_CONST,
8923               .accessfn = access_aa64_tid3,
8924               .resetvalue = cpu->isar.id_aa64mmfr0 },
8925             { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64,
8926               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
8927               .access = PL1_R, .type = ARM_CP_CONST,
8928               .accessfn = access_aa64_tid3,
8929               .resetvalue = cpu->isar.id_aa64mmfr1 },
8930             { .name = "ID_AA64MMFR2_EL1", .state = ARM_CP_STATE_AA64,
8931               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2,
8932               .access = PL1_R, .type = ARM_CP_CONST,
8933               .accessfn = access_aa64_tid3,
8934               .resetvalue = cpu->isar.id_aa64mmfr2 },
8935             { .name = "ID_AA64MMFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
8936               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3,
8937               .access = PL1_R, .type = ARM_CP_CONST,
8938               .accessfn = access_aa64_tid3,
8939               .resetvalue = 0 },
8940             { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
8941               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4,
8942               .access = PL1_R, .type = ARM_CP_CONST,
8943               .accessfn = access_aa64_tid3,
8944               .resetvalue = 0 },
8945             { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
8946               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5,
8947               .access = PL1_R, .type = ARM_CP_CONST,
8948               .accessfn = access_aa64_tid3,
8949               .resetvalue = 0 },
8950             { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
8951               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6,
8952               .access = PL1_R, .type = ARM_CP_CONST,
8953               .accessfn = access_aa64_tid3,
8954               .resetvalue = 0 },
8955             { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
8956               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7,
8957               .access = PL1_R, .type = ARM_CP_CONST,
8958               .accessfn = access_aa64_tid3,
8959               .resetvalue = 0 },
8960             { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64,
8961               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
8962               .access = PL1_R, .type = ARM_CP_CONST,
8963               .accessfn = access_aa64_tid3,
8964               .resetvalue = cpu->isar.mvfr0 },
8965             { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64,
8966               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
8967               .access = PL1_R, .type = ARM_CP_CONST,
8968               .accessfn = access_aa64_tid3,
8969               .resetvalue = cpu->isar.mvfr1 },
8970             { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64,
8971               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
8972               .access = PL1_R, .type = ARM_CP_CONST,
8973               .accessfn = access_aa64_tid3,
8974               .resetvalue = cpu->isar.mvfr2 },
8975             /*
8976              * "0, c0, c3, {0,1,2}" are the encodings corresponding to
8977              * AArch64 MVFR[012]_EL1. Define the STATE_AA32 encoding
8978              * as RAZ, since it is in the "reserved for future ID
8979              * registers, RAZ" part of the AArch32 encoding space.
8980              */
8981             { .name = "RES_0_C0_C3_0", .state = ARM_CP_STATE_AA32,
8982               .cp = 15, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
8983               .access = PL1_R, .type = ARM_CP_CONST,
8984               .accessfn = access_aa64_tid3,
8985               .resetvalue = 0 },
8986             { .name = "RES_0_C0_C3_1", .state = ARM_CP_STATE_AA32,
8987               .cp = 15, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
8988               .access = PL1_R, .type = ARM_CP_CONST,
8989               .accessfn = access_aa64_tid3,
8990               .resetvalue = 0 },
8991             { .name = "RES_0_C0_C3_2", .state = ARM_CP_STATE_AA32,
8992               .cp = 15, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
8993               .access = PL1_R, .type = ARM_CP_CONST,
8994               .accessfn = access_aa64_tid3,
8995               .resetvalue = 0 },
8996             /*
8997              * Other encodings in "0, c0, c3, ..." are STATE_BOTH because
8998              * they're also RAZ for AArch64, and in v8 are gradually
8999              * being filled with AArch64-view-of-AArch32-ID-register
9000              * for new ID registers.
9001              */
9002             { .name = "RES_0_C0_C3_3", .state = ARM_CP_STATE_BOTH,
9003               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3,
9004               .access = PL1_R, .type = ARM_CP_CONST,
9005               .accessfn = access_aa64_tid3,
9006               .resetvalue = 0 },
9007             { .name = "ID_PFR2", .state = ARM_CP_STATE_BOTH,
9008               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4,
9009               .access = PL1_R, .type = ARM_CP_CONST,
9010               .accessfn = access_aa64_tid3,
9011               .resetvalue = cpu->isar.id_pfr2 },
9012             { .name = "ID_DFR1", .state = ARM_CP_STATE_BOTH,
9013               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5,
9014               .access = PL1_R, .type = ARM_CP_CONST,
9015               .accessfn = access_aa64_tid3,
9016               .resetvalue = cpu->isar.id_dfr1 },
9017             { .name = "ID_MMFR5", .state = ARM_CP_STATE_BOTH,
9018               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6,
9019               .access = PL1_R, .type = ARM_CP_CONST,
9020               .accessfn = access_aa64_tid3,
9021               .resetvalue = cpu->isar.id_mmfr5 },
9022             { .name = "RES_0_C0_C3_7", .state = ARM_CP_STATE_BOTH,
9023               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7,
9024               .access = PL1_R, .type = ARM_CP_CONST,
9025               .accessfn = access_aa64_tid3,
9026               .resetvalue = 0 },
9027             { .name = "PMCEID0", .state = ARM_CP_STATE_AA32,
9028               .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6,
9029               .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
9030               .fgt = FGT_PMCEIDN_EL0,
9031               .resetvalue = extract64(cpu->pmceid0, 0, 32) },
9032             { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64,
9033               .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6,
9034               .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
9035               .fgt = FGT_PMCEIDN_EL0,
9036               .resetvalue = cpu->pmceid0 },
9037             { .name = "PMCEID1", .state = ARM_CP_STATE_AA32,
9038               .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7,
9039               .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
9040               .fgt = FGT_PMCEIDN_EL0,
9041               .resetvalue = extract64(cpu->pmceid1, 0, 32) },
9042             { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64,
9043               .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7,
9044               .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
9045               .fgt = FGT_PMCEIDN_EL0,
9046               .resetvalue = cpu->pmceid1 },
9047         };
9048 #ifdef CONFIG_USER_ONLY
9049         static const ARMCPRegUserSpaceInfo v8_user_idregs[] = {
9050             { .name = "ID_AA64PFR0_EL1",
9051               .exported_bits = R_ID_AA64PFR0_FP_MASK |
9052                                R_ID_AA64PFR0_ADVSIMD_MASK |
9053                                R_ID_AA64PFR0_SVE_MASK |
9054                                R_ID_AA64PFR0_DIT_MASK,
9055               .fixed_bits = (0x1u << R_ID_AA64PFR0_EL0_SHIFT) |
9056                             (0x1u << R_ID_AA64PFR0_EL1_SHIFT) },
9057             { .name = "ID_AA64PFR1_EL1",
9058               .exported_bits = R_ID_AA64PFR1_BT_MASK |
9059                                R_ID_AA64PFR1_SSBS_MASK |
9060                                R_ID_AA64PFR1_MTE_MASK |
9061                                R_ID_AA64PFR1_SME_MASK },
9062             { .name = "ID_AA64PFR*_EL1_RESERVED",
9063               .is_glob = true },
9064             { .name = "ID_AA64ZFR0_EL1",
9065               .exported_bits = R_ID_AA64ZFR0_SVEVER_MASK |
9066                                R_ID_AA64ZFR0_AES_MASK |
9067                                R_ID_AA64ZFR0_BITPERM_MASK |
9068                                R_ID_AA64ZFR0_BFLOAT16_MASK |
9069                                R_ID_AA64ZFR0_B16B16_MASK |
9070                                R_ID_AA64ZFR0_SHA3_MASK |
9071                                R_ID_AA64ZFR0_SM4_MASK |
9072                                R_ID_AA64ZFR0_I8MM_MASK |
9073                                R_ID_AA64ZFR0_F32MM_MASK |
9074                                R_ID_AA64ZFR0_F64MM_MASK },
9075             { .name = "ID_AA64SMFR0_EL1",
9076               .exported_bits = R_ID_AA64SMFR0_F32F32_MASK |
9077                                R_ID_AA64SMFR0_BI32I32_MASK |
9078                                R_ID_AA64SMFR0_B16F32_MASK |
9079                                R_ID_AA64SMFR0_F16F32_MASK |
9080                                R_ID_AA64SMFR0_I8I32_MASK |
9081                                R_ID_AA64SMFR0_F16F16_MASK |
9082                                R_ID_AA64SMFR0_B16B16_MASK |
9083                                R_ID_AA64SMFR0_I16I32_MASK |
9084                                R_ID_AA64SMFR0_F64F64_MASK |
9085                                R_ID_AA64SMFR0_I16I64_MASK |
9086                                R_ID_AA64SMFR0_SMEVER_MASK |
9087                                R_ID_AA64SMFR0_FA64_MASK },
9088             { .name = "ID_AA64MMFR0_EL1",
9089               .exported_bits = R_ID_AA64MMFR0_ECV_MASK,
9090               .fixed_bits = (0xfu << R_ID_AA64MMFR0_TGRAN64_SHIFT) |
9091                             (0xfu << R_ID_AA64MMFR0_TGRAN4_SHIFT) },
9092             { .name = "ID_AA64MMFR1_EL1",
9093               .exported_bits = R_ID_AA64MMFR1_AFP_MASK },
9094             { .name = "ID_AA64MMFR2_EL1",
9095               .exported_bits = R_ID_AA64MMFR2_AT_MASK },
9096             { .name = "ID_AA64MMFR*_EL1_RESERVED",
9097               .is_glob = true },
9098             { .name = "ID_AA64DFR0_EL1",
9099               .fixed_bits = (0x6u << R_ID_AA64DFR0_DEBUGVER_SHIFT) },
9100             { .name = "ID_AA64DFR1_EL1" },
9101             { .name = "ID_AA64DFR*_EL1_RESERVED",
9102               .is_glob = true },
9103             { .name = "ID_AA64AFR*",
9104               .is_glob = true },
9105             { .name = "ID_AA64ISAR0_EL1",
9106               .exported_bits = R_ID_AA64ISAR0_AES_MASK |
9107                                R_ID_AA64ISAR0_SHA1_MASK |
9108                                R_ID_AA64ISAR0_SHA2_MASK |
9109                                R_ID_AA64ISAR0_CRC32_MASK |
9110                                R_ID_AA64ISAR0_ATOMIC_MASK |
9111                                R_ID_AA64ISAR0_RDM_MASK |
9112                                R_ID_AA64ISAR0_SHA3_MASK |
9113                                R_ID_AA64ISAR0_SM3_MASK |
9114                                R_ID_AA64ISAR0_SM4_MASK |
9115                                R_ID_AA64ISAR0_DP_MASK |
9116                                R_ID_AA64ISAR0_FHM_MASK |
9117                                R_ID_AA64ISAR0_TS_MASK |
9118                                R_ID_AA64ISAR0_RNDR_MASK },
9119             { .name = "ID_AA64ISAR1_EL1",
9120               .exported_bits = R_ID_AA64ISAR1_DPB_MASK |
9121                                R_ID_AA64ISAR1_APA_MASK |
9122                                R_ID_AA64ISAR1_API_MASK |
9123                                R_ID_AA64ISAR1_JSCVT_MASK |
9124                                R_ID_AA64ISAR1_FCMA_MASK |
9125                                R_ID_AA64ISAR1_LRCPC_MASK |
9126                                R_ID_AA64ISAR1_GPA_MASK |
9127                                R_ID_AA64ISAR1_GPI_MASK |
9128                                R_ID_AA64ISAR1_FRINTTS_MASK |
9129                                R_ID_AA64ISAR1_SB_MASK |
9130                                R_ID_AA64ISAR1_BF16_MASK |
9131                                R_ID_AA64ISAR1_DGH_MASK |
9132                                R_ID_AA64ISAR1_I8MM_MASK },
9133             { .name = "ID_AA64ISAR2_EL1",
9134               .exported_bits = R_ID_AA64ISAR2_WFXT_MASK |
9135                                R_ID_AA64ISAR2_RPRES_MASK |
9136                                R_ID_AA64ISAR2_GPA3_MASK |
9137                                R_ID_AA64ISAR2_APA3_MASK |
9138                                R_ID_AA64ISAR2_MOPS_MASK |
9139                                R_ID_AA64ISAR2_BC_MASK |
9140                                R_ID_AA64ISAR2_RPRFM_MASK |
9141                                R_ID_AA64ISAR2_CSSC_MASK },
9142             { .name = "ID_AA64ISAR*_EL1_RESERVED",
9143               .is_glob = true },
9144         };
9145         modify_arm_cp_regs(v8_idregs, v8_user_idregs);
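        /*
         * For user-only, this trims each matching reginfo's resetvalue
         * down to its exported_bits and ORs in any fixed_bits, so
         * unexported ID fields read as zero; .is_glob entries match by
         * name pattern.
         */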
9146 #endif
9147         /*
9148          * RVBAR_EL1 and RMR_EL1 only implemented if EL1 is the highest EL.
9149          * TODO: For RMR, a write with bit 1 set should do something with
9150          * cpu_reset(). In the meantime, "the bit is strictly a request",
9151          * so we are in spec just ignoring writes.
9152          */
9153         if (!arm_feature(env, ARM_FEATURE_EL3) &&
9154             !arm_feature(env, ARM_FEATURE_EL2)) {
9155             ARMCPRegInfo el1_reset_regs[] = {
9156                 { .name = "RVBAR_EL1", .state = ARM_CP_STATE_BOTH,
9157                   .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
9158                   .access = PL1_R,
9159                   .fieldoffset = offsetof(CPUARMState, cp15.rvbar) },
9160                 { .name = "RMR_EL1", .state = ARM_CP_STATE_BOTH,
9161                   .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 2,
9162                   .access = PL1_RW, .type = ARM_CP_CONST,
9163                   .resetvalue = arm_feature(env, ARM_FEATURE_AARCH64) }
9164             };
9165             define_arm_cp_regs(cpu, el1_reset_regs);
9166         }
9167         define_arm_cp_regs(cpu, v8_idregs);
9168         define_arm_cp_regs(cpu, v8_cp_reginfo);
9169         if (cpu_isar_feature(aa64_aa32_el1, cpu)) {
9170             define_arm_cp_regs(cpu, v8_aa32_el1_reginfo);
9171         }
9172 
9173         for (i = 4; i < 16; i++) {
9174             /*
9175              * Encodings in "0, c0, {c4-c7}, {0-7}" are RAZ for AArch32.
9176              * For pre-v8 cores there are RAZ patterns for these in
9177              * id_pre_v8_midr_cp_reginfo[]; for v8 we do that here.
9178              * v8 extends the "must RAZ" part of the ID register space
9179              * to also cover c0, 0, c{8-15}, {0-7}.
9180              * These are STATE_AA32 because in the AArch64 sysreg space
9181              * c4-c7 is where the AArch64 ID registers live (and we've
9182              * already defined those in v8_idregs[]), and c8-c15 are not
9183              * "must RAZ" for AArch64.
9184              */
9185             g_autofree char *name = g_strdup_printf("RES_0_C0_C%d_X", i);
9186             ARMCPRegInfo v8_aa32_raz_idregs = {
9187                 .name = name,
9188                 .state = ARM_CP_STATE_AA32,
9189                 .cp = 15, .opc1 = 0, .crn = 0, .crm = i, .opc2 = CP_ANY,
9190                 .access = PL1_R, .type = ARM_CP_CONST,
9191                 .accessfn = access_aa64_tid3,
9192                 .resetvalue = 0 };
9193             define_one_arm_cp_reg(cpu, &v8_aa32_raz_idregs);
9194         }
9195     }
9196 
9197     /*
9198      * Register the base EL2 cpregs.
9199      * Pre v8, these registers are implemented only as part of the
9200      * Virtualization Extensions (EL2 present).  Beginning with v8,
9201      * if EL2 is missing but EL3 is enabled, mostly these become
9202      * RES0 from EL3, with some specific exceptions.
9203      */
9204     if (arm_feature(env, ARM_FEATURE_EL2)
9205         || (arm_feature(env, ARM_FEATURE_EL3)
9206             && arm_feature(env, ARM_FEATURE_V8))) {
9207         uint64_t vmpidr_def = mpidr_read_val(env);
9208         ARMCPRegInfo vpidr_regs[] = {
9209             { .name = "VPIDR", .state = ARM_CP_STATE_AA32,
9210               .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
9211               .access = PL2_RW, .accessfn = access_el3_aa32ns,
9212               .resetvalue = cpu->midr,
9213               .type = ARM_CP_ALIAS | ARM_CP_EL3_NO_EL2_C_NZ,
9214               .fieldoffset = offsetoflow32(CPUARMState, cp15.vpidr_el2) },
9215             { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64,
9216               .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
9217               .access = PL2_RW, .resetvalue = cpu->midr,
9218               .type = ARM_CP_EL3_NO_EL2_C_NZ,
9219               .nv2_redirect_offset = 0x88,
9220               .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
9221             { .name = "VMPIDR", .state = ARM_CP_STATE_AA32,
9222               .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
9223               .access = PL2_RW, .accessfn = access_el3_aa32ns,
9224               .resetvalue = vmpidr_def,
9225               .type = ARM_CP_ALIAS | ARM_CP_EL3_NO_EL2_C_NZ,
9226               .fieldoffset = offsetoflow32(CPUARMState, cp15.vmpidr_el2) },
9227             { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64,
9228               .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
9229               .access = PL2_RW, .resetvalue = vmpidr_def,
9230               .type = ARM_CP_EL3_NO_EL2_C_NZ,
9231               .nv2_redirect_offset = 0x50,
9232               .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) },
9233         };
9234         /*
9235          * The only field of MDCR_EL2 that has a defined architectural reset
9236          * value is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N.
9237          */
9238         ARMCPRegInfo mdcr_el2 = {
9239             .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH, .type = ARM_CP_IO,
9240             .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
9241             .writefn = mdcr_el2_write,
9242             .access = PL2_RW, .resetvalue = pmu_num_counters(env),
9243             .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2),
9244         };
9245         define_one_arm_cp_reg(cpu, &mdcr_el2);
9246         define_arm_cp_regs(cpu, vpidr_regs);
9247         define_arm_cp_regs(cpu, el2_cp_reginfo);
9248         if (arm_feature(env, ARM_FEATURE_V8)) {
9249             define_arm_cp_regs(cpu, el2_v8_cp_reginfo);
9250         }
9251         if (cpu_isar_feature(aa64_sel2, cpu)) {
9252             define_arm_cp_regs(cpu, el2_sec_cp_reginfo);
9253         }
9254         /*
9255          * RVBAR_EL2 and RMR_EL2 only implemented if EL2 is the highest EL.
9256          * See commentary near RMR_EL1.
9257          */
9258         if (!arm_feature(env, ARM_FEATURE_EL3)) {
9259             static const ARMCPRegInfo el2_reset_regs[] = {
9260                 { .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64,
9261                   .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1,
9262                   .access = PL2_R,
9263                   .fieldoffset = offsetof(CPUARMState, cp15.rvbar) },
9264                 { .name = "RVBAR", .type = ARM_CP_ALIAS,
9265                   .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
9266                   .access = PL2_R,
9267                   .fieldoffset = offsetof(CPUARMState, cp15.rvbar) },
9268                 { .name = "RMR_EL2", .state = ARM_CP_STATE_AA64,
9269                   .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 2,
9270                   .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 1 },
9271             };
9272             define_arm_cp_regs(cpu, el2_reset_regs);
9273         }
9274     }
9275 
9276     /* Register the base EL3 cpregs. */
9277     if (arm_feature(env, ARM_FEATURE_EL3)) {
9278         define_arm_cp_regs(cpu, el3_cp_reginfo);
9279         ARMCPRegInfo el3_regs[] = {
9280             { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64,
9281               .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1,
9282               .access = PL3_R,
9283               .fieldoffset = offsetof(CPUARMState, cp15.rvbar), },
9284             { .name = "RMR_EL3", .state = ARM_CP_STATE_AA64,
9285               .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 2,
9286               .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 1 },
9287             { .name = "RMR", .state = ARM_CP_STATE_AA32,
9288               .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 2,
9289               .access = PL3_RW, .type = ARM_CP_CONST,
9290               .resetvalue = arm_feature(env, ARM_FEATURE_AARCH64) },
9291             { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64,
9292               .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0,
9293               .access = PL3_RW,
9294               .raw_writefn = raw_write, .writefn = sctlr_write,
9295               .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]),
9296               .resetvalue = cpu->reset_sctlr },
9297         };
9298 
9299         define_arm_cp_regs(cpu, el3_regs);
9300     }
9301     /*
9302      * The behaviour of NSACR is sufficiently various that we don't
9303      * try to describe it in a single reginfo:
9304      *  if EL3 is 64 bit, then trap to EL3 from S EL1,
9305      *     reads as constant 0xc00 from NS EL1 and NS EL2
9306      *  if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2
9307      *  if v7 without EL3, register doesn't exist
9308      *  if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2
9309      */
9310     if (arm_feature(env, ARM_FEATURE_EL3)) {
9311         if (arm_feature(env, ARM_FEATURE_AARCH64)) {
9312             static const ARMCPRegInfo nsacr = {
9313                 .name = "NSACR", .type = ARM_CP_CONST,
9314                 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
9315                 .access = PL1_RW, .accessfn = nsacr_access,
9316                 .resetvalue = 0xc00
9317             };
9318             define_one_arm_cp_reg(cpu, &nsacr);
9319         } else {
9320             static const ARMCPRegInfo nsacr = {
9321                 .name = "NSACR",
9322                 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
9323                 .access = PL3_RW | PL1_R,
9324                 .resetvalue = 0,
9325                 .fieldoffset = offsetof(CPUARMState, cp15.nsacr)
9326             };
9327             define_one_arm_cp_reg(cpu, &nsacr);
9328         }
9329     } else {
9330         if (arm_feature(env, ARM_FEATURE_V8)) {
9331             static const ARMCPRegInfo nsacr = {
9332                 .name = "NSACR", .type = ARM_CP_CONST,
9333                 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
9334                 .access = PL1_R,
9335                 .resetvalue = 0xc00
9336             };
9337             define_one_arm_cp_reg(cpu, &nsacr);
9338         }
9339     }
9340 
9341     if (arm_feature(env, ARM_FEATURE_PMSA)) {
9342         if (arm_feature(env, ARM_FEATURE_V6)) {
9343             /* PMSAv6 not implemented */
9344             assert(arm_feature(env, ARM_FEATURE_V7));
9345             define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
9346             define_arm_cp_regs(cpu, pmsav7_cp_reginfo);
9347         } else {
9348             define_arm_cp_regs(cpu, pmsav5_cp_reginfo);
9349         }
9350     } else {
9351         define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
9352         define_arm_cp_regs(cpu, vmsa_cp_reginfo);
9353         /* TTBCR2 is introduced with ARMv8.2-AA32HPD.  */
9354         if (cpu_isar_feature(aa32_hpd, cpu)) {
9355             define_one_arm_cp_reg(cpu, &ttbcr2_reginfo);
9356         }
9357     }
9358     if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
9359         define_arm_cp_regs(cpu, t2ee_cp_reginfo);
9360     }
9361     if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
9362         define_arm_cp_regs(cpu, generic_timer_cp_reginfo);
9363     }
9364     if (cpu_isar_feature(aa64_ecv_traps, cpu)) {
9365         define_arm_cp_regs(cpu, gen_timer_ecv_cp_reginfo);
9366     }
9367 #ifndef CONFIG_USER_ONLY
9368     if (cpu_isar_feature(aa64_ecv, cpu)) {
9369         define_one_arm_cp_reg(cpu, &gen_timer_cntpoff_reginfo);
9370     }
9371 #endif
9372     if (arm_feature(env, ARM_FEATURE_VAPA)) {
9373         ARMCPRegInfo vapa_cp_reginfo[] = {
9374             { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
9375               .access = PL1_RW, .resetvalue = 0,
9376               .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s),
9377                                      offsetoflow32(CPUARMState, cp15.par_ns) },
9378               .writefn = par_write},
9379 #ifndef CONFIG_USER_ONLY
9380             /* This underdecoding is safe because the reginfo is NO_RAW. */
9381             { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
9382               .access = PL1_W, .accessfn = ats_access,
9383               .writefn = ats_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
9384 #endif
9385         };
9386 
9387         /*
9388          * When LPAE exists, this 32-bit PAR register is an alias of the
9389          * 64-bit AArch32 PAR register defined in lpae_cp_reginfo[]
9390          */
9391         if (arm_feature(env, ARM_FEATURE_LPAE)) {
9392             vapa_cp_reginfo[0].type = ARM_CP_ALIAS | ARM_CP_NO_GDB;
9393         }
9394         define_arm_cp_regs(cpu, vapa_cp_reginfo);
9395     }
9396     if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) {
9397         define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo);
9398     }
9399     if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) {
9400         define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo);
9401     }
9402     if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) {
9403         define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo);
9404     }
9405     if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
9406         define_arm_cp_regs(cpu, omap_cp_reginfo);
9407     }
9408     if (arm_feature(env, ARM_FEATURE_STRONGARM)) {
9409         define_arm_cp_regs(cpu, strongarm_cp_reginfo);
9410     }
9411     if (arm_feature(env, ARM_FEATURE_XSCALE)) {
9412         define_arm_cp_regs(cpu, xscale_cp_reginfo);
9413     }
9414     if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) {
9415         define_arm_cp_regs(cpu, dummy_c15_cp_reginfo);
9416     }
9417     if (arm_feature(env, ARM_FEATURE_LPAE)) {
9418         define_arm_cp_regs(cpu, lpae_cp_reginfo);
9419     }
9420     if (cpu_isar_feature(aa32_jazelle, cpu)) {
9421         define_arm_cp_regs(cpu, jazelle_regs);
9422     }
9423     /*
9424      * Slightly awkwardly, the OMAP and StrongARM cores need all of
9425      * cp15 crn=0 to be writes-ignored, whereas for other cores they should
9426      * be read-only (ie write causes UNDEF exception).
9427      */
9428     {
9429         ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = {
9430             /*
9431              * Pre-v8 MIDR space.
9432              * Note that the MIDR isn't a simple constant register because
9433              * of the TI925 behaviour where writes to another register can
9434              * cause the MIDR value to change.
9435              *
9436              * Unimplemented registers in the c15 0 0 0 space default to
9437              * MIDR. Define MIDR first as this entire space, then CTR, TCMTR
9438              * and friends override accordingly.
9439              */
9440             { .name = "MIDR",
9441               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY,
9442               .access = PL1_R, .resetvalue = cpu->midr,
9443               .writefn = arm_cp_write_ignore, .raw_writefn = raw_write,
9444               .readfn = midr_read,
9445               .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
9446               .type = ARM_CP_OVERRIDE },
9447             /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */
9448             { .name = "DUMMY",
9449               .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY,
9450               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
9451             { .name = "DUMMY",
9452               .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY,
9453               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
9454             { .name = "DUMMY",
9455               .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY,
9456               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
9457             { .name = "DUMMY",
9458               .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY,
9459               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
9460             { .name = "DUMMY",
9461               .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY,
9462               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
9463         };
9464         ARMCPRegInfo id_v8_midr_cp_reginfo[] = {
9465             { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH,
9466               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0,
9467               .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr,
9468               .fgt = FGT_MIDR_EL1,
9469               .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
9470               .readfn = midr_read },
9471             /* crn = 0 op1 = 0 crm = 0 op2 = 7 : AArch32 aliases of MIDR */
9472             { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
9473               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7,
9474               .access = PL1_R, .resetvalue = cpu->midr },
9475             { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH,
9476               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6,
9477               .access = PL1_R,
9478               .accessfn = access_aa64_tid1,
9479               .fgt = FGT_REVIDR_EL1,
9480               .type = ARM_CP_CONST, .resetvalue = cpu->revidr },
9481         };
9482         ARMCPRegInfo id_v8_midr_alias_cp_reginfo = {
9483             .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST | ARM_CP_NO_GDB,
9484             .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
9485             .access = PL1_R, .resetvalue = cpu->midr
9486         };
9487         ARMCPRegInfo id_cp_reginfo[] = {
9488             /* These are common to v8 and pre-v8 */
9489             { .name = "CTR",
9490               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1,
9491               .access = PL1_R, .accessfn = ctr_el0_access,
9492               .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
9493             { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64,
9494               .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0,
9495               .access = PL0_R, .accessfn = ctr_el0_access,
9496               .fgt = FGT_CTR_EL0,
9497               .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
9498             /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */
9499             { .name = "TCMTR",
9500               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2,
9501               .access = PL1_R,
9502               .accessfn = access_aa32_tid1,
9503               .type = ARM_CP_CONST, .resetvalue = 0 },
9504         };
9505         /* TLBTR is specific to VMSA */
9506         ARMCPRegInfo id_tlbtr_reginfo = {
9507               .name = "TLBTR",
9508               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
9509               .access = PL1_R,
9510               .accessfn = access_aa32_tid1,
9511               .type = ARM_CP_CONST, .resetvalue = 0,
9512         };
9513         /* MPUIR is specific to PMSA V6+ */
9514         ARMCPRegInfo id_mpuir_reginfo = {
9515               .name = "MPUIR",
9516               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
9517               .access = PL1_R, .type = ARM_CP_CONST,
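              /* The MPU region count is reported in MPUIR.DREGION, bits [15:8] */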
9518               .resetvalue = cpu->pmsav7_dregion << 8
9519         };
9520         /* HMPUIR is specific to PMSA V8 */
9521         ARMCPRegInfo id_hmpuir_reginfo = {
9522             .name = "HMPUIR",
9523             .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 4,
9524             .access = PL2_R, .type = ARM_CP_CONST,
9525             .resetvalue = cpu->pmsav8r_hdregion
9526         };
9527         static const ARMCPRegInfo crn0_wi_reginfo = {
9528             .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY,
9529             .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
9530             .type = ARM_CP_NOP | ARM_CP_OVERRIDE
9531         };
9532 #ifdef CONFIG_USER_ONLY
9533         static const ARMCPRegUserSpaceInfo id_v8_user_midr_cp_reginfo[] = {
9534             { .name = "MIDR_EL1",
9535               .exported_bits = R_MIDR_EL1_REVISION_MASK |
9536                                R_MIDR_EL1_PARTNUM_MASK |
9537                                R_MIDR_EL1_ARCHITECTURE_MASK |
9538                                R_MIDR_EL1_VARIANT_MASK |
9539                                R_MIDR_EL1_IMPLEMENTER_MASK },
9540             { .name = "REVIDR_EL1" },
9541         };
9542         modify_arm_cp_regs(id_v8_midr_cp_reginfo, id_v8_user_midr_cp_reginfo);
9543 #endif
9544         if (arm_feature(env, ARM_FEATURE_OMAPCP) ||
9545             arm_feature(env, ARM_FEATURE_STRONGARM)) {
9546             size_t i;
9547             /*
9548              * Register the blanket "writes ignored" value first to cover the
9549              * whole space. Then update the specific ID registers to allow write
9550              * access, so that they ignore writes rather than causing them to
9551              * UNDEF.
9552              */
9553             define_one_arm_cp_reg(cpu, &crn0_wi_reginfo);
9554             for (i = 0; i < ARRAY_SIZE(id_pre_v8_midr_cp_reginfo); ++i) {
9555                 id_pre_v8_midr_cp_reginfo[i].access = PL1_RW;
9556             }
9557             for (i = 0; i < ARRAY_SIZE(id_cp_reginfo); ++i) {
9558                 id_cp_reginfo[i].access = PL1_RW;
9559             }
9560             id_mpuir_reginfo.access = PL1_RW;
9561             id_tlbtr_reginfo.access = PL1_RW;
9562         }
9563         if (arm_feature(env, ARM_FEATURE_V8)) {
9564             define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo);
9565             if (!arm_feature(env, ARM_FEATURE_PMSA)) {
9566                 define_one_arm_cp_reg(cpu, &id_v8_midr_alias_cp_reginfo);
9567             }
9568         } else {
9569             define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo);
9570         }
9571         define_arm_cp_regs(cpu, id_cp_reginfo);
9572         if (!arm_feature(env, ARM_FEATURE_PMSA)) {
9573             define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo);
9574         } else if (arm_feature(env, ARM_FEATURE_PMSA) &&
9575                    arm_feature(env, ARM_FEATURE_V8)) {
9576             uint32_t i = 0;
9577             char *tmp_string;
9578 
9579             define_one_arm_cp_reg(cpu, &id_mpuir_reginfo);
9580             define_one_arm_cp_reg(cpu, &id_hmpuir_reginfo);
9581             define_arm_cp_regs(cpu, pmsav8r_cp_reginfo);
9582 
9583             /* Register aliases are only valid for the first 32 indexes */
9584             for (i = 0; i < MIN(cpu->pmsav7_dregion, 32); ++i) {
9585                 uint8_t crm = 0b1000 | extract32(i, 1, 3);
9586                 uint8_t opc1 = extract32(i, 4, 1);
9587                 uint8_t opc2 = extract32(i, 0, 1) << 2;
9588 
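                /*
                 * Worked example (illustrative): for i = 5 (0b00101) this
                 * gives crm = 0b1000 | 0b010 = c10, opc1 = 0 and opc2 = 4,
                 * i.e. PRBAR5 is p15, 0, c6, c10, 4; the matching PRLAR5
                 * below gets opc2 = 5.
                 */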
9589                 tmp_string = g_strdup_printf("PRBAR%u", i);
9590                 ARMCPRegInfo tmp_prbarn_reginfo = {
9591                     .name = tmp_string, .type = ARM_CP_ALIAS | ARM_CP_NO_RAW,
9592                     .cp = 15, .opc1 = opc1, .crn = 6, .crm = crm, .opc2 = opc2,
9593                     .access = PL1_RW, .resetvalue = 0,
9594                     .accessfn = access_tvm_trvm,
9595                     .writefn = pmsav8r_regn_write, .readfn = pmsav8r_regn_read
9596                 };
9597                 define_one_arm_cp_reg(cpu, &tmp_prbarn_reginfo);
9598                 g_free(tmp_string);
9599 
9600                 opc2 = extract32(i, 0, 1) << 2 | 0x1;
9601                 tmp_string = g_strdup_printf("PRLAR%u", i);
9602                 ARMCPRegInfo tmp_prlarn_reginfo = {
9603                     .name = tmp_string, .type = ARM_CP_ALIAS | ARM_CP_NO_RAW,
9604                     .cp = 15, .opc1 = opc1, .crn = 6, .crm = crm, .opc2 = opc2,
9605                     .access = PL1_RW, .resetvalue = 0,
9606                     .accessfn = access_tvm_trvm,
9607                     .writefn = pmsav8r_regn_write, .readfn = pmsav8r_regn_read
9608                 };
9609                 define_one_arm_cp_reg(cpu, &tmp_prlarn_reginfo);
9610                 g_free(tmp_string);
9611             }
9612 
9613             /* Register aliases are only valid for the first 32 indexes */
9614             for (i = 0; i < MIN(cpu->pmsav8r_hdregion, 32); ++i) {
9615                 uint8_t crm = 0b1000 | extract32(i, 1, 3);
9616                 uint8_t opc1 = 0b100 | extract32(i, 4, 1);
9617                 uint8_t opc2 = extract32(i, 0, 1) << 2;
9618 
9619                 tmp_string = g_strdup_printf("HPRBAR%u", i);
9620                 ARMCPRegInfo tmp_hprbarn_reginfo = {
9621                     .name = tmp_string,
9622                     .type = ARM_CP_NO_RAW,
9623                     .cp = 15, .opc1 = opc1, .crn = 6, .crm = crm, .opc2 = opc2,
9624                     .access = PL2_RW, .resetvalue = 0,
9625                     .writefn = pmsav8r_regn_write, .readfn = pmsav8r_regn_read
9626                 };
9627                 define_one_arm_cp_reg(cpu, &tmp_hprbarn_reginfo);
9628                 g_free(tmp_string);
9629 
9630                 opc2 = extract32(i, 0, 1) << 2 | 0x1;
9631                 tmp_string = g_strdup_printf("HPRLAR%u", i);
9632                 ARMCPRegInfo tmp_hprlarn_reginfo = {
9633                     .name = tmp_string,
9634                     .type = ARM_CP_NO_RAW,
9635                     .cp = 15, .opc1 = opc1, .crn = 6, .crm = crm, .opc2 = opc2,
9636                     .access = PL2_RW, .resetvalue = 0,
9637                     .writefn = pmsav8r_regn_write, .readfn = pmsav8r_regn_read
9638                 };
9639                 define_one_arm_cp_reg(cpu, &tmp_hprlarn_reginfo);
9640                 g_free(tmp_string);
9641             }
9642         } else if (arm_feature(env, ARM_FEATURE_V7)) {
9643             define_one_arm_cp_reg(cpu, &id_mpuir_reginfo);
9644         }
9645     }
9646 
9647     if (arm_feature(env, ARM_FEATURE_MPIDR)) {
9648         ARMCPRegInfo mpidr_cp_reginfo[] = {
9649             { .name = "MPIDR_EL1", .state = ARM_CP_STATE_BOTH,
9650               .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
9651               .fgt = FGT_MPIDR_EL1,
9652               .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW },
9653         };
9654 #ifdef CONFIG_USER_ONLY
9655         static const ARMCPRegUserSpaceInfo mpidr_user_cp_reginfo[] = {
9656             { .name = "MPIDR_EL1",
9657               .fixed_bits = 0x0000000080000000 },
9658         };
9659         modify_arm_cp_regs(mpidr_cp_reginfo, mpidr_user_cp_reginfo);
9660 #endif
9661         define_arm_cp_regs(cpu, mpidr_cp_reginfo);
9662     }
9663 
9664     if (arm_feature(env, ARM_FEATURE_AUXCR)) {
9665         ARMCPRegInfo auxcr_reginfo[] = {
9666             { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH,
9667               .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1,
9668               .access = PL1_RW, .accessfn = access_tacr,
9669               .nv2_redirect_offset = 0x118,
9670               .type = ARM_CP_CONST, .resetvalue = cpu->reset_auxcr },
9671             { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH,
9672               .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1,
9673               .access = PL2_RW, .type = ARM_CP_CONST,
9674               .resetvalue = 0 },
9675             { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64,
9676               .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1,
9677               .access = PL3_RW, .type = ARM_CP_CONST,
9678               .resetvalue = 0 },
9679         };
9680         define_arm_cp_regs(cpu, auxcr_reginfo);
9681         if (cpu_isar_feature(aa32_ac2, cpu)) {
9682             define_arm_cp_regs(cpu, actlr2_hactlr2_reginfo);
9683         }
9684     }
9685 
9686     if (arm_feature(env, ARM_FEATURE_CBAR)) {
9687         /*
9688          * CBAR is IMPDEF, but common on Arm Cortex-A implementations.
9689          * There are two flavours:
9690          *  (1) older 32-bit only cores have a simple 32-bit CBAR
9691          *  (2) 64-bit cores have a 64-bit CBAR visible to AArch64, plus a
9692          *      32-bit register visible to AArch32 at a different encoding
9693          *      to the "flavour 1" register and with the bits rearranged to
9694          *      be able to squash a 64-bit address into the 32-bit view.
9695          * We distinguish the two via the ARM_FEATURE_AARCH64 flag, but
9696          * in future if we support AArch32-only configs of some of the
9697          * AArch64 cores we might need to add a specific feature flag
9698          * to indicate cores with "flavour 2" CBAR.
9699          */
9700         if (arm_feature(env, ARM_FEATURE_V8)) {
9701             /* 32 bit view is [31:18] 0...0 [43:32]. */
9702             uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18)
9703                 | extract64(cpu->reset_cbar, 32, 12);
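            /*
             * E.g. (illustrative) reset_cbar = 0x880000000 yields
             * cbar32 = 0x80000008: address bits [31:18] stay in place
             * and address bits [43:32] land in the low 12 bits.
             */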
9704             ARMCPRegInfo cbar_reginfo[] = {
9705                 { .name = "CBAR",
9706                   .type = ARM_CP_CONST,
9707                   .cp = 15, .crn = 15, .crm = 3, .opc1 = 1, .opc2 = 0,
9708                   .access = PL1_R, .resetvalue = cbar32 },
9709                 { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64,
9710                   .type = ARM_CP_CONST,
9711                   .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0,
9712                   .access = PL1_R, .resetvalue = cpu->reset_cbar },
9713             };
9714             /* We don't implement a r/w 64 bit CBAR currently */
9715             assert(arm_feature(env, ARM_FEATURE_CBAR_RO));
9716             define_arm_cp_regs(cpu, cbar_reginfo);
9717         } else {
9718             ARMCPRegInfo cbar = {
9719                 .name = "CBAR",
9720                 .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
9721                 .access = PL1_R | PL3_W, .resetvalue = cpu->reset_cbar,
9722                 .fieldoffset = offsetof(CPUARMState,
9723                                         cp15.c15_config_base_address)
9724             };
9725             if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
9726                 cbar.access = PL1_R;
9727                 cbar.fieldoffset = 0;
9728                 cbar.type = ARM_CP_CONST;
9729             }
9730             define_one_arm_cp_reg(cpu, &cbar);
9731         }
9732     }
9733 
9734     if (arm_feature(env, ARM_FEATURE_VBAR)) {
9735         static const ARMCPRegInfo vbar_cp_reginfo[] = {
9736             { .name = "VBAR", .state = ARM_CP_STATE_BOTH,
9737               .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
9738               .access = PL1_RW, .writefn = vbar_write,
9739               .accessfn = access_nv1,
9740               .fgt = FGT_VBAR_EL1,
9741               .nv2_redirect_offset = 0x250 | NV2_REDIR_NV1,
9742               .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s),
9743                                      offsetof(CPUARMState, cp15.vbar_ns) },
9744               .resetvalue = 0 },
9745         };
9746         define_arm_cp_regs(cpu, vbar_cp_reginfo);
9747     }
9748 
9749     /* Generic registers whose values depend on the implementation */
9750     {
9751         ARMCPRegInfo sctlr = {
9752             .name = "SCTLR", .state = ARM_CP_STATE_BOTH,
9753             .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
9754             .access = PL1_RW, .accessfn = access_tvm_trvm,
9755             .fgt = FGT_SCTLR_EL1,
9756             .nv2_redirect_offset = 0x110 | NV2_REDIR_NV1,
9757             .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s),
9758                                    offsetof(CPUARMState, cp15.sctlr_ns) },
9759             .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
9760             .raw_writefn = raw_write,
9761         };
9762         if (arm_feature(env, ARM_FEATURE_XSCALE)) {
9763             /*
9764              * Normally we would always end the TB on an SCTLR write, but Linux
9765              * arch/arm/mach-pxa/sleep.S expects two instructions following
9766              * an MMU enable to execute from cache.  Imitate this behaviour.
9767              */
9768             sctlr.type |= ARM_CP_SUPPRESS_TB_END;
9769         }
9770         define_one_arm_cp_reg(cpu, &sctlr);
9771 
9772         if (arm_feature(env, ARM_FEATURE_PMSA) &&
9773             arm_feature(env, ARM_FEATURE_V8)) {
9774             ARMCPRegInfo vsctlr = {
9775                 .name = "VSCTLR", .state = ARM_CP_STATE_AA32,
9776                 .cp = 15, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
9777                 .access = PL2_RW, .resetvalue = 0x0,
9778                 .fieldoffset = offsetoflow32(CPUARMState, cp15.vsctlr),
9779             };
9780             define_one_arm_cp_reg(cpu, &vsctlr);
9781         }
9782     }
9783 
9784     if (cpu_isar_feature(aa64_lor, cpu)) {
9785         define_arm_cp_regs(cpu, lor_reginfo);
9786     }
9787     if (cpu_isar_feature(aa64_pan, cpu)) {
9788         define_one_arm_cp_reg(cpu, &pan_reginfo);
9789     }
9790 #ifndef CONFIG_USER_ONLY
9791     if (cpu_isar_feature(aa64_ats1e1, cpu)) {
9792         define_arm_cp_regs(cpu, ats1e1_reginfo);
9793     }
9794     if (cpu_isar_feature(aa32_ats1e1, cpu)) {
9795         define_arm_cp_regs(cpu, ats1cp_reginfo);
9796     }
9797 #endif
9798     if (cpu_isar_feature(aa64_uao, cpu)) {
9799         define_one_arm_cp_reg(cpu, &uao_reginfo);
9800     }
9801 
9802     if (cpu_isar_feature(aa64_dit, cpu)) {
9803         define_one_arm_cp_reg(cpu, &dit_reginfo);
9804     }
9805     if (cpu_isar_feature(aa64_ssbs, cpu)) {
9806         define_one_arm_cp_reg(cpu, &ssbs_reginfo);
9807     }
9808     if (cpu_isar_feature(any_ras, cpu)) {
9809         define_arm_cp_regs(cpu, minimal_ras_reginfo);
9810     }
9811 
9812     if (cpu_isar_feature(aa64_vh, cpu) ||
9813         cpu_isar_feature(aa64_debugv8p2, cpu)) {
9814         define_one_arm_cp_reg(cpu, &contextidr_el2);
9815     }
9816     if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) {
9817         define_arm_cp_regs(cpu, vhe_reginfo);
9818     }
9819 
9820     if (cpu_isar_feature(aa64_sve, cpu)) {
9821         define_arm_cp_regs(cpu, zcr_reginfo);
9822     }
9823 
9824     if (cpu_isar_feature(aa64_hcx, cpu)) {
9825         define_one_arm_cp_reg(cpu, &hcrx_el2_reginfo);
9826     }
9827 
9828 #ifdef TARGET_AARCH64
9829     if (cpu_isar_feature(aa64_sme, cpu)) {
9830         define_arm_cp_regs(cpu, sme_reginfo);
9831     }
9832     if (cpu_isar_feature(aa64_pauth, cpu)) {
9833         define_arm_cp_regs(cpu, pauth_reginfo);
9834     }
9835     if (cpu_isar_feature(aa64_rndr, cpu)) {
9836         define_arm_cp_regs(cpu, rndr_reginfo);
9837     }
9838     if (cpu_isar_feature(aa64_tlbirange, cpu)) {
9839         define_arm_cp_regs(cpu, tlbirange_reginfo);
9840     }
9841     if (cpu_isar_feature(aa64_tlbios, cpu)) {
9842         define_arm_cp_regs(cpu, tlbios_reginfo);
9843     }
9844     /* Data Cache clean instructions up to PoP */
9845     if (cpu_isar_feature(aa64_dcpop, cpu)) {
9846         define_one_arm_cp_reg(cpu, dcpop_reg);
9847 
9848         if (cpu_isar_feature(aa64_dcpodp, cpu)) {
9849             define_one_arm_cp_reg(cpu, dcpodp_reg);
9850         }
9851     }
9852 
9853     /*
9854      * If full MTE is enabled, add all of the system registers.
9855      * If only "instructions available at EL0" are enabled,
9856      * then define only a RAZ/WI version of PSTATE.TCO.
9857      */
9858     if (cpu_isar_feature(aa64_mte, cpu)) {
9859         ARMCPRegInfo gmid_reginfo = {
9860             .name = "GMID_EL1", .state = ARM_CP_STATE_AA64,
9861             .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 4,
9862             .access = PL1_R, .accessfn = access_aa64_tid5,
9863             .type = ARM_CP_CONST, .resetvalue = cpu->gm_blocksize,
9864         };
9865         define_one_arm_cp_reg(cpu, &gmid_reginfo);
9866         define_arm_cp_regs(cpu, mte_reginfo);
9867         define_arm_cp_regs(cpu, mte_el0_cacheop_reginfo);
9868     } else if (cpu_isar_feature(aa64_mte_insn_reg, cpu)) {
9869         define_arm_cp_regs(cpu, mte_tco_ro_reginfo);
9870         define_arm_cp_regs(cpu, mte_el0_cacheop_reginfo);
9871     }
9872 
9873     if (cpu_isar_feature(aa64_scxtnum, cpu)) {
9874         define_arm_cp_regs(cpu, scxtnum_reginfo);
9875     }
9876 
9877     if (cpu_isar_feature(aa64_fgt, cpu)) {
9878         define_arm_cp_regs(cpu, fgt_reginfo);
9879     }
9880 
9881     if (cpu_isar_feature(aa64_rme, cpu)) {
9882         define_arm_cp_regs(cpu, rme_reginfo);
9883         if (cpu_isar_feature(aa64_mte, cpu)) {
9884             define_arm_cp_regs(cpu, rme_mte_reginfo);
9885         }
9886     }
9887 
9888     if (cpu_isar_feature(aa64_nv2, cpu)) {
9889         define_arm_cp_regs(cpu, nv2_reginfo);
9890     }
9891 #endif
9892 
9893     if (cpu_isar_feature(any_predinv, cpu)) {
9894         define_arm_cp_regs(cpu, predinv_reginfo);
9895     }
9896 
9897     if (cpu_isar_feature(any_ccidx, cpu)) {
9898         define_arm_cp_regs(cpu, ccsidr2_reginfo);
9899     }
9900 
9901 #ifndef CONFIG_USER_ONLY
9902     /*
9903      * Register redirections and aliases must be done last,
9904      * after the registers from the other extensions have been defined.
9905      */
9906     if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) {
9907         define_arm_vh_e2h_redirects_aliases(cpu);
9908     }
9909 #endif
9910 }
9911 
9912 /*
9913  * Private utility function for define_one_arm_cp_reg_with_opaque():
9914  * add a single reginfo struct to the hash table.
9915  */
9916 static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
9917                                    void *opaque, CPState state,
9918                                    CPSecureState secstate,
9919                                    int crm, int opc1, int opc2,
9920                                    const char *name)
9921 {
9922     CPUARMState *env = &cpu->env;
9923     uint32_t key;
9924     ARMCPRegInfo *r2;
9925     bool is64 = r->type & ARM_CP_64BIT;
9926     bool ns = secstate & ARM_CP_SECSTATE_NS;
9927     int cp = r->cp;
9928     size_t name_len;
9929     bool make_const;
9930 
9931     switch (state) {
9932     case ARM_CP_STATE_AA32:
9933         /* We assume it is a cp15 register if the .cp field is left unset. */
9934         if (cp == 0 && r->state == ARM_CP_STATE_BOTH) {
9935             cp = 15;
9936         }
9937         key = ENCODE_CP_REG(cp, is64, ns, r->crn, crm, opc1, opc2);
9938         break;
9939     case ARM_CP_STATE_AA64:
9940         /*
9941          * To allow abbreviation of ARMCPRegInfo definitions, we treat
9942          * cp == 0 as equivalent to the value for "standard guest-visible
9943          * sysreg".  STATE_BOTH definitions are also always "standard sysreg"
9944          * in their AArch64 view (the .cp value may be non-zero for the
9945          * benefit of the AArch32 view).
9946          */
9947         if (cp == 0 || r->state == ARM_CP_STATE_BOTH) {
9948             cp = CP_REG_ARM64_SYSREG_CP;
9949         }
9950         key = ENCODE_AA64_CP_REG(cp, r->crn, crm, r->opc0, opc1, opc2);
9951         break;
9952     default:
9953         g_assert_not_reached();
9954     }
9955 
9956     /* Overriding of an existing definition must be explicitly requested. */
9957     if (!(r->type & ARM_CP_OVERRIDE)) {
9958         const ARMCPRegInfo *oldreg = get_arm_cp_reginfo(cpu->cp_regs, key);
9959         if (oldreg) {
9960             assert(oldreg->type & ARM_CP_OVERRIDE);
9961         }
9962     }
9963 
9964     /*
9965      * Eliminate registers that are not present because the EL is missing.
9966      * Doing this here makes it easier to put all registers for a given
9967      * feature into the same ARMCPRegInfo array and define them all at once.
9968      */
9969     make_const = false;
9970     if (arm_feature(env, ARM_FEATURE_EL3)) {
9971         /*
9972          * An EL2 register without EL2 but with EL3 is (usually) RES0.
9973          * See rule RJFFP in section D1.1.3 of DDI0487H.a.
9974          */
9975         int min_el = ctz32(r->access) / 2;
9976         if (min_el == 2 && !arm_feature(env, ARM_FEATURE_EL2)) {
9977             if (r->type & ARM_CP_EL3_NO_EL2_UNDEF) {
9978                 return;
9979             }
9980             make_const = !(r->type & ARM_CP_EL3_NO_EL2_KEEP);
9981         }
9982     } else {
9983         CPAccessRights max_el = (arm_feature(env, ARM_FEATURE_EL2)
9984                                  ? PL2_RW : PL1_RW);
9985         if ((r->access & max_el) == 0) {
9986             return;
9987         }
9988     }
9989 
9990     /* Combine cpreg and name into one allocation. */
9991     name_len = strlen(name) + 1;
9992     r2 = g_malloc(sizeof(*r2) + name_len);
9993     *r2 = *r;
9994     r2->name = memcpy(r2 + 1, name, name_len);
9995 
9996     /*
9997      * Update fields to match the instantiation, overwriting wildcards
9998      * such as CP_ANY, ARM_CP_STATE_BOTH, or ARM_CP_SECSTATE_BOTH.
9999      */
10000     r2->cp = cp;
10001     r2->crm = crm;
10002     r2->opc1 = opc1;
10003     r2->opc2 = opc2;
10004     r2->state = state;
10005     r2->secure = secstate;
10006     if (opaque) {
10007         r2->opaque = opaque;
10008     }
10009 
10010     if (make_const) {
10011         /* This should not have been a very special register to begin with. */
10012         int old_special = r2->type & ARM_CP_SPECIAL_MASK;
10013         assert(old_special == 0 || old_special == ARM_CP_NOP);
10014         /*
10015          * Set the special function to CONST, retaining the other flags.
10016          * This is important for e.g. ARM_CP_SVE so that we still
10017          * take the SVE trap if CPTR_EL3.EZ == 0.
10018          */
10019         r2->type = (r2->type & ~ARM_CP_SPECIAL_MASK) | ARM_CP_CONST;
10020         /*
10021          * Usually, these registers become RES0, but there are a few
10022          * special cases like VPIDR_EL2 which have a constant non-zero
10023          * value with writes ignored.
10024          */
10025         if (!(r->type & ARM_CP_EL3_NO_EL2_C_NZ)) {
10026             r2->resetvalue = 0;
10027         }
10028         /*
10029          * ARM_CP_CONST has precedence, so removing the callbacks and
10030          * offsets are not strictly necessary, but it is potentially
10031          * offsets is not strictly necessary, but it is potentially
10032          */
10033         r2->readfn = NULL;
10034         r2->writefn = NULL;
10035         r2->raw_readfn = NULL;
10036         r2->raw_writefn = NULL;
10037         r2->resetfn = NULL;
10038         r2->fieldoffset = 0;
10039         r2->bank_fieldoffsets[0] = 0;
10040         r2->bank_fieldoffsets[1] = 0;
10041     } else {
10042         bool isbanked = r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1];
10043 
10044         if (isbanked) {
10045             /*
10046              * Register is banked (using both entries in array).
10047              * Overwriting fieldoffset as the array is only used to define
10048              * banked registers but later only fieldoffset is used.
10049              */
10050             r2->fieldoffset = r->bank_fieldoffsets[ns];
10051         }
10052         if (state == ARM_CP_STATE_AA32) {
10053             if (isbanked) {
10054                 /*
10055                  * If the register is banked then we don't need to migrate or
10056                  * reset the 32-bit instance in certain cases:
10057                  *
10058                  * 1) If the register has both 32-bit and 64-bit instances
10059                  *    then we can count on the 64-bit instance taking care
10060                  *    of the non-secure bank.
10061                  * 2) If ARMv8 is enabled then we can count on a 64-bit
10062                  *    version taking care of the secure bank.  This requires
10063                  *    that separate 32 and 64-bit definitions are provided.
10064                  */
10065                 if ((r->state == ARM_CP_STATE_BOTH && ns) ||
10066                     (arm_feature(env, ARM_FEATURE_V8) && !ns)) {
10067                     r2->type |= ARM_CP_ALIAS;
10068                 }
10069             } else if ((secstate != r->secure) && !ns) {
10070                 /*
10071                  * The register is not banked so we only want to allow
10072                  * migration of the non-secure instance.
10073                  */
10074                 r2->type |= ARM_CP_ALIAS;
10075             }
10076 
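            /*
             * The AArch32 view of an ARM_CP_STATE_BOTH register is the
             * low half of the 64-bit backing field, which on a
             * big-endian host lives at fieldoffset + 4.
             */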
10077             if (HOST_BIG_ENDIAN &&
10078                 r->state == ARM_CP_STATE_BOTH && r2->fieldoffset) {
10079                 r2->fieldoffset += sizeof(uint32_t);
10080             }
10081         }
10082     }
10083 
10084     /*
10085      * By convention, for wildcarded registers only the first
10086      * entry is used for migration; the others are marked as
10087      * ALIAS so we don't try to transfer the register
10088      * multiple times. Special registers (ie NOP/WFI) are
10089      * never migratable and not even raw-accessible.
10090      */
10091     if (r2->type & ARM_CP_SPECIAL_MASK) {
10092         r2->type |= ARM_CP_NO_RAW;
10093     }
10094     if (((r->crm == CP_ANY) && crm != 0) ||
10095         ((r->opc1 == CP_ANY) && opc1 != 0) ||
10096         ((r->opc2 == CP_ANY) && opc2 != 0)) {
10097         r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB;
10098     }
10099 
10100     /*
10101      * Check that raw accesses are either forbidden or handled. Note that
10102      * we can't assert this earlier because the setup of fieldoffset for
10103      * banked registers has to be done first.
10104      */
10105     if (!(r2->type & ARM_CP_NO_RAW)) {
10106         assert(!raw_accessors_invalid(r2));
10107     }
10108 
10109     g_hash_table_insert(cpu->cp_regs, (gpointer)(uintptr_t)key, r2);
10110 }
10111 
10112 
10113 void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
10114                                        const ARMCPRegInfo *r, void *opaque)
10115 {
10116     /*
10117      * Define implementations of coprocessor registers.
10118      * We store these in a hashtable because typically
10119      * there are fewer than 150 registers in a space which
10120      * is 16*16*16*8*8 = 262144 in size.
10121      * Wildcarding is supported for the crm, opc1 and opc2 fields.
10122      * If a register is defined twice then the second definition is
10123      * used, so this can be used to define some generic registers and
10124      * then override them with implementation specific variations.
10125      * At least one of the original and the second definition should
10126      * include ARM_CP_OVERRIDE in its type bits -- this is just a guard
10127      * against accidental use.
10128      *
10129      * The state field defines whether the register is to be
10130      * visible in the AArch32 or AArch64 execution state. If the
10131      * state is set to ARM_CP_STATE_BOTH then we synthesise a
10132      * reginfo structure for the AArch32 view, which sees the lower
10133      * 32 bits of the 64 bit register.
10134      *
10135      * Only registers visible in AArch64 may set r->opc0; opc0 cannot
10136      * be wildcarded. AArch64 registers are always considered to be 64
10137      * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of
10138      * the register, if any.
10139      */
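    /*
     * Illustrative example (a hypothetical register, not one defined
     * in this file): an AArch32 definition such as
     *   { .name = "FOO", .cp = 15, .crn = 7, .crm = CP_ANY,
     *     .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W, .type = ARM_CP_NOP }
     * expands below to 16 * 8 = 128 hash table entries, one per
     * (crm, opc2) pair; all but the crm = 0, opc2 = 0 entry are marked
     * ARM_CP_ALIAS | ARM_CP_NO_GDB by add_cpreg_to_hashtable().
     */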
10140     int crm, opc1, opc2;
10141     int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
10142     int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
10143     int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
10144     int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
10145     int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
10146     int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
10147     CPState state;
10148 
10149     /* 64 bit registers have only CRm and Opc1 fields */
10150     assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
10151     /* op0 only exists in the AArch64 encodings */
10152     assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0));
10153     /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */
10154     assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT));
10155     /*
10156      * This API is only for Arm's system coprocessors (14 and 15) or
10157      * (M-profile or v7A-and-earlier only) for implementation defined
10158      * coprocessors in the range 0..7.  Our decode assumes this, since
10159      * 8..13 can be used for other insns including VFP and Neon. See
10160      * valid_cp() in translate.c.  Assert here that we haven't tried
10161      * to use an invalid coprocessor number.
10162      */
10163     switch (r->state) {
10164     case ARM_CP_STATE_BOTH:
10165         /* 0 has a special meaning, but otherwise the same rules as AA32. */
10166         if (r->cp == 0) {
10167             break;
10168         }
10169         /* fall through */
10170     case ARM_CP_STATE_AA32:
10171         if (arm_feature(&cpu->env, ARM_FEATURE_V8) &&
10172             !arm_feature(&cpu->env, ARM_FEATURE_M)) {
10173             assert(r->cp >= 14 && r->cp <= 15);
10174         } else {
10175             assert(r->cp < 8 || (r->cp >= 14 && r->cp <= 15));
10176         }
10177         break;
10178     case ARM_CP_STATE_AA64:
10179         assert(r->cp == 0 || r->cp == CP_REG_ARM64_SYSREG_CP);
10180         break;
10181     default:
10182         g_assert_not_reached();
10183     }
10184     /*
10185      * The AArch64 pseudocode CheckSystemAccess() specifies that op1
10186      * encodes a minimum access level for the register. We roll this
10187      * runtime check into our general permission check code, so check
10188      * here that the reginfo's specified permissions are strict enough
10189      * to encompass the generic architectural permission check.
10190      */
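    /*
     * E.g. CTR_EL0 above is defined with opc1 == 3 and access PL0_R,
     * which falls within the PL0_RW mask that opc1 == 3 permits.
     */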
10191     if (r->state != ARM_CP_STATE_AA32) {
10192         CPAccessRights mask;
10193         switch (r->opc1) {
10194         case 0:
10195             /* min_EL EL1, but some accessible to EL0 via kernel ABI */
10196             mask = PL0U_R | PL1_RW;
10197             break;
10198         case 1: case 2:
10199             /* min_EL EL1 */
10200             mask = PL1_RW;
10201             break;
10202         case 3:
10203             /* min_EL EL0 */
10204             mask = PL0_RW;
10205             break;
10206         case 4:
10207         case 5:
10208             /* min_EL EL2 */
10209             mask = PL2_RW;
10210             break;
10211         case 6:
10212             /* min_EL EL3 */
10213             mask = PL3_RW;
10214             break;
10215         case 7:
10216             /* min_EL EL1, secure mode only (we don't check the latter) */
10217             mask = PL1_RW;
10218             break;
10219         default:
10220             /* broken reginfo with out-of-range opc1 */
10221             g_assert_not_reached();
10222         }
10223         /* assert our permissions are not too lax (stricter is fine) */
10224         assert((r->access & ~mask) == 0);
10225     }
10226 
10227     /*
10228      * Check that the register definition has enough info to handle
10229      * reads and writes if they are permitted.
10230      */
10231     if (!(r->type & (ARM_CP_SPECIAL_MASK | ARM_CP_CONST))) {
10232         if (r->access & PL3_R) {
10233             assert((r->fieldoffset ||
10234                    (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
10235                    r->readfn);
10236         }
10237         if (r->access & PL3_W) {
10238             assert((r->fieldoffset ||
10239                    (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
10240                    r->writefn);
10241         }
10242     }
10243 
10244     for (crm = crmmin; crm <= crmmax; crm++) {
10245         for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
10246             for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
10247                 for (state = ARM_CP_STATE_AA32;
10248                      state <= ARM_CP_STATE_AA64; state++) {
10249                     if (r->state != state && r->state != ARM_CP_STATE_BOTH) {
10250                         continue;
10251                     }
10252                     if (state == ARM_CP_STATE_AA32) {
10253                         /*
10254                          * Under AArch32 CP registers can be common
10255                          * (same for secure and non-secure world) or banked.
10256                          */
10257                         char *name;
10258 
10259                         switch (r->secure) {
10260                         case ARM_CP_SECSTATE_S:
10261                         case ARM_CP_SECSTATE_NS:
10262                             add_cpreg_to_hashtable(cpu, r, opaque, state,
10263                                                    r->secure, crm, opc1, opc2,
10264                                                    r->name);
10265                             break;
10266                         case ARM_CP_SECSTATE_BOTH:
10267                             name = g_strdup_printf("%s_S", r->name);
10268                             add_cpreg_to_hashtable(cpu, r, opaque, state,
10269                                                    ARM_CP_SECSTATE_S,
10270                                                    crm, opc1, opc2, name);
10271                             g_free(name);
10272                             add_cpreg_to_hashtable(cpu, r, opaque, state,
10273                                                    ARM_CP_SECSTATE_NS,
10274                                                    crm, opc1, opc2, r->name);
10275                             break;
10276                         default:
10277                             g_assert_not_reached();
10278                         }
10279                     } else {
10280                         /*
10281                          * AArch64 registers get mapped to non-secure instance
10282                          * of AArch32
10283                          */
10284                         add_cpreg_to_hashtable(cpu, r, opaque, state,
10285                                                ARM_CP_SECSTATE_NS,
10286                                                crm, opc1, opc2, r->name);
10287                     }
10288                 }
10289             }
10290         }
10291     }
10292 }
10293 
10294 /* Define a whole list of registers */
10295 void define_arm_cp_regs_with_opaque_len(ARMCPU *cpu, const ARMCPRegInfo *regs,
10296                                         void *opaque, size_t len)
10297 {
10298     size_t i;
10299     for (i = 0; i < len; ++i) {
10300         define_one_arm_cp_reg_with_opaque(cpu, regs + i, opaque);
10301     }
10302 }
10303 
10304 /*
10305  * Modify ARMCPRegInfo for access from userspace.
10306  *
10307  * This is a data-driven modification directed by
10308  * ARMCPRegUserSpaceInfo. All registers become ARM_CP_CONST, as
10309  * user-space cannot alter any values, and dynamic values pertaining to
10310  * execution state are hidden from the user-space view anyway.
10311  */
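
/*
 * Illustrative example: a mods entry such as
 *     { .name = "ID_AA64*", .is_glob = true }
 * turns every register whose name matches the glob into a RAZ constant,
 * while an exact-name entry with .exported_bits set keeps only the
 * listed fields of that register's resetvalue (plus any .fixed_bits),
 * as the MIDR_EL1 entry earlier in this file does.
 */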
10312 void modify_arm_cp_regs_with_len(ARMCPRegInfo *regs, size_t regs_len,
10313                                  const ARMCPRegUserSpaceInfo *mods,
10314                                  size_t mods_len)
10315 {
10316     for (size_t mi = 0; mi < mods_len; ++mi) {
10317         const ARMCPRegUserSpaceInfo *m = mods + mi;
10318         GPatternSpec *pat = NULL;
10319 
10320         if (m->is_glob) {
10321             pat = g_pattern_spec_new(m->name);
10322         }
10323         for (size_t ri = 0; ri < regs_len; ++ri) {
10324             ARMCPRegInfo *r = regs + ri;
10325 
10326             if (pat && g_pattern_match_string(pat, r->name)) {
10327                 r->type = ARM_CP_CONST;
10328                 r->access = PL0U_R;
10329                 r->resetvalue = 0;
10330                 /* continue */
10331             } else if (strcmp(r->name, m->name) == 0) {
10332                 r->type = ARM_CP_CONST;
10333                 r->access = PL0U_R;
10334                 r->resetvalue &= m->exported_bits;
10335                 r->resetvalue |= m->fixed_bits;
10336                 break;
10337             }
10338         }
10339         if (pat) {
10340             g_pattern_spec_free(pat);
10341         }
10342     }
10343 }
10344 
10345 const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
10346 {
10347     return g_hash_table_lookup(cpregs, (gpointer)(uintptr_t)encoded_cp);
10348 }
10349 
10350 void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
10351                          uint64_t value)
10352 {
10353     /* Helper coprocessor write function for write-ignore registers */
10354 }
10355 
10356 uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
10357 {
10358     /* Helper coprocessor read function for read-as-zero registers */
10359     return 0;
10360 }
10361 
10362 void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
10363 {
10364     /* Helper coprocessor reset function for do-nothing-on-reset registers */
10365 }
10366 
10367 static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type)
10368 {
10369     /*
10370      * Return true if it is not valid for us to switch to
10371      * this CPU mode (ie all the UNPREDICTABLE cases in
10372      * the ARM ARM CPSRWriteByInstr pseudocode).
10373      */
10374 
10375     /* Changes to or from Hyp via MSR and CPS are illegal. */
10376     if (write_type == CPSRWriteByInstr &&
10377         ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP ||
10378          mode == ARM_CPU_MODE_HYP)) {
10379         return 1;
10380     }
10381 
10382     switch (mode) {
10383     case ARM_CPU_MODE_USR:
10384         return 0;
10385     case ARM_CPU_MODE_SYS:
10386     case ARM_CPU_MODE_SVC:
10387     case ARM_CPU_MODE_ABT:
10388     case ARM_CPU_MODE_UND:
10389     case ARM_CPU_MODE_IRQ:
10390     case ARM_CPU_MODE_FIQ:
10391         /*
10392          * Note that we don't implement the IMPDEF NSACR.RFR which in v7
10393          * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.)
10394          */
10395         /*
10396          * If HCR.TGE is set then changes from Monitor to NS PL1 via MSR
10397          * and CPS are treated as illegal mode changes.
10398          */
10399         if (write_type == CPSRWriteByInstr &&
10400             (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON &&
10401             (arm_hcr_el2_eff(env) & HCR_TGE)) {
10402             return 1;
10403         }
10404         return 0;
10405     case ARM_CPU_MODE_HYP:
10406         return !arm_is_el2_enabled(env) || arm_current_el(env) < 2;
10407     case ARM_CPU_MODE_MON:
10408         return arm_current_el(env) < 3;
10409     default:
10410         return 1;
10411     }
10412 }
10413 
10414 uint32_t cpsr_read(CPUARMState *env)
10415 {
10416     int ZF;
10417     ZF = (env->ZF == 0);
10418     return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
10419         (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
10420         | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
10421         | ((env->condexec_bits & 0xfc) << 8)
10422         | (env->GE << 16) | (env->daif & CPSR_AIF);
10423 }
10424 
10425 void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
10426                 CPSRWriteType write_type)
10427 {
10428     uint32_t changed_daif;
10429     bool rebuild_hflags = (write_type != CPSRWriteRaw) &&
10430         (mask & (CPSR_M | CPSR_E | CPSR_IL));
10431 
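    /*
     * Flag storage convention (see cpsr_read() above): env->NF and
     * env->VF hold N and V in bit 31, env->CF is a plain 0/1, and
     * env->ZF is stored inverted -- zero exactly when Z is set -- so
     * the (~val) & CPSR_Z below is what lets cpsr_read()'s
     * (env->ZF == 0) recover the architectural Z flag.
     */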
10432     if (mask & CPSR_NZCV) {
10433         env->ZF = (~val) & CPSR_Z;
10434         env->NF = val;
10435         env->CF = (val >> 29) & 1;
10436         env->VF = (val << 3) & 0x80000000;
10437     }
10438     if (mask & CPSR_Q) {
10439         env->QF = ((val & CPSR_Q) != 0);
10440     }
10441     if (mask & CPSR_T) {
10442         env->thumb = ((val & CPSR_T) != 0);
10443     }
10444     if (mask & CPSR_IT_0_1) {
10445         env->condexec_bits &= ~3;
10446         env->condexec_bits |= (val >> 25) & 3;
10447     }
10448     if (mask & CPSR_IT_2_7) {
10449         env->condexec_bits &= 3;
10450         env->condexec_bits |= (val >> 8) & 0xfc;
10451     }
10452     if (mask & CPSR_GE) {
10453         env->GE = (val >> 16) & 0xf;
10454     }
10455 
10456     /*
10457      * In a V7 implementation that includes the security extensions but does
10458      * not include Virtualization Extensions, the SCR.FW and SCR.AW bits control
10459      * whether non-secure software is allowed to change the CPSR_F and CPSR_A
10460      * bits respectively.
10461      *
10462      * In a V8 implementation, it is permitted for privileged software to
10463      * change the CPSR A/F bits regardless of the SCR.AW/FW bits.
10464      */
10465     if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) &&
10466         arm_feature(env, ARM_FEATURE_EL3) &&
10467         !arm_feature(env, ARM_FEATURE_EL2) &&
10468         !arm_is_secure(env)) {
10469 
10470         changed_daif = (env->daif ^ val) & mask;
10471 
10472         if (changed_daif & CPSR_A) {
10473             /*
10474              * Check to see if we are allowed to change the masking of async
10475              * abort exceptions from a non-secure state.
10476              */
10477             if (!(env->cp15.scr_el3 & SCR_AW)) {
10478                 qemu_log_mask(LOG_GUEST_ERROR,
10479                               "Ignoring attempt to switch CPSR_A flag from "
10480                               "non-secure world with SCR.AW bit clear\n");
10481                 mask &= ~CPSR_A;
10482             }
10483         }
10484 
10485         if (changed_daif & CPSR_F) {
10486             /*
10487              * Check to see if we are allowed to change the masking of FIQ
10488              * exceptions from a non-secure state.
10489              */
10490             if (!(env->cp15.scr_el3 & SCR_FW)) {
10491                 qemu_log_mask(LOG_GUEST_ERROR,
10492                               "Ignoring attempt to switch CPSR_F flag from "
10493                               "non-secure world with SCR.FW bit clear\n");
10494                 mask &= ~CPSR_F;
10495             }
10496 
10497             /*
10498              * Check whether non-maskable FIQ (NMFI) support is enabled.
10499              * If this bit is set, software is not allowed to mask
10500              * FIQs, but is allowed to set CPSR_F to 0.
10501              */
10502             if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) &&
10503                 (val & CPSR_F)) {
10504                 qemu_log_mask(LOG_GUEST_ERROR,
10505                               "Ignoring attempt to enable CPSR_F flag "
10506                               "(non-maskable FIQ [NMFI] support enabled)\n");
10507                 mask &= ~CPSR_F;
10508             }
10509         }
10510     }
10511 
10512     env->daif &= ~(CPSR_AIF & mask);
10513     env->daif |= val & CPSR_AIF & mask;
10514 
10515     if (write_type != CPSRWriteRaw &&
10516         ((env->uncached_cpsr ^ val) & mask & CPSR_M)) {
10517         if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) {
10518             /*
10519              * Note that we can only get here in USR mode if this is a
10520              * gdb stub write; for this case we follow the architectural
10521              * behaviour for guest writes in USR mode of ignoring an attempt
10522              * to switch mode. (Those are caught by translate.c for writes
10523              * triggered by guest instructions.)
10524              */
10525             mask &= ~CPSR_M;
10526         } else if (bad_mode_switch(env, val & CPSR_M, write_type)) {
10527             /*
10528              * Attempt to switch to an invalid mode: this is UNPREDICTABLE in
10529              * v7, and has defined behaviour in v8:
10530              *  + leave CPSR.M untouched
10531              *  + allow changes to the other CPSR fields
10532              *  + set PSTATE.IL
10533              * For user changes via the GDB stub, we don't set PSTATE.IL,
10534              * as this would be unnecessarily harsh for a user error.
10535              */
10536             mask &= ~CPSR_M;
10537             if (write_type != CPSRWriteByGDBStub &&
10538                 arm_feature(env, ARM_FEATURE_V8)) {
10539                 mask |= CPSR_IL;
10540                 val |= CPSR_IL;
10541             }
10542             qemu_log_mask(LOG_GUEST_ERROR,
10543                           "Illegal AArch32 mode switch attempt from %s to %s\n",
10544                           aarch32_mode_name(env->uncached_cpsr),
10545                           aarch32_mode_name(val));
10546         } else {
10547             qemu_log_mask(CPU_LOG_INT, "%s %s to %s PC 0x%" PRIx32 "\n",
10548                           write_type == CPSRWriteExceptionReturn ?
10549                           "Exception return from AArch32" :
10550                           "AArch32 mode switch from",
10551                           aarch32_mode_name(env->uncached_cpsr),
10552                           aarch32_mode_name(val), env->regs[15]);
10553             switch_mode(env, val & CPSR_M);
10554         }
10555     }
10556     mask &= ~CACHED_CPSR_BITS;
10557     env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
10558     if (tcg_enabled() && rebuild_hflags) {
10559         arm_rebuild_hflags(env);
10560     }
10561 }
10562 
10563 #ifdef CONFIG_USER_ONLY
10564 
10565 static void switch_mode(CPUARMState *env, int mode)
10566 {
10567     ARMCPU *cpu = env_archcpu(env);
10568 
10569     if (mode != ARM_CPU_MODE_USR) {
10570         cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
10571     }
10572 }
10573 
10574 uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
10575                                  uint32_t cur_el, bool secure)
10576 {
10577     return 1;
10578 }
10579 
10580 void aarch64_sync_64_to_32(CPUARMState *env)
10581 {
10582     g_assert_not_reached();
10583 }
10584 
10585 #else
10586 
10587 static void switch_mode(CPUARMState *env, int mode)
10588 {
10589     int old_mode;
10590     int i;
10591 
10592     old_mode = env->uncached_cpsr & CPSR_M;
10593     if (mode == old_mode) {
10594         return;
10595     }
10596 
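    /*
     * r8-r12 are banked only between FIQ mode and everything else,
     * hence the 5-word copies when entering or leaving FIQ.
     */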
10597     if (old_mode == ARM_CPU_MODE_FIQ) {
10598         memcpy(env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
10599         memcpy(env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
10600     } else if (mode == ARM_CPU_MODE_FIQ) {
10601         memcpy(env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
10602         memcpy(env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
10603     }
10604 
10605     i = bank_number(old_mode);
10606     env->banked_r13[i] = env->regs[13];
10607     env->banked_spsr[i] = env->spsr;
10608 
10609     i = bank_number(mode);
10610     env->regs[13] = env->banked_r13[i];
10611     env->spsr = env->banked_spsr[i];
10612 
10613     env->banked_r14[r14_bank_number(old_mode)] = env->regs[14];
10614     env->regs[14] = env->banked_r14[r14_bank_number(mode)];
10615 }
10616 
10617 /*
10618  * Physical Interrupt Target EL Lookup Table
10619  *
10620  * [ From ARM ARM section G1.13.4 (Table G1-15) ]
10621  *
10622  * The below multi-dimensional table is used for looking up the target
10623  * exception level given numerous condition criteria.  Specifically, the
10624  * target EL is based on SCR and HCR routing controls as well as the
10625  * currently executing EL and secure state.
10626  *
10627  *    Dimensions:
10628  *    target_el_table[2][2][2][2][2][4]
10629  *                    |  |  |  |  |  +--- Current EL
10630  *                    |  |  |  |  +------ Non-secure(0)/Secure(1)
10631  *                    |  |  |  +--------- HCR mask override
10632  *                    |  |  +------------ SCR exec state control
10633  *                    |  +--------------- SCR mask override
10634  *                    +------------------ 32-bit(0)/64-bit(1) EL3
10635  *
10636  *    The table values are as such:
10637  *    0-3 = EL0-EL3
10638  *     -1 = Cannot occur
10639  *
10640  * The ARM ARM target EL table includes entries indicating that an "exception
10641  * is not taken".  The two cases where this is applicable are:
10642  *    1) An exception is taken from EL3 but the SCR does not have the exception
10643  *    routed to EL3.
10644  *    2) An exception is taken from EL2 but the HCR does not have the exception
10645  *    routed to EL2.
10646  * In these two cases, the below table contains a target of EL1.  This value is
10647  * returned as it is expected that the consumer of the table data will check
10648  * for "target EL >= current EL" to ensure the exception is not taken.
10649  *
10650  *            SCR     HCR
10651  *         64  EA     AMO                 From
10652  *        BIT IRQ     IMO      Non-secure         Secure
10653  *        EL3 FIQ  RW FMO   EL0 EL1 EL2 EL3   EL0 EL1 EL2 EL3
10654  */
10655 static const int8_t target_el_table[2][2][2][2][2][4] = {
10656     {{{{/* 0   0   0   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
10657        {/* 0   0   0   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},
10658       {{/* 0   0   1   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
10659        {/* 0   0   1   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},},
10660      {{{/* 0   1   0   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
10661        {/* 0   1   0   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},
10662       {{/* 0   1   1   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
10663        {/* 0   1   1   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},},},
10664     {{{{/* 1   0   0   0 */{ 1,  1,  2, -1 },{ 1,  1, -1,  1 },},
10665        {/* 1   0   0   1 */{ 2,  2,  2, -1 },{ 2,  2, -1,  1 },},},
10666       {{/* 1   0   1   0 */{ 1,  1,  1, -1 },{ 1,  1,  1,  1 },},
10667        {/* 1   0   1   1 */{ 2,  2,  2, -1 },{ 2,  2,  2,  1 },},},},
10668      {{{/* 1   1   0   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
10669        {/* 1   1   0   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},
10670       {{/* 1   1   1   0 */{ 3,  3,  3, -1 },{ 3,  3,  3,  3 },},
10671        {/* 1   1   1   1 */{ 3,  3,  3, -1 },{ 3,  3,  3,  3 },},},},},
10672 };
10673 
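      /*
       * Worked example of the lookup below: a physical IRQ taken from
       * non-secure EL0 with an AArch64 EL3 (is64 = 1), SCR_EL3.IRQ = 0,
       * SCR_EL3.RW = 1 and HCR_EL2.IMO = 1 indexes the "1 0 1 1" row
       * above, non-secure column: { 2, 2, 2, -1 }, i.e. the IRQ targets
       * EL2 when taken from EL0, EL1 or EL2, and cannot occur from EL3.
       */
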
10674 /*
10675  * Determine the target EL for physical exceptions
10676  */
10677 uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
10678                                  uint32_t cur_el, bool secure)
10679 {
10680     CPUARMState *env = cpu_env(cs);
10681     bool rw;
10682     bool scr;
10683     bool hcr;
10684     int target_el;
10685     /* Is the highest EL AArch64? */
10686     bool is64 = arm_feature(env, ARM_FEATURE_AARCH64);
10687     uint64_t hcr_el2;
10688 
10689     if (arm_feature(env, ARM_FEATURE_EL3)) {
10690         rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW);
10691     } else {
10692         /*
10693          * Either EL2 is the highest EL (and so the EL2 register width
10694          * is given by is64); or there is no EL2 or EL3, in which case
10695          * the value of 'rw' does not affect the table lookup anyway.
10696          */
10697         rw = is64;
10698     }
10699 
10700     hcr_el2 = arm_hcr_el2_eff(env);
10701     switch (excp_idx) {
10702     case EXCP_IRQ:
10703         scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ);
10704         hcr = hcr_el2 & HCR_IMO;
10705         break;
10706     case EXCP_FIQ:
10707         scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ);
10708         hcr = hcr_el2 & HCR_FMO;
10709         break;
10710     default:
10711         scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA);
10712         hcr = hcr_el2 & HCR_AMO;
10713         break;
10714     }
10715 
10716     /*
10717      * For these purposes, TGE and AMO/IMO/FMO both force the
10718      * interrupt to EL2.  Fold TGE into the bit extracted above.
10719      */
10720     hcr |= (hcr_el2 & HCR_TGE) != 0;
10721 
10722     /* Perform a table-lookup for the target EL given the current state */
10723     target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el];
10724 
10725     assert(target_el > 0);
10726 
10727     return target_el;
10728 }
10729 
10730 void arm_log_exception(CPUState *cs)
10731 {
10732     int idx = cs->exception_index;
10733 
10734     if (qemu_loglevel_mask(CPU_LOG_INT)) {
10735         const char *exc = NULL;
10736         static const char * const excnames[] = {
10737             [EXCP_UDEF] = "Undefined Instruction",
10738             [EXCP_SWI] = "SVC",
10739             [EXCP_PREFETCH_ABORT] = "Prefetch Abort",
10740             [EXCP_DATA_ABORT] = "Data Abort",
10741             [EXCP_IRQ] = "IRQ",
10742             [EXCP_FIQ] = "FIQ",
10743             [EXCP_BKPT] = "Breakpoint",
10744             [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
10745             [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
10746             [EXCP_HVC] = "Hypervisor Call",
10747             [EXCP_HYP_TRAP] = "Hypervisor Trap",
10748             [EXCP_SMC] = "Secure Monitor Call",
10749             [EXCP_VIRQ] = "Virtual IRQ",
10750             [EXCP_VFIQ] = "Virtual FIQ",
10751             [EXCP_SEMIHOST] = "Semihosting call",
10752             [EXCP_NOCP] = "v7M NOCP UsageFault",
10753             [EXCP_INVSTATE] = "v7M INVSTATE UsageFault",
10754             [EXCP_STKOF] = "v8M STKOF UsageFault",
10755             [EXCP_LAZYFP] = "v7M exception during lazy FP stacking",
10756             [EXCP_LSERR] = "v8M LSERR UsageFault",
10757             [EXCP_UNALIGNED] = "v7M UNALIGNED UsageFault",
10758             [EXCP_DIVBYZERO] = "v7M DIVBYZERO UsageFault",
10759             [EXCP_VSERR] = "Virtual SERR",
10760             [EXCP_GPC] = "Granule Protection Check",
10761         };
10762 
10763         if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
10764             exc = excnames[idx];
10765         }
10766         if (!exc) {
10767             exc = "unknown";
10768         }
10769         qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s] on CPU %d\n",
10770                       idx, exc, cs->cpu_index);
10771     }
10772 }
10773 
10774 /*
10775  * Function used to synchronize QEMU's AArch64 register set with AArch32
10776  * register set.  This is necessary when switching between AArch32 and AArch64
10777  * execution state.
10778  */
10779 void aarch64_sync_32_to_64(CPUARMState *env)
10780 {
10781     int i;
10782     uint32_t mode = env->uncached_cpsr & CPSR_M;
10783 
10784     /* We can blanket copy R[0:7] to X[0:7] */
10785     for (i = 0; i < 8; i++) {
10786         env->xregs[i] = env->regs[i];
10787     }
10788 
10789     /*
10790      * Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12.
10791      * Otherwise, they come from the banked user regs.
10792      */
10793     if (mode == ARM_CPU_MODE_FIQ) {
10794         for (i = 8; i < 13; i++) {
10795             env->xregs[i] = env->usr_regs[i - 8];
10796         }
10797     } else {
10798         for (i = 8; i < 13; i++) {
10799             env->xregs[i] = env->regs[i];
10800         }
10801     }
10802 
10803     /*
10804      * Registers x13-x23 are the various mode SP and LR registers. Registers
10805      * r13 and r14 are only copied if we are in that mode, otherwise we copy
10806      * from the mode banked register.
10807      */
10808     if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
10809         env->xregs[13] = env->regs[13];
10810         env->xregs[14] = env->regs[14];
10811     } else {
10812         env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)];
10813         /* HYP has no banked r14: in HYP mode, x14 comes from the live r14 */
10814         if (mode == ARM_CPU_MODE_HYP) {
10815             env->xregs[14] = env->regs[14];
10816         } else {
10817             env->xregs[14] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)];
10818         }
10819     }
10820 
10821     if (mode == ARM_CPU_MODE_HYP) {
10822         env->xregs[15] = env->regs[13];
10823     } else {
10824         env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)];
10825     }
10826 
10827     if (mode == ARM_CPU_MODE_IRQ) {
10828         env->xregs[16] = env->regs[14];
10829         env->xregs[17] = env->regs[13];
10830     } else {
10831         env->xregs[16] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)];
10832         env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)];
10833     }
10834 
10835     if (mode == ARM_CPU_MODE_SVC) {
10836         env->xregs[18] = env->regs[14];
10837         env->xregs[19] = env->regs[13];
10838     } else {
10839         env->xregs[18] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)];
10840         env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)];
10841     }
10842 
10843     if (mode == ARM_CPU_MODE_ABT) {
10844         env->xregs[20] = env->regs[14];
10845         env->xregs[21] = env->regs[13];
10846     } else {
10847         env->xregs[20] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)];
10848         env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)];
10849     }
10850 
10851     if (mode == ARM_CPU_MODE_UND) {
10852         env->xregs[22] = env->regs[14];
10853         env->xregs[23] = env->regs[13];
10854     } else {
10855         env->xregs[22] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)];
10856         env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)];
10857     }
10858 
10859     /*
10860      * Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
10861      * mode, then we can copy from r8-r14.  Otherwise, we copy from the
10862      * FIQ bank for r8-r14.
10863      */
10864     if (mode == ARM_CPU_MODE_FIQ) {
10865         for (i = 24; i < 31; i++) {
10866             env->xregs[i] = env->regs[i - 16];   /* X[24:30] <- R[8:14] */
10867         }
10868     } else {
10869         for (i = 24; i < 29; i++) {
10870             env->xregs[i] = env->fiq_regs[i - 24];
10871         }
10872         env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)];
10873         env->xregs[30] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)];
10874     }
10875 
10876     env->pc = env->regs[15];
10877 }
10878 
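      /*
       * For reference, the mapping implemented by aarch64_sync_32_to_64()
       * above and aarch64_sync_64_to_32() below is:
       *   x0-x7   <-> r0-r7            x15     <-> SP_hyp
       *   x8-x12  <-> r8_usr-r12_usr   x16/x17 <-> LR_irq/SP_irq
       *   x13/x14 <-> SP_usr/LR_usr    x18/x19 <-> LR_svc/SP_svc
       *   x20/x21 <-> LR_abt/SP_abt    x22/x23 <-> LR_und/SP_und
       *   x24-x30 <-> r8_fiq-r12_fiq, SP_fiq, LR_fiq
       */
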
10879 /*
10880  * Function used to synchronize QEMU's AArch32 register set with AArch64
10881  * register set.  This is necessary when switching between AArch32 and AArch64
10882  * execution state.
10883  */
10884 void aarch64_sync_64_to_32(CPUARMState *env)
10885 {
10886     int i;
10887     uint32_t mode = env->uncached_cpsr & CPSR_M;
10888 
10889     /* We can blanket copy X[0:7] to R[0:7] */
10890     for (i = 0; i < 8; i++) {
10891         env->regs[i] = env->xregs[i];
10892     }
10893 
10894     /*
10895      * Unless we are in FIQ mode, x8-x12 are copied back to the live r8-r12.
10896      * Otherwise, we copy x8-x12 into the banked user regs.
10897      */
10898     if (mode == ARM_CPU_MODE_FIQ) {
10899         for (i = 8; i < 13; i++) {
10900             env->usr_regs[i - 8] = env->xregs[i];
10901         }
10902     } else {
10903         for (i = 8; i < 13; i++) {
10904             env->regs[i] = env->xregs[i];
10905         }
10906     }
10907 
10908     /*
10909      * Registers r13 & r14 depend on the current mode.
10910      * If we are in a given mode, we copy the corresponding x registers to r13
10911      * and r14.  Otherwise, we copy the x register to the banked r13 and r14
10912      * for the mode.
10913      */
10914     if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
10915         env->regs[13] = env->xregs[13];
10916         env->regs[14] = env->xregs[14];
10917     } else {
10918         env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13];
10919 
10920         /*
10921          * HYP is an exception in that it does not have its own banked r14 but
10922          * shares the USR r14
10923          */
10924         if (mode == ARM_CPU_MODE_HYP) {
10925             env->regs[14] = env->xregs[14];
10926         } else {
10927             env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)] = env->xregs[14];
10928         }
10929     }
10930 
10931     if (mode == ARM_CPU_MODE_HYP) {
10932         env->regs[13] = env->xregs[15];
10933     } else {
10934         env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15];
10935     }
10936 
10937     if (mode == ARM_CPU_MODE_IRQ) {
10938         env->regs[14] = env->xregs[16];
10939         env->regs[13] = env->xregs[17];
10940     } else {
10941         env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16];
10942         env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17];
10943     }
10944 
10945     if (mode == ARM_CPU_MODE_SVC) {
10946         env->regs[14] = env->xregs[18];
10947         env->regs[13] = env->xregs[19];
10948     } else {
10949         env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18];
10950         env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19];
10951     }
10952 
10953     if (mode == ARM_CPU_MODE_ABT) {
10954         env->regs[14] = env->xregs[20];
10955         env->regs[13] = env->xregs[21];
10956     } else {
10957         env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20];
10958         env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21];
10959     }
10960 
10961     if (mode == ARM_CPU_MODE_UND) {
10962         env->regs[14] = env->xregs[22];
10963         env->regs[13] = env->xregs[23];
10964     } else {
10965         env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)] = env->xregs[22];
10966         env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23];
10967     }
10968 
10969     /*
10970      * Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
10971      * mode, then we can copy to r8-r14.  Otherwise, we copy to the
10972      * FIQ bank for r8-r14.
10973      */
10974     if (mode == ARM_CPU_MODE_FIQ) {
10975         for (i = 24; i < 31; i++) {
10976             env->regs[i - 16] = env->xregs[i];   /* X[24:30] -> R[8:14] */
10977         }
10978     } else {
10979         for (i = 24; i < 29; i++) {
10980             env->fiq_regs[i - 24] = env->xregs[i];
10981         }
10982         env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29];
10983         env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30];
10984     }
10985 
10986     env->regs[15] = env->pc;
10987 }
10988 
10989 static void take_aarch32_exception(CPUARMState *env, int new_mode,
10990                                    uint32_t mask, uint32_t offset,
10991                                    uint32_t newpc)
10992 {
10993     int new_el;
10994 
10995     /* Change the CPU state so as to actually take the exception. */
10996     switch_mode(env, new_mode);
10997 
10998     /*
10999      * For exceptions taken to AArch32 we must clear the SS bit in both
11000      * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now.
11001      */
11002     env->pstate &= ~PSTATE_SS;
11003     env->spsr = cpsr_read(env);
11004     /* Clear IT bits.  */
11005     env->condexec_bits = 0;
11006     /* Switch to the new mode, and to the correct instruction set.  */
11007     env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
11008 
11009     /* This must be after mode switching. */
11010     new_el = arm_current_el(env);
11011 
11012     /* Set new mode endianness */
11013     env->uncached_cpsr &= ~CPSR_E;
11014     if (env->cp15.sctlr_el[new_el] & SCTLR_EE) {
11015         env->uncached_cpsr |= CPSR_E;
11016     }
11017     /* J and IL must always be cleared for exception entry */
11018     env->uncached_cpsr &= ~(CPSR_IL | CPSR_J);
11019     env->daif |= mask;
11020 
11021     if (cpu_isar_feature(aa32_ssbs, env_archcpu(env))) {
11022         if (env->cp15.sctlr_el[new_el] & SCTLR_DSSBS_32) {
11023             env->uncached_cpsr |= CPSR_SSBS;
11024         } else {
11025             env->uncached_cpsr &= ~CPSR_SSBS;
11026         }
11027     }
11028 
11029     if (new_mode == ARM_CPU_MODE_HYP) {
11030         env->thumb = (env->cp15.sctlr_el[2] & SCTLR_TE) != 0;
11031         env->elr_el[2] = env->regs[15];
11032     } else {
11033         /* CPSR.PAN is normally preserved unless...  */
11034         if (cpu_isar_feature(aa32_pan, env_archcpu(env))) {
11035             switch (new_el) {
11036             case 3:
11037                 if (!arm_is_secure_below_el3(env)) {
11038                     /* ... the target is EL3, from non-secure state.  */
11039                     env->uncached_cpsr &= ~CPSR_PAN;
11040                     break;
11041                 }
11042                 /* ... the target is EL3, from secure state ... */
11043                 /* fall through */
11044             case 1:
11045                 /* ... the target is EL1 and SCTLR.SPAN is 0.  */
11046                 if (!(env->cp15.sctlr_el[new_el] & SCTLR_SPAN)) {
11047                     env->uncached_cpsr |= CPSR_PAN;
11048                 }
11049                 break;
11050             }
11051         }
11052         /*
11053          * Reading SCTLR here is a lie, as there was no c1_sys on V4T/V5,
11054          * but it is harmless; the V4T check just guards Thumb selection.
11055          */
11056         if (arm_feature(env, ARM_FEATURE_V4T)) {
11057             env->thumb =
11058                 (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0;
11059         }
11060         env->regs[14] = env->regs[15] + offset;
11061     }
11062     env->regs[15] = newpc;
11063 
11064     if (tcg_enabled()) {
11065         arm_rebuild_hflags(env);
11066     }
11067 }
11068 
11069 static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs)
11070 {
11071     /*
11072      * Handle exception entry to Hyp mode; this is sufficiently
11073      * different to entry to other AArch32 modes that we handle it
11074      * separately here.
11075      *
11076      * The vector table entry used is always the 0x14 Hyp mode entry point,
11077      * unless this is an UNDEF/SVC/HVC/abort taken from Hyp to Hyp.
11078      * The offset applied to the preferred return address is always zero
11079      * (see DDI0487C.a section G1.12.3).
11080      * PSTATE A/I/F masks are set based only on the SCR.EA/IRQ/FIQ values.
11081      */
11082     uint32_t addr, mask;
11083     ARMCPU *cpu = ARM_CPU(cs);
11084     CPUARMState *env = &cpu->env;
11085 
11086     switch (cs->exception_index) {
11087     case EXCP_UDEF:
11088         addr = 0x04;
11089         break;
11090     case EXCP_SWI:
11091         addr = 0x08;
11092         break;
11093     case EXCP_BKPT:
11094         /* Fall through to prefetch abort.  */
11095     case EXCP_PREFETCH_ABORT:
11096         env->cp15.ifar_s = env->exception.vaddress;
11097         qemu_log_mask(CPU_LOG_INT, "...with HIFAR 0x%x\n",
11098                       (uint32_t)env->exception.vaddress);
11099         addr = 0x0c;
11100         break;
11101     case EXCP_DATA_ABORT:
11102         env->cp15.dfar_s = env->exception.vaddress;
11103         qemu_log_mask(CPU_LOG_INT, "...with HDFAR 0x%x\n",
11104                       (uint32_t)env->exception.vaddress);
11105         addr = 0x10;
11106         break;
11107     case EXCP_IRQ:
11108         addr = 0x18;
11109         break;
11110     case EXCP_FIQ:
11111         addr = 0x1c;
11112         break;
11113     case EXCP_HVC:
11114         addr = 0x08;
11115         break;
11116     case EXCP_HYP_TRAP:
11117         addr = 0x14;
11118         break;
11119     default:
11120         cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
11121     }
11122 
11123     if (cs->exception_index != EXCP_IRQ && cs->exception_index != EXCP_FIQ) {
11124         if (!arm_feature(env, ARM_FEATURE_V8)) {
11125             /*
11126              * QEMU syndrome values are v8-style. v7 has the IL bit
11127              * UNK/SBZP for "field not valid" cases, where v8 uses RES1.
11128              * If this is a v7 CPU, squash the IL bit in those cases.
11129              */
11130             if (cs->exception_index == EXCP_PREFETCH_ABORT ||
11131                 (cs->exception_index == EXCP_DATA_ABORT &&
11132                  !(env->exception.syndrome & ARM_EL_ISV)) ||
11133                 syn_get_ec(env->exception.syndrome) == EC_UNCATEGORIZED) {
11134                 env->exception.syndrome &= ~ARM_EL_IL;
11135             }
11136         }
11137         env->cp15.esr_el[2] = env->exception.syndrome;
11138     }
11139 
11140     if (arm_current_el(env) != 2 && addr < 0x14) {
11141         addr = 0x14;
11142     }
11143 
11144     mask = 0;
11145     if (!(env->cp15.scr_el3 & SCR_EA)) {
11146         mask |= CPSR_A;
11147     }
11148     if (!(env->cp15.scr_el3 & SCR_IRQ)) {
11149         mask |= CPSR_I;
11150     }
11151     if (!(env->cp15.scr_el3 & SCR_FIQ)) {
11152         mask |= CPSR_F;
11153     }
11154 
11155     addr += env->cp15.hvbar;
11156 
11157     take_aarch32_exception(env, ARM_CPU_MODE_HYP, mask, 0, addr);
11158 }
11159 
11160 static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
11161 {
11162     ARMCPU *cpu = ARM_CPU(cs);
11163     CPUARMState *env = &cpu->env;
11164     uint32_t addr;
11165     uint32_t mask;
11166     int new_mode;
11167     uint32_t offset;
11168     uint32_t moe;
11169 
11170     /* If this is a debug exception we must update the DBGDSCR.MOE bits */
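          /* MOE encodings: 1 breakpoint, 3 BKPT insn, 5 vector catch, 10 watchpoint */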
11171     switch (syn_get_ec(env->exception.syndrome)) {
11172     case EC_BREAKPOINT:
11173     case EC_BREAKPOINT_SAME_EL:
11174         moe = 1;
11175         break;
11176     case EC_WATCHPOINT:
11177     case EC_WATCHPOINT_SAME_EL:
11178         moe = 10;
11179         break;
11180     case EC_AA32_BKPT:
11181         moe = 3;
11182         break;
11183     case EC_VECTORCATCH:
11184         moe = 5;
11185         break;
11186     default:
11187         moe = 0;
11188         break;
11189     }
11190 
11191     if (moe) {
11192         env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe);
11193     }
11194 
11195     if (env->exception.target_el == 2) {
11196         /* Debug exceptions are reported differently on AArch32 */
11197         switch (syn_get_ec(env->exception.syndrome)) {
11198         case EC_BREAKPOINT:
11199         case EC_BREAKPOINT_SAME_EL:
11200         case EC_AA32_BKPT:
11201         case EC_VECTORCATCH:
11202             env->exception.syndrome = syn_insn_abort(arm_current_el(env) == 2,
11203                                                      0, 0, 0x22);
11204             break;
11205         case EC_WATCHPOINT:
11206             env->exception.syndrome = syn_set_ec(env->exception.syndrome,
11207                                                  EC_DATAABORT);
11208             break;
11209         case EC_WATCHPOINT_SAME_EL:
11210             env->exception.syndrome = syn_set_ec(env->exception.syndrome,
11211                                                  EC_DATAABORT_SAME_EL);
11212             break;
11213         }
11214         arm_cpu_do_interrupt_aarch32_hyp(cs);
11215         return;
11216     }
11217 
11218     switch (cs->exception_index) {
11219     case EXCP_UDEF:
11220         new_mode = ARM_CPU_MODE_UND;
11221         addr = 0x04;
11222         mask = CPSR_I;
11223         if (env->thumb) {
11224             offset = 2;
11225         } else {
11226             offset = 4;
11227         }
11228         break;
11229     case EXCP_SWI:
11230         new_mode = ARM_CPU_MODE_SVC;
11231         addr = 0x08;
11232         mask = CPSR_I;
11233         /* The PC already points to the next instruction.  */
11234         offset = 0;
11235         break;
11236     case EXCP_BKPT:
11237         /* Fall through to prefetch abort.  */
11238     case EXCP_PREFETCH_ABORT:
11239         A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr);
11240         A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress);
11241         qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n",
11242                       env->exception.fsr, (uint32_t)env->exception.vaddress);
11243         new_mode = ARM_CPU_MODE_ABT;
11244         addr = 0x0c;
11245         mask = CPSR_A | CPSR_I;
11246         offset = 4;
11247         break;
11248     case EXCP_DATA_ABORT:
11249         A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
11250         A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress);
11251         qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
11252                       env->exception.fsr,
11253                       (uint32_t)env->exception.vaddress);
11254         new_mode = ARM_CPU_MODE_ABT;
11255         addr = 0x10;
11256         mask = CPSR_A | CPSR_I;
11257         offset = 8;
11258         break;
11259     case EXCP_IRQ:
11260         new_mode = ARM_CPU_MODE_IRQ;
11261         addr = 0x18;
11262         /* Disable IRQ and imprecise data aborts.  */
11263         mask = CPSR_A | CPSR_I;
11264         offset = 4;
11265         if (env->cp15.scr_el3 & SCR_IRQ) {
11266             /* IRQ routed to monitor mode */
11267             new_mode = ARM_CPU_MODE_MON;
11268             mask |= CPSR_F;
11269         }
11270         break;
11271     case EXCP_FIQ:
11272         new_mode = ARM_CPU_MODE_FIQ;
11273         addr = 0x1c;
11274         /* Disable FIQ, IRQ and imprecise data aborts.  */
11275         mask = CPSR_A | CPSR_I | CPSR_F;
11276         if (env->cp15.scr_el3 & SCR_FIQ) {
11277             /* FIQ routed to monitor mode */
11278             new_mode = ARM_CPU_MODE_MON;
11279         }
11280         offset = 4;
11281         break;
11282     case EXCP_VIRQ:
11283         new_mode = ARM_CPU_MODE_IRQ;
11284         addr = 0x18;
11285         /* Disable IRQ and imprecise data aborts.  */
11286         mask = CPSR_A | CPSR_I;
11287         offset = 4;
11288         break;
11289     case EXCP_VFIQ:
11290         new_mode = ARM_CPU_MODE_FIQ;
11291         addr = 0x1c;
11292         /* Disable FIQ, IRQ and imprecise data aborts.  */
11293         mask = CPSR_A | CPSR_I | CPSR_F;
11294         offset = 4;
11295         break;
11296     case EXCP_VSERR:
11297         {
11298             /*
11299              * Note that this is reported as a data abort, but the DFAR
11300              * has an UNKNOWN value.  Construct the SError syndrome from
11301              * AET and ExT fields.
11302              */
11303             ARMMMUFaultInfo fi = { .type = ARMFault_AsyncExternal, };
11304 
11305             if (extended_addresses_enabled(env)) {
11306                 env->exception.fsr = arm_fi_to_lfsc(&fi);
11307             } else {
11308                 env->exception.fsr = arm_fi_to_sfsc(&fi);
11309             }
11310             env->exception.fsr |= env->cp15.vsesr_el2 & 0xd000;
11311             A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
11312             qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x\n",
11313                           env->exception.fsr);
11314 
11315             new_mode = ARM_CPU_MODE_ABT;
11316             addr = 0x10;
11317             mask = CPSR_A | CPSR_I;
11318             offset = 8;
11319         }
11320         break;
11321     case EXCP_SMC:
11322         new_mode = ARM_CPU_MODE_MON;
11323         addr = 0x08;
11324         mask = CPSR_A | CPSR_I | CPSR_F;
11325         offset = 0;
11326         break;
11327     default:
11328         cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
11329         return; /* Never happens.  Keep compiler happy.  */
11330     }
11331 
11332     if (new_mode == ARM_CPU_MODE_MON) {
11333         addr += env->cp15.mvbar;
11334     } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
11335         /* High vectors. When enabled, base address cannot be remapped. */
11336         addr += 0xffff0000;
11337     } else {
11338         /*
11339          * ARM v7 architectures provide a vector base address register to remap
11340          * the interrupt vector table.
11341      * This register is only honoured in non-monitor mode, and is banked.
11342          * Note: only bits 31:5 are valid.
11343          */
11344         addr += A32_BANKED_CURRENT_REG_GET(env, vbar);
11345     }
11346 
11347     if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
11348         env->cp15.scr_el3 &= ~SCR_NS;
11349     }
11350 
11351     take_aarch32_exception(env, new_mode, mask, offset, addr);
11352 }
11353 
11354 static int aarch64_regnum(CPUARMState *env, int aarch32_reg)
11355 {
11356     /*
11357      * Return the register number of the AArch64 view of the AArch32
11358      * register @aarch32_reg. The CPUARMState CPSR is assumed to still
11359      * be that of the AArch32 mode the exception came from.
11360      */
11361     int mode = env->uncached_cpsr & CPSR_M;
11362 
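          /* The numbering below mirrors the mapping in aarch64_sync_32_to_64(). */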
11363     switch (aarch32_reg) {
11364     case 0 ... 7:
11365         return aarch32_reg;
11366     case 8 ... 12:
11367         return mode == ARM_CPU_MODE_FIQ ? aarch32_reg + 16 : aarch32_reg;
11368     case 13:
11369         switch (mode) {
11370         case ARM_CPU_MODE_USR:
11371         case ARM_CPU_MODE_SYS:
11372             return 13;
11373         case ARM_CPU_MODE_HYP:
11374             return 15;
11375         case ARM_CPU_MODE_IRQ:
11376             return 17;
11377         case ARM_CPU_MODE_SVC:
11378             return 19;
11379         case ARM_CPU_MODE_ABT:
11380             return 21;
11381         case ARM_CPU_MODE_UND:
11382             return 23;
11383         case ARM_CPU_MODE_FIQ:
11384             return 29;
11385         default:
11386             g_assert_not_reached();
11387         }
11388     case 14:
11389         switch (mode) {
11390         case ARM_CPU_MODE_USR:
11391         case ARM_CPU_MODE_SYS:
11392         case ARM_CPU_MODE_HYP:
11393             return 14;
11394         case ARM_CPU_MODE_IRQ:
11395             return 16;
11396         case ARM_CPU_MODE_SVC:
11397             return 18;
11398         case ARM_CPU_MODE_ABT:
11399             return 20;
11400         case ARM_CPU_MODE_UND:
11401             return 22;
11402         case ARM_CPU_MODE_FIQ:
11403             return 30;
11404         default:
11405             g_assert_not_reached();
11406         }
11407     case 15:
11408         return 31;
11409     default:
11410         g_assert_not_reached();
11411     }
11412 }
11413 
11414 static uint32_t cpsr_read_for_spsr_elx(CPUARMState *env)
11415 {
11416     uint32_t ret = cpsr_read(env);
11417 
11418     /* Move DIT to the correct location for SPSR_ELx */
11419     if (ret & CPSR_DIT) {
11420         ret &= ~CPSR_DIT;
11421         ret |= PSTATE_DIT;
11422     }
11423     /* Merge PSTATE.SS into SPSR_ELx */
11424     ret |= env->pstate & PSTATE_SS;
11425 
11426     return ret;
11427 }
11428 
11429 static bool syndrome_is_sync_extabt(uint32_t syndrome)
11430 {
11431     /* Return true if this syndrome value is a synchronous external abort */
11432     switch (syn_get_ec(syndrome)) {
11433     case EC_INSNABORT:
11434     case EC_INSNABORT_SAME_EL:
11435     case EC_DATAABORT:
11436     case EC_DATAABORT_SAME_EL:
11437         /* Look at fault status code for all the synchronous ext abort cases */
11438         switch (syndrome & 0x3f) {
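              /* 0x10: SEA; 0x13..0x17: SEA on translation table walk, level -1..3 */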
11439         case 0x10:
11440         case 0x13:
11441         case 0x14:
11442         case 0x15:
11443         case 0x16:
11444         case 0x17:
11445             return true;
11446         default:
11447             return false;
11448         }
11449     default:
11450         return false;
11451     }
11452 }
11453 
11454 /* Handle exception entry to a target EL which is using AArch64 */
11455 static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
11456 {
11457     ARMCPU *cpu = ARM_CPU(cs);
11458     CPUARMState *env = &cpu->env;
11459     unsigned int new_el = env->exception.target_el;
11460     target_ulong addr = env->cp15.vbar_el[new_el];
11461     unsigned int new_mode = aarch64_pstate_mode(new_el, true);
11462     unsigned int old_mode;
11463     unsigned int cur_el = arm_current_el(env);
11464     int rt;
11465 
11466     if (tcg_enabled()) {
11467         /*
11468          * Note that new_el can never be 0.  If cur_el is 0, then
11469          * el0_a64 is is_a64(), else el0_a64 is ignored.
11470          */
11471         aarch64_sve_change_el(env, cur_el, new_el, is_a64(env));
11472     }
11473 
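          /*
           * The offsets below implement the AArch64 vector table layout:
           * within a 0x200-byte block, +0x000 is synchronous, +0x080 IRQ,
           * +0x100 FIQ and +0x180 SError; block 0x000 serves the current
           * EL with SP_EL0, 0x200 the current EL with SP_ELx, 0x400 a
           * lower EL using AArch64 and 0x600 a lower EL using AArch32.
           */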
11474     if (cur_el < new_el) {
11475         /*
11476          * Entry vector offset depends on whether the implemented EL
11477          * immediately lower than the target level is using AArch32 or AArch64
11478          */
11479         bool is_aa64;
11480         uint64_t hcr;
11481 
11482         switch (new_el) {
11483         case 3:
11484             is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0;
11485             break;
11486         case 2:
11487             hcr = arm_hcr_el2_eff(env);
11488             if ((hcr & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
11489                 is_aa64 = (hcr & HCR_RW) != 0;
11490                 break;
11491             }
11492             /* fall through */
11493         case 1:
11494             is_aa64 = is_a64(env);
11495             break;
11496         default:
11497             g_assert_not_reached();
11498         }
11499 
11500         if (is_aa64) {
11501             addr += 0x400;
11502         } else {
11503             addr += 0x600;
11504         }
11505     } else if (pstate_read(env) & PSTATE_SP) {
11506         addr += 0x200;
11507     }
11508 
11509     switch (cs->exception_index) {
11510     case EXCP_GPC:
11511         qemu_log_mask(CPU_LOG_INT, "...with MFAR 0x%" PRIx64 "\n",
11512                       env->cp15.mfar_el3);
11513         /* fall through */
11514     case EXCP_PREFETCH_ABORT:
11515     case EXCP_DATA_ABORT:
11516         /*
11517          * FEAT_DoubleFault allows synchronous external aborts taken to EL3
11518          * to be delivered to the SError vector entrypoint.
11519          */
11520         if (new_el == 3 && (env->cp15.scr_el3 & SCR_EASE) &&
11521             syndrome_is_sync_extabt(env->exception.syndrome)) {
11522             addr += 0x180;
11523         }
11524         env->cp15.far_el[new_el] = env->exception.vaddress;
11525         qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
11526                       env->cp15.far_el[new_el]);
11527         /* fall through */
11528     case EXCP_BKPT:
11529     case EXCP_UDEF:
11530     case EXCP_SWI:
11531     case EXCP_HVC:
11532     case EXCP_HYP_TRAP:
11533     case EXCP_SMC:
11534         switch (syn_get_ec(env->exception.syndrome)) {
11535         case EC_ADVSIMDFPACCESSTRAP:
11536             /*
11537              * QEMU internal FP/SIMD syndromes from AArch32 include the
11538              * TA and coproc fields which are only exposed if the exception
11539              * is taken to AArch32 Hyp mode. Mask them out to get a valid
11540              * AArch64 format syndrome.
11541              */
11542             env->exception.syndrome &= ~MAKE_64BIT_MASK(0, 20);
11543             break;
11544         case EC_CP14RTTRAP:
11545         case EC_CP15RTTRAP:
11546         case EC_CP14DTTRAP:
11547             /*
11548              * For a trap on AArch32 MRC/MCR/LDC/STC the Rt field is currently
11549              * the raw register field from the insn; when taking this to
11550              * AArch64 we must convert it to the AArch64 view of the register
11551              * number. Notice that we read a 4-bit AArch32 register number and
11552              * write back a 5-bit AArch64 one.
11553              */
11554             rt = extract32(env->exception.syndrome, 5, 4);
11555             rt = aarch64_regnum(env, rt);
11556             env->exception.syndrome = deposit32(env->exception.syndrome,
11557                                                 5, 5, rt);
11558             break;
11559         case EC_CP15RRTTRAP:
11560         case EC_CP14RRTTRAP:
11561             /* Similarly for MRRC/MCRR traps for Rt and Rt2 fields */
11562             rt = extract32(env->exception.syndrome, 5, 4);
11563             rt = aarch64_regnum(env, rt);
11564             env->exception.syndrome = deposit32(env->exception.syndrome,
11565                                                 5, 5, rt);
11566             rt = extract32(env->exception.syndrome, 10, 4);
11567             rt = aarch64_regnum(env, rt);
11568             env->exception.syndrome = deposit32(env->exception.syndrome,
11569                                                 10, 5, rt);
11570             break;
11571         }
11572         env->cp15.esr_el[new_el] = env->exception.syndrome;
11573         break;
11574     case EXCP_IRQ:
11575     case EXCP_VIRQ:
11576         addr += 0x80;
11577         break;
11578     case EXCP_FIQ:
11579     case EXCP_VFIQ:
11580         addr += 0x100;
11581         break;
11582     case EXCP_VSERR:
11583         addr += 0x180;
11584         /* Construct the SError syndrome from IDS and ISS fields. */
11585         env->exception.syndrome = syn_serror(env->cp15.vsesr_el2 & 0x1ffffff);
11586         env->cp15.esr_el[new_el] = env->exception.syndrome;
11587         break;
11588     default:
11589         cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
11590     }
11591 
11592     if (is_a64(env)) {
11593         old_mode = pstate_read(env);
11594         aarch64_save_sp(env, arm_current_el(env));
11595         env->elr_el[new_el] = env->pc;
11596 
11597         if (cur_el == 1 && new_el == 1) {
11598             uint64_t hcr = arm_hcr_el2_eff(env);
11599             if ((hcr & (HCR_NV | HCR_NV1 | HCR_NV2)) == HCR_NV ||
11600                 (hcr & (HCR_NV | HCR_NV2)) == (HCR_NV | HCR_NV2)) {
11601                 /*
11602                  * FEAT_NV, FEAT_NV2 may need to report EL2 in the SPSR
11603                  * by setting M[3:2] to 0b10.
11604                  * If NV2 is disabled, change SPSR when NV,NV1 == 1,0 (I_ZJRNN)
11605                  * If NV2 is enabled, change SPSR when NV is 1 (I_DBTLM)
11606                  */
11607                 old_mode = deposit32(old_mode, 2, 2, 2);
11608             }
11609         }
11610     } else {
11611         old_mode = cpsr_read_for_spsr_elx(env);
11612         env->elr_el[new_el] = env->regs[15];
11613 
11614         aarch64_sync_32_to_64(env);
11615 
11616         env->condexec_bits = 0;
11617     }
11618     env->banked_spsr[aarch64_banked_spsr_index(new_el)] = old_mode;
11619 
11620     qemu_log_mask(CPU_LOG_INT, "...with SPSR 0x%x\n", old_mode);
11621     qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n",
11622                   env->elr_el[new_el]);
11623 
11624     if (cpu_isar_feature(aa64_pan, cpu)) {
11625         /* The value of PSTATE.PAN is normally preserved, except when ... */
11626         new_mode |= old_mode & PSTATE_PAN;
11627         switch (new_el) {
11628         case 2:
11629             /* ... the target is EL2 with HCR_EL2.{E2H,TGE} == '11' ...  */
11630             if ((arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE))
11631                 != (HCR_E2H | HCR_TGE)) {
11632                 break;
11633             }
11634             /* fall through */
11635         case 1:
11636             /* ... the target is EL1 ... */
11637             /* ... and SCTLR_ELx.SPAN == 0, then set to 1.  */
11638             if ((env->cp15.sctlr_el[new_el] & SCTLR_SPAN) == 0) {
11639                 new_mode |= PSTATE_PAN;
11640             }
11641             break;
11642         }
11643     }
11644     if (cpu_isar_feature(aa64_mte, cpu)) {
11645         new_mode |= PSTATE_TCO;
11646     }
11647 
11648     if (cpu_isar_feature(aa64_ssbs, cpu)) {
11649         if (env->cp15.sctlr_el[new_el] & SCTLR_DSSBS_64) {
11650             new_mode |= PSTATE_SSBS;
11651         } else {
11652             new_mode &= ~PSTATE_SSBS;
11653         }
11654     }
11655 
11656     pstate_write(env, PSTATE_DAIF | new_mode);
11657     env->aarch64 = true;
11658     aarch64_restore_sp(env, new_el);
11659 
11660     if (tcg_enabled()) {
11661         helper_rebuild_hflags_a64(env, new_el);
11662     }
11663 
11664     env->pc = addr;
11665 
11666     qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n",
11667                   new_el, env->pc, pstate_read(env));
11668 }
11669 
11670 /*
11671  * Do semihosting call and set the appropriate return value. All the
11672  * permission and validity checks have been done at translate time.
11673  *
11674  * We only see semihosting exceptions under TCG, as they are not
11675  * trapped to the hypervisor under KVM.
11676  */
11677 #ifdef CONFIG_TCG
11678 static void tcg_handle_semihosting(CPUState *cs)
11679 {
11680     ARMCPU *cpu = ARM_CPU(cs);
11681     CPUARMState *env = &cpu->env;
11682 
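          /* On return, step the PC past the instruction that raised the call. */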
11683     if (is_a64(env)) {
11684         qemu_log_mask(CPU_LOG_INT,
11685                       "...handling as semihosting call 0x%" PRIx64 "\n",
11686                       env->xregs[0]);
11687         do_common_semihosting(cs);
11688         env->pc += 4;
11689     } else {
11690         qemu_log_mask(CPU_LOG_INT,
11691                       "...handling as semihosting call 0x%x\n",
11692                       env->regs[0]);
11693         do_common_semihosting(cs);
11694         env->regs[15] += env->thumb ? 2 : 4;
11695     }
11696 }
11697 #endif
11698 
11699 /*
11700  * Handle a CPU exception for A and R profile CPUs.
11701  * Do any appropriate logging, handle PSCI calls, and then hand off
11702  * to the AArch64-entry or AArch32-entry function depending on the
11703  * target exception level's register width.
11704  *
11705  * Note: this is used for both TCG (as the do_interrupt tcg op),
11706  *       and KVM to re-inject guest debug exceptions, and to
11707  *       inject a Synchronous-External-Abort.
11708  */
11709 void arm_cpu_do_interrupt(CPUState *cs)
11710 {
11711     ARMCPU *cpu = ARM_CPU(cs);
11712     CPUARMState *env = &cpu->env;
11713     unsigned int new_el = env->exception.target_el;
11714 
11715     assert(!arm_feature(env, ARM_FEATURE_M));
11716 
11717     arm_log_exception(cs);
11718     qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env),
11719                   new_el);
11720     if (qemu_loglevel_mask(CPU_LOG_INT)
11721         && !excp_is_internal(cs->exception_index)) {
11722         qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n",
11723                       syn_get_ec(env->exception.syndrome),
11724                       env->exception.syndrome);
11725     }
11726 
11727     if (tcg_enabled() && arm_is_psci_call(cpu, cs->exception_index)) {
11728         arm_handle_psci_call(cpu);
11729         qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
11730         return;
11731     }
11732 
11733     /*
11734      * Semihosting semantics depend on the register width of the code
11735      * that caused the exception, not the target exception level, so
11736      * must be handled here.
11737      */
11738 #ifdef CONFIG_TCG
11739     if (cs->exception_index == EXCP_SEMIHOST) {
11740         tcg_handle_semihosting(cs);
11741         return;
11742     }
11743 #endif
11744 
11745     /*
11746      * Hooks may change global state, so the BQL must be held; it is
11747      * also required for any modification of
11748      * cs->interrupt_request.
11749      */
11750     g_assert(bql_locked());
11751 
11752     arm_call_pre_el_change_hook(cpu);
11753 
11754     assert(!excp_is_internal(cs->exception_index));
11755     if (arm_el_is_aa64(env, new_el)) {
11756         arm_cpu_do_interrupt_aarch64(cs);
11757     } else {
11758         arm_cpu_do_interrupt_aarch32(cs);
11759     }
11760 
11761     arm_call_el_change_hook(cpu);
11762 
11763     if (!kvm_enabled()) {
11764         cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
11765     }
11766 }
11767 #endif /* !CONFIG_USER_ONLY */
11768 
11769 uint64_t arm_sctlr(CPUARMState *env, int el)
11770 {
11771     /* Only EL0 needs to be adjusted for EL1&0 or EL2&0. */
11772     if (el == 0) {
11773         ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, 0);
11774         el = mmu_idx == ARMMMUIdx_E20_0 ? 2 : 1;
11775     }
11776     return env->cp15.sctlr_el[el];
11777 }
11778 
11779 int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx)
11780 {
11781     if (regime_has_2_ranges(mmu_idx)) {
11782         return extract64(tcr, 37, 2);
11783     } else if (regime_is_stage2(mmu_idx)) {
11784         return 0; /* VTCR_EL2 */
11785     } else {
11786         /* Replicate the single TBI bit so we always have 2 bits.  */
11787         return extract32(tcr, 20, 1) * 3;
11788     }
11789 }
11790 
11791 int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx)
11792 {
11793     if (regime_has_2_ranges(mmu_idx)) {
11794         return extract64(tcr, 51, 2);
11795     } else if (regime_is_stage2(mmu_idx)) {
11796         return 0; /* VTCR_EL2 */
11797     } else {
11798         /* Replicate the single TBID bit so we always have 2 bits.  */
11799         return extract32(tcr, 29, 1) * 3;
11800     }
11801 }
11802 
11803 int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx)
11804 {
11805     if (regime_has_2_ranges(mmu_idx)) {
11806         return extract64(tcr, 57, 2);
11807     } else {
11808         /* Replicate the single TCMA bit so we always have 2 bits.  */
11809         return extract32(tcr, 30, 1) * 3;
11810     }
11811 }
11812 
11813 static ARMGranuleSize tg0_to_gran_size(int tg)
11814 {
11815     switch (tg) {
11816     case 0:
11817         return Gran4K;
11818     case 1:
11819         return Gran64K;
11820     case 2:
11821         return Gran16K;
11822     default:
11823         return GranInvalid;
11824     }
11825 }
11826 
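      /* Note that TCR_ELx.TG1 uses a different encoding from TG0. */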
11827 static ARMGranuleSize tg1_to_gran_size(int tg)
11828 {
11829     switch (tg) {
11830     case 1:
11831         return Gran16K;
11832     case 2:
11833         return Gran4K;
11834     case 3:
11835         return Gran64K;
11836     default:
11837         return GranInvalid;
11838     }
11839 }
11840 
11841 static inline bool have4k(ARMCPU *cpu, bool stage2)
11842 {
11843     return stage2 ? cpu_isar_feature(aa64_tgran4_2, cpu)
11844         : cpu_isar_feature(aa64_tgran4, cpu);
11845 }
11846 
11847 static inline bool have16k(ARMCPU *cpu, bool stage2)
11848 {
11849     return stage2 ? cpu_isar_feature(aa64_tgran16_2, cpu)
11850         : cpu_isar_feature(aa64_tgran16, cpu);
11851 }
11852 
11853 static inline bool have64k(ARMCPU *cpu, bool stage2)
11854 {
11855     return stage2 ? cpu_isar_feature(aa64_tgran64_2, cpu)
11856         : cpu_isar_feature(aa64_tgran64, cpu);
11857 }
11858 
11859 static ARMGranuleSize sanitize_gran_size(ARMCPU *cpu, ARMGranuleSize gran,
11860                                          bool stage2)
11861 {
11862     switch (gran) {
11863     case Gran4K:
11864         if (have4k(cpu, stage2)) {
11865             return gran;
11866         }
11867         break;
11868     case Gran16K:
11869         if (have16k(cpu, stage2)) {
11870             return gran;
11871         }
11872         break;
11873     case Gran64K:
11874         if (have64k(cpu, stage2)) {
11875             return gran;
11876         }
11877         break;
11878     case GranInvalid:
11879         break;
11880     }
11881     /*
11882      * If the guest selects a granule size that isn't implemented,
11883      * the architecture requires that we behave as if it selected one
11884      * that is (with an IMPDEF choice of which one to pick). We choose
11885      * to implement the smallest supported granule size.
11886      */
11887     if (have4k(cpu, stage2)) {
11888         return Gran4K;
11889     }
11890     if (have16k(cpu, stage2)) {
11891         return Gran16K;
11892     }
11893     assert(have64k(cpu, stage2));
11894     return Gran64K;
11895 }
11896 
11897 ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
11898                                    ARMMMUIdx mmu_idx, bool data,
11899                                    bool el1_is_aa32)
11900 {
11901     uint64_t tcr = regime_tcr(env, mmu_idx);
11902     bool epd, hpd, tsz_oob, ds, ha, hd;
11903     int select, tsz, tbi, max_tsz, min_tsz, ps, sh;
11904     ARMGranuleSize gran;
11905     ARMCPU *cpu = env_archcpu(env);
11906     bool stage2 = regime_is_stage2(mmu_idx);
11907 
11908     if (!regime_has_2_ranges(mmu_idx)) {
11909         select = 0;
11910         tsz = extract32(tcr, 0, 6);
11911         gran = tg0_to_gran_size(extract32(tcr, 14, 2));
11912         if (stage2) {
11913             /* VTCR_EL2 */
11914             hpd = false;
11915         } else {
11916             hpd = extract32(tcr, 24, 1);
11917         }
11918         epd = false;
11919         sh = extract32(tcr, 12, 2);
11920         ps = extract32(tcr, 16, 3);
11921         ha = extract32(tcr, 21, 1) && cpu_isar_feature(aa64_hafs, cpu);
11922         hd = extract32(tcr, 22, 1) && cpu_isar_feature(aa64_hdbs, cpu);
11923         ds = extract64(tcr, 32, 1);
11924     } else {
11925         bool e0pd;
11926 
11927         /*
11928          * Bit 55 is always between the two regions, and is canonical for
11929          * determining if address tagging is enabled.
11930          */
11931         select = extract64(va, 55, 1);
11932         if (!select) {
11933             tsz = extract32(tcr, 0, 6);
11934             gran = tg0_to_gran_size(extract32(tcr, 14, 2));
11935             epd = extract32(tcr, 7, 1);
11936             sh = extract32(tcr, 12, 2);
11937             hpd = extract64(tcr, 41, 1);
11938             e0pd = extract64(tcr, 55, 1);
11939         } else {
11940             tsz = extract32(tcr, 16, 6);
11941             gran = tg1_to_gran_size(extract32(tcr, 30, 2));
11942             epd = extract32(tcr, 23, 1);
11943             sh = extract32(tcr, 28, 2);
11944             hpd = extract64(tcr, 42, 1);
11945             e0pd = extract64(tcr, 56, 1);
11946         }
11947         ps = extract64(tcr, 32, 3);
11948         ha = extract64(tcr, 39, 1) && cpu_isar_feature(aa64_hafs, cpu);
11949         hd = extract64(tcr, 40, 1) && cpu_isar_feature(aa64_hdbs, cpu);
11950         ds = extract64(tcr, 59, 1);
11951 
11952         if (e0pd && cpu_isar_feature(aa64_e0pd, cpu) &&
11953             regime_is_user(env, mmu_idx)) {
11954             epd = true;
11955         }
11956     }
11957 
11958     gran = sanitize_gran_size(cpu, gran, stage2);
11959 
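          /* FEAT_ST raises the maximum TxSZ from 39 to 48 (47 for a 64K granule). */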
11960     if (cpu_isar_feature(aa64_st, cpu)) {
11961         max_tsz = 48 - (gran == Gran64K);
11962     } else {
11963         max_tsz = 39;
11964     }
11965 
11966     /*
11967      * DS is RES0 unless FEAT_LPA2 is supported for the given page size;
11968      * adjust the effective value of DS, as documented.
11969      */
11970     min_tsz = 16;
11971     if (gran == Gran64K) {
11972         if (cpu_isar_feature(aa64_lva, cpu)) {
11973             min_tsz = 12;
11974         }
11975         ds = false;
11976     } else if (ds) {
11977         if (regime_is_stage2(mmu_idx)) {
11978             if (gran == Gran16K) {
11979                 ds = cpu_isar_feature(aa64_tgran16_2_lpa2, cpu);
11980             } else {
11981                 ds = cpu_isar_feature(aa64_tgran4_2_lpa2, cpu);
11982             }
11983         } else {
11984             if (gran == Gran16K) {
11985                 ds = cpu_isar_feature(aa64_tgran16_lpa2, cpu);
11986             } else {
11987                 ds = cpu_isar_feature(aa64_tgran4_lpa2, cpu);
11988             }
11989         }
11990         if (ds) {
11991             min_tsz = 12;
11992         }
11993     }
11994 
11995     if (stage2 && el1_is_aa32) {
11996         /*
11997          * For AArch32 EL1 the min txsz (and thus max IPA size) requirements
11998          * are loosened: a configured IPA of 40 bits is permitted even if
11999          * the implemented PA is less than that (and so a 40 bit IPA would
12000          * fault for an AArch64 EL1). See R_DTLMN.
12001          */
12002         min_tsz = MIN(min_tsz, 24);
12003     }
12004 
12005     if (tsz > max_tsz) {
12006         tsz = max_tsz;
12007         tsz_oob = true;
12008     } else if (tsz < min_tsz) {
12009         tsz = min_tsz;
12010         tsz_oob = true;
12011     } else {
12012         tsz_oob = false;
12013     }
12014 
12015     /* Present TBI as a composite with TBID.  */
12016     tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
12017     if (!data) {
12018         tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
12019     }
12020     tbi = (tbi >> select) & 1;
12021 
12022     return (ARMVAParameters) {
12023         .tsz = tsz,
12024         .ps = ps,
12025         .sh = sh,
12026         .select = select,
12027         .tbi = tbi,
12028         .epd = epd,
12029         .hpd = hpd,
12030         .tsz_oob = tsz_oob,
12031         .ds = ds,
12032         .ha = ha,
12033         .hd = ha && hd,
12034         .gran = gran,
12035     };
12036 }
12037 
12038 /*
12039  * Note that signed overflow is undefined in C.  The following routines are
12040  * careful to use unsigned types where modulo arithmetic is required.
12041  * Failure to do so _will_ break on newer gcc.
12042  */
12043 
12044 /* Signed saturating arithmetic.  */
12045 
12046 /* Perform 16-bit signed saturating addition.  */
12047 static inline uint16_t add16_sat(uint16_t a, uint16_t b)
12048 {
12049     uint16_t res;
12050 
12051     res = a + b;
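          /*
           * Signed overflow occurred iff the operands had the same sign
           * but the result's sign differs; saturate towards that sign.
           */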
12052     if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
12053         if (a & 0x8000) {
12054             res = 0x8000;
12055         } else {
12056             res = 0x7fff;
12057         }
12058     }
12059     return res;
12060 }
12061 
12062 /* Perform 8-bit signed saturating addition.  */
12063 static inline uint8_t add8_sat(uint8_t a, uint8_t b)
12064 {
12065     uint8_t res;
12066 
12067     res = a + b;
12068     if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
12069         if (a & 0x80) {
12070             res = 0x80;
12071         } else {
12072             res = 0x7f;
12073         }
12074     }
12075     return res;
12076 }
12077 
12078 /* Perform 16-bit signed saturating subtraction.  */
12079 static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
12080 {
12081     uint16_t res;
12082 
12083     res = a - b;
12084     if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
12085         if (a & 0x8000) {
12086             res = 0x8000;
12087         } else {
12088             res = 0x7fff;
12089         }
12090     }
12091     return res;
12092 }
12093 
12094 /* Perform 8-bit signed saturating subtraction.  */
12095 static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
12096 {
12097     uint8_t res;
12098 
12099     res = a - b;
12100     if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
12101         if (a & 0x80) {
12102             res = 0x80;
12103         } else {
12104             res = 0x7f;
12105         }
12106     }
12107     return res;
12108 }
12109 
12110 #define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
12111 #define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
12112 #define ADD8(a, b, n)  RESULT(add8_sat(a, b), n, 8);
12113 #define SUB8(a, b, n)  RESULT(sub8_sat(a, b), n, 8);
12114 #define PFX q
12115 
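      /*
       * op_addsub.h is a multiple-inclusion template: each #include below
       * expands the ADD16/SUB16/ADD8/SUB8 macros into a family of parallel
       * add/sub helpers named after PFX (here e.g. qadd16), then undefines
       * those macros ready for the next instantiation.
       */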
12116 #include "op_addsub.h"
12117 
12118 /* Unsigned saturating arithmetic.  */
12119 static inline uint16_t add16_usat(uint16_t a, uint16_t b)
12120 {
12121     uint16_t res;
12122     res = a + b;
12123     if (res < a) {
12124         res = 0xffff;
12125     }
12126     return res;
12127 }
12128 
12129 static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
12130 {
12131     if (a > b) {
12132         return a - b;
12133     } else {
12134         return 0;
12135     }
12136 }
12137 
12138 static inline uint8_t add8_usat(uint8_t a, uint8_t b)
12139 {
12140     uint8_t res;
12141     res = a + b;
12142     if (res < a) {
12143         res = 0xff;
12144     }
12145     return res;
12146 }
12147 
12148 static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
12149 {
12150     if (a > b) {
12151         return a - b;
12152     } else {
12153         return 0;
12154     }
12155 }
12156 
12157 #define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
12158 #define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
12159 #define ADD8(a, b, n)  RESULT(add8_usat(a, b), n, 8);
12160 #define SUB8(a, b, n)  RESULT(sub8_usat(a, b), n, 8);
12161 #define PFX uq
12162 
12163 #include "op_addsub.h"
12164 
12165 /* Signed modulo arithmetic.  */
12166 #define SARITH16(a, b, n, op) do { \
12167     int32_t sum; \
12168     sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
12169     RESULT(sum, n, 16); \
12170     if (sum >= 0) \
12171         ge |= 3 << (n * 2); \
12172     } while (0)
12173 
12174 #define SARITH8(a, b, n, op) do { \
12175     int32_t sum; \
12176     sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
12177     RESULT(sum, n, 8); \
12178     if (sum >= 0) \
12179         ge |= 1 << n; \
12180     } while (0)
12181 
12182 
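      /*
       * With ARITH_GE defined, op_addsub.h makes the generated helpers
       * also hand back the per-lane 'ge' bits accumulated by the ADD/SUB
       * macros; these become the CPSR.GE flags consumed by the SEL
       * instruction (see HELPER(sel_flags) below).
       */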
12183 #define ADD16(a, b, n) SARITH16(a, b, n, +)
12184 #define SUB16(a, b, n) SARITH16(a, b, n, -)
12185 #define ADD8(a, b, n)  SARITH8(a, b, n, +)
12186 #define SUB8(a, b, n)  SARITH8(a, b, n, -)
12187 #define PFX s
12188 #define ARITH_GE
12189 
12190 #include "op_addsub.h"
12191 
12192 /* Unsigned modulo arithmetic.  */
12193 #define ADD16(a, b, n) do { \
12194     uint32_t sum; \
12195     sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
12196     RESULT(sum, n, 16); \
12197     if ((sum >> 16) == 1) \
12198         ge |= 3 << (n * 2); \
12199     } while (0)
12200 
12201 #define ADD8(a, b, n) do { \
12202     uint32_t sum; \
12203     sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
12204     RESULT(sum, n, 8); \
12205     if ((sum >> 8) == 1) \
12206         ge |= 1 << n; \
12207     } while (0)
12208 
12209 #define SUB16(a, b, n) do { \
12210     uint32_t sum; \
12211     sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
12212     RESULT(sum, n, 16); \
12213     if ((sum >> 16) == 0) \
12214         ge |= 3 << (n * 2); \
12215     } while (0)
12216 
12217 #define SUB8(a, b, n) do { \
12218     uint32_t sum; \
12219     sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
12220     RESULT(sum, n, 8); \
12221     if ((sum >> 8) == 0) \
12222         ge |= 1 << n; \
12223     } while (0)
12224 
12225 #define PFX u
12226 #define ARITH_GE
12227 
12228 #include "op_addsub.h"
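
/*
 * Worked examples (illustrative): for UADD16, GE is set on carry out,
 * e.g. 0xffff + 0x0001 = 0x10000, so (sum >> 16) == 1 sets the lane's
 * GE bits while the stored halfword is 0x0000.  For USUB16, GE is set
 * only when there is no borrow, e.g. 0x0003 - 0x0005 = 0xfffffffe in
 * the 32-bit intermediate, so (sum >> 16) != 0 and GE stays clear.
 */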
12229 
12230 /* Halved signed arithmetic.  */
12231 #define ADD16(a, b, n) \
12232   RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
12233 #define SUB16(a, b, n) \
12234   RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
12235 #define ADD8(a, b, n) \
12236   RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
12237 #define SUB8(a, b, n) \
12238   RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
12239 #define PFX sh
12240 
12241 #include "op_addsub.h"
12242 
12243 /* Halved unsigned arithmetic.  */
12244 #define ADD16(a, b, n) \
12245   RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
12246 #define SUB16(a, b, n) \
12247   RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
12248 #define ADD8(a, b, n) \
12249   RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
12250 #define SUB8(a, b, n) \
12251   RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
12252 #define PFX uh
12253 
12254 #include "op_addsub.h"
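
/*
 * Illustrative examples (not part of the build): the 32-bit
 * intermediate cannot overflow, so halving needs no saturation
 * (assuming, as QEMU does elsewhere, that >> on a negative value is
 * an arithmetic shift):
 *   shadd8:  0x7f + 0x7f     -> (127 + 127) >> 1 == 127   (0x7f)
 *   shsub16: 0x0001 - 0x0002 -> (-1) >> 1 == -1           (0xffff)
 *   uhadd8:  0xff + 0x01     -> (255 + 1) >> 1 == 128     (0x80)
 */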
12255 
12256 static inline uint8_t do_usad(uint8_t a, uint8_t b)
12257 {
12258     if (a > b) {
12259         return a - b;
12260     } else {
12261         return b - a;
12262     }
12263 }
12264 
12265 /* Unsigned sum of absolute byte differences.  */
12266 uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
12267 {
12268     uint32_t sum;
12269     sum = do_usad(a, b);
12270     sum += do_usad(a >> 8, b >> 8);
12271     sum += do_usad(a >> 16, b >> 16);
12272     sum += do_usad(a >> 24, b >> 24);
12273     return sum;
12274 }
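
/*
 * Worked example (illustrative):
 *   usad8(0x01020304, 0x04030201)
 *     == |0x04 - 0x01| + |0x03 - 0x02| + |0x02 - 0x03| + |0x01 - 0x04|
 *     == 3 + 1 + 1 + 3 == 8
 * do_usad() takes uint8_t arguments, so each shift only has to bring
 * the relevant byte into the low 8 bits; the truncation is implicit.
 */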
12275 
12276 /* For ARMv6 SEL instruction.  */
12277 uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
12278 {
12279     uint32_t mask;
12280 
12281     mask = 0;
12282     if (flags & 1) {
12283         mask |= 0xff;
12284     }
12285     if (flags & 2) {
12286         mask |= 0xff00;
12287     }
12288     if (flags & 4) {
12289         mask |= 0xff0000;
12290     }
12291     if (flags & 8) {
12292         mask |= 0xff000000;
12293     }
12294     return (a & mask) | (b & ~mask);
12295 }
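
/*
 * Worked example (illustrative): with GE flags 0b0101 the mask is
 * 0x00ff00ff, so bytes 0 and 2 come from a and bytes 1 and 3 from b:
 *   sel_flags(0x5, 0x11223344, 0xaabbccdd) == 0xaa22cc44
 */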
12296 
12297 /*
12298  * CRC helpers.
12299  * The upper bytes of val (above the number specified by 'bytes') must have
12300  * been zeroed out by the caller.
12301  */
12302 uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes)
12303 {
12304     uint8_t buf[4];
12305 
12306     stl_le_p(buf, val);
12307 
12308     /* zlib crc32 converts the accumulator and output to one's complement.  */
12309     return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
12310 }
12311 
12312 uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
12313 {
12314     uint8_t buf[4];
12315 
12316     stl_le_p(buf, val);
12317 
12318     /* Linux crc32c converts the output to one's complement.  */
12319     return crc32c(acc, buf, bytes) ^ 0xffffffff;
12320 }
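
/*
 * Usage sketch (illustrative): the XORs in these helpers strip the
 * one's-complement pre/post-conditioning that zlib crc32 and crc32c
 * apply, leaving the raw polynomial update that the ARM CRC32/CRC32C
 * instructions require.  A CRC32W-style step over one 32-bit data
 * word would look like:
 *   acc = helper_crc32(acc, data_word, 4);
 */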
12321 
12322 /*
12323  * Return the exception level to which FP-disabled exceptions should
12324  * be taken, or 0 if FP is enabled.
12325  */
12326 int fp_exception_el(CPUARMState *env, int cur_el)
12327 {
12328 #ifndef CONFIG_USER_ONLY
12329     uint64_t hcr_el2;
12330 
12331     /*
12332      * CPACR and the CPTR registers don't exist before v6, so FP is
12333      * always accessible.
12334      */
12335     if (!arm_feature(env, ARM_FEATURE_V6)) {
12336         return 0;
12337     }
12338 
12339     if (arm_feature(env, ARM_FEATURE_M)) {
12340         /* CPACR can cause a NOCP UsageFault taken to the current security state */
12341         if (!v7m_cpacr_pass(env, env->v7m.secure, cur_el != 0)) {
12342             return 1;
12343         }
12344 
12345         if (arm_feature(env, ARM_FEATURE_M_SECURITY) && !env->v7m.secure) {
12346             if (!extract32(env->v7m.nsacr, 10, 1)) {
12347                 /* FP insns cause a NOCP UsageFault taken to Secure */
12348                 return 3;
12349             }
12350         }
12351 
12352         return 0;
12353     }
12354 
12355     hcr_el2 = arm_hcr_el2_eff(env);
12356 
12357     /*
12358      * The CPACR controls traps to EL1, or PL1 if we're 32 bit:
12359      * 0, 2 : trap EL0 and EL1/PL1 accesses
12360      * 1    : trap only EL0 accesses
12361      * 3    : trap no accesses
12362      * This register is ignored if HCR_EL2.E2H and HCR_EL2.TGE are both set.
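     *
     * For example (illustrative): with FPEN == 1, an FP access from
     * EL0 falls through into the case 0/2 body below (returning 1, or
     * 3 for an AArch32 Secure configuration), while the same access
     * from EL1 hits the break and is not trapped by CPACR.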
12363      */
12364     if ((hcr_el2 & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
12365         int fpen = FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, FPEN);
12366 
12367         switch (fpen) {
12368         case 1:
12369             if (cur_el != 0) {
12370                 break;
12371             }
12372             /* fall through */
12373         case 0:
12374         case 2:
12375             /* Trap from Secure PL0 or PL1 to Secure PL1. */
12376             if (!arm_el_is_aa64(env, 3)
12377                 && (cur_el == 3 || arm_is_secure_below_el3(env))) {
12378                 return 3;
12379             }
12380             if (cur_el <= 1) {
12381                 return 1;
12382             }
12383             break;
12384         }
12385     }
12386 
12387     /*
12388      * The NSACR allows A-profile AArch32 EL3 and M-profile secure mode
12389      * to control non-secure access to the FPU. It doesn't have any
12390      * effect if EL3 is AArch64 or if EL3 doesn't exist at all.
12391      */
12392     if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
12393         cur_el <= 2 && !arm_is_secure_below_el3(env)) {
12394         if (!extract32(env->cp15.nsacr, 10, 1)) {
12395             /* FP insns act as UNDEF */
12396             return cur_el == 2 ? 2 : 1;
12397         }
12398     }
12399 
12400     /*
12401      * CPTR_EL2 is present in v7VE or v8, and changes format
12402      * with HCR_EL2.E2H (regardless of TGE).
12403      */
12404     if (cur_el <= 2) {
12405         if (hcr_el2 & HCR_E2H) {
12406             switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, FPEN)) {
12407             case 1:
12408                 if (cur_el != 0 || !(hcr_el2 & HCR_TGE)) {
12409                     break;
12410                 }
12411                 /* fall through */
12412             case 0:
12413             case 2:
12414                 return 2;
12415             }
12416         } else if (arm_is_el2_enabled(env)) {
12417             if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TFP)) {
12418                 return 2;
12419             }
12420         }
12421     }
12422 
12423     /* CPTR_EL3 : present in v8 */
12424     if (FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TFP)) {
12425         /* Trap all FP ops to EL3 */
12426         return 3;
12427     }
12428 #endif
12429     return 0;
12430 }
12431 
12432 /* Return the exception level we're running at if this is our mmu_idx. */
12433 int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
12434 {
12435     if (mmu_idx & ARM_MMU_IDX_M) {
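        /* M-profile mmu_idx values encode the privilege level in bit 0. */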
12436         return mmu_idx & ARM_MMU_IDX_M_PRIV;
12437     }
12438 
12439     switch (mmu_idx) {
12440     case ARMMMUIdx_E10_0:
12441     case ARMMMUIdx_E20_0:
12442         return 0;
12443     case ARMMMUIdx_E10_1:
12444     case ARMMMUIdx_E10_1_PAN:
12445         return 1;
12446     case ARMMMUIdx_E2:
12447     case ARMMMUIdx_E20_2:
12448     case ARMMMUIdx_E20_2_PAN:
12449         return 2;
12450     case ARMMMUIdx_E3:
12451         return 3;
12452     default:
12453         g_assert_not_reached();
12454     }
12455 }
12456 
12457 #ifndef CONFIG_TCG
12458 ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
12459 {
12460     g_assert_not_reached();
12461 }
12462 #endif
12463 
12464 ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el)
12465 {
12466     ARMMMUIdx idx;
12467     uint64_t hcr;
12468 
12469     if (arm_feature(env, ARM_FEATURE_M)) {
12470         return arm_v7m_mmu_idx_for_secstate(env, env->v7m.secure);
12471     }
12472 
12473     /* See ARM pseudo-function ELIsInHost.  */
12474     switch (el) {
12475     case 0:
12476         hcr = arm_hcr_el2_eff(env);
12477         if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
12478             idx = ARMMMUIdx_E20_0;
12479         } else {
12480             idx = ARMMMUIdx_E10_0;
12481         }
12482         break;
12483     case 1:
12484         if (arm_pan_enabled(env)) {
12485             idx = ARMMMUIdx_E10_1_PAN;
12486         } else {
12487             idx = ARMMMUIdx_E10_1;
12488         }
12489         break;
12490     case 2:
12491         /* Note that TGE does not apply at EL2.  */
12492         if (arm_hcr_el2_eff(env) & HCR_E2H) {
12493             if (arm_pan_enabled(env)) {
12494                 idx = ARMMMUIdx_E20_2_PAN;
12495             } else {
12496                 idx = ARMMMUIdx_E20_2;
12497             }
12498         } else {
12499             idx = ARMMMUIdx_E2;
12500         }
12501         break;
12502     case 3:
12503         return ARMMMUIdx_E3;
12504     default:
12505         g_assert_not_reached();
12506     }
12507 
12508     return idx;
12509 }
12510 
12511 ARMMMUIdx arm_mmu_idx(CPUARMState *env)
12512 {
12513     return arm_mmu_idx_el(env, arm_current_el(env));
12514 }
12515 
12516 static bool mve_no_pred(CPUARMState *env)
12517 {
12518     /*
12519      * Return true if there is definitely no predication of MVE
12520      * instructions by VPR or LTPSIZE. (Returning false even if there
12521      * isn't any predication is OK; generated code will just be
12522      * a little worse.)
12523      * If the CPU does not implement MVE then this TB flag is always 0.
12524      *
12525      * NOTE: if you change this logic, the "recalculate s->mve_no_pred"
12526      * logic in gen_update_fp_context() needs to be updated to match.
12527      *
12528      * We do not include the effect of the ECI bits here -- they are
12529      * tracked in other TB flags. This simplifies the logic for
12530      * "when did we emit code that changes the MVE_NO_PRED TB flag
12531      * and thus need to end the TB?".
12532      */
12533     if (!cpu_isar_feature(aa32_mve, env_archcpu(env))) {
12534         return false;
12535     }
12536     if (env->v7m.vpr) {
12537         return false;
12538     }
12539     if (env->v7m.ltpsize < 4) {
12540         return false;
12541     }
12542     return true;
12543 }
12544 
12545 void cpu_get_tb_cpu_state(CPUARMState *env, vaddr *pc,
12546                           uint64_t *cs_base, uint32_t *pflags)
12547 {
12548     CPUARMTBFlags flags;
12549 
12550     assert_hflags_rebuild_correctly(env);
12551     flags = env->hflags;
12552 
12553     if (EX_TBFLAG_ANY(flags, AARCH64_STATE)) {
12554         *pc = env->pc;
12555         if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
12556             DP_TBFLAG_A64(flags, BTYPE, env->btype);
12557         }
12558     } else {
12559         *pc = env->regs[15];
12560 
12561         if (arm_feature(env, ARM_FEATURE_M)) {
12562             if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
12563                 FIELD_EX32(env->v7m.fpccr[M_REG_S], V7M_FPCCR, S)
12564                 != env->v7m.secure) {
12565                 DP_TBFLAG_M32(flags, FPCCR_S_WRONG, 1);
12566             }
12567 
12568             if ((env->v7m.fpccr[env->v7m.secure] & R_V7M_FPCCR_ASPEN_MASK) &&
12569                 (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) ||
12570                  (env->v7m.secure &&
12571                   !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)))) {
12572                 /*
12573                  * ASPEN is set, but FPCA/SFPA indicate that there is no
12574                  * active FP context; we must create a new FP context before
12575                  * executing any FP insn.
12576                  */
12577                 DP_TBFLAG_M32(flags, NEW_FP_CTXT_NEEDED, 1);
12578             }
12579 
12580             bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
12581             if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) {
12582                 DP_TBFLAG_M32(flags, LSPACT, 1);
12583             }
12584 
12585             if (mve_no_pred(env)) {
12586                 DP_TBFLAG_M32(flags, MVE_NO_PRED, 1);
12587             }
12588         } else {
12589             /*
12590              * Note that XSCALE_CPAR shares bits with VECSTRIDE.
12591              * Note that VECLEN+VECSTRIDE are RES0 for M-profile.
12592              */
12593             if (arm_feature(env, ARM_FEATURE_XSCALE)) {
12594                 DP_TBFLAG_A32(flags, XSCALE_CPAR, env->cp15.c15_cpar);
12595             } else {
12596                 DP_TBFLAG_A32(flags, VECLEN, env->vfp.vec_len);
12597                 DP_TBFLAG_A32(flags, VECSTRIDE, env->vfp.vec_stride);
12598             }
12599             if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) {
12600                 DP_TBFLAG_A32(flags, VFPEN, 1);
12601             }
12602         }
12603 
12604         DP_TBFLAG_AM32(flags, THUMB, env->thumb);
12605         DP_TBFLAG_AM32(flags, CONDEXEC, env->condexec_bits);
12606     }
12607 
12608     /*
12609      * The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
12610      * states defined in the ARM ARM for software singlestep:
12611      *  SS_ACTIVE   PSTATE.SS   State
12612      *     0            x       Inactive (the TB flag for SS is always 0)
12613      *     1            0       Active-pending
12614      *     1            1       Active-not-pending
12615      * SS_ACTIVE is set in hflags; PSTATE__SS is computed every TB.
12616      */
12617     if (EX_TBFLAG_ANY(flags, SS_ACTIVE) && (env->pstate & PSTATE_SS)) {
12618         DP_TBFLAG_ANY(flags, PSTATE__SS, 1);
12619     }
12620 
12621     *pflags = flags.flags;
12622     *cs_base = flags.flags2;
12623 }
12624 
12625 #ifdef TARGET_AARCH64
12626 /*
12627  * The manual says that when SVE is enabled and VQ is widened the
12628  * implementation is allowed to zero the previously inaccessible
12629  * portion of the registers.  The corollary to that is that when
12630  * SVE is enabled and VQ is narrowed we are also allowed to zero
12631  * the now inaccessible portion of the registers.
12632  *
12633  * The intent of this is that no predicate bit beyond VQ is ever set.
12634  * Which means that some operations on predicate registers themselves
12635  * may operate on full uint64_t or even unrolled across the maximum
12636  * uint64_t[4].  Performing the full-width host arithmetic
12637  * unconditionally may well be cheaper than conditionals that
12638  * restrict the operation to the relevant portion of a uint16_t[16].
12639  */
12640 void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq)
12641 {
12642     int i, j;
12643     uint64_t pmask;
12644 
12645     assert(vq >= 1 && vq <= ARM_MAX_VQ);
12646     assert(vq <= env_archcpu(env)->sve_max_vq);
12647 
12648     /* Zap the high bits of the zregs.  */
12649     for (i = 0; i < 32; i++) {
12650         memset(&env->vfp.zregs[i].d[2 * vq], 0, 16 * (ARM_MAX_VQ - vq));
12651     }
12652 
12653     /* Zap the high bits of the pregs and ffr.  */
12654     pmask = 0;
12655     if (vq & 3) {
12656         pmask = ~(-1ULL << (16 * (vq & 3)));
12657     }
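    /*
     * Worked example (illustrative): each 64-bit predicate word covers
     * four quadwords' worth of predicate state (16 predicate bits per
     * 128 bits of vector).  For vq == 2, pmask == ~(-1ULL << 32) ==
     * 0xffffffff, so p[0] keeps only its low 32 bits and every later
     * word is cleared outright.
     */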
12658     for (j = vq / 4; j < ARM_MAX_VQ / 4; j++) {
12659         for (i = 0; i < 17; ++i) { /* the 16 pregs plus FFR */
12660             env->vfp.pregs[i].p[j] &= pmask;
12661         }
12662         pmask = 0;
12663     }
12664 }
12665 
12666 static uint32_t sve_vqm1_for_el_sm_ena(CPUARMState *env, int el, bool sm)
12667 {
12668     int exc_el;
12669 
12670     if (sm) {
12671         exc_el = sme_exception_el(env, el);
12672     } else {
12673         exc_el = sve_exception_el(env, el);
12674     }
12675     if (exc_el) {
12676         return 0; /* disabled */
12677     }
12678     return sve_vqm1_for_el_sm(env, el, sm);
12679 }
12680 
12681 /*
12682  * Notice a change in SVE vector size when changing EL.
12683  */
12684 void aarch64_sve_change_el(CPUARMState *env, int old_el,
12685                            int new_el, bool el0_a64)
12686 {
12687     ARMCPU *cpu = env_archcpu(env);
12688     int old_len, new_len;
12689     bool old_a64, new_a64, sm;
12690 
12691     /* Nothing to do if no SVE.  */
12692     if (!cpu_isar_feature(aa64_sve, cpu)) {
12693         return;
12694     }
12695 
12696     /* Nothing to do if FP is disabled in either EL.  */
12697     if (fp_exception_el(env, old_el) || fp_exception_el(env, new_el)) {
12698         return;
12699     }
12700 
12701     old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64;
12702     new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64;
12703 
12704     /*
12705      * Both AArch64.TakeException and AArch64.ExceptionReturn
12706      * invoke ResetSVEState when taking an exception from, or
12707      * returning to, AArch32 state when PSTATE.SM is enabled.
12708      */
12709     sm = FIELD_EX64(env->svcr, SVCR, SM);
12710     if (old_a64 != new_a64 && sm) {
12711         arm_reset_sve_state(env);
12712         return;
12713     }
12714 
12715     /*
12716      * DDI0584A.d sec 3.2: "If SVE instructions are disabled or trapped
12717      * at ELx, or not available because the EL is in AArch32 state, then
12718      * for all purposes other than a direct read, the ZCR_ELx.LEN field
12719      * has an effective value of 0".
12720      *
12721      * Consider EL2 (aa64, vq=4) -> EL0 (aa32) -> EL1 (aa64, vq=0).
12722      * If we ignore aa32 state, we would fail to see the vq4->vq0 transition
12723      * from EL2->EL1.  Thus we go ahead and narrow when entering aa32 so that
12724      * we already have the correct register contents when encountering the
12725      * vq0->vq0 transition between EL0->EL1.
12726      */
12727     old_len = new_len = 0;
12728     if (old_a64) {
12729         old_len = sve_vqm1_for_el_sm_ena(env, old_el, sm);
12730     }
12731     if (new_a64) {
12732         new_len = sve_vqm1_for_el_sm_ena(env, new_el, sm);
12733     }
12734 
12735     /* When changing vector length, clear inaccessible state.  */
12736     if (new_len < old_len) {
12737         aarch64_sve_narrow_vq(env, new_len + 1);
12738     }
12739 }
12740 #endif
12741 
12742 #ifndef CONFIG_USER_ONLY
12743 ARMSecuritySpace arm_security_space(CPUARMState *env)
12744 {
12745     if (arm_feature(env, ARM_FEATURE_M)) {
12746         return arm_secure_to_space(env->v7m.secure);
12747     }
12748 
12749     /*
12750      * If EL3 is not supported then the secure state is implementation
12751      * defined, in which case QEMU defaults to non-secure.
12752      */
12753     if (!arm_feature(env, ARM_FEATURE_EL3)) {
12754         return ARMSS_NonSecure;
12755     }
12756 
12757     /* Check for AArch64 EL3 or AArch32 Mon. */
12758     if (is_a64(env)) {
12759         if (extract32(env->pstate, 2, 2) == 3) { /* PSTATE.EL == 3 */
12760             if (cpu_isar_feature(aa64_rme, env_archcpu(env))) {
12761                 return ARMSS_Root;
12762             } else {
12763                 return ARMSS_Secure;
12764             }
12765         }
12766     } else {
12767         if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
12768             return ARMSS_Secure;
12769         }
12770     }
12771 
12772     return arm_security_space_below_el3(env);
12773 }
12774 
12775 ARMSecuritySpace arm_security_space_below_el3(CPUARMState *env)
12776 {
12777     assert(!arm_feature(env, ARM_FEATURE_M));
12778 
12779     /*
12780      * If EL3 is not supported then the secure state is implementation
12781      * defined, in which case QEMU defaults to non-secure.
12782      */
12783     if (!arm_feature(env, ARM_FEATURE_EL3)) {
12784         return ARMSS_NonSecure;
12785     }
12786 
12787     /*
12788      * Note NSE cannot be set without RME, and NSE & !NS is Reserved.
12789      * Ignoring NSE when !NS retains consistency without having to
12790      * modify other predicates.
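     *
     * The resulting mapping (illustrative summary of the code below):
     *   SCR_EL3.NSE  SCR_EL3.NS   Space
     *        x           0        Secure (NSE ignored when NS == 0)
     *        0           1        NonSecure
     *        1           1        Realm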
12791      */
12792     if (!(env->cp15.scr_el3 & SCR_NS)) {
12793         return ARMSS_Secure;
12794     } else if (env->cp15.scr_el3 & SCR_NSE) {
12795         return ARMSS_Realm;
12796     } else {
12797         return ARMSS_NonSecure;
12798     }
12799 }
12800 #endif /* !CONFIG_USER_ONLY */
12801