xref: /openbmc/qemu/target/arm/helper.c (revision a6caeee8)
1 /*
2  * ARM generic helpers.
3  *
4  * This code is licensed under the GNU GPL v2 or later.
5  *
6  * SPDX-License-Identifier: GPL-2.0-or-later
7  */
8 
9 #include "qemu/osdep.h"
10 #include "qemu/units.h"
11 #include "qemu/log.h"
12 #include "trace.h"
13 #include "cpu.h"
14 #include "internals.h"
15 #include "exec/helper-proto.h"
16 #include "qemu/host-utils.h"
17 #include "qemu/main-loop.h"
18 #include "qemu/timer.h"
19 #include "qemu/bitops.h"
20 #include "qemu/crc32c.h"
21 #include "qemu/qemu-print.h"
22 #include "exec/exec-all.h"
23 #include <zlib.h> /* For crc32 */
24 #include "hw/irq.h"
25 #include "semihosting/semihost.h"
26 #include "sysemu/cpus.h"
27 #include "sysemu/cpu-timers.h"
28 #include "sysemu/kvm.h"
29 #include "qemu/range.h"
30 #include "qapi/qapi-commands-machine-target.h"
31 #include "qapi/error.h"
32 #include "qemu/guest-random.h"
33 #ifdef CONFIG_TCG
34 #include "arm_ldst.h"
35 #include "exec/cpu_ldst.h"
36 #include "semihosting/common-semi.h"
37 #endif
38 #include "cpregs.h"
39 
40 #define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
41 
42 static void switch_mode(CPUARMState *env, int mode);
43 
44 static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
45 {
46     assert(ri->fieldoffset);
47     if (cpreg_field_is_64bit(ri)) {
48         return CPREG_FIELD64(env, ri);
49     } else {
50         return CPREG_FIELD32(env, ri);
51     }
52 }
53 
54 static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
55                       uint64_t value)
56 {
57     assert(ri->fieldoffset);
58     if (cpreg_field_is_64bit(ri)) {
59         CPREG_FIELD64(env, ri) = value;
60     } else {
61         CPREG_FIELD32(env, ri) = value;
62     }
63 }
64 
65 static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
66 {
67     return (char *)env + ri->fieldoffset;
68 }
69 
70 uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
71 {
72     /* Raw read of a coprocessor register (as needed for migration, etc). */
73     if (ri->type & ARM_CP_CONST) {
74         return ri->resetvalue;
75     } else if (ri->raw_readfn) {
76         return ri->raw_readfn(env, ri);
77     } else if (ri->readfn) {
78         return ri->readfn(env, ri);
79     } else {
80         return raw_read(env, ri);
81     }
82 }
83 
84 static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
85                              uint64_t v)
86 {
87     /* Raw write of a coprocessor register (as needed for migration, etc).
88      * Note that constant registers are treated as write-ignored; the
89      * caller should verify success by testing whether a readback returns the
90      * value written.
91      */
92     if (ri->type & ARM_CP_CONST) {
93         return;
94     } else if (ri->raw_writefn) {
95         ri->raw_writefn(env, ri, v);
96     } else if (ri->writefn) {
97         ri->writefn(env, ri, v);
98     } else {
99         raw_write(env, ri, v);
100     }
101 }
102 
103 static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
104 {
105    /* Return true if the regdef would cause an assertion if you called
106     * read_raw_cp_reg() or write_raw_cp_reg() on it (i.e. if it is a
107     * program bug for it not to have the NO_RAW flag).
108     * NB that returning false here doesn't necessarily mean that calling
109     * read/write_raw_cp_reg() is safe, because we can't distinguish "has
110     * read/write access functions which are safe for raw use" from "has
111     * read/write access functions which have side effects but has forgotten
112     * to provide raw access functions".
113     * The tests here line up with the conditions in read/write_raw_cp_reg()
114     * and assertions in raw_read()/raw_write().
115     */
116     if ((ri->type & ARM_CP_CONST) ||
117         ri->fieldoffset ||
118         ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
119         return false;
120     }
121     return true;
122 }
123 
124 bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync)
125 {
126     /* Write the coprocessor state from cpu->env to the (index,value) list. */
127     int i;
128     bool ok = true;
129 
130     for (i = 0; i < cpu->cpreg_array_len; i++) {
131         uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
132         const ARMCPRegInfo *ri;
133         uint64_t newval;
134 
135         ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
136         if (!ri) {
137             ok = false;
138             continue;
139         }
140         if (ri->type & ARM_CP_NO_RAW) {
141             continue;
142         }
143 
144         newval = read_raw_cp_reg(&cpu->env, ri);
145         if (kvm_sync) {
146             /*
147              * Only sync if the previous list->cpustate sync succeeded.
148              * Rather than tracking the success/failure state for every
149              * item in the list, we just recheck "does the raw write we must
150              * have made in write_list_to_cpustate() read back OK" here.
151              */
152             uint64_t oldval = cpu->cpreg_values[i];
153 
154             if (oldval == newval) {
155                 continue;
156             }
157 
158             write_raw_cp_reg(&cpu->env, ri, oldval);
159             if (read_raw_cp_reg(&cpu->env, ri) != oldval) {
160                 continue;
161             }
162 
163             write_raw_cp_reg(&cpu->env, ri, newval);
164         }
165         cpu->cpreg_values[i] = newval;
166     }
167     return ok;
168 }
169 
170 bool write_list_to_cpustate(ARMCPU *cpu)
171 {
172     int i;
173     bool ok = true;
174 
175     for (i = 0; i < cpu->cpreg_array_len; i++) {
176         uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
177         uint64_t v = cpu->cpreg_values[i];
178         const ARMCPRegInfo *ri;
179 
180         ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
181         if (!ri) {
182             ok = false;
183             continue;
184         }
185         if (ri->type & ARM_CP_NO_RAW) {
186             continue;
187         }
188         /* Write value and confirm it reads back as written
189          * (to catch read-only registers and partially read-only
190          * registers where the incoming migration value doesn't match)
191          */
192         write_raw_cp_reg(&cpu->env, ri, v);
193         if (read_raw_cp_reg(&cpu->env, ri) != v) {
194             ok = false;
195         }
196     }
197     return ok;
198 }
199 
200 static void add_cpreg_to_list(gpointer key, gpointer opaque)
201 {
202     ARMCPU *cpu = opaque;
203     uint32_t regidx = (uintptr_t)key;
204     const ARMCPRegInfo *ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
205 
206     if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
207         cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
208         /* The value array need not be initialized at this point */
209         cpu->cpreg_array_len++;
210     }
211 }
212 
213 static void count_cpreg(gpointer key, gpointer opaque)
214 {
215     ARMCPU *cpu = opaque;
216     const ARMCPRegInfo *ri;
217 
218     ri = g_hash_table_lookup(cpu->cp_regs, key);
219 
220     if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
221         cpu->cpreg_array_len++;
222     }
223 }
224 
225 static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
226 {
227     uint64_t aidx = cpreg_to_kvm_id((uintptr_t)a);
228     uint64_t bidx = cpreg_to_kvm_id((uintptr_t)b);
229 
230     if (aidx > bidx) {
231         return 1;
232     }
233     if (aidx < bidx) {
234         return -1;
235     }
236     return 0;
237 }
238 
239 void init_cpreg_list(ARMCPU *cpu)
240 {
241     /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
242      * Note that we require cpreg_tuples[] to be sorted by key ID.
243      */
244     GList *keys;
245     int arraylen;
246 
247     keys = g_hash_table_get_keys(cpu->cp_regs);
248     keys = g_list_sort(keys, cpreg_key_compare);
249 
250     cpu->cpreg_array_len = 0;
251 
252     g_list_foreach(keys, count_cpreg, cpu);
253 
254     arraylen = cpu->cpreg_array_len;
255     cpu->cpreg_indexes = g_new(uint64_t, arraylen);
256     cpu->cpreg_values = g_new(uint64_t, arraylen);
257     cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
258     cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
259     cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
260     cpu->cpreg_array_len = 0;
261 
262     g_list_foreach(keys, add_cpreg_to_list, cpu);
263 
264     assert(cpu->cpreg_array_len == arraylen);
265 
266     g_list_free(keys);
267 }
268 
269 /*
270  * Some registers are not accessible from AArch32 EL3 if SCR.NS == 0.
271  */
272 static CPAccessResult access_el3_aa32ns(CPUARMState *env,
273                                         const ARMCPRegInfo *ri,
274                                         bool isread)
275 {
276     if (!is_a64(env) && arm_current_el(env) == 3 &&
277         arm_is_secure_below_el3(env)) {
278         return CP_ACCESS_TRAP_UNCATEGORIZED;
279     }
280     return CP_ACCESS_OK;
281 }
282 
283 /* Some secure-only AArch32 registers trap to EL3 if used from
284  * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
285  * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
286  * We assume that the .access field is set to PL1_RW.
287  */
288 static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
289                                             const ARMCPRegInfo *ri,
290                                             bool isread)
291 {
292     if (arm_current_el(env) == 3) {
293         return CP_ACCESS_OK;
294     }
295     if (arm_is_secure_below_el3(env)) {
296         if (env->cp15.scr_el3 & SCR_EEL2) {
297             return CP_ACCESS_TRAP_EL2;
298         }
299         return CP_ACCESS_TRAP_EL3;
300     }
301     /* This will be EL1 NS and EL2 NS, which just UNDEF */
302     return CP_ACCESS_TRAP_UNCATEGORIZED;
303 }
304 
305 static uint64_t arm_mdcr_el2_eff(CPUARMState *env)
306 {
307     return arm_is_el2_enabled(env) ? env->cp15.mdcr_el2 : 0;
308 }
309 
310 /* Check for traps to "powerdown debug" registers, which are controlled
311  * by MDCR.TDOSA
312  */
313 static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
314                                    bool isread)
315 {
316     int el = arm_current_el(env);
317     uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
318     bool mdcr_el2_tdosa = (mdcr_el2 & MDCR_TDOSA) || (mdcr_el2 & MDCR_TDE) ||
319         (arm_hcr_el2_eff(env) & HCR_TGE);
320 
321     if (el < 2 && mdcr_el2_tdosa) {
322         return CP_ACCESS_TRAP_EL2;
323     }
324     if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) {
325         return CP_ACCESS_TRAP_EL3;
326     }
327     return CP_ACCESS_OK;
328 }
329 
330 /* Check for traps to "debug ROM" registers, which are controlled
331  * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3.
332  */
333 static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri,
334                                   bool isread)
335 {
336     int el = arm_current_el(env);
337     uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
338     bool mdcr_el2_tdra = (mdcr_el2 & MDCR_TDRA) || (mdcr_el2 & MDCR_TDE) ||
339         (arm_hcr_el2_eff(env) & HCR_TGE);
340 
341     if (el < 2 && mdcr_el2_tdra) {
342         return CP_ACCESS_TRAP_EL2;
343     }
344     if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
345         return CP_ACCESS_TRAP_EL3;
346     }
347     return CP_ACCESS_OK;
348 }
349 
350 /* Check for traps to general debug registers, which are controlled
351  * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3.
352  */
353 static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
354                                   bool isread)
355 {
356     int el = arm_current_el(env);
357     uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
358     bool mdcr_el2_tda = (mdcr_el2 & MDCR_TDA) || (mdcr_el2 & MDCR_TDE) ||
359         (arm_hcr_el2_eff(env) & HCR_TGE);
360 
361     if (el < 2 && mdcr_el2_tda) {
362         return CP_ACCESS_TRAP_EL2;
363     }
364     if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
365         return CP_ACCESS_TRAP_EL3;
366     }
367     return CP_ACCESS_OK;
368 }
369 
370 /* Check for traps to performance monitor registers, which are controlled
371  * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
372  */
373 static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
374                                  bool isread)
375 {
376     int el = arm_current_el(env);
377     uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
378 
379     if (el < 2 && (mdcr_el2 & MDCR_TPM)) {
380         return CP_ACCESS_TRAP_EL2;
381     }
382     if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
383         return CP_ACCESS_TRAP_EL3;
384     }
385     return CP_ACCESS_OK;
386 }
387 
388 /* Check for traps from EL1 due to HCR_EL2.TVM and HCR_EL2.TRVM.  */
389 static CPAccessResult access_tvm_trvm(CPUARMState *env, const ARMCPRegInfo *ri,
390                                       bool isread)
391 {
392     if (arm_current_el(env) == 1) {
393         uint64_t trap = isread ? HCR_TRVM : HCR_TVM;
394         if (arm_hcr_el2_eff(env) & trap) {
395             return CP_ACCESS_TRAP_EL2;
396         }
397     }
398     return CP_ACCESS_OK;
399 }
400 
401 /* Check for traps from EL1 due to HCR_EL2.TSW.  */
402 static CPAccessResult access_tsw(CPUARMState *env, const ARMCPRegInfo *ri,
403                                  bool isread)
404 {
405     if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TSW)) {
406         return CP_ACCESS_TRAP_EL2;
407     }
408     return CP_ACCESS_OK;
409 }
410 
411 /* Check for traps from EL1 due to HCR_EL2.TACR.  */
412 static CPAccessResult access_tacr(CPUARMState *env, const ARMCPRegInfo *ri,
413                                   bool isread)
414 {
415     if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TACR)) {
416         return CP_ACCESS_TRAP_EL2;
417     }
418     return CP_ACCESS_OK;
419 }
420 
421 /* Check for traps from EL1 due to HCR_EL2.TTLB. */
422 static CPAccessResult access_ttlb(CPUARMState *env, const ARMCPRegInfo *ri,
423                                   bool isread)
424 {
425     if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TTLB)) {
426         return CP_ACCESS_TRAP_EL2;
427     }
428     return CP_ACCESS_OK;
429 }
430 
431 static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
432 {
433     ARMCPU *cpu = env_archcpu(env);
434 
435     raw_write(env, ri, value);
436     tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */
437 }
438 
439 static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
440 {
441     ARMCPU *cpu = env_archcpu(env);
442 
443     if (raw_read(env, ri) != value) {
444         /* Unlike real hardware, the QEMU TLB uses virtual addresses,
445          * not modified virtual addresses, so this causes a TLB flush.
446          */
447         tlb_flush(CPU(cpu));
448         raw_write(env, ri, value);
449     }
450 }
451 
452 static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
453                              uint64_t value)
454 {
455     ARMCPU *cpu = env_archcpu(env);
456 
457     if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA)
458         && !extended_addresses_enabled(env)) {
459         /* For VMSA (when not using the LPAE long descriptor page table
460          * format) this register includes the ASID, so do a TLB flush.
461          * For PMSA it is purely a process ID and no action is needed.
462          */
463         tlb_flush(CPU(cpu));
464     }
465     raw_write(env, ri, value);
466 }
467 
468 /* IS variants of TLB operations must affect all cores */
469 static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
470                              uint64_t value)
471 {
472     CPUState *cs = env_cpu(env);
473 
474     tlb_flush_all_cpus_synced(cs);
475 }
476 
477 static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
478                              uint64_t value)
479 {
480     CPUState *cs = env_cpu(env);
481 
482     tlb_flush_all_cpus_synced(cs);
483 }
484 
485 static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
486                              uint64_t value)
487 {
488     CPUState *cs = env_cpu(env);
489 
490     tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
491 }
492 
493 static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
494                              uint64_t value)
495 {
496     CPUState *cs = env_cpu(env);
497 
498     tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
499 }
500 
501 /*
502  * Non-IS variants of TLB operations are upgraded to
503  * IS versions if we are at EL1 and HCR_EL2.FB is effectively set to
504  * force broadcast of these operations.
505  */
506 static bool tlb_force_broadcast(CPUARMState *env)
507 {
508     return arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_FB);
509 }
510 
511 static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
512                           uint64_t value)
513 {
514     /* Invalidate all (TLBIALL) */
515     CPUState *cs = env_cpu(env);
516 
517     if (tlb_force_broadcast(env)) {
518         tlb_flush_all_cpus_synced(cs);
519     } else {
520         tlb_flush(cs);
521     }
522 }
523 
524 static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
525                           uint64_t value)
526 {
527     /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
528     CPUState *cs = env_cpu(env);
529 
530     value &= TARGET_PAGE_MASK;
531     if (tlb_force_broadcast(env)) {
532         tlb_flush_page_all_cpus_synced(cs, value);
533     } else {
534         tlb_flush_page(cs, value);
535     }
536 }
537 
538 static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
539                            uint64_t value)
540 {
541     /* Invalidate by ASID (TLBIASID) */
542     CPUState *cs = env_cpu(env);
543 
544     if (tlb_force_broadcast(env)) {
545         tlb_flush_all_cpus_synced(cs);
546     } else {
547         tlb_flush(cs);
548     }
549 }
550 
551 static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
552                            uint64_t value)
553 {
554     /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
555     CPUState *cs = env_cpu(env);
556 
557     value &= TARGET_PAGE_MASK;
558     if (tlb_force_broadcast(env)) {
559         tlb_flush_page_all_cpus_synced(cs, value);
560     } else {
561         tlb_flush_page(cs, value);
562     }
563 }
564 
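/*
 * TLBIALLNSNH{,IS}: invalidate all TLB entries for the Non-secure EL1&0
 * (non-Hyp) translation regime, i.e. the E10_* mmu indexes flushed below.
 */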
565 static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
566                                uint64_t value)
567 {
568     CPUState *cs = env_cpu(env);
569 
570     tlb_flush_by_mmuidx(cs,
571                         ARMMMUIdxBit_E10_1 |
572                         ARMMMUIdxBit_E10_1_PAN |
573                         ARMMMUIdxBit_E10_0);
574 }
575 
576 static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
577                                   uint64_t value)
578 {
579     CPUState *cs = env_cpu(env);
580 
581     tlb_flush_by_mmuidx_all_cpus_synced(cs,
582                                         ARMMMUIdxBit_E10_1 |
583                                         ARMMMUIdxBit_E10_1_PAN |
584                                         ARMMMUIdxBit_E10_0);
585 }
586 
587 
588 static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
589                               uint64_t value)
590 {
591     CPUState *cs = env_cpu(env);
592 
593     tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E2);
594 }
595 
596 static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
597                                  uint64_t value)
598 {
599     CPUState *cs = env_cpu(env);
600 
601     tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E2);
602 }
603 
604 static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
605                               uint64_t value)
606 {
607     CPUState *cs = env_cpu(env);
608     uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);
609 
610     tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E2);
611 }
612 
613 static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
614                                  uint64_t value)
615 {
616     CPUState *cs = env_cpu(env);
617     uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);
618 
619     tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
620                                              ARMMMUIdxBit_E2);
621 }
622 
623 static const ARMCPRegInfo cp_reginfo[] = {
624     /* Define the secure and non-secure FCSE identifier CP registers
625      * separately because there is no secure bank in V8 (no _EL3).  This allows
626      * the secure register to be properly reset and migrated. There is also no
627      * v8 EL1 version of the register so the non-secure instance stands alone.
628      */
629     { .name = "FCSEIDR",
630       .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
631       .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
632       .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
633       .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
634     { .name = "FCSEIDR_S",
635       .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
636       .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
637       .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
638       .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
639     /* Define the secure and non-secure context identifier CP registers
640      * separately because there is no secure bank in V8 (no _EL3).  This allows
641      * the secure register to be properly reset and migrated.  In the
642      * non-secure case, the 32-bit register will have reset and migration
643      * disabled during registration as it is handled by the 64-bit instance.
644      */
645     { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
646       .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
647       .access = PL1_RW, .accessfn = access_tvm_trvm,
648       .secure = ARM_CP_SECSTATE_NS,
649       .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
650       .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
651     { .name = "CONTEXTIDR_S", .state = ARM_CP_STATE_AA32,
652       .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
653       .access = PL1_RW, .accessfn = access_tvm_trvm,
654       .secure = ARM_CP_SECSTATE_S,
655       .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
656       .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
657 };
658 
659 static const ARMCPRegInfo not_v8_cp_reginfo[] = {
660     /* NB: Some of these registers exist in v8 but with more precise
661      * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
662      */
663     /* MMU Domain access control / MPU write buffer control */
664     { .name = "DACR",
665       .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
666       .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
667       .writefn = dacr_write, .raw_writefn = raw_write,
668       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
669                              offsetoflow32(CPUARMState, cp15.dacr_ns) } },
670     /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
671      * For v6 and v5, these mappings are overly broad.
672      */
673     { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
674       .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
675     { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
676       .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
677     { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
678       .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
679     { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
680       .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
681     /* Cache maintenance ops; some of this space may be overridden later. */
682     { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
683       .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
684       .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
685 };
686 
687 static const ARMCPRegInfo not_v6_cp_reginfo[] = {
688     /* Not all pre-v6 cores implemented this WFI, so this is slightly
689      * over-broad.
690      */
691     { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
692       .access = PL1_W, .type = ARM_CP_WFI },
693 };
694 
695 static const ARMCPRegInfo not_v7_cp_reginfo[] = {
696     /* Standard v6 WFI (also used in some pre-v6 cores); not present in v7,
697      * where it is UNPREDICTABLE; we choose to NOP as most implementations do.
698      */
699     { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
700       .access = PL1_W, .type = ARM_CP_WFI },
701     /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
702      * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
703      * OMAPCP will override this space.
704      */
705     { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
706       .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
707       .resetvalue = 0 },
708     { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
709       .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
710       .resetvalue = 0 },
711     /* v6 doesn't have the cache ID registers but Linux reads them anyway */
712     { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
713       .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
714       .resetvalue = 0 },
715     /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
716      * implementing it as RAZ means the "debug architecture version" bits
717      * will read as a reserved value, which should cause Linux to not try
718      * to use the debug hardware.
719      */
720     { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
721       .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
722     /* MMU TLB control. Note that the wildcarding means we cover not just
723      * the unified TLB ops but also the dside/iside/inner-shareable variants.
724      */
725     { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
726       .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
727       .type = ARM_CP_NO_RAW },
728     { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
729       .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
730       .type = ARM_CP_NO_RAW },
731     { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
732       .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
733       .type = ARM_CP_NO_RAW },
734     { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
735       .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
736       .type = ARM_CP_NO_RAW },
737     { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
738       .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
739     { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
740       .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
741 };
742 
743 static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
744                         uint64_t value)
745 {
746     uint32_t mask = 0;
747 
748     /* In ARMv8 most bits of CPACR_EL1 are RES0. */
749     if (!arm_feature(env, ARM_FEATURE_V8)) {
750         /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
751          * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
752          * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
753          */
754         if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))) {
755             /* VFP coprocessor: cp10 & cp11 [23:20] */
756             mask |= R_CPACR_ASEDIS_MASK |
757                     R_CPACR_D32DIS_MASK |
758                     R_CPACR_CP11_MASK |
759                     R_CPACR_CP10_MASK;
760 
761             if (!arm_feature(env, ARM_FEATURE_NEON)) {
762                 /* ASEDIS [31] bit is RAO/WI */
763                 value |= R_CPACR_ASEDIS_MASK;
764             }
765 
766             /* VFPv3 and upwards with NEON implement 32 double precision
767              * registers (D0-D31).
768              */
769             if (!cpu_isar_feature(aa32_simd_r32, env_archcpu(env))) {
770                 /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
771                 value |= R_CPACR_D32DIS_MASK;
772             }
773         }
774         value &= mask;
775     }
776 
777     /*
778      * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
779      * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
780      */
781     if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
782         !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
783         mask = R_CPACR_CP11_MASK | R_CPACR_CP10_MASK;
784         value = (value & ~mask) | (env->cp15.cpacr_el1 & mask);
785     }
786 
787     env->cp15.cpacr_el1 = value;
788 }
789 
790 static uint64_t cpacr_read(CPUARMState *env, const ARMCPRegInfo *ri)
791 {
792     /*
793      * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
794      * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
795      */
796     uint64_t value = env->cp15.cpacr_el1;
797 
798     if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
799         !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
800         value &= ~(R_CPACR_CP11_MASK | R_CPACR_CP10_MASK);
801     }
802     return value;
803 }
804 
805 
806 static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
807 {
808     /* Call cpacr_write() so that we reset with the correct RAO bits set
809      * for our CPU features.
810      */
811     cpacr_write(env, ri, 0);
812 }
813 
814 static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
815                                    bool isread)
816 {
817     if (arm_feature(env, ARM_FEATURE_V8)) {
818         /* Check if CPACR accesses are to be trapped to EL2 */
819         if (arm_current_el(env) == 1 && arm_is_el2_enabled(env) &&
820             FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TCPAC)) {
821             return CP_ACCESS_TRAP_EL2;
822         /* Check if CPACR accesses are to be trapped to EL3 */
823         } else if (arm_current_el(env) < 3 &&
824                    FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TCPAC)) {
825             return CP_ACCESS_TRAP_EL3;
826         }
827     }
828 
829     return CP_ACCESS_OK;
830 }
831 
832 static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
833                                   bool isread)
834 {
835     /* Check if CPTR accesses are set to trap to EL3 */
836     if (arm_current_el(env) == 2 &&
837         FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TCPAC)) {
838         return CP_ACCESS_TRAP_EL3;
839     }
840 
841     return CP_ACCESS_OK;
842 }
843 
844 static const ARMCPRegInfo v6_cp_reginfo[] = {
845     /* prefetch by MVA in v6, NOP in v7 */
846     { .name = "MVA_prefetch",
847       .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
848       .access = PL1_W, .type = ARM_CP_NOP },
849     /* We need to break the TB after ISB to execute self-modifying code
850      * correctly and also to take any pending interrupts immediately.
851      * So we use arm_cp_write_ignore() instead of the ARM_CP_NOP flag.
852      */
853     { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
854       .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
855     { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
856       .access = PL0_W, .type = ARM_CP_NOP },
857     { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
858       .access = PL0_W, .type = ARM_CP_NOP },
859     { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
860       .access = PL1_RW, .accessfn = access_tvm_trvm,
861       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
862                              offsetof(CPUARMState, cp15.ifar_ns) },
863       .resetvalue = 0, },
864     /* Watchpoint Fault Address Register : should actually only be present
865      * for 1136, 1176, 11MPCore.
866      */
867     { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
868       .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
869     { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
870       .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
871       .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
872       .resetfn = cpacr_reset, .writefn = cpacr_write, .readfn = cpacr_read },
873 };
874 
875 typedef struct pm_event {
876     uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */
877     /* If the event is supported on this CPU (used to generate PMCEID[01]) */
878     bool (*supported)(CPUARMState *);
879     /*
880      * Retrieve the current count of the underlying event. The programmed
881      * counters hold a difference from the return value of this function.
882      */
883     uint64_t (*get_count)(CPUARMState *);
884     /*
885      * Return how many nanoseconds it will take (at a minimum) for count events
886      * to occur. A negative value indicates the counter will never overflow, or
887      * that the counter has otherwise arranged for the overflow bit to be set
888      * and the PMU interrupt to be raised on overflow.
889      */
890     int64_t (*ns_per_count)(uint64_t);
891 } pm_event;
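/*
 * Worked example of the delta scheme (illustrative numbers): if get_count()
 * returns 1000 when a counter is started and the guest-visible value is 250,
 * pmevcntr_op_finish() leaves a delta of 1000 - 250 = 750; a later
 * pmevcntr_op_start() seeing get_count() == 1300 then reconstructs the
 * guest-visible value as 1300 - 750 = 550, i.e. the original 250 plus the
 * 300 events that occurred in between.
 */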
892 
893 static bool event_always_supported(CPUARMState *env)
894 {
895     return true;
896 }
897 
898 static uint64_t swinc_get_count(CPUARMState *env)
899 {
900     /*
901      * SW_INCR events are written directly to the pmevcntrs by writes to
902      * PMSWINC, so there is no underlying count maintained by the PMU itself
903      */
904     return 0;
905 }
906 
907 static int64_t swinc_ns_per(uint64_t ignored)
908 {
909     return -1;
910 }
911 
912 /*
913  * Return the underlying cycle count for the PMU cycle counters. In user-mode
914  * emulation there is no virtual clock, so we fall back to host CPU ticks.
915  */
916 static uint64_t cycles_get_count(CPUARMState *env)
917 {
918 #ifndef CONFIG_USER_ONLY
919     return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
920                    ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
921 #else
922     return cpu_get_host_ticks();
923 #endif
924 }
925 
926 #ifndef CONFIG_USER_ONLY
927 static int64_t cycles_ns_per(uint64_t cycles)
928 {
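    /*
     * With ARM_CPU_FREQ fixed at 1 GHz this is a 1:1 cycles-to-ns mapping;
     * note that the integer division would truncate to 0 if the frequency
     * ever became configurable (see the FIXME above) and were below 1 GHz.
     */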
929     return (ARM_CPU_FREQ / NANOSECONDS_PER_SECOND) * cycles;
930 }
931 
932 static bool instructions_supported(CPUARMState *env)
933 {
934     return icount_enabled() == 1; /* Precise instruction counting */
935 }
936 
937 static uint64_t instructions_get_count(CPUARMState *env)
938 {
939     return (uint64_t)icount_get_raw();
940 }
941 
942 static int64_t instructions_ns_per(uint64_t icount)
943 {
944     return icount_to_ns((int64_t)icount);
945 }
946 #endif
947 
948 static bool pmu_8_1_events_supported(CPUARMState *env)
949 {
950     /* For events which are supported in any v8.1 PMU */
951     return cpu_isar_feature(any_pmu_8_1, env_archcpu(env));
952 }
953 
954 static bool pmu_8_4_events_supported(CPUARMState *env)
955 {
956     /* For events which are supported in any v8.4 PMU */
957     return cpu_isar_feature(any_pmu_8_4, env_archcpu(env));
958 }
959 
960 static uint64_t zero_event_get_count(CPUARMState *env)
961 {
962     /* For events which never fire under QEMU, so their count is always zero */
963     return 0;
964 }
965 
966 static int64_t zero_event_ns_per(uint64_t cycles)
967 {
968     /* An event which never fires can never overflow */
969     return -1;
970 }
971 
972 static const pm_event pm_events[] = {
973     { .number = 0x000, /* SW_INCR */
974       .supported = event_always_supported,
975       .get_count = swinc_get_count,
976       .ns_per_count = swinc_ns_per,
977     },
978 #ifndef CONFIG_USER_ONLY
979     { .number = 0x008, /* INST_RETIRED, Instruction architecturally executed */
980       .supported = instructions_supported,
981       .get_count = instructions_get_count,
982       .ns_per_count = instructions_ns_per,
983     },
984     { .number = 0x011, /* CPU_CYCLES, Cycle */
985       .supported = event_always_supported,
986       .get_count = cycles_get_count,
987       .ns_per_count = cycles_ns_per,
988     },
989 #endif
990     { .number = 0x023, /* STALL_FRONTEND */
991       .supported = pmu_8_1_events_supported,
992       .get_count = zero_event_get_count,
993       .ns_per_count = zero_event_ns_per,
994     },
995     { .number = 0x024, /* STALL_BACKEND */
996       .supported = pmu_8_1_events_supported,
997       .get_count = zero_event_get_count,
998       .ns_per_count = zero_event_ns_per,
999     },
1000     { .number = 0x03c, /* STALL */
1001       .supported = pmu_8_4_events_supported,
1002       .get_count = zero_event_get_count,
1003       .ns_per_count = zero_event_ns_per,
1004     },
1005 };
1006 
1007 /*
1008  * Note: Before increasing MAX_EVENT_ID beyond 0x3f into the 0x40xx range of
1009  * events (i.e. the statistical profiling extension), this implementation
1010  * should first be updated to something sparse instead of the current
1011  * supported_event_map[] array.
1012  */
1013 #define MAX_EVENT_ID 0x3c
1014 #define UNSUPPORTED_EVENT UINT16_MAX
1015 static uint16_t supported_event_map[MAX_EVENT_ID + 1];
1016 
1017 /*
1018  * Called upon CPU initialization to initialize PMCEID[01]_EL0 and build a map
1019  * of ARM event numbers to indices in our pm_events array.
1020  *
1021  * Note: Events in the 0x40XX range are not currently supported.
1022  */
1023 void pmu_init(ARMCPU *cpu)
1024 {
1025     unsigned int i;
1026 
1027     /*
1028      * Empty supported_event_map and cpu->pmceid[01] before adding supported
1029      * events to them
1030      */
1031     for (i = 0; i < ARRAY_SIZE(supported_event_map); i++) {
1032         supported_event_map[i] = UNSUPPORTED_EVENT;
1033     }
1034     cpu->pmceid0 = 0;
1035     cpu->pmceid1 = 0;
1036 
1037     for (i = 0; i < ARRAY_SIZE(pm_events); i++) {
1038         const pm_event *cnt = &pm_events[i];
1039         assert(cnt->number <= MAX_EVENT_ID);
1040         /* We do not currently support events in the 0x40xx range */
1041         assert(cnt->number <= 0x3f);
1042 
1043         if (cnt->supported(&cpu->env)) {
1044             supported_event_map[cnt->number] = i;
1045             uint64_t event_mask = 1ULL << (cnt->number & 0x1f);
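            /* Event numbers 0x00-0x1f are advertised in PMCEID0 and
             * 0x20-0x3f in PMCEID1, hence the split on bit 5 below.
             */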
1046             if (cnt->number & 0x20) {
1047                 cpu->pmceid1 |= event_mask;
1048             } else {
1049                 cpu->pmceid0 |= event_mask;
1050             }
1051         }
1052     }
1053 }
1054 
1055 /*
1056  * Check at runtime whether a PMU event is supported for the current machine
1057  */
1058 static bool event_supported(uint16_t number)
1059 {
1060     if (number > MAX_EVENT_ID) {
1061         return false;
1062     }
1063     return supported_event_map[number] != UNSUPPORTED_EVENT;
1064 }
1065 
1066 static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
1067                                    bool isread)
1068 {
1069     /* User accessibility of the performance monitor registers is controlled
1070      * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
1071      * trapping to EL2 or EL3 for other accesses.
1072      */
1073     int el = arm_current_el(env);
1074     uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
1075 
1076     if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
1077         return CP_ACCESS_TRAP;
1078     }
1079     if (el < 2 && (mdcr_el2 & MDCR_TPM)) {
1080         return CP_ACCESS_TRAP_EL2;
1081     }
1082     if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
1083         return CP_ACCESS_TRAP_EL3;
1084     }
1085 
1086     return CP_ACCESS_OK;
1087 }
1088 
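/*
 * The accessors below implement the fine-grained PMUSERENR controls for EL0:
 * EN (bit 0) enables all PMU accesses, SW (bit 1) writes to PMSWINC,
 * CR (bit 2) reads of PMCCNTR, and ER (bit 3) reads of the event counters
 * and PMSELR. Each checks its specific bit before falling back to
 * pmreg_access().
 */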
1089 static CPAccessResult pmreg_access_xevcntr(CPUARMState *env,
1090                                            const ARMCPRegInfo *ri,
1091                                            bool isread)
1092 {
1093     /* ER: event counter read trap control */
1094     if (arm_feature(env, ARM_FEATURE_V8)
1095         && arm_current_el(env) == 0
1096         && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
1097         && isread) {
1098         return CP_ACCESS_OK;
1099     }
1100 
1101     return pmreg_access(env, ri, isread);
1102 }
1103 
1104 static CPAccessResult pmreg_access_swinc(CPUARMState *env,
1105                                          const ARMCPRegInfo *ri,
1106                                          bool isread)
1107 {
1108     /* SW: software increment write trap control */
1109     if (arm_feature(env, ARM_FEATURE_V8)
1110         && arm_current_el(env) == 0
1111         && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
1112         && !isread) {
1113         return CP_ACCESS_OK;
1114     }
1115 
1116     return pmreg_access(env, ri, isread);
1117 }
1118 
1119 static CPAccessResult pmreg_access_selr(CPUARMState *env,
1120                                         const ARMCPRegInfo *ri,
1121                                         bool isread)
1122 {
1123     /* ER: event counter read trap control */
1124     if (arm_feature(env, ARM_FEATURE_V8)
1125         && arm_current_el(env) == 0
1126         && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
1127         return CP_ACCESS_OK;
1128     }
1129 
1130     return pmreg_access(env, ri, isread);
1131 }
1132 
1133 static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
1134                                          const ARMCPRegInfo *ri,
1135                                          bool isread)
1136 {
1137     /* CR: cycle counter read trap control */
1138     if (arm_feature(env, ARM_FEATURE_V8)
1139         && arm_current_el(env) == 0
1140         && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
1141         && isread) {
1142         return CP_ACCESS_OK;
1143     }
1144 
1145     return pmreg_access(env, ri, isread);
1146 }
1147 
1148 /* Returns true if the counter (pass 31 for PMCCNTR) should count events,
1149  * given the current EL, security state, and register configuration.
1150  */
1151 static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
1152 {
1153     uint64_t filter;
1154     bool e, p, u, nsk, nsu, nsh, m;
1155     bool enabled, prohibited, filtered;
1156     bool secure = arm_is_secure(env);
1157     int el = arm_current_el(env);
1158     uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
1159     uint8_t hpmn = mdcr_el2 & MDCR_HPMN;
1160 
1161     if (!arm_feature(env, ARM_FEATURE_PMU)) {
1162         return false;
1163     }
1164 
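    /*
     * Counters below MDCR_EL2.HPMN (and the cycle counter, index 31) are
     * enabled by PMCR.E; any remaining counters are reserved for EL2 and
     * enabled by MDCR_EL2.HPME instead.
     */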
1165     if (!arm_feature(env, ARM_FEATURE_EL2) ||
1166             (counter < hpmn || counter == 31)) {
1167         e = env->cp15.c9_pmcr & PMCRE;
1168     } else {
1169         e = mdcr_el2 & MDCR_HPME;
1170     }
1171     enabled = e && (env->cp15.c9_pmcnten & (1 << counter));
1172 
1173     if (!secure) {
1174         if (el == 2 && (counter < hpmn || counter == 31)) {
1175             prohibited = mdcr_el2 & MDCR_HPMD;
1176         } else {
1177             prohibited = false;
1178         }
1179     } else {
1180         prohibited = arm_feature(env, ARM_FEATURE_EL3) &&
1181            !(env->cp15.mdcr_el3 & MDCR_SPME);
1182     }
1183 
1184     if (prohibited && counter == 31) {
1185         prohibited = env->cp15.c9_pmcr & PMCRDP;
1186     }
1187 
1188     if (counter == 31) {
1189         filter = env->cp15.pmccfiltr_el0;
1190     } else {
1191         filter = env->cp15.c14_pmevtyper[counter];
1192     }
1193 
1194     p   = filter & PMXEVTYPER_P;
1195     u   = filter & PMXEVTYPER_U;
1196     nsk = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSK);
1197     nsu = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSU);
1198     nsh = arm_feature(env, ARM_FEATURE_EL2) && (filter & PMXEVTYPER_NSH);
1199     m   = arm_el_is_aa64(env, 1) &&
1200               arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_M);
1201 
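    /*
     * P and U prohibit counting at (Secure) EL1 and EL0 respectively; NSK,
     * NSU and M count in Non-secure EL1, Non-secure EL0 and EL3 only when
     * they match the corresponding P/U choice (a mismatch filters the event),
     * while NSH directly enables counting at EL2.
     */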
1202     if (el == 0) {
1203         filtered = secure ? u : u != nsu;
1204     } else if (el == 1) {
1205         filtered = secure ? p : p != nsk;
1206     } else if (el == 2) {
1207         filtered = !nsh;
1208     } else { /* EL3 */
1209         filtered = m != p;
1210     }
1211 
1212     if (counter != 31) {
1213         /*
1214          * If not checking PMCCNTR, ensure the counter is configured to
1215          * count an event we support.
1216          */
1217         uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
1218         if (!event_supported(event)) {
1219             return false;
1220         }
1221     }
1222 
1223     return enabled && !prohibited && !filtered;
1224 }
1225 
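/*
 * Drive the PMU interrupt line: it is asserted while counting is enabled
 * (PMCR.E) and at least one overflow flag with its corresponding
 * interrupt-enable bit set is pending.
 */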
1226 static void pmu_update_irq(CPUARMState *env)
1227 {
1228     ARMCPU *cpu = env_archcpu(env);
1229     qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
1230             (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
1231 }
1232 
1233 /*
1234  * Ensure c15_ccnt is the guest-visible count so that operations such as
1235  * enabling/disabling the counter or filtering, modifying the count itself,
1236  * etc. can be done logically. This is essentially a no-op if the counter is
1237  * not enabled at the time of the call.
1238  */
1239 static void pmccntr_op_start(CPUARMState *env)
1240 {
1241     uint64_t cycles = cycles_get_count(env);
1242 
1243     if (pmu_counter_enabled(env, 31)) {
1244         uint64_t eff_cycles = cycles;
1245         if (env->cp15.c9_pmcr & PMCRD) {
1246             /* Increment once every 64 processor clock cycles */
1247             eff_cycles /= 64;
1248         }
1249 
1250         uint64_t new_pmccntr = eff_cycles - env->cp15.c15_ccnt_delta;
1251 
1252         uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ?
1253                                  1ull << 63 : 1ull << 31;
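        /*
         * Overflow is detected as a 1->0 transition of the top bit: bit 63
         * when PMCR.LC is set, otherwise bit 31.
         */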
1254         if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) {
1255             env->cp15.c9_pmovsr |= (1 << 31);
1256             pmu_update_irq(env);
1257         }
1258 
1259         env->cp15.c15_ccnt = new_pmccntr;
1260     }
1261     env->cp15.c15_ccnt_delta = cycles;
1262 }
1263 
1264 /*
1265  * If PMCCNTR is enabled, recalculate the delta between the clock and the
1266  * guest-visible count. A call to pmccntr_op_finish should follow every call to
1267  * pmccntr_op_start.
1268  */
1269 static void pmccntr_op_finish(CPUARMState *env)
1270 {
1271     if (pmu_counter_enabled(env, 31)) {
1272 #ifndef CONFIG_USER_ONLY
1273         /* Calculate when the counter will next overflow */
1274         uint64_t remaining_cycles = -env->cp15.c15_ccnt;
1275         if (!(env->cp15.c9_pmcr & PMCRLC)) {
1276             remaining_cycles = (uint32_t)remaining_cycles;
1277         }
1278         int64_t overflow_in = cycles_ns_per(remaining_cycles);
1279 
1280         if (overflow_in > 0) {
1281             int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
1282                 overflow_in;
1283             ARMCPU *cpu = env_archcpu(env);
1284             timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
1285         }
1286 #endif
1287 
1288         uint64_t prev_cycles = env->cp15.c15_ccnt_delta;
1289         if (env->cp15.c9_pmcr & PMCRD) {
1290             /* Increment once every 64 processor clock cycles */
1291             prev_cycles /= 64;
1292         }
1293         env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt;
1294     }
1295 }
1296 
1297 static void pmevcntr_op_start(CPUARMState *env, uint8_t counter)
1298 {
1299 
1300     uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
1301     uint64_t count = 0;
1302     if (event_supported(event)) {
1303         uint16_t event_idx = supported_event_map[event];
1304         count = pm_events[event_idx].get_count(env);
1305     }
1306 
1307     if (pmu_counter_enabled(env, counter)) {
1308         uint32_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter];
1309 
1310         if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & INT32_MIN) {
1311             env->cp15.c9_pmovsr |= (1 << counter);
1312             pmu_update_irq(env);
1313         }
1314         env->cp15.c14_pmevcntr[counter] = new_pmevcntr;
1315     }
1316     env->cp15.c14_pmevcntr_delta[counter] = count;
1317 }
1318 
1319 static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter)
1320 {
1321     if (pmu_counter_enabled(env, counter)) {
1322 #ifndef CONFIG_USER_ONLY
1323         uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
1324         uint16_t event_idx = supported_event_map[event];
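        /* Number of increments remaining before the 32-bit counter wraps */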
1325         uint64_t delta = UINT32_MAX -
1326             (uint32_t)env->cp15.c14_pmevcntr[counter] + 1;
1327         int64_t overflow_in = pm_events[event_idx].ns_per_count(delta);
1328 
1329         if (overflow_in > 0) {
1330             int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
1331                 overflow_in;
1332             ARMCPU *cpu = env_archcpu(env);
1333             timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
1334         }
1335 #endif
1336 
1337         env->cp15.c14_pmevcntr_delta[counter] -=
1338             env->cp15.c14_pmevcntr[counter];
1339     }
1340 }
1341 
1342 void pmu_op_start(CPUARMState *env)
1343 {
1344     unsigned int i;
1345     pmccntr_op_start(env);
1346     for (i = 0; i < pmu_num_counters(env); i++) {
1347         pmevcntr_op_start(env, i);
1348     }
1349 }
1350 
1351 void pmu_op_finish(CPUARMState *env)
1352 {
1353     unsigned int i;
1354     pmccntr_op_finish(env);
1355     for (i = 0; i < pmu_num_counters(env); i++) {
1356         pmevcntr_op_finish(env, i);
1357     }
1358 }
1359 
1360 void pmu_pre_el_change(ARMCPU *cpu, void *ignored)
1361 {
1362     pmu_op_start(&cpu->env);
1363 }
1364 
1365 void pmu_post_el_change(ARMCPU *cpu, void *ignored)
1366 {
1367     pmu_op_finish(&cpu->env);
1368 }
1369 
1370 void arm_pmu_timer_cb(void *opaque)
1371 {
1372     ARMCPU *cpu = opaque;
1373 
1374     /*
1375      * Update all the counter values based on the current underlying counts,
1376      * triggering interrupts to be raised, if necessary. pmu_op_finish() also
1377      * has the effect of setting the cpu->pmu_timer to the next earliest time a
1378      * counter may expire.
1379      */
1380     pmu_op_start(&cpu->env);
1381     pmu_op_finish(&cpu->env);
1382 }
1383 
1384 static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1385                        uint64_t value)
1386 {
1387     pmu_op_start(env);
1388 
1389     if (value & PMCRC) {
1390         /* The counter has been reset */
1391         env->cp15.c15_ccnt = 0;
1392     }
1393 
1394     if (value & PMCRP) {
1395         unsigned int i;
1396         for (i = 0; i < pmu_num_counters(env); i++) {
1397             env->cp15.c14_pmevcntr[i] = 0;
1398         }
1399     }
1400 
1401     env->cp15.c9_pmcr &= ~PMCR_WRITABLE_MASK;
1402     env->cp15.c9_pmcr |= (value & PMCR_WRITABLE_MASK);
1403 
1404     pmu_op_finish(env);
1405 }
1406 
1407 static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri,
1408                           uint64_t value)
1409 {
1410     unsigned int i;
1411     for (i = 0; i < pmu_num_counters(env); i++) {
1412         /* Increment a counter's count iff: */
1413         if ((value & (1 << i)) && /* counter's bit is set */
1414                 /* counter is enabled and not filtered */
1415                 pmu_counter_enabled(env, i) &&
1416                 /* counter is SW_INCR */
1417                 (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
1418             pmevcntr_op_start(env, i);
1419 
1420             /*
1421              * Detect if this write causes an overflow since we can't predict
1422              * PMSWINC overflows like we can for other events
1423              */
1424             uint32_t new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;
1425 
1426             if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & INT32_MIN) {
1427                 env->cp15.c9_pmovsr |= (1 << i);
1428                 pmu_update_irq(env);
1429             }
1430 
1431             env->cp15.c14_pmevcntr[i] = new_pmswinc;
1432 
1433             pmevcntr_op_finish(env, i);
1434         }
1435     }
1436 }
1437 
1438 static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1439 {
1440     uint64_t ret;
1441     pmccntr_op_start(env);
1442     ret = env->cp15.c15_ccnt;
1443     pmccntr_op_finish(env);
1444     return ret;
1445 }
1446 
1447 static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1448                          uint64_t value)
1449 {
1450     /* The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
1451      * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; instead of
1452      * validating it at write time, we check PMSELR.SEL when PMXEVTYPER and
1453      * PMXEVCNTR are accessed.
1454      */
1455     env->cp15.c9_pmselr = value & 0x1f;
1456 }
1457 
1458 static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1459                         uint64_t value)
1460 {
1461     pmccntr_op_start(env);
1462     env->cp15.c15_ccnt = value;
1463     pmccntr_op_finish(env);
1464 }
1465 
1466 static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
1467                             uint64_t value)
1468 {
1469     uint64_t cur_val = pmccntr_read(env, NULL);
1470 
1471     pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
1472 }
1473 
1474 static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1475                             uint64_t value)
1476 {
1477     pmccntr_op_start(env);
1478     env->cp15.pmccfiltr_el0 = value & PMCCFILTR_EL0;
1479     pmccntr_op_finish(env);
1480 }
1481 
1482 static void pmccfiltr_write_a32(CPUARMState *env, const ARMCPRegInfo *ri,
1483                             uint64_t value)
1484 {
1485     pmccntr_op_start(env);
1486     /* M is not accessible from AArch32 */
1487     env->cp15.pmccfiltr_el0 = (env->cp15.pmccfiltr_el0 & PMCCFILTR_M) |
1488         (value & PMCCFILTR);
1489     pmccntr_op_finish(env);
1490 }
1491 
1492 static uint64_t pmccfiltr_read_a32(CPUARMState *env, const ARMCPRegInfo *ri)
1493 {
1494     /* M is not visible in AArch32 */
1495     return env->cp15.pmccfiltr_el0 & PMCCFILTR;
1496 }
1497 
1498 static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1499                             uint64_t value)
1500 {
1501     value &= pmu_counter_mask(env);
1502     env->cp15.c9_pmcnten |= value;
1503 }
1504 
1505 static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1506                              uint64_t value)
1507 {
1508     value &= pmu_counter_mask(env);
1509     env->cp15.c9_pmcnten &= ~value;
1510 }
1511 
1512 static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1513                          uint64_t value)
1514 {
1515     value &= pmu_counter_mask(env);
1516     env->cp15.c9_pmovsr &= ~value;
1517     pmu_update_irq(env);
1518 }
1519 
1520 static void pmovsset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1521                          uint64_t value)
1522 {
1523     value &= pmu_counter_mask(env);
1524     env->cp15.c9_pmovsr |= value;
1525     pmu_update_irq(env);
1526 }
1527 
1528 static void pmevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
1529                              uint64_t value, const uint8_t counter)
1530 {
1531     if (counter == 31) {
1532         pmccfiltr_write(env, ri, value);
1533     } else if (counter < pmu_num_counters(env)) {
1534         pmevcntr_op_start(env, counter);
1535 
1536         /*
1537          * If this counter's event type is changing, store the current
1538          * underlying count for the new type in c14_pmevcntr_delta[counter] so
1539          * pmevcntr_op_finish has the correct baseline when it converts back to
1540          * a delta.
1541          */
1542         uint16_t old_event = env->cp15.c14_pmevtyper[counter] &
1543             PMXEVTYPER_EVTCOUNT;
1544         uint16_t new_event = value & PMXEVTYPER_EVTCOUNT;
1545         if (old_event != new_event) {
1546             uint64_t count = 0;
1547             if (event_supported(new_event)) {
1548                 uint16_t event_idx = supported_event_map[new_event];
1549                 count = pm_events[event_idx].get_count(env);
1550             }
1551             env->cp15.c14_pmevcntr_delta[counter] = count;
1552         }
1553 
1554         env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK;
1555         pmevcntr_op_finish(env, counter);
1556     }
1557     /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
1558      * PMSELR value is equal to or greater than the number of implemented
1559      * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
1560      */
1561 }
1562 
1563 static uint64_t pmevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri,
1564                                const uint8_t counter)
1565 {
1566     if (counter == 31) {
1567         return env->cp15.pmccfiltr_el0;
1568     } else if (counter < pmu_num_counters(env)) {
1569         return env->cp15.c14_pmevtyper[counter];
1570     } else {
1571         /*
1572          * We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
1573          * are CONSTRAINED UNPREDICTABLE. See comments in pmevtyper_write().
1574          */
1575         return 0;
1576     }
1577 }
1578 
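/*
 * Descriptive note (added): PMEVTYPER<n> and PMEVCNTR<n> encode the counter
 * index n as CRm[1:0]:opc2[2:0], so the writefn/readfn wrappers below recover
 * it with ((crm & 3) << 3) | (opc2 & 7).
 */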
1579 static void pmevtyper_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
1580                               uint64_t value)
1581 {
1582     uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1583     pmevtyper_write(env, ri, value, counter);
1584 }
1585 
1586 static void pmevtyper_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
1587                                uint64_t value)
1588 {
1589     uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1590     env->cp15.c14_pmevtyper[counter] = value;
1591 
1592     /*
1593      * pmevtyper_rawwrite is called between a pair of pmu_op_start and
1594      * pmu_op_finish calls when loading saved state for a migration. Because
1595      * we're potentially updating the type of event here, the value written to
1596      * c14_pmevcntr_delta by the preceding pmu_op_start call may be for a
1597      * different counter type. Therefore, we need to set this value to the
1598      * current count for the counter type we're writing so that pmu_op_finish
1599      * has the correct count for its calculation.
1600      */
1601     uint16_t event = value & PMXEVTYPER_EVTCOUNT;
1602     if (event_supported(event)) {
1603         uint16_t event_idx = supported_event_map[event];
1604         env->cp15.c14_pmevcntr_delta[counter] =
1605             pm_events[event_idx].get_count(env);
1606     }
1607 }
1608 
1609 static uint64_t pmevtyper_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
1610 {
1611     uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1612     return pmevtyper_read(env, ri, counter);
1613 }
1614 
1615 static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
1616                              uint64_t value)
1617 {
1618     pmevtyper_write(env, ri, value, env->cp15.c9_pmselr & 31);
1619 }
1620 
1621 static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
1622 {
1623     return pmevtyper_read(env, ri, env->cp15.c9_pmselr & 31);
1624 }
1625 
1626 static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1627                              uint64_t value, uint8_t counter)
1628 {
1629     if (counter < pmu_num_counters(env)) {
1630         pmevcntr_op_start(env, counter);
1631         env->cp15.c14_pmevcntr[counter] = value;
1632         pmevcntr_op_finish(env, counter);
1633     }
1634     /*
1635      * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
1636      * are CONSTRAINED UNPREDICTABLE.
1637      */
1638 }
1639 
1640 static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri,
1641                               uint8_t counter)
1642 {
1643     if (counter < pmu_num_counters(env)) {
1644         uint64_t ret;
1645         pmevcntr_op_start(env, counter);
1646         ret = env->cp15.c14_pmevcntr[counter];
1647         pmevcntr_op_finish(env, counter);
1648         return ret;
1649     } else {
1650         /* We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
1651          * are CONSTRAINED UNPREDICTABLE. */
1652         return 0;
1653     }
1654 }
1655 
1656 static void pmevcntr_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
1657                              uint64_t value)
1658 {
1659     uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1660     pmevcntr_write(env, ri, value, counter);
1661 }
1662 
1663 static uint64_t pmevcntr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
1664 {
1665     uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1666     return pmevcntr_read(env, ri, counter);
1667 }
1668 
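/*
 * Descriptive note (added): the raw accessors for PMEVCNTR<n> are used when
 * saving and loading state for migration, where the counter value is
 * transferred verbatim. The pmu_op_start/pmu_op_finish calls which bracket
 * the state transfer already handle the delta accounting, so these accessors
 * deliberately do not go through pmevcntr_op_start/finish.
 */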
1669 static void pmevcntr_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
1670                              uint64_t value)
1671 {
1672     uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1673     assert(counter < pmu_num_counters(env));
1674     env->cp15.c14_pmevcntr[counter] = value;
1676 }
1677 
1678 static uint64_t pmevcntr_rawread(CPUARMState *env, const ARMCPRegInfo *ri)
1679 {
1680     uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1681     assert(counter < pmu_num_counters(env));
1682     return env->cp15.c14_pmevcntr[counter];
1683 }
1684 
1685 static void pmxevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1686                              uint64_t value)
1687 {
1688     pmevcntr_write(env, ri, value, env->cp15.c9_pmselr & 31);
1689 }
1690 
1691 static uint64_t pmxevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1692 {
1693     return pmevcntr_read(env, ri, env->cp15.c9_pmselr & 31);
1694 }
1695 
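/*
 * Descriptive note (added): ARMv8 added the ER, CR and SW bits ([3:1]) to
 * PMUSERENR; before v8 only the EN bit ([0]) is defined, so mask accordingly.
 */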
1696 static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1697                             uint64_t value)
1698 {
1699     if (arm_feature(env, ARM_FEATURE_V8)) {
1700         env->cp15.c9_pmuserenr = value & 0xf;
1701     } else {
1702         env->cp15.c9_pmuserenr = value & 1;
1703     }
1704 }
1705 
1706 static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1707                              uint64_t value)
1708 {
1709     /* Mask to the implemented counters, including the cycle counter (C) bit */
1710     value &= pmu_counter_mask(env);
1711     env->cp15.c9_pminten |= value;
1712     pmu_update_irq(env);
1713 }
1714 
1715 static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1716                              uint64_t value)
1717 {
1718     value &= pmu_counter_mask(env);
1719     env->cp15.c9_pminten &= ~value;
1720     pmu_update_irq(env);
1721 }
1722 
1723 static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
1724                        uint64_t value)
1725 {
1726     /* Note that even though the AArch64 view of this register has bits
1727      * [10:0] all RES0 we can only mask the bottom 5, to comply with the
1728      * architectural requirements for bits which are RES0 only in some
1729      * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
1730      * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
1731      */
1732     raw_write(env, ri, value & ~0x1FULL);
1733 }
1734 
1735 static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
1736 {
1737     /* Begin with base v8.0 state.  */
1738     uint32_t valid_mask = 0x3fff;
1739     ARMCPU *cpu = env_archcpu(env);
1740 
1741     /*
1742      * Because SCR_EL3 is the "real" cpreg and SCR is the alias, reset always
1743      * passes the reginfo for SCR_EL3, which has type ARM_CP_STATE_AA64.
1744      * Instead, choose the format based on the mode of EL3.
1745      */
1746     if (arm_el_is_aa64(env, 3)) {
1747         value |= SCR_FW | SCR_AW;      /* RES1 */
1748         valid_mask &= ~SCR_NET;        /* RES0 */
1749 
1750         if (!cpu_isar_feature(aa64_aa32_el1, cpu) &&
1751             !cpu_isar_feature(aa64_aa32_el2, cpu)) {
1752             value |= SCR_RW;           /* RAO/WI */
1753         }
1754         if (cpu_isar_feature(aa64_ras, cpu)) {
1755             valid_mask |= SCR_TERR;
1756         }
1757         if (cpu_isar_feature(aa64_lor, cpu)) {
1758             valid_mask |= SCR_TLOR;
1759         }
1760         if (cpu_isar_feature(aa64_pauth, cpu)) {
1761             valid_mask |= SCR_API | SCR_APK;
1762         }
1763         if (cpu_isar_feature(aa64_sel2, cpu)) {
1764             valid_mask |= SCR_EEL2;
1765         }
1766         if (cpu_isar_feature(aa64_mte, cpu)) {
1767             valid_mask |= SCR_ATA;
1768         }
1769         if (cpu_isar_feature(aa64_scxtnum, cpu)) {
1770             valid_mask |= SCR_ENSCXT;
1771         }
1772         if (cpu_isar_feature(aa64_doublefault, cpu)) {
1773             valid_mask |= SCR_EASE | SCR_NMEA;
1774         }
1775     } else {
1776         valid_mask &= ~(SCR_RW | SCR_ST);
1777         if (cpu_isar_feature(aa32_ras, cpu)) {
1778             valid_mask |= SCR_TERR;
1779         }
1780     }
1781 
1782     if (!arm_feature(env, ARM_FEATURE_EL2)) {
1783         valid_mask &= ~SCR_HCE;
1784 
1785         /* On ARMv7, SMD (or SCD as it is called in v7) is only
1786          * supported if EL2 exists. The bit is UNK/SBZP when
1787          * EL2 is unavailable. In QEMU ARMv7, we force it to always zero
1788          * when EL2 is unavailable.
1789          * On ARMv8, this bit is always available.
1790          */
1791         if (arm_feature(env, ARM_FEATURE_V7) &&
1792             !arm_feature(env, ARM_FEATURE_V8)) {
1793             valid_mask &= ~SCR_SMD;
1794         }
1795     }
1796 
1797     /* Clear all-context RES0 bits.  */
1798     value &= valid_mask;
1799     raw_write(env, ri, value);
1800 }
1801 
1802 static void scr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1803 {
1804     /*
1805      * scr_write will set the RES1 bits on an AArch64-only CPU.
1806      * The reset value will be 0x30 on an AArch64-only CPU and 0 otherwise.
1807      */
1808     scr_write(env, ri, 0);
1809 }
1810 
1811 static CPAccessResult access_aa64_tid2(CPUARMState *env,
1812                                        const ARMCPRegInfo *ri,
1813                                        bool isread)
1814 {
1815     if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID2)) {
1816         return CP_ACCESS_TRAP_EL2;
1817     }
1818 
1819     return CP_ACCESS_OK;
1820 }
1821 
1822 static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1823 {
1824     ARMCPU *cpu = env_archcpu(env);
1825 
1826     /* Acquire the CSSELR index from the bank corresponding to the CCSIDR
1827      * bank
1828      */
1829     uint32_t index = A32_BANKED_REG_GET(env, csselr,
1830                                         ri->secure & ARM_CP_SECSTATE_S);
1831 
1832     return cpu->ccsidr[index];
1833 }
1834 
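/*
 * Descriptive note (added): only CSSELR.{InD,Level} (bits [3:0]) are kept;
 * writes to the higher bits are ignored.
 */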
1835 static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1836                          uint64_t value)
1837 {
1838     raw_write(env, ri, value & 0xf);
1839 }
1840 
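/*
 * Descriptive note (added): ISR_EL1 reports the pending state of IRQ, FIQ
 * and SError as observed at the current Exception level. When HCR_EL2.IMO
 * or FMO routes IRQ or FIQ to EL2, the virtual interrupt line (VIRQ/VFIQ)
 * is reported instead of the physical one; a pending virtual SError (VSERR)
 * is reported only when HCR_EL2.AMO is set.
 */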
1841 static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1842 {
1843     CPUState *cs = env_cpu(env);
1844     bool el1 = arm_current_el(env) == 1;
1845     uint64_t hcr_el2 = el1 ? arm_hcr_el2_eff(env) : 0;
1846     uint64_t ret = 0;
1847 
1848     if (hcr_el2 & HCR_IMO) {
1849         if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
1850             ret |= CPSR_I;
1851         }
1852     } else {
1853         if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
1854             ret |= CPSR_I;
1855         }
1856     }
1857 
1858     if (hcr_el2 & HCR_FMO) {
1859         if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) {
1860             ret |= CPSR_F;
1861         }
1862     } else {
1863         if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
1864             ret |= CPSR_F;
1865         }
1866     }
1867 
1868     if (hcr_el2 & HCR_AMO) {
1869         if (cs->interrupt_request & CPU_INTERRUPT_VSERR) {
1870             ret |= CPSR_A;
1871         }
1872     }
1873 
1874     return ret;
1875 }
1876 
1877 static CPAccessResult access_aa64_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
1878                                        bool isread)
1879 {
1880     if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID1)) {
1881         return CP_ACCESS_TRAP_EL2;
1882     }
1883 
1884     return CP_ACCESS_OK;
1885 }
1886 
1887 static CPAccessResult access_aa32_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
1888                                        bool isread)
1889 {
1890     if (arm_feature(env, ARM_FEATURE_V8)) {
1891         return access_aa64_tid1(env, ri, isread);
1892     }
1893 
1894     return CP_ACCESS_OK;
1895 }
1896 
1897 static const ARMCPRegInfo v7_cp_reginfo[] = {
1898     /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
1899     { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
1900       .access = PL1_W, .type = ARM_CP_NOP },
1901     /* Performance monitors are implementation defined in v7,
1902      * but with an ARM recommended set of registers, which we
1903      * follow.
1904      *
1905      * Performance registers fall into three categories:
1906      *  (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
1907      *  (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
1908      *  (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
1909      * For the cases controlled by PMUSERENR we must set .access to PL0_RW
1910      * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
1911      */
1912     { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
1913       .access = PL0_RW, .type = ARM_CP_ALIAS,
1914       .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
1915       .writefn = pmcntenset_write,
1916       .accessfn = pmreg_access,
1917       .raw_writefn = raw_write },
1918     { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64,
1919       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
1920       .access = PL0_RW, .accessfn = pmreg_access,
1921       .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
1922       .writefn = pmcntenset_write, .raw_writefn = raw_write },
1923     { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
1924       .access = PL0_RW,
1925       .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
1926       .accessfn = pmreg_access,
1927       .writefn = pmcntenclr_write,
1928       .type = ARM_CP_ALIAS },
1929     { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
1930       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
1931       .access = PL0_RW, .accessfn = pmreg_access,
1932       .type = ARM_CP_ALIAS,
1933       .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
1934       .writefn = pmcntenclr_write },
1935     { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
1936       .access = PL0_RW, .type = ARM_CP_IO,
1937       .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
1938       .accessfn = pmreg_access,
1939       .writefn = pmovsr_write,
1940       .raw_writefn = raw_write },
1941     { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
1942       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
1943       .access = PL0_RW, .accessfn = pmreg_access,
1944       .type = ARM_CP_ALIAS | ARM_CP_IO,
1945       .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
1946       .writefn = pmovsr_write,
1947       .raw_writefn = raw_write },
1948     { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
1949       .access = PL0_W, .accessfn = pmreg_access_swinc,
1950       .type = ARM_CP_NO_RAW | ARM_CP_IO,
1951       .writefn = pmswinc_write },
1952     { .name = "PMSWINC_EL0", .state = ARM_CP_STATE_AA64,
1953       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 4,
1954       .access = PL0_W, .accessfn = pmreg_access_swinc,
1955       .type = ARM_CP_NO_RAW | ARM_CP_IO,
1956       .writefn = pmswinc_write },
1957     { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
1958       .access = PL0_RW, .type = ARM_CP_ALIAS,
1959       .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr),
1960       .accessfn = pmreg_access_selr, .writefn = pmselr_write,
1961       .raw_writefn = raw_write},
1962     { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64,
1963       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5,
1964       .access = PL0_RW, .accessfn = pmreg_access_selr,
1965       .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr),
1966       .writefn = pmselr_write, .raw_writefn = raw_write, },
1967     { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
1968       .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO,
1969       .readfn = pmccntr_read, .writefn = pmccntr_write32,
1970       .accessfn = pmreg_access_ccntr },
1971     { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
1972       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
1973       .access = PL0_RW, .accessfn = pmreg_access_ccntr,
1974       .type = ARM_CP_IO,
1975       .fieldoffset = offsetof(CPUARMState, cp15.c15_ccnt),
1976       .readfn = pmccntr_read, .writefn = pmccntr_write,
1977       .raw_readfn = raw_read, .raw_writefn = raw_write, },
1978     { .name = "PMCCFILTR", .cp = 15, .opc1 = 0, .crn = 14, .crm = 15, .opc2 = 7,
1979       .writefn = pmccfiltr_write_a32, .readfn = pmccfiltr_read_a32,
1980       .access = PL0_RW, .accessfn = pmreg_access,
1981       .type = ARM_CP_ALIAS | ARM_CP_IO,
1982       .resetvalue = 0, },
1983     { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
1984       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
1985       .writefn = pmccfiltr_write, .raw_writefn = raw_write,
1986       .access = PL0_RW, .accessfn = pmreg_access,
1987       .type = ARM_CP_IO,
1988       .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
1989       .resetvalue = 0, },
1990     { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
1991       .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
1992       .accessfn = pmreg_access,
1993       .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
1994     { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
1995       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1,
1996       .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
1997       .accessfn = pmreg_access,
1998       .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
1999     { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
2000       .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2001       .accessfn = pmreg_access_xevcntr,
2002       .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
2003     { .name = "PMXEVCNTR_EL0", .state = ARM_CP_STATE_AA64,
2004       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 2,
2005       .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2006       .accessfn = pmreg_access_xevcntr,
2007       .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
2008     { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
2009       .access = PL0_R | PL1_RW, .accessfn = access_tpm,
2010       .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr),
2011       .resetvalue = 0,
2012       .writefn = pmuserenr_write, .raw_writefn = raw_write },
2013     { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
2014       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
2015       .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
2016       .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
2017       .resetvalue = 0,
2018       .writefn = pmuserenr_write, .raw_writefn = raw_write },
2019     { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
2020       .access = PL1_RW, .accessfn = access_tpm,
2021       .type = ARM_CP_ALIAS | ARM_CP_IO,
2022       .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten),
2023       .resetvalue = 0,
2024       .writefn = pmintenset_write, .raw_writefn = raw_write },
2025     { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64,
2026       .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1,
2027       .access = PL1_RW, .accessfn = access_tpm,
2028       .type = ARM_CP_IO,
2029       .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
2030       .writefn = pmintenset_write, .raw_writefn = raw_write,
2031       .resetvalue = 0x0 },
2032     { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
2033       .access = PL1_RW, .accessfn = access_tpm,
2034       .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_NO_RAW,
2035       .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
2036       .writefn = pmintenclr_write, },
2037     { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
2038       .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
2039       .access = PL1_RW, .accessfn = access_tpm,
2040       .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_NO_RAW,
2041       .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
2042       .writefn = pmintenclr_write },
2043     { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
2044       .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
2045       .access = PL1_R,
2046       .accessfn = access_aa64_tid2,
2047       .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
2048     { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
2049       .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
2050       .access = PL1_RW,
2051       .accessfn = access_aa64_tid2,
2052       .writefn = csselr_write, .resetvalue = 0,
2053       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
2054                              offsetof(CPUARMState, cp15.csselr_ns) } },
2055     /* Auxiliary ID register: this actually has an IMPDEF value but for now
2056      * just RAZ for all cores:
2057      */
2058     { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
2059       .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
2060       .access = PL1_R, .type = ARM_CP_CONST,
2061       .accessfn = access_aa64_tid1,
2062       .resetvalue = 0 },
2063     /* Auxiliary fault status registers: these also are IMPDEF, and we
2064      * choose to RAZ/WI for all cores.
2065      */
2066     { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
2067       .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
2068       .access = PL1_RW, .accessfn = access_tvm_trvm,
2069       .type = ARM_CP_CONST, .resetvalue = 0 },
2070     { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
2071       .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
2072       .access = PL1_RW, .accessfn = access_tvm_trvm,
2073       .type = ARM_CP_CONST, .resetvalue = 0 },
2074     /* MAIR can just read-as-written because we don't implement caches
2075      * and so don't need to care about memory attributes.
2076      */
2077     { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
2078       .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
2079       .access = PL1_RW, .accessfn = access_tvm_trvm,
2080       .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
2081       .resetvalue = 0 },
2082     { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
2083       .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
2084       .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
2085       .resetvalue = 0 },
2086     /* For non-long-descriptor page tables these are PRRR and NMRR;
2087      * regardless they still act as reads-as-written for QEMU.
2088      */
2089      /* MAIR0/1 are defined separately from their 64-bit counterpart which
2090       * allows them to assign the correct fieldoffset based on the endianness
2091       * handled in the field definitions.
2092       */
2093     { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
2094       .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
2095       .access = PL1_RW, .accessfn = access_tvm_trvm,
2096       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
2097                              offsetof(CPUARMState, cp15.mair0_ns) },
2098       .resetfn = arm_cp_reset_ignore },
2099     { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
2100       .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1,
2101       .access = PL1_RW, .accessfn = access_tvm_trvm,
2102       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
2103                              offsetof(CPUARMState, cp15.mair1_ns) },
2104       .resetfn = arm_cp_reset_ignore },
2105     { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
2106       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
2107       .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
2108     /* 32 bit ITLB invalidates */
2109     { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
2110       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2111       .writefn = tlbiall_write },
2112     { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
2113       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2114       .writefn = tlbimva_write },
2115     { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
2116       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2117       .writefn = tlbiasid_write },
2118     /* 32 bit DTLB invalidates */
2119     { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
2120       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2121       .writefn = tlbiall_write },
2122     { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
2123       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2124       .writefn = tlbimva_write },
2125     { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
2126       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2127       .writefn = tlbiasid_write },
2128     /* 32 bit TLB invalidates */
2129     { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
2130       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2131       .writefn = tlbiall_write },
2132     { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
2133       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2134       .writefn = tlbimva_write },
2135     { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
2136       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2137       .writefn = tlbiasid_write },
2138     { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
2139       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2140       .writefn = tlbimvaa_write },
2141 };
2142 
2143 static const ARMCPRegInfo v7mp_cp_reginfo[] = {
2144     /* 32 bit TLB invalidates, Inner Shareable */
2145     { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
2146       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2147       .writefn = tlbiall_is_write },
2148     { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
2149       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2150       .writefn = tlbimva_is_write },
2151     { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
2152       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2153       .writefn = tlbiasid_is_write },
2154     { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
2155       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2156       .writefn = tlbimvaa_is_write },
2157 };
2158 
2159 static const ARMCPRegInfo pmovsset_cp_reginfo[] = {
2160     /* PMOVSSET is not implemented in v7 before v7ve */
2161     { .name = "PMOVSSET", .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 3,
2162       .access = PL0_RW, .accessfn = pmreg_access,
2163       .type = ARM_CP_ALIAS | ARM_CP_IO,
2164       .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
2165       .writefn = pmovsset_write,
2166       .raw_writefn = raw_write },
2167     { .name = "PMOVSSET_EL0", .state = ARM_CP_STATE_AA64,
2168       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 3,
2169       .access = PL0_RW, .accessfn = pmreg_access,
2170       .type = ARM_CP_ALIAS | ARM_CP_IO,
2171       .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
2172       .writefn = pmovsset_write,
2173       .raw_writefn = raw_write },
2174 };
2175 
2176 static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2177                         uint64_t value)
2178 {
2179     value &= 1;
2180     env->teecr = value;
2181 }
2182 
2183 static CPAccessResult teecr_access(CPUARMState *env, const ARMCPRegInfo *ri,
2184                                    bool isread)
2185 {
2186     /*
2187      * HSTR.TTEE only exists in v7A, not v8A, but v8A doesn't have T2EE
2188      * at all, so we don't need to check whether we're v8A.
2189      */
2190     if (arm_current_el(env) < 2 && !arm_is_secure_below_el3(env) &&
2191         (env->cp15.hstr_el2 & HSTR_TTEE)) {
2192         return CP_ACCESS_TRAP_EL2;
2193     }
2194     return CP_ACCESS_OK;
2195 }
2196 
2197 static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
2198                                     bool isread)
2199 {
2200     if (arm_current_el(env) == 0 && (env->teecr & 1)) {
2201         return CP_ACCESS_TRAP;
2202     }
2203     return teecr_access(env, ri, isread);
2204 }
2205 
2206 static const ARMCPRegInfo t2ee_cp_reginfo[] = {
2207     { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
2208       .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
2209       .resetvalue = 0,
2210       .writefn = teecr_write, .accessfn = teecr_access },
2211     { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
2212       .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
2213       .accessfn = teehbr_access, .resetvalue = 0 },
2214 };
2215 
2216 static const ARMCPRegInfo v6k_cp_reginfo[] = {
2217     { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
2218       .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
2219       .access = PL0_RW,
2220       .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
2221     { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
2222       .access = PL0_RW,
2223       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
2224                              offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
2225       .resetfn = arm_cp_reset_ignore },
2226     { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
2227       .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
2228       .access = PL0_R|PL1_W,
2229       .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
2230       .resetvalue = 0},
2231     { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
2232       .access = PL0_R|PL1_W,
2233       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
2234                              offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
2235       .resetfn = arm_cp_reset_ignore },
2236     { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
2237       .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
2238       .access = PL1_RW,
2239       .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
2240     { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
2241       .access = PL1_RW,
2242       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
2243                              offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
2244       .resetvalue = 0 },
2245 };
2246 
2247 #ifndef CONFIG_USER_ONLY
2248 
2249 static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
2250                                        bool isread)
2251 {
2252     /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
2253      * Writable only at the highest implemented exception level.
2254      */
2255     int el = arm_current_el(env);
2256     uint64_t hcr;
2257     uint32_t cntkctl;
2258 
2259     switch (el) {
2260     case 0:
2261         hcr = arm_hcr_el2_eff(env);
2262         if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
2263             cntkctl = env->cp15.cnthctl_el2;
2264         } else {
2265             cntkctl = env->cp15.c14_cntkctl;
2266         }
2267         if (!extract32(cntkctl, 0, 2)) {
2268             return CP_ACCESS_TRAP;
2269         }
2270         break;
2271     case 1:
2272         if (!isread && ri->state == ARM_CP_STATE_AA32 &&
2273             arm_is_secure_below_el3(env)) {
2274             /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
2275             return CP_ACCESS_TRAP_UNCATEGORIZED;
2276         }
2277         break;
2278     case 2:
2279     case 3:
2280         break;
2281     }
2282 
2283     if (!isread && el < arm_highest_el(env)) {
2284         return CP_ACCESS_TRAP_UNCATEGORIZED;
2285     }
2286 
2287     return CP_ACCESS_OK;
2288 }
2289 
2290 static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
2291                                         bool isread)
2292 {
2293     unsigned int cur_el = arm_current_el(env);
2294     bool has_el2 = arm_is_el2_enabled(env);
2295     uint64_t hcr = arm_hcr_el2_eff(env);
2296 
2297     switch (cur_el) {
2298     case 0:
2299         /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]CTEN. */
2300         if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
2301             return (extract32(env->cp15.cnthctl_el2, timeridx, 1)
2302                     ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
2303         }
2304 
2305         /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
2306         if (!extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
2307             return CP_ACCESS_TRAP;
2308         }
2309 
2310         /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PCTEN. */
2311         if (hcr & HCR_E2H) {
2312             if (timeridx == GTIMER_PHYS &&
2313                 !extract32(env->cp15.cnthctl_el2, 10, 1)) {
2314                 return CP_ACCESS_TRAP_EL2;
2315             }
2316         } else {
2317             /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */
2318             if (has_el2 && timeridx == GTIMER_PHYS &&
2319                 !extract32(env->cp15.cnthctl_el2, 1, 1)) {
2320                 return CP_ACCESS_TRAP_EL2;
2321             }
2322         }
2323         break;
2324 
2325     case 1:
2326         /* Check CNTHCTL_EL2.EL1PCTEN, which changes location based on E2H. */
2327         if (has_el2 && timeridx == GTIMER_PHYS &&
2328             (hcr & HCR_E2H
2329              ? !extract32(env->cp15.cnthctl_el2, 10, 1)
2330              : !extract32(env->cp15.cnthctl_el2, 0, 1))) {
2331             return CP_ACCESS_TRAP_EL2;
2332         }
2333         break;
2334     }
2335     return CP_ACCESS_OK;
2336 }
2337 
2338 static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
2339                                       bool isread)
2340 {
2341     unsigned int cur_el = arm_current_el(env);
2342     bool has_el2 = arm_is_el2_enabled(env);
2343     uint64_t hcr = arm_hcr_el2_eff(env);
2344 
2345     switch (cur_el) {
2346     case 0:
2347         if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
2348             /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]TEN. */
2349             return (extract32(env->cp15.cnthctl_el2, 9 - timeridx, 1)
2350                     ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
2351         }
2352 
2353         /*
2354          * CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from
2355          * EL0 if EL0[PV]TEN is zero.
2356          */
2357         if (!extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
2358             return CP_ACCESS_TRAP;
2359         }
2360         /* fall through */
2361 
2362     case 1:
2363         if (has_el2 && timeridx == GTIMER_PHYS) {
2364             if (hcr & HCR_E2H) {
2365                 /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PTEN. */
2366                 if (!extract32(env->cp15.cnthctl_el2, 11, 1)) {
2367                     return CP_ACCESS_TRAP_EL2;
2368                 }
2369             } else {
2370                 /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */
2371                 if (!extract32(env->cp15.cnthctl_el2, 1, 1)) {
2372                     return CP_ACCESS_TRAP_EL2;
2373                 }
2374             }
2375         }
2376         break;
2377     }
2378     return CP_ACCESS_OK;
2379 }
2380 
2381 static CPAccessResult gt_pct_access(CPUARMState *env,
2382                                     const ARMCPRegInfo *ri,
2383                                     bool isread)
2384 {
2385     return gt_counter_access(env, GTIMER_PHYS, isread);
2386 }
2387 
2388 static CPAccessResult gt_vct_access(CPUARMState *env,
2389                                     const ARMCPRegInfo *ri,
2390                                     bool isread)
2391 {
2392     return gt_counter_access(env, GTIMER_VIRT, isread);
2393 }
2394 
2395 static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
2396                                        bool isread)
2397 {
2398     return gt_timer_access(env, GTIMER_PHYS, isread);
2399 }
2400 
2401 static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
2402                                        bool isread)
2403 {
2404     return gt_timer_access(env, GTIMER_VIRT, isread);
2405 }
2406 
2407 static CPAccessResult gt_stimer_access(CPUARMState *env,
2408                                        const ARMCPRegInfo *ri,
2409                                        bool isread)
2410 {
2411     /* The AArch64 register view of the secure physical timer is
2412      * always accessible from EL3, and configurably accessible from
2413      * Secure EL1.
2414      */
2415     switch (arm_current_el(env)) {
2416     case 1:
2417         if (!arm_is_secure(env)) {
2418             return CP_ACCESS_TRAP;
2419         }
2420         if (!(env->cp15.scr_el3 & SCR_ST)) {
2421             return CP_ACCESS_TRAP_EL3;
2422         }
2423         return CP_ACCESS_OK;
2424     case 0:
2425     case 2:
2426         return CP_ACCESS_TRAP;
2427     case 3:
2428         return CP_ACCESS_OK;
2429     default:
2430         g_assert_not_reached();
2431     }
2432 }
2433 
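/*
 * Descriptive note (added): the system counter value is derived from QEMU's
 * virtual clock, i.e. the nanosecond clock divided by the tick period
 * implied by the counter frequency.
 */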
2434 static uint64_t gt_get_countervalue(CPUARMState *env)
2435 {
2436     ARMCPU *cpu = env_archcpu(env);
2437 
2438     return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / gt_cntfrq_period_ns(cpu);
2439 }
2440 
2441 static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
2442 {
2443     ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];
2444 
2445     if (gt->ctl & 1) {
2446         /* Timer enabled: calculate and set current ISTATUS, irq, and
2447          * reset timer to when ISTATUS next has to change
2448          */
2449         uint64_t offset = timeridx == GTIMER_VIRT ?
2450                                       cpu->env.cp15.cntvoff_el2 : 0;
2451         uint64_t count = gt_get_countervalue(&cpu->env);
2452         /* Note that this must be unsigned 64 bit arithmetic: */
2453         int istatus = count - offset >= gt->cval;
2454         uint64_t nexttick;
2455         int irqstate;
2456 
2457         gt->ctl = deposit32(gt->ctl, 2, 1, istatus);
2458 
2459         irqstate = (istatus && !(gt->ctl & 2));
2460         qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
2461 
2462         if (istatus) {
2463             /* Next transition is when count rolls back over to zero */
2464             nexttick = UINT64_MAX;
2465         } else {
2466             /* Next transition is when we hit cval */
2467             nexttick = gt->cval + offset;
2468         }
2469         /* Note that the desired next expiry time might be beyond the
2470          * signed-64-bit range of a QEMUTimer -- in this case we just
2471          * set the timer for as far in the future as possible. When the
2472          * timer expires we will reset the timer for any remaining period.
2473          */
2474         if (nexttick > INT64_MAX / gt_cntfrq_period_ns(cpu)) {
2475             timer_mod_ns(cpu->gt_timer[timeridx], INT64_MAX);
2476         } else {
2477             timer_mod(cpu->gt_timer[timeridx], nexttick);
2478         }
2479         trace_arm_gt_recalc(timeridx, irqstate, nexttick);
2480     } else {
2481         /* Timer disabled: ISTATUS and timer output always clear */
2482         gt->ctl &= ~4;
2483         qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
2484         timer_del(cpu->gt_timer[timeridx]);
2485         trace_arm_gt_recalc_disabled(timeridx);
2486     }
2487 }
2488 
2489 static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
2490                            int timeridx)
2491 {
2492     ARMCPU *cpu = env_archcpu(env);
2493 
2494     timer_del(cpu->gt_timer[timeridx]);
2495 }
2496 
2497 static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
2498 {
2499     return gt_get_countervalue(env);
2500 }
2501 
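/*
 * Descriptive note (added): return the offset to subtract from the physical
 * count to produce the virtual count (CNTVCT = CNTPCT - CNTVOFF_EL2).
 * With HCR_EL2.E2H set, accesses from EL2 (and from EL0 when TGE is also
 * set) use the physical counter directly, so the offset is zero there.
 */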
2502 static uint64_t gt_virt_cnt_offset(CPUARMState *env)
2503 {
2504     uint64_t hcr;
2505 
2506     switch (arm_current_el(env)) {
2507     case 2:
2508         hcr = arm_hcr_el2_eff(env);
2509         if (hcr & HCR_E2H) {
2510             return 0;
2511         }
2512         break;
2513     case 0:
2514         hcr = arm_hcr_el2_eff(env);
2515         if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
2516             return 0;
2517         }
2518         break;
2519     }
2520 
2521     return env->cp15.cntvoff_el2;
2522 }
2523 
2524 static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
2525 {
2526     return gt_get_countervalue(env) - gt_virt_cnt_offset(env);
2527 }
2528 
2529 static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2530                           int timeridx,
2531                           uint64_t value)
2532 {
2533     trace_arm_gt_cval_write(timeridx, value);
2534     env->cp15.c14_timer[timeridx].cval = value;
2535     gt_recalc_timer(env_archcpu(env), timeridx);
2536 }
2537 
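/*
 * Descriptive note (added): TVAL is a signed 32-bit downcounting view of the
 * timer:
 *   TVAL = CVAL - (counter - offset)
 * and writing TVAL sets CVAL = (counter - offset) + SignExtend32(TVAL).
 */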
2538 static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
2539                              int timeridx)
2540 {
2541     uint64_t offset = 0;
2542 
2543     switch (timeridx) {
2544     case GTIMER_VIRT:
2545     case GTIMER_HYPVIRT:
2546         offset = gt_virt_cnt_offset(env);
2547         break;
2548     }
2549 
2550     return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
2551                       (gt_get_countervalue(env) - offset));
2552 }
2553 
2554 static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2555                           int timeridx,
2556                           uint64_t value)
2557 {
2558     uint64_t offset = 0;
2559 
2560     switch (timeridx) {
2561     case GTIMER_VIRT:
2562     case GTIMER_HYPVIRT:
2563         offset = gt_virt_cnt_offset(env);
2564         break;
2565     }
2566 
2567     trace_arm_gt_tval_write(timeridx, value);
2568     env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
2569                                          sextract64(value, 0, 32);
2570     gt_recalc_timer(env_archcpu(env), timeridx);
2571 }
2572 
2573 static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2574                          int timeridx,
2575                          uint64_t value)
2576 {
2577     ARMCPU *cpu = env_archcpu(env);
2578     uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;
2579 
2580     trace_arm_gt_ctl_write(timeridx, value);
2581     env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
2582     if ((oldval ^ value) & 1) {
2583         /* Enable toggled */
2584         gt_recalc_timer(cpu, timeridx);
2585     } else if ((oldval ^ value) & 2) {
2586         /* IMASK toggled: don't need to recalculate,
2587          * just set the interrupt line based on ISTATUS
2588          */
2589         int irqstate = (oldval & 4) && !(value & 2);
2590 
2591         trace_arm_gt_imask_toggle(timeridx, irqstate);
2592         qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
2593     }
2594 }
2595 
2596 static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2597 {
2598     gt_timer_reset(env, ri, GTIMER_PHYS);
2599 }
2600 
2601 static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2602                                uint64_t value)
2603 {
2604     gt_cval_write(env, ri, GTIMER_PHYS, value);
2605 }
2606 
2607 static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
2608 {
2609     return gt_tval_read(env, ri, GTIMER_PHYS);
2610 }
2611 
2612 static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2613                                uint64_t value)
2614 {
2615     gt_tval_write(env, ri, GTIMER_PHYS, value);
2616 }
2617 
2618 static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2619                               uint64_t value)
2620 {
2621     gt_ctl_write(env, ri, GTIMER_PHYS, value);
2622 }
2623 
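/*
 * Descriptive note (added): when running in one of the EL2&0 (VHE)
 * translation regimes, accesses to the CNTP_* and CNTV_* registers are
 * redirected to the EL2 physical and EL2 virtual timers respectively, so
 * select the timer index from the current mmu_idx.
 */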
2624 static int gt_phys_redir_timeridx(CPUARMState *env)
2625 {
2626     switch (arm_mmu_idx(env)) {
2627     case ARMMMUIdx_E20_0:
2628     case ARMMMUIdx_E20_2:
2629     case ARMMMUIdx_E20_2_PAN:
2630     case ARMMMUIdx_SE20_0:
2631     case ARMMMUIdx_SE20_2:
2632     case ARMMMUIdx_SE20_2_PAN:
2633         return GTIMER_HYP;
2634     default:
2635         return GTIMER_PHYS;
2636     }
2637 }
2638 
2639 static int gt_virt_redir_timeridx(CPUARMState *env)
2640 {
2641     switch (arm_mmu_idx(env)) {
2642     case ARMMMUIdx_E20_0:
2643     case ARMMMUIdx_E20_2:
2644     case ARMMMUIdx_E20_2_PAN:
2645     case ARMMMUIdx_SE20_0:
2646     case ARMMMUIdx_SE20_2:
2647     case ARMMMUIdx_SE20_2_PAN:
2648         return GTIMER_HYPVIRT;
2649     default:
2650         return GTIMER_VIRT;
2651     }
2652 }
2653 
2654 static uint64_t gt_phys_redir_cval_read(CPUARMState *env,
2655                                         const ARMCPRegInfo *ri)
2656 {
2657     int timeridx = gt_phys_redir_timeridx(env);
2658     return env->cp15.c14_timer[timeridx].cval;
2659 }
2660 
2661 static void gt_phys_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2662                                      uint64_t value)
2663 {
2664     int timeridx = gt_phys_redir_timeridx(env);
2665     gt_cval_write(env, ri, timeridx, value);
2666 }
2667 
2668 static uint64_t gt_phys_redir_tval_read(CPUARMState *env,
2669                                         const ARMCPRegInfo *ri)
2670 {
2671     int timeridx = gt_phys_redir_timeridx(env);
2672     return gt_tval_read(env, ri, timeridx);
2673 }
2674 
2675 static void gt_phys_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2676                                      uint64_t value)
2677 {
2678     int timeridx = gt_phys_redir_timeridx(env);
2679     gt_tval_write(env, ri, timeridx, value);
2680 }
2681 
2682 static uint64_t gt_phys_redir_ctl_read(CPUARMState *env,
2683                                        const ARMCPRegInfo *ri)
2684 {
2685     int timeridx = gt_phys_redir_timeridx(env);
2686     return env->cp15.c14_timer[timeridx].ctl;
2687 }
2688 
2689 static void gt_phys_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2690                                     uint64_t value)
2691 {
2692     int timeridx = gt_phys_redir_timeridx(env);
2693     gt_ctl_write(env, ri, timeridx, value);
2694 }
2695 
2696 static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2697 {
2698     gt_timer_reset(env, ri, GTIMER_VIRT);
2699 }
2700 
2701 static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2702                                uint64_t value)
2703 {
2704     gt_cval_write(env, ri, GTIMER_VIRT, value);
2705 }
2706 
2707 static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
2708 {
2709     return gt_tval_read(env, ri, GTIMER_VIRT);
2710 }
2711 
2712 static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2713                                uint64_t value)
2714 {
2715     gt_tval_write(env, ri, GTIMER_VIRT, value);
2716 }
2717 
2718 static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2719                               uint64_t value)
2720 {
2721     gt_ctl_write(env, ri, GTIMER_VIRT, value);
2722 }
2723 
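/*
 * Descriptive note (added): changing CNTVOFF_EL2 moves the virtual counter,
 * so the virtual timer's ISTATUS and interrupt output must be recalculated.
 */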
2724 static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
2725                               uint64_t value)
2726 {
2727     ARMCPU *cpu = env_archcpu(env);
2728 
2729     trace_arm_gt_cntvoff_write(value);
2730     raw_write(env, ri, value);
2731     gt_recalc_timer(cpu, GTIMER_VIRT);
2732 }
2733 
2734 static uint64_t gt_virt_redir_cval_read(CPUARMState *env,
2735                                         const ARMCPRegInfo *ri)
2736 {
2737     int timeridx = gt_virt_redir_timeridx(env);
2738     return env->cp15.c14_timer[timeridx].cval;
2739 }
2740 
2741 static void gt_virt_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2742                                      uint64_t value)
2743 {
2744     int timeridx = gt_virt_redir_timeridx(env);
2745     gt_cval_write(env, ri, timeridx, value);
2746 }
2747 
2748 static uint64_t gt_virt_redir_tval_read(CPUARMState *env,
2749                                         const ARMCPRegInfo *ri)
2750 {
2751     int timeridx = gt_virt_redir_timeridx(env);
2752     return gt_tval_read(env, ri, timeridx);
2753 }
2754 
2755 static void gt_virt_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2756                                      uint64_t value)
2757 {
2758     int timeridx = gt_virt_redir_timeridx(env);
2759     gt_tval_write(env, ri, timeridx, value);
2760 }
2761 
2762 static uint64_t gt_virt_redir_ctl_read(CPUARMState *env,
2763                                        const ARMCPRegInfo *ri)
2764 {
2765     int timeridx = gt_virt_redir_timeridx(env);
2766     return env->cp15.c14_timer[timeridx].ctl;
2767 }
2768 
2769 static void gt_virt_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2770                                     uint64_t value)
2771 {
2772     int timeridx = gt_virt_redir_timeridx(env);
2773     gt_ctl_write(env, ri, timeridx, value);
2774 }
2775 
2776 static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2777 {
2778     gt_timer_reset(env, ri, GTIMER_HYP);
2779 }
2780 
2781 static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2782                               uint64_t value)
2783 {
2784     gt_cval_write(env, ri, GTIMER_HYP, value);
2785 }
2786 
2787 static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
2788 {
2789     return gt_tval_read(env, ri, GTIMER_HYP);
2790 }
2791 
2792 static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2793                               uint64_t value)
2794 {
2795     gt_tval_write(env, ri, GTIMER_HYP, value);
2796 }
2797 
2798 static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2799                               uint64_t value)
2800 {
2801     gt_ctl_write(env, ri, GTIMER_HYP, value);
2802 }
2803 
2804 static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2805 {
2806     gt_timer_reset(env, ri, GTIMER_SEC);
2807 }
2808 
2809 static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2810                               uint64_t value)
2811 {
2812     gt_cval_write(env, ri, GTIMER_SEC, value);
2813 }
2814 
2815 static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
2816 {
2817     return gt_tval_read(env, ri, GTIMER_SEC);
2818 }
2819 
2820 static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2821                               uint64_t value)
2822 {
2823     gt_tval_write(env, ri, GTIMER_SEC, value);
2824 }
2825 
2826 static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2827                               uint64_t value)
2828 {
2829     gt_ctl_write(env, ri, GTIMER_SEC, value);
2830 }
2831 
2832 static void gt_hv_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2833 {
2834     gt_timer_reset(env, ri, GTIMER_HYPVIRT);
2835 }
2836 
2837 static void gt_hv_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2838                              uint64_t value)
2839 {
2840     gt_cval_write(env, ri, GTIMER_HYPVIRT, value);
2841 }
2842 
2843 static uint64_t gt_hv_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
2844 {
2845     return gt_tval_read(env, ri, GTIMER_HYPVIRT);
2846 }
2847 
2848 static void gt_hv_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2849                              uint64_t value)
2850 {
2851     gt_tval_write(env, ri, GTIMER_HYPVIRT, value);
2852 }
2853 
2854 static void gt_hv_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2855                             uint64_t value)
2856 {
2857     gt_ctl_write(env, ri, GTIMER_HYPVIRT, value);
2858 }
2859 
2860 void arm_gt_ptimer_cb(void *opaque)
2861 {
2862     ARMCPU *cpu = opaque;
2863 
2864     gt_recalc_timer(cpu, GTIMER_PHYS);
2865 }
2866 
2867 void arm_gt_vtimer_cb(void *opaque)
2868 {
2869     ARMCPU *cpu = opaque;
2870 
2871     gt_recalc_timer(cpu, GTIMER_VIRT);
2872 }
2873 
2874 void arm_gt_htimer_cb(void *opaque)
2875 {
2876     ARMCPU *cpu = opaque;
2877 
2878     gt_recalc_timer(cpu, GTIMER_HYP);
2879 }
2880 
2881 void arm_gt_stimer_cb(void *opaque)
2882 {
2883     ARMCPU *cpu = opaque;
2884 
2885     gt_recalc_timer(cpu, GTIMER_SEC);
2886 }
2887 
2888 void arm_gt_hvtimer_cb(void *opaque)
2889 {
2890     ARMCPU *cpu = opaque;
2891 
2892     gt_recalc_timer(cpu, GTIMER_HYPVIRT);
2893 }
2894 
2895 static void arm_gt_cntfrq_reset(CPUARMState *env, const ARMCPRegInfo *opaque)
2896 {
2897     ARMCPU *cpu = env_archcpu(env);
2898 
2899     cpu->env.cp15.c14_cntfrq = cpu->gt_cntfrq_hz;
2900 }
2901 
2902 static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
2903     /* Note that CNTFRQ is purely reads-as-written for the benefit
2904      * of software; writing it doesn't actually change the timer frequency.
2905      * Our reset value matches the fixed frequency we implement the timer at.
2906      */
2907     { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
2908       .type = ARM_CP_ALIAS,
2909       .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
2910       .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
2911     },
2912     { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
2913       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
2914       .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
2915       .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
2916       .resetfn = arm_gt_cntfrq_reset,
2917     },
2918     /* overall control: mostly access permissions */
2919     { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
2920       .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
2921       .access = PL1_RW,
2922       .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
2923       .resetvalue = 0,
2924     },
2925     /* per-timer control */
2926     { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
2927       .secure = ARM_CP_SECSTATE_NS,
2928       .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
2929       .accessfn = gt_ptimer_access,
2930       .fieldoffset = offsetoflow32(CPUARMState,
2931                                    cp15.c14_timer[GTIMER_PHYS].ctl),
2932       .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read,
2933       .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write,
2934     },
2935     { .name = "CNTP_CTL_S",
2936       .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
2937       .secure = ARM_CP_SECSTATE_S,
2938       .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
2939       .accessfn = gt_ptimer_access,
2940       .fieldoffset = offsetoflow32(CPUARMState,
2941                                    cp15.c14_timer[GTIMER_SEC].ctl),
2942       .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
2943     },
2944     { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
2945       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
2946       .type = ARM_CP_IO, .access = PL0_RW,
2947       .accessfn = gt_ptimer_access,
2948       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
2949       .resetvalue = 0,
2950       .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read,
2951       .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write,
2952     },
2953     { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
2954       .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
2955       .accessfn = gt_vtimer_access,
2956       .fieldoffset = offsetoflow32(CPUARMState,
2957                                    cp15.c14_timer[GTIMER_VIRT].ctl),
2958       .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read,
2959       .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write,
2960     },
2961     { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
2962       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
2963       .type = ARM_CP_IO, .access = PL0_RW,
2964       .accessfn = gt_vtimer_access,
2965       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
2966       .resetvalue = 0,
2967       .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read,
2968       .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write,
2969     },
2970     /* TimerValue views: a 32 bit downcounting view of the underlying state */
2971     { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
2972       .secure = ARM_CP_SECSTATE_NS,
2973       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
2974       .accessfn = gt_ptimer_access,
2975       .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write,
2976     },
2977     { .name = "CNTP_TVAL_S",
2978       .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
2979       .secure = ARM_CP_SECSTATE_S,
2980       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
2981       .accessfn = gt_ptimer_access,
2982       .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write,
2983     },
2984     { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
2985       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
2986       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
2987       .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset,
2988       .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write,
2989     },
2990     { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
2991       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
2992       .accessfn = gt_vtimer_access,
2993       .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write,
2994     },
2995     { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
2996       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
2997       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
2998       .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset,
2999       .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write,
3000     },
3001     /* The counter itself */
3002     { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
3003       .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
3004       .accessfn = gt_pct_access,
3005       .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
3006     },
3007     { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
3008       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
3009       .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
3010       .accessfn = gt_pct_access, .readfn = gt_cnt_read,
3011     },
3012     { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
3013       .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
3014       .accessfn = gt_vct_access,
3015       .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
3016     },
3017     { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
3018       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
3019       .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
3020       .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
3021     },
3022     /* Comparison value, indicating when the timer goes off */
3023     { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
3024       .secure = ARM_CP_SECSTATE_NS,
3025       .access = PL0_RW,
3026       .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
3027       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
3028       .accessfn = gt_ptimer_access,
3029       .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read,
3030       .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write,
3031     },
3032     { .name = "CNTP_CVAL_S", .cp = 15, .crm = 14, .opc1 = 2,
3033       .secure = ARM_CP_SECSTATE_S,
3034       .access = PL0_RW,
3035       .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
3036       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
3037       .accessfn = gt_ptimer_access,
3038       .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
3039     },
3040     { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
3041       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
3042       .access = PL0_RW,
3043       .type = ARM_CP_IO,
3044       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
3045       .resetvalue = 0, .accessfn = gt_ptimer_access,
3046       .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read,
3047       .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write,
3048     },
3049     { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
3050       .access = PL0_RW,
3051       .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
3052       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
3053       .accessfn = gt_vtimer_access,
3054       .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read,
3055       .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write,
3056     },
3057     { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
3058       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
3059       .access = PL0_RW,
3060       .type = ARM_CP_IO,
3061       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
3062       .resetvalue = 0, .accessfn = gt_vtimer_access,
3063       .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read,
3064       .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write,
3065     },
3066     /* Secure timer -- this is actually restricted to only EL3
3067      * and configurably Secure-EL1 via the accessfn.
3068      */
3069     { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64,
3070       .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0,
3071       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW,
3072       .accessfn = gt_stimer_access,
3073       .readfn = gt_sec_tval_read,
3074       .writefn = gt_sec_tval_write,
3075       .resetfn = gt_sec_timer_reset,
3076     },
3077     { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64,
3078       .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1,
3079       .type = ARM_CP_IO, .access = PL1_RW,
3080       .accessfn = gt_stimer_access,
3081       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl),
3082       .resetvalue = 0,
3083       .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
3084     },
3085     { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64,
3086       .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2,
3087       .type = ARM_CP_IO, .access = PL1_RW,
3088       .accessfn = gt_stimer_access,
3089       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
3090       .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
3091     },
3092 };
3093 
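/*
 * Access gate for registers that are only usable when the effective
 * value of HCR_EL2.E2H is 1: trap the access otherwise.
 */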
3094 static CPAccessResult e2h_access(CPUARMState *env, const ARMCPRegInfo *ri,
3095                                  bool isread)
3096 {
3097     if (!(arm_hcr_el2_eff(env) & HCR_E2H)) {
3098         return CP_ACCESS_TRAP;
3099     }
3100     return CP_ACCESS_OK;
3101 }
3102 
3103 #else
3104 
3105 /* In user-mode most of the generic timer registers are inaccessible;
3106  * however, modern kernels (4.12+) allow access to cntvct_el0.
3107  */
3108 
3109 static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
3110 {
3111     ARMCPU *cpu = env_archcpu(env);
3112 
3113     /* Currently we have no support for QEMUTimer in linux-user, so we
3114      * can't call gt_get_countervalue(env); instead we directly
3115      * call the lower-level functions.
3116      */
3117     return cpu_get_clock() / gt_cntfrq_period_ns(cpu);
3118 }
3119 
3120 static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
3121     { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
3122       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
3123       .type = ARM_CP_CONST, .access = PL0_R /* no PL1_RW in linux-user */,
3124       .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
3125       .resetvalue = NANOSECONDS_PER_SECOND / GTIMER_SCALE,
3126     },
3127     { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
3128       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
3129       .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
3130       .readfn = gt_virt_cnt_read,
3131     },
3132 };
3133 
3134 #endif
3135 
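/*
 * Writes to PAR: with LPAE the full 64-bit value is stored; otherwise
 * mask out the bits that are not defined in the 32-bit format (the
 * reserved set differs between ARMv7 and earlier architectures).
 */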
3136 static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
3137 {
3138     if (arm_feature(env, ARM_FEATURE_LPAE)) {
3139         raw_write(env, ri, value);
3140     } else if (arm_feature(env, ARM_FEATURE_V7)) {
3141         raw_write(env, ri, value & 0xfffff6ff);
3142     } else {
3143         raw_write(env, ri, value & 0xfffff1ff);
3144     }
3145 }
3146 
3147 #ifndef CONFIG_USER_ONLY
3148 /* get_phys_addr() isn't present for user-mode-only targets */
3149 
3150 static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
3151                                  bool isread)
3152 {
3153     if (ri->opc2 & 4) {
3154         /* The ATS12NSO* operations must trap to EL3 or EL2 if executed in
3155          * Secure EL1 (which can only happen if EL3 is AArch64).
3156          * They are simply UNDEF if executed from NS EL1.
3157          * They function normally from EL2 or EL3.
3158          */
3159         if (arm_current_el(env) == 1) {
3160             if (arm_is_secure_below_el3(env)) {
3161                 if (env->cp15.scr_el3 & SCR_EEL2) {
3162                     return CP_ACCESS_TRAP_UNCATEGORIZED_EL2;
3163                 }
3164                 return CP_ACCESS_TRAP_UNCATEGORIZED_EL3;
3165             }
3166             return CP_ACCESS_TRAP_UNCATEGORIZED;
3167         }
3168     }
3169     return CP_ACCESS_OK;
3170 }
3171 
3172 #ifdef CONFIG_TCG
3173 static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
3174                              MMUAccessType access_type, ARMMMUIdx mmu_idx)
3175 {
3176     hwaddr phys_addr;
3177     target_ulong page_size;
3178     int prot;
3179     bool ret;
3180     uint64_t par64;
3181     bool format64 = false;
3182     MemTxAttrs attrs = {};
3183     ARMMMUFaultInfo fi = {};
3184     ARMCacheAttrs cacheattrs = {};
3185 
3186     ret = get_phys_addr(env, value, access_type, mmu_idx, &phys_addr, &attrs,
3187                         &prot, &page_size, &fi, &cacheattrs);
3188 
3189     /*
3190      * ATS operations only do S1 or S1+S2 translations, so we never
3191      * have to deal with the ARMCacheAttrs format for S2 only.
3192      */
3193     assert(!cacheattrs.is_s2_format);
3194 
3195     if (ret) {
3196         /*
3197          * Some kinds of translation fault must cause exceptions rather
3198          * than being reported in the PAR.
3199          */
3200         int current_el = arm_current_el(env);
3201         int target_el;
3202         uint32_t syn, fsr, fsc;
3203         bool take_exc = false;
3204 
3205         if (fi.s1ptw && current_el == 1
3206             && arm_mmu_idx_is_stage1_of_2(mmu_idx)) {
3207             /*
3208              * Synchronous stage 2 fault on an access made as part of the
3209              * translation table walk for AT S1E0* or AT S1E1* insn
3210              * executed from NS EL1. If this is a synchronous external abort
3211              * and SCR_EL3.EA == 1, then we take a synchronous external abort
3212              * to EL3. Otherwise the fault is taken as an exception to EL2,
3213              * and HPFAR_EL2 holds the faulting IPA.
3214              */
3215             if (fi.type == ARMFault_SyncExternalOnWalk &&
3216                 (env->cp15.scr_el3 & SCR_EA)) {
3217                 target_el = 3;
3218             } else {
3219                 env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4;
3220                 if (arm_is_secure_below_el3(env) && fi.s1ns) {
3221                     env->cp15.hpfar_el2 |= HPFAR_NS;
3222                 }
3223                 target_el = 2;
3224             }
3225             take_exc = true;
3226         } else if (fi.type == ARMFault_SyncExternalOnWalk) {
3227             /*
3228              * Synchronous external aborts during a translation table walk
3229              * are taken as Data Abort exceptions.
3230              */
3231             if (fi.stage2) {
3232                 if (current_el == 3) {
3233                     target_el = 3;
3234                 } else {
3235                     target_el = 2;
3236                 }
3237             } else {
3238                 target_el = exception_target_el(env);
3239             }
3240             take_exc = true;
3241         }
3242 
3243         if (take_exc) {
3244             /* Construct FSR and FSC using same logic as arm_deliver_fault() */
3245             if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
3246                 arm_s1_regime_using_lpae_format(env, mmu_idx)) {
3247                 fsr = arm_fi_to_lfsc(&fi);
3248                 fsc = extract32(fsr, 0, 6);
3249             } else {
3250                 fsr = arm_fi_to_sfsc(&fi);
3251                 fsc = 0x3f;
3252             }
3253             /*
3254              * Report exception with ESR indicating a fault due to a
3255              * translation table walk for a cache maintenance instruction.
3256              */
3257             syn = syn_data_abort_no_iss(current_el == target_el, 0,
3258                                         fi.ea, 1, fi.s1ptw, 1, fsc);
3259             env->exception.vaddress = value;
3260             env->exception.fsr = fsr;
3261             raise_exception(env, EXCP_DATA_ABORT, syn, target_el);
3262         }
3263     }
3264 
3265     if (is_a64(env)) {
3266         format64 = true;
3267     } else if (arm_feature(env, ARM_FEATURE_LPAE)) {
3268         /*
3269          * ATS1Cxx:
3270          * * TTBCR.EAE determines whether the result is returned using the
3271          *   32-bit or the 64-bit PAR format
3272          * * Instructions executed in Hyp mode always use the 64bit format
3273          *
3274          * ATS1S2NSOxx uses the 64bit format if any of the following is true:
3275          * * The Non-secure TTBCR.EAE bit is set to 1
3276          * * The implementation includes EL2, and the value of HCR.VM is 1
3277          *
3278          * (Note that HCR.DC makes HCR.VM behave as if it is 1.)
3279          *
3280          * ATS1Hx always uses the 64bit format.
3281          */
3282         format64 = arm_s1_regime_using_lpae_format(env, mmu_idx);
3283 
3284         if (arm_feature(env, ARM_FEATURE_EL2)) {
3285             if (mmu_idx == ARMMMUIdx_E10_0 ||
3286                 mmu_idx == ARMMMUIdx_E10_1 ||
3287                 mmu_idx == ARMMMUIdx_E10_1_PAN) {
3288                 format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC);
3289             } else {
3290                 format64 |= arm_current_el(env) == 2;
3291             }
3292         }
3293     }
3294 
3295     if (format64) {
3296         /* Create a 64-bit PAR */
3297         par64 = (1 << 11); /* LPAE bit always set */
3298         if (!ret) {
3299             par64 |= phys_addr & ~0xfffULL;
3300             if (!attrs.secure) {
3301                 par64 |= (1 << 9); /* NS */
3302             }
3303             par64 |= (uint64_t)cacheattrs.attrs << 56; /* ATTR */
3304             par64 |= cacheattrs.shareability << 7; /* SH */
3305         } else {
3306             uint32_t fsr = arm_fi_to_lfsc(&fi);
3307 
3308             par64 |= 1; /* F */
3309             par64 |= (fsr & 0x3f) << 1; /* FS */
3310             if (fi.stage2) {
3311                 par64 |= (1 << 9); /* S */
3312             }
3313             if (fi.s1ptw) {
3314                 par64 |= (1 << 8); /* PTW */
3315             }
3316         }
3317     } else {
3318         /* fsr is a DFSR/IFSR value for the short descriptor
3319          * translation table format (with WnR always clear).
3320          * Convert it to a 32-bit PAR.
3321          */
3322         if (!ret) {
3323             /* We do not set any attribute bits in the PAR */
3324             if (page_size == (1 << 24)
3325                 && arm_feature(env, ARM_FEATURE_V7)) {
3326                 par64 = (phys_addr & 0xff000000) | (1 << 1);
3327             } else {
3328                 par64 = phys_addr & 0xfffff000;
3329             }
3330             if (!attrs.secure) {
3331                 par64 |= (1 << 9); /* NS */
3332             }
3333         } else {
3334             uint32_t fsr = arm_fi_to_sfsc(&fi);
3335 
3336             par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) |
3337                     ((fsr & 0xf) << 1) | 1;
3338         }
3339     }
3340     return par64;
3341 }
3342 #endif /* CONFIG_TCG */
3343 
3344 static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
3345 {
3346 #ifdef CONFIG_TCG
3347     MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
3348     uint64_t par64;
3349     ARMMMUIdx mmu_idx;
3350     int el = arm_current_el(env);
3351     bool secure = arm_is_secure_below_el3(env);
3352 
3353     switch (ri->opc2 & 6) {
3354     case 0:
3355         /* stage 1 current state PL1: ATS1CPR, ATS1CPW, ATS1CPRP, ATS1CPWP */
3356         switch (el) {
3357         case 3:
3358             mmu_idx = ARMMMUIdx_SE3;
3359             break;
3360         case 2:
3361             g_assert(!secure);  /* ARMv8.4-SecEL2 is 64-bit only */
3362             /* fall through */
3363         case 1:
3364             if (ri->crm == 9 && (env->uncached_cpsr & CPSR_PAN)) {
3365                 mmu_idx = (secure ? ARMMMUIdx_Stage1_SE1_PAN
3366                            : ARMMMUIdx_Stage1_E1_PAN);
3367             } else {
3368                 mmu_idx = secure ? ARMMMUIdx_Stage1_SE1 : ARMMMUIdx_Stage1_E1;
3369             }
3370             break;
3371         default:
3372             g_assert_not_reached();
3373         }
3374         break;
3375     case 2:
3376         /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
3377         switch (el) {
3378         case 3:
3379             mmu_idx = ARMMMUIdx_SE10_0;
3380             break;
3381         case 2:
3382             g_assert(!secure);  /* ARMv8.4-SecEL2 is 64-bit only */
3383             mmu_idx = ARMMMUIdx_Stage1_E0;
3384             break;
3385         case 1:
3386             mmu_idx = secure ? ARMMMUIdx_Stage1_SE0 : ARMMMUIdx_Stage1_E0;
3387             break;
3388         default:
3389             g_assert_not_reached();
3390         }
3391         break;
3392     case 4:
3393         /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
3394         mmu_idx = ARMMMUIdx_E10_1;
3395         break;
3396     case 6:
3397         /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
3398         mmu_idx = ARMMMUIdx_E10_0;
3399         break;
3400     default:
3401         g_assert_not_reached();
3402     }
3403 
3404     par64 = do_ats_write(env, value, access_type, mmu_idx);
3405 
3406     A32_BANKED_CURRENT_REG_SET(env, par, par64);
3407 #else
3408     /* Handled by hardware accelerator. */
3409     g_assert_not_reached();
3410 #endif /* CONFIG_TCG */
3411 }
3412 
3413 static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
3414                         uint64_t value)
3415 {
3416 #ifdef CONFIG_TCG
3417     MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
3418     uint64_t par64;
3419 
3420     par64 = do_ats_write(env, value, access_type, ARMMMUIdx_E2);
3421 
3422     A32_BANKED_CURRENT_REG_SET(env, par, par64);
3423 #else
3424     /* Handled by hardware accelerator. */
3425     g_assert_not_reached();
3426 #endif /* CONFIG_TCG */
3427 }
3428 
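/*
 * AT S1E2* issued from EL3 is only permitted when EL2 is enabled in the
 * current security state (SCR_EL3.NS or SCR_EL3.EEL2 set); otherwise trap.
 */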
3429 static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
3430                                      bool isread)
3431 {
3432     if (arm_current_el(env) == 3 &&
3433         !(env->cp15.scr_el3 & (SCR_NS | SCR_EEL2))) {
3434         return CP_ACCESS_TRAP;
3435     }
3436     return CP_ACCESS_OK;
3437 }
3438 
3439 static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
3440                         uint64_t value)
3441 {
3442 #ifdef CONFIG_TCG
3443     MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
3444     ARMMMUIdx mmu_idx;
3445     int secure = arm_is_secure_below_el3(env);
3446 
3447     switch (ri->opc2 & 6) {
3448     case 0:
3449         switch (ri->opc1) {
3450         case 0: /* AT S1E1R, AT S1E1W, AT S1E1RP, AT S1E1WP */
3451             if (ri->crm == 9 && (env->pstate & PSTATE_PAN)) {
3452                 mmu_idx = (secure ? ARMMMUIdx_Stage1_SE1_PAN
3453                            : ARMMMUIdx_Stage1_E1_PAN);
3454             } else {
3455                 mmu_idx = secure ? ARMMMUIdx_Stage1_SE1 : ARMMMUIdx_Stage1_E1;
3456             }
3457             break;
3458         case 4: /* AT S1E2R, AT S1E2W */
3459             mmu_idx = secure ? ARMMMUIdx_SE2 : ARMMMUIdx_E2;
3460             break;
3461         case 6: /* AT S1E3R, AT S1E3W */
3462             mmu_idx = ARMMMUIdx_SE3;
3463             break;
3464         default:
3465             g_assert_not_reached();
3466         }
3467         break;
3468     case 2: /* AT S1E0R, AT S1E0W */
3469         mmu_idx = secure ? ARMMMUIdx_Stage1_SE0 : ARMMMUIdx_Stage1_E0;
3470         break;
3471     case 4: /* AT S12E1R, AT S12E1W */
3472         mmu_idx = secure ? ARMMMUIdx_SE10_1 : ARMMMUIdx_E10_1;
3473         break;
3474     case 6: /* AT S12E0R, AT S12E0W */
3475         mmu_idx = secure ? ARMMMUIdx_SE10_0 : ARMMMUIdx_E10_0;
3476         break;
3477     default:
3478         g_assert_not_reached();
3479     }
3480 
3481     env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx);
3482 #else
3483     /* Handled by hardware accelerator. */
3484     g_assert_not_reached();
3485 #endif /* CONFIG_TCG */
3486 }
3487 #endif
3488 
3489 static const ARMCPRegInfo vapa_cp_reginfo[] = {
3490     { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
3491       .access = PL1_RW, .resetvalue = 0,
3492       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s),
3493                              offsetoflow32(CPUARMState, cp15.par_ns) },
3494       .writefn = par_write },
3495 #ifndef CONFIG_USER_ONLY
3496     /* This underdecoding is safe because the reginfo is NO_RAW. */
3497     { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
3498       .access = PL1_W, .accessfn = ats_access,
3499       .writefn = ats_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
3500 #endif
3501 };
3502 
3503 /* Return basic MPU access permission bits.  */
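/* The extended format uses 4 bits per region and the basic format 2;
 * this keeps only the low two bits of each region's field.
 */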
3504 static uint32_t simple_mpu_ap_bits(uint32_t val)
3505 {
3506     uint32_t ret;
3507     uint32_t mask;
3508     int i;
3509     ret = 0;
3510     mask = 3;
3511     for (i = 0; i < 16; i += 2) {
3512         ret |= (val >> i) & mask;
3513         mask <<= 2;
3514     }
3515     return ret;
3516 }
3517 
3518 /* Pad basic MPU access permission bits to extended format.  */
3519 static uint32_t extended_mpu_ap_bits(uint32_t val)
3520 {
3521     uint32_t ret;
3522     uint32_t mask;
3523     int i;
3524     ret = 0;
3525     mask = 3;
3526     for (i = 0; i < 16; i += 2) {
3527         ret |= (val & mask) << i;
3528         mask <<= 2;
3529     }
3530     return ret;
3531 }
3532 
3533 static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
3534                                  uint64_t value)
3535 {
3536     env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value);
3537 }
3538 
3539 static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
3540 {
3541     return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap);
3542 }
3543 
3544 static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
3545                                  uint64_t value)
3546 {
3547     env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value);
3548 }
3549 
3550 static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
3551 {
3552     return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap);
3553 }
3554 
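/*
 * PMSAv7 DRBAR/DRSR/DRACR accessors: the reginfo fieldoffset holds a
 * pointer to a per-region array, indexed by the current RGNR value;
 * accesses are RAZ/WI if the array is not allocated.
 */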
3555 static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri)
3556 {
3557     uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
3558 
3559     if (!u32p) {
3560         return 0;
3561     }
3562 
3563     u32p += env->pmsav7.rnr[M_REG_NS];
3564     return *u32p;
3565 }
3566 
3567 static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri,
3568                          uint64_t value)
3569 {
3570     ARMCPU *cpu = env_archcpu(env);
3571     uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
3572 
3573     if (!u32p) {
3574         return;
3575     }
3576 
3577     u32p += env->pmsav7.rnr[M_REG_NS];
3578     tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
3579     *u32p = value;
3580 }
3581 
3582 static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3583                               uint64_t value)
3584 {
3585     ARMCPU *cpu = env_archcpu(env);
3586     uint32_t nrgs = cpu->pmsav7_dregion;
3587 
3588     if (value >= nrgs) {
3589         qemu_log_mask(LOG_GUEST_ERROR,
3590                       "PMSAv7 RGNR write >= # supported regions, %" PRIu32
3591                       " >= %" PRIu32 "\n", (uint32_t)value, nrgs);
3592         return;
3593     }
3594 
3595     raw_write(env, ri, value);
3596 }
3597 
3598 static const ARMCPRegInfo pmsav7_cp_reginfo[] = {
3599     /* Reset for all these registers is handled in arm_cpu_reset(),
3600      * because the PMSAv7 is also used by M-profile CPUs, which do
3601      * not register cpregs but still need the state to be reset.
3602      */
3603     { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0,
3604       .access = PL1_RW, .type = ARM_CP_NO_RAW,
3605       .fieldoffset = offsetof(CPUARMState, pmsav7.drbar),
3606       .readfn = pmsav7_read, .writefn = pmsav7_write,
3607       .resetfn = arm_cp_reset_ignore },
3608     { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2,
3609       .access = PL1_RW, .type = ARM_CP_NO_RAW,
3610       .fieldoffset = offsetof(CPUARMState, pmsav7.drsr),
3611       .readfn = pmsav7_read, .writefn = pmsav7_write,
3612       .resetfn = arm_cp_reset_ignore },
3613     { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4,
3614       .access = PL1_RW, .type = ARM_CP_NO_RAW,
3615       .fieldoffset = offsetof(CPUARMState, pmsav7.dracr),
3616       .readfn = pmsav7_read, .writefn = pmsav7_write,
3617       .resetfn = arm_cp_reset_ignore },
3618     { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0,
3619       .access = PL1_RW,
3620       .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]),
3621       .writefn = pmsav7_rgnr_write,
3622       .resetfn = arm_cp_reset_ignore },
3623 };
3624 
3625 static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
3626     { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
3627       .access = PL1_RW, .type = ARM_CP_ALIAS,
3628       .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
3629       .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
3630     { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
3631       .access = PL1_RW, .type = ARM_CP_ALIAS,
3632       .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
3633       .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
3634     { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
3635       .access = PL1_RW,
3636       .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
3637       .resetvalue = 0, },
3638     { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
3639       .access = PL1_RW,
3640       .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
3641       .resetvalue = 0, },
3642     { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
3643       .access = PL1_RW,
3644       .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
3645     { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
3646       .access = PL1_RW,
3647       .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
3648     /* Protection region base and size registers */
3649     { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
3650       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3651       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) },
3652     { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
3653       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3654       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) },
3655     { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
3656       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3657       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
3658     { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
3659       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3660       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
3661     { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
3662       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3663       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
3664     { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
3665       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3666       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
3667     { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
3668       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3669       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
3670     { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
3671       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3672       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
3673 };
3674 
3675 static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
3676                                  uint64_t value)
3677 {
3678     TCR *tcr = raw_ptr(env, ri);
3679     int maskshift = extract32(value, 0, 3);
3680 
3681     if (!arm_feature(env, ARM_FEATURE_V8)) {
3682         if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
3683             /* Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when
3684              * using Long-descriptor translation table format */
3685             value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
3686         } else if (arm_feature(env, ARM_FEATURE_EL3)) {
3687             /* In an implementation that includes the Security Extensions
3688              * TTBCR has additional fields PD0 [4] and PD1 [5] for
3689              * Short-descriptor translation table format.
3690              */
3691             value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N;
3692         } else {
3693             value &= TTBCR_N;
3694         }
3695     }
3696 
3697     /* Update the masks corresponding to the TCR bank being written.
3698      * Note that we always calculate mask and base_mask, but
3699      * they are only used for short-descriptor tables (ie if EAE is 0);
3700      * for long-descriptor tables the TCR fields are used differently
3701      * and the mask and base_mask values are meaningless.
3702      */
3703     tcr->raw_tcr = value;
3704     tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift);
3705     tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift);
3706 }
3707 
3708 static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3709                              uint64_t value)
3710 {
3711     ARMCPU *cpu = env_archcpu(env);
3712     TCR *tcr = raw_ptr(env, ri);
3713 
3714     if (arm_feature(env, ARM_FEATURE_LPAE)) {
3715         /* With LPAE the TTBCR could result in a change of ASID
3716          * via the TTBCR.A1 bit, so do a TLB flush.
3717          */
3718         tlb_flush(CPU(cpu));
3719     }
3720     /* Preserve the high half of TCR_EL1, set via TTBCR2.  */
3721     value = deposit64(tcr->raw_tcr, 0, 32, value);
3722     vmsa_ttbcr_raw_write(env, ri, value);
3723 }
3724 
3725 static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
3726 {
3727     TCR *tcr = raw_ptr(env, ri);
3728 
3729     /* Reset both the TCR and the masks corresponding to the bank of
3730      * the TCR being reset.
3731      */
3732     tcr->raw_tcr = 0;
3733     tcr->mask = 0;
3734     tcr->base_mask = 0xffffc000u;
3735 }
3736 
3737 static void vmsa_tcr_el12_write(CPUARMState *env, const ARMCPRegInfo *ri,
3738                                uint64_t value)
3739 {
3740     ARMCPU *cpu = env_archcpu(env);
3741     TCR *tcr = raw_ptr(env, ri);
3742 
3743     /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
3744     tlb_flush(CPU(cpu));
3745     tcr->raw_tcr = value;
3746 }
3747 
3748 static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3749                             uint64_t value)
3750 {
3751     /* If the ASID changes (with a 64-bit write), we must flush the TLB.  */
3752     if (cpreg_field_is_64bit(ri) &&
3753         extract64(raw_read(env, ri) ^ value, 48, 16) != 0) {
3754         ARMCPU *cpu = env_archcpu(env);
3755         tlb_flush(CPU(cpu));
3756     }
3757     raw_write(env, ri, value);
3758 }
3759 
3760 static void vmsa_tcr_ttbr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
3761                                     uint64_t value)
3762 {
3763     /*
3764      * If we are running with E2&0 regime, then an ASID is active.
3765      * Flush if that might be changing.  Note we're not checking
3766      * TCR_EL2.A1 to know if this is really the TTBRx_EL2 that
3767      * holds the active ASID, only checking the field that might.
3768      */
3769     if (extract64(raw_read(env, ri) ^ value, 48, 16) &&
3770         (arm_hcr_el2_eff(env) & HCR_E2H)) {
3771         uint16_t mask = ARMMMUIdxBit_E20_2 |
3772                         ARMMMUIdxBit_E20_2_PAN |
3773                         ARMMMUIdxBit_E20_0;
3774 
3775         if (arm_is_secure_below_el3(env)) {
3776             mask >>= ARM_MMU_IDX_A_NS;
3777         }
3778 
3779         tlb_flush_by_mmuidx(env_cpu(env), mask);
3780     }
3781     raw_write(env, ri, value);
3782 }
3783 
3784 static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3785                         uint64_t value)
3786 {
3787     ARMCPU *cpu = env_archcpu(env);
3788     CPUState *cs = CPU(cpu);
3789 
3790     /*
3791      * A change in the VMID used for the stage 2 page tables invalidates
3792      * the combined stage 1&2 TLBs (E10_1, E10_1_PAN and E10_0).
3793      */
3794     if (raw_read(env, ri) != value) {
3795         uint16_t mask = ARMMMUIdxBit_E10_1 |
3796                         ARMMMUIdxBit_E10_1_PAN |
3797                         ARMMMUIdxBit_E10_0;
3798 
3799         if (arm_is_secure_below_el3(env)) {
3800             mask >>= ARM_MMU_IDX_A_NS;
3801         }
3802 
3803         tlb_flush_by_mmuidx(cs, mask);
3804         raw_write(env, ri, value);
3805     }
3806 }
3807 
3808 static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = {
3809     { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
3810       .access = PL1_RW, .accessfn = access_tvm_trvm, .type = ARM_CP_ALIAS,
3811       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s),
3812                              offsetoflow32(CPUARMState, cp15.dfsr_ns) }, },
3813     { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
3814       .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
3815       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s),
3816                              offsetoflow32(CPUARMState, cp15.ifsr_ns) } },
3817     { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0,
3818       .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
3819       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s),
3820                              offsetof(CPUARMState, cp15.dfar_ns) } },
3821     { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64,
3822       .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
3823       .access = PL1_RW, .accessfn = access_tvm_trvm,
3824       .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
3825       .resetvalue = 0, },
3826 };
3827 
3828 static const ARMCPRegInfo vmsa_cp_reginfo[] = {
3829     { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
3830       .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
3831       .access = PL1_RW, .accessfn = access_tvm_trvm,
3832       .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, },
3833     { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
3834       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0,
3835       .access = PL1_RW, .accessfn = access_tvm_trvm,
3836       .writefn = vmsa_ttbr_write, .resetvalue = 0,
3837       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
3838                              offsetof(CPUARMState, cp15.ttbr0_ns) } },
3839     { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
3840       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1,
3841       .access = PL1_RW, .accessfn = access_tvm_trvm,
3842       .writefn = vmsa_ttbr_write, .resetvalue = 0,
3843       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
3844                              offsetof(CPUARMState, cp15.ttbr1_ns) } },
3845     { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
3846       .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
3847       .access = PL1_RW, .accessfn = access_tvm_trvm,
3848       .writefn = vmsa_tcr_el12_write,
3849       .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
3850       .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) },
3851     { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
3852       .access = PL1_RW, .accessfn = access_tvm_trvm,
3853       .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write,
3854       .raw_writefn = vmsa_ttbcr_raw_write,
3855       /* No offsetoflow32 -- pass the entire TCR to writefn/raw_writefn. */
3856       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.tcr_el[3]),
3857                              offsetof(CPUARMState, cp15.tcr_el[1])} },
3858 };
3859 
3860 /* Note that unlike TTBCR, writing to TTBCR2 does not require flushing
3861  * qemu tlbs or adjusting cached masks.
3862  */
3863 static const ARMCPRegInfo ttbcr2_reginfo = {
3864     .name = "TTBCR2", .cp = 15, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 3,
3865     .access = PL1_RW, .accessfn = access_tvm_trvm,
3866     .type = ARM_CP_ALIAS,
3867     .bank_fieldoffsets = {
3868         offsetofhigh32(CPUARMState, cp15.tcr_el[3].raw_tcr),
3869         offsetofhigh32(CPUARMState, cp15.tcr_el[1].raw_tcr),
3870     },
3871 };
3872 
3873 static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
3874                                 uint64_t value)
3875 {
3876     env->cp15.c15_ticonfig = value & 0xe7;
3877     /* The OS_TYPE bit in this register changes the reported CPUID! */
3878     env->cp15.c0_cpuid = (value & (1 << 5)) ?
3879         ARM_CPUID_TI915T : ARM_CPUID_TI925T;
3880 }
3881 
3882 static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
3883                                 uint64_t value)
3884 {
3885     env->cp15.c15_threadid = value & 0xffff;
3886 }
3887 
3888 static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
3889                            uint64_t value)
3890 {
3891     /* Wait-for-interrupt (deprecated) */
3892     cpu_interrupt(env_cpu(env), CPU_INTERRUPT_HALT);
3893 }
3894 
3895 static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
3896                                   uint64_t value)
3897 {
3898     /* On OMAP there are registers indicating the max/min index of dcache lines
3899      * containing a dirty line; cache flush operations have to reset these.
3900      */
3901     env->cp15.c15_i_max = 0x000;
3902     env->cp15.c15_i_min = 0xff0;
3903 }
3904 
3905 static const ARMCPRegInfo omap_cp_reginfo[] = {
3906     { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
3907       .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
3908       .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
3909       .resetvalue = 0, },
3910     { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
3911       .access = PL1_RW, .type = ARM_CP_NOP },
3912     { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
3913       .access = PL1_RW,
3914       .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
3915       .writefn = omap_ticonfig_write },
3916     { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
3917       .access = PL1_RW,
3918       .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
3919     { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
3920       .access = PL1_RW, .resetvalue = 0xff0,
3921       .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
3922     { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
3923       .access = PL1_RW,
3924       .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
3925       .writefn = omap_threadid_write },
3926     { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
3927       .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
3928       .type = ARM_CP_NO_RAW,
3929       .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
3930     /* TODO: Peripheral port remap register:
3931      * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
3932      * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
3933      * when MMU is off.
3934      */
3935     { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
3936       .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
3937       .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW,
3938       .writefn = omap_cachemaint_write },
3939     { .name = "C9", .cp = 15, .crn = 9,
3940       .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
3941       .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
3942 };
3943 
3944 static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
3945                               uint64_t value)
3946 {
3947     env->cp15.c15_cpar = value & 0x3fff;
3948 }
3949 
3950 static const ARMCPRegInfo xscale_cp_reginfo[] = {
3951     { .name = "XSCALE_CPAR",
3952       .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
3953       .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
3954       .writefn = xscale_cpar_write, },
3955     { .name = "XSCALE_AUXCR",
3956       .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
3957       .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
3958       .resetvalue = 0, },
3959     /* XScale specific cache-lockdown: since we have no cache we NOP these
3960      * and hope the guest does not really rely on cache behaviour.
3961      */
3962     { .name = "XSCALE_LOCK_ICACHE_LINE",
3963       .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
3964       .access = PL1_W, .type = ARM_CP_NOP },
3965     { .name = "XSCALE_UNLOCK_ICACHE",
3966       .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
3967       .access = PL1_W, .type = ARM_CP_NOP },
3968     { .name = "XSCALE_DCACHE_LOCK",
3969       .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0,
3970       .access = PL1_RW, .type = ARM_CP_NOP },
3971     { .name = "XSCALE_UNLOCK_DCACHE",
3972       .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1,
3973       .access = PL1_W, .type = ARM_CP_NOP },
3974 };
3975 
3976 static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
3977     /* RAZ/WI the whole crn=15 space when we don't have a more specific
3978      * implementation of this implementation-defined space.
3979      * Ideally this should eventually disappear in favour of actually
3980      * implementing the correct behaviour for all cores.
3981      */
3982     { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
3983       .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
3984       .access = PL1_RW,
3985       .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
3986       .resetvalue = 0 },
3987 };
3988 
3989 static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
3990     /* Cache status: RAZ because we have no cache so it's always clean */
3991     { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
3992       .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
3993       .resetvalue = 0 },
3994 };
3995 
3996 static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
3997     /* We never have a block transfer operation in progress */
3998     { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
3999       .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
4000       .resetvalue = 0 },
4001     /* The cache ops themselves: these all NOP for QEMU */
4002     { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
4003       .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
4004     { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
4005       .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
4006     { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
4007       .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
4008     { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
4009       .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
4010     { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
4011       .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
4012     { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
4013       .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
4014 };
4015 
4016 static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
4017     /* The cache test-and-clean instructions always return (1 << 30)
4018      * to indicate that there are no dirty cache lines.
4019      */
4020     { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
4021       .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
4022       .resetvalue = (1 << 30) },
4023     { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
4024       .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
4025       .resetvalue = (1 << 30) },
4026 };
4027 
4028 static const ARMCPRegInfo strongarm_cp_reginfo[] = {
4029     /* Ignore ReadBuffer accesses */
4030     { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
4031       .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
4032       .access = PL1_RW, .resetvalue = 0,
4033       .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW },
4034 };
4035 
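/* Reads of MIDR from EL1 are virtualised via VPIDR_EL2 when EL2 is enabled. */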
4036 static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri)
4037 {
4038     unsigned int cur_el = arm_current_el(env);
4039 
4040     if (arm_is_el2_enabled(env) && cur_el == 1) {
4041         return env->cp15.vpidr_el2;
4042     }
4043     return raw_read(env, ri);
4044 }
4045 
4046 static uint64_t mpidr_read_val(CPUARMState *env)
4047 {
4048     ARMCPU *cpu = env_archcpu(env);
4049     uint64_t mpidr = cpu->mp_affinity;
4050 
4051     if (arm_feature(env, ARM_FEATURE_V7MP)) {
4052         mpidr |= (1U << 31);
4053         /* Cores which are uniprocessor (non-coherent)
4054          * but still implement the MP extensions set
4055          * bit 30. (For instance, Cortex-R5).
4056          */
4057         if (cpu->mp_is_up) {
4058             mpidr |= (1u << 30);
4059         }
4060     }
4061     return mpidr;
4062 }
4063 
4064 static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
4065 {
4066     unsigned int cur_el = arm_current_el(env);
4067 
4068     if (arm_is_el2_enabled(env) && cur_el == 1) {
4069         return env->cp15.vmpidr_el2;
4070     }
4071     return mpidr_read_val(env);
4072 }
4073 
4074 static const ARMCPRegInfo lpae_cp_reginfo[] = {
4075     /* NOP AMAIR0/1 */
4076     { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
4077       .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
4078       .access = PL1_RW, .accessfn = access_tvm_trvm,
4079       .type = ARM_CP_CONST, .resetvalue = 0 },
4080     /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
4081     { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
4082       .access = PL1_RW, .accessfn = access_tvm_trvm,
4083       .type = ARM_CP_CONST, .resetvalue = 0 },
4084     { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
4085       .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0,
4086       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s),
4087                              offsetof(CPUARMState, cp15.par_ns)} },
4088     { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
4089       .access = PL1_RW, .accessfn = access_tvm_trvm,
4090       .type = ARM_CP_64BIT | ARM_CP_ALIAS,
4091       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
4092                              offsetof(CPUARMState, cp15.ttbr0_ns) },
4093       .writefn = vmsa_ttbr_write, },
4094     { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
4095       .access = PL1_RW, .accessfn = access_tvm_trvm,
4096       .type = ARM_CP_64BIT | ARM_CP_ALIAS,
4097       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
4098                              offsetof(CPUARMState, cp15.ttbr1_ns) },
4099       .writefn = vmsa_ttbr_write, },
4100 };
4101 
4102 static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
4103 {
4104     return vfp_get_fpcr(env);
4105 }
4106 
4107 static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4108                             uint64_t value)
4109 {
4110     vfp_set_fpcr(env, value);
4111 }
4112 
4113 static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
4114 {
4115     return vfp_get_fpsr(env);
4116 }
4117 
4118 static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4119                             uint64_t value)
4120 {
4121     vfp_set_fpsr(env, value);
4122 }
4123 
4124 static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri,
4125                                        bool isread)
4126 {
4127     if (arm_current_el(env) == 0 && !(arm_sctlr(env, 0) & SCTLR_UMA)) {
4128         return CP_ACCESS_TRAP;
4129     }
4130     return CP_ACCESS_OK;
4131 }
4132 
4133 static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
4134                             uint64_t value)
4135 {
4136     env->daif = value & PSTATE_DAIF;
4137 }
4138 
4139 static uint64_t aa64_pan_read(CPUARMState *env, const ARMCPRegInfo *ri)
4140 {
4141     return env->pstate & PSTATE_PAN;
4142 }
4143 
4144 static void aa64_pan_write(CPUARMState *env, const ARMCPRegInfo *ri,
4145                            uint64_t value)
4146 {
4147     env->pstate = (env->pstate & ~PSTATE_PAN) | (value & PSTATE_PAN);
4148 }
4149 
4150 static const ARMCPRegInfo pan_reginfo = {
4151     .name = "PAN", .state = ARM_CP_STATE_AA64,
4152     .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 3,
4153     .type = ARM_CP_NO_RAW, .access = PL1_RW,
4154     .readfn = aa64_pan_read, .writefn = aa64_pan_write
4155 };
4156 
4157 static uint64_t aa64_uao_read(CPUARMState *env, const ARMCPRegInfo *ri)
4158 {
4159     return env->pstate & PSTATE_UAO;
4160 }
4161 
4162 static void aa64_uao_write(CPUARMState *env, const ARMCPRegInfo *ri,
4163                            uint64_t value)
4164 {
4165     env->pstate = (env->pstate & ~PSTATE_UAO) | (value & PSTATE_UAO);
4166 }
4167 
4168 static const ARMCPRegInfo uao_reginfo = {
4169     .name = "UAO", .state = ARM_CP_STATE_AA64,
4170     .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 4,
4171     .type = ARM_CP_NO_RAW, .access = PL1_RW,
4172     .readfn = aa64_uao_read, .writefn = aa64_uao_write
4173 };
4174 
4175 static uint64_t aa64_dit_read(CPUARMState *env, const ARMCPRegInfo *ri)
4176 {
4177     return env->pstate & PSTATE_DIT;
4178 }
4179 
4180 static void aa64_dit_write(CPUARMState *env, const ARMCPRegInfo *ri,
4181                            uint64_t value)
4182 {
4183     env->pstate = (env->pstate & ~PSTATE_DIT) | (value & PSTATE_DIT);
4184 }
4185 
4186 static const ARMCPRegInfo dit_reginfo = {
4187     .name = "DIT", .state = ARM_CP_STATE_AA64,
4188     .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 5,
4189     .type = ARM_CP_NO_RAW, .access = PL0_RW,
4190     .readfn = aa64_dit_read, .writefn = aa64_dit_write
4191 };
4192 
4193 static uint64_t aa64_ssbs_read(CPUARMState *env, const ARMCPRegInfo *ri)
4194 {
4195     return env->pstate & PSTATE_SSBS;
4196 }
4197 
4198 static void aa64_ssbs_write(CPUARMState *env, const ARMCPRegInfo *ri,
4199                            uint64_t value)
4200 {
4201     env->pstate = (env->pstate & ~PSTATE_SSBS) | (value & PSTATE_SSBS);
4202 }
4203 
4204 static const ARMCPRegInfo ssbs_reginfo = {
4205     .name = "SSBS", .state = ARM_CP_STATE_AA64,
4206     .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 6,
4207     .type = ARM_CP_NO_RAW, .access = PL0_RW,
4208     .readfn = aa64_ssbs_read, .writefn = aa64_ssbs_write
4209 };
4210 
4211 static CPAccessResult aa64_cacheop_poc_access(CPUARMState *env,
4212                                               const ARMCPRegInfo *ri,
4213                                               bool isread)
4214 {
4215     /* Cache invalidate/clean to Point of Coherency or Persistence...  */
4216     switch (arm_current_el(env)) {
4217     case 0:
4218         /* ... EL0 must UNDEF unless SCTLR_EL1.UCI is set.  */
4219         if (!(arm_sctlr(env, 0) & SCTLR_UCI)) {
4220             return CP_ACCESS_TRAP;
4221         }
4222         /* fall through */
4223     case 1:
4224         /* ... EL1 must trap to EL2 if HCR_EL2.TPCP is set.  */
4225         if (arm_hcr_el2_eff(env) & HCR_TPCP) {
4226             return CP_ACCESS_TRAP_EL2;
4227         }
4228         break;
4229     }
4230     return CP_ACCESS_OK;
4231 }
4232 
4233 static CPAccessResult aa64_cacheop_pou_access(CPUARMState *env,
4234                                               const ARMCPRegInfo *ri,
4235                                               bool isread)
4236 {
4237     /* Cache invalidate/clean to Point of Unification... */
4238     switch (arm_current_el(env)) {
4239     case 0:
4240         /* ... EL0 must UNDEF unless SCTLR_EL1.UCI is set.  */
4241         if (!(arm_sctlr(env, 0) & SCTLR_UCI)) {
4242             return CP_ACCESS_TRAP;
4243         }
4244         /* fall through */
4245     case 1:
4246         /* ... EL1 must trap to EL2 if HCR_EL2.TPU is set.  */
4247         if (arm_hcr_el2_eff(env) & HCR_TPU) {
4248             return CP_ACCESS_TRAP_EL2;
4249         }
4250         break;
4251     }
4252     return CP_ACCESS_OK;
4253 }
4254 
4255 /* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions
4256  * Page D4-1736 (DDI0487A.b)
4257  */
4258 
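/*
 * MMU indexes to flush for a TLBI targeting the EL1&0 regime: with
 * HCR_EL2.{E2H,TGE} both set this redirects to the EL2&0 regime, and
 * the Secure variants of the indexes are used when appropriate.
 */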
4259 static int vae1_tlbmask(CPUARMState *env)
4260 {
4261     uint64_t hcr = arm_hcr_el2_eff(env);
4262     uint16_t mask;
4263 
4264     if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
4265         mask = ARMMMUIdxBit_E20_2 |
4266                ARMMMUIdxBit_E20_2_PAN |
4267                ARMMMUIdxBit_E20_0;
4268     } else {
4269         mask = ARMMMUIdxBit_E10_1 |
4270                ARMMMUIdxBit_E10_1_PAN |
4271                ARMMMUIdxBit_E10_0;
4272     }
4273 
4274     if (arm_is_secure_below_el3(env)) {
4275         mask >>= ARM_MMU_IDX_A_NS;
4276     }
4277 
4278     return mask;
4279 }
4280 
4281 /* Return 56 if TBI is enabled, 64 otherwise. */
4282 static int tlbbits_for_regime(CPUARMState *env, ARMMMUIdx mmu_idx,
4283                               uint64_t addr)
4284 {
4285     uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
4286     int tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
4287     int select = extract64(addr, 55, 1);
4288 
4289     return (tbi >> select) & 1 ? 56 : 64;
4290 }
4291 
4292 static int vae1_tlbbits(CPUARMState *env, uint64_t addr)
4293 {
4294     uint64_t hcr = arm_hcr_el2_eff(env);
4295     ARMMMUIdx mmu_idx;
4296 
4297     /* Only the regime of the mmu_idx below is significant. */
4298     if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
4299         mmu_idx = ARMMMUIdx_E20_0;
4300     } else {
4301         mmu_idx = ARMMMUIdx_E10_0;
4302     }
4303 
4304     if (arm_is_secure_below_el3(env)) {
4305         mmu_idx &= ~ARM_MMU_IDX_A_NS;
4306     }
4307 
4308     return tlbbits_for_regime(env, mmu_idx, addr);
4309 }
4310 
4311 static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4312                                       uint64_t value)
4313 {
4314     CPUState *cs = env_cpu(env);
4315     int mask = vae1_tlbmask(env);
4316 
4317     tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
4318 }
4319 
4320 static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
4321                                     uint64_t value)
4322 {
4323     CPUState *cs = env_cpu(env);
4324     int mask = vae1_tlbmask(env);
4325 
4326     if (tlb_force_broadcast(env)) {
4327         tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
4328     } else {
4329         tlb_flush_by_mmuidx(cs, mask);
4330     }
4331 }
4332 
4333 static int alle1_tlbmask(CPUARMState *env)
4334 {
4335     /*
4336      * Note that the 'ALL' scope must invalidate both stage 1 and
4337      * stage 2 translations, whereas most other scopes only invalidate
4338      * stage 1 translations.
4339      */
4340     if (arm_is_secure_below_el3(env)) {
4341         return ARMMMUIdxBit_SE10_1 |
4342                ARMMMUIdxBit_SE10_1_PAN |
4343                ARMMMUIdxBit_SE10_0;
4344     } else {
4345         return ARMMMUIdxBit_E10_1 |
4346                ARMMMUIdxBit_E10_1_PAN |
4347                ARMMMUIdxBit_E10_0;
4348     }
4349 }
4350 
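     /*
      * Return the MMU index bits for the EL2 and EL2&0 regimes,
      * Secure or Non-secure as appropriate.
      */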
4351 static int e2_tlbmask(CPUARMState *env)
4352 {
4353     if (arm_is_secure_below_el3(env)) {
4354         return ARMMMUIdxBit_SE20_0 |
4355                ARMMMUIdxBit_SE20_2 |
4356                ARMMMUIdxBit_SE20_2_PAN |
4357                ARMMMUIdxBit_SE2;
4358     } else {
4359         return ARMMMUIdxBit_E20_0 |
4360                ARMMMUIdxBit_E20_2 |
4361                ARMMMUIdxBit_E20_2_PAN |
4362                ARMMMUIdxBit_E2;
4363     }
4364 }
4365 
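     /*
      * TLBI ALLE1: flush all EL1&0 entries (stage 1 and stage 2) on
      * this CPU only; the IS variant below broadcasts to all CPUs.
      */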
4366 static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
4367                                   uint64_t value)
4368 {
4369     CPUState *cs = env_cpu(env);
4370     int mask = alle1_tlbmask(env);
4371 
4372     tlb_flush_by_mmuidx(cs, mask);
4373 }
4374 
4375 static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
4376                                   uint64_t value)
4377 {
4378     CPUState *cs = env_cpu(env);
4379     int mask = e2_tlbmask(env);
4380 
4381     tlb_flush_by_mmuidx(cs, mask);
4382 }
4383 
4384 static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
4385                                   uint64_t value)
4386 {
4387     ARMCPU *cpu = env_archcpu(env);
4388     CPUState *cs = CPU(cpu);
4389 
4390     tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_SE3);
4391 }
4392 
4393 static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4394                                     uint64_t value)
4395 {
4396     CPUState *cs = env_cpu(env);
4397     int mask = alle1_tlbmask(env);
4398 
4399     tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
4400 }
4401 
4402 static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4403                                     uint64_t value)
4404 {
4405     CPUState *cs = env_cpu(env);
4406     int mask = e2_tlbmask(env);
4407 
4408     tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
4409 }
4410 
4411 static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4412                                     uint64_t value)
4413 {
4414     CPUState *cs = env_cpu(env);
4415 
4416     tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_SE3);
4417 }
4418 
4419 static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
4420                                  uint64_t value)
4421 {
4422     /* Invalidate by VA, EL2
4423      * Currently handles both VAE2 and VALE2, since we don't support
4424      * flush-last-level-only.
4425      */
4426     CPUState *cs = env_cpu(env);
4427     int mask = e2_tlbmask(env);
4428     uint64_t pageaddr = sextract64(value << 12, 0, 56);
4429 
4430     tlb_flush_page_by_mmuidx(cs, pageaddr, mask);
4431 }
4432 
4433 static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
4434                                  uint64_t value)
4435 {
4436     /* Invalidate by VA, EL3
4437      * Currently handles both VAE3 and VALE3, since we don't support
4438      * flush-last-level-only.
4439      */
4440     ARMCPU *cpu = env_archcpu(env);
4441     CPUState *cs = CPU(cpu);
4442     uint64_t pageaddr = sextract64(value << 12, 0, 56);
4443 
4444     tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_SE3);
4445 }
4446 
4447 static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4448                                    uint64_t value)
4449 {
4450     CPUState *cs = env_cpu(env);
4451     int mask = vae1_tlbmask(env);
4452     uint64_t pageaddr = sextract64(value << 12, 0, 56);
4453     int bits = vae1_tlbbits(env, pageaddr);
4454 
4455     tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits);
4456 }
4457 
4458 static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
4459                                  uint64_t value)
4460 {
4461     /* Invalidate by VA, EL1&0 (AArch64 version).
4462      * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1,
4463      * since we don't support flush-for-specific-ASID-only or
4464      * flush-last-level-only.
4465      */
4466     CPUState *cs = env_cpu(env);
4467     int mask = vae1_tlbmask(env);
4468     uint64_t pageaddr = sextract64(value << 12, 0, 56);
4469     int bits = vae1_tlbbits(env, pageaddr);
4470 
4471     if (tlb_force_broadcast(env)) {
4472         tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits);
4473     } else {
4474         tlb_flush_page_bits_by_mmuidx(cs, pageaddr, mask, bits);
4475     }
4476 }
4477 
4478 static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4479                                    uint64_t value)
4480 {
4481     CPUState *cs = env_cpu(env);
4482     uint64_t pageaddr = sextract64(value << 12, 0, 56);
4483     bool secure = arm_is_secure_below_el3(env);
4484     int mask = secure ? ARMMMUIdxBit_SE2 : ARMMMUIdxBit_E2;
4485     int bits = tlbbits_for_regime(env, secure ? ARMMMUIdx_SE2 : ARMMMUIdx_E2,
4486                                   pageaddr);
4487 
4488     tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits);
4489 }
4490 
4491 static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4492                                    uint64_t value)
4493 {
4494     CPUState *cs = env_cpu(env);
4495     uint64_t pageaddr = sextract64(value << 12, 0, 56);
4496     int bits = tlbbits_for_regime(env, ARMMMUIdx_SE3, pageaddr);
4497 
4498     tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr,
4499                                                   ARMMMUIdxBit_SE3, bits);
4500 }
4501 
4502 #ifdef TARGET_AARCH64
4503 typedef struct {
4504     uint64_t base;
4505     uint64_t length;
4506 } TLBIRange;
4507 
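     /*
      * Decode the address range described by a TLBI RVA* operand:
      * the TG field must match the granule in use, and BaseADDR, NUM
      * and SCALE together give the start address and length of the
      * region to invalidate.
      */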
4508 static TLBIRange tlbi_aa64_get_range(CPUARMState *env, ARMMMUIdx mmuidx,
4509                                      uint64_t value)
4510 {
4511     unsigned int page_size_granule, page_shift, num, scale, exponent;
4512     /* Extract one bit to represent the va selector in use. */
4513     uint64_t select = sextract64(value, 36, 1);
4514     ARMVAParameters param = aa64_va_parameters(env, select, mmuidx, true);
4515     TLBIRange ret = { };
4516 
4517     page_size_granule = extract64(value, 46, 2);
4518 
4519     /* The granule encoded in value must match the granule in use. */
4520     if (page_size_granule != (param.using64k ? 3 : param.using16k ? 2 : 1)) {
4521         qemu_log_mask(LOG_GUEST_ERROR, "Invalid tlbi page size granule %d\n",
4522                       page_size_granule);
4523         return ret;
4524     }
4525 
4526     page_shift = (page_size_granule - 1) * 2 + 12;
4527     num = extract64(value, 39, 5);
4528     scale = extract64(value, 44, 2);
4529     exponent = (5 * scale) + 1;
4530 
4531     ret.length = (num + 1) << (exponent + page_shift);
4532 
4533     if (param.select) {
4534         ret.base = sextract64(value, 0, 37);
4535     } else {
4536         ret.base = extract64(value, 0, 37);
4537     }
4538     if (param.ds) {
4539         /*
4540          * With DS=1, BaseADDR is always shifted 16 so that it is able
4541          * to address all 52 va bits.  The input address is perforce
4542          * aligned on a 64k boundary regardless of translation granule.
4543          */
4544         page_shift = 16;
4545     }
4546     ret.base <<= page_shift;
4547 
4548     return ret;
4549 }
4550 
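     /*
      * Common handler for the range TLBI operations: decode the range
      * using one representative MMU index from idxmap, then perform
      * either a local or a broadcast (synced) range flush.
      */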
4551 static void do_rvae_write(CPUARMState *env, uint64_t value,
4552                           int idxmap, bool synced)
4553 {
4554     ARMMMUIdx one_idx = ARM_MMU_IDX_A | ctz32(idxmap);
4555     TLBIRange range;
4556     int bits;
4557 
4558     range = tlbi_aa64_get_range(env, one_idx, value);
4559     bits = tlbbits_for_regime(env, one_idx, range.base);
4560 
4561     if (synced) {
4562         tlb_flush_range_by_mmuidx_all_cpus_synced(env_cpu(env),
4563                                                   range.base,
4564                                                   range.length,
4565                                                   idxmap,
4566                                                   bits);
4567     } else {
4568         tlb_flush_range_by_mmuidx(env_cpu(env), range.base,
4569                                   range.length, idxmap, bits);
4570     }
4571 }
4572 
4573 static void tlbi_aa64_rvae1_write(CPUARMState *env,
4574                                   const ARMCPRegInfo *ri,
4575                                   uint64_t value)
4576 {
4577     /*
4578      * Invalidate by VA range, EL1&0.
4579      * Currently handles all of RVAE1, RVAAE1, RVAALE1 and RVALE1,
4580      * since we don't support flush-for-specific-ASID-only or
4581      * flush-last-level-only.
4582      */
4583 
4584     do_rvae_write(env, value, vae1_tlbmask(env),
4585                   tlb_force_broadcast(env));
4586 }
4587 
4588 static void tlbi_aa64_rvae1is_write(CPUARMState *env,
4589                                     const ARMCPRegInfo *ri,
4590                                     uint64_t value)
4591 {
4592     /*
4593      * Invalidate by VA range, Inner/Outer Shareable EL1&0.
4594      * Currently handles all of RVAE1IS, RVAE1OS, RVAAE1IS, RVAAE1OS,
4595      * RVAALE1IS, RVAALE1OS, RVALE1IS and RVALE1OS, since we don't support
4596      * flush-for-specific-ASID-only, flush-last-level-only or inner/outer
4597      * shareable specific flushes.
4598      */
4599 
4600     do_rvae_write(env, value, vae1_tlbmask(env), true);
4601 }
4602 
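     /* Return the MMU index bit for the EL2 regime, Secure or Non-secure. */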
4603 static int vae2_tlbmask(CPUARMState *env)
4604 {
4605     return (arm_is_secure_below_el3(env)
4606             ? ARMMMUIdxBit_SE2 : ARMMMUIdxBit_E2);
4607 }
4608 
4609 static void tlbi_aa64_rvae2_write(CPUARMState *env,
4610                                   const ARMCPRegInfo *ri,
4611                                   uint64_t value)
4612 {
4613     /*
4614      * Invalidate by VA range, EL2.
4615      * Currently handles all of RVAE2 and RVALE2,
4616      * since we don't support flush-for-specific-ASID-only or
4617      * flush-last-level-only.
4618      */
4619 
4620     do_rvae_write(env, value, vae2_tlbmask(env),
4621                   tlb_force_broadcast(env));
4624 }
4625 
4626 static void tlbi_aa64_rvae2is_write(CPUARMState *env,
4627                                     const ARMCPRegInfo *ri,
4628                                     uint64_t value)
4629 {
4630     /*
4631      * Invalidate by VA range, Inner/Outer Shareable, EL2.
4632      * Currently handles all of RVAE2IS, RVAE2OS, RVALE2IS and RVALE2OS,
4633      * since we don't support flush-for-specific-ASID-only,
4634      * flush-last-level-only or inner/outer shareable specific flushes.
4635      */
4636 
4637     do_rvae_write(env, value, vae2_tlbmask(env), true);
4639 }
4640 
4641 static void tlbi_aa64_rvae3_write(CPUARMState *env,
4642                                   const ARMCPRegInfo *ri,
4643                                   uint64_t value)
4644 {
4645     /*
4646      * Invalidate by VA range, EL3.
4647      * Currently handles all of RVAE3 and RVALE3,
4648      * since we don't support flush-for-specific-ASID-only or
4649      * flush-last-level-only.
4650      */
4651 
4652     do_rvae_write(env, value, ARMMMUIdxBit_SE3,
4653                   tlb_force_broadcast(env));
4654 }
4655 
4656 static void tlbi_aa64_rvae3is_write(CPUARMState *env,
4657                                     const ARMCPRegInfo *ri,
4658                                     uint64_t value)
4659 {
4660     /*
4661      * Invalidate by VA range, EL3, Inner/Outer Shareable.
4662      * Currently handles all of RVAE3IS, RVAE3OS, RVALE3IS and RVALE3OS,
4663      * since we don't support flush-for-specific-ASID-only,
4664      * flush-last-level-only or inner/outer shareable specific flushes.
4665      */
4666 
4667     do_rvae_write(env, value, ARMMMUIdxBit_SE3, true);
4668 }
4669 #endif
4670 
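     /*
      * Access check for DC ZVA: EL0 accesses trap unless the relevant
      * SCTLR_ELx.DZE bit is set, and EL0/EL1 accesses trap to EL2 when
      * HCR_EL2.TDZ is set.  Also used by aa64_dczid_read() below to
      * compute the DZP bit.
      */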
4671 static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
4672                                       bool isread)
4673 {
4674     int cur_el = arm_current_el(env);
4675 
4676     if (cur_el < 2) {
4677         uint64_t hcr = arm_hcr_el2_eff(env);
4678 
4679         if (cur_el == 0) {
4680             if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
4681                 if (!(env->cp15.sctlr_el[2] & SCTLR_DZE)) {
4682                     return CP_ACCESS_TRAP_EL2;
4683                 }
4684             } else {
4685                 if (!(env->cp15.sctlr_el[1] & SCTLR_DZE)) {
4686                     return CP_ACCESS_TRAP;
4687                 }
4688                 if (hcr & HCR_TDZ) {
4689                     return CP_ACCESS_TRAP_EL2;
4690                 }
4691             }
4692         } else if (hcr & HCR_TDZ) {
4693             return CP_ACCESS_TRAP_EL2;
4694         }
4695     }
4696     return CP_ACCESS_OK;
4697 }
4698 
4699 static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri)
4700 {
4701     ARMCPU *cpu = env_archcpu(env);
4702     int dzp_bit = 1 << 4;
4703 
4704     /* DZP indicates whether DC ZVA access is allowed */
4705     if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) {
4706         dzp_bit = 0;
4707     }
4708     return cpu->dcz_blocksize | dzp_bit;
4709 }
4710 
4711 static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
4712                                     bool isread)
4713 {
4714     if (!(env->pstate & PSTATE_SP)) {
4715         /* Access to SP_EL0 is undefined if it's being used as
4716          * the stack pointer.
4717          */
4718         return CP_ACCESS_TRAP_UNCATEGORIZED;
4719     }
4720     return CP_ACCESS_OK;
4721 }
4722 
4723 static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri)
4724 {
4725     return env->pstate & PSTATE_SP;
4726 }
4727 
4728 static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
4729 {
4730     update_spsel(env, val);
4731 }
4732 
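     /*
      * SCTLR writes: mask off bits for features this CPU does not
      * implement, and only flush the TLB when the value actually
      * changes, since the write may enable or disable the MMU.
      */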
4733 static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4734                         uint64_t value)
4735 {
4736     ARMCPU *cpu = env_archcpu(env);
4737 
4738     if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) {
4739         /* M bit is RAZ/WI for PMSA with no MPU implemented */
4740         value &= ~SCTLR_M;
4741     }
4742 
4743     /* ??? Lots of these bits are not implemented.  */
4744 
4745     if (ri->state == ARM_CP_STATE_AA64 && !cpu_isar_feature(aa64_mte, cpu)) {
4746         if (ri->opc1 == 6) { /* SCTLR_EL3 */
4747             value &= ~(SCTLR_ITFSB | SCTLR_TCF | SCTLR_ATA);
4748         } else {
4749             value &= ~(SCTLR_ITFSB | SCTLR_TCF0 | SCTLR_TCF |
4750                        SCTLR_ATA0 | SCTLR_ATA);
4751         }
4752     }
4753 
4754     if (raw_read(env, ri) == value) {
4755         /* Skip the TLB flush if nothing actually changed; Linux likes
4756          * to do a lot of pointless SCTLR writes.
4757          */
4758         return;
4759     }
4760 
4761     raw_write(env, ri, value);
4762 
4763     /* This may enable/disable the MMU, so do a TLB flush.  */
4764     tlb_flush(CPU(cpu));
4765 
4766     if (ri->type & ARM_CP_SUPPRESS_TB_END) {
4767         /*
4768          * Normally we would always end the TB on an SCTLR write; see the
4769          * comment in ARMCPRegInfo sctlr initialization below for why Xscale
4770          * is special.  Setting ARM_CP_SUPPRESS_TB_END also stops the rebuild
4771          * of hflags from the translator, so do it here.
4772          */
4773         arm_rebuild_hflags(env);
4774     }
4775 }
4776 
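     /*
      * SDCR is the AArch32 view of the low half of MDCR_EL3; only the
      * architecturally valid bits are written.
      */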
4777 static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4778                        uint64_t value)
4779 {
4780     env->cp15.mdcr_el3 = value & SDCR_VALID_MASK;
4781 }
4782 
4783 static const ARMCPRegInfo v8_cp_reginfo[] = {
4784     /* Minimal set of EL0-visible registers. This will need to be expanded
4785      * significantly for system emulation of AArch64 CPUs.
4786      */
4787     { .name = "NZCV", .state = ARM_CP_STATE_AA64,
4788       .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2,
4789       .access = PL0_RW, .type = ARM_CP_NZCV },
4790     { .name = "DAIF", .state = ARM_CP_STATE_AA64,
4791       .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2,
4792       .type = ARM_CP_NO_RAW,
4793       .access = PL0_RW, .accessfn = aa64_daif_access,
4794       .fieldoffset = offsetof(CPUARMState, daif),
4795       .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore },
4796     { .name = "FPCR", .state = ARM_CP_STATE_AA64,
4797       .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4,
4798       .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
4799       .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write },
4800     { .name = "FPSR", .state = ARM_CP_STATE_AA64,
4801       .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4,
4802       .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
4803       .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write },
4804     { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64,
4805       .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0,
4806       .access = PL0_R, .type = ARM_CP_NO_RAW,
4807       .readfn = aa64_dczid_read },
4808     { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64,
4809       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1,
4810       .access = PL0_W, .type = ARM_CP_DC_ZVA,
4811 #ifndef CONFIG_USER_ONLY
4812       /* Avoid overhead of an access check that always passes in user-mode */
4813       .accessfn = aa64_zva_access,
4814 #endif
4815     },
4816     { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64,
4817       .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2,
4818       .access = PL1_R, .type = ARM_CP_CURRENTEL },
4819     /* Cache ops: all NOPs since we don't emulate caches */
4820     { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64,
4821       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
4822       .access = PL1_W, .type = ARM_CP_NOP,
4823       .accessfn = aa64_cacheop_pou_access },
4824     { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64,
4825       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
4826       .access = PL1_W, .type = ARM_CP_NOP,
4827       .accessfn = aa64_cacheop_pou_access },
4828     { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64,
4829       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1,
4830       .access = PL0_W, .type = ARM_CP_NOP,
4831       .accessfn = aa64_cacheop_pou_access },
4832     { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64,
4833       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
4834       .access = PL1_W, .accessfn = aa64_cacheop_poc_access,
4835       .type = ARM_CP_NOP },
4836     { .name = "DC_ISW", .state = ARM_CP_STATE_AA64,
4837       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
4838       .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
4839     { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64,
4840       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1,
4841       .access = PL0_W, .type = ARM_CP_NOP,
4842       .accessfn = aa64_cacheop_poc_access },
4843     { .name = "DC_CSW", .state = ARM_CP_STATE_AA64,
4844       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
4845       .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
4846     { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64,
4847       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1,
4848       .access = PL0_W, .type = ARM_CP_NOP,
4849       .accessfn = aa64_cacheop_pou_access },
4850     { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64,
4851       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1,
4852       .access = PL0_W, .type = ARM_CP_NOP,
4853       .accessfn = aa64_cacheop_poc_access },
4854     { .name = "DC_CISW", .state = ARM_CP_STATE_AA64,
4855       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
4856       .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
4857     /* TLBI operations */
4858     { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64,
4859       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
4860       .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
4861       .writefn = tlbi_aa64_vmalle1is_write },
4862     { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64,
4863       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
4864       .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
4865       .writefn = tlbi_aa64_vae1is_write },
4866     { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64,
4867       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
4868       .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
4869       .writefn = tlbi_aa64_vmalle1is_write },
4870     { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64,
4871       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
4872       .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
4873       .writefn = tlbi_aa64_vae1is_write },
4874     { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64,
4875       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
4876       .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
4877       .writefn = tlbi_aa64_vae1is_write },
4878     { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64,
4879       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
4880       .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
4881       .writefn = tlbi_aa64_vae1is_write },
4882     { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64,
4883       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
4884       .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
4885       .writefn = tlbi_aa64_vmalle1_write },
4886     { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64,
4887       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
4888       .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
4889       .writefn = tlbi_aa64_vae1_write },
4890     { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64,
4891       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
4892       .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
4893       .writefn = tlbi_aa64_vmalle1_write },
4894     { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64,
4895       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
4896       .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
4897       .writefn = tlbi_aa64_vae1_write },
4898     { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64,
4899       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
4900       .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
4901       .writefn = tlbi_aa64_vae1_write },
4902     { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64,
4903       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
4904       .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
4905       .writefn = tlbi_aa64_vae1_write },
4906     { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64,
4907       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
4908       .access = PL2_W, .type = ARM_CP_NOP },
4909     { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64,
4910       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
4911       .access = PL2_W, .type = ARM_CP_NOP },
4912     { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64,
4913       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
4914       .access = PL2_W, .type = ARM_CP_NO_RAW,
4915       .writefn = tlbi_aa64_alle1is_write },
4916     { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64,
4917       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6,
4918       .access = PL2_W, .type = ARM_CP_NO_RAW,
4919       .writefn = tlbi_aa64_alle1is_write },
4920     { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64,
4921       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
4922       .access = PL2_W, .type = ARM_CP_NOP },
4923     { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64,
4924       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
4925       .access = PL2_W, .type = ARM_CP_NOP },
4926     { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64,
4927       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
4928       .access = PL2_W, .type = ARM_CP_NO_RAW,
4929       .writefn = tlbi_aa64_alle1_write },
4930     { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64,
4931       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6,
4932       .access = PL2_W, .type = ARM_CP_NO_RAW,
4933       .writefn = tlbi_aa64_alle1is_write },
4934 #ifndef CONFIG_USER_ONLY
4935     /* 64 bit address translation operations */
4936     { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
4937       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0,
4938       .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
4939       .writefn = ats_write64 },
4940     { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
4941       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1,
4942       .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
4943       .writefn = ats_write64 },
4944     { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64,
4945       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2,
4946       .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
4947       .writefn = ats_write64 },
4948     { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
4949       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
4950       .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
4951       .writefn = ats_write64 },
4952     { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64,
4953       .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4,
4954       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
4955       .writefn = ats_write64 },
4956     { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64,
4957       .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5,
4958       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
4959       .writefn = ats_write64 },
4960     { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64,
4961       .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6,
4962       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
4963       .writefn = ats_write64 },
4964     { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64,
4965       .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7,
4966       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
4967       .writefn = ats_write64 },
4968     /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */
4969     { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64,
4970       .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0,
4971       .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
4972       .writefn = ats_write64 },
4973     { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64,
4974       .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1,
4975       .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
4976       .writefn = ats_write64 },
4977     { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64,
4978       .type = ARM_CP_ALIAS,
4979       .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0,
4980       .access = PL1_RW, .resetvalue = 0,
4981       .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]),
4982       .writefn = par_write },
4983 #endif
4984     /* TLB invalidate last level of translation table walk */
4985     { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
4986       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
4987       .writefn = tlbimva_is_write },
4988     { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
4989       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
4990       .writefn = tlbimvaa_is_write },
4991     { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
4992       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
4993       .writefn = tlbimva_write },
4994     { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
4995       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
4996       .writefn = tlbimvaa_write },
4997     { .name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
4998       .type = ARM_CP_NO_RAW, .access = PL2_W,
4999       .writefn = tlbimva_hyp_write },
5000     { .name = "TLBIMVALHIS",
5001       .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
5002       .type = ARM_CP_NO_RAW, .access = PL2_W,
5003       .writefn = tlbimva_hyp_is_write },
5004     { .name = "TLBIIPAS2",
5005       .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
5006       .type = ARM_CP_NOP, .access = PL2_W },
5007     { .name = "TLBIIPAS2IS",
5008       .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
5009       .type = ARM_CP_NOP, .access = PL2_W },
5010     { .name = "TLBIIPAS2L",
5011       .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
5012       .type = ARM_CP_NOP, .access = PL2_W },
5013     { .name = "TLBIIPAS2LIS",
5014       .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
5015       .type = ARM_CP_NOP, .access = PL2_W },
5016     /* 32 bit cache operations */
5017     { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
5018       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access },
5019     { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6,
5020       .type = ARM_CP_NOP, .access = PL1_W },
5021     { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
5022       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access },
5023     { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1,
5024       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access },
5025     { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6,
5026       .type = ARM_CP_NOP, .access = PL1_W },
5027     { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7,
5028       .type = ARM_CP_NOP, .access = PL1_W },
5029     { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
5030       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
5031     { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
5032       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
5033     { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1,
5034       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
5035     { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
5036       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
5037     { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1,
5038       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access },
5039     { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1,
5040       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
5041     { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
5042       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
5043     /* MMU Domain access control / MPU write buffer control */
5044     { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0,
5045       .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
5046       .writefn = dacr_write, .raw_writefn = raw_write,
5047       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
5048                              offsetoflow32(CPUARMState, cp15.dacr_ns) } },
5049     { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
5050       .type = ARM_CP_ALIAS,
5051       .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
5052       .access = PL1_RW,
5053       .fieldoffset = offsetof(CPUARMState, elr_el[1]) },
5054     { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
5055       .type = ARM_CP_ALIAS,
5056       .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
5057       .access = PL1_RW,
5058       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) },
5059     /* We rely on the access checks not allowing the guest to write to the
5060      * state field when SPSel indicates that it's being used as the stack
5061      * pointer.
5062      */
5063     { .name = "SP_EL0", .state = ARM_CP_STATE_AA64,
5064       .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0,
5065       .access = PL1_RW, .accessfn = sp_el0_access,
5066       .type = ARM_CP_ALIAS,
5067       .fieldoffset = offsetof(CPUARMState, sp_el[0]) },
5068     { .name = "SP_EL1", .state = ARM_CP_STATE_AA64,
5069       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0,
5070       .access = PL2_RW, .type = ARM_CP_ALIAS,
5071       .fieldoffset = offsetof(CPUARMState, sp_el[1]) },
5072     { .name = "SPSel", .state = ARM_CP_STATE_AA64,
5073       .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0,
5074       .type = ARM_CP_NO_RAW,
5075       .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
5076     { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64,
5077       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0,
5078       .access = PL2_RW,
5079       .type = ARM_CP_ALIAS | ARM_CP_FPU | ARM_CP_EL3_NO_EL2_KEEP,
5080       .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]) },
5081     { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64,
5082       .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0,
5083       .access = PL2_RW, .resetvalue = 0, .type = ARM_CP_EL3_NO_EL2_KEEP,
5084       .writefn = dacr_write, .raw_writefn = raw_write,
5085       .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) },
5086     { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64,
5087       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1,
5088       .access = PL2_RW, .resetvalue = 0, .type = ARM_CP_EL3_NO_EL2_KEEP,
5089       .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) },
5090     { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64,
5091       .type = ARM_CP_ALIAS,
5092       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0,
5093       .access = PL2_RW,
5094       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) },
5095     { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64,
5096       .type = ARM_CP_ALIAS,
5097       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1,
5098       .access = PL2_RW,
5099       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) },
5100     { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64,
5101       .type = ARM_CP_ALIAS,
5102       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2,
5103       .access = PL2_RW,
5104       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) },
5105     { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64,
5106       .type = ARM_CP_ALIAS,
5107       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3,
5108       .access = PL2_RW,
5109       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) },
5110     { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64,
5111       .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1,
5112       .resetvalue = 0,
5113       .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) },
5114     { .name = "SDCR", .type = ARM_CP_ALIAS,
5115       .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1,
5116       .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
5117       .writefn = sdcr_write,
5118       .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) },
5119 };
5120 
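     /*
      * Common handler for writes to HCR_EL2 and its AArch32 HCR/HCR2
      * views: build the mask of writable bits from the implemented
      * features, clear RES0 bits, flush the TLB if any bit affecting
      * the MMU configuration changed, and re-evaluate the virtual
      * interrupt lines.
      */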
5121 static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask)
5122 {
5123     ARMCPU *cpu = env_archcpu(env);
5124 
5125     if (arm_feature(env, ARM_FEATURE_V8)) {
5126         valid_mask |= MAKE_64BIT_MASK(0, 34);  /* ARMv8.0 */
5127     } else {
5128         valid_mask |= MAKE_64BIT_MASK(0, 28);  /* ARMv7VE */
5129     }
5130 
5131     if (arm_feature(env, ARM_FEATURE_EL3)) {
5132         valid_mask &= ~HCR_HCD;
5133     } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
5134         /* Architecturally HCR.TSC is RES0 if EL3 is not implemented.
5135          * However, if we're using the SMC PSCI conduit then QEMU is
5136          * effectively acting like EL3 firmware and so the guest at
5137          * EL2 should retain the ability to prevent EL1 from being
5138          * able to make SMC calls into the ersatz firmware, so in
5139          * that case HCR.TSC should be read/write.
5140          */
5141         valid_mask &= ~HCR_TSC;
5142     }
5143 
5144     if (arm_feature(env, ARM_FEATURE_AARCH64)) {
5145         if (cpu_isar_feature(aa64_vh, cpu)) {
5146             valid_mask |= HCR_E2H;
5147         }
5148         if (cpu_isar_feature(aa64_ras, cpu)) {
5149             valid_mask |= HCR_TERR | HCR_TEA;
5150         }
5151         if (cpu_isar_feature(aa64_lor, cpu)) {
5152             valid_mask |= HCR_TLOR;
5153         }
5154         if (cpu_isar_feature(aa64_pauth, cpu)) {
5155             valid_mask |= HCR_API | HCR_APK;
5156         }
5157         if (cpu_isar_feature(aa64_mte, cpu)) {
5158             valid_mask |= HCR_ATA | HCR_DCT | HCR_TID5;
5159         }
5160         if (cpu_isar_feature(aa64_scxtnum, cpu)) {
5161             valid_mask |= HCR_ENSCXT;
5162         }
5163         if (cpu_isar_feature(aa64_fwb, cpu)) {
5164             valid_mask |= HCR_FWB;
5165         }
5166     }
5167 
5168     /* Clear RES0 bits.  */
5169     value &= valid_mask;
5170 
5171     /*
5172      * These bits change the MMU setup:
5173      * HCR_VM enables stage 2 translation
5174      * HCR_PTW forbids certain page-table setups
5175      * HCR_DC disables stage1 and enables stage2 translation
5176      * HCR_DCT enables tagging on (disabled) stage1 translation
5177      * HCR_FWB changes the interpretation of stage2 descriptor bits
5178      */
5179     if ((env->cp15.hcr_el2 ^ value) &
5180         (HCR_VM | HCR_PTW | HCR_DC | HCR_DCT | HCR_FWB)) {
5181         tlb_flush(CPU(cpu));
5182     }
5183     env->cp15.hcr_el2 = value;
5184 
5185     /*
5186      * Updates to VI and VF require us to update the status of
5187      * virtual interrupts, which are the logical OR of these bits
5188      * and the state of the input lines from the GIC. (This requires
5189      * that we have the iothread lock, which is done by marking the
5190      * reginfo structs as ARM_CP_IO.)
5191      * Note that if a write to HCR pends a VIRQ or VFIQ it is never
5192      * possible for it to be taken immediately, because VIRQ and
5193      * VFIQ are masked unless running at EL0 or EL1, and HCR
5194      * can only be written at EL2.
5195      */
5196     g_assert(qemu_mutex_iothread_locked());
5197     arm_cpu_update_virq(cpu);
5198     arm_cpu_update_vfiq(cpu);
5199     arm_cpu_update_vserr(cpu);
5200 }
5201 
5202 static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
5203 {
5204     do_hcr_write(env, value, 0);
5205 }
5206 
5207 static void hcr_writehigh(CPUARMState *env, const ARMCPRegInfo *ri,
5208                           uint64_t value)
5209 {
5210     /* Handle HCR2 write, i.e. write to high half of HCR_EL2 */
5211     value = deposit64(env->cp15.hcr_el2, 32, 32, value);
5212     do_hcr_write(env, value, MAKE_64BIT_MASK(0, 32));
5213 }
5214 
5215 static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri,
5216                          uint64_t value)
5217 {
5218     /* Handle HCR write, i.e. write to low half of HCR_EL2 */
5219     value = deposit64(env->cp15.hcr_el2, 0, 32, value);
5220     do_hcr_write(env, value, MAKE_64BIT_MASK(32, 32));
5221 }
5222 
5223 /*
5224  * Return the effective value of HCR_EL2.
5225  * Bits that are not included here:
5226  * RW       (read from SCR_EL3.RW as needed)
5227  */
5228 uint64_t arm_hcr_el2_eff(CPUARMState *env)
5229 {
5230     uint64_t ret = env->cp15.hcr_el2;
5231 
5232     if (!arm_is_el2_enabled(env)) {
5233         /*
5234          * "This register has no effect if EL2 is not enabled in the
5235          * current Security state".  This is ARMv8.4-SecEL2 speak for
5236          * !(SCR_EL3.NS==1 || SCR_EL3.EEL2==1).
5237          *
5238          * Prior to that, the language was "In an implementation that
5239          * includes EL3, when the value of SCR_EL3.NS is 0 the PE behaves
5240          * as if this field is 0 for all purposes other than a direct
5241          * read or write access of HCR_EL2".  With lots of enumeration
5242          * on a per-field basis.  In current QEMU, this is condition
5243          * on a per-field basis.  In current QEMU, this condition
5244          *
5245          * Since the v8.4 language applies to the entire register, and
5246          * appears to be backward compatible, use that.
5247          */
5248         return 0;
5249     }
5250 
5251     /*
5252      * For a cpu that supports both aarch64 and aarch32, we can set bits
5253      * in HCR_EL2 (e.g. via EL3) that are RES0 when we enter EL2 as aa32.
5254      * Ignore all of the bits in HCR+HCR2 that are not valid for aarch32.
5255      */
5256     if (!arm_el_is_aa64(env, 2)) {
5257         uint64_t aa32_valid;
5258 
5259         /*
5260          * These bits are up-to-date as of ARMv8.6.
5261          * For HCR, it's easiest to list just the 2 bits that are invalid.
5262          * For HCR2, list those that are valid.
5263          */
5264         aa32_valid = MAKE_64BIT_MASK(0, 32) & ~(HCR_RW | HCR_TDZ);
5265         aa32_valid |= (HCR_CD | HCR_ID | HCR_TERR | HCR_TEA | HCR_MIOCNCE |
5266                        HCR_TID4 | HCR_TICAB | HCR_TOCU | HCR_TTLBIS);
5267         ret &= aa32_valid;
5268     }
5269 
5270     if (ret & HCR_TGE) {
5271         /* These bits are up-to-date as of ARMv8.6.  */
5272         if (ret & HCR_E2H) {
5273             ret &= ~(HCR_VM | HCR_FMO | HCR_IMO | HCR_AMO |
5274                      HCR_BSU_MASK | HCR_DC | HCR_TWI | HCR_TWE |
5275                      HCR_TID0 | HCR_TID2 | HCR_TPCP | HCR_TPU |
5276                      HCR_TDZ | HCR_CD | HCR_ID | HCR_MIOCNCE |
5277                      HCR_TID4 | HCR_TICAB | HCR_TOCU | HCR_ENSCXT |
5278                      HCR_TTLBIS | HCR_TTLBOS | HCR_TID5);
5279         } else {
5280             ret |= HCR_FMO | HCR_IMO | HCR_AMO;
5281         }
5282         ret &= ~(HCR_SWIO | HCR_PTW | HCR_VF | HCR_VI | HCR_VSE |
5283                  HCR_FB | HCR_TID1 | HCR_TID3 | HCR_TSC | HCR_TACR |
5284                  HCR_TSW | HCR_TTLB | HCR_TVM | HCR_HCD | HCR_TRVM |
5285                  HCR_TLOR);
5286     }
5287 
5288     return ret;
5289 }
5290 
5291 /*
5292  * Corresponds to ARM pseudocode function ELIsInHost().
5293  */
5294 bool el_is_in_host(CPUARMState *env, int el)
5295 {
5296     uint64_t mask;
5297 
5298     /*
5299      * Since we only care about E2H and TGE, we can skip arm_hcr_el2_eff().
5300      * Perform the simplest bit tests first, and validate EL2 afterward.
5301      */
5302     if (el & 1) {
5303         return false; /* EL1 or EL3 */
5304     }
5305 
5306     /*
5307      * Note that hcr_write() checks isar_feature_aa64_vh(),
5308      * aka HaveVirtHostExt(), in allowing HCR_E2H to be set.
5309      */
5310     mask = el ? HCR_E2H : HCR_E2H | HCR_TGE;
5311     if ((env->cp15.hcr_el2 & mask) != mask) {
5312         return false;
5313     }
5314 
5315     /* TGE and/or E2H set: double check those bits are currently legal. */
5316     return arm_is_el2_enabled(env) && arm_el_is_aa64(env, 2);
5317 }
5318 
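     /*
      * With no optional features implemented that add HCRX_EL2 bits,
      * every bit is currently RES0 and writes store zero.
      */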
5319 static void hcrx_write(CPUARMState *env, const ARMCPRegInfo *ri,
5320                        uint64_t value)
5321 {
5322     uint64_t valid_mask = 0;
5323 
5324     /* No features adding bits to HCRX are implemented. */
5325 
5326     /* Clear RES0 bits.  */
5327     env->cp15.hcrx_el2 = value & valid_mask;
5328 }
5329 
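     /*
      * Accesses to HCRX_EL2 from below EL3 trap to EL3 unless
      * SCR_EL3.HXEn is set (when EL3 is implemented).
      */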
5330 static CPAccessResult access_hxen(CPUARMState *env, const ARMCPRegInfo *ri,
5331                                   bool isread)
5332 {
5333     if (arm_current_el(env) < 3
5334         && arm_feature(env, ARM_FEATURE_EL3)
5335         && !(env->cp15.scr_el3 & SCR_HXEN)) {
5336         return CP_ACCESS_TRAP_EL3;
5337     }
5338     return CP_ACCESS_OK;
5339 }
5340 
5341 static const ARMCPRegInfo hcrx_el2_reginfo = {
5342     .name = "HCRX_EL2", .state = ARM_CP_STATE_AA64,
5343     .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 2,
5344     .access = PL2_RW, .writefn = hcrx_write, .accessfn = access_hxen,
5345     .fieldoffset = offsetof(CPUARMState, cp15.hcrx_el2),
5346 };
5347 
5348 /* Return the effective value of HCRX_EL2.  */
5349 uint64_t arm_hcrx_el2_eff(CPUARMState *env)
5350 {
5351     /*
5352      * The bits in this register behave as 0 for all purposes other than
5353      * direct reads of the register if:
5354      *   - EL2 is not enabled in the current security state,
5355      *   - SCR_EL3.HXEn is 0.
5356      */
5357     if (!arm_is_el2_enabled(env)
5358         || (arm_feature(env, ARM_FEATURE_EL3)
5359             && !(env->cp15.scr_el3 & SCR_HXEN))) {
5360         return 0;
5361     }
5362     return env->cp15.hcrx_el2;
5363 }
5364 
5365 static void cptr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
5366                            uint64_t value)
5367 {
5368     /*
5369      * For A-profile AArch32 EL3, if NSACR.CP10
5370      * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
5371      */
5372     if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
5373         !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
5374         uint64_t mask = R_HCPTR_TCP11_MASK | R_HCPTR_TCP10_MASK;
5375         value = (value & ~mask) | (env->cp15.cptr_el[2] & mask);
5376     }
5377     env->cp15.cptr_el[2] = value;
5378 }
5379 
5380 static uint64_t cptr_el2_read(CPUARMState *env, const ARMCPRegInfo *ri)
5381 {
5382     /*
5383      * For A-profile AArch32 EL3, if NSACR.CP10
5384      * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
5385      */
5386     uint64_t value = env->cp15.cptr_el[2];
5387 
5388     if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
5389         !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
5390         value |= R_HCPTR_TCP11_MASK | R_HCPTR_TCP10_MASK;
5391     }
5392     return value;
5393 }
5394 
5395 static const ARMCPRegInfo el2_cp_reginfo[] = {
5396     { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
5397       .type = ARM_CP_IO,
5398       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
5399       .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
5400       .writefn = hcr_write },
5401     { .name = "HCR", .state = ARM_CP_STATE_AA32,
5402       .type = ARM_CP_ALIAS | ARM_CP_IO,
5403       .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
5404       .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
5405       .writefn = hcr_writelow },
5406     { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH,
5407       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7,
5408       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5409     { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
5410       .type = ARM_CP_ALIAS,
5411       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
5412       .access = PL2_RW,
5413       .fieldoffset = offsetof(CPUARMState, elr_el[2]) },
5414     { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
5415       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
5416       .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) },
5417     { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
5418       .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
5419       .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) },
5420     { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
5421       .type = ARM_CP_ALIAS,
5422       .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
5423       .access = PL2_RW,
5424       .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el[2]) },
5425     { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
5426       .type = ARM_CP_ALIAS,
5427       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
5428       .access = PL2_RW,
5429       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) },
5430     { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
5431       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
5432       .access = PL2_RW, .writefn = vbar_write,
5433       .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]),
5434       .resetvalue = 0 },
5435     { .name = "SP_EL2", .state = ARM_CP_STATE_AA64,
5436       .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0,
5437       .access = PL3_RW, .type = ARM_CP_ALIAS,
5438       .fieldoffset = offsetof(CPUARMState, sp_el[2]) },
5439     { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
5440       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
5441       .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0,
5442       .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]),
5443       .readfn = cptr_el2_read, .writefn = cptr_el2_write },
5444     { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
5445       .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
5446       .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]),
5447       .resetvalue = 0 },
5448     { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
5449       .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
5450       .access = PL2_RW, .type = ARM_CP_ALIAS,
5451       .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) },
5452     { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
5453       .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
5454       .access = PL2_RW, .type = ARM_CP_CONST,
5455       .resetvalue = 0 },
5456     /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */
5457     { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
5458       .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
5459       .access = PL2_RW, .type = ARM_CP_CONST,
5460       .resetvalue = 0 },
5461     { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
5462       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
5463       .access = PL2_RW, .type = ARM_CP_CONST,
5464       .resetvalue = 0 },
5465     { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
5466       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
5467       .access = PL2_RW, .type = ARM_CP_CONST,
5468       .resetvalue = 0 },
5469     { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
5470       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
5471       .access = PL2_RW, .writefn = vmsa_tcr_el12_write,
5472       /* no .raw_writefn or .resetfn needed as we never use mask/base_mask */
5473       .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) },
5474     { .name = "VTCR", .state = ARM_CP_STATE_AA32,
5475       .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
5476       .type = ARM_CP_ALIAS,
5477       .access = PL2_RW, .accessfn = access_el3_aa32ns,
5478       .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
5479     { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64,
5480       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
5481       .access = PL2_RW,
5482       /* no .writefn needed as this can't cause an ASID change;
5483        * no .raw_writefn or .resetfn needed as we never use mask/base_mask
5484        */
5485       .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
5486     { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
5487       .cp = 15, .opc1 = 6, .crm = 2,
5488       .type = ARM_CP_64BIT | ARM_CP_ALIAS,
5489       .access = PL2_RW, .accessfn = access_el3_aa32ns,
5490       .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2),
5491       .writefn = vttbr_write },
5492     { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
5493       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
5494       .access = PL2_RW, .writefn = vttbr_write,
5495       .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) },
5496     { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
5497       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
5498       .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write,
5499       .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) },
5500     { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
5501       .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
5502       .access = PL2_RW, .resetvalue = 0,
5503       .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) },
5504     { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
5505       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
5506       .access = PL2_RW, .resetvalue = 0, .writefn = vmsa_tcr_ttbr_el2_write,
5507       .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
5508     { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
5509       .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
5510       .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
5511     { .name = "TLBIALLNSNH",
5512       .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
5513       .type = ARM_CP_NO_RAW, .access = PL2_W,
5514       .writefn = tlbiall_nsnh_write },
5515     { .name = "TLBIALLNSNHIS",
5516       .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
5517       .type = ARM_CP_NO_RAW, .access = PL2_W,
5518       .writefn = tlbiall_nsnh_is_write },
5519     { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
5520       .type = ARM_CP_NO_RAW, .access = PL2_W,
5521       .writefn = tlbiall_hyp_write },
5522     { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
5523       .type = ARM_CP_NO_RAW, .access = PL2_W,
5524       .writefn = tlbiall_hyp_is_write },
5525     { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
5526       .type = ARM_CP_NO_RAW, .access = PL2_W,
5527       .writefn = tlbimva_hyp_write },
5528     { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
5529       .type = ARM_CP_NO_RAW, .access = PL2_W,
5530       .writefn = tlbimva_hyp_is_write },
5531     { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64,
5532       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
5533       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
5534       .writefn = tlbi_aa64_alle2_write },
5535     { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64,
5536       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
5537       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
5538       .writefn = tlbi_aa64_vae2_write },
5539     { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64,
5540       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
5541       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
5542       .writefn = tlbi_aa64_vae2_write },
5543     { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64,
5544       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
5545       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
5546       .writefn = tlbi_aa64_alle2is_write },
5547     { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64,
5548       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
5549       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
5550       .writefn = tlbi_aa64_vae2is_write },
5551     { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64,
5552       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
5553       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
5554       .writefn = tlbi_aa64_vae2is_write },
5555 #ifndef CONFIG_USER_ONLY
5556     /* Unlike the other EL2-related AT operations, these must
5557      * UNDEF from EL3 if EL2 is not implemented, which is why we
5558      * define them here rather than with the rest of the AT ops.
5559      */
5560     { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64,
5561       .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
5562       .access = PL2_W, .accessfn = at_s1e2_access,
5563       .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC | ARM_CP_EL3_NO_EL2_UNDEF,
5564       .writefn = ats_write64 },
5565     { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64,
5566       .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
5567       .access = PL2_W, .accessfn = at_s1e2_access,
5568       .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC | ARM_CP_EL3_NO_EL2_UNDEF,
5569       .writefn = ats_write64 },
5570     /* The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE
5571      * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3
5572      * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose
5573      * to behave as if SCR.NS was 1.
5574      */
5575     { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
5576       .access = PL2_W,
5577       .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
5578     { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
5579       .access = PL2_W,
5580       .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
5581     { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
5582       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
5583       /* ARMv7 requires bits 0 and 1 to reset to 1. ARMv8 defines the
5584        * reset values as IMPDEF. We choose to reset to 3 to comply with
5585        * both ARMv7 and ARMv8.
5586        */
5587       .access = PL2_RW, .resetvalue = 3,
5588       .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) },
5589     { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
5590       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
5591       .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0,
5592       .writefn = gt_cntvoff_write,
5593       .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
5594     { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
5595       .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO,
5596       .writefn = gt_cntvoff_write,
5597       .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
5598     { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
5599       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
5600       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
5601       .type = ARM_CP_IO, .access = PL2_RW,
5602       .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
5603     { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
5604       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
5605       .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO,
5606       .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
5607     { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
5608       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
5609       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
5610       .resetfn = gt_hyp_timer_reset,
5611       .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write },
5612     { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
5613       .type = ARM_CP_IO,
5614       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
5615       .access = PL2_RW,
5616       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl),
5617       .resetvalue = 0,
5618       .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write },
5619 #endif
5620     { .name = "HPFAR", .state = ARM_CP_STATE_AA32,
5621       .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
5622       .access = PL2_RW, .accessfn = access_el3_aa32ns,
5623       .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
5624     { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64,
5625       .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
5626       .access = PL2_RW,
5627       .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
5628     { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
5629       .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
5630       .access = PL2_RW,
5631       .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) },
5632 };
5633 
5634 static const ARMCPRegInfo el2_v8_cp_reginfo[] = {
5635     { .name = "HCR2", .state = ARM_CP_STATE_AA32,
5636       .type = ARM_CP_ALIAS | ARM_CP_IO,
5637       .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
5638       .access = PL2_RW,
5639       .fieldoffset = offsetofhigh32(CPUARMState, cp15.hcr_el2),
5640       .writefn = hcr_writehigh },
5641 };
5642 
5643 static CPAccessResult sel2_access(CPUARMState *env, const ARMCPRegInfo *ri,
5644                                   bool isread)
5645 {
5646     if (arm_current_el(env) == 3 || arm_is_secure_below_el3(env)) {
5647         return CP_ACCESS_OK;
5648     }
5649     return CP_ACCESS_TRAP_UNCATEGORIZED;
5650 }
5651 
5652 static const ARMCPRegInfo el2_sec_cp_reginfo[] = {
5653     { .name = "VSTTBR_EL2", .state = ARM_CP_STATE_AA64,
5654       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 6, .opc2 = 0,
5655       .access = PL2_RW, .accessfn = sel2_access,
5656       .fieldoffset = offsetof(CPUARMState, cp15.vsttbr_el2) },
5657     { .name = "VSTCR_EL2", .state = ARM_CP_STATE_AA64,
5658       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 6, .opc2 = 2,
5659       .access = PL2_RW, .accessfn = sel2_access,
5660       .fieldoffset = offsetof(CPUARMState, cp15.vstcr_el2) },
5661 };
5662 
5663 static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
5664                                    bool isread)
5665 {
5666     /* The NSACR is RW at EL3, and RO for NS EL1 and NS EL2.
5667      * At Secure EL1 it traps to EL3 or EL2.
5668      */
5669     if (arm_current_el(env) == 3) {
5670         return CP_ACCESS_OK;
5671     }
5672     if (arm_is_secure_below_el3(env)) {
5673         if (env->cp15.scr_el3 & SCR_EEL2) {
5674             return CP_ACCESS_TRAP_EL2;
5675         }
5676         return CP_ACCESS_TRAP_EL3;
5677     }
5678     /* Accesses from NS EL1 and NS EL2 are UNDEF for writes but allowed for reads. */
5679     if (isread) {
5680         return CP_ACCESS_OK;
5681     }
5682     return CP_ACCESS_TRAP_UNCATEGORIZED;
5683 }
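
/*
 * Editorial summary of the NSACR access rules implemented above
 * (illustrative, not part of the original source):
 *   EL3                          -> read/write OK
 *   Secure EL1, SCR_EL3.EEL2 = 1 -> trap to EL2
 *   Secure EL1, SCR_EL3.EEL2 = 0 -> trap to EL3
 *   NS EL1 / NS EL2              -> reads OK, writes UNDEF
 */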
5684 
5685 static const ARMCPRegInfo el3_cp_reginfo[] = {
5686     { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64,
5687       .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
5688       .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3),
5689       .resetfn = scr_reset, .writefn = scr_write },
5690     { .name = "SCR",  .type = ARM_CP_ALIAS | ARM_CP_NEWEL,
5691       .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0,
5692       .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
5693       .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3),
5694       .writefn = scr_write },
5695     { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64,
5696       .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1,
5697       .access = PL3_RW, .resetvalue = 0,
5698       .fieldoffset = offsetof(CPUARMState, cp15.sder) },
5699     { .name = "SDER",
5700       .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1,
5701       .access = PL3_RW, .resetvalue = 0,
5702       .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) },
5703     { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
5704       .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
5705       .writefn = vbar_write, .resetvalue = 0,
5706       .fieldoffset = offsetof(CPUARMState, cp15.mvbar) },
5707     { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64,
5708       .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0,
5709       .access = PL3_RW, .resetvalue = 0,
5710       .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) },
5711     { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64,
5712       .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2,
5713       .access = PL3_RW,
5714       /* no .writefn needed as this can't cause an ASID change;
5715        * we must provide a .raw_writefn and .resetfn because we handle
5716        * reset and migration for the AArch32 TTBCR(S), which might be
5717        * using mask and base_mask.
5718        */
5719       .resetfn = vmsa_ttbcr_reset, .raw_writefn = vmsa_ttbcr_raw_write,
5720       .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) },
5721     { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
5722       .type = ARM_CP_ALIAS,
5723       .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1,
5724       .access = PL3_RW,
5725       .fieldoffset = offsetof(CPUARMState, elr_el[3]) },
5726     { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64,
5727       .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0,
5728       .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) },
5729     { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64,
5730       .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0,
5731       .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) },
5732     { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64,
5733       .type = ARM_CP_ALIAS,
5734       .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0,
5735       .access = PL3_RW,
5736       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) },
5737     { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64,
5738       .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0,
5739       .access = PL3_RW, .writefn = vbar_write,
5740       .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]),
5741       .resetvalue = 0 },
5742     { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64,
5743       .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2,
5744       .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0,
5745       .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) },
5746     { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64,
5747       .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2,
5748       .access = PL3_RW, .resetvalue = 0,
5749       .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) },
5750     { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64,
5751       .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0,
5752       .access = PL3_RW, .type = ARM_CP_CONST,
5753       .resetvalue = 0 },
5754     { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH,
5755       .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0,
5756       .access = PL3_RW, .type = ARM_CP_CONST,
5757       .resetvalue = 0 },
5758     { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH,
5759       .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1,
5760       .access = PL3_RW, .type = ARM_CP_CONST,
5761       .resetvalue = 0 },
5762     { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64,
5763       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0,
5764       .access = PL3_W, .type = ARM_CP_NO_RAW,
5765       .writefn = tlbi_aa64_alle3is_write },
5766     { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64,
5767       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1,
5768       .access = PL3_W, .type = ARM_CP_NO_RAW,
5769       .writefn = tlbi_aa64_vae3is_write },
5770     { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64,
5771       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5,
5772       .access = PL3_W, .type = ARM_CP_NO_RAW,
5773       .writefn = tlbi_aa64_vae3is_write },
5774     { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64,
5775       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0,
5776       .access = PL3_W, .type = ARM_CP_NO_RAW,
5777       .writefn = tlbi_aa64_alle3_write },
5778     { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64,
5779       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1,
5780       .access = PL3_W, .type = ARM_CP_NO_RAW,
5781       .writefn = tlbi_aa64_vae3_write },
5782     { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64,
5783       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5,
5784       .access = PL3_W, .type = ARM_CP_NO_RAW,
5785       .writefn = tlbi_aa64_vae3_write },
5786 };
5787 
5788 #ifndef CONFIG_USER_ONLY
5789 /* Test if system register redirection is to occur in the current state.  */
5790 static bool redirect_for_e2h(CPUARMState *env)
5791 {
5792     return arm_current_el(env) == 2 && (arm_hcr_el2_eff(env) & HCR_E2H);
5793 }
5794 
5795 static uint64_t el2_e2h_read(CPUARMState *env, const ARMCPRegInfo *ri)
5796 {
5797     CPReadFn *readfn;
5798 
5799     if (redirect_for_e2h(env)) {
5800         /* Switch to the saved EL2 version of the register.  */
5801         ri = ri->opaque;
5802         readfn = ri->readfn;
5803     } else {
5804         readfn = ri->orig_readfn;
5805     }
5806     if (readfn == NULL) {
5807         readfn = raw_read;
5808     }
5809     return readfn(env, ri);
5810 }
5811 
5812 static void el2_e2h_write(CPUARMState *env, const ARMCPRegInfo *ri,
5813                           uint64_t value)
5814 {
5815     CPWriteFn *writefn;
5816 
5817     if (redirect_for_e2h(env)) {
5818         /* Switch to the saved EL2 version of the register.  */
5819         ri = ri->opaque;
5820         writefn = ri->writefn;
5821     } else {
5822         writefn = ri->orig_writefn;
5823     }
5824     if (writefn == NULL) {
5825         writefn = raw_write;
5826     }
5827     writefn(env, ri, value);
5828 }
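
/*
 * Worked example of the E2H redirection above (editorial sketch, assuming
 * the aliases registered by define_arm_vh_e2h_redirects_aliases()): with
 * the CPU at EL2 and HCR_EL2.E2H set, an MRS of SCTLR_EL1's encoding
 * reaches el2_e2h_read() via the SCTLR_EL1 reginfo, follows ri->opaque to
 * the SCTLR_EL2 reginfo, and so reads cp15.sctlr_el[2].  In any other
 * state the saved orig_readfn/orig_writefn (or raw_read/raw_write) is
 * used, preserving the original EL1 behaviour.
 */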
5829 
5830 static void define_arm_vh_e2h_redirects_aliases(ARMCPU *cpu)
5831 {
5832     struct E2HAlias {
5833         uint32_t src_key, dst_key, new_key;
5834         const char *src_name, *dst_name, *new_name;
5835         bool (*feature)(const ARMISARegisters *id);
5836     };
5837 
5838 #define K(op0, op1, crn, crm, op2) \
5839     ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, crn, crm, op0, op1, op2)
5840 
5841     static const struct E2HAlias aliases[] = {
5842         { K(3, 0,  1, 0, 0), K(3, 4,  1, 0, 0), K(3, 5, 1, 0, 0),
5843           "SCTLR", "SCTLR_EL2", "SCTLR_EL12" },
5844         { K(3, 0,  1, 0, 2), K(3, 4,  1, 1, 2), K(3, 5, 1, 0, 2),
5845           "CPACR", "CPTR_EL2", "CPACR_EL12" },
5846         { K(3, 0,  2, 0, 0), K(3, 4,  2, 0, 0), K(3, 5, 2, 0, 0),
5847           "TTBR0_EL1", "TTBR0_EL2", "TTBR0_EL12" },
5848         { K(3, 0,  2, 0, 1), K(3, 4,  2, 0, 1), K(3, 5, 2, 0, 1),
5849           "TTBR1_EL1", "TTBR1_EL2", "TTBR1_EL12" },
5850         { K(3, 0,  2, 0, 2), K(3, 4,  2, 0, 2), K(3, 5, 2, 0, 2),
5851           "TCR_EL1", "TCR_EL2", "TCR_EL12" },
5852         { K(3, 0,  4, 0, 0), K(3, 4,  4, 0, 0), K(3, 5, 4, 0, 0),
5853           "SPSR_EL1", "SPSR_EL2", "SPSR_EL12" },
5854         { K(3, 0,  4, 0, 1), K(3, 4,  4, 0, 1), K(3, 5, 4, 0, 1),
5855           "ELR_EL1", "ELR_EL2", "ELR_EL12" },
5856         { K(3, 0,  5, 1, 0), K(3, 4,  5, 1, 0), K(3, 5, 5, 1, 0),
5857           "AFSR0_EL1", "AFSR0_EL2", "AFSR0_EL12" },
5858         { K(3, 0,  5, 1, 1), K(3, 4,  5, 1, 1), K(3, 5, 5, 1, 1),
5859           "AFSR1_EL1", "AFSR1_EL2", "AFSR1_EL12" },
5860         { K(3, 0,  5, 2, 0), K(3, 4,  5, 2, 0), K(3, 5, 5, 2, 0),
5861           "ESR_EL1", "ESR_EL2", "ESR_EL12" },
5862         { K(3, 0,  6, 0, 0), K(3, 4,  6, 0, 0), K(3, 5, 6, 0, 0),
5863           "FAR_EL1", "FAR_EL2", "FAR_EL12" },
5864         { K(3, 0, 10, 2, 0), K(3, 4, 10, 2, 0), K(3, 5, 10, 2, 0),
5865           "MAIR_EL1", "MAIR_EL2", "MAIR_EL12" },
5866         { K(3, 0, 10, 3, 0), K(3, 4, 10, 3, 0), K(3, 5, 10, 3, 0),
5867           "AMAIR0", "AMAIR_EL2", "AMAIR_EL12" },
5868         { K(3, 0, 12, 0, 0), K(3, 4, 12, 0, 0), K(3, 5, 12, 0, 0),
5869           "VBAR", "VBAR_EL2", "VBAR_EL12" },
5870         { K(3, 0, 13, 0, 1), K(3, 4, 13, 0, 1), K(3, 5, 13, 0, 1),
5871           "CONTEXTIDR_EL1", "CONTEXTIDR_EL2", "CONTEXTIDR_EL12" },
5872         { K(3, 0, 14, 1, 0), K(3, 4, 14, 1, 0), K(3, 5, 14, 1, 0),
5873           "CNTKCTL", "CNTHCTL_EL2", "CNTKCTL_EL12" },
5874 
5875         /*
5876          * Note that redirection of ZCR is mentioned in the description
5877          * of ZCR_EL2, and aliasing in the description of ZCR_EL1, but
5878          * not in the summary table.
5879          */
5880         { K(3, 0,  1, 2, 0), K(3, 4,  1, 2, 0), K(3, 5, 1, 2, 0),
5881           "ZCR_EL1", "ZCR_EL2", "ZCR_EL12", isar_feature_aa64_sve },
5882 
5883         { K(3, 0,  5, 6, 0), K(3, 4,  5, 6, 0), K(3, 5, 5, 6, 0),
5884           "TFSR_EL1", "TFSR_EL2", "TFSR_EL12", isar_feature_aa64_mte },
5885 
5886         { K(3, 0, 13, 0, 7), K(3, 4, 13, 0, 7), K(3, 5, 13, 0, 7),
5887           "SCXTNUM_EL1", "SCXTNUM_EL2", "SCXTNUM_EL12",
5888           isar_feature_aa64_scxtnum },
5889 
5890         /* TODO: ARMv8.2-SPE -- PMSCR_EL2 */
5891         /* TODO: ARMv8.4-Trace -- TRFCR_EL2 */
5892     };
5893 #undef K
5894 
5895     size_t i;
5896 
5897     for (i = 0; i < ARRAY_SIZE(aliases); i++) {
5898         const struct E2HAlias *a = &aliases[i];
5899         ARMCPRegInfo *src_reg, *dst_reg, *new_reg;
5900         bool ok;
5901 
5902         if (a->feature && !a->feature(&cpu->isar)) {
5903             continue;
5904         }
5905 
5906         src_reg = g_hash_table_lookup(cpu->cp_regs,
5907                                       (gpointer)(uintptr_t)a->src_key);
5908         dst_reg = g_hash_table_lookup(cpu->cp_regs,
5909                                       (gpointer)(uintptr_t)a->dst_key);
5910         g_assert(src_reg != NULL);
5911         g_assert(dst_reg != NULL);
5912 
5913         /* Cross-compare names to detect typos in the keys.  */
5914         g_assert(strcmp(src_reg->name, a->src_name) == 0);
5915         g_assert(strcmp(dst_reg->name, a->dst_name) == 0);
5916 
5917         /* None of the core system registers use opaque; we will.  */
5918         g_assert(src_reg->opaque == NULL);
5919 
5920         /* Create alias before redirection so we dup the right data. */
5921         new_reg = g_memdup(src_reg, sizeof(ARMCPRegInfo));
5922 
5923         new_reg->name = a->new_name;
5924         new_reg->type |= ARM_CP_ALIAS;
5925         /* Remove PL1/PL0 access, leaving PL2/PL3 R/W in place.  */
5926         new_reg->access &= PL2_RW | PL3_RW;
5927 
5928         ok = g_hash_table_insert(cpu->cp_regs,
5929                                  (gpointer)(uintptr_t)a->new_key, new_reg);
5930         g_assert(ok);
5931 
5932         src_reg->opaque = dst_reg;
5933         src_reg->orig_readfn = src_reg->readfn ?: raw_read;
5934         src_reg->orig_writefn = src_reg->writefn ?: raw_write;
5935         if (!src_reg->raw_readfn) {
5936             src_reg->raw_readfn = raw_read;
5937         }
5938         if (!src_reg->raw_writefn) {
5939             src_reg->raw_writefn = raw_write;
5940         }
5941         src_reg->readfn = el2_e2h_read;
5942         src_reg->writefn = el2_e2h_write;
5943     }
5944 }
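
/*
 * Illustrative expansion of one aliases[] entry (editorial note): for the
 * SCTLR row, new_key K(3, 5, 1, 0, 0) registers "SCTLR_EL12" as a copy of
 * the SCTLR_EL1 reginfo marked ARM_CP_ALIAS and restricted to PL2/PL3
 * access, while the original SCTLR_EL1 entry has its readfn/writefn
 * replaced by el2_e2h_read/el2_e2h_write and its opaque pointer set to
 * the SCTLR_EL2 reginfo.
 */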
5945 #endif
5946 
5947 static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
5948                                      bool isread)
5949 {
5950     int cur_el = arm_current_el(env);
5951 
5952     if (cur_el < 2) {
5953         uint64_t hcr = arm_hcr_el2_eff(env);
5954 
5955         if (cur_el == 0) {
5956             if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
5957                 if (!(env->cp15.sctlr_el[2] & SCTLR_UCT)) {
5958                     return CP_ACCESS_TRAP_EL2;
5959                 }
5960             } else {
5961                 if (!(env->cp15.sctlr_el[1] & SCTLR_UCT)) {
5962                     return CP_ACCESS_TRAP;
5963                 }
5964                 if (hcr & HCR_TID2) {
5965                     return CP_ACCESS_TRAP_EL2;
5966                 }
5967             }
5968         } else if (hcr & HCR_TID2) {
5969             return CP_ACCESS_TRAP_EL2;
5970         }
5971     }
5972 
5973     if (arm_current_el(env) < 2 && arm_hcr_el2_eff(env) & HCR_TID2) {
5974         return CP_ACCESS_TRAP_EL2;
5975     }
5976 
5977     return CP_ACCESS_OK;
5978 }
5979 
5980 static void oslar_write(CPUARMState *env, const ARMCPRegInfo *ri,
5981                         uint64_t value)
5982 {
5983     /* Writes to OSLAR_EL1 may update the OS lock status, which can be
5984      * read via a bit in OSLSR_EL1.
5985      */
5986     int oslock;
5987 
5988     if (ri->state == ARM_CP_STATE_AA32) {
5989         oslock = (value == 0xC5ACCE55);
5990     } else {
5991         oslock = value & 1;
5992     }
5993 
5994     env->cp15.oslsr_el1 = deposit32(env->cp15.oslsr_el1, 1, 1, oslock);
5995 }
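
/*
 * Illustrative values for the write above (editorial note): an AArch32
 * write of the key value 0xC5ACCE55 sets oslock = 1 and any other value
 * clears it, while an AArch64 write uses only bit 0 of the value.  Either
 * way the result lands in OSLSR_EL1 bit 1 (OSLK), e.g.
 * deposit32(0, 1, 1, 1) == 0x2.
 */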
5996 
5997 static const ARMCPRegInfo debug_cp_reginfo[] = {
5998     /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped
5999      * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1;
6000      * unlike DBGDRAR it is never accessible from EL0.
6001      * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64
6002      * accessor.
6003      */
6004     { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
6005       .access = PL0_R, .accessfn = access_tdra,
6006       .type = ARM_CP_CONST, .resetvalue = 0 },
6007     { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64,
6008       .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
6009       .access = PL1_R, .accessfn = access_tdra,
6010       .type = ARM_CP_CONST, .resetvalue = 0 },
6011     { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
6012       .access = PL0_R, .accessfn = access_tdra,
6013       .type = ARM_CP_CONST, .resetvalue = 0 },
6014     /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */
6015     { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH,
6016       .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
6017       .access = PL1_RW, .accessfn = access_tda,
6018       .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1),
6019       .resetvalue = 0 },
6020     /*
6021      * MDCCSR_EL0[30:29] map to EDSCR[30:29].  Simply RAZ as the external
6022      * Debug Communication Channel is not implemented.
6023      */
6024     { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_AA64,
6025       .opc0 = 2, .opc1 = 3, .crn = 0, .crm = 1, .opc2 = 0,
6026       .access = PL0_R, .accessfn = access_tda,
6027       .type = ARM_CP_CONST, .resetvalue = 0 },
6028     /*
6029      * DBGDSCRint[15,12,5:2] map to MDSCR_EL1[15,12,5:2].  Map all bits as
6030      * it is unlikely a guest will care.
6031      * We don't implement the configurable EL0 access.
6032      */
6033     { .name = "DBGDSCRint", .state = ARM_CP_STATE_AA32,
6034       .cp = 14, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
6035       .type = ARM_CP_ALIAS,
6036       .access = PL1_R, .accessfn = access_tda,
6037       .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), },
6038     { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH,
6039       .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4,
6040       .access = PL1_W, .type = ARM_CP_NO_RAW,
6041       .accessfn = access_tdosa,
6042       .writefn = oslar_write },
6043     { .name = "OSLSR_EL1", .state = ARM_CP_STATE_BOTH,
6044       .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 4,
6045       .access = PL1_R, .resetvalue = 10,
6046       .accessfn = access_tdosa,
6047       .fieldoffset = offsetof(CPUARMState, cp15.oslsr_el1) },
6048     /* Dummy OSDLR_EL1: 32-bit Linux will read this */
6049     { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH,
6050       .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4,
6051       .access = PL1_RW, .accessfn = access_tdosa,
6052       .type = ARM_CP_NOP },
6053     /* Dummy DBGVCR: Linux wants to clear this on startup, but we don't
6054      * implement vector catch debug events yet.
6055      */
6056     { .name = "DBGVCR",
6057       .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
6058       .access = PL1_RW, .accessfn = access_tda,
6059       .type = ARM_CP_NOP },
6060     /* Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor
6061      * to save and restore a 32-bit guest's DBGVCR)
6062      */
6063     { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64,
6064       .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0,
6065       .access = PL2_RW, .accessfn = access_tda,
6066       .type = ARM_CP_NOP | ARM_CP_EL3_NO_EL2_KEEP },
6067     /* Dummy MDCCINT_EL1, since we don't implement the Debug Communications
6068      * Channel but Linux may try to access this register. The 32-bit
6069      * alias is DBGDCCINT.
6070      */
6071     { .name = "MDCCINT_EL1", .state = ARM_CP_STATE_BOTH,
6072       .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
6073       .access = PL1_RW, .accessfn = access_tda,
6074       .type = ARM_CP_NOP },
6075 };
6076 
6077 static const ARMCPRegInfo debug_lpae_cp_reginfo[] = {
6078     /* 64 bit access versions of the (dummy) debug registers */
6079     { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0,
6080       .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
6081     { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0,
6082       .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
6083 };
6084 
6085 /*
6086  * Check for traps to RAS registers, which are controlled
6087  * by HCR_EL2.TERR and SCR_EL3.TERR.
6088  */
6089 static CPAccessResult access_terr(CPUARMState *env, const ARMCPRegInfo *ri,
6090                                   bool isread)
6091 {
6092     int el = arm_current_el(env);
6093 
6094     if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TERR)) {
6095         return CP_ACCESS_TRAP_EL2;
6096     }
6097     if (el < 3 && (env->cp15.scr_el3 & SCR_TERR)) {
6098         return CP_ACCESS_TRAP_EL3;
6099     }
6100     return CP_ACCESS_OK;
6101 }
6102 
6103 static uint64_t disr_read(CPUARMState *env, const ARMCPRegInfo *ri)
6104 {
6105     int el = arm_current_el(env);
6106 
6107     if (el < 2 && (arm_hcr_el2_eff(env) & HCR_AMO)) {
6108         return env->cp15.vdisr_el2;
6109     }
6110     if (el < 3 && (env->cp15.scr_el3 & SCR_EA)) {
6111         return 0; /* RAZ/WI */
6112     }
6113     return env->cp15.disr_el1;
6114 }
6115 
6116 static void disr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
6117 {
6118     int el = arm_current_el(env);
6119 
6120     if (el < 2 && (arm_hcr_el2_eff(env) & HCR_AMO)) {
6121         env->cp15.vdisr_el2 = val;
6122         return;
6123     }
6124     if (el < 3 && (env->cp15.scr_el3 & SCR_EA)) {
6125         return; /* RAZ/WI */
6126     }
6127     env->cp15.disr_el1 = val;
6128 }
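
/*
 * Editorial summary of the DISR routing above: below EL2 with an
 * effective HCR_EL2.AMO of 1, DISR accesses are redirected to VDISR_EL2
 * (the virtual SError syndrome); below EL3 with SCR_EL3.EA set, DISR is
 * RAZ/WI; in all other cases the real DISR_EL1 state is used.
 */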
6129 
6130 /*
6131  * Minimal RAS implementation with no Error Records.
6132  * Which means that all of the Error Record registers:
6133  *   ERXADDR_EL1
6134  *   ERXCTLR_EL1
6135  *   ERXFR_EL1
6136  *   ERXMISC0_EL1
6137  *   ERXMISC1_EL1
6138  *   ERXMISC2_EL1
6139  *   ERXMISC3_EL1
6140  *   ERXPFGCDN_EL1  (RASv1p1)
6141  *   ERXPFGCTL_EL1  (RASv1p1)
6142  *   ERXPFGF_EL1    (RASv1p1)
6143  *   ERXSTATUS_EL1
6144  * and
6145  *   ERRSELR_EL1
6146  * may generate UNDEFINED, which is the effect we get by not
6147  * listing them at all.
6148  */
6149 static const ARMCPRegInfo minimal_ras_reginfo[] = {
6150     { .name = "DISR_EL1", .state = ARM_CP_STATE_BOTH,
6151       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 1,
6152       .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.disr_el1),
6153       .readfn = disr_read, .writefn = disr_write, .raw_writefn = raw_write },
6154     { .name = "ERRIDR_EL1", .state = ARM_CP_STATE_BOTH,
6155       .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 3, .opc2 = 0,
6156       .access = PL1_R, .accessfn = access_terr,
6157       .type = ARM_CP_CONST, .resetvalue = 0 },
6158     { .name = "VDISR_EL2", .state = ARM_CP_STATE_BOTH,
6159       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 1, .opc2 = 1,
6160       .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.vdisr_el2) },
6161     { .name = "VSESR_EL2", .state = ARM_CP_STATE_BOTH,
6162       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 3,
6163       .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.vsesr_el2) },
6164 };
6165 
6166 /*
6167  * Return the exception level to which exceptions should be taken
6168  * via SVEAccessTrap.  This excludes the check for whether the exception
6169  * should be routed through AArch64.AdvSIMDFPAccessTrap.  That can easily
6170  * be found by testing 0 < fp_exception_el < sve_exception_el.
6171  *
6172  * C.f. the ARM pseudocode function CheckSVEEnabled.  Note that the
6173  * pseudocode does *not* separate out the FP trap checks, but has them
6174  * all in one function.
6175  */
6176 int sve_exception_el(CPUARMState *env, int el)
6177 {
6178 #ifndef CONFIG_USER_ONLY
6179     if (el <= 1 && !el_is_in_host(env, el)) {
6180         switch (FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, ZEN)) {
6181         case 1:
6182             if (el != 0) {
6183                 break;
6184             }
6185             /* fall through */
6186         case 0:
6187         case 2:
6188             return 1;
6189         }
6190     }
6191 
6192     if (el <= 2 && arm_is_el2_enabled(env)) {
6193         /* CPTR_EL2 changes format with HCR_EL2.E2H (regardless of TGE). */
6194         if (env->cp15.hcr_el2 & HCR_E2H) {
6195             switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, ZEN)) {
6196             case 1:
6197                 if (el != 0 || !(env->cp15.hcr_el2 & HCR_TGE)) {
6198                     break;
6199                 }
6200                 /* fall through */
6201             case 0:
6202             case 2:
6203                 return 2;
6204             }
6205         } else {
6206             if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TZ)) {
6207                 return 2;
6208             }
6209         }
6210     }
6211 
6212     /* CPTR_EL3.  Since EZ has negative polarity (0 traps), we must check for EL3.  */
6213     if (arm_feature(env, ARM_FEATURE_EL3)
6214         && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, EZ)) {
6215         return 3;
6216     }
6217 #endif
6218     return 0;
6219 }
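
/*
 * Illustrative decode of the ZEN fields tested above (editorial note):
 *   0b00 or 0b10 -> SVE insns trap (to EL1, or to EL2 for the CPTR_EL2
 *                   check)
 *   0b01         -> the trap applies to EL0 only (and, for CPTR_EL2,
 *                   only when HCR_EL2.TGE is set)
 *   0b11         -> no trap from this control
 */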
6220 
6221 /*
6222  * Given that SVE is enabled, return the vector length for EL.
6223  */
6224 uint32_t sve_vqm1_for_el(CPUARMState *env, int el)
6225 {
6226     ARMCPU *cpu = env_archcpu(env);
6227     uint32_t len = cpu->sve_max_vq - 1;
6228 
6229     if (el <= 1 && !el_is_in_host(env, el)) {
6230         len = MIN(len, 0xf & (uint32_t)env->vfp.zcr_el[1]);
6231     }
6232     if (el <= 2 && arm_feature(env, ARM_FEATURE_EL2)) {
6233         len = MIN(len, 0xf & (uint32_t)env->vfp.zcr_el[2]);
6234     }
6235     if (arm_feature(env, ARM_FEATURE_EL3)) {
6236         len = MIN(len, 0xf & (uint32_t)env->vfp.zcr_el[3]);
6237     }
6238 
6239     len = 31 - clz32(cpu->sve_vq_map & MAKE_64BIT_MASK(0, len + 1));
6240     return len;
6241 }
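
/*
 * Worked example with illustrative numbers (editorial note): for EL1 with
 * sve_max_vq = 4, ZCR_EL1 = 1 and ZCR_EL3 = 2 on a CPU with EL3 but no
 * EL2, len becomes MIN(3, 1, 2) = 1.  If sve_vq_map = 0b1011 (VQ 1, 2 and
 * 4 supported), the final step masks it with MAKE_64BIT_MASK(0, 2) giving
 * 0b0011, and 31 - clz32(0b0011) = 1 is returned, i.e. VQ 2 (256-bit
 * vectors).
 */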
6242 
6243 static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
6244                       uint64_t value)
6245 {
6246     int cur_el = arm_current_el(env);
6247     int old_len = sve_vqm1_for_el(env, cur_el);
6248     int new_len;
6249 
6250     /* Bits other than [3:0] are RAZ/WI.  */
6251     QEMU_BUILD_BUG_ON(ARM_MAX_VQ > 16);
6252     raw_write(env, ri, value & 0xf);
6253 
6254     /*
6255      * Because we arrived here, we know both FP and SVE are enabled;
6256      * otherwise we would have trapped access to the ZCR_ELn register.
6257      */
6258     new_len = sve_vqm1_for_el(env, cur_el);
6259     if (new_len < old_len) {
6260         aarch64_sve_narrow_vq(env, new_len + 1);
6261     }
6262 }
6263 
6264 static const ARMCPRegInfo zcr_reginfo[] = {
6265     { .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64,
6266       .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0,
6267       .access = PL1_RW, .type = ARM_CP_SVE,
6268       .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]),
6269       .writefn = zcr_write, .raw_writefn = raw_write },
6270     { .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
6271       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
6272       .access = PL2_RW, .type = ARM_CP_SVE,
6273       .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]),
6274       .writefn = zcr_write, .raw_writefn = raw_write },
6275     { .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64,
6276       .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0,
6277       .access = PL3_RW, .type = ARM_CP_SVE,
6278       .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]),
6279       .writefn = zcr_write, .raw_writefn = raw_write },
6280 };
6281 
6282 void hw_watchpoint_update(ARMCPU *cpu, int n)
6283 {
6284     CPUARMState *env = &cpu->env;
6285     vaddr len = 0;
6286     vaddr wvr = env->cp15.dbgwvr[n];
6287     uint64_t wcr = env->cp15.dbgwcr[n];
6288     int mask;
6289     int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;
6290 
6291     if (env->cpu_watchpoint[n]) {
6292         cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]);
6293         env->cpu_watchpoint[n] = NULL;
6294     }
6295 
6296     if (!FIELD_EX64(wcr, DBGWCR, E)) {
6297         /* E bit clear : watchpoint disabled */
6298         return;
6299     }
6300 
6301     switch (FIELD_EX64(wcr, DBGWCR, LSC)) {
6302     case 0:
6303         /* LSC 00 is reserved and must behave as if the wp is disabled */
6304         return;
6305     case 1:
6306         flags |= BP_MEM_READ;
6307         break;
6308     case 2:
6309         flags |= BP_MEM_WRITE;
6310         break;
6311     case 3:
6312         flags |= BP_MEM_ACCESS;
6313         break;
6314     }
6315 
6316     /* Attempts to use both MASK and BAS fields simultaneously are
6317      * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case,
6318      * thus generating a watchpoint for every byte in the masked region.
6319      */
6320     mask = FIELD_EX64(wcr, DBGWCR, MASK);
6321     if (mask == 1 || mask == 2) {
6322         /* Reserved values of MASK; we must act as if the mask value was
6323          * some non-reserved value, or as if the watchpoint were disabled.
6324          * We choose the latter.
6325          */
6326         return;
6327     } else if (mask) {
6328         /* Watchpoint covers an aligned area up to 2GB in size */
6329         len = 1ULL << mask;
6330         /* If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE
6331          * whether the watchpoint fires when the unmasked bits match; we opt
6332          * to generate the exceptions.
6333          */
6334         wvr &= ~(len - 1);
6335     } else {
6336         /* Watchpoint covers bytes defined by the byte address select bits */
6337         int bas = FIELD_EX64(wcr, DBGWCR, BAS);
6338         int basstart;
6339 
6340         if (extract64(wvr, 2, 1)) {
6341             /* Deprecated case of an address that is only word-aligned. BAS[7:4] are
6342              * ignored, and BAS[3:0] define which bytes to watch.
6343              */
6344             bas &= 0xf;
6345         }
6346 
6347         if (bas == 0) {
6348             /* This must act as if the watchpoint is disabled */
6349             return;
6350         }
6351 
6352         /* The BAS bits are supposed to be programmed to indicate a contiguous
6353          * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether
6354          * we fire for each byte in the word/doubleword addressed by the WVR.
6355          * We choose to ignore any non-zero bits after the first range of 1s.
6356          */
6357         basstart = ctz32(bas);
6358         len = cto32(bas >> basstart);
6359         wvr += basstart;
6360     }
6361 
6362     cpu_watchpoint_insert(CPU(cpu), wvr, len, flags,
6363                           &env->cpu_watchpoint[n]);
6364 }
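
/*
 * Worked examples for the length/address computation above (editorial,
 * with illustrative register values):
 *  - WCR.MASK = 3, WVR = 0x1006: len = 1 << 3 = 8 and WVR is aligned
 *    down, so bytes 0x1000..0x1007 are watched.
 *  - WCR.MASK = 0, WCR.BAS = 0b00111100, WVR = 0x2000: basstart = 2 and
 *    len = 4, so bytes 0x2002..0x2005 are watched.
 */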
6365 
6366 void hw_watchpoint_update_all(ARMCPU *cpu)
6367 {
6368     int i;
6369     CPUARMState *env = &cpu->env;
6370 
6371     /* Completely clear out existing QEMU watchpoints and our array, to
6372      * avoid possible stale entries following migration load.
6373      */
6374     cpu_watchpoint_remove_all(CPU(cpu), BP_CPU);
6375     memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint));
6376 
6377     for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) {
6378         hw_watchpoint_update(cpu, i);
6379     }
6380 }
6381 
6382 static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
6383                          uint64_t value)
6384 {
6385     ARMCPU *cpu = env_archcpu(env);
6386     int i = ri->crm;
6387 
6388     /*
6389      * Bits [1:0] are RES0.
6390      *
6391      * It is IMPLEMENTATION DEFINED whether [63:49] ([63:53] with FEAT_LVA)
6392      * are hardwired to the value of bit [48] ([52] with FEAT_LVA), or if
6393      * they contain the value written.  It is CONSTRAINED UNPREDICTABLE
6394      * whether the RESS bits are ignored when comparing an address.
6395      *
6396      * Therefore we are allowed to compare the entire register, which lets
6397      * us avoid considering whether or not FEAT_LVA is actually enabled.
6398      */
6399     value &= ~3ULL;
6400 
6401     raw_write(env, ri, value);
6402     hw_watchpoint_update(cpu, i);
6403 }
6404 
6405 static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
6406                          uint64_t value)
6407 {
6408     ARMCPU *cpu = env_archcpu(env);
6409     int i = ri->crm;
6410 
6411     raw_write(env, ri, value);
6412     hw_watchpoint_update(cpu, i);
6413 }
6414 
6415 void hw_breakpoint_update(ARMCPU *cpu, int n)
6416 {
6417     CPUARMState *env = &cpu->env;
6418     uint64_t bvr = env->cp15.dbgbvr[n];
6419     uint64_t bcr = env->cp15.dbgbcr[n];
6420     vaddr addr;
6421     int bt;
6422     int flags = BP_CPU;
6423 
6424     if (env->cpu_breakpoint[n]) {
6425         cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]);
6426         env->cpu_breakpoint[n] = NULL;
6427     }
6428 
6429     if (!extract64(bcr, 0, 1)) {
6430         /* E bit clear : breakpoint disabled */
6431         return;
6432     }
6433 
6434     bt = extract64(bcr, 20, 4);
6435 
6436     switch (bt) {
6437     case 4: /* unlinked address mismatch (reserved if AArch64) */
6438     case 5: /* linked address mismatch (reserved if AArch64) */
6439         qemu_log_mask(LOG_UNIMP,
6440                       "arm: address mismatch breakpoint types not implemented\n");
6441         return;
6442     case 0: /* unlinked address match */
6443     case 1: /* linked address match */
6444     {
6445         /*
6446          * Bits [1:0] are RES0.
6447          *
6448          * It is IMPLEMENTATION DEFINED whether bits [63:49]
6449          * ([63:53] for FEAT_LVA) are hardwired to a copy of the sign bit
6450          * of the VA field ([48] or [52] for FEAT_LVA), or whether the
6451          * value is read as written.  It is CONSTRAINED UNPREDICTABLE
6452          * whether the RESS bits are ignored when comparing an address.
6453          * Therefore we are allowed to compare the entire register, which
6454          * lets us avoid considering whether FEAT_LVA is actually enabled.
6455          *
6456          * The BAS field is used to allow setting breakpoints on 16-bit
6457          * wide instructions; it is CONSTRAINED UNPREDICTABLE whether
6458          * a bp will fire if the addresses covered by the bp and the addresses
6459          * covered by the insn overlap but the insn doesn't start at the
6460          * start of the bp address range. We choose to require the insn and
6461          * the bp to have the same address. The constraints on writing to
6462          * BAS enforced in dbgbcr_write mean we have only four cases:
6463          *  0b0000  => no breakpoint
6464          *  0b0011  => breakpoint on addr
6465          *  0b1100  => breakpoint on addr + 2
6466          *  0b1111  => breakpoint on addr
6467          * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c).
6468          */
6469         int bas = extract64(bcr, 5, 4);
6470         addr = bvr & ~3ULL;
6471         if (bas == 0) {
6472             return;
6473         }
6474         if (bas == 0xc) {
6475             addr += 2;
6476         }
6477         break;
6478     }
6479     case 2: /* unlinked context ID match */
6480     case 8: /* unlinked VMID match (reserved if no EL2) */
6481     case 10: /* unlinked context ID and VMID match (reserved if no EL2) */
6482         qemu_log_mask(LOG_UNIMP,
6483                       "arm: unlinked context breakpoint types not implemented\n");
6484         return;
6485     case 9: /* linked VMID match (reserved if no EL2) */
6486     case 11: /* linked context ID and VMID match (reserved if no EL2) */
6487     case 3: /* linked context ID match */
6488     default:
6489         /* We must generate no events for Linked context matches (unless
6490          * they are linked to by some other bp/wp, which is handled in
6491          * updates for the linking bp/wp). We choose to also generate no events
6492          * for reserved values.
6493          */
6494         return;
6495     }
6496 
6497     cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]);
6498 }
6499 
6500 void hw_breakpoint_update_all(ARMCPU *cpu)
6501 {
6502     int i;
6503     CPUARMState *env = &cpu->env;
6504 
6505     /* Completely clear out existing QEMU breakpoints and our array, to
6506      * avoid possible stale entries following migration load.
6507      */
6508     cpu_breakpoint_remove_all(CPU(cpu), BP_CPU);
6509     memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint));
6510 
6511     for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) {
6512         hw_breakpoint_update(cpu, i);
6513     }
6514 }
6515 
6516 static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
6517                          uint64_t value)
6518 {
6519     ARMCPU *cpu = env_archcpu(env);
6520     int i = ri->crm;
6521 
6522     raw_write(env, ri, value);
6523     hw_breakpoint_update(cpu, i);
6524 }
6525 
6526 static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
6527                          uint64_t value)
6528 {
6529     ARMCPU *cpu = env_archcpu(env);
6530     int i = ri->crm;
6531 
6532     /* BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only
6533      * copy of BAS[0].
6534      */
6535     value = deposit64(value, 6, 1, extract64(value, 5, 1));
6536     value = deposit64(value, 8, 1, extract64(value, 7, 1));
6537 
6538     raw_write(env, ri, value);
6539     hw_breakpoint_update(cpu, i);
6540 }
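
/*
 * Illustrative effect of the BAS adjustment above (editorial note):
 * writing BAS[3:0] = 0b0100 stores 0b1100 (BAS[3] copies BAS[2] and
 * BAS[1] copies BAS[0]), which hw_breakpoint_update() then treats as a
 * breakpoint on addr + 2.
 */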
6541 
6542 static void define_debug_regs(ARMCPU *cpu)
6543 {
6544     /* Define v7 and v8 architectural debug registers.
6545      * These are just dummy implementations for now.
6546      */
6547     int i;
6548     int wrps, brps, ctx_cmps;
6549 
6550     /*
6551      * The Arm ARM says DBGDIDR is optional and deprecated if EL1 cannot
6552      * use AArch32.  Given that bit 15 is RES1, if the value is 0 then
6553      * the register must not exist for this cpu.
6554      */
6555     if (cpu->isar.dbgdidr != 0) {
6556         ARMCPRegInfo dbgdidr = {
6557             .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0,
6558             .opc1 = 0, .opc2 = 0,
6559             .access = PL0_R, .accessfn = access_tda,
6560             .type = ARM_CP_CONST, .resetvalue = cpu->isar.dbgdidr,
6561         };
6562         define_one_arm_cp_reg(cpu, &dbgdidr);
6563     }
6564 
6565     brps = arm_num_brps(cpu);
6566     wrps = arm_num_wrps(cpu);
6567     ctx_cmps = arm_num_ctx_cmps(cpu);
6568 
6569     assert(ctx_cmps <= brps);
6570 
6571     define_arm_cp_regs(cpu, debug_cp_reginfo);
6572 
6573     if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) {
6574         define_arm_cp_regs(cpu, debug_lpae_cp_reginfo);
6575     }
6576 
6577     for (i = 0; i < brps; i++) {
6578         char *dbgbvr_el1_name = g_strdup_printf("DBGBVR%d_EL1", i);
6579         char *dbgbcr_el1_name = g_strdup_printf("DBGBCR%d_EL1", i);
6580         ARMCPRegInfo dbgregs[] = {
6581             { .name = dbgbvr_el1_name, .state = ARM_CP_STATE_BOTH,
6582               .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4,
6583               .access = PL1_RW, .accessfn = access_tda,
6584               .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]),
6585               .writefn = dbgbvr_write, .raw_writefn = raw_write
6586             },
6587             { .name = dbgbcr_el1_name, .state = ARM_CP_STATE_BOTH,
6588               .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5,
6589               .access = PL1_RW, .accessfn = access_tda,
6590               .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]),
6591               .writefn = dbgbcr_write, .raw_writefn = raw_write
6592             },
6593         };
6594         define_arm_cp_regs(cpu, dbgregs);
6595         g_free(dbgbvr_el1_name);
6596         g_free(dbgbcr_el1_name);
6597     }
6598 
6599     for (i = 0; i < wrps; i++) {
6600         char *dbgwvr_el1_name = g_strdup_printf("DBGWVR%d_EL1", i);
6601         char *dbgwcr_el1_name = g_strdup_printf("DBGWCR%d_EL1", i);
6602         ARMCPRegInfo dbgregs[] = {
6603             { .name = dbgwvr_el1_name, .state = ARM_CP_STATE_BOTH,
6604               .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6,
6605               .access = PL1_RW, .accessfn = access_tda,
6606               .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]),
6607               .writefn = dbgwvr_write, .raw_writefn = raw_write
6608             },
6609             { .name = dbgwcr_el1_name, .state = ARM_CP_STATE_BOTH,
6610               .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7,
6611               .access = PL1_RW, .accessfn = access_tda,
6612               .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]),
6613               .writefn = dbgwcr_write, .raw_writefn = raw_write
6614             },
6615         };
6616         define_arm_cp_regs(cpu, dbgregs);
6617         g_free(dbgwvr_el1_name);
6618         g_free(dbgwcr_el1_name);
6619     }
6620 }
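
/*
 * Editorial note on the encodings used above: the breakpoint/watchpoint
 * index n is placed in CRm, with opc2 selecting the register within the
 * pair (4 = DBGBVR, 5 = DBGBCR, 6 = DBGWVR, 7 = DBGWCR).  For example,
 * DBGWCR3_EL1 is op0 = 2, op1 = 0, CRn = 0, CRm = 3, op2 = 7.
 */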
6621 
6622 static void define_pmu_regs(ARMCPU *cpu)
6623 {
6624     /*
6625      * v7 performance monitor control register: same implementor
6626      * field as main ID register. We implement pmu_num_counters()
6627      * event counters in addition to the cycle count register.
6628      */
6629     unsigned int i, pmcrn = pmu_num_counters(&cpu->env);
6630     ARMCPRegInfo pmcr = {
6631         .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
6632         .access = PL0_RW,
6633         .type = ARM_CP_IO | ARM_CP_ALIAS,
6634         .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr),
6635         .accessfn = pmreg_access, .writefn = pmcr_write,
6636         .raw_writefn = raw_write,
6637     };
6638     ARMCPRegInfo pmcr64 = {
6639         .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64,
6640         .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0,
6641         .access = PL0_RW, .accessfn = pmreg_access,
6642         .type = ARM_CP_IO,
6643         .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
6644         .resetvalue = cpu->isar.reset_pmcr_el0,
6645         .writefn = pmcr_write, .raw_writefn = raw_write,
6646     };
6647 
6648     define_one_arm_cp_reg(cpu, &pmcr);
6649     define_one_arm_cp_reg(cpu, &pmcr64);
6650     for (i = 0; i < pmcrn; i++) {
6651         char *pmevcntr_name = g_strdup_printf("PMEVCNTR%d", i);
6652         char *pmevcntr_el0_name = g_strdup_printf("PMEVCNTR%d_EL0", i);
6653         char *pmevtyper_name = g_strdup_printf("PMEVTYPER%d", i);
6654         char *pmevtyper_el0_name = g_strdup_printf("PMEVTYPER%d_EL0", i);
6655         ARMCPRegInfo pmev_regs[] = {
6656             { .name = pmevcntr_name, .cp = 15, .crn = 14,
6657               .crm = 8 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
6658               .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
6659               .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
6660               .accessfn = pmreg_access_xevcntr },
6661             { .name = pmevcntr_el0_name, .state = ARM_CP_STATE_AA64,
6662               .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 8 | (3 & (i >> 3)),
6663               .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access_xevcntr,
6664               .type = ARM_CP_IO,
6665               .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
6666               .raw_readfn = pmevcntr_rawread,
6667               .raw_writefn = pmevcntr_rawwrite },
6668             { .name = pmevtyper_name, .cp = 15, .crn = 14,
6669               .crm = 12 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
6670               .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
6671               .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
6672               .accessfn = pmreg_access },
6673             { .name = pmevtyper_el0_name, .state = ARM_CP_STATE_AA64,
6674               .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 12 | (3 & (i >> 3)),
6675               .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
6676               .type = ARM_CP_IO,
6677               .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
6678               .raw_writefn = pmevtyper_rawwrite },
6679         };
6680         define_arm_cp_regs(cpu, pmev_regs);
6681         g_free(pmevcntr_name);
6682         g_free(pmevcntr_el0_name);
6683         g_free(pmevtyper_name);
6684         g_free(pmevtyper_el0_name);
6685     }
6686     if (cpu_isar_feature(aa32_pmu_8_1, cpu)) {
6687         ARMCPRegInfo v81_pmu_regs[] = {
6688             { .name = "PMCEID2", .state = ARM_CP_STATE_AA32,
6689               .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 4,
6690               .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
6691               .resetvalue = extract64(cpu->pmceid0, 32, 32) },
6692             { .name = "PMCEID3", .state = ARM_CP_STATE_AA32,
6693               .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 5,
6694               .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
6695               .resetvalue = extract64(cpu->pmceid1, 32, 32) },
6696         };
6697         define_arm_cp_regs(cpu, v81_pmu_regs);
6698     }
6699     if (cpu_isar_feature(any_pmu_8_4, cpu)) {
6700         static const ARMCPRegInfo v84_pmmir = {
6701             .name = "PMMIR_EL1", .state = ARM_CP_STATE_BOTH,
6702             .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 6,
6703             .access = PL1_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
6704             .resetvalue = 0
6705         };
6706         define_one_arm_cp_reg(cpu, &v84_pmmir);
6707     }
6708 }
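
/*
 * Editorial note on the PMEVCNTR<n>/PMEVTYPER<n> encodings generated
 * above: the counter index n is split across CRm and opc2, with
 * crm = 8 | (n >> 3) for the counters, crm = 12 | (n >> 3) for the type
 * registers, and opc2 = n & 7 in both cases.  For example,
 * PMEVCNTR10_EL0 is op0 = 3, op1 = 3, CRn = 14, CRm = 9, op2 = 2.
 */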
6709 
6710 /* We don't know until after realize whether there's a GICv3
6711  * attached, and that is what registers the gicv3 sysregs.
6712  * So we have to fill in the GIC fields in ID_PFR1/ID_PFR1_EL1 and
6713  * ID_AA64PFR0_EL1 at runtime.
6714  */
6715 static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri)
6716 {
6717     ARMCPU *cpu = env_archcpu(env);
6718     uint64_t pfr1 = cpu->isar.id_pfr1;
6719 
6720     if (env->gicv3state) {
6721         pfr1 |= 1 << 28;
6722     }
6723     return pfr1;
6724 }
6725 
6726 #ifndef CONFIG_USER_ONLY
6727 static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri)
6728 {
6729     ARMCPU *cpu = env_archcpu(env);
6730     uint64_t pfr0 = cpu->isar.id_aa64pfr0;
6731 
6732     if (env->gicv3state) {
6733         pfr0 |= 1 << 24;
6734     }
6735     return pfr0;
6736 }
6737 #endif
6738 
6739 /* Shared logic between LORID and the rest of the LOR* registers.
6740  * Secure state exclusion has already been dealt with.
6741  */
6742 static CPAccessResult access_lor_ns(CPUARMState *env,
6743                                     const ARMCPRegInfo *ri, bool isread)
6744 {
6745     int el = arm_current_el(env);
6746 
6747     if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TLOR)) {
6748         return CP_ACCESS_TRAP_EL2;
6749     }
6750     if (el < 3 && (env->cp15.scr_el3 & SCR_TLOR)) {
6751         return CP_ACCESS_TRAP_EL3;
6752     }
6753     return CP_ACCESS_OK;
6754 }
6755 
6756 static CPAccessResult access_lor_other(CPUARMState *env,
6757                                        const ARMCPRegInfo *ri, bool isread)
6758 {
6759     if (arm_is_secure_below_el3(env)) {
6760         /* Access denied in secure mode.  */
6761         return CP_ACCESS_TRAP;
6762     }
6763     return access_lor_ns(env, ri, isread);
6764 }
6765 
6766 /*
6767  * A trivial implementation of ARMv8.1-LOR leaves all of these
6768  * registers fixed at 0, which indicates that there are zero
6769  * supported Limited Ordering regions.
6770  */
6771 static const ARMCPRegInfo lor_reginfo[] = {
6772     { .name = "LORSA_EL1", .state = ARM_CP_STATE_AA64,
6773       .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 0,
6774       .access = PL1_RW, .accessfn = access_lor_other,
6775       .type = ARM_CP_CONST, .resetvalue = 0 },
6776     { .name = "LOREA_EL1", .state = ARM_CP_STATE_AA64,
6777       .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 1,
6778       .access = PL1_RW, .accessfn = access_lor_other,
6779       .type = ARM_CP_CONST, .resetvalue = 0 },
6780     { .name = "LORN_EL1", .state = ARM_CP_STATE_AA64,
6781       .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 2,
6782       .access = PL1_RW, .accessfn = access_lor_other,
6783       .type = ARM_CP_CONST, .resetvalue = 0 },
6784     { .name = "LORC_EL1", .state = ARM_CP_STATE_AA64,
6785       .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 3,
6786       .access = PL1_RW, .accessfn = access_lor_other,
6787       .type = ARM_CP_CONST, .resetvalue = 0 },
6788     { .name = "LORID_EL1", .state = ARM_CP_STATE_AA64,
6789       .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 7,
6790       .access = PL1_R, .accessfn = access_lor_ns,
6791       .type = ARM_CP_CONST, .resetvalue = 0 },
6792 };
6793 
6794 #ifdef TARGET_AARCH64
6795 static CPAccessResult access_pauth(CPUARMState *env, const ARMCPRegInfo *ri,
6796                                    bool isread)
6797 {
6798     int el = arm_current_el(env);
6799 
6800     if (el < 2 &&
6801         arm_is_el2_enabled(env) &&
6802         !(arm_hcr_el2_eff(env) & HCR_APK)) {
6803         return CP_ACCESS_TRAP_EL2;
6804     }
6805     if (el < 3 &&
6806         arm_feature(env, ARM_FEATURE_EL3) &&
6807         !(env->cp15.scr_el3 & SCR_APK)) {
6808         return CP_ACCESS_TRAP_EL3;
6809     }
6810     return CP_ACCESS_OK;
6811 }
6812 
6813 static const ARMCPRegInfo pauth_reginfo[] = {
6814     { .name = "APDAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
6815       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 0,
6816       .access = PL1_RW, .accessfn = access_pauth,
6817       .fieldoffset = offsetof(CPUARMState, keys.apda.lo) },
6818     { .name = "APDAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
6819       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 1,
6820       .access = PL1_RW, .accessfn = access_pauth,
6821       .fieldoffset = offsetof(CPUARMState, keys.apda.hi) },
6822     { .name = "APDBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
6823       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 2,
6824       .access = PL1_RW, .accessfn = access_pauth,
6825       .fieldoffset = offsetof(CPUARMState, keys.apdb.lo) },
6826     { .name = "APDBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
6827       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 3,
6828       .access = PL1_RW, .accessfn = access_pauth,
6829       .fieldoffset = offsetof(CPUARMState, keys.apdb.hi) },
6830     { .name = "APGAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
6831       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 0,
6832       .access = PL1_RW, .accessfn = access_pauth,
6833       .fieldoffset = offsetof(CPUARMState, keys.apga.lo) },
6834     { .name = "APGAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
6835       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 1,
6836       .access = PL1_RW, .accessfn = access_pauth,
6837       .fieldoffset = offsetof(CPUARMState, keys.apga.hi) },
6838     { .name = "APIAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
6839       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 0,
6840       .access = PL1_RW, .accessfn = access_pauth,
6841       .fieldoffset = offsetof(CPUARMState, keys.apia.lo) },
6842     { .name = "APIAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
6843       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 1,
6844       .access = PL1_RW, .accessfn = access_pauth,
6845       .fieldoffset = offsetof(CPUARMState, keys.apia.hi) },
6846     { .name = "APIBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
6847       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 2,
6848       .access = PL1_RW, .accessfn = access_pauth,
6849       .fieldoffset = offsetof(CPUARMState, keys.apib.lo) },
6850     { .name = "APIBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
6851       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 3,
6852       .access = PL1_RW, .accessfn = access_pauth,
6853       .fieldoffset = offsetof(CPUARMState, keys.apib.hi) },
6854 };
6855 
6856 static const ARMCPRegInfo tlbirange_reginfo[] = {
6857     { .name = "TLBI_RVAE1IS", .state = ARM_CP_STATE_AA64,
6858       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 1,
6859       .access = PL1_W, .type = ARM_CP_NO_RAW,
6860       .writefn = tlbi_aa64_rvae1is_write },
6861     { .name = "TLBI_RVAAE1IS", .state = ARM_CP_STATE_AA64,
6862       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 3,
6863       .access = PL1_W, .type = ARM_CP_NO_RAW,
6864       .writefn = tlbi_aa64_rvae1is_write },
6865     { .name = "TLBI_RVALE1IS", .state = ARM_CP_STATE_AA64,
6866       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 5,
6867       .access = PL1_W, .type = ARM_CP_NO_RAW,
6868       .writefn = tlbi_aa64_rvae1is_write },
6869     { .name = "TLBI_RVAALE1IS", .state = ARM_CP_STATE_AA64,
6870       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 7,
6871       .access = PL1_W, .type = ARM_CP_NO_RAW,
6872       .writefn = tlbi_aa64_rvae1is_write },
6873     { .name = "TLBI_RVAE1OS", .state = ARM_CP_STATE_AA64,
6874       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
6875       .access = PL1_W, .type = ARM_CP_NO_RAW,
6876       .writefn = tlbi_aa64_rvae1is_write },
6877     { .name = "TLBI_RVAAE1OS", .state = ARM_CP_STATE_AA64,
6878       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 3,
6879       .access = PL1_W, .type = ARM_CP_NO_RAW,
6880       .writefn = tlbi_aa64_rvae1is_write },
6881     { .name = "TLBI_RVALE1OS", .state = ARM_CP_STATE_AA64,
6882       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 5,
6883       .access = PL1_W, .type = ARM_CP_NO_RAW,
6884       .writefn = tlbi_aa64_rvae1is_write },
6885     { .name = "TLBI_RVAALE1OS", .state = ARM_CP_STATE_AA64,
6886       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 7,
6887       .access = PL1_W, .type = ARM_CP_NO_RAW,
6888       .writefn = tlbi_aa64_rvae1is_write },
6889     { .name = "TLBI_RVAE1", .state = ARM_CP_STATE_AA64,
6890       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
6891       .access = PL1_W, .type = ARM_CP_NO_RAW,
6892       .writefn = tlbi_aa64_rvae1_write },
6893     { .name = "TLBI_RVAAE1", .state = ARM_CP_STATE_AA64,
6894       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 3,
6895       .access = PL1_W, .type = ARM_CP_NO_RAW,
6896       .writefn = tlbi_aa64_rvae1_write },
6897     { .name = "TLBI_RVALE1", .state = ARM_CP_STATE_AA64,
6898       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 5,
6899       .access = PL1_W, .type = ARM_CP_NO_RAW,
6900       .writefn = tlbi_aa64_rvae1_write },
6901     { .name = "TLBI_RVAALE1", .state = ARM_CP_STATE_AA64,
6902       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 7,
6903       .access = PL1_W, .type = ARM_CP_NO_RAW,
6904       .writefn = tlbi_aa64_rvae1_write },
6905     { .name = "TLBI_RIPAS2E1IS", .state = ARM_CP_STATE_AA64,
6906       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 2,
6907       .access = PL2_W, .type = ARM_CP_NOP },
6908     { .name = "TLBI_RIPAS2LE1IS", .state = ARM_CP_STATE_AA64,
6909       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 6,
6910       .access = PL2_W, .type = ARM_CP_NOP },
6911     { .name = "TLBI_RVAE2IS", .state = ARM_CP_STATE_AA64,
6912       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 2, .opc2 = 1,
6913       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
6914       .writefn = tlbi_aa64_rvae2is_write },
6915     { .name = "TLBI_RVALE2IS", .state = ARM_CP_STATE_AA64,
6916       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 2, .opc2 = 5,
6917       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
6918       .writefn = tlbi_aa64_rvae2is_write },
6919     { .name = "TLBI_RIPAS2E1", .state = ARM_CP_STATE_AA64,
6920       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 2,
6921       .access = PL2_W, .type = ARM_CP_NOP },
6922     { .name = "TLBI_RIPAS2LE1", .state = ARM_CP_STATE_AA64,
6923       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 6,
6924       .access = PL2_W, .type = ARM_CP_NOP },
6925     { .name = "TLBI_RVAE2OS", .state = ARM_CP_STATE_AA64,
6926       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 5, .opc2 = 1,
6927       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
6928       .writefn = tlbi_aa64_rvae2is_write },
6929     { .name = "TLBI_RVALE2OS", .state = ARM_CP_STATE_AA64,
6930       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 5, .opc2 = 5,
6931       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
6932       .writefn = tlbi_aa64_rvae2is_write },
6933     { .name = "TLBI_RVAE2", .state = ARM_CP_STATE_AA64,
6934       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 6, .opc2 = 1,
6935       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
6936       .writefn = tlbi_aa64_rvae2_write },
6937     { .name = "TLBI_RVALE2", .state = ARM_CP_STATE_AA64,
6938       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 6, .opc2 = 5,
6939       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
6940       .writefn = tlbi_aa64_rvae2_write },
6941     { .name = "TLBI_RVAE3IS", .state = ARM_CP_STATE_AA64,
6942       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 2, .opc2 = 1,
6943       .access = PL3_W, .type = ARM_CP_NO_RAW,
6944       .writefn = tlbi_aa64_rvae3is_write },
6945     { .name = "TLBI_RVALE3IS", .state = ARM_CP_STATE_AA64,
6946       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 2, .opc2 = 5,
6947       .access = PL3_W, .type = ARM_CP_NO_RAW,
6948       .writefn = tlbi_aa64_rvae3is_write },
6949     { .name = "TLBI_RVAE3OS", .state = ARM_CP_STATE_AA64,
6950       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 5, .opc2 = 1,
6951       .access = PL3_W, .type = ARM_CP_NO_RAW,
6952       .writefn = tlbi_aa64_rvae3is_write },
6953     { .name = "TLBI_RVALE3OS", .state = ARM_CP_STATE_AA64,
6954       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 5, .opc2 = 5,
6955       .access = PL3_W, .type = ARM_CP_NO_RAW,
6956       .writefn = tlbi_aa64_rvae3is_write },
6957     { .name = "TLBI_RVAE3", .state = ARM_CP_STATE_AA64,
6958       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 6, .opc2 = 1,
6959       .access = PL3_W, .type = ARM_CP_NO_RAW,
6960       .writefn = tlbi_aa64_rvae3_write },
6961     { .name = "TLBI_RVALE3", .state = ARM_CP_STATE_AA64,
6962       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 6, .opc2 = 5,
6963       .access = PL3_W, .type = ARM_CP_NO_RAW,
6964       .writefn = tlbi_aa64_rvae3_write },
6965 };
6966 
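/*
 * FEAT_TLBIOS: outer-shareable TLB maintenance. QEMU does not distinguish
 * outer- from inner-shareable broadcast, so these share the *IS write
 * functions; the stage-2 IPAS2 forms are NOPs.
 */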
6967 static const ARMCPRegInfo tlbios_reginfo[] = {
6968     { .name = "TLBI_VMALLE1OS", .state = ARM_CP_STATE_AA64,
6969       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 0,
6970       .access = PL1_W, .type = ARM_CP_NO_RAW,
6971       .writefn = tlbi_aa64_vmalle1is_write },
6972     { .name = "TLBI_VAE1OS", .state = ARM_CP_STATE_AA64,
6973       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 1,
6974       .access = PL1_W, .type = ARM_CP_NO_RAW,
6975       .writefn = tlbi_aa64_vae1is_write },
6976     { .name = "TLBI_ASIDE1OS", .state = ARM_CP_STATE_AA64,
6977       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 2,
6978       .access = PL1_W, .type = ARM_CP_NO_RAW,
6979       .writefn = tlbi_aa64_vmalle1is_write },
6980     { .name = "TLBI_VAAE1OS", .state = ARM_CP_STATE_AA64,
6981       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 3,
6982       .access = PL1_W, .type = ARM_CP_NO_RAW,
6983       .writefn = tlbi_aa64_vae1is_write },
6984     { .name = "TLBI_VALE1OS", .state = ARM_CP_STATE_AA64,
6985       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 5,
6986       .access = PL1_W, .type = ARM_CP_NO_RAW,
6987       .writefn = tlbi_aa64_vae1is_write },
6988     { .name = "TLBI_VAALE1OS", .state = ARM_CP_STATE_AA64,
6989       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 7,
6990       .access = PL1_W, .type = ARM_CP_NO_RAW,
6991       .writefn = tlbi_aa64_vae1is_write },
6992     { .name = "TLBI_ALLE2OS", .state = ARM_CP_STATE_AA64,
6993       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 0,
6994       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
6995       .writefn = tlbi_aa64_alle2is_write },
6996     { .name = "TLBI_VAE2OS", .state = ARM_CP_STATE_AA64,
6997       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 1,
6998       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
6999       .writefn = tlbi_aa64_vae2is_write },
7000     { .name = "TLBI_ALLE1OS", .state = ARM_CP_STATE_AA64,
7001       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 4,
7002       .access = PL2_W, .type = ARM_CP_NO_RAW,
7003       .writefn = tlbi_aa64_alle1is_write },
7004     { .name = "TLBI_VALE2OS", .state = ARM_CP_STATE_AA64,
7005       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 5,
7006       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
7007       .writefn = tlbi_aa64_vae2is_write },
7008     { .name = "TLBI_VMALLS12E1OS", .state = ARM_CP_STATE_AA64,
7009       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 6,
7010       .access = PL2_W, .type = ARM_CP_NO_RAW,
7011       .writefn = tlbi_aa64_alle1is_write },
7012     { .name = "TLBI_IPAS2E1OS", .state = ARM_CP_STATE_AA64,
7013       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 0,
7014       .access = PL2_W, .type = ARM_CP_NOP },
7015     { .name = "TLBI_RIPAS2E1OS", .state = ARM_CP_STATE_AA64,
7016       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 3,
7017       .access = PL2_W, .type = ARM_CP_NOP },
7018     { .name = "TLBI_IPAS2LE1OS", .state = ARM_CP_STATE_AA64,
7019       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 4,
7020       .access = PL2_W, .type = ARM_CP_NOP },
7021     { .name = "TLBI_RIPAS2LE1OS", .state = ARM_CP_STATE_AA64,
7022       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 7,
7023       .access = PL2_W, .type = ARM_CP_NOP },
7024     { .name = "TLBI_ALLE3OS", .state = ARM_CP_STATE_AA64,
7025       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 0,
7026       .access = PL3_W, .type = ARM_CP_NO_RAW,
7027       .writefn = tlbi_aa64_alle3is_write },
7028     { .name = "TLBI_VAE3OS", .state = ARM_CP_STATE_AA64,
7029       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 1,
7030       .access = PL3_W, .type = ARM_CP_NO_RAW,
7031       .writefn = tlbi_aa64_vae3is_write },
7032     { .name = "TLBI_VALE3OS", .state = ARM_CP_STATE_AA64,
7033       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 5,
7034       .access = PL3_W, .type = ARM_CP_NO_RAW,
7035       .writefn = tlbi_aa64_vae3is_write },
7036 };
7037 
7038 static uint64_t rndr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
7039 {
7040     Error *err = NULL;
7041     uint64_t ret;
7042 
7043     /* Success sets NZCV = 0000.  */
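    /* QEMU stores Z inverted: the flag is set when env->ZF == 0, so the
     * ZF = 1 below leaves the Z flag clear.
     */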
7044     env->NF = env->CF = env->VF = 0, env->ZF = 1;
7045 
7046     if (qemu_guest_getrandom(&ret, sizeof(ret), &err) < 0) {
7047         /*
7048          * ??? Failed, for unknown reasons in the crypto subsystem.
7049          * The best we can do is log the reason and return the
7050          * timed-out indication to the guest.  There is no reason
7051          * we know to expect this failure to be transitory, so the
7052          * guest may well hang retrying the operation.
7053          */
7054         qemu_log_mask(LOG_UNIMP, "%s: Crypto failure: %s",
7055                       ri->name, error_get_pretty(err));
7056         error_free(err);
7057 
7058         env->ZF = 0; /* NZCV = 0100 */
7059         return 0;
7060     }
7061     return ret;
7062 }
7063 
7064 /* We do not support re-seeding, so the two registers operate the same.  */
7065 static const ARMCPRegInfo rndr_reginfo[] = {
7066     { .name = "RNDR", .state = ARM_CP_STATE_AA64,
7067       .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO,
7068       .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 0,
7069       .access = PL0_R, .readfn = rndr_readfn },
7070     { .name = "RNDRRS", .state = ARM_CP_STATE_AA64,
7071       .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO,
7072       .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 1,
7073       .access = PL0_R, .readfn = rndr_readfn },
7074 };
7075 
7076 #ifndef CONFIG_USER_ONLY
7077 static void dccvap_writefn(CPUARMState *env, const ARMCPRegInfo *opaque,
7078                           uint64_t value)
7079 {
7080     ARMCPU *cpu = env_archcpu(env);
7081     /* CTR_EL0.DminLine, bits [19:16]: log2 of the line size in words */
7082     uint64_t dline_size = 4 << ((cpu->ctr >> 16) & 0xF);
7083     uint64_t vaddr_in = (uint64_t) value;
7084     uint64_t vaddr = vaddr_in & ~(dline_size - 1);
7085     void *haddr;
7086     int mem_idx = cpu_mmu_index(env, false);
7087 
7088     /* This won't cross a page boundary: vaddr is cache-line aligned */
7089     haddr = probe_read(env, vaddr, dline_size, mem_idx, GETPC());
7090     if (haddr) {
7091 
7092         ram_addr_t offset;
7093         MemoryRegion *mr;
7094 
7095         /* RCU lock is already being held */
7096         mr = memory_region_from_host(haddr, &offset);
7097 
7098         if (mr) {
7099             memory_region_writeback(mr, offset, dline_size);
7100         }
7101     }
7102 }
7103 
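/*
 * DC CVAP/CVADP (clean to the point of persistence / deep persistence)
 * are modelled by dccvap_writefn above: the host memory backing the
 * guest cache line is written back via memory_region_writeback().
 */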
7104 static const ARMCPRegInfo dcpop_reg[] = {
7105     { .name = "DC_CVAP", .state = ARM_CP_STATE_AA64,
7106       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 1,
7107       .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END,
7108       .accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn },
7109 };
7110 
7111 static const ARMCPRegInfo dcpodp_reg[] = {
7112     { .name = "DC_CVADP", .state = ARM_CP_STATE_AA64,
7113       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 1,
7114       .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END,
7115       .accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn },
7116 };
7117 #endif /* CONFIG_USER_ONLY */
7118 
7119 static CPAccessResult access_aa64_tid5(CPUARMState *env, const ARMCPRegInfo *ri,
7120                                        bool isread)
7121 {
7122     if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID5)) {
7123         return CP_ACCESS_TRAP_EL2;
7124     }
7125 
7126     return CP_ACCESS_OK;
7127 }
7128 
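/*
 * FEAT_MTE: accesses to the tag-control registers below EL2 trap to EL2
 * when HCR_EL2.ATA is 0 (unless E2H and TGE are both set), and accesses
 * below EL3 trap to EL3 when SCR_EL3.ATA is 0.
 */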
7129 static CPAccessResult access_mte(CPUARMState *env, const ARMCPRegInfo *ri,
7130                                  bool isread)
7131 {
7132     int el = arm_current_el(env);
7133 
7134     if (el < 2 && arm_is_el2_enabled(env)) {
7135         uint64_t hcr = arm_hcr_el2_eff(env);
7136         if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
7137             return CP_ACCESS_TRAP_EL2;
7138         }
7139     }
7140     if (el < 3 &&
7141         arm_feature(env, ARM_FEATURE_EL3) &&
7142         !(env->cp15.scr_el3 & SCR_ATA)) {
7143         return CP_ACCESS_TRAP_EL3;
7144     }
7145     return CP_ACCESS_OK;
7146 }
7147 
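/*
 * TCO is a PSTATE bit rather than a field stored in cp15 state, so the
 * register entry below has no fieldoffset; it uses these accessors and
 * is marked ARM_CP_NO_RAW.
 */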
7148 static uint64_t tco_read(CPUARMState *env, const ARMCPRegInfo *ri)
7149 {
7150     return env->pstate & PSTATE_TCO;
7151 }
7152 
7153 static void tco_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
7154 {
7155     env->pstate = (env->pstate & ~PSTATE_TCO) | (val & PSTATE_TCO);
7156 }
7157 
7158 static const ARMCPRegInfo mte_reginfo[] = {
7159     { .name = "TFSRE0_EL1", .state = ARM_CP_STATE_AA64,
7160       .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 6, .opc2 = 1,
7161       .access = PL1_RW, .accessfn = access_mte,
7162       .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[0]) },
7163     { .name = "TFSR_EL1", .state = ARM_CP_STATE_AA64,
7164       .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 6, .opc2 = 0,
7165       .access = PL1_RW, .accessfn = access_mte,
7166       .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[1]) },
7167     { .name = "TFSR_EL2", .state = ARM_CP_STATE_AA64,
7168       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 6, .opc2 = 0,
7169       .access = PL2_RW, .accessfn = access_mte,
7170       .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[2]) },
7171     { .name = "TFSR_EL3", .state = ARM_CP_STATE_AA64,
7172       .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 6, .opc2 = 0,
7173       .access = PL3_RW,
7174       .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[3]) },
7175     { .name = "RGSR_EL1", .state = ARM_CP_STATE_AA64,
7176       .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 5,
7177       .access = PL1_RW, .accessfn = access_mte,
7178       .fieldoffset = offsetof(CPUARMState, cp15.rgsr_el1) },
7179     { .name = "GCR_EL1", .state = ARM_CP_STATE_AA64,
7180       .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 6,
7181       .access = PL1_RW, .accessfn = access_mte,
7182       .fieldoffset = offsetof(CPUARMState, cp15.gcr_el1) },
7183     { .name = "GMID_EL1", .state = ARM_CP_STATE_AA64,
7184       .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 4,
7185       .access = PL1_R, .accessfn = access_aa64_tid5,
7186       .type = ARM_CP_CONST, .resetvalue = GMID_EL1_BS },
7187     { .name = "TCO", .state = ARM_CP_STATE_AA64,
7188       .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 7,
7189       .type = ARM_CP_NO_RAW,
7190       .access = PL0_RW, .readfn = tco_read, .writefn = tco_write },
7191     { .name = "DC_IGVAC", .state = ARM_CP_STATE_AA64,
7192       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 3,
7193       .type = ARM_CP_NOP, .access = PL1_W,
7194       .accessfn = aa64_cacheop_poc_access },
7195     { .name = "DC_IGSW", .state = ARM_CP_STATE_AA64,
7196       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 4,
7197       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
7198     { .name = "DC_IGDVAC", .state = ARM_CP_STATE_AA64,
7199       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 5,
7200       .type = ARM_CP_NOP, .access = PL1_W,
7201       .accessfn = aa64_cacheop_poc_access },
7202     { .name = "DC_IGDSW", .state = ARM_CP_STATE_AA64,
7203       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 6,
7204       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
7205     { .name = "DC_CGSW", .state = ARM_CP_STATE_AA64,
7206       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 4,
7207       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
7208     { .name = "DC_CGDSW", .state = ARM_CP_STATE_AA64,
7209       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 6,
7210       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
7211     { .name = "DC_CIGSW", .state = ARM_CP_STATE_AA64,
7212       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 4,
7213       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
7214     { .name = "DC_CIGDSW", .state = ARM_CP_STATE_AA64,
7215       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 6,
7216       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
7217 };
7218 
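/*
 * A RAZ/WI view of TCO, presumably for configurations that implement the
 * MTE instructions but not the full MTE register state.
 */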
7219 static const ARMCPRegInfo mte_tco_ro_reginfo[] = {
7220     { .name = "TCO", .state = ARM_CP_STATE_AA64,
7221       .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 7,
7222       .type = ARM_CP_CONST, .access = PL0_RW, },
7223 };
7224 
7225 static const ARMCPRegInfo mte_el0_cacheop_reginfo[] = {
7226     { .name = "DC_CGVAC", .state = ARM_CP_STATE_AA64,
7227       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 3,
7228       .type = ARM_CP_NOP, .access = PL0_W,
7229       .accessfn = aa64_cacheop_poc_access },
7230     { .name = "DC_CGDVAC", .state = ARM_CP_STATE_AA64,
7231       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 5,
7232       .type = ARM_CP_NOP, .access = PL0_W,
7233       .accessfn = aa64_cacheop_poc_access },
7234     { .name = "DC_CGVAP", .state = ARM_CP_STATE_AA64,
7235       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 3,
7236       .type = ARM_CP_NOP, .access = PL0_W,
7237       .accessfn = aa64_cacheop_poc_access },
7238     { .name = "DC_CGDVAP", .state = ARM_CP_STATE_AA64,
7239       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 5,
7240       .type = ARM_CP_NOP, .access = PL0_W,
7241       .accessfn = aa64_cacheop_poc_access },
7242     { .name = "DC_CGVADP", .state = ARM_CP_STATE_AA64,
7243       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 3,
7244       .type = ARM_CP_NOP, .access = PL0_W,
7245       .accessfn = aa64_cacheop_poc_access },
7246     { .name = "DC_CGDVADP", .state = ARM_CP_STATE_AA64,
7247       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 5,
7248       .type = ARM_CP_NOP, .access = PL0_W,
7249       .accessfn = aa64_cacheop_poc_access },
7250     { .name = "DC_CIGVAC", .state = ARM_CP_STATE_AA64,
7251       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 3,
7252       .type = ARM_CP_NOP, .access = PL0_W,
7253       .accessfn = aa64_cacheop_poc_access },
7254     { .name = "DC_CIGDVAC", .state = ARM_CP_STATE_AA64,
7255       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 5,
7256       .type = ARM_CP_NOP, .access = PL0_W,
7257       .accessfn = aa64_cacheop_poc_access },
7258     { .name = "DC_GVA", .state = ARM_CP_STATE_AA64,
7259       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 3,
7260       .access = PL0_W, .type = ARM_CP_DC_GVA,
7261 #ifndef CONFIG_USER_ONLY
7262       /* Avoid overhead of an access check that always passes in user-mode */
7263       .accessfn = aa64_zva_access,
7264 #endif
7265     },
7266     { .name = "DC_GZVA", .state = ARM_CP_STATE_AA64,
7267       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 4,
7268       .access = PL0_W, .type = ARM_CP_DC_GZVA,
7269 #ifndef CONFIG_USER_ONLY
7270       /* Avoid overhead of an access check that always passes in user-mode */
7271       .accessfn = aa64_zva_access,
7272 #endif
7273     },
7274 };
7275 
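/*
 * FEAT_CSV2_2: SCXTNUM_ELx accesses are gated by SCTLR_ELx.TSCXT,
 * HCR_EL2.EnSCXT and SCR_EL3.EnSCXT; access_scxtnum checks each control
 * and routes the trap to the appropriate exception level.
 */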
7276 static CPAccessResult access_scxtnum(CPUARMState *env, const ARMCPRegInfo *ri,
7277                                      bool isread)
7278 {
7279     uint64_t hcr = arm_hcr_el2_eff(env);
7280     int el = arm_current_el(env);
7281 
7282     if (el == 0 && !((hcr & HCR_E2H) && (hcr & HCR_TGE))) {
7283         if (env->cp15.sctlr_el[1] & SCTLR_TSCXT) {
7284             if (hcr & HCR_TGE) {
7285                 return CP_ACCESS_TRAP_EL2;
7286             }
7287             return CP_ACCESS_TRAP;
7288         }
7289     } else if (el < 2 && (env->cp15.sctlr_el[2] & SCTLR_TSCXT)) {
7290         return CP_ACCESS_TRAP_EL2;
7291     }
7292     if (el < 2 && arm_is_el2_enabled(env) && !(hcr & HCR_ENSCXT)) {
7293         return CP_ACCESS_TRAP_EL2;
7294     }
7295     if (el < 3
7296         && arm_feature(env, ARM_FEATURE_EL3)
7297         && !(env->cp15.scr_el3 & SCR_ENSCXT)) {
7298         return CP_ACCESS_TRAP_EL3;
7299     }
7300     return CP_ACCESS_OK;
7301 }
7302 
7303 static const ARMCPRegInfo scxtnum_reginfo[] = {
7304     { .name = "SCXTNUM_EL0", .state = ARM_CP_STATE_AA64,
7305       .opc0 = 3, .opc1 = 3, .crn = 13, .crm = 0, .opc2 = 7,
7306       .access = PL0_RW, .accessfn = access_scxtnum,
7307       .fieldoffset = offsetof(CPUARMState, scxtnum_el[0]) },
7308     { .name = "SCXTNUM_EL1", .state = ARM_CP_STATE_AA64,
7309       .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 7,
7310       .access = PL1_RW, .accessfn = access_scxtnum,
7311       .fieldoffset = offsetof(CPUARMState, scxtnum_el[1]) },
7312     { .name = "SCXTNUM_EL2", .state = ARM_CP_STATE_AA64,
7313       .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 7,
7314       .access = PL2_RW, .accessfn = access_scxtnum,
7315       .fieldoffset = offsetof(CPUARMState, scxtnum_el[2]) },
7316     { .name = "SCXTNUM_EL3", .state = ARM_CP_STATE_AA64,
7317       .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 7,
7318       .access = PL3_RW,
7319       .fieldoffset = offsetof(CPUARMState, scxtnum_el[3]) },
7320 };
7321 #endif /* TARGET_AARCH64 */
7322 
7323 static CPAccessResult access_predinv(CPUARMState *env, const ARMCPRegInfo *ri,
7324                                      bool isread)
7325 {
7326     int el = arm_current_el(env);
7327 
7328     if (el == 0) {
7329         uint64_t sctlr = arm_sctlr(env, el);
7330         if (!(sctlr & SCTLR_EnRCTX)) {
7331             return CP_ACCESS_TRAP;
7332         }
7333     } else if (el == 1) {
7334         uint64_t hcr = arm_hcr_el2_eff(env);
7335         if (hcr & HCR_NV) {
7336             return CP_ACCESS_TRAP_EL2;
7337         }
7338     }
7339     return CP_ACCESS_OK;
7340 }
7341 
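/*
 * FEAT_SPECRES: CFP/DVP/CPP RCTX prediction restriction ops. QEMU makes
 * no predictions that need invalidating, so these are NOPs apart from
 * the access checks.
 */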
7342 static const ARMCPRegInfo predinv_reginfo[] = {
7343     { .name = "CFP_RCTX", .state = ARM_CP_STATE_AA64,
7344       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 4,
7345       .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
7346     { .name = "DVP_RCTX", .state = ARM_CP_STATE_AA64,
7347       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 5,
7348       .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
7349     { .name = "CPP_RCTX", .state = ARM_CP_STATE_AA64,
7350       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 7,
7351       .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
7352     /*
7353      * Note the AArch32 opcodes have a different OPC1.
7354      */
7355     { .name = "CFPRCTX", .state = ARM_CP_STATE_AA32,
7356       .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 4,
7357       .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
7358     { .name = "DVPRCTX", .state = ARM_CP_STATE_AA32,
7359       .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 5,
7360       .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
7361     { .name = "CPPRCTX", .state = ARM_CP_STATE_AA32,
7362       .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 7,
7363       .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
7364 };
7365 
7366 static uint64_t ccsidr2_read(CPUARMState *env, const ARMCPRegInfo *ri)
7367 {
7368     /* Read the high 32 bits of the current CCSIDR */
7369     return extract64(ccsidr_read(env, ri), 32, 32);
7370 }
7371 
7372 static const ARMCPRegInfo ccsidr2_reginfo[] = {
7373     { .name = "CCSIDR2", .state = ARM_CP_STATE_BOTH,
7374       .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 2,
7375       .access = PL1_R,
7376       .accessfn = access_aa64_tid2,
7377       .readfn = ccsidr2_read, .type = ARM_CP_NO_RAW },
7378 };
7379 
7380 static CPAccessResult access_aa64_tid3(CPUARMState *env, const ARMCPRegInfo *ri,
7381                                        bool isread)
7382 {
7383     if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID3)) {
7384         return CP_ACCESS_TRAP_EL2;
7385     }
7386 
7387     return CP_ACCESS_OK;
7388 }
7389 
7390 static CPAccessResult access_aa32_tid3(CPUARMState *env, const ARMCPRegInfo *ri,
7391                                        bool isread)
7392 {
7393     if (arm_feature(env, ARM_FEATURE_V8)) {
7394         return access_aa64_tid3(env, ri, isread);
7395     }
7396 
7397     return CP_ACCESS_OK;
7398 }
7399 
7400 static CPAccessResult access_jazelle(CPUARMState *env, const ARMCPRegInfo *ri,
7401                                      bool isread)
7402 {
7403     if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID0)) {
7404         return CP_ACCESS_TRAP_EL2;
7405     }
7406 
7407     return CP_ACCESS_OK;
7408 }
7409 
7410 static CPAccessResult access_joscr_jmcr(CPUARMState *env,
7411                                         const ARMCPRegInfo *ri, bool isread)
7412 {
7413     /*
7414      * HSTR.TJDBX traps JOSCR and JMCR accesses, but it exists only
7415      * in v7A, not in v8A.
7416      */
7417     if (!arm_feature(env, ARM_FEATURE_V8) &&
7418         arm_current_el(env) < 2 && !arm_is_secure_below_el3(env) &&
7419         (env->cp15.hstr_el2 & HSTR_TJDBX)) {
7420         return CP_ACCESS_TRAP_EL2;
7421     }
7422     return CP_ACCESS_OK;
7423 }
7424 
7425 static const ARMCPRegInfo jazelle_regs[] = {
7426     { .name = "JIDR",
7427       .cp = 14, .crn = 0, .crm = 0, .opc1 = 7, .opc2 = 0,
7428       .access = PL1_R, .accessfn = access_jazelle,
7429       .type = ARM_CP_CONST, .resetvalue = 0 },
7430     { .name = "JOSCR",
7431       .cp = 14, .crn = 1, .crm = 0, .opc1 = 7, .opc2 = 0,
7432       .accessfn = access_joscr_jmcr,
7433       .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
7434     { .name = "JMCR",
7435       .cp = 14, .crn = 2, .crm = 0, .opc1 = 7, .opc2 = 0,
7436       .accessfn = access_joscr_jmcr,
7437       .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
7438 };
7439 
7440 static const ARMCPRegInfo contextidr_el2 = {
7441     .name = "CONTEXTIDR_EL2", .state = ARM_CP_STATE_AA64,
7442     .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 1,
7443     .access = PL2_RW,
7444     .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[2])
7445 };
7446 
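/*
 * FEAT_VHE: TTBR1_EL2, the EL2 virtual timer (CNTHV) and the *_EL02
 * aliases that give an E2H host access to the EL0 physical and virtual
 * timer registers.
 */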
7447 static const ARMCPRegInfo vhe_reginfo[] = {
7448     { .name = "TTBR1_EL2", .state = ARM_CP_STATE_AA64,
7449       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 1,
7450       .access = PL2_RW, .writefn = vmsa_tcr_ttbr_el2_write,
7451       .fieldoffset = offsetof(CPUARMState, cp15.ttbr1_el[2]) },
7452 #ifndef CONFIG_USER_ONLY
7453     { .name = "CNTHV_CVAL_EL2", .state = ARM_CP_STATE_AA64,
7454       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 2,
7455       .fieldoffset =
7456         offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].cval),
7457       .type = ARM_CP_IO, .access = PL2_RW,
7458       .writefn = gt_hv_cval_write, .raw_writefn = raw_write },
7459     { .name = "CNTHV_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
7460       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 0,
7461       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
7462       .resetfn = gt_hv_timer_reset,
7463       .readfn = gt_hv_tval_read, .writefn = gt_hv_tval_write },
7464     { .name = "CNTHV_CTL_EL2", .state = ARM_CP_STATE_BOTH,
7465       .type = ARM_CP_IO,
7466       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 1,
7467       .access = PL2_RW,
7468       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].ctl),
7469       .writefn = gt_hv_ctl_write, .raw_writefn = raw_write },
7470     { .name = "CNTP_CTL_EL02", .state = ARM_CP_STATE_AA64,
7471       .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 1,
7472       .type = ARM_CP_IO | ARM_CP_ALIAS,
7473       .access = PL2_RW, .accessfn = e2h_access,
7474       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
7475       .writefn = gt_phys_ctl_write, .raw_writefn = raw_write },
7476     { .name = "CNTV_CTL_EL02", .state = ARM_CP_STATE_AA64,
7477       .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 1,
7478       .type = ARM_CP_IO | ARM_CP_ALIAS,
7479       .access = PL2_RW, .accessfn = e2h_access,
7480       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
7481       .writefn = gt_virt_ctl_write, .raw_writefn = raw_write },
7482     { .name = "CNTP_TVAL_EL02", .state = ARM_CP_STATE_AA64,
7483       .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 0,
7484       .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS,
7485       .access = PL2_RW, .accessfn = e2h_access,
7486       .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write },
7487     { .name = "CNTV_TVAL_EL02", .state = ARM_CP_STATE_AA64,
7488       .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 0,
7489       .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS,
7490       .access = PL2_RW, .accessfn = e2h_access,
7491       .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write },
7492     { .name = "CNTP_CVAL_EL02", .state = ARM_CP_STATE_AA64,
7493       .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 2,
7494       .type = ARM_CP_IO | ARM_CP_ALIAS,
7495       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
7496       .access = PL2_RW, .accessfn = e2h_access,
7497       .writefn = gt_phys_cval_write, .raw_writefn = raw_write },
7498     { .name = "CNTV_CVAL_EL02", .state = ARM_CP_STATE_AA64,
7499       .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 2,
7500       .type = ARM_CP_IO | ARM_CP_ALIAS,
7501       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
7502       .access = PL2_RW, .accessfn = e2h_access,
7503       .writefn = gt_virt_cval_write, .raw_writefn = raw_write },
7504 #endif
7505 };
7506 
7507 #ifndef CONFIG_USER_ONLY
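/* FEAT_PAN2: PAN-respecting address translation ops AT S1E1RP and AT S1E1WP. */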
7508 static const ARMCPRegInfo ats1e1_reginfo[] = {
7509     { .name = "AT_S1E1RP", .state = ARM_CP_STATE_AA64,
7510       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0,
7511       .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
7512       .writefn = ats_write64 },
7513     { .name = "AT_S1E1WP", .state = ARM_CP_STATE_AA64,
7514       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1,
7515       .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
7516       .writefn = ats_write64 },
7517 };
7518 
7519 static const ARMCPRegInfo ats1cp_reginfo[] = {
7520     { .name = "ATS1CPRP",
7521       .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0,
7522       .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
7523       .writefn = ats_write },
7524     { .name = "ATS1CPWP",
7525       .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1,
7526       .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
7527       .writefn = ats_write },
7528 };
7529 #endif
7530 
7531 /*
7532  * ACTLR2 and HACTLR2 map to ACTLR_EL1[63:32] and
7533  * ACTLR_EL2[63:32]. They exist only if the ID_MMFR4.AC2 field
7534  * is non-zero: that is never the case for ARMv7, is optional for
7535  * ARMv8, and is mandatory for ARMv8.2 and up.
7536  * ACTLR2 is banked for S and NS if EL3 is AArch32. Since QEMU's
7537  * implementation is RAZ/WI we can ignore this detail, as we
7538  * do for ACTLR.
7539  */
7540 static const ARMCPRegInfo actlr2_hactlr2_reginfo[] = {
7541     { .name = "ACTLR2", .state = ARM_CP_STATE_AA32,
7542       .cp = 15, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 3,
7543       .access = PL1_RW, .accessfn = access_tacr,
7544       .type = ARM_CP_CONST, .resetvalue = 0 },
7545     { .name = "HACTLR2", .state = ARM_CP_STATE_AA32,
7546       .cp = 15, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 3,
7547       .access = PL2_RW, .type = ARM_CP_CONST,
7548       .resetvalue = 0 },
7549 };
7550 
7551 void register_cp_regs_for_features(ARMCPU *cpu)
7552 {
7553     /* Register all the coprocessor registers based on feature bits */
7554     CPUARMState *env = &cpu->env;
7555     if (arm_feature(env, ARM_FEATURE_M)) {
7556         /* M profile has no coprocessor registers */
7557         return;
7558     }
7559 
7560     define_arm_cp_regs(cpu, cp_reginfo);
7561     if (!arm_feature(env, ARM_FEATURE_V8)) {
7562         /* Must go early as it is full of wildcards that may be
7563          * overridden by later definitions.
7564          */
7565         define_arm_cp_regs(cpu, not_v8_cp_reginfo);
7566     }
7567 
7568     if (arm_feature(env, ARM_FEATURE_V6)) {
7569         /* The ID registers all have impdef reset values */
7570         ARMCPRegInfo v6_idregs[] = {
7571             { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH,
7572               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
7573               .access = PL1_R, .type = ARM_CP_CONST,
7574               .accessfn = access_aa32_tid3,
7575               .resetvalue = cpu->isar.id_pfr0 },
7576             /* ID_PFR1 is not a plain ARM_CP_CONST because we don't know
7577              * the value of the GIC field until after we define these regs.
7578              */
7579             { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH,
7580               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1,
7581               .access = PL1_R, .type = ARM_CP_NO_RAW,
7582               .accessfn = access_aa32_tid3,
7583               .readfn = id_pfr1_read,
7584               .writefn = arm_cp_write_ignore },
7585             { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH,
7586               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2,
7587               .access = PL1_R, .type = ARM_CP_CONST,
7588               .accessfn = access_aa32_tid3,
7589               .resetvalue = cpu->isar.id_dfr0 },
7590             { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH,
7591               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3,
7592               .access = PL1_R, .type = ARM_CP_CONST,
7593               .accessfn = access_aa32_tid3,
7594               .resetvalue = cpu->id_afr0 },
7595             { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH,
7596               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4,
7597               .access = PL1_R, .type = ARM_CP_CONST,
7598               .accessfn = access_aa32_tid3,
7599               .resetvalue = cpu->isar.id_mmfr0 },
7600             { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH,
7601               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5,
7602               .access = PL1_R, .type = ARM_CP_CONST,
7603               .accessfn = access_aa32_tid3,
7604               .resetvalue = cpu->isar.id_mmfr1 },
7605             { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH,
7606               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6,
7607               .access = PL1_R, .type = ARM_CP_CONST,
7608               .accessfn = access_aa32_tid3,
7609               .resetvalue = cpu->isar.id_mmfr2 },
7610             { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH,
7611               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7,
7612               .access = PL1_R, .type = ARM_CP_CONST,
7613               .accessfn = access_aa32_tid3,
7614               .resetvalue = cpu->isar.id_mmfr3 },
7615             { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH,
7616               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
7617               .access = PL1_R, .type = ARM_CP_CONST,
7618               .accessfn = access_aa32_tid3,
7619               .resetvalue = cpu->isar.id_isar0 },
7620             { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH,
7621               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1,
7622               .access = PL1_R, .type = ARM_CP_CONST,
7623               .accessfn = access_aa32_tid3,
7624               .resetvalue = cpu->isar.id_isar1 },
7625             { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH,
7626               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
7627               .access = PL1_R, .type = ARM_CP_CONST,
7628               .accessfn = access_aa32_tid3,
7629               .resetvalue = cpu->isar.id_isar2 },
7630             { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH,
7631               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3,
7632               .access = PL1_R, .type = ARM_CP_CONST,
7633               .accessfn = access_aa32_tid3,
7634               .resetvalue = cpu->isar.id_isar3 },
7635             { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH,
7636               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4,
7637               .access = PL1_R, .type = ARM_CP_CONST,
7638               .accessfn = access_aa32_tid3,
7639               .resetvalue = cpu->isar.id_isar4 },
7640             { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH,
7641               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5,
7642               .access = PL1_R, .type = ARM_CP_CONST,
7643               .accessfn = access_aa32_tid3,
7644               .resetvalue = cpu->isar.id_isar5 },
7645             { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH,
7646               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6,
7647               .access = PL1_R, .type = ARM_CP_CONST,
7648               .accessfn = access_aa32_tid3,
7649               .resetvalue = cpu->isar.id_mmfr4 },
7650             { .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH,
7651               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7,
7652               .access = PL1_R, .type = ARM_CP_CONST,
7653               .accessfn = access_aa32_tid3,
7654               .resetvalue = cpu->isar.id_isar6 },
7655         };
7656         define_arm_cp_regs(cpu, v6_idregs);
7657         define_arm_cp_regs(cpu, v6_cp_reginfo);
7658     } else {
7659         define_arm_cp_regs(cpu, not_v6_cp_reginfo);
7660     }
7661     if (arm_feature(env, ARM_FEATURE_V6K)) {
7662         define_arm_cp_regs(cpu, v6k_cp_reginfo);
7663     }
7664     if (arm_feature(env, ARM_FEATURE_V7MP) &&
7665         !arm_feature(env, ARM_FEATURE_PMSA)) {
7666         define_arm_cp_regs(cpu, v7mp_cp_reginfo);
7667     }
7668     if (arm_feature(env, ARM_FEATURE_V7VE)) {
7669         define_arm_cp_regs(cpu, pmovsset_cp_reginfo);
7670     }
7671     if (arm_feature(env, ARM_FEATURE_V7)) {
7672         ARMCPRegInfo clidr = {
7673             .name = "CLIDR", .state = ARM_CP_STATE_BOTH,
7674             .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
7675             .access = PL1_R, .type = ARM_CP_CONST,
7676             .accessfn = access_aa64_tid2,
7677             .resetvalue = cpu->clidr
7678         };
7679         define_one_arm_cp_reg(cpu, &clidr);
7680         define_arm_cp_regs(cpu, v7_cp_reginfo);
7681         define_debug_regs(cpu);
7682         define_pmu_regs(cpu);
7683     } else {
7684         define_arm_cp_regs(cpu, not_v7_cp_reginfo);
7685     }
7686     if (arm_feature(env, ARM_FEATURE_V8)) {
7687         /* AArch64 ID registers, which all have impdef reset values.
7688          * Note that within the ID register ranges the unused slots
7689          * must all be RAZ, not UNDEF; future architecture versions may
7690          * define new registers here.
7691          */
7692         ARMCPRegInfo v8_idregs[] = {
7693             /*
7694              * ID_AA64PFR0_EL1 is not a plain ARM_CP_CONST in system
7695              * emulation because we don't know the right value for the
7696              * GIC field until after we define these regs.
7697              */
7698             { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64,
7699               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0,
7700               .access = PL1_R,
7701 #ifdef CONFIG_USER_ONLY
7702               .type = ARM_CP_CONST,
7703               .resetvalue = cpu->isar.id_aa64pfr0
7704 #else
7705               .type = ARM_CP_NO_RAW,
7706               .accessfn = access_aa64_tid3,
7707               .readfn = id_aa64pfr0_read,
7708               .writefn = arm_cp_write_ignore
7709 #endif
7710             },
7711             { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64,
7712               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
7713               .access = PL1_R, .type = ARM_CP_CONST,
7714               .accessfn = access_aa64_tid3,
7715               .resetvalue = cpu->isar.id_aa64pfr1 },
7716             { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7717               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2,
7718               .access = PL1_R, .type = ARM_CP_CONST,
7719               .accessfn = access_aa64_tid3,
7720               .resetvalue = 0 },
7721             { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7722               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3,
7723               .access = PL1_R, .type = ARM_CP_CONST,
7724               .accessfn = access_aa64_tid3,
7725               .resetvalue = 0 },
7726             { .name = "ID_AA64ZFR0_EL1", .state = ARM_CP_STATE_AA64,
7727               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4,
7728               .access = PL1_R, .type = ARM_CP_CONST,
7729               .accessfn = access_aa64_tid3,
7730               .resetvalue = cpu->isar.id_aa64zfr0 },
7731             { .name = "ID_AA64SMFR0_EL1", .state = ARM_CP_STATE_AA64,
7732               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5,
7733               .access = PL1_R, .type = ARM_CP_CONST,
7734               .accessfn = access_aa64_tid3,
7735               .resetvalue = cpu->isar.id_aa64smfr0 },
7736             { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7737               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6,
7738               .access = PL1_R, .type = ARM_CP_CONST,
7739               .accessfn = access_aa64_tid3,
7740               .resetvalue = 0 },
7741             { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7742               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7,
7743               .access = PL1_R, .type = ARM_CP_CONST,
7744               .accessfn = access_aa64_tid3,
7745               .resetvalue = 0 },
7746             { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64,
7747               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0,
7748               .access = PL1_R, .type = ARM_CP_CONST,
7749               .accessfn = access_aa64_tid3,
7750               .resetvalue = cpu->isar.id_aa64dfr0 },
7751             { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64,
7752               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1,
7753               .access = PL1_R, .type = ARM_CP_CONST,
7754               .accessfn = access_aa64_tid3,
7755               .resetvalue = cpu->isar.id_aa64dfr1 },
7756             { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7757               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2,
7758               .access = PL1_R, .type = ARM_CP_CONST,
7759               .accessfn = access_aa64_tid3,
7760               .resetvalue = 0 },
7761             { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7762               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3,
7763               .access = PL1_R, .type = ARM_CP_CONST,
7764               .accessfn = access_aa64_tid3,
7765               .resetvalue = 0 },
7766             { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64,
7767               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4,
7768               .access = PL1_R, .type = ARM_CP_CONST,
7769               .accessfn = access_aa64_tid3,
7770               .resetvalue = cpu->id_aa64afr0 },
7771             { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64,
7772               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5,
7773               .access = PL1_R, .type = ARM_CP_CONST,
7774               .accessfn = access_aa64_tid3,
7775               .resetvalue = cpu->id_aa64afr1 },
7776             { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7777               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6,
7778               .access = PL1_R, .type = ARM_CP_CONST,
7779               .accessfn = access_aa64_tid3,
7780               .resetvalue = 0 },
7781             { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7782               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7,
7783               .access = PL1_R, .type = ARM_CP_CONST,
7784               .accessfn = access_aa64_tid3,
7785               .resetvalue = 0 },
7786             { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64,
7787               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
7788               .access = PL1_R, .type = ARM_CP_CONST,
7789               .accessfn = access_aa64_tid3,
7790               .resetvalue = cpu->isar.id_aa64isar0 },
7791             { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
7792               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
7793               .access = PL1_R, .type = ARM_CP_CONST,
7794               .accessfn = access_aa64_tid3,
7795               .resetvalue = cpu->isar.id_aa64isar1 },
7796             { .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7797               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
7798               .access = PL1_R, .type = ARM_CP_CONST,
7799               .accessfn = access_aa64_tid3,
7800               .resetvalue = 0 },
7801             { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7802               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3,
7803               .access = PL1_R, .type = ARM_CP_CONST,
7804               .accessfn = access_aa64_tid3,
7805               .resetvalue = 0 },
7806             { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7807               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4,
7808               .access = PL1_R, .type = ARM_CP_CONST,
7809               .accessfn = access_aa64_tid3,
7810               .resetvalue = 0 },
7811             { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7812               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5,
7813               .access = PL1_R, .type = ARM_CP_CONST,
7814               .accessfn = access_aa64_tid3,
7815               .resetvalue = 0 },
7816             { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7817               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6,
7818               .access = PL1_R, .type = ARM_CP_CONST,
7819               .accessfn = access_aa64_tid3,
7820               .resetvalue = 0 },
7821             { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7822               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7,
7823               .access = PL1_R, .type = ARM_CP_CONST,
7824               .accessfn = access_aa64_tid3,
7825               .resetvalue = 0 },
7826             { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64,
7827               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
7828               .access = PL1_R, .type = ARM_CP_CONST,
7829               .accessfn = access_aa64_tid3,
7830               .resetvalue = cpu->isar.id_aa64mmfr0 },
7831             { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64,
7832               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
7833               .access = PL1_R, .type = ARM_CP_CONST,
7834               .accessfn = access_aa64_tid3,
7835               .resetvalue = cpu->isar.id_aa64mmfr1 },
7836             { .name = "ID_AA64MMFR2_EL1", .state = ARM_CP_STATE_AA64,
7837               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2,
7838               .access = PL1_R, .type = ARM_CP_CONST,
7839               .accessfn = access_aa64_tid3,
7840               .resetvalue = cpu->isar.id_aa64mmfr2 },
7841             { .name = "ID_AA64MMFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7842               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3,
7843               .access = PL1_R, .type = ARM_CP_CONST,
7844               .accessfn = access_aa64_tid3,
7845               .resetvalue = 0 },
7846             { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7847               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4,
7848               .access = PL1_R, .type = ARM_CP_CONST,
7849               .accessfn = access_aa64_tid3,
7850               .resetvalue = 0 },
7851             { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7852               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5,
7853               .access = PL1_R, .type = ARM_CP_CONST,
7854               .accessfn = access_aa64_tid3,
7855               .resetvalue = 0 },
7856             { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7857               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6,
7858               .access = PL1_R, .type = ARM_CP_CONST,
7859               .accessfn = access_aa64_tid3,
7860               .resetvalue = 0 },
7861             { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7862               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7,
7863               .access = PL1_R, .type = ARM_CP_CONST,
7864               .accessfn = access_aa64_tid3,
7865               .resetvalue = 0 },
7866             { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64,
7867               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
7868               .access = PL1_R, .type = ARM_CP_CONST,
7869               .accessfn = access_aa64_tid3,
7870               .resetvalue = cpu->isar.mvfr0 },
7871             { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64,
7872               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
7873               .access = PL1_R, .type = ARM_CP_CONST,
7874               .accessfn = access_aa64_tid3,
7875               .resetvalue = cpu->isar.mvfr1 },
7876             { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64,
7877               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
7878               .access = PL1_R, .type = ARM_CP_CONST,
7879               .accessfn = access_aa64_tid3,
7880               .resetvalue = cpu->isar.mvfr2 },
7881             { .name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7882               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3,
7883               .access = PL1_R, .type = ARM_CP_CONST,
7884               .accessfn = access_aa64_tid3,
7885               .resetvalue = 0 },
7886             { .name = "ID_PFR2", .state = ARM_CP_STATE_BOTH,
7887               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4,
7888               .access = PL1_R, .type = ARM_CP_CONST,
7889               .accessfn = access_aa64_tid3,
7890               .resetvalue = cpu->isar.id_pfr2 },
7891             { .name = "MVFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7892               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5,
7893               .access = PL1_R, .type = ARM_CP_CONST,
7894               .accessfn = access_aa64_tid3,
7895               .resetvalue = 0 },
7896             { .name = "MVFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7897               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6,
7898               .access = PL1_R, .type = ARM_CP_CONST,
7899               .accessfn = access_aa64_tid3,
7900               .resetvalue = 0 },
7901             { .name = "MVFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7902               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7,
7903               .access = PL1_R, .type = ARM_CP_CONST,
7904               .accessfn = access_aa64_tid3,
7905               .resetvalue = 0 },
7906             { .name = "PMCEID0", .state = ARM_CP_STATE_AA32,
7907               .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6,
7908               .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
7909               .resetvalue = extract64(cpu->pmceid0, 0, 32) },
7910             { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64,
7911               .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6,
7912               .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
7913               .resetvalue = cpu->pmceid0 },
7914             { .name = "PMCEID1", .state = ARM_CP_STATE_AA32,
7915               .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7,
7916               .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
7917               .resetvalue = extract64(cpu->pmceid1, 0, 32) },
7918             { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64,
7919               .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7,
7920               .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
7921               .resetvalue = cpu->pmceid1 },
7922         };
7923 #ifdef CONFIG_USER_ONLY
7924         static const ARMCPRegUserSpaceInfo v8_user_idregs[] = {
7925             { .name = "ID_AA64PFR0_EL1",
7926               .exported_bits = 0x000f000f00ff0000,
7927               .fixed_bits    = 0x0000000000000011 },
7928             { .name = "ID_AA64PFR1_EL1",
7929               .exported_bits = 0x00000000000000f0 },
7930             { .name = "ID_AA64PFR*_EL1_RESERVED",
7931               .is_glob = true                     },
7932             { .name = "ID_AA64ZFR0_EL1"           },
7933             { .name = "ID_AA64MMFR0_EL1",
7934               .fixed_bits    = 0x00000000ff000000 },
7935             { .name = "ID_AA64MMFR1_EL1"          },
7936             { .name = "ID_AA64MMFR*_EL1_RESERVED",
7937               .is_glob = true                     },
7938             { .name = "ID_AA64DFR0_EL1",
7939               .fixed_bits    = 0x0000000000000006 },
7940             { .name = "ID_AA64DFR1_EL1"           },
7941             { .name = "ID_AA64DFR*_EL1_RESERVED",
7942               .is_glob = true                     },
7943             { .name = "ID_AA64AFR*",
7944               .is_glob = true                     },
7945             { .name = "ID_AA64ISAR0_EL1",
7946               .exported_bits = 0x00fffffff0fffff0 },
7947             { .name = "ID_AA64ISAR1_EL1",
7948               .exported_bits = 0x000000f0ffffffff },
7949             { .name = "ID_AA64ISAR*_EL1_RESERVED",
7950               .is_glob = true                     },
7951         };
7952         modify_arm_cp_regs(v8_idregs, v8_user_idregs);
7953 #endif
7954         /* RVBAR_EL1 is only implemented if EL1 is the highest EL */
7955         if (!arm_feature(env, ARM_FEATURE_EL3) &&
7956             !arm_feature(env, ARM_FEATURE_EL2)) {
7957             ARMCPRegInfo rvbar = {
7958                 .name = "RVBAR_EL1", .state = ARM_CP_STATE_AA64,
7959                 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
7960                 .access = PL1_R,
7961                 .fieldoffset = offsetof(CPUARMState, cp15.rvbar),
7962             };
7963             define_one_arm_cp_reg(cpu, &rvbar);
7964         }
7965         define_arm_cp_regs(cpu, v8_idregs);
7966         define_arm_cp_regs(cpu, v8_cp_reginfo);
7967     }
7968 
7969     /*
7970      * Register the base EL2 cpregs.
7971      * Pre-v8, these registers are implemented only as part of the
7972      * Virtualization Extensions (EL2 present).  Beginning with v8,
7973      * if EL2 is missing but EL3 is enabled, these mostly become
7974      * RES0 from EL3, with some specific exceptions.
7975      */
7976     if (arm_feature(env, ARM_FEATURE_EL2)
7977         || (arm_feature(env, ARM_FEATURE_EL3)
7978             && arm_feature(env, ARM_FEATURE_V8))) {
7979         uint64_t vmpidr_def = mpidr_read_val(env);
7980         ARMCPRegInfo vpidr_regs[] = {
7981             { .name = "VPIDR", .state = ARM_CP_STATE_AA32,
7982               .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
7983               .access = PL2_RW, .accessfn = access_el3_aa32ns,
7984               .resetvalue = cpu->midr,
7985               .type = ARM_CP_ALIAS | ARM_CP_EL3_NO_EL2_C_NZ,
7986               .fieldoffset = offsetoflow32(CPUARMState, cp15.vpidr_el2) },
7987             { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64,
7988               .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
7989               .access = PL2_RW, .resetvalue = cpu->midr,
7990               .type = ARM_CP_EL3_NO_EL2_C_NZ,
7991               .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
7992             { .name = "VMPIDR", .state = ARM_CP_STATE_AA32,
7993               .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
7994               .access = PL2_RW, .accessfn = access_el3_aa32ns,
7995               .resetvalue = vmpidr_def,
7996               .type = ARM_CP_ALIAS | ARM_CP_EL3_NO_EL2_C_NZ,
7997               .fieldoffset = offsetoflow32(CPUARMState, cp15.vmpidr_el2) },
7998             { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64,
7999               .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
8000               .access = PL2_RW, .resetvalue = vmpidr_def,
8001               .type = ARM_CP_EL3_NO_EL2_C_NZ,
8002               .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) },
8003         };
8004         /*
8005          * The only field of MDCR_EL2 that has a defined architectural reset
8006          * value is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N.
8007          */
8008         ARMCPRegInfo mdcr_el2 = {
8009             .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
8010             .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
8011             .access = PL2_RW, .resetvalue = pmu_num_counters(env),
8012             .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2),
8013         };
8014         define_one_arm_cp_reg(cpu, &mdcr_el2);
8015         define_arm_cp_regs(cpu, vpidr_regs);
8016         define_arm_cp_regs(cpu, el2_cp_reginfo);
8017         if (arm_feature(env, ARM_FEATURE_V8)) {
8018             define_arm_cp_regs(cpu, el2_v8_cp_reginfo);
8019         }
8020         if (cpu_isar_feature(aa64_sel2, cpu)) {
8021             define_arm_cp_regs(cpu, el2_sec_cp_reginfo);
8022         }
8023         /* RVBAR_EL2 is only implemented if EL2 is the highest EL */
8024         if (!arm_feature(env, ARM_FEATURE_EL3)) {
8025             ARMCPRegInfo rvbar = {
8026                 .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64,
8027                 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1,
8028                 .access = PL2_R,
8029                 .fieldoffset = offsetof(CPUARMState, cp15.rvbar),
8030             };
8031             define_one_arm_cp_reg(cpu, &rvbar);
8032         }
8033     }
8034 
8035     /* Register the base EL3 cpregs. */
8036     if (arm_feature(env, ARM_FEATURE_EL3)) {
8037         define_arm_cp_regs(cpu, el3_cp_reginfo);
8038         ARMCPRegInfo el3_regs[] = {
8039             { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64,
8040               .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1,
8041               .access = PL3_R,
8042               .fieldoffset = offsetof(CPUARMState, cp15.rvbar),
8043             },
8044             { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64,
8045               .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0,
8046               .access = PL3_RW,
8047               .raw_writefn = raw_write, .writefn = sctlr_write,
8048               .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]),
8049               .resetvalue = cpu->reset_sctlr },
8050         };
8051 
8052         define_arm_cp_regs(cpu, el3_regs);
8053     }
8054     /* The behaviour of NSACR is sufficiently various that we don't
8055      * try to describe it in a single reginfo:
8056      *  if EL3 is 64 bit, then trap to EL3 from S EL1,
8057      *     reads as constant 0xc00 from NS EL1 and NS EL2
8058      *  if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2
8059      *  if v7 without EL3, register doesn't exist
8060      *  if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2
8061      */
8062     if (arm_feature(env, ARM_FEATURE_EL3)) {
8063         if (arm_feature(env, ARM_FEATURE_AARCH64)) {
8064             static const ARMCPRegInfo nsacr = {
8065                 .name = "NSACR", .type = ARM_CP_CONST,
8066                 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
8067                 .access = PL1_RW, .accessfn = nsacr_access,
8068                 .resetvalue = 0xc00
8069             };
8070             define_one_arm_cp_reg(cpu, &nsacr);
8071         } else {
8072             static const ARMCPRegInfo nsacr = {
8073                 .name = "NSACR",
8074                 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
8075                 .access = PL3_RW | PL1_R,
8076                 .resetvalue = 0,
8077                 .fieldoffset = offsetof(CPUARMState, cp15.nsacr)
8078             };
8079             define_one_arm_cp_reg(cpu, &nsacr);
8080         }
8081     } else {
8082         if (arm_feature(env, ARM_FEATURE_V8)) {
8083             static const ARMCPRegInfo nsacr = {
8084                 .name = "NSACR", .type = ARM_CP_CONST,
8085                 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
8086                 .access = PL1_R,
8087                 .resetvalue = 0xc00
8088             };
8089             define_one_arm_cp_reg(cpu, &nsacr);
8090         }
8091     }
8092 
8093     if (arm_feature(env, ARM_FEATURE_PMSA)) {
8094         if (arm_feature(env, ARM_FEATURE_V6)) {
8095             /* PMSAv6 not implemented */
8096             assert(arm_feature(env, ARM_FEATURE_V7));
8097             define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
8098             define_arm_cp_regs(cpu, pmsav7_cp_reginfo);
8099         } else {
8100             define_arm_cp_regs(cpu, pmsav5_cp_reginfo);
8101         }
8102     } else {
8103         define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
8104         define_arm_cp_regs(cpu, vmsa_cp_reginfo);
8105         /* TTBCR2 is introduced with ARMv8.2-AA32HPD.  */
8106         if (cpu_isar_feature(aa32_hpd, cpu)) {
8107             define_one_arm_cp_reg(cpu, &ttbcr2_reginfo);
8108         }
8109     }
8110     if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
8111         define_arm_cp_regs(cpu, t2ee_cp_reginfo);
8112     }
8113     if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
8114         define_arm_cp_regs(cpu, generic_timer_cp_reginfo);
8115     }
8116     if (arm_feature(env, ARM_FEATURE_VAPA)) {
8117         define_arm_cp_regs(cpu, vapa_cp_reginfo);
8118     }
8119     if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) {
8120         define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo);
8121     }
8122     if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) {
8123         define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo);
8124     }
8125     if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) {
8126         define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo);
8127     }
8128     if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
8129         define_arm_cp_regs(cpu, omap_cp_reginfo);
8130     }
8131     if (arm_feature(env, ARM_FEATURE_STRONGARM)) {
8132         define_arm_cp_regs(cpu, strongarm_cp_reginfo);
8133     }
8134     if (arm_feature(env, ARM_FEATURE_XSCALE)) {
8135         define_arm_cp_regs(cpu, xscale_cp_reginfo);
8136     }
8137     if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) {
8138         define_arm_cp_regs(cpu, dummy_c15_cp_reginfo);
8139     }
8140     if (arm_feature(env, ARM_FEATURE_LPAE)) {
8141         define_arm_cp_regs(cpu, lpae_cp_reginfo);
8142     }
8143     if (cpu_isar_feature(aa32_jazelle, cpu)) {
8144         define_arm_cp_regs(cpu, jazelle_regs);
8145     }
8146     /* Slightly awkwardly, the OMAP and StrongARM cores need all of
8147      * cp15 crn=0 to be writes-ignored, whereas for other cores they should
8148      * be read-only (ie write causes UNDEF exception).
8149      */
8150     {
8151         ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = {
8152             /* Pre-v8 MIDR space.
8153              * Note that the MIDR isn't a simple constant register because
8154              * of the TI925 behaviour where writes to another register can
8155              * cause the MIDR value to change.
8156              *
8157              * Unimplemented registers in the c15 0 0 0 space default to
8158              * MIDR. Define MIDR first as this entire space, then CTR, TCMTR
8159              * and friends override accordingly.
8160              */
8161             { .name = "MIDR",
8162               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY,
8163               .access = PL1_R, .resetvalue = cpu->midr,
8164               .writefn = arm_cp_write_ignore, .raw_writefn = raw_write,
8165               .readfn = midr_read,
8166               .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
8167               .type = ARM_CP_OVERRIDE },
8168             /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */
8169             { .name = "DUMMY",
8170               .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY,
8171               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
8172             { .name = "DUMMY",
8173               .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY,
8174               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
8175             { .name = "DUMMY",
8176               .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY,
8177               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
8178             { .name = "DUMMY",
8179               .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY,
8180               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
8181             { .name = "DUMMY",
8182               .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY,
8183               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
8184         };
8185         ARMCPRegInfo id_v8_midr_cp_reginfo[] = {
8186             { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH,
8187               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0,
8188               .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr,
8189               .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
8190               .readfn = midr_read },
8191             /* crn = 0 op1 = 0 crm = 0 op2 = 4,7 : AArch32 aliases of MIDR */
8192             { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
8193               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
8194               .access = PL1_R, .resetvalue = cpu->midr },
8195             { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
8196               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7,
8197               .access = PL1_R, .resetvalue = cpu->midr },
8198             { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH,
8199               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6,
8200               .access = PL1_R,
8201               .accessfn = access_aa64_tid1,
8202               .type = ARM_CP_CONST, .resetvalue = cpu->revidr },
8203         };
8204         ARMCPRegInfo id_cp_reginfo[] = {
8205             /* These are common to v8 and pre-v8 */
8206             { .name = "CTR",
8207               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1,
8208               .access = PL1_R, .accessfn = ctr_el0_access,
8209               .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
8210             { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64,
8211               .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0,
8212               .access = PL0_R, .accessfn = ctr_el0_access,
8213               .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
8214             /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */
8215             { .name = "TCMTR",
8216               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2,
8217               .access = PL1_R,
8218               .accessfn = access_aa32_tid1,
8219               .type = ARM_CP_CONST, .resetvalue = 0 },
8220         };
8221         /* TLBTR is specific to VMSA */
8222         ARMCPRegInfo id_tlbtr_reginfo = {
8223               .name = "TLBTR",
8224               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
8225               .access = PL1_R,
8226               .accessfn = access_aa32_tid1,
8227               .type = ARM_CP_CONST, .resetvalue = 0,
8228         };
8229         /* MPUIR is specific to PMSA V6+ */
8230         ARMCPRegInfo id_mpuir_reginfo = {
8231               .name = "MPUIR",
8232               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
8233               .access = PL1_R, .type = ARM_CP_CONST,
8234               .resetvalue = cpu->pmsav7_dregion << 8
8235         };
8236         static const ARMCPRegInfo crn0_wi_reginfo = {
8237             .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY,
8238             .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
8239             .type = ARM_CP_NOP | ARM_CP_OVERRIDE
8240         };
8241 #ifdef CONFIG_USER_ONLY
8242         static const ARMCPRegUserSpaceInfo id_v8_user_midr_cp_reginfo[] = {
8243             { .name = "MIDR_EL1",
8244               .exported_bits = 0x00000000ffffffff },
8245             { .name = "REVIDR_EL1"                },
8246         };
8247         modify_arm_cp_regs(id_v8_midr_cp_reginfo, id_v8_user_midr_cp_reginfo);
8248 #endif
8249         if (arm_feature(env, ARM_FEATURE_OMAPCP) ||
8250             arm_feature(env, ARM_FEATURE_STRONGARM)) {
8251             size_t i;
8252             /* Register the blanket "writes ignored" value first to cover the
8253              * whole space. Then update the specific ID registers to allow write
8254              * access, so that they ignore writes rather than causing them to
8255              * UNDEF.
8256              */
8257             define_one_arm_cp_reg(cpu, &crn0_wi_reginfo);
8258             for (i = 0; i < ARRAY_SIZE(id_pre_v8_midr_cp_reginfo); ++i) {
8259                 id_pre_v8_midr_cp_reginfo[i].access = PL1_RW;
8260             }
8261             for (i = 0; i < ARRAY_SIZE(id_cp_reginfo); ++i) {
8262                 id_cp_reginfo[i].access = PL1_RW;
8263             }
8264             id_mpuir_reginfo.access = PL1_RW;
8265             id_tlbtr_reginfo.access = PL1_RW;
8266         }
8267         if (arm_feature(env, ARM_FEATURE_V8)) {
8268             define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo);
8269         } else {
8270             define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo);
8271         }
8272         define_arm_cp_regs(cpu, id_cp_reginfo);
8273         if (!arm_feature(env, ARM_FEATURE_PMSA)) {
8274             define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo);
8275         } else if (arm_feature(env, ARM_FEATURE_V7)) {
8276             define_one_arm_cp_reg(cpu, &id_mpuir_reginfo);
8277         }
8278     }
8279 
8280     if (arm_feature(env, ARM_FEATURE_MPIDR)) {
8281         ARMCPRegInfo mpidr_cp_reginfo[] = {
8282             { .name = "MPIDR_EL1", .state = ARM_CP_STATE_BOTH,
8283               .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
8284               .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW },
8285         };
8286 #ifdef CONFIG_USER_ONLY
8287         static const ARMCPRegUserSpaceInfo mpidr_user_cp_reginfo[] = {
8288             { .name = "MPIDR_EL1",
8289               .fixed_bits = 0x0000000080000000 },
8290         };
8291         modify_arm_cp_regs(mpidr_cp_reginfo, mpidr_user_cp_reginfo);
8292 #endif
8293         define_arm_cp_regs(cpu, mpidr_cp_reginfo);
8294     }
8295 
8296     if (arm_feature(env, ARM_FEATURE_AUXCR)) {
8297         ARMCPRegInfo auxcr_reginfo[] = {
8298             { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH,
8299               .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1,
8300               .access = PL1_RW, .accessfn = access_tacr,
8301               .type = ARM_CP_CONST, .resetvalue = cpu->reset_auxcr },
8302             { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH,
8303               .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1,
8304               .access = PL2_RW, .type = ARM_CP_CONST,
8305               .resetvalue = 0 },
8306             { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64,
8307               .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1,
8308               .access = PL3_RW, .type = ARM_CP_CONST,
8309               .resetvalue = 0 },
8310         };
8311         define_arm_cp_regs(cpu, auxcr_reginfo);
8312         if (cpu_isar_feature(aa32_ac2, cpu)) {
8313             define_arm_cp_regs(cpu, actlr2_hactlr2_reginfo);
8314         }
8315     }
8316 
8317     if (arm_feature(env, ARM_FEATURE_CBAR)) {
8318         /*
8319          * CBAR is IMPDEF, but common on Arm Cortex-A implementations.
8320          * There are two flavours:
8321          *  (1) older 32-bit only cores have a simple 32-bit CBAR
8322          *  (2) 64-bit cores have a 64-bit CBAR visible to AArch64, plus a
8323          *      32-bit register visible to AArch32 at a different encoding
8324          *      to the "flavour 1" register and with the bits rearranged to
8325          *      be able to squash a 64-bit address into the 32-bit view.
8326          * We distinguish the two via the ARM_FEATURE_AARCH64 flag, but
8327          * in future if we support AArch32-only configs of some of the
8328          * AArch64 cores we might need to add a specific feature flag
8329          * to indicate cores with "flavour 2" CBAR.
8330          */
8331         if (arm_feature(env, ARM_FEATURE_AARCH64)) {
8332             /* 32 bit view is [31:18] 0...0 [43:32]. */
8333             uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18)
8334                 | extract64(cpu->reset_cbar, 32, 12);
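                 /*
                  * Illustrative example: a 64-bit reset_cbar of
                  * 0x000000a012340000 yields a 32-bit view of 0x123400a0:
                  * bits [31:18] keep their position and bits [43:32] move
                  * down to bits [11:0].
                  */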
8335             ARMCPRegInfo cbar_reginfo[] = {
8336                 { .name = "CBAR",
8337                   .type = ARM_CP_CONST,
8338                   .cp = 15, .crn = 15, .crm = 3, .opc1 = 1, .opc2 = 0,
8339                   .access = PL1_R, .resetvalue = cbar32 },
8340                 { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64,
8341                   .type = ARM_CP_CONST,
8342                   .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0,
8343                   .access = PL1_R, .resetvalue = cpu->reset_cbar },
8344             };
8345             /* We don't implement a r/w 64 bit CBAR currently */
8346             assert(arm_feature(env, ARM_FEATURE_CBAR_RO));
8347             define_arm_cp_regs(cpu, cbar_reginfo);
8348         } else {
8349             ARMCPRegInfo cbar = {
8350                 .name = "CBAR",
8351                 .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
8352                 .access = PL1_R|PL3_W, .resetvalue = cpu->reset_cbar,
8353                 .fieldoffset = offsetof(CPUARMState,
8354                                         cp15.c15_config_base_address)
8355             };
8356             if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
8357                 cbar.access = PL1_R;
8358                 cbar.fieldoffset = 0;
8359                 cbar.type = ARM_CP_CONST;
8360             }
8361             define_one_arm_cp_reg(cpu, &cbar);
8362         }
8363     }
8364 
8365     if (arm_feature(env, ARM_FEATURE_VBAR)) {
8366         static const ARMCPRegInfo vbar_cp_reginfo[] = {
8367             { .name = "VBAR", .state = ARM_CP_STATE_BOTH,
8368               .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
8369               .access = PL1_RW, .writefn = vbar_write,
8370               .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s),
8371                                      offsetof(CPUARMState, cp15.vbar_ns) },
8372               .resetvalue = 0 },
8373         };
8374         define_arm_cp_regs(cpu, vbar_cp_reginfo);
8375     }
8376 
8377     /* Generic registers whose values depend on the implementation */
8378     {
8379         ARMCPRegInfo sctlr = {
8380             .name = "SCTLR", .state = ARM_CP_STATE_BOTH,
8381             .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
8382             .access = PL1_RW, .accessfn = access_tvm_trvm,
8383             .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s),
8384                                    offsetof(CPUARMState, cp15.sctlr_ns) },
8385             .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
8386             .raw_writefn = raw_write,
8387         };
8388         if (arm_feature(env, ARM_FEATURE_XSCALE)) {
8389             /* Normally we would always end the TB on an SCTLR write, but Linux
8390              * arch/arm/mach-pxa/sleep.S expects two instructions following
8391              * an MMU enable to execute from cache.  Imitate this behaviour.
8392              */
8393             sctlr.type |= ARM_CP_SUPPRESS_TB_END;
8394         }
8395         define_one_arm_cp_reg(cpu, &sctlr);
8396     }
8397 
8398     if (cpu_isar_feature(aa64_lor, cpu)) {
8399         define_arm_cp_regs(cpu, lor_reginfo);
8400     }
8401     if (cpu_isar_feature(aa64_pan, cpu)) {
8402         define_one_arm_cp_reg(cpu, &pan_reginfo);
8403     }
8404 #ifndef CONFIG_USER_ONLY
8405     if (cpu_isar_feature(aa64_ats1e1, cpu)) {
8406         define_arm_cp_regs(cpu, ats1e1_reginfo);
8407     }
8408     if (cpu_isar_feature(aa32_ats1e1, cpu)) {
8409         define_arm_cp_regs(cpu, ats1cp_reginfo);
8410     }
8411 #endif
8412     if (cpu_isar_feature(aa64_uao, cpu)) {
8413         define_one_arm_cp_reg(cpu, &uao_reginfo);
8414     }
8415 
8416     if (cpu_isar_feature(aa64_dit, cpu)) {
8417         define_one_arm_cp_reg(cpu, &dit_reginfo);
8418     }
8419     if (cpu_isar_feature(aa64_ssbs, cpu)) {
8420         define_one_arm_cp_reg(cpu, &ssbs_reginfo);
8421     }
8422     if (cpu_isar_feature(any_ras, cpu)) {
8423         define_arm_cp_regs(cpu, minimal_ras_reginfo);
8424     }
8425 
8426     if (cpu_isar_feature(aa64_vh, cpu) ||
8427         cpu_isar_feature(aa64_debugv8p2, cpu)) {
8428         define_one_arm_cp_reg(cpu, &contextidr_el2);
8429     }
8430     if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) {
8431         define_arm_cp_regs(cpu, vhe_reginfo);
8432     }
8433 
8434     if (cpu_isar_feature(aa64_sve, cpu)) {
8435         define_arm_cp_regs(cpu, zcr_reginfo);
8436     }
8437 
8438     if (cpu_isar_feature(aa64_hcx, cpu)) {
8439         define_one_arm_cp_reg(cpu, &hcrx_el2_reginfo);
8440     }
8441 
8442 #ifdef TARGET_AARCH64
8443     if (cpu_isar_feature(aa64_pauth, cpu)) {
8444         define_arm_cp_regs(cpu, pauth_reginfo);
8445     }
8446     if (cpu_isar_feature(aa64_rndr, cpu)) {
8447         define_arm_cp_regs(cpu, rndr_reginfo);
8448     }
8449     if (cpu_isar_feature(aa64_tlbirange, cpu)) {
8450         define_arm_cp_regs(cpu, tlbirange_reginfo);
8451     }
8452     if (cpu_isar_feature(aa64_tlbios, cpu)) {
8453         define_arm_cp_regs(cpu, tlbios_reginfo);
8454     }
8455 #ifndef CONFIG_USER_ONLY
8456     /* Data Cache clean instructions up to PoP */
8457     if (cpu_isar_feature(aa64_dcpop, cpu)) {
8458         define_one_arm_cp_reg(cpu, dcpop_reg);
8459 
8460         if (cpu_isar_feature(aa64_dcpodp, cpu)) {
8461             define_one_arm_cp_reg(cpu, dcpodp_reg);
8462         }
8463     }
8464 #endif /*CONFIG_USER_ONLY*/
8465 
8466     /*
8467      * If full MTE is enabled, add all of the system registers.
8468      * If only "instructions available at EL0" are enabled,
8469      * then define only a RAZ/WI version of PSTATE.TCO.
8470      */
8471     if (cpu_isar_feature(aa64_mte, cpu)) {
8472         define_arm_cp_regs(cpu, mte_reginfo);
8473         define_arm_cp_regs(cpu, mte_el0_cacheop_reginfo);
8474     } else if (cpu_isar_feature(aa64_mte_insn_reg, cpu)) {
8475         define_arm_cp_regs(cpu, mte_tco_ro_reginfo);
8476         define_arm_cp_regs(cpu, mte_el0_cacheop_reginfo);
8477     }
8478 
8479     if (cpu_isar_feature(aa64_scxtnum, cpu)) {
8480         define_arm_cp_regs(cpu, scxtnum_reginfo);
8481     }
8482 #endif
8483 
8484     if (cpu_isar_feature(any_predinv, cpu)) {
8485         define_arm_cp_regs(cpu, predinv_reginfo);
8486     }
8487 
8488     if (cpu_isar_feature(any_ccidx, cpu)) {
8489         define_arm_cp_regs(cpu, ccsidr2_reginfo);
8490     }
8491 
8492 #ifndef CONFIG_USER_ONLY
8493     /*
8494      * Register redirections and aliases must be done last,
8495      * after the registers from the other extensions have been defined.
8496      */
8497     if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) {
8498         define_arm_vh_e2h_redirects_aliases(cpu);
8499     }
8500 #endif
8501 }
8502 
8503 /* Sort alphabetically by type name, except for "any". */
8504 static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b)
8505 {
8506     ObjectClass *class_a = (ObjectClass *)a;
8507     ObjectClass *class_b = (ObjectClass *)b;
8508     const char *name_a, *name_b;
8509 
8510     name_a = object_class_get_name(class_a);
8511     name_b = object_class_get_name(class_b);
8512     if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) {
8513         return 1;
8514     } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) {
8515         return -1;
8516     } else {
8517         return strcmp(name_a, name_b);
8518     }
8519 }
8520 
8521 static void arm_cpu_list_entry(gpointer data, gpointer user_data)
8522 {
8523     ObjectClass *oc = data;
8524     const char *typename;
8525     char *name;
8526 
8527     typename = object_class_get_name(oc);
8528     name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU));
8529     qemu_printf("  %s\n", name);
8530     g_free(name);
8531 }
8532 
8533 void arm_cpu_list(void)
8534 {
8535     GSList *list;
8536 
8537     list = object_class_get_list(TYPE_ARM_CPU, false);
8538     list = g_slist_sort(list, arm_cpu_list_compare);
8539     qemu_printf("Available CPUs:\n");
8540     g_slist_foreach(list, arm_cpu_list_entry, NULL);
8541     g_slist_free(list);
8542 }
8543 
8544 static void arm_cpu_add_definition(gpointer data, gpointer user_data)
8545 {
8546     ObjectClass *oc = data;
8547     CpuDefinitionInfoList **cpu_list = user_data;
8548     CpuDefinitionInfo *info;
8549     const char *typename;
8550 
8551     typename = object_class_get_name(oc);
8552     info = g_malloc0(sizeof(*info));
8553     info->name = g_strndup(typename,
8554                            strlen(typename) - strlen("-" TYPE_ARM_CPU));
8555     info->q_typename = g_strdup(typename);
8556 
8557     QAPI_LIST_PREPEND(*cpu_list, info);
8558 }
8559 
8560 CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
8561 {
8562     CpuDefinitionInfoList *cpu_list = NULL;
8563     GSList *list;
8564 
8565     list = object_class_get_list(TYPE_ARM_CPU, false);
8566     g_slist_foreach(list, arm_cpu_add_definition, &cpu_list);
8567     g_slist_free(list);
8568 
8569     return cpu_list;
8570 }
8571 
8572 /*
8573  * Private utility function for define_one_arm_cp_reg_with_opaque():
8574  * add a single reginfo struct to the hash table.
8575  */
8576 static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
8577                                    void *opaque, CPState state,
8578                                    CPSecureState secstate,
8579                                    int crm, int opc1, int opc2,
8580                                    const char *name)
8581 {
8582     CPUARMState *env = &cpu->env;
8583     uint32_t key;
8584     ARMCPRegInfo *r2;
8585     bool is64 = r->type & ARM_CP_64BIT;
8586     bool ns = secstate & ARM_CP_SECSTATE_NS;
8587     int cp = r->cp;
8588     size_t name_len;
8589     bool make_const;
8590 
8591     switch (state) {
8592     case ARM_CP_STATE_AA32:
8593         /* We assume it is a cp15 register if the .cp field is left unset. */
8594         if (cp == 0 && r->state == ARM_CP_STATE_BOTH) {
8595             cp = 15;
8596         }
8597         key = ENCODE_CP_REG(cp, is64, ns, r->crn, crm, opc1, opc2);
8598         break;
8599     case ARM_CP_STATE_AA64:
8600         /*
8601          * To allow abbreviation of ARMCPRegInfo definitions, we treat
8602          * cp == 0 as equivalent to the value for "standard guest-visible
8603          * sysreg".  STATE_BOTH definitions are also always "standard sysreg"
8604          * in their AArch64 view (the .cp value may be non-zero for the
8605          * benefit of the AArch32 view).
8606          */
8607         if (cp == 0 || r->state == ARM_CP_STATE_BOTH) {
8608             cp = CP_REG_ARM64_SYSREG_CP;
8609         }
8610         key = ENCODE_AA64_CP_REG(cp, r->crn, crm, r->opc0, opc1, opc2);
8611         break;
8612     default:
8613         g_assert_not_reached();
8614     }
8615 
8616     /* Overriding of an existing definition must be explicitly requested. */
8617     if (!(r->type & ARM_CP_OVERRIDE)) {
8618         const ARMCPRegInfo *oldreg = get_arm_cp_reginfo(cpu->cp_regs, key);
8619         if (oldreg) {
8620             assert(oldreg->type & ARM_CP_OVERRIDE);
8621         }
8622     }
8623 
8624     /*
8625      * Eliminate registers that are not present because the EL is missing.
8626      * Doing this here makes it easier to put all registers for a given
8627      * feature into the same ARMCPRegInfo array and define them all at once.
8628      */
8629     make_const = false;
8630     if (arm_feature(env, ARM_FEATURE_EL3)) {
8631         /*
8632          * An EL2 register without EL2 but with EL3 is (usually) RES0.
8633          * See rule RJFFP in section D1.1.3 of DDI0487H.a.
8634          */
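             /*
              * CPAccessRights allocates two bits per exception level, with
              * EL0 in bits [1:0] up to EL3 in bits [7:6], and the PLx_R/W
              * constants for a given EL also set the bits for all higher
              * ELs; the lowest set bit divided by two therefore gives the
              * lowest EL that can access this register.
              */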
8635         int min_el = ctz32(r->access) / 2;
8636         if (min_el == 2 && !arm_feature(env, ARM_FEATURE_EL2)) {
8637             if (r->type & ARM_CP_EL3_NO_EL2_UNDEF) {
8638                 return;
8639             }
8640             make_const = !(r->type & ARM_CP_EL3_NO_EL2_KEEP);
8641         }
8642     } else {
8643         CPAccessRights max_el = (arm_feature(env, ARM_FEATURE_EL2)
8644                                  ? PL2_RW : PL1_RW);
8645         if ((r->access & max_el) == 0) {
8646             return;
8647         }
8648     }
8649 
8650     /* Combine cpreg and name into one allocation. */
8651     name_len = strlen(name) + 1;
8652     r2 = g_malloc(sizeof(*r2) + name_len);
8653     *r2 = *r;
8654     r2->name = memcpy(r2 + 1, name, name_len);
8655 
8656     /*
8657      * Update fields to match the instantiation, overwriting wildcards
8658      * such as CP_ANY, ARM_CP_STATE_BOTH, or ARM_CP_SECSTATE_BOTH.
8659      */
8660     r2->cp = cp;
8661     r2->crm = crm;
8662     r2->opc1 = opc1;
8663     r2->opc2 = opc2;
8664     r2->state = state;
8665     r2->secure = secstate;
8666     if (opaque) {
8667         r2->opaque = opaque;
8668     }
8669 
8670     if (make_const) {
8671         /* This should not have been a very special register to begin with. */
8672         int old_special = r2->type & ARM_CP_SPECIAL_MASK;
8673         assert(old_special == 0 || old_special == ARM_CP_NOP);
8674         /*
8675          * Set the special function to CONST, retaining the other flags.
8676          * This is important for e.g. ARM_CP_SVE so that we still
8677          * take the SVE trap if CPTR_EL3.EZ == 0.
8678          */
8679         r2->type = (r2->type & ~ARM_CP_SPECIAL_MASK) | ARM_CP_CONST;
8680         /*
8681          * Usually, these registers become RES0, but there are a few
8682          * special cases like VPIDR_EL2 which have a constant non-zero
8683          * value with writes ignored.
8684          */
8685         if (!(r->type & ARM_CP_EL3_NO_EL2_C_NZ)) {
8686             r2->resetvalue = 0;
8687         }
8688         /*
8689          * ARM_CP_CONST has precedence, so removing the callbacks and
8690          * offsets is not strictly necessary, but it is potentially
8691          * less confusing to debug later.
8692          */
8693         r2->readfn = NULL;
8694         r2->writefn = NULL;
8695         r2->raw_readfn = NULL;
8696         r2->raw_writefn = NULL;
8697         r2->resetfn = NULL;
8698         r2->fieldoffset = 0;
8699         r2->bank_fieldoffsets[0] = 0;
8700         r2->bank_fieldoffsets[1] = 0;
8701     } else {
8702         bool isbanked = r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1];
8703 
8704         if (isbanked) {
8705             /*
8706              * Register is banked (using both entries in the array).
8707              * The array is only used when defining banked registers; from
8708              * here on only fieldoffset is used, so set it to this bank's offset.
8709              */
8710             r2->fieldoffset = r->bank_fieldoffsets[ns];
8711         }
8712         if (state == ARM_CP_STATE_AA32) {
8713             if (isbanked) {
8714                 /*
8715                  * If the register is banked then we don't need to migrate or
8716                  * reset the 32-bit instance in certain cases:
8717                  *
8718                  * 1) If the register has both 32-bit and 64-bit instances
8719                  *    then we can count on the 64-bit instance taking care
8720                  *    of the non-secure bank.
8721                  * 2) If ARMv8 is enabled then we can count on a 64-bit
8722                  *    version taking care of the secure bank.  This requires
8723                  *    that separate 32 and 64-bit definitions are provided.
8724                  */
8725                 if ((r->state == ARM_CP_STATE_BOTH && ns) ||
8726                     (arm_feature(env, ARM_FEATURE_V8) && !ns)) {
8727                     r2->type |= ARM_CP_ALIAS;
8728                 }
8729             } else if ((secstate != r->secure) && !ns) {
8730                 /*
8731                  * The register is not banked so we only want to allow
8732                  * migration of the non-secure instance.
8733                  */
8734                 r2->type |= ARM_CP_ALIAS;
8735             }
8736 
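                 /*
                  * A STATE_BOTH register is backed by a 64-bit state field;
                  * the AArch32 view needs its low 32 bits, which on a
                  * big-endian host live at offset +4 within that field.
                  */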
8737             if (HOST_BIG_ENDIAN &&
8738                 r->state == ARM_CP_STATE_BOTH && r2->fieldoffset) {
8739                 r2->fieldoffset += sizeof(uint32_t);
8740             }
8741         }
8742     }
8743 
8744     /*
8745      * By convention, for wildcarded registers only the first
8746      * entry is used for migration; the others are marked as
8747      * ALIAS so we don't try to transfer the register
8748      * multiple times. Special registers (ie NOP/WFI) are
8749      * never migratable and not even raw-accessible.
8750      */
8751     if (r2->type & ARM_CP_SPECIAL_MASK) {
8752         r2->type |= ARM_CP_NO_RAW;
8753     }
8754     if (((r->crm == CP_ANY) && crm != 0) ||
8755         ((r->opc1 == CP_ANY) && opc1 != 0) ||
8756         ((r->opc2 == CP_ANY) && opc2 != 0)) {
8757         r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB;
8758     }
8759 
8760     /*
8761      * Check that raw accesses are either forbidden or handled. Note that
8762      * we can't assert this earlier because the setup of fieldoffset for
8763      * banked registers has to be done first.
8764      */
8765     if (!(r2->type & ARM_CP_NO_RAW)) {
8766         assert(!raw_accessors_invalid(r2));
8767     }
8768 
8769     g_hash_table_insert(cpu->cp_regs, (gpointer)(uintptr_t)key, r2);
8770 }
8771 
8772 
8773 void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
8774                                        const ARMCPRegInfo *r, void *opaque)
8775 {
8776     /* Define implementations of coprocessor registers.
8777      * We store these in a hashtable because typically
8778      * there are less than 150 registers in a space which
8779      * is 16*16*16*8*8 = 262144 in size.
8780      * Wildcarding is supported for the crm, opc1 and opc2 fields.
8781      * If a register is defined twice then the second definition is
8782      * used, so this can be used to define some generic registers and
8783      * then override them with implementation specific variations.
8784      * At least one of the original and the second definition should
8785      * include ARM_CP_OVERRIDE in its type bits -- this is just a guard
8786      * against accidental use.
8787      *
8788      * The state field defines whether the register is to be
8789      * visible in the AArch32 or AArch64 execution state. If the
8790      * state is set to ARM_CP_STATE_BOTH then we synthesise a
8791      * reginfo structure for the AArch32 view, which sees the lower
8792      * 32 bits of the 64 bit register.
8793      *
8794      * Only registers visible in AArch64 may set r->opc0; opc0 cannot
8795      * be wildcarded. AArch64 registers are always considered to be 64
8796      * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of
8797      * the register, if any.
8798      */
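         /*
          * An illustrative example of the expansion: a definition with
          * .opc2 = CP_ANY becomes eight hash table entries, one per opc2
          * value 0..7; add_cpreg_to_hashtable() marks all but the opc2 == 0
          * instance as ARM_CP_ALIAS | ARM_CP_NO_GDB so the register is only
          * migrated once.
          */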
8799     int crm, opc1, opc2;
8800     int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
8801     int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
8802     int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
8803     int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
8804     int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
8805     int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
8806     CPState state;
8807 
8808     /* 64 bit registers have only CRm and Opc1 fields */
8809     assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
8810     /* op0 only exists in the AArch64 encodings */
8811     assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0));
8812     /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */
8813     assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT));
8814     /*
8815      * This API is only for Arm's system coprocessors (14 and 15) or
8816      * (M-profile or v7A-and-earlier only) for implementation defined
8817      * coprocessors in the range 0..7.  Our decode assumes this, since
8818      * 8..13 can be used for other insns including VFP and Neon. See
8819      * valid_cp() in translate.c.  Assert here that we haven't tried
8820      * to use an invalid coprocessor number.
8821      */
8822     switch (r->state) {
8823     case ARM_CP_STATE_BOTH:
8824         /* 0 has a special meaning, but otherwise the same rules as AA32. */
8825         if (r->cp == 0) {
8826             break;
8827         }
8828         /* fall through */
8829     case ARM_CP_STATE_AA32:
8830         if (arm_feature(&cpu->env, ARM_FEATURE_V8) &&
8831             !arm_feature(&cpu->env, ARM_FEATURE_M)) {
8832             assert(r->cp >= 14 && r->cp <= 15);
8833         } else {
8834             assert(r->cp < 8 || (r->cp >= 14 && r->cp <= 15));
8835         }
8836         break;
8837     case ARM_CP_STATE_AA64:
8838         assert(r->cp == 0 || r->cp == CP_REG_ARM64_SYSREG_CP);
8839         break;
8840     default:
8841         g_assert_not_reached();
8842     }
8843     /* The AArch64 pseudocode CheckSystemAccess() specifies that op1
8844      * encodes a minimum access level for the register. We roll this
8845      * runtime check into our general permission check code, so check
8846      * here that the reginfo's specified permissions are strict enough
8847      * to encompass the generic architectural permission check.
8848      */
8849     if (r->state != ARM_CP_STATE_AA32) {
8850         CPAccessRights mask;
8851         switch (r->opc1) {
8852         case 0:
8853             /* min_EL EL1, but some accessible to EL0 via kernel ABI */
8854             mask = PL0U_R | PL1_RW;
8855             break;
8856         case 1: case 2:
8857             /* min_EL EL1 */
8858             mask = PL1_RW;
8859             break;
8860         case 3:
8861             /* min_EL EL0 */
8862             mask = PL0_RW;
8863             break;
8864         case 4:
8865         case 5:
8866             /* min_EL EL2 */
8867             mask = PL2_RW;
8868             break;
8869         case 6:
8870             /* min_EL EL3 */
8871             mask = PL3_RW;
8872             break;
8873         case 7:
8874             /* min_EL EL1, secure mode only (we don't check the latter) */
8875             mask = PL1_RW;
8876             break;
8877         default:
8878             /* broken reginfo with out-of-range opc1 */
8879             g_assert_not_reached();
8880         }
8881         /* assert our permissions are not too lax (stricter is fine) */
8882         assert((r->access & ~mask) == 0);
8883     }
8884 
8885     /* Check that the register definition has enough info to handle
8886      * reads and writes if they are permitted.
8887      */
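         /*
          * Because each PLx_R/PLx_W constant also sets the corresponding
          * bits for all higher ELs, testing PL3_R/PL3_W below amounts to
          * asking whether the register is readable or writable at any EL.
          */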
8888     if (!(r->type & (ARM_CP_SPECIAL_MASK | ARM_CP_CONST))) {
8889         if (r->access & PL3_R) {
8890             assert((r->fieldoffset ||
8891                    (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
8892                    r->readfn);
8893         }
8894         if (r->access & PL3_W) {
8895             assert((r->fieldoffset ||
8896                    (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
8897                    r->writefn);
8898         }
8899     }
8900 
8901     for (crm = crmmin; crm <= crmmax; crm++) {
8902         for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
8903             for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
8904                 for (state = ARM_CP_STATE_AA32;
8905                      state <= ARM_CP_STATE_AA64; state++) {
8906                     if (r->state != state && r->state != ARM_CP_STATE_BOTH) {
8907                         continue;
8908                     }
8909                     if (state == ARM_CP_STATE_AA32) {
8910                         /* Under AArch32 CP registers can be common
8911                          * (same for secure and non-secure world) or banked.
8912                          */
8913                         char *name;
8914 
8915                         switch (r->secure) {
8916                         case ARM_CP_SECSTATE_S:
8917                         case ARM_CP_SECSTATE_NS:
8918                             add_cpreg_to_hashtable(cpu, r, opaque, state,
8919                                                    r->secure, crm, opc1, opc2,
8920                                                    r->name);
8921                             break;
8922                         case ARM_CP_SECSTATE_BOTH:
8923                             name = g_strdup_printf("%s_S", r->name);
8924                             add_cpreg_to_hashtable(cpu, r, opaque, state,
8925                                                    ARM_CP_SECSTATE_S,
8926                                                    crm, opc1, opc2, name);
8927                             g_free(name);
8928                             add_cpreg_to_hashtable(cpu, r, opaque, state,
8929                                                    ARM_CP_SECSTATE_NS,
8930                                                    crm, opc1, opc2, r->name);
8931                             break;
8932                         default:
8933                             g_assert_not_reached();
8934                         }
8935                     } else {
8936                         /* AArch64 registers get mapped to non-secure instance
8937                          * of AArch32 */
8938                         add_cpreg_to_hashtable(cpu, r, opaque, state,
8939                                                ARM_CP_SECSTATE_NS,
8940                                                crm, opc1, opc2, r->name);
8941                     }
8942                 }
8943             }
8944         }
8945     }
8946 }
8947 
8948 /* Define a whole list of registers */
8949 void define_arm_cp_regs_with_opaque_len(ARMCPU *cpu, const ARMCPRegInfo *regs,
8950                                         void *opaque, size_t len)
8951 {
8952     size_t i;
8953     for (i = 0; i < len; ++i) {
8954         define_one_arm_cp_reg_with_opaque(cpu, regs + i, opaque);
8955     }
8956 }
8957 
8958 /*
8959  * Modify ARMCPRegInfo for access from userspace.
8960  *
8961  * This is a data-driven modification directed by
8962  * ARMCPRegUserSpaceInfo. All registers become ARM_CP_CONST, since
8963  * user space cannot alter any values and dynamic values pertaining
8964  * to execution state are hidden from the user-space view anyway.
8965  */
8966 void modify_arm_cp_regs_with_len(ARMCPRegInfo *regs, size_t regs_len,
8967                                  const ARMCPRegUserSpaceInfo *mods,
8968                                  size_t mods_len)
8969 {
8970     for (size_t mi = 0; mi < mods_len; ++mi) {
8971         const ARMCPRegUserSpaceInfo *m = mods + mi;
8972         GPatternSpec *pat = NULL;
8973 
8974         if (m->is_glob) {
8975             pat = g_pattern_spec_new(m->name);
8976         }
8977         for (size_t ri = 0; ri < regs_len; ++ri) {
8978             ARMCPRegInfo *r = regs + ri;
8979 
8980             if (pat && g_pattern_match_string(pat, r->name)) {
8981                 r->type = ARM_CP_CONST;
8982                 r->access = PL0U_R;
8983                 r->resetvalue = 0;
8984                 /* continue */
8985             } else if (strcmp(r->name, m->name) == 0) {
8986                 r->type = ARM_CP_CONST;
8987                 r->access = PL0U_R;
8988                 r->resetvalue &= m->exported_bits;
8989                 r->resetvalue |= m->fixed_bits;
8990                 break;
8991             }
8992         }
8993         if (pat) {
8994             g_pattern_spec_free(pat);
8995         }
8996     }
8997 }
8998 
8999 const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
9000 {
9001     return g_hash_table_lookup(cpregs, (gpointer)(uintptr_t)encoded_cp);
9002 }
9003 
9004 void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
9005                          uint64_t value)
9006 {
9007     /* Helper coprocessor write function for write-ignore registers */
9008 }
9009 
9010 uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
9011 {
9012     /* Helper coprocessor read function for read-as-zero registers */
9013     return 0;
9014 }
9015 
9016 void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
9017 {
9018     /* Helper coprocessor reset function for do-nothing-on-reset registers */
9019 }
9020 
9021 static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type)
9022 {
9023     /* Return true if it is not valid for us to switch to
9024      * this CPU mode (ie all the UNPREDICTABLE cases in
9025      * the ARM ARM CPSRWriteByInstr pseudocode).
9026      */
9027 
9028     /* Changes to or from Hyp via MSR and CPS are illegal. */
9029     if (write_type == CPSRWriteByInstr &&
9030         ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP ||
9031          mode == ARM_CPU_MODE_HYP)) {
9032         return 1;
9033     }
9034 
9035     switch (mode) {
9036     case ARM_CPU_MODE_USR:
9037         return 0;
9038     case ARM_CPU_MODE_SYS:
9039     case ARM_CPU_MODE_SVC:
9040     case ARM_CPU_MODE_ABT:
9041     case ARM_CPU_MODE_UND:
9042     case ARM_CPU_MODE_IRQ:
9043     case ARM_CPU_MODE_FIQ:
9044         /* Note that we don't implement the IMPDEF NSACR.RFR which in v7
9045          * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.)
9046          */
9047         /* If HCR.TGE is set then changes from Monitor to NS PL1 via MSR
9048          * and CPS are treated as illegal mode changes.
9049          */
9050         if (write_type == CPSRWriteByInstr &&
9051             (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON &&
9052             (arm_hcr_el2_eff(env) & HCR_TGE)) {
9053             return 1;
9054         }
9055         return 0;
9056     case ARM_CPU_MODE_HYP:
9057         return !arm_is_el2_enabled(env) || arm_current_el(env) < 2;
9058     case ARM_CPU_MODE_MON:
9059         return arm_current_el(env) < 3;
9060     default:
9061         return 1;
9062     }
9063 }
9064 
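     /*
      * The NZCV flags are kept in cached form: N is bit 31 of env->NF, Z is
      * set iff env->ZF == 0, C is env->CF (0 or 1), and V is bit 31 of
      * env->VF; cpsr_read() below reassembles them into the architectural
      * CPSR layout.
      */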
9065 uint32_t cpsr_read(CPUARMState *env)
9066 {
9067     int ZF;
9068     ZF = (env->ZF == 0);
9069     return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
9070         (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
9071         | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
9072         | ((env->condexec_bits & 0xfc) << 8)
9073         | (env->GE << 16) | (env->daif & CPSR_AIF);
9074 }
9075 
9076 void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
9077                 CPSRWriteType write_type)
9078 {
9079     uint32_t changed_daif;
9080     bool rebuild_hflags = (write_type != CPSRWriteRaw) &&
9081         (mask & (CPSR_M | CPSR_E | CPSR_IL));
9082 
9083     if (mask & CPSR_NZCV) {
9084         env->ZF = (~val) & CPSR_Z;
9085         env->NF = val;
9086         env->CF = (val >> 29) & 1;
9087         env->VF = (val << 3) & 0x80000000;
9088     }
9089     if (mask & CPSR_Q)
9090         env->QF = ((val & CPSR_Q) != 0);
9091     if (mask & CPSR_T)
9092         env->thumb = ((val & CPSR_T) != 0);
9093     if (mask & CPSR_IT_0_1) {
9094         env->condexec_bits &= ~3;
9095         env->condexec_bits |= (val >> 25) & 3;
9096     }
9097     if (mask & CPSR_IT_2_7) {
9098         env->condexec_bits &= 3;
9099         env->condexec_bits |= (val >> 8) & 0xfc;
9100     }
9101     if (mask & CPSR_GE) {
9102         env->GE = (val >> 16) & 0xf;
9103     }
9104 
9105     /* In a V7 implementation that includes the security extensions but does
9106      * not include Virtualization Extensions the SCR.FW and SCR.AW bits control
9107      * whether non-secure software is allowed to change the CPSR_F and CPSR_A
9108      * bits respectively.
9109      *
9110      * In a V8 implementation, it is permitted for privileged software to
9111      * change the CPSR A/F bits regardless of the SCR.AW/FW bits.
9112      */
9113     if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) &&
9114         arm_feature(env, ARM_FEATURE_EL3) &&
9115         !arm_feature(env, ARM_FEATURE_EL2) &&
9116         !arm_is_secure(env)) {
9117 
9118         changed_daif = (env->daif ^ val) & mask;
9119 
9120         if (changed_daif & CPSR_A) {
9121             /* Check to see if we are allowed to change the masking of async
9122              * abort exceptions from a non-secure state.
9123              */
9124             if (!(env->cp15.scr_el3 & SCR_AW)) {
9125                 qemu_log_mask(LOG_GUEST_ERROR,
9126                               "Ignoring attempt to switch CPSR_A flag from "
9127                               "non-secure world with SCR.AW bit clear\n");
9128                 mask &= ~CPSR_A;
9129             }
9130         }
9131 
9132         if (changed_daif & CPSR_F) {
9133             /* Check to see if we are allowed to change the masking of FIQ
9134              * exceptions from a non-secure state.
9135              */
9136             if (!(env->cp15.scr_el3 & SCR_FW)) {
9137                 qemu_log_mask(LOG_GUEST_ERROR,
9138                               "Ignoring attempt to switch CPSR_F flag from "
9139                               "non-secure world with SCR.FW bit clear\n");
9140                 mask &= ~CPSR_F;
9141             }
9142 
9143             /* Check whether non-maskable FIQ (NMFI) support is enabled.
9144              * If this bit is set software is not allowed to mask
9145              * FIQs, but is allowed to set CPSR_F to 0.
9146              */
9147             if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) &&
9148                 (val & CPSR_F)) {
9149                 qemu_log_mask(LOG_GUEST_ERROR,
9150                               "Ignoring attempt to enable CPSR_F flag "
9151                               "(non-maskable FIQ [NMFI] support enabled)\n");
9152                 mask &= ~CPSR_F;
9153             }
9154         }
9155     }
9156 
9157     env->daif &= ~(CPSR_AIF & mask);
9158     env->daif |= val & CPSR_AIF & mask;
9159 
9160     if (write_type != CPSRWriteRaw &&
9161         ((env->uncached_cpsr ^ val) & mask & CPSR_M)) {
9162         if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) {
9163             /* Note that we can only get here in USR mode if this is a
9164              * gdb stub write; for this case we follow the architectural
9165              * behaviour for guest writes in USR mode of ignoring an attempt
9166              * to switch mode. (Those are caught by translate.c for writes
9167              * triggered by guest instructions.)
9168              */
9169             mask &= ~CPSR_M;
9170         } else if (bad_mode_switch(env, val & CPSR_M, write_type)) {
9171             /* Attempt to switch to an invalid mode: this is UNPREDICTABLE in
9172              * v7, and has defined behaviour in v8:
9173              *  + leave CPSR.M untouched
9174              *  + allow changes to the other CPSR fields
9175              *  + set PSTATE.IL
9176              * For user changes via the GDB stub, we don't set PSTATE.IL,
9177              * as this would be unnecessarily harsh for a user error.
9178              */
9179             mask &= ~CPSR_M;
9180             if (write_type != CPSRWriteByGDBStub &&
9181                 arm_feature(env, ARM_FEATURE_V8)) {
9182                 mask |= CPSR_IL;
9183                 val |= CPSR_IL;
9184             }
9185             qemu_log_mask(LOG_GUEST_ERROR,
9186                           "Illegal AArch32 mode switch attempt from %s to %s\n",
9187                           aarch32_mode_name(env->uncached_cpsr),
9188                           aarch32_mode_name(val));
9189         } else {
9190             qemu_log_mask(CPU_LOG_INT, "%s %s to %s PC 0x%" PRIx32 "\n",
9191                           write_type == CPSRWriteExceptionReturn ?
9192                           "Exception return from AArch32" :
9193                           "AArch32 mode switch from",
9194                           aarch32_mode_name(env->uncached_cpsr),
9195                           aarch32_mode_name(val), env->regs[15]);
9196             switch_mode(env, val & CPSR_M);
9197         }
9198     }
9199     mask &= ~CACHED_CPSR_BITS;
9200     env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
9201     if (rebuild_hflags) {
9202         arm_rebuild_hflags(env);
9203     }
9204 }
9205 
9206 /* Sign/zero extend */
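     /*
      * sxtb16 sign-extends bytes 0 and 2 of its input to halfwords (for
      * example 0x00800080 becomes 0xff80ff80); uxtb16 below does the same
      * with zero extension.
      */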
9207 uint32_t HELPER(sxtb16)(uint32_t x)
9208 {
9209     uint32_t res;
9210     res = (uint16_t)(int8_t)x;
9211     res |= (uint32_t)(int8_t)(x >> 16) << 16;
9212     return res;
9213 }
9214 
9215 static void handle_possible_div0_trap(CPUARMState *env, uintptr_t ra)
9216 {
9217     /*
9218      * Take a division-by-zero exception if necessary; otherwise return
9219      * to get the usual non-trapping division behaviour (result of 0)
9220      */
9221     if (arm_feature(env, ARM_FEATURE_M)
9222         && (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_DIV_0_TRP_MASK)) {
9223         raise_exception_ra(env, EXCP_DIVBYZERO, 0, 1, ra);
9224     }
9225 }
9226 
9227 uint32_t HELPER(uxtb16)(uint32_t x)
9228 {
9229     uint32_t res;
9230     res = (uint16_t)(uint8_t)x;
9231     res |= (uint32_t)(uint8_t)(x >> 16) << 16;
9232     return res;
9233 }
9234 
9235 int32_t HELPER(sdiv)(CPUARMState *env, int32_t num, int32_t den)
9236 {
9237     if (den == 0) {
9238         handle_possible_div0_trap(env, GETPC());
9239         return 0;
9240     }
9241     if (num == INT_MIN && den == -1) {
9242         return INT_MIN;
9243     }
9244     return num / den;
9245 }
9246 
9247 uint32_t HELPER(udiv)(CPUARMState *env, uint32_t num, uint32_t den)
9248 {
9249     if (den == 0) {
9250         handle_possible_div0_trap(env, GETPC());
9251         return 0;
9252     }
9253     return num / den;
9254 }
9255 
9256 uint32_t HELPER(rbit)(uint32_t x)
9257 {
9258     return revbit32(x);
9259 }
9260 
9261 #ifdef CONFIG_USER_ONLY
9262 
9263 static void switch_mode(CPUARMState *env, int mode)
9264 {
9265     ARMCPU *cpu = env_archcpu(env);
9266 
9267     if (mode != ARM_CPU_MODE_USR) {
9268         cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
9269     }
9270 }
9271 
9272 uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
9273                                  uint32_t cur_el, bool secure)
9274 {
9275     return 1;
9276 }
9277 
9278 void aarch64_sync_64_to_32(CPUARMState *env)
9279 {
9280     g_assert_not_reached();
9281 }
9282 
9283 #else
9284 
9285 static void switch_mode(CPUARMState *env, int mode)
9286 {
9287     int old_mode;
9288     int i;
9289 
9290     old_mode = env->uncached_cpsr & CPSR_M;
9291     if (mode == old_mode)
9292         return;
9293 
9294     if (old_mode == ARM_CPU_MODE_FIQ) {
9295         memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
9296         memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
9297     } else if (mode == ARM_CPU_MODE_FIQ) {
9298         memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
9299         memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
9300     }
9301 
9302     i = bank_number(old_mode);
9303     env->banked_r13[i] = env->regs[13];
9304     env->banked_spsr[i] = env->spsr;
9305 
9306     i = bank_number(mode);
9307     env->regs[13] = env->banked_r13[i];
9308     env->spsr = env->banked_spsr[i];
9309 
9310     env->banked_r14[r14_bank_number(old_mode)] = env->regs[14];
9311     env->regs[14] = env->banked_r14[r14_bank_number(mode)];
9312 }
9313 
9314 /* Physical Interrupt Target EL Lookup Table
9315  *
9316  * [ From ARM ARM section G1.13.4 (Table G1-15) ]
9317  *
9318  * The multi-dimensional table below is used to look up the target
9319  * exception level for a given set of conditions.  Specifically, the
9320  * target EL is based on SCR and HCR routing controls as well as the
9321  * currently executing EL and secure state.
9322  *
9323  *    Dimensions:
9324  *    target_el_table[2][2][2][2][2][4]
9325  *                    |  |  |  |  |  +--- Current EL
9326  *                    |  |  |  |  +------ Non-secure(0)/Secure(1)
9327  *                    |  |  |  +--------- HCR mask override
9328  *                    |  |  +------------ SCR exec state control
9329  *                    |  +--------------- SCR mask override
9330  *                    +------------------ 32-bit(0)/64-bit(1) EL3
9331  *
9332  *    The table values are as such:
9333  *    0-3 = EL0-EL3
9334  *     -1 = Cannot occur
9335  *
9336  * The ARM ARM target EL table includes entries indicating that an "exception
9337  * is not taken".  The two cases where this is applicable are:
9338  *    1) An exception is taken from EL3 but the SCR does not have the exception
9339  *    routed to EL3.
9340  *    2) An exception is taken from EL2 but the HCR does not have the exception
9341  *    routed to EL2.
9342  * In these two cases, the table below contains a target of EL1.  This value is
9343  * returned as it is expected that the consumer of the table data will check
9344  * for "target EL >= current EL" to ensure the exception is not taken.
9345  *
9346  *            SCR     HCR
9347  *         64  EA     AMO                 From
9348  *        BIT IRQ     IMO      Non-secure         Secure
9349  *        EL3 FIQ  RW FMO   EL0 EL1 EL2 EL3   EL0 EL1 EL2 EL3
9350  */
9351 static const int8_t target_el_table[2][2][2][2][2][4] = {
9352     {{{{/* 0   0   0   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
9353        {/* 0   0   0   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},
9354       {{/* 0   0   1   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
9355        {/* 0   0   1   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},},
9356      {{{/* 0   1   0   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
9357        {/* 0   1   0   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},
9358       {{/* 0   1   1   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
9359        {/* 0   1   1   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},},},
9360     {{{{/* 1   0   0   0 */{ 1,  1,  2, -1 },{ 1,  1, -1,  1 },},
9361        {/* 1   0   0   1 */{ 2,  2,  2, -1 },{ 2,  2, -1,  1 },},},
9362       {{/* 1   0   1   0 */{ 1,  1,  1, -1 },{ 1,  1,  1,  1 },},
9363        {/* 1   0   1   1 */{ 2,  2,  2, -1 },{ 2,  2,  2,  1 },},},},
9364      {{{/* 1   1   0   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
9365        {/* 1   1   0   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},
9366       {{/* 1   1   1   0 */{ 3,  3,  3, -1 },{ 3,  3,  3,  3 },},
9367        {/* 1   1   1   1 */{ 3,  3,  3, -1 },{ 3,  3,  3,  3 },},},},},
9368 };
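/*
 * Worked example (illustrative): a physical IRQ taken from Non-secure EL0
 * on a CPU with an AArch64 EL3, with SCR_EL3.IRQ = 0, SCR_EL3.RW = 1 and
 * HCR_EL2.{IMO,TGE} = 0, indexes target_el_table[1][0][1][0][0][0],
 * which is 1: the IRQ is taken to EL1.
 */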
9369 
9370 /*
9371  * Determine the target EL for physical exceptions
9372  */
9373 uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
9374                                  uint32_t cur_el, bool secure)
9375 {
9376     CPUARMState *env = cs->env_ptr;
9377     bool rw;
9378     bool scr;
9379     bool hcr;
9380     int target_el;
9381     /* Is the highest EL AArch64? */
9382     bool is64 = arm_feature(env, ARM_FEATURE_AARCH64);
9383     uint64_t hcr_el2;
9384 
9385     if (arm_feature(env, ARM_FEATURE_EL3)) {
9386         rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW);
9387     } else {
9388         /* Either EL2 is the highest EL (and so the EL2 register width
9389          * is given by is64); or there is no EL2 or EL3, in which case
9390          * the value of 'rw' does not affect the table lookup anyway.
9391          */
9392         rw = is64;
9393     }
9394 
9395     hcr_el2 = arm_hcr_el2_eff(env);
9396     switch (excp_idx) {
9397     case EXCP_IRQ:
9398         scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ);
9399         hcr = hcr_el2 & HCR_IMO;
9400         break;
9401     case EXCP_FIQ:
9402         scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ);
9403         hcr = hcr_el2 & HCR_FMO;
9404         break;
9405     default:
9406         scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA);
9407         hcr = hcr_el2 & HCR_AMO;
9408         break;
9409     }
9410 
9411     /*
9412      * For these purposes, TGE and AMO/IMO/FMO both force the
9413      * interrupt to EL2.  Fold TGE into the bit extracted above.
9414      */
9415     hcr |= (hcr_el2 & HCR_TGE) != 0;
9416 
9417     /* Perform a table-lookup for the target EL given the current state */
9418     target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el];
9419 
9420     assert(target_el > 0);
9421 
9422     return target_el;
9423 }
9424 
9425 void arm_log_exception(CPUState *cs)
9426 {
9427     int idx = cs->exception_index;
9428 
9429     if (qemu_loglevel_mask(CPU_LOG_INT)) {
9430         const char *exc = NULL;
9431         static const char * const excnames[] = {
9432             [EXCP_UDEF] = "Undefined Instruction",
9433             [EXCP_SWI] = "SVC",
9434             [EXCP_PREFETCH_ABORT] = "Prefetch Abort",
9435             [EXCP_DATA_ABORT] = "Data Abort",
9436             [EXCP_IRQ] = "IRQ",
9437             [EXCP_FIQ] = "FIQ",
9438             [EXCP_BKPT] = "Breakpoint",
9439             [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
9440             [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
9441             [EXCP_HVC] = "Hypervisor Call",
9442             [EXCP_HYP_TRAP] = "Hypervisor Trap",
9443             [EXCP_SMC] = "Secure Monitor Call",
9444             [EXCP_VIRQ] = "Virtual IRQ",
9445             [EXCP_VFIQ] = "Virtual FIQ",
9446             [EXCP_SEMIHOST] = "Semihosting call",
9447             [EXCP_NOCP] = "v7M NOCP UsageFault",
9448             [EXCP_INVSTATE] = "v7M INVSTATE UsageFault",
9449             [EXCP_STKOF] = "v8M STKOF UsageFault",
9450             [EXCP_LAZYFP] = "v7M exception during lazy FP stacking",
9451             [EXCP_LSERR] = "v8M LSERR UsageFault",
9452             [EXCP_UNALIGNED] = "v7M UNALIGNED UsageFault",
9453             [EXCP_DIVBYZERO] = "v7M DIVBYZERO UsageFault",
9454             [EXCP_VSERR] = "Virtual SERR",
9455         };
9456 
9457         if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
9458             exc = excnames[idx];
9459         }
9460         if (!exc) {
9461             exc = "unknown";
9462         }
9463         qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s] on CPU %d\n",
9464                       idx, exc, cs->cpu_index);
9465     }
9466 }
9467 
9468 /*
9469  * Function used to synchronize QEMU's AArch64 register set with AArch32
9470  * register set.  This is necessary when switching between AArch32 and AArch64
9471  * execution state.
9472  */
9473 void aarch64_sync_32_to_64(CPUARMState *env)
9474 {
9475     int i;
9476     uint32_t mode = env->uncached_cpsr & CPSR_M;
9477 
9478     /* We can blanket copy R[0:7] to X[0:7] */
9479     for (i = 0; i < 8; i++) {
9480         env->xregs[i] = env->regs[i];
9481     }
9482 
9483     /*
9484      * Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12.
9485      * Otherwise, they come from the banked user regs.
9486      */
9487     if (mode == ARM_CPU_MODE_FIQ) {
9488         for (i = 8; i < 13; i++) {
9489             env->xregs[i] = env->usr_regs[i - 8];
9490         }
9491     } else {
9492         for (i = 8; i < 13; i++) {
9493             env->xregs[i] = env->regs[i];
9494         }
9495     }
9496 
9497     /*
9498      * Registers x13-x23 are the various mode SP and LR registers. Registers
9499      * r13 and r14 are only copied if we are in that mode, otherwise we copy
9500      * from the mode banked register.
9501      */
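    /*
     * For example (illustrative): when the CPU is in Supervisor mode,
     * SP_svc and LR_svc are presented as X19 and X18 respectively, while
     * the USR bank's SP/LR appear as X13/X14.
     */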
9502     if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
9503         env->xregs[13] = env->regs[13];
9504         env->xregs[14] = env->regs[14];
9505     } else {
9506         env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)];
9507         /* HYP is an exception in that it is copied from r14 */
9508         if (mode == ARM_CPU_MODE_HYP) {
9509             env->xregs[14] = env->regs[14];
9510         } else {
9511             env->xregs[14] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)];
9512         }
9513     }
9514 
9515     if (mode == ARM_CPU_MODE_HYP) {
9516         env->xregs[15] = env->regs[13];
9517     } else {
9518         env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)];
9519     }
9520 
9521     if (mode == ARM_CPU_MODE_IRQ) {
9522         env->xregs[16] = env->regs[14];
9523         env->xregs[17] = env->regs[13];
9524     } else {
9525         env->xregs[16] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)];
9526         env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)];
9527     }
9528 
9529     if (mode == ARM_CPU_MODE_SVC) {
9530         env->xregs[18] = env->regs[14];
9531         env->xregs[19] = env->regs[13];
9532     } else {
9533         env->xregs[18] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)];
9534         env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)];
9535     }
9536 
9537     if (mode == ARM_CPU_MODE_ABT) {
9538         env->xregs[20] = env->regs[14];
9539         env->xregs[21] = env->regs[13];
9540     } else {
9541         env->xregs[20] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)];
9542         env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)];
9543     }
9544 
9545     if (mode == ARM_CPU_MODE_UND) {
9546         env->xregs[22] = env->regs[14];
9547         env->xregs[23] = env->regs[13];
9548     } else {
9549         env->xregs[22] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)];
9550         env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)];
9551     }
9552 
9553     /*
9554      * Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
9555      * mode, then we can copy from r8-r14.  Otherwise, we copy from the
9556      * FIQ bank for r8-r14.
9557      */
9558     if (mode == ARM_CPU_MODE_FIQ) {
9559         for (i = 24; i < 31; i++) {
9560             env->xregs[i] = env->regs[i - 16];   /* X[24:30] <- R[8:14] */
9561         }
9562     } else {
9563         for (i = 24; i < 29; i++) {
9564             env->xregs[i] = env->fiq_regs[i - 24];
9565         }
9566         env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)];
9567         env->xregs[30] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)];
9568     }
9569 
9570     env->pc = env->regs[15];
9571 }
9572 
9573 /*
9574  * Function used to synchronize QEMU's AArch32 register set with AArch64
9575  * register set.  This is necessary when switching between AArch32 and AArch64
9576  * execution state.
9577  */
9578 void aarch64_sync_64_to_32(CPUARMState *env)
9579 {
9580     int i;
9581     uint32_t mode = env->uncached_cpsr & CPSR_M;
9582 
9583     /* We can blanket copy X[0:7] to R[0:7] */
9584     for (i = 0; i < 8; i++) {
9585         env->regs[i] = env->xregs[i];
9586     }
9587 
9588     /*
9589      * Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12.
9590      * Otherwise, we copy x8-x12 into the banked user regs.
9591      */
9592     if (mode == ARM_CPU_MODE_FIQ) {
9593         for (i = 8; i < 13; i++) {
9594             env->usr_regs[i - 8] = env->xregs[i];
9595         }
9596     } else {
9597         for (i = 8; i < 13; i++) {
9598             env->regs[i] = env->xregs[i];
9599         }
9600     }
9601 
9602     /*
9603      * Registers r13 & r14 depend on the current mode.
9604      * If we are in a given mode, we copy the corresponding x registers to r13
9605      * and r14.  Otherwise, we copy the x register to the banked r13 and r14
9606      * for the mode.
9607      */
9608     if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
9609         env->regs[13] = env->xregs[13];
9610         env->regs[14] = env->xregs[14];
9611     } else {
9612         env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13];
9613 
9614         /*
9615          * HYP is an exception in that it does not have its own banked r14 but
9616          * shares the USR r14
9617          */
9618         if (mode == ARM_CPU_MODE_HYP) {
9619             env->regs[14] = env->xregs[14];
9620         } else {
9621             env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)] = env->xregs[14];
9622         }
9623     }
9624 
9625     if (mode == ARM_CPU_MODE_HYP) {
9626         env->regs[13] = env->xregs[15];
9627     } else {
9628         env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15];
9629     }
9630 
9631     if (mode == ARM_CPU_MODE_IRQ) {
9632         env->regs[14] = env->xregs[16];
9633         env->regs[13] = env->xregs[17];
9634     } else {
9635         env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16];
9636         env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17];
9637     }
9638 
9639     if (mode == ARM_CPU_MODE_SVC) {
9640         env->regs[14] = env->xregs[18];
9641         env->regs[13] = env->xregs[19];
9642     } else {
9643         env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18];
9644         env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19];
9645     }
9646 
9647     if (mode == ARM_CPU_MODE_ABT) {
9648         env->regs[14] = env->xregs[20];
9649         env->regs[13] = env->xregs[21];
9650     } else {
9651         env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20];
9652         env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21];
9653     }
9654 
9655     if (mode == ARM_CPU_MODE_UND) {
9656         env->regs[14] = env->xregs[22];
9657         env->regs[13] = env->xregs[23];
9658     } else {
9659         env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)] = env->xregs[22];
9660         env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23];
9661     }
9662 
9663     /* Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
9664      * mode, then we can copy to r8-r14.  Otherwise, we copy to the
9665      * FIQ bank for r8-r14.
9666      */
9667     if (mode == ARM_CPU_MODE_FIQ) {
9668         for (i = 24; i < 31; i++) {
9669             env->regs[i - 16] = env->xregs[i];   /* X[24:30] -> R[8:14] */
9670         }
9671     } else {
9672         for (i = 24; i < 29; i++) {
9673             env->fiq_regs[i - 24] = env->xregs[i];
9674         }
9675         env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29];
9676         env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30];
9677     }
9678 
9679     env->regs[15] = env->pc;
9680 }
9681 
9682 static void take_aarch32_exception(CPUARMState *env, int new_mode,
9683                                    uint32_t mask, uint32_t offset,
9684                                    uint32_t newpc)
9685 {
9686     int new_el;
9687 
9688     /* Change the CPU state so as to actually take the exception. */
9689     switch_mode(env, new_mode);
9690 
9691     /*
9692      * For exceptions taken to AArch32 we must clear the SS bit in both
9693      * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now.
9694      */
9695     env->pstate &= ~PSTATE_SS;
9696     env->spsr = cpsr_read(env);
9697     /* Clear IT bits.  */
9698     env->condexec_bits = 0;
9699     /* Switch to the new mode, and to the correct instruction set.  */
9700     env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
9701 
9702     /* This must be after mode switching. */
9703     new_el = arm_current_el(env);
9704 
9705     /* Set new mode endianness */
9706     env->uncached_cpsr &= ~CPSR_E;
9707     if (env->cp15.sctlr_el[new_el] & SCTLR_EE) {
9708         env->uncached_cpsr |= CPSR_E;
9709     }
9710     /* J and IL must always be cleared for exception entry */
9711     env->uncached_cpsr &= ~(CPSR_IL | CPSR_J);
9712     env->daif |= mask;
9713 
9714     if (cpu_isar_feature(aa32_ssbs, env_archcpu(env))) {
9715         if (env->cp15.sctlr_el[new_el] & SCTLR_DSSBS_32) {
9716             env->uncached_cpsr |= CPSR_SSBS;
9717         } else {
9718             env->uncached_cpsr &= ~CPSR_SSBS;
9719         }
9720     }
9721 
9722     if (new_mode == ARM_CPU_MODE_HYP) {
9723         env->thumb = (env->cp15.sctlr_el[2] & SCTLR_TE) != 0;
9724         env->elr_el[2] = env->regs[15];
9725     } else {
9726         /* CPSR.PAN is normally preserved unless...  */
9727         if (cpu_isar_feature(aa32_pan, env_archcpu(env))) {
9728             switch (new_el) {
9729             case 3:
9730                 if (!arm_is_secure_below_el3(env)) {
9731                     /* ... the target is EL3, from non-secure state.  */
9732                     env->uncached_cpsr &= ~CPSR_PAN;
9733                     break;
9734                 }
9735                 /* ... the target is EL3, from secure state ... */
9736                 /* fall through */
9737             case 1:
9738                 /* ... the target is EL1 and SCTLR.SPAN is 0.  */
9739                 if (!(env->cp15.sctlr_el[new_el] & SCTLR_SPAN)) {
9740                     env->uncached_cpsr |= CPSR_PAN;
9741                 }
9742                 break;
9743             }
9744         }
9745         /*
9746          * Strictly speaking there was no c1_sys (SCTLR) on V4T/V5, so this
9747          * read is a harmless fiction; guarding on V4T below is what matters.
9748          */
9749         if (arm_feature(env, ARM_FEATURE_V4T)) {
9750             env->thumb =
9751                 (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0;
9752         }
9753         env->regs[14] = env->regs[15] + offset;
9754     }
9755     env->regs[15] = newpc;
9756     arm_rebuild_hflags(env);
9757 }
9758 
9759 static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs)
9760 {
9761     /*
9762      * Handle exception entry to Hyp mode; this is sufficiently
9763      * different to entry to other AArch32 modes that we handle it
9764      * separately here.
9765      *
9766      * The vector table entry used is always the 0x14 Hyp mode entry point,
9767      * unless this is an UNDEF/SVC/HVC/abort taken from Hyp to Hyp.
9768      * The offset applied to the preferred return address is always zero
9769      * (see DDI0487C.a section G1.12.3).
9770      * PSTATE A/I/F masks are set based only on the SCR.EA/IRQ/FIQ values.
9771      */
9772     uint32_t addr, mask;
9773     ARMCPU *cpu = ARM_CPU(cs);
9774     CPUARMState *env = &cpu->env;
9775 
9776     switch (cs->exception_index) {
9777     case EXCP_UDEF:
9778         addr = 0x04;
9779         break;
9780     case EXCP_SWI:
9781         addr = 0x08;
9782         break;
9783     case EXCP_BKPT:
9784         /* Fall through to prefetch abort.  */
9785     case EXCP_PREFETCH_ABORT:
9786         env->cp15.ifar_s = env->exception.vaddress;
9787         qemu_log_mask(CPU_LOG_INT, "...with HIFAR 0x%x\n",
9788                       (uint32_t)env->exception.vaddress);
9789         addr = 0x0c;
9790         break;
9791     case EXCP_DATA_ABORT:
9792         env->cp15.dfar_s = env->exception.vaddress;
9793         qemu_log_mask(CPU_LOG_INT, "...with HDFAR 0x%x\n",
9794                       (uint32_t)env->exception.vaddress);
9795         addr = 0x10;
9796         break;
9797     case EXCP_IRQ:
9798         addr = 0x18;
9799         break;
9800     case EXCP_FIQ:
9801         addr = 0x1c;
9802         break;
9803     case EXCP_HVC:
9804         addr = 0x08;
9805         break;
9806     case EXCP_HYP_TRAP:
9807         addr = 0x14;
9808         break;
9809     default:
9810         cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
9811     }
9812 
9813     if (cs->exception_index != EXCP_IRQ && cs->exception_index != EXCP_FIQ) {
9814         if (!arm_feature(env, ARM_FEATURE_V8)) {
9815             /*
9816              * QEMU syndrome values are v8-style. v7 has the IL bit
9817              * UNK/SBZP for "field not valid" cases, where v8 uses RES1.
9818              * If this is a v7 CPU, squash the IL bit in those cases.
9819              */
9820             if (cs->exception_index == EXCP_PREFETCH_ABORT ||
9821                 (cs->exception_index == EXCP_DATA_ABORT &&
9822                  !(env->exception.syndrome & ARM_EL_ISV)) ||
9823                 syn_get_ec(env->exception.syndrome) == EC_UNCATEGORIZED) {
9824                 env->exception.syndrome &= ~ARM_EL_IL;
9825             }
9826         }
9827         env->cp15.esr_el[2] = env->exception.syndrome;
9828     }
9829 
9830     if (arm_current_el(env) != 2 && addr < 0x14) {
9831         addr = 0x14;
9832     }
9833 
9834     mask = 0;
9835     if (!(env->cp15.scr_el3 & SCR_EA)) {
9836         mask |= CPSR_A;
9837     }
9838     if (!(env->cp15.scr_el3 & SCR_IRQ)) {
9839         mask |= CPSR_I;
9840     }
9841     if (!(env->cp15.scr_el3 & SCR_FIQ)) {
9842         mask |= CPSR_F;
9843     }
9844 
9845     addr += env->cp15.hvbar;
9846 
9847     take_aarch32_exception(env, ARM_CPU_MODE_HYP, mask, 0, addr);
9848 }
9849 
9850 static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
9851 {
9852     ARMCPU *cpu = ARM_CPU(cs);
9853     CPUARMState *env = &cpu->env;
9854     uint32_t addr;
9855     uint32_t mask;
9856     int new_mode;
9857     uint32_t offset;
9858     uint32_t moe;
9859 
9860     /* If this is a debug exception we must update the DBGDSCR.MOE bits */
9861     switch (syn_get_ec(env->exception.syndrome)) {
9862     case EC_BREAKPOINT:
9863     case EC_BREAKPOINT_SAME_EL:
9864         moe = 1;
9865         break;
9866     case EC_WATCHPOINT:
9867     case EC_WATCHPOINT_SAME_EL:
9868         moe = 10;
9869         break;
9870     case EC_AA32_BKPT:
9871         moe = 3;
9872         break;
9873     case EC_VECTORCATCH:
9874         moe = 5;
9875         break;
9876     default:
9877         moe = 0;
9878         break;
9879     }
9880 
9881     if (moe) {
9882         env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe);
9883     }
9884 
9885     if (env->exception.target_el == 2) {
9886         arm_cpu_do_interrupt_aarch32_hyp(cs);
9887         return;
9888     }
9889 
9890     switch (cs->exception_index) {
9891     case EXCP_UDEF:
9892         new_mode = ARM_CPU_MODE_UND;
9893         addr = 0x04;
9894         mask = CPSR_I;
9895         if (env->thumb)
9896             offset = 2;
9897         else
9898             offset = 4;
9899         break;
9900     case EXCP_SWI:
9901         new_mode = ARM_CPU_MODE_SVC;
9902         addr = 0x08;
9903         mask = CPSR_I;
9904         /* The PC already points to the next instruction.  */
9905         offset = 0;
9906         break;
9907     case EXCP_BKPT:
9908         /* Fall through to prefetch abort.  */
9909     case EXCP_PREFETCH_ABORT:
9910         A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr);
9911         A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress);
9912         qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n",
9913                       env->exception.fsr, (uint32_t)env->exception.vaddress);
9914         new_mode = ARM_CPU_MODE_ABT;
9915         addr = 0x0c;
9916         mask = CPSR_A | CPSR_I;
9917         offset = 4;
9918         break;
9919     case EXCP_DATA_ABORT:
9920         A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
9921         A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress);
9922         qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
9923                       env->exception.fsr,
9924                       (uint32_t)env->exception.vaddress);
9925         new_mode = ARM_CPU_MODE_ABT;
9926         addr = 0x10;
9927         mask = CPSR_A | CPSR_I;
9928         offset = 8;
9929         break;
9930     case EXCP_IRQ:
9931         new_mode = ARM_CPU_MODE_IRQ;
9932         addr = 0x18;
9933         /* Disable IRQ and imprecise data aborts.  */
9934         mask = CPSR_A | CPSR_I;
9935         offset = 4;
9936         if (env->cp15.scr_el3 & SCR_IRQ) {
9937             /* IRQ routed to monitor mode */
9938             new_mode = ARM_CPU_MODE_MON;
9939             mask |= CPSR_F;
9940         }
9941         break;
9942     case EXCP_FIQ:
9943         new_mode = ARM_CPU_MODE_FIQ;
9944         addr = 0x1c;
9945         /* Disable FIQ, IRQ and imprecise data aborts.  */
9946         mask = CPSR_A | CPSR_I | CPSR_F;
9947         if (env->cp15.scr_el3 & SCR_FIQ) {
9948             /* FIQ routed to monitor mode */
9949             new_mode = ARM_CPU_MODE_MON;
9950         }
9951         offset = 4;
9952         break;
9953     case EXCP_VIRQ:
9954         new_mode = ARM_CPU_MODE_IRQ;
9955         addr = 0x18;
9956         /* Disable IRQ and imprecise data aborts.  */
9957         mask = CPSR_A | CPSR_I;
9958         offset = 4;
9959         break;
9960     case EXCP_VFIQ:
9961         new_mode = ARM_CPU_MODE_FIQ;
9962         addr = 0x1c;
9963         /* Disable FIQ, IRQ and imprecise data aborts.  */
9964         mask = CPSR_A | CPSR_I | CPSR_F;
9965         offset = 4;
9966         break;
9967     case EXCP_VSERR:
9968         {
9969             /*
9970              * Note that this is reported as a data abort, but the DFAR
9971              * has an UNKNOWN value.  Construct the SError syndrome from
9972              * AET and ExT fields.
9973              */
9974             ARMMMUFaultInfo fi = { .type = ARMFault_AsyncExternal, };
9975 
9976             if (extended_addresses_enabled(env)) {
9977                 env->exception.fsr = arm_fi_to_lfsc(&fi);
9978             } else {
9979                 env->exception.fsr = arm_fi_to_sfsc(&fi);
9980             }
9981             env->exception.fsr |= env->cp15.vsesr_el2 & 0xd000;
9982             A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
9983             qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x\n",
9984                           env->exception.fsr);
9985 
9986             new_mode = ARM_CPU_MODE_ABT;
9987             addr = 0x10;
9988             mask = CPSR_A | CPSR_I;
9989             offset = 8;
9990         }
9991         break;
9992     case EXCP_SMC:
9993         new_mode = ARM_CPU_MODE_MON;
9994         addr = 0x08;
9995         mask = CPSR_A | CPSR_I | CPSR_F;
9996         offset = 0;
9997         break;
9998     default:
9999         cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
10000         return; /* Never happens.  Keep compiler happy.  */
10001     }
10002 
10003     if (new_mode == ARM_CPU_MODE_MON) {
10004         addr += env->cp15.mvbar;
10005     } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
10006         /* High vectors. When enabled, base address cannot be remapped. */
10007         addr += 0xffff0000;
10008     } else {
10009         /* ARM v7 architectures provide a vector base address register to remap
10010          * the interrupt vector table.
10011          * This register is only followed in non-monitor mode, and is banked.
10012          * Note: only bits 31:5 are valid.
10013          */
10014         addr += A32_BANKED_CURRENT_REG_GET(env, vbar);
10015     }
10016 
10017     if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
10018         env->cp15.scr_el3 &= ~SCR_NS;
10019     }
10020 
10021     take_aarch32_exception(env, new_mode, mask, offset, addr);
10022 }
10023 
10024 static int aarch64_regnum(CPUARMState *env, int aarch32_reg)
10025 {
10026     /*
10027      * Return the register number of the AArch64 view of the AArch32
10028      * register @aarch32_reg. The CPUARMState CPSR is assumed to still
10029      * be that of the AArch32 mode the exception came from.
10030      */
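    /*
     * For example (illustrative): a coprocessor access trapped from FIQ
     * mode with AArch32 Rt == 8 is reported with AArch64 register number
     * 24, since the FIQ-banked r8 maps to x24.
     */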
10031     int mode = env->uncached_cpsr & CPSR_M;
10032 
10033     switch (aarch32_reg) {
10034     case 0 ... 7:
10035         return aarch32_reg;
10036     case 8 ... 12:
10037         return mode == ARM_CPU_MODE_FIQ ? aarch32_reg + 16 : aarch32_reg;
10038     case 13:
10039         switch (mode) {
10040         case ARM_CPU_MODE_USR:
10041         case ARM_CPU_MODE_SYS:
10042             return 13;
10043         case ARM_CPU_MODE_HYP:
10044             return 15;
10045         case ARM_CPU_MODE_IRQ:
10046             return 17;
10047         case ARM_CPU_MODE_SVC:
10048             return 19;
10049         case ARM_CPU_MODE_ABT:
10050             return 21;
10051         case ARM_CPU_MODE_UND:
10052             return 23;
10053         case ARM_CPU_MODE_FIQ:
10054             return 29;
10055         default:
10056             g_assert_not_reached();
10057         }
10058     case 14:
10059         switch (mode) {
10060         case ARM_CPU_MODE_USR:
10061         case ARM_CPU_MODE_SYS:
10062         case ARM_CPU_MODE_HYP:
10063             return 14;
10064         case ARM_CPU_MODE_IRQ:
10065             return 16;
10066         case ARM_CPU_MODE_SVC:
10067             return 18;
10068         case ARM_CPU_MODE_ABT:
10069             return 20;
10070         case ARM_CPU_MODE_UND:
10071             return 22;
10072         case ARM_CPU_MODE_FIQ:
10073             return 30;
10074         default:
10075             g_assert_not_reached();
10076         }
10077     case 15:
10078         return 31;
10079     default:
10080         g_assert_not_reached();
10081     }
10082 }
10083 
10084 static uint32_t cpsr_read_for_spsr_elx(CPUARMState *env)
10085 {
10086     uint32_t ret = cpsr_read(env);
10087 
10088     /* Move DIT to the correct location for SPSR_ELx */
10089     if (ret & CPSR_DIT) {
10090         ret &= ~CPSR_DIT;
10091         ret |= PSTATE_DIT;
10092     }
10093     /* Merge PSTATE.SS into SPSR_ELx */
10094     ret |= env->pstate & PSTATE_SS;
10095 
10096     return ret;
10097 }
10098 
10099 static bool syndrome_is_sync_extabt(uint32_t syndrome)
10100 {
10101     /* Return true if this syndrome value is a synchronous external abort */
10102     switch (syn_get_ec(syndrome)) {
10103     case EC_INSNABORT:
10104     case EC_INSNABORT_SAME_EL:
10105     case EC_DATAABORT:
10106     case EC_DATAABORT_SAME_EL:
10107         /* Look at fault status code for all the synchronous ext abort cases */
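        /*
         * These are the DFSC/IFSC encodings for synchronous external
         * aborts: 0x10 is "not on translation table walk" and
         * 0x13..0x17 are the "on translation table walk" encodings.
         */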
10108         switch (syndrome & 0x3f) {
10109         case 0x10:
10110         case 0x13:
10111         case 0x14:
10112         case 0x15:
10113         case 0x16:
10114         case 0x17:
10115             return true;
10116         default:
10117             return false;
10118         }
10119     default:
10120         return false;
10121     }
10122 }
10123 
10124 /* Handle exception entry to a target EL which is using AArch64 */
10125 static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
10126 {
10127     ARMCPU *cpu = ARM_CPU(cs);
10128     CPUARMState *env = &cpu->env;
10129     unsigned int new_el = env->exception.target_el;
10130     target_ulong addr = env->cp15.vbar_el[new_el];
10131     unsigned int new_mode = aarch64_pstate_mode(new_el, true);
10132     unsigned int old_mode;
10133     unsigned int cur_el = arm_current_el(env);
10134     int rt;
10135 
10136     /*
10137      * Note that new_el can never be 0.  If cur_el is 0, then
10138      * el0_a64 is is_a64(), else el0_a64 is ignored.
10139      */
10140     aarch64_sve_change_el(env, cur_el, new_el, is_a64(env));
10141 
10142     if (cur_el < new_el) {
10143         /* Entry vector offset depends on whether the implemented EL
10144          * immediately lower than the target level is using AArch32 or AArch64
10145          */
10146         bool is_aa64;
10147         uint64_t hcr;
10148 
10149         switch (new_el) {
10150         case 3:
10151             is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0;
10152             break;
10153         case 2:
10154             hcr = arm_hcr_el2_eff(env);
10155             if ((hcr & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
10156                 is_aa64 = (hcr & HCR_RW) != 0;
10157                 break;
10158             }
10159             /* fall through */
10160         case 1:
10161             is_aa64 = is_a64(env);
10162             break;
10163         default:
10164             g_assert_not_reached();
10165         }
10166 
10167         if (is_aa64) {
10168             addr += 0x400;
10169         } else {
10170             addr += 0x600;
10171         }
10172     } else if (pstate_read(env) & PSTATE_SP) {
10173         addr += 0x200;
10174     }
10175 
10176     switch (cs->exception_index) {
10177     case EXCP_PREFETCH_ABORT:
10178     case EXCP_DATA_ABORT:
10179         /*
10180          * FEAT_DoubleFault allows synchronous external aborts taken to EL3
10181          * to be taken to the SError vector entrypoint.
10182          */
10183         if (new_el == 3 && (env->cp15.scr_el3 & SCR_EASE) &&
10184             syndrome_is_sync_extabt(env->exception.syndrome)) {
10185             addr += 0x180;
10186         }
10187         env->cp15.far_el[new_el] = env->exception.vaddress;
10188         qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
10189                       env->cp15.far_el[new_el]);
10190         /* fall through */
10191     case EXCP_BKPT:
10192     case EXCP_UDEF:
10193     case EXCP_SWI:
10194     case EXCP_HVC:
10195     case EXCP_HYP_TRAP:
10196     case EXCP_SMC:
10197         switch (syn_get_ec(env->exception.syndrome)) {
10198         case EC_ADVSIMDFPACCESSTRAP:
10199             /*
10200              * QEMU internal FP/SIMD syndromes from AArch32 include the
10201              * TA and coproc fields which are only exposed if the exception
10202              * is taken to AArch32 Hyp mode. Mask them out to get a valid
10203              * AArch64 format syndrome.
10204              */
10205             env->exception.syndrome &= ~MAKE_64BIT_MASK(0, 20);
10206             break;
10207         case EC_CP14RTTRAP:
10208         case EC_CP15RTTRAP:
10209         case EC_CP14DTTRAP:
10210             /*
10211              * For a trap on AArch32 MRC/MCR/LDC/STC the Rt field is currently
10212              * the raw register field from the insn; when taking this to
10213              * AArch64 we must convert it to the AArch64 view of the register
10214              * number. Notice that we read a 4-bit AArch32 register number and
10215              * write back a 5-bit AArch64 one.
10216              */
10217             rt = extract32(env->exception.syndrome, 5, 4);
10218             rt = aarch64_regnum(env, rt);
10219             env->exception.syndrome = deposit32(env->exception.syndrome,
10220                                                 5, 5, rt);
10221             break;
10222         case EC_CP15RRTTRAP:
10223         case EC_CP14RRTTRAP:
10224             /* Similarly for MRRC/MCRR traps for Rt and Rt2 fields */
10225             rt = extract32(env->exception.syndrome, 5, 4);
10226             rt = aarch64_regnum(env, rt);
10227             env->exception.syndrome = deposit32(env->exception.syndrome,
10228                                                 5, 5, rt);
10229             rt = extract32(env->exception.syndrome, 10, 4);
10230             rt = aarch64_regnum(env, rt);
10231             env->exception.syndrome = deposit32(env->exception.syndrome,
10232                                                 10, 5, rt);
10233             break;
10234         }
10235         env->cp15.esr_el[new_el] = env->exception.syndrome;
10236         break;
10237     case EXCP_IRQ:
10238     case EXCP_VIRQ:
10239         addr += 0x80;
10240         break;
10241     case EXCP_FIQ:
10242     case EXCP_VFIQ:
10243         addr += 0x100;
10244         break;
10245     case EXCP_VSERR:
10246         addr += 0x180;
10247         /* Construct the SError syndrome from IDS and ISS fields. */
10248         env->exception.syndrome = syn_serror(env->cp15.vsesr_el2 & 0x1ffffff);
10249         env->cp15.esr_el[new_el] = env->exception.syndrome;
10250         break;
10251     default:
10252         cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
10253     }
10254 
10255     if (is_a64(env)) {
10256         old_mode = pstate_read(env);
10257         aarch64_save_sp(env, arm_current_el(env));
10258         env->elr_el[new_el] = env->pc;
10259     } else {
10260         old_mode = cpsr_read_for_spsr_elx(env);
10261         env->elr_el[new_el] = env->regs[15];
10262 
10263         aarch64_sync_32_to_64(env);
10264 
10265         env->condexec_bits = 0;
10266     }
10267     env->banked_spsr[aarch64_banked_spsr_index(new_el)] = old_mode;
10268 
10269     qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n",
10270                   env->elr_el[new_el]);
10271 
10272     if (cpu_isar_feature(aa64_pan, cpu)) {
10273         /* The value of PSTATE.PAN is normally preserved, except when ... */
10274         new_mode |= old_mode & PSTATE_PAN;
10275         switch (new_el) {
10276         case 2:
10277             /* ... the target is EL2 with HCR_EL2.{E2H,TGE} == '11' ...  */
10278             if ((arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE))
10279                 != (HCR_E2H | HCR_TGE)) {
10280                 break;
10281             }
10282             /* fall through */
10283         case 1:
10284             /* ... the target is EL1 ... */
10285             /* ... and SCTLR_ELx.SPAN == 0, then set to 1.  */
10286             if ((env->cp15.sctlr_el[new_el] & SCTLR_SPAN) == 0) {
10287                 new_mode |= PSTATE_PAN;
10288             }
10289             break;
10290         }
10291     }
10292     if (cpu_isar_feature(aa64_mte, cpu)) {
10293         new_mode |= PSTATE_TCO;
10294     }
10295 
10296     if (cpu_isar_feature(aa64_ssbs, cpu)) {
10297         if (env->cp15.sctlr_el[new_el] & SCTLR_DSSBS_64) {
10298             new_mode |= PSTATE_SSBS;
10299         } else {
10300             new_mode &= ~PSTATE_SSBS;
10301         }
10302     }
10303 
10304     pstate_write(env, PSTATE_DAIF | new_mode);
10305     env->aarch64 = true;
10306     aarch64_restore_sp(env, new_el);
10307     helper_rebuild_hflags_a64(env, new_el);
10308 
10309     env->pc = addr;
10310 
10311     qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n",
10312                   new_el, env->pc, pstate_read(env));
10313 }
10314 
10315 /*
10316  * Do semihosting call and set the appropriate return value. All the
10317  * permission and validity checks have been done at translate time.
10318  *
10319  * We only see semihosting exceptions in TCG, as they are not
10320  * trapped to the hypervisor in KVM.
10321  */
10322 #ifdef CONFIG_TCG
10323 static void handle_semihosting(CPUState *cs)
10324 {
10325     ARMCPU *cpu = ARM_CPU(cs);
10326     CPUARMState *env = &cpu->env;
10327 
10328     if (is_a64(env)) {
10329         qemu_log_mask(CPU_LOG_INT,
10330                       "...handling as semihosting call 0x%" PRIx64 "\n",
10331                       env->xregs[0]);
10332         env->xregs[0] = do_common_semihosting(cs);
10333         env->pc += 4;
10334     } else {
10335         qemu_log_mask(CPU_LOG_INT,
10336                       "...handling as semihosting call 0x%x\n",
10337                       env->regs[0]);
10338         env->regs[0] = do_common_semihosting(cs);
10339         env->regs[15] += env->thumb ? 2 : 4;
10340     }
10341 }
10342 #endif
10343 
10344 /* Handle a CPU exception for A and R profile CPUs.
10345  * Do any appropriate logging, handle PSCI calls, and then hand off
10346  * to the AArch64-entry or AArch32-entry function depending on the
10347  * target exception level's register width.
10348  *
10349  * Note: this is used for both TCG (as the do_interrupt tcg op),
10350  *       and KVM to re-inject guest debug exceptions, and to
10351  *       inject a Synchronous-External-Abort.
10352  */
10353 void arm_cpu_do_interrupt(CPUState *cs)
10354 {
10355     ARMCPU *cpu = ARM_CPU(cs);
10356     CPUARMState *env = &cpu->env;
10357     unsigned int new_el = env->exception.target_el;
10358 
10359     assert(!arm_feature(env, ARM_FEATURE_M));
10360 
10361     arm_log_exception(cs);
10362     qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env),
10363                   new_el);
10364     if (qemu_loglevel_mask(CPU_LOG_INT)
10365         && !excp_is_internal(cs->exception_index)) {
10366         qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n",
10367                       syn_get_ec(env->exception.syndrome),
10368                       env->exception.syndrome);
10369     }
10370 
10371     if (arm_is_psci_call(cpu, cs->exception_index)) {
10372         arm_handle_psci_call(cpu);
10373         qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
10374         return;
10375     }
10376 
10377     /*
10378      * Semihosting semantics depend on the register width of the code
10379      * that caused the exception, not the target exception level, so
10380      * must be handled here.
10381      */
10382 #ifdef CONFIG_TCG
10383     if (cs->exception_index == EXCP_SEMIHOST) {
10384         handle_semihosting(cs);
10385         return;
10386     }
10387 #endif
10388 
10389     /* Hooks may change global state, so the BQL must be held; it is
10390      * also required for any modification of
10391      * cs->interrupt_request.
10392      */
10393     g_assert(qemu_mutex_iothread_locked());
10394 
10395     arm_call_pre_el_change_hook(cpu);
10396 
10397     assert(!excp_is_internal(cs->exception_index));
10398     if (arm_el_is_aa64(env, new_el)) {
10399         arm_cpu_do_interrupt_aarch64(cs);
10400     } else {
10401         arm_cpu_do_interrupt_aarch32(cs);
10402     }
10403 
10404     arm_call_el_change_hook(cpu);
10405 
10406     if (!kvm_enabled()) {
10407         cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
10408     }
10409 }
10410 #endif /* !CONFIG_USER_ONLY */
10411 
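/*
 * For example (illustrative): an EL0 access made under the EL2&0 (VHE)
 * translation regime uses SCTLR_EL2, so arm_sctlr() below returns
 * sctlr_el[2] rather than sctlr_el[1] in that case.
 */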
10412 uint64_t arm_sctlr(CPUARMState *env, int el)
10413 {
10414     /* Only EL0 needs to be adjusted for EL1&0 or EL2&0. */
10415     if (el == 0) {
10416         ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, 0);
10417         el = (mmu_idx == ARMMMUIdx_E20_0 || mmu_idx == ARMMMUIdx_SE20_0)
10418              ? 2 : 1;
10419     }
10420     return env->cp15.sctlr_el[el];
10421 }
10422 
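/*
 * For example (illustrative): for the EL1&0 regime the two TBI bits read
 * below are TCR_EL1.TBI0 (bit 37) and TBI1 (bit 38); a TCR value with only
 * TBI1 set yields 2, enabling top-byte-ignore only for the upper (TTBR1)
 * VA range.
 */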
10423 int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx)
10424 {
10425     if (regime_has_2_ranges(mmu_idx)) {
10426         return extract64(tcr, 37, 2);
10427     } else if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
10428         return 0; /* VTCR_EL2 */
10429     } else {
10430         /* Replicate the single TBI bit so we always have 2 bits.  */
10431         return extract32(tcr, 20, 1) * 3;
10432     }
10433 }
10434 
10435 int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx)
10436 {
10437     if (regime_has_2_ranges(mmu_idx)) {
10438         return extract64(tcr, 51, 2);
10439     } else if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
10440         return 0; /* VTCR_EL2 */
10441     } else {
10442         /* Replicate the single TBID bit so we always have 2 bits.  */
10443         return extract32(tcr, 29, 1) * 3;
10444     }
10445 }
10446 
10447 static int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx)
10448 {
10449     if (regime_has_2_ranges(mmu_idx)) {
10450         return extract64(tcr, 57, 2);
10451     } else {
10452         /* Replicate the single TCMA bit so we always have 2 bits.  */
10453         return extract32(tcr, 30, 1) * 3;
10454     }
10455 }
10456 
10457 ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
10458                                    ARMMMUIdx mmu_idx, bool data)
10459 {
10460     uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
10461     bool epd, hpd, using16k, using64k, tsz_oob, ds;
10462     int select, tsz, tbi, max_tsz, min_tsz, ps, sh;
10463     ARMCPU *cpu = env_archcpu(env);
10464 
10465     if (!regime_has_2_ranges(mmu_idx)) {
10466         select = 0;
10467         tsz = extract32(tcr, 0, 6);
10468         using64k = extract32(tcr, 14, 1);
10469         using16k = extract32(tcr, 15, 1);
10470         if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
10471             /* VTCR_EL2 */
10472             hpd = false;
10473         } else {
10474             hpd = extract32(tcr, 24, 1);
10475         }
10476         epd = false;
10477         sh = extract32(tcr, 12, 2);
10478         ps = extract32(tcr, 16, 3);
10479         ds = extract64(tcr, 32, 1);
10480     } else {
10481         /*
10482          * Bit 55 is always between the two regions, and is canonical for
10483          * determining if address tagging is enabled.
10484          */
10485         select = extract64(va, 55, 1);
10486         if (!select) {
10487             tsz = extract32(tcr, 0, 6);
10488             epd = extract32(tcr, 7, 1);
10489             sh = extract32(tcr, 12, 2);
10490             using64k = extract32(tcr, 14, 1);
10491             using16k = extract32(tcr, 15, 1);
10492             hpd = extract64(tcr, 41, 1);
10493         } else {
10494             int tg = extract32(tcr, 30, 2);
10495             using16k = tg == 1;
10496             using64k = tg == 3;
10497             tsz = extract32(tcr, 16, 6);
10498             epd = extract32(tcr, 23, 1);
10499             sh = extract32(tcr, 28, 2);
10500             hpd = extract64(tcr, 42, 1);
10501         }
10502         ps = extract64(tcr, 32, 3);
10503         ds = extract64(tcr, 59, 1);
10504     }
10505 
10506     if (cpu_isar_feature(aa64_st, cpu)) {
10507         max_tsz = 48 - using64k;
10508     } else {
10509         max_tsz = 39;
10510     }
10511 
10512     /*
10513      * DS is RES0 unless FEAT_LPA2 is supported for the given page size;
10514      * adjust the effective value of DS, as documented.
10515      */
10516     min_tsz = 16;
10517     if (using64k) {
10518         if (cpu_isar_feature(aa64_lva, cpu)) {
10519             min_tsz = 12;
10520         }
10521         ds = false;
10522     } else if (ds) {
10523         switch (mmu_idx) {
10524         case ARMMMUIdx_Stage2:
10525         case ARMMMUIdx_Stage2_S:
10526             if (using16k) {
10527                 ds = cpu_isar_feature(aa64_tgran16_2_lpa2, cpu);
10528             } else {
10529                 ds = cpu_isar_feature(aa64_tgran4_2_lpa2, cpu);
10530             }
10531             break;
10532         default:
10533             if (using16k) {
10534                 ds = cpu_isar_feature(aa64_tgran16_lpa2, cpu);
10535             } else {
10536                 ds = cpu_isar_feature(aa64_tgran4_lpa2, cpu);
10537             }
10538             break;
10539         }
10540         if (ds) {
10541             min_tsz = 12;
10542         }
10543     }
10544 
10545     if (tsz > max_tsz) {
10546         tsz = max_tsz;
10547         tsz_oob = true;
10548     } else if (tsz < min_tsz) {
10549         tsz = min_tsz;
10550         tsz_oob = true;
10551     } else {
10552         tsz_oob = false;
10553     }
10554 
10555     /* Present TBI as a composite with TBID.  */
10556     tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
10557     if (!data) {
10558         tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
10559     }
10560     tbi = (tbi >> select) & 1;
10561 
10562     return (ARMVAParameters) {
10563         .tsz = tsz,
10564         .ps = ps,
10565         .sh = sh,
10566         .select = select,
10567         .tbi = tbi,
10568         .epd = epd,
10569         .hpd = hpd,
10570         .using16k = using16k,
10571         .using64k = using64k,
10572         .tsz_oob = tsz_oob,
10573         .ds = ds,
10574     };
10575 }
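/*
 * Worked example (illustrative): a TTBR0_EL1 translation (VA bit 55 == 0)
 * with TCR_EL1.T0SZ == 25, TG0 == 0 (4K granule) and TBI0 == 0 yields
 * select = 0, tsz = 25 (a 39-bit VA range), using16k = using64k = false
 * and tbi = 0.
 */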
10576 
10577 /* Note that signed overflow is undefined in C.  The following routines are
10578    careful to use unsigned types where modulo arithmetic is required.
10579    Failure to do so _will_ break on newer gcc.  */
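/*
 * The blocks that follow define ADD16/SUB16/ADD8/SUB8 and a helper name
 * prefix (PFX), then include op_addsub.h to expand them into the
 * corresponding parallel add/subtract helpers; for instance (illustrative)
 * the "q" block below generates the signed saturating HELPER(qadd16) and
 * friends.
 */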
10580 
10581 /* Signed saturating arithmetic.  */
10582 
10583 /* Perform 16-bit signed saturating addition.  */
10584 static inline uint16_t add16_sat(uint16_t a, uint16_t b)
10585 {
10586     uint16_t res;
10587 
10588     res = a + b;
10589     if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
10590         if (a & 0x8000)
10591             res = 0x8000;
10592         else
10593             res = 0x7fff;
10594     }
10595     return res;
10596 }
10597 
10598 /* Perform 8-bit signed saturating addition.  */
10599 static inline uint8_t add8_sat(uint8_t a, uint8_t b)
10600 {
10601     uint8_t res;
10602 
10603     res = a + b;
10604     if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
10605         if (a & 0x80)
10606             res = 0x80;
10607         else
10608             res = 0x7f;
10609     }
10610     return res;
10611 }
10612 
10613 /* Perform 16-bit signed saturating subtraction.  */
10614 static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
10615 {
10616     uint16_t res;
10617 
10618     res = a - b;
10619     if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
10620         if (a & 0x8000)
10621             res = 0x8000;
10622         else
10623             res = 0x7fff;
10624     }
10625     return res;
10626 }
10627 
10628 /* Perform 8-bit signed saturating subtraction.  */
10629 static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
10630 {
10631     uint8_t res;
10632 
10633     res = a - b;
10634     if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
10635         if (a & 0x80)
10636             res = 0x80;
10637         else
10638             res = 0x7f;
10639     }
10640     return res;
10641 }
10642 
10643 #define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
10644 #define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
10645 #define ADD8(a, b, n)  RESULT(add8_sat(a, b), n, 8);
10646 #define SUB8(a, b, n)  RESULT(sub8_sat(a, b), n, 8);
10647 #define PFX q
10648 
10649 #include "op_addsub.h"
10650 
10651 /* Unsigned saturating arithmetic.  */
10652 static inline uint16_t add16_usat(uint16_t a, uint16_t b)
10653 {
10654     uint16_t res;
10655     res = a + b;
10656     if (res < a)
10657         res = 0xffff;
10658     return res;
10659 }
10660 
10661 static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
10662 {
10663     if (a > b)
10664         return a - b;
10665     else
10666         return 0;
10667 }
10668 
10669 static inline uint8_t add8_usat(uint8_t a, uint8_t b)
10670 {
10671     uint8_t res;
10672     res = a + b;
10673     if (res < a)
10674         res = 0xff;
10675     return res;
10676 }
10677 
10678 static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
10679 {
10680     if (a > b)
10681         return a - b;
10682     else
10683         return 0;
10684 }
10685 
10686 #define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
10687 #define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
10688 #define ADD8(a, b, n)  RESULT(add8_usat(a, b), n, 8);
10689 #define SUB8(a, b, n)  RESULT(sub8_usat(a, b), n, 8);
10690 #define PFX uq
10691 
10692 #include "op_addsub.h"
10693 
10694 /* Signed modulo arithmetic.  */
10695 #define SARITH16(a, b, n, op) do { \
10696     int32_t sum; \
10697     sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
10698     RESULT(sum, n, 16); \
10699     if (sum >= 0) \
10700         ge |= 3 << (n * 2); \
10701     } while(0)
10702 
10703 #define SARITH8(a, b, n, op) do { \
10704     int32_t sum; \
10705     sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
10706     RESULT(sum, n, 8); \
10707     if (sum >= 0) \
10708         ge |= 1 << n; \
10709     } while(0)
10710 
10711 
10712 #define ADD16(a, b, n) SARITH16(a, b, n, +)
10713 #define SUB16(a, b, n) SARITH16(a, b, n, -)
10714 #define ADD8(a, b, n)  SARITH8(a, b, n, +)
10715 #define SUB8(a, b, n)  SARITH8(a, b, n, -)
10716 #define PFX s
10717 #define ARITH_GE
10718 
10719 #include "op_addsub.h"
10720 
10721 /* Unsigned modulo arithmetic.  */
10722 #define ADD16(a, b, n) do { \
10723     uint32_t sum; \
10724     sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
10725     RESULT(sum, n, 16); \
10726     if ((sum >> 16) == 1) \
10727         ge |= 3 << (n * 2); \
10728     } while(0)
10729 
10730 #define ADD8(a, b, n) do { \
10731     uint32_t sum; \
10732     sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
10733     RESULT(sum, n, 8); \
10734     if ((sum >> 8) == 1) \
10735         ge |= 1 << n; \
10736     } while(0)
10737 
10738 #define SUB16(a, b, n) do { \
10739     uint32_t sum; \
10740     sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
10741     RESULT(sum, n, 16); \
10742     if ((sum >> 16) == 0) \
10743         ge |= 3 << (n * 2); \
10744     } while(0)
10745 
10746 #define SUB8(a, b, n) do { \
10747     uint32_t sum; \
10748     sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
10749     RESULT(sum, n, 8); \
10750     if ((sum >> 8) == 0) \
10751         ge |= 1 << n; \
10752     } while(0)
10753 
10754 #define PFX u
10755 #define ARITH_GE
10756 
10757 #include "op_addsub.h"
10758 
10759 /* Halved signed arithmetic.  */
10760 #define ADD16(a, b, n) \
10761   RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
10762 #define SUB16(a, b, n) \
10763   RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
10764 #define ADD8(a, b, n) \
10765   RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
10766 #define SUB8(a, b, n) \
10767   RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
10768 #define PFX sh
10769 
10770 #include "op_addsub.h"
10771 
10772 /* Halved unsigned arithmetic.  */
10773 #define ADD16(a, b, n) \
10774   RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
10775 #define SUB16(a, b, n) \
10776   RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
10777 #define ADD8(a, b, n) \
10778   RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
10779 #define SUB8(a, b, n) \
10780   RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
10781 #define PFX uh
10782 
10783 #include "op_addsub.h"
10784 
10785 static inline uint8_t do_usad(uint8_t a, uint8_t b)
10786 {
10787     if (a > b)
10788         return a - b;
10789     else
10790         return b - a;
10791 }
10792 
10793 /* Unsigned sum of absolute byte differences.  */
10794 uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
10795 {
10796     uint32_t sum;
10797     sum = do_usad(a, b);
10798     sum += do_usad(a >> 8, b >> 8);
10799     sum += do_usad(a >> 16, b >> 16);
10800     sum += do_usad(a >> 24, b >> 24);
10801     return sum;
10802 }
10803 
10804 /* For ARMv6 SEL instruction.  */
10805 uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
10806 {
10807     uint32_t mask;
10808 
10809     mask = 0;
10810     if (flags & 1)
10811         mask |= 0xff;
10812     if (flags & 2)
10813         mask |= 0xff00;
10814     if (flags & 4)
10815         mask |= 0xff0000;
10816     if (flags & 8)
10817         mask |= 0xff000000;
10818     return (a & mask) | (b & ~mask);
10819 }
10820 
10821 /* CRC helpers.
10822  * The upper bytes of val (above the number specified by 'bytes') must have
10823  * been zeroed out by the caller.
10824  */
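/*
 * For example (illustrative): a CRC32B instruction passes its single data
 * byte in the low 8 bits of 'val' with bytes == 1, while CRC32W passes the
 * whole word with bytes == 4; the CRC32C* forms use the crc32c helper
 * instead.
 */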
10825 uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes)
10826 {
10827     uint8_t buf[4];
10828 
10829     stl_le_p(buf, val);
10830 
10831     /* zlib crc32 converts the accumulator and output to one's complement.  */
10832     return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
10833 }
10834 
10835 uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
10836 {
10837     uint8_t buf[4];
10838 
10839     stl_le_p(buf, val);
10840 
10841     /* Linux crc32c converts the output to one's complement.  */
10842     return crc32c(acc, buf, bytes) ^ 0xffffffff;
10843 }
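
/*
 * The byte, halfword and word forms of the CRC instructions call these
 * helpers with 'bytes' of 1, 2 or 4 and 'val' zero-extended as noted
 * above.  The XORs undo the inversion conventions of the library
 * routines, so the value the guest sees is the plain CRC over the data
 * with no implicit pre- or post-inversion, as the ARM CRC32/CRC32C
 * instructions specify.
 */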
10844 
10845 /* Return the exception level to which FP-disabled exceptions should
10846  * be taken, or 0 if FP is enabled.
10847  */
10848 int fp_exception_el(CPUARMState *env, int cur_el)
10849 {
10850 #ifndef CONFIG_USER_ONLY
10851     uint64_t hcr_el2;
10852 
10853     /* CPACR and the CPTR registers don't exist before v6, so FP is
10854      * always accessible.
10855      */
10856     if (!arm_feature(env, ARM_FEATURE_V6)) {
10857         return 0;
10858     }
10859 
10860     if (arm_feature(env, ARM_FEATURE_M)) {
10861         /* CPACR can cause a NOCP UsageFault taken to the current security state */
10862         if (!v7m_cpacr_pass(env, env->v7m.secure, cur_el != 0)) {
10863             return 1;
10864         }
10865 
10866         if (arm_feature(env, ARM_FEATURE_M_SECURITY) && !env->v7m.secure) {
10867             if (!extract32(env->v7m.nsacr, 10, 1)) {
10868                 /* FP insns cause a NOCP UsageFault taken to Secure */
10869                 return 3;
10870             }
10871         }
10872 
10873         return 0;
10874     }
10875 
10876     hcr_el2 = arm_hcr_el2_eff(env);
10877 
10878     /* The CPACR controls traps to EL1, or PL1 if we're 32 bit:
10879      * 0, 2 : trap EL0 and EL1/PL1 accesses
10880      * 1    : trap only EL0 accesses
10881      * 3    : trap no accesses
10882      * This register is ignored if E2H+TGE are both set.
10883      */
10884     if ((hcr_el2 & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
10885         int fpen = FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, FPEN);
10886 
10887         switch (fpen) {
10888         case 1:
10889             if (cur_el != 0) {
10890                 break;
10891             }
10892             /* fall through */
10893         case 0:
10894         case 2:
10895             /* Trap from Secure PL0 or PL1 to Secure PL1. */
10896             if (!arm_el_is_aa64(env, 3)
10897                 && (cur_el == 3 || arm_is_secure_below_el3(env))) {
10898                 return 3;
10899             }
10900             if (cur_el <= 1) {
10901                 return 1;
10902             }
10903             break;
10904         }
10905     }
10906 
10907     /*
10908      * The NSACR allows A-profile AArch32 EL3 and M-profile secure mode
10909      * to control non-secure access to the FPU. It doesn't have any
10910      * effect if EL3 is AArch64 or if EL3 doesn't exist at all.
10911      */
10912     if ((arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
10913          cur_el <= 2 && !arm_is_secure_below_el3(env))) {
10914         if (!extract32(env->cp15.nsacr, 10, 1)) {
10915             /* FP insns act as UNDEF */
10916             return cur_el == 2 ? 2 : 1;
10917         }
10918     }
10919 
10920     /*
10921      * CPTR_EL2 is present in v7VE or v8, and changes format
10922      * with HCR_EL2.E2H (regardless of TGE).
10923      */
10924     if (cur_el <= 2) {
10925         if (hcr_el2 & HCR_E2H) {
10926             switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, FPEN)) {
10927             case 1:
10928                 if (cur_el != 0 || !(hcr_el2 & HCR_TGE)) {
10929                     break;
10930                 }
10931                 /* fall through */
10932             case 0:
10933             case 2:
10934                 return 2;
10935             }
10936         } else if (arm_is_el2_enabled(env)) {
10937             if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TFP)) {
10938                 return 2;
10939             }
10940         }
10941     }
10942 
10943     /* CPTR_EL3 : present in v8 */
10944     if (FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TFP)) {
10945         /* Trap all FP ops to EL3 */
10946         return 3;
10947     }
10948 #endif
10949     return 0;
10950 }
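
/*
 * The result of fp_exception_el() is cached in the FPEXC_EL TB flag by
 * rebuild_hflags_common() below; when it is non-zero the translators
 * emit an FP access trap to that EL instead of translating the FP/SIMD
 * instruction.
 */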
10951 
10952 /* Return the exception level we're running at if this is our mmu_idx */
10953 int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
10954 {
10955     if (mmu_idx & ARM_MMU_IDX_M) {
10956         return mmu_idx & ARM_MMU_IDX_M_PRIV;
10957     }
10958 
10959     switch (mmu_idx) {
10960     case ARMMMUIdx_E10_0:
10961     case ARMMMUIdx_E20_0:
10962     case ARMMMUIdx_SE10_0:
10963     case ARMMMUIdx_SE20_0:
10964         return 0;
10965     case ARMMMUIdx_E10_1:
10966     case ARMMMUIdx_E10_1_PAN:
10967     case ARMMMUIdx_SE10_1:
10968     case ARMMMUIdx_SE10_1_PAN:
10969         return 1;
10970     case ARMMMUIdx_E2:
10971     case ARMMMUIdx_E20_2:
10972     case ARMMMUIdx_E20_2_PAN:
10973     case ARMMMUIdx_SE2:
10974     case ARMMMUIdx_SE20_2:
10975     case ARMMMUIdx_SE20_2_PAN:
10976         return 2;
10977     case ARMMMUIdx_SE3:
10978         return 3;
10979     default:
10980         g_assert_not_reached();
10981     }
10982 }
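
/*
 * For M-profile the privilege level is encoded directly in the mmu_idx
 * via ARM_MMU_IDX_M_PRIV, hence the simple mask above.  For A-profile
 * this is effectively the inverse of arm_mmu_idx_el() below, mapping a
 * translation regime back to the exception level that runs in it.
 */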
10983 
10984 #ifndef CONFIG_TCG
10985 ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
10986 {
10987     g_assert_not_reached();
10988 }
10989 #endif
10990 
10991 ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el)
10992 {
10993     ARMMMUIdx idx;
10994     uint64_t hcr;
10995 
10996     if (arm_feature(env, ARM_FEATURE_M)) {
10997         return arm_v7m_mmu_idx_for_secstate(env, env->v7m.secure);
10998     }
10999 
11000     /* See ARM pseudo-function ELIsInHost.  */
11001     switch (el) {
11002     case 0:
11003         hcr = arm_hcr_el2_eff(env);
11004         if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
11005             idx = ARMMMUIdx_E20_0;
11006         } else {
11007             idx = ARMMMUIdx_E10_0;
11008         }
11009         break;
11010     case 1:
11011         if (env->pstate & PSTATE_PAN) {
11012             idx = ARMMMUIdx_E10_1_PAN;
11013         } else {
11014             idx = ARMMMUIdx_E10_1;
11015         }
11016         break;
11017     case 2:
11018         /* Note that TGE does not apply at EL2.  */
11019         if (arm_hcr_el2_eff(env) & HCR_E2H) {
11020             if (env->pstate & PSTATE_PAN) {
11021                 idx = ARMMMUIdx_E20_2_PAN;
11022             } else {
11023                 idx = ARMMMUIdx_E20_2;
11024             }
11025         } else {
11026             idx = ARMMMUIdx_E2;
11027         }
11028         break;
11029     case 3:
11030         return ARMMMUIdx_SE3;
11031     default:
11032         g_assert_not_reached();
11033     }
11034 
11035     if (arm_is_secure_below_el3(env)) {
11036         idx &= ~ARM_MMU_IDX_A_NS;
11037     }
11038 
11039     return idx;
11040 }
11041 
11042 ARMMMUIdx arm_mmu_idx(CPUARMState *env)
11043 {
11044     return arm_mmu_idx_el(env, arm_current_el(env));
11045 }
11046 
11047 static CPUARMTBFlags rebuild_hflags_common(CPUARMState *env, int fp_el,
11048                                            ARMMMUIdx mmu_idx,
11049                                            CPUARMTBFlags flags)
11050 {
11051     DP_TBFLAG_ANY(flags, FPEXC_EL, fp_el);
11052     DP_TBFLAG_ANY(flags, MMUIDX, arm_to_core_mmu_idx(mmu_idx));
11053 
11054     if (arm_singlestep_active(env)) {
11055         DP_TBFLAG_ANY(flags, SS_ACTIVE, 1);
11056     }
11057     return flags;
11058 }
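
/*
 * The rebuild_hflags_* functions compute the cached CPUARMTBFlags that
 * cpu_get_tb_cpu_state() below hands to the translator.  State which
 * affects translation but changes only rarely is folded in here, and
 * arm_rebuild_hflags() (or one of the rebuild_hflags_* helpers invoked
 * from generated code) must be called whenever any such state changes.
 */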
11059 
11060 static CPUARMTBFlags rebuild_hflags_common_32(CPUARMState *env, int fp_el,
11061                                               ARMMMUIdx mmu_idx,
11062                                               CPUARMTBFlags flags)
11063 {
11064     bool sctlr_b = arm_sctlr_b(env);
11065 
11066     if (sctlr_b) {
11067         DP_TBFLAG_A32(flags, SCTLR__B, 1);
11068     }
11069     if (arm_cpu_data_is_big_endian_a32(env, sctlr_b)) {
11070         DP_TBFLAG_ANY(flags, BE_DATA, 1);
11071     }
11072     DP_TBFLAG_A32(flags, NS, !access_secure_reg(env));
11073 
11074     return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
11075 }
11076 
11077 static CPUARMTBFlags rebuild_hflags_m32(CPUARMState *env, int fp_el,
11078                                         ARMMMUIdx mmu_idx)
11079 {
11080     CPUARMTBFlags flags = {};
11081     uint32_t ccr = env->v7m.ccr[env->v7m.secure];
11082 
11083     /* Without HaveMainExt, CCR.UNALIGN_TRP is RES1. */
11084     if (ccr & R_V7M_CCR_UNALIGN_TRP_MASK) {
11085         DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
11086     }
11087 
11088     if (arm_v7m_is_handler_mode(env)) {
11089         DP_TBFLAG_M32(flags, HANDLER, 1);
11090     }
11091 
11092     /*
11093      * v8M always applies stack limit checks unless CCR.STKOFHFNMIGN
11094      * is suppressing them because the requested execution priority
11095      * is less than 0.
11096      */
11097     if (arm_feature(env, ARM_FEATURE_V8) &&
11098         !((mmu_idx & ARM_MMU_IDX_M_NEGPRI) &&
11099           (ccr & R_V7M_CCR_STKOFHFNMIGN_MASK))) {
11100         DP_TBFLAG_M32(flags, STACKCHECK, 1);
11101     }
11102 
11103     return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
11104 }
11105 
11106 static CPUARMTBFlags rebuild_hflags_a32(CPUARMState *env, int fp_el,
11107                                         ARMMMUIdx mmu_idx)
11108 {
11109     CPUARMTBFlags flags = {};
11110     int el = arm_current_el(env);
11111 
11112     if (arm_sctlr(env, el) & SCTLR_A) {
11113         DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
11114     }
11115 
11116     if (arm_el_is_aa64(env, 1)) {
11117         DP_TBFLAG_A32(flags, VFPEN, 1);
11118     }
11119 
11120     if (el < 2 && env->cp15.hstr_el2 &&
11121         (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
11122         DP_TBFLAG_A32(flags, HSTR_ACTIVE, 1);
11123     }
11124 
11125     if (env->uncached_cpsr & CPSR_IL) {
11126         DP_TBFLAG_ANY(flags, PSTATE__IL, 1);
11127     }
11128 
11129     return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
11130 }
11131 
11132 static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
11133                                         ARMMMUIdx mmu_idx)
11134 {
11135     CPUARMTBFlags flags = {};
11136     ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx);
11137     uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
11138     uint64_t sctlr;
11139     int tbii, tbid;
11140 
11141     DP_TBFLAG_ANY(flags, AARCH64_STATE, 1);
11142 
11143     /* Get control bits for tagged addresses.  */
11144     tbid = aa64_va_parameter_tbi(tcr, mmu_idx);
11145     tbii = tbid & ~aa64_va_parameter_tbid(tcr, mmu_idx);
11146 
11147     DP_TBFLAG_A64(flags, TBII, tbii);
11148     DP_TBFLAG_A64(flags, TBID, tbid);
11149 
11150     if (cpu_isar_feature(aa64_sve, env_archcpu(env))) {
11151         int sve_el = sve_exception_el(env, el);
11152 
11153         /*
11154          * If either FP or SVE is disabled, the translator does not need len.
11155          * If SVE EL > FP EL, the FP exception has precedence, and the translator
11156          * does not need SVE EL.  Save potential re-translations by forcing
11157          * the unneeded data to zero.
11158          */
11159         if (fp_el != 0) {
11160             if (sve_el > fp_el) {
11161                 sve_el = 0;
11162             }
11163         } else if (sve_el == 0) {
11164             DP_TBFLAG_A64(flags, VL, sve_vqm1_for_el(env, el));
11165         }
11166         DP_TBFLAG_A64(flags, SVEEXC_EL, sve_el);
11167     }
11168 
11169     sctlr = regime_sctlr(env, stage1);
11170 
11171     if (sctlr & SCTLR_A) {
11172         DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
11173     }
11174 
11175     if (arm_cpu_data_is_big_endian_a64(el, sctlr)) {
11176         DP_TBFLAG_ANY(flags, BE_DATA, 1);
11177     }
11178 
11179     if (cpu_isar_feature(aa64_pauth, env_archcpu(env))) {
11180         /*
11181          * In order to save space in flags, we record only whether
11182          * pauth is "inactive", meaning all insns are implemented as
11183          * a nop, or "active" when some action must be performed.
11184          * The decision of which action to take is left to a helper.
11185          */
11186         if (sctlr & (SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)) {
11187             DP_TBFLAG_A64(flags, PAUTH_ACTIVE, 1);
11188         }
11189     }
11190 
11191     if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
11192         /* Note that SCTLR_EL[23].BT == SCTLR_BT1.  */
11193         if (sctlr & (el == 0 ? SCTLR_BT0 : SCTLR_BT1)) {
11194             DP_TBFLAG_A64(flags, BT, 1);
11195         }
11196     }
11197 
11198     /* Compute the condition for using AccType_UNPRIV for LDTR et al. */
11199     if (!(env->pstate & PSTATE_UAO)) {
11200         switch (mmu_idx) {
11201         case ARMMMUIdx_E10_1:
11202         case ARMMMUIdx_E10_1_PAN:
11203         case ARMMMUIdx_SE10_1:
11204         case ARMMMUIdx_SE10_1_PAN:
11205             /* TODO: ARMv8.3-NV */
11206             DP_TBFLAG_A64(flags, UNPRIV, 1);
11207             break;
11208         case ARMMMUIdx_E20_2:
11209         case ARMMMUIdx_E20_2_PAN:
11210         case ARMMMUIdx_SE20_2:
11211         case ARMMMUIdx_SE20_2_PAN:
11212             /*
11213              * Note that EL20_2 is gated by HCR_EL2.E2H == 1, but EL20_0 is
11214              * gated by HCR_EL2.<E2H,TGE> == '11', and so is LDTR.
11215              */
11216             if (env->cp15.hcr_el2 & HCR_TGE) {
11217                 DP_TBFLAG_A64(flags, UNPRIV, 1);
11218             }
11219             break;
11220         default:
11221             break;
11222         }
11223     }
11224 
11225     if (env->pstate & PSTATE_IL) {
11226         DP_TBFLAG_ANY(flags, PSTATE__IL, 1);
11227     }
11228 
11229     if (cpu_isar_feature(aa64_mte, env_archcpu(env))) {
11230         /*
11231          * Set MTE_ACTIVE if any access may be Checked, and leave clear
11232          * if all accesses must be Unchecked:
11233          * 1) If no TBI, then there are no tags in the address to check,
11234          * 2) If Tag Check Override, then all accesses are Unchecked,
11235          * 3) If Tag Check Fail == 0, then Checked accesses have no effect,
11236          * 4) If no Allocation Tag Access, then all accesses are Unchecked.
11237          */
11238         if (allocation_tag_access_enabled(env, el, sctlr)) {
11239             DP_TBFLAG_A64(flags, ATA, 1);
11240             if (tbid
11241                 && !(env->pstate & PSTATE_TCO)
11242                 && (sctlr & (el == 0 ? SCTLR_TCF0 : SCTLR_TCF))) {
11243                 DP_TBFLAG_A64(flags, MTE_ACTIVE, 1);
11244             }
11245         }
11246         /* And again for unprivileged accesses, if required.  */
11247         if (EX_TBFLAG_A64(flags, UNPRIV)
11248             && tbid
11249             && !(env->pstate & PSTATE_TCO)
11250             && (sctlr & SCTLR_TCF0)
11251             && allocation_tag_access_enabled(env, 0, sctlr)) {
11252             DP_TBFLAG_A64(flags, MTE0_ACTIVE, 1);
11253         }
11254         /* Cache TCMA as well as TBI. */
11255         DP_TBFLAG_A64(flags, TCMA, aa64_va_parameter_tcma(tcr, mmu_idx));
11256     }
11257 
11258     return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
11259 }
11260 
11261 static CPUARMTBFlags rebuild_hflags_internal(CPUARMState *env)
11262 {
11263     int el = arm_current_el(env);
11264     int fp_el = fp_exception_el(env, el);
11265     ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
11266 
11267     if (is_a64(env)) {
11268         return rebuild_hflags_a64(env, el, fp_el, mmu_idx);
11269     } else if (arm_feature(env, ARM_FEATURE_M)) {
11270         return rebuild_hflags_m32(env, fp_el, mmu_idx);
11271     } else {
11272         return rebuild_hflags_a32(env, fp_el, mmu_idx);
11273     }
11274 }
11275 
11276 void arm_rebuild_hflags(CPUARMState *env)
11277 {
11278     env->hflags = rebuild_hflags_internal(env);
11279 }
11280 
11281 /*
11282  * If we have triggered an EL state change, we can't rely on the
11283  * translator having passed it to us, so we need to recompute.
11284  */
11285 void HELPER(rebuild_hflags_m32_newel)(CPUARMState *env)
11286 {
11287     int el = arm_current_el(env);
11288     int fp_el = fp_exception_el(env, el);
11289     ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
11290 
11291     env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
11292 }
11293 
11294 void HELPER(rebuild_hflags_m32)(CPUARMState *env, int el)
11295 {
11296     int fp_el = fp_exception_el(env, el);
11297     ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
11298 
11299     env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
11300 }
11301 
11302 /*
11303  * If we have triggered an EL state change, we can't rely on the
11304  * translator having passed it to us, so we need to recompute.
11305  */
11306 void HELPER(rebuild_hflags_a32_newel)(CPUARMState *env)
11307 {
11308     int el = arm_current_el(env);
11309     int fp_el = fp_exception_el(env, el);
11310     ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
11311     env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
11312 }
11313 
11314 void HELPER(rebuild_hflags_a32)(CPUARMState *env, int el)
11315 {
11316     int fp_el = fp_exception_el(env, el);
11317     ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
11318 
11319     env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
11320 }
11321 
11322 void HELPER(rebuild_hflags_a64)(CPUARMState *env, int el)
11323 {
11324     int fp_el = fp_exception_el(env, el);
11325     ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
11326 
11327     env->hflags = rebuild_hflags_a64(env, el, fp_el, mmu_idx);
11328 }
11329 
11330 static inline void assert_hflags_rebuild_correctly(CPUARMState *env)
11331 {
11332 #ifdef CONFIG_DEBUG_TCG
11333     CPUARMTBFlags c = env->hflags;
11334     CPUARMTBFlags r = rebuild_hflags_internal(env);
11335 
11336     if (unlikely(c.flags != r.flags || c.flags2 != r.flags2)) {
11337         fprintf(stderr, "TCG hflags mismatch "
11338                         "(current:(0x%08x,0x" TARGET_FMT_lx ")"
11339                         " rebuilt:(0x%08x,0x" TARGET_FMT_lx "))\n",
11340                 c.flags, c.flags2, r.flags, r.flags2);
11341         abort();
11342     }
11343 #endif
11344 }
11345 
11346 static bool mve_no_pred(CPUARMState *env)
11347 {
11348     /*
11349      * Return true if there is definitely no predication of MVE
11350      * instructions by VPR or LTPSIZE. (Returning false even if there
11351      * isn't any predication is OK; generated code will just be
11352      * a little worse.)
11353      * If the CPU does not implement MVE then this TB flag is always 0.
11354      *
11355      * NOTE: if you change this logic, the "recalculate s->mve_no_pred"
11356      * logic in gen_update_fp_context() needs to be updated to match.
11357      *
11358      * We do not include the effect of the ECI bits here -- they are
11359      * tracked in other TB flags. This simplifies the logic for
11360      * "when did we emit code that changes the MVE_NO_PRED TB flag
11361      * and thus need to end the TB?".
11362      */
11363     if (cpu_isar_feature(aa32_mve, env_archcpu(env))) {
11364         return false;
11365     }
11366     if (env->v7m.vpr) {
11367         return false;
11368     }
11369     if (env->v7m.ltpsize < 4) {
11370         return false;
11371     }
11372     return true;
11373 }
11374 
11375 void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
11376                           target_ulong *cs_base, uint32_t *pflags)
11377 {
11378     CPUARMTBFlags flags;
11379 
11380     assert_hflags_rebuild_correctly(env);
11381     flags = env->hflags;
11382 
11383     if (EX_TBFLAG_ANY(flags, AARCH64_STATE)) {
11384         *pc = env->pc;
11385         if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
11386             DP_TBFLAG_A64(flags, BTYPE, env->btype);
11387         }
11388     } else {
11389         *pc = env->regs[15];
11390 
11391         if (arm_feature(env, ARM_FEATURE_M)) {
11392             if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
11393                 FIELD_EX32(env->v7m.fpccr[M_REG_S], V7M_FPCCR, S)
11394                 != env->v7m.secure) {
11395                 DP_TBFLAG_M32(flags, FPCCR_S_WRONG, 1);
11396             }
11397 
11398             if ((env->v7m.fpccr[env->v7m.secure] & R_V7M_FPCCR_ASPEN_MASK) &&
11399                 (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) ||
11400                  (env->v7m.secure &&
11401                   !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)))) {
11402                 /*
11403                  * ASPEN is set, but FPCA/SFPA indicate that there is no
11404                  * active FP context; we must create a new FP context before
11405                  * executing any FP insn.
11406                  */
11407                 DP_TBFLAG_M32(flags, NEW_FP_CTXT_NEEDED, 1);
11408             }
11409 
11410             bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
11411             if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) {
11412                 DP_TBFLAG_M32(flags, LSPACT, 1);
11413             }
11414 
11415             if (mve_no_pred(env)) {
11416                 DP_TBFLAG_M32(flags, MVE_NO_PRED, 1);
11417             }
11418         } else {
11419             /*
11420              * Note that XSCALE_CPAR shares bits with VECSTRIDE.
11421              * Note that VECLEN+VECSTRIDE are RES0 for M-profile.
11422              */
11423             if (arm_feature(env, ARM_FEATURE_XSCALE)) {
11424                 DP_TBFLAG_A32(flags, XSCALE_CPAR, env->cp15.c15_cpar);
11425             } else {
11426                 DP_TBFLAG_A32(flags, VECLEN, env->vfp.vec_len);
11427                 DP_TBFLAG_A32(flags, VECSTRIDE, env->vfp.vec_stride);
11428             }
11429             if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) {
11430                 DP_TBFLAG_A32(flags, VFPEN, 1);
11431             }
11432         }
11433 
11434         DP_TBFLAG_AM32(flags, THUMB, env->thumb);
11435         DP_TBFLAG_AM32(flags, CONDEXEC, env->condexec_bits);
11436     }
11437 
11438     /*
11439      * The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
11440      * states defined in the ARM ARM for software singlestep:
11441      *  SS_ACTIVE   PSTATE.SS   State
11442      *     0            x       Inactive (the TB flag for SS is always 0)
11443      *     1            0       Active-pending
11444      *     1            1       Active-not-pending
11445      * SS_ACTIVE is set in hflags; PSTATE__SS is computed every TB.
11446      */
11447     if (EX_TBFLAG_ANY(flags, SS_ACTIVE) && (env->pstate & PSTATE_SS)) {
11448         DP_TBFLAG_ANY(flags, PSTATE__SS, 1);
11449     }
11450 
11451     *pflags = flags.flags;
11452     *cs_base = flags.flags2;
11453 }
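
/*
 * flags.flags is returned as the TB flags and flags.flags2 as cs_base,
 * and both take part in the translation block lookup, so two CPU
 * states that differ in any bit set above can never share a translated
 * block.
 */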
11454 
11455 #ifdef TARGET_AARCH64
11456 /*
11457  * The manual says that when SVE is enabled and VQ is widened the
11458  * implementation is allowed to zero the previously inaccessible
11459  * portion of the registers.  The corollary to that is that when
11460  * SVE is enabled and VQ is narrowed we are also allowed to zero
11461  * the now inaccessible portion of the registers.
11462  *
11463  * The intent of this is that no predicate bit beyond VQ is ever set.
11464  * Which means that some operations on predicate registers themselves
11465  * may operate on full uint64_t or even unrolled across the maximum
11466  * uint64_t[4].  Performing 4 bits of host arithmetic unconditionally
11467  * may well be cheaper than conditionals to restrict the operation
11468  * to the relevant portion of a uint16_t[16].
11469  */
11470 void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq)
11471 {
11472     int i, j;
11473     uint64_t pmask;
11474 
11475     assert(vq >= 1 && vq <= ARM_MAX_VQ);
11476     assert(vq <= env_archcpu(env)->sve_max_vq);
11477 
11478     /* Zap the high bits of the zregs.  */
11479     for (i = 0; i < 32; i++) {
11480         memset(&env->vfp.zregs[i].d[2 * vq], 0, 16 * (ARM_MAX_VQ - vq));
11481     }
11482 
11483     /* Zap the high bits of the pregs and ffr.  */
11484     pmask = 0;
11485     if (vq & 3) {
11486         pmask = ~(-1ULL << (16 * (vq & 3)));
11487     }
11488     for (j = vq / 4; j < ARM_MAX_VQ / 4; j++) {
11489         for (i = 0; i < 17; ++i) {
11490             env->vfp.pregs[i].p[j] &= pmask;
11491         }
11492         pmask = 0;
11493     }
11494 }
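
/*
 * Worked example: narrowing to vq == 6 leaves 96 predicate bits (one
 * per vector byte) per register.  p[0] is untouched, p[1] is masked
 * with pmask == 0x00000000ffffffff since vq & 3 == 2, and p[2] and
 * p[3] are cleared by the zero pmask on the remaining iterations.
 */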
11495 
11496 /*
11497  * Notice a change in SVE vector size when changing EL.
11498  */
11499 void aarch64_sve_change_el(CPUARMState *env, int old_el,
11500                            int new_el, bool el0_a64)
11501 {
11502     ARMCPU *cpu = env_archcpu(env);
11503     int old_len, new_len;
11504     bool old_a64, new_a64;
11505 
11506     /* Nothing to do if no SVE.  */
11507     if (!cpu_isar_feature(aa64_sve, cpu)) {
11508         return;
11509     }
11510 
11511     /* Nothing to do if FP is disabled in either EL.  */
11512     if (fp_exception_el(env, old_el) || fp_exception_el(env, new_el)) {
11513         return;
11514     }
11515 
11516     /*
11517      * DDI0584A.d sec 3.2: "If SVE instructions are disabled or trapped
11518      * at ELx, or not available because the EL is in AArch32 state, then
11519      * for all purposes other than a direct read, the ZCR_ELx.LEN field
11520      * has an effective value of 0".
11521      *
11522      * Consider EL2 (aa64, vq=4) -> EL0 (aa32) -> EL1 (aa64, vq=0).
11523      * If we ignore aa32 state, we would fail to see the vq4->vq0 transition
11524      * from EL2->EL1.  Thus we go ahead and narrow when entering aa32 so that
11525      * we already have the correct register contents when encountering the
11526      * vq0->vq0 transition between EL0->EL1.
11527      */
11528     old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64;
11529     old_len = (old_a64 && !sve_exception_el(env, old_el)
11530                ? sve_vqm1_for_el(env, old_el) : 0);
11531     new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64;
11532     new_len = (new_a64 && !sve_exception_el(env, new_el)
11533                ? sve_vqm1_for_el(env, new_el) : 0);
11534 
11535     /* When changing vector length, clear inaccessible state.  */
11536     if (new_len < old_len) {
11537         aarch64_sve_narrow_vq(env, new_len + 1);
11538     }
11539 }
11540 #endif
11541