/* target/arm/helper.c (QEMU, openbmc tree, revision fa59483d) */
#include "qemu/osdep.h"
#include "target/arm/idau.h"
#include "trace.h"
#include "cpu.h"
#include "internals.h"
#include "exec/gdbstub.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "sysemu/arch_init.h"
#include "sysemu/sysemu.h"
#include "qemu/bitops.h"
#include "qemu/crc32c.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "arm_ldst.h"
#include <zlib.h> /* For crc32 */
#include "exec/semihost.h"
#include "sysemu/cpus.h"
#include "sysemu/kvm.h"
#include "fpu/softfloat.h"
#include "qemu/range.h"
#include "qapi/qapi-commands-target.h"

#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */

#ifndef CONFIG_USER_ONLY
/* Cacheability and shareability attributes for a memory access */
typedef struct ARMCacheAttrs {
    unsigned int attrs:8; /* as in the MAIR register encoding */
    unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
} ARMCacheAttrs;

static bool get_phys_addr(CPUARMState *env, target_ulong address,
                          MMUAccessType access_type, ARMMMUIdx mmu_idx,
                          hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                          target_ulong *page_size,
                          ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);

static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                               target_ulong *page_size_ptr,
                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);

/* Security attributes for an address, as returned by v8m_security_lookup. */
typedef struct V8M_SAttributes {
    bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */
    bool ns;
    bool nsc;
    uint8_t sregion;
    bool srvalid;
    uint8_t iregion;
    bool irvalid;
} V8M_SAttributes;

static void v8m_security_lookup(CPUARMState *env, uint32_t address,
                                MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                V8M_SAttributes *sattrs);
#endif

static void switch_mode(CPUARMState *env, int mode);

static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    int nregs;

    /* VFP data registers are always little-endian.  */
    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        stq_le_p(buf, *aa32_vfp_dreg(env, reg));
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        /* Aliases for Q regs.  */
        nregs += 16;
        if (reg < nregs) {
            uint64_t *q = aa32_vfp_qreg(env, reg - 32);
            stq_le_p(buf, q[0]);
            stq_le_p(buf + 8, q[1]);
            return 16;
        }
    }
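    /* Beyond the D/Q registers, gdb's numbering continues with FPSID, FPSCR and FPEXC */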
    switch (reg - nregs) {
    case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4;
    case 1: stl_p(buf, vfp_get_fpscr(env)); return 4;
    case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4;
    }
    return 0;
}

static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    int nregs;

    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        *aa32_vfp_dreg(env, reg) = ldq_le_p(buf);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        nregs += 16;
        if (reg < nregs) {
            uint64_t *q = aa32_vfp_qreg(env, reg - 32);
            q[0] = ldq_le_p(buf);
            q[1] = ldq_le_p(buf + 8);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
    case 1: vfp_set_fpscr(env, ldl_p(buf)); return 4;
    case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
    }
    return 0;
}

static int aarch64_fpu_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
        /* 128 bit FP register */
        {
            uint64_t *q = aa64_vfp_qreg(env, reg);
            stq_le_p(buf, q[0]);
            stq_le_p(buf + 8, q[1]);
            return 16;
        }
    case 32:
        /* FPSR */
        stl_p(buf, vfp_get_fpsr(env));
        return 4;
    case 33:
        /* FPCR */
        stl_p(buf, vfp_get_fpcr(env));
        return 4;
    default:
        return 0;
    }
}

static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
        /* 128 bit FP register */
        {
            uint64_t *q = aa64_vfp_qreg(env, reg);
            q[0] = ldq_le_p(buf);
            q[1] = ldq_le_p(buf + 8);
            return 16;
        }
    case 32:
        /* FPSR */
        vfp_set_fpsr(env, ldl_p(buf));
        return 4;
    case 33:
        /* FPCR */
        vfp_set_fpcr(env, ldl_p(buf));
        return 4;
    default:
        return 0;
    }
}

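/*
 * The "raw" accessors read and write the register's backing storage in
 * env directly, via ri->fieldoffset, with no side effects. Registers
 * whose state is not kept at a fieldoffset must provide their own raw
 * hooks or be marked ARM_CP_NO_RAW; the asserts here enforce that.
 */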
static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        return CPREG_FIELD64(env, ri);
    } else {
        return CPREG_FIELD32(env, ri);
    }
}

static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        CPREG_FIELD64(env, ri) = value;
    } else {
        CPREG_FIELD32(env, ri) = value;
    }
}

static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return (char *)env + ri->fieldoffset;
}

uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Raw read of a coprocessor register (as needed for migration, etc). */
    if (ri->type & ARM_CP_CONST) {
        return ri->resetvalue;
    } else if (ri->raw_readfn) {
        return ri->raw_readfn(env, ri);
    } else if (ri->readfn) {
        return ri->readfn(env, ri);
    } else {
        return raw_read(env, ri);
    }
}

static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t v)
{
    /* Raw write of a coprocessor register (as needed for migration, etc).
     * Note that constant registers are treated as write-ignored; the
     * caller should check for success by whether a readback gives the
     * value written.
     */
    if (ri->type & ARM_CP_CONST) {
        return;
    } else if (ri->raw_writefn) {
        ri->raw_writefn(env, ri, v);
    } else if (ri->writefn) {
        ri->writefn(env, ri, v);
    } else {
        raw_write(env, ri, v);
    }
}

static int arm_gdb_get_sysreg(CPUARMState *env, uint8_t *buf, int reg)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    const ARMCPRegInfo *ri;
    uint32_t key;

    key = cpu->dyn_xml.cpregs_keys[reg];
    ri = get_arm_cp_reginfo(cpu->cp_regs, key);
    if (ri) {
        if (cpreg_field_is_64bit(ri)) {
            return gdb_get_reg64(buf, (uint64_t)read_raw_cp_reg(env, ri));
        } else {
            return gdb_get_reg32(buf, (uint32_t)read_raw_cp_reg(env, ri));
        }
    }
    return 0;
}

static int arm_gdb_set_sysreg(CPUARMState *env, uint8_t *buf, int reg)
{
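    /* System registers are currently read-only via the gdbstub: writes are ignored */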
    return 0;
}

static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
{
    /* Return true if the regdef would cause an assertion if you called
     * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
     * program bug for it not to have the NO_RAW flag).
     * NB that returning false here doesn't necessarily mean that calling
     * read/write_raw_cp_reg() is safe, because we can't distinguish "has
     * read/write access functions which are safe for raw use" from "has
     * read/write access functions which have side effects but has forgotten
     * to provide raw access functions".
     * The tests here line up with the conditions in read/write_raw_cp_reg()
     * and assertions in raw_read()/raw_write().
     */
    if ((ri->type & ARM_CP_CONST) ||
        ri->fieldoffset ||
        ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
        return false;
    }
    return true;
}

bool write_cpustate_to_list(ARMCPU *cpu)
{
    /* Write the coprocessor state from cpu->env to the (index,value) list. */
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        cpu->cpreg_values[i] = read_raw_cp_reg(&cpu->env, ri);
    }
    return ok;
}

bool write_list_to_cpustate(ARMCPU *cpu)
{
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        uint64_t v = cpu->cpreg_values[i];
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        /* Write value and confirm it reads back as written
         * (to catch read-only registers and partially read-only
         * registers where the incoming migration value doesn't match)
         */
        write_raw_cp_reg(&cpu->env, ri, v);
        if (read_raw_cp_reg(&cpu->env, ri) != v) {
            ok = false;
        }
    }
    return ok;
}

static void add_cpreg_to_list(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
        cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
        /* The value array need not be initialized at this point */
        cpu->cpreg_array_len++;
    }
}

static void count_cpreg(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
        cpu->cpreg_array_len++;
    }
}

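/*
 * Comparison function for sorting cpreg hash keys: order by the
 * KVM-encoded register index, since init_cpreg_list() requires the
 * resulting list to be sorted by key ID.
 */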
static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
{
    uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
    uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);

    if (aidx > bidx) {
        return 1;
    }
    if (aidx < bidx) {
        return -1;
    }
    return 0;
}

void init_cpreg_list(ARMCPU *cpu)
{
    /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
     * Note that we require cpreg_tuples[] to be sorted by key ID.
     */
    GList *keys;
    int arraylen;

    keys = g_hash_table_get_keys(cpu->cp_regs);
    keys = g_list_sort(keys, cpreg_key_compare);

    cpu->cpreg_array_len = 0;

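    /* First pass: count the raw-accessible registers so the arrays can be sized */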
    g_list_foreach(keys, count_cpreg, cpu);

    arraylen = cpu->cpreg_array_len;
    cpu->cpreg_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    cpu->cpreg_array_len = 0;

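    /* Second pass: fill cpreg_indexes[] with the sorted register indexes */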
    g_list_foreach(keys, add_cpreg_to_list, cpu);

    assert(cpu->cpreg_array_len == arraylen);

    g_list_free(keys);
}

/*
 * Some registers are not accessible if EL3.NS=0 and EL3 is using AArch32 but
 * they are accessible when EL3 is using AArch64 regardless of EL3.NS.
 *
 * access_el3_aa32ns: Used to check AArch32 register views.
 * access_el3_aa32ns_aa64any: Used to check both AArch32/64 register views.
 */
static CPAccessResult access_el3_aa32ns(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    bool secure = arm_is_secure_below_el3(env);

    assert(!arm_el_is_aa64(env, 3));
    if (secure) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult access_el3_aa32ns_aa64any(CPUARMState *env,
                                                const ARMCPRegInfo *ri,
                                                bool isread)
{
    if (!arm_el_is_aa64(env, 3)) {
        return access_el3_aa32ns(env, ri, isread);
    }
    return CP_ACCESS_OK;
}

/* Some secure-only AArch32 registers trap to EL3 if used from
 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
 * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
 * We assume that the .access field is set to PL1_RW.
 */
static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
                                            const ARMCPRegInfo *ri,
                                            bool isread)
{
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL3;
    }
    /* This will be EL1 NS and EL2 NS, which just UNDEF */
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}

/* Check for traps to "powerdown debug" registers, which are controlled
 * by MDCR.TDOSA
 */
static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    int el = arm_current_el(env);
    bool mdcr_el2_tdosa = (env->cp15.mdcr_el2 & MDCR_TDOSA) ||
        (env->cp15.mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tdosa && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to "debug ROM" registers, which are controlled
 * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    int el = arm_current_el(env);
    bool mdcr_el2_tdra = (env->cp15.mdcr_el2 & MDCR_TDRA) ||
        (env->cp15.mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tdra && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to general debug registers, which are controlled
 * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    int el = arm_current_el(env);
    bool mdcr_el2_tda = (env->cp15.mdcr_el2 & MDCR_TDA) ||
        (env->cp15.mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tda && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to performance monitor registers, which are controlled
 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
 */
static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    raw_write(env, ri, value);
    tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */
}

static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (raw_read(env, ri) != value) {
        /* Unlike real hardware the qemu TLB uses virtual addresses,
         * not modified virtual addresses, so this causes a TLB flush.
         */
        tlb_flush(CPU(cpu));
        raw_write(env, ri, value);
    }
}

static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA)
        && !extended_addresses_enabled(env)) {
        /* For VMSA (when not using the LPAE long descriptor page table
         * format) this register includes the ASID, so do a TLB flush.
         * For PMSA it is purely a process ID and no action is needed.
         */
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}

/* IS variants of TLB operations must affect all cores */
static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}

static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}

/*
 * Non-IS variants of TLB operations are upgraded to
 * IS versions if we are at NS EL1 and HCR_EL2.FB is set to
 * force broadcast of these operations.
 */
static bool tlb_force_broadcast(CPUARMState *env)
{
    return (env->cp15.hcr_el2 & HCR_FB) &&
        arm_current_el(env) == 1 && !arm_is_secure_below_el3(env);
}

static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate all (TLBIALL) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlbiall_is_write(env, NULL, value);
        return;
    }

    tlb_flush(CPU(cpu));
}

static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlbimva_is_write(env, NULL, value);
        return;
    }

    tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
}

static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate by ASID (TLBIASID) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlbiasid_is_write(env, NULL, value);
        return;
    }

    tlb_flush(CPU(cpu));
}

static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlbimvaa_is_write(env, NULL, value);
        return;
    }

    tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
}

static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx(cs,
                        ARMMMUIdxBit_S12NSE1 |
                        ARMMMUIdxBit_S12NSE0 |
                        ARMMMUIdxBit_S2NS);
}

static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                        ARMMMUIdxBit_S12NSE1 |
                                        ARMMMUIdxBit_S12NSE0 |
                                        ARMMMUIdxBit_S2NS);
}

static void tlbiipas2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* Invalidate by IPA. This has to invalidate any structures that
     * contain only stage 2 translation information, but does not need
     * to apply to structures that contain combined stage 1 and stage 2
     * translation information.
     * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
     */
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

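    /* Bits [27:0] of the written value hold IPA[39:12]; reconstruct the full address */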
    pageaddr = sextract64(value << 12, 0, 40);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS);
}

static void tlbiipas2_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 40);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S2NS);
}

static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2);
}

static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2);
}

static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2);
}

static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S1E2);
}

static const ARMCPRegInfo cp_reginfo[] = {
    /* Define the secure and non-secure FCSE identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3).  This allows
     * the secure register to be properly reset and migrated. There is also no
     * v8 EL1 version of the register so the non-secure instance stands alone.
     */
    { .name = "FCSEIDR",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    { .name = "FCSEIDR_S",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    /* Define the secure and non-secure context identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3).  This allows
     * the secure register to be properly reset and migrated.  In the
     * non-secure case, the 32-bit register will have reset and migration
     * disabled during registration as it is handled by the 64-bit instance.
     */
    { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    { .name = "CONTEXTIDR_S", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo not_v8_cp_reginfo[] = {
    /* NB: Some of these registers exist in v8 but with more precise
     * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
     */
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR",
      .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
     * For v6 and v5, these mappings are overly broad.
     */
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    /* Cache maintenance ops; some of this space may be overridden later. */
    { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo not_v6_cp_reginfo[] = {
    /* Not all pre-v6 cores implemented this WFI, so this is slightly
     * over-broad.
     */
    { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_WFI },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo not_v7_cp_reginfo[] = {
    /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
     * is UNPREDICTABLE; we choose to NOP as most implementations do).
     */
    { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_WFI },
    /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
     * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
     * OMAPCP will override this space.
     */
    { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
      .resetvalue = 0 },
    { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
      .resetvalue = 0 },
    /* v6 doesn't have the cache ID registers but Linux reads them anyway */
    { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
     * implementing it as RAZ means the "debug architecture version" bits
     * will read as a reserved value, which should cause Linux to not try
     * to use the debug hardware.
     */
    { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MMU TLB control. Note that the wildcarding means we cover not just
     * the unified TLB ops but also the dside/iside/inner-shareable variants.
     */
    { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
      .type = ARM_CP_NO_RAW },
    { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};

static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    uint32_t mask = 0;

    /* In ARMv8 most bits of CPACR_EL1 are RES0. */
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
         * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
         * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
         */
        if (arm_feature(env, ARM_FEATURE_VFP)) {
            /* VFP coprocessor: cp10 & cp11 [23:20] */
            mask |= (1 << 31) | (1 << 30) | (0xf << 20);

            if (!arm_feature(env, ARM_FEATURE_NEON)) {
                /* ASEDIS [31] bit is RAO/WI */
                value |= (1 << 31);
            }

            /* VFPv3 and upwards with NEON implement 32 double precision
             * registers (D0-D31).
             */
            if (!arm_feature(env, ARM_FEATURE_NEON) ||
                    !arm_feature(env, ARM_FEATURE_VFP3)) {
                /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
                value |= (1 << 30);
            }
        }
        value &= mask;
    }
    env->cp15.cpacr_el1 = value;
}

static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Call cpacr_write() so that we reset with the correct RAO bits set
     * for our CPU features.
     */
    cpacr_write(env, ri, 0);
}

static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* Check if CPACR accesses are to be trapped to EL2 */
        if (arm_current_el(env) == 1 &&
            (env->cp15.cptr_el[2] & CPTR_TCPAC) && !arm_is_secure(env)) {
            return CP_ACCESS_TRAP_EL2;
        /* Check if CPACR accesses are to be trapped to EL3 */
        } else if (arm_current_el(env) < 3 &&
                   (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
            return CP_ACCESS_TRAP_EL3;
        }
    }

    return CP_ACCESS_OK;
}

static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    /* Check if CPTR accesses are set to trap to EL3 */
    if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

static const ARMCPRegInfo v6_cp_reginfo[] = {
    /* prefetch by MVA in v6, NOP in v7 */
    { .name = "MVA_prefetch",
      .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* We need to break the TB after ISB to execute self-modifying code
     * correctly and also to take any pending interrupts immediately.
     * So we use the arm_cp_write_ignore() function instead of the
     * ARM_CP_NOP flag.
     */
    { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
    { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
                             offsetof(CPUARMState, cp15.ifar_ns) },
      .resetvalue = 0, },
    /* Watchpoint Fault Address Register : should actually only be present
     * for 1136, 1176, 11MPCore.
     */
    { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
    { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
      .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
      .resetfn = cpacr_reset, .writefn = cpacr_write },
    REGINFO_SENTINEL
};

/* Definitions for the PMU registers */
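/*
 * PMCR bit layout notes: PMCRN_MASK/PMCRN_SHIFT extract PMCR.N, the number
 * of implemented event counters (bits [15:11]); the LC/DP/D/C/P/E defines
 * below are the PMCR control flags in bits 6, 4, 3, 2, 1 and 0.
 */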
#define PMCRN_MASK  0xf800
#define PMCRN_SHIFT 11
#define PMCRLC  0x40
#define PMCRDP  0x10
#define PMCRD   0x8
#define PMCRC   0x4
#define PMCRP   0x2
#define PMCRE   0x1

#define PMXEVTYPER_P          0x80000000
#define PMXEVTYPER_U          0x40000000
#define PMXEVTYPER_NSK        0x20000000
#define PMXEVTYPER_NSU        0x10000000
#define PMXEVTYPER_NSH        0x08000000
#define PMXEVTYPER_M          0x04000000
#define PMXEVTYPER_MT         0x02000000
#define PMXEVTYPER_EVTCOUNT   0x0000ffff
#define PMXEVTYPER_MASK       (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
                               PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
                               PMXEVTYPER_M | PMXEVTYPER_MT | \
                               PMXEVTYPER_EVTCOUNT)

#define PMCCFILTR             0xf8000000
#define PMCCFILTR_M           PMXEVTYPER_M
#define PMCCFILTR_EL0         (PMCCFILTR | PMCCFILTR_M)

static inline uint32_t pmu_num_counters(CPUARMState *env)
{
    return (env->cp15.c9_pmcr & PMCRN_MASK) >> PMCRN_SHIFT;
}

/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
static inline uint64_t pmu_counter_mask(CPUARMState *env)
{
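    /* e.g. with PMCR.N == 4 this is 0x8000000f: bit 31 (PMCCNTR) plus one bit per event counter */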
    return (1 << 31) | ((1 << pmu_num_counters(env)) - 1);
}

typedef struct pm_event {
    uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */
    /* If the event is supported on this CPU (used to generate PMCEID[01]) */
    bool (*supported)(CPUARMState *);
    /*
     * Retrieve the current count of the underlying event. The programmed
     * counters hold a difference from the return value from this function
     */
    uint64_t (*get_count)(CPUARMState *);
    /*
     * Return how many nanoseconds it will take (at a minimum) for count events
     * to occur. A negative value indicates the counter will never overflow, or
     * that the counter has otherwise arranged for the overflow bit to be set
     * and the PMU interrupt to be raised on overflow.
     */
    int64_t (*ns_per_count)(uint64_t);
} pm_event;

static bool event_always_supported(CPUARMState *env)
{
    return true;
}

static uint64_t swinc_get_count(CPUARMState *env)
{
    /*
     * SW_INCR events are written directly to the pmevcntr's by writes to
     * PMSWINC, so there is no underlying count maintained by the PMU itself
     */
    return 0;
}

static int64_t swinc_ns_per(uint64_t ignored)
{
    return -1;
}
/*
 * Return the underlying cycle count for the PMU cycle counters. In
 * user-mode emulation there is no virtual clock, so we fall back to
 * host ticks.
 */
static uint64_t cycles_get_count(CPUARMState *env)
{
#ifndef CONFIG_USER_ONLY
    return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                    ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
#else
    return cpu_get_host_ticks();
#endif
}

#ifndef CONFIG_USER_ONLY
static int64_t cycles_ns_per(uint64_t cycles)
{
    /* Scale cycles to nanoseconds; with ARM_CPU_FREQ fixed at 1 GHz this is
     * currently an identity, but keep the conversion dimensionally correct.
     */
    return muldiv64(cycles, NANOSECONDS_PER_SECOND, ARM_CPU_FREQ);
}

static bool instructions_supported(CPUARMState *env)
{
    return use_icount == 1 /* Precise instruction counting */;
}

static uint64_t instructions_get_count(CPUARMState *env)
{
    return (uint64_t)cpu_get_icount_raw();
}

static int64_t instructions_ns_per(uint64_t icount)
{
    return cpu_icount_to_ns((int64_t)icount);
}
#endif

static const pm_event pm_events[] = {
    { .number = 0x000, /* SW_INCR */
      .supported = event_always_supported,
      .get_count = swinc_get_count,
      .ns_per_count = swinc_ns_per,
    },
#ifndef CONFIG_USER_ONLY
    { .number = 0x008, /* INST_RETIRED, Instruction architecturally executed */
      .supported = instructions_supported,
      .get_count = instructions_get_count,
      .ns_per_count = instructions_ns_per,
    },
    { .number = 0x011, /* CPU_CYCLES, Cycle */
      .supported = event_always_supported,
      .get_count = cycles_get_count,
      .ns_per_count = cycles_ns_per,
    }
#endif
};

/*
 * Note: Before increasing MAX_EVENT_ID beyond 0x3f into the 0x40xx range of
 * events (i.e. the statistical profiling extension), this implementation
 * should first be updated to something sparse instead of the current
 * supported_event_map[] array.
 */
#define MAX_EVENT_ID 0x11
#define UNSUPPORTED_EVENT UINT16_MAX
static uint16_t supported_event_map[MAX_EVENT_ID + 1];

/*
 * Called upon CPU initialization to initialize PMCEID[01]_EL0 and build a map
 * of ARM event numbers to indices in our pm_events array.
 *
 * Note: Events in the 0x40XX range are not currently supported.
 */
void pmu_init(ARMCPU *cpu)
{
    unsigned int i;

    /*
     * Empty supported_event_map and cpu->pmceid[01] before adding supported
     * events to them
     */
    for (i = 0; i < ARRAY_SIZE(supported_event_map); i++) {
        supported_event_map[i] = UNSUPPORTED_EVENT;
    }
    cpu->pmceid0 = 0;
    cpu->pmceid1 = 0;

    for (i = 0; i < ARRAY_SIZE(pm_events); i++) {
        const pm_event *cnt = &pm_events[i];
        assert(cnt->number <= MAX_EVENT_ID);
        /* We do not currently support events in the 0x40xx range */
        assert(cnt->number <= 0x3f);

        if (cnt->supported(&cpu->env)) {
            supported_event_map[cnt->number] = i;
            uint64_t event_mask = 1ULL << (cnt->number & 0x1f);
            if (cnt->number & 0x20) {
                cpu->pmceid1 |= event_mask;
            } else {
                cpu->pmceid0 |= event_mask;
            }
        }
    }
}

/*
 * Check at runtime whether a PMU event is supported for the current machine
 */
static bool event_supported(uint16_t number)
{
    if (number > MAX_EVENT_ID) {
        return false;
    }
    return supported_event_map[number] != UNSUPPORTED_EVENT;
}

static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /* Performance monitor registers user accessibility is controlled
     * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
     * trapping to EL2 or EL3 for other accesses.
     */
    int el = arm_current_el(env);

    if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
        return CP_ACCESS_TRAP;
    }
    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

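/*
 * The PMUSERENR bits consulted by the accessors below are EN (bit 0,
 * already checked in pmreg_access() above), SW (bit 1), CR (bit 2) and
 * ER (bit 3); each selectively opens up part of the PMU to EL0.
 */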
static CPAccessResult pmreg_access_xevcntr(CPUARMState *env,
                                           const ARMCPRegInfo *ri,
                                           bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_swinc(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* SW: software increment write trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
        && !isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_selr(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* CR: cycle counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

/* Returns true if the counter (pass 31 for PMCCNTR) should count events using
 * the current EL, security state, and register configuration.
 */
static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
{
    uint64_t filter;
    bool e, p, u, nsk, nsu, nsh, m;
    bool enabled, prohibited, filtered;
    bool secure = arm_is_secure(env);
    int el = arm_current_el(env);
    uint8_t hpmn = env->cp15.mdcr_el2 & MDCR_HPMN;

    if (!arm_feature(env, ARM_FEATURE_EL2) ||
            (counter < hpmn || counter == 31)) {
        e = env->cp15.c9_pmcr & PMCRE;
    } else {
        e = env->cp15.mdcr_el2 & MDCR_HPME;
    }
    enabled = e && (env->cp15.c9_pmcnten & (1 << counter));

    if (!secure) {
        if (el == 2 && (counter < hpmn || counter == 31)) {
            prohibited = env->cp15.mdcr_el2 & MDCR_HPMD;
        } else {
            prohibited = false;
        }
    } else {
        /* In Secure state, counting is prohibited unless MDCR_EL3.SPME is set */
        prohibited = arm_feature(env, ARM_FEATURE_EL3) &&
           !(env->cp15.mdcr_el3 & MDCR_SPME);
    }

    if (prohibited && counter == 31) {
        prohibited = env->cp15.c9_pmcr & PMCRDP;
    }

    if (counter == 31) {
        filter = env->cp15.pmccfiltr_el0;
    } else {
        filter = env->cp15.c14_pmevtyper[counter];
    }

    p   = filter & PMXEVTYPER_P;
    u   = filter & PMXEVTYPER_U;
    nsk = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSK);
    nsu = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSU);
    nsh = arm_feature(env, ARM_FEATURE_EL2) && (filter & PMXEVTYPER_NSH);
    m   = arm_el_is_aa64(env, 1) &&
              arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_M);

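    /*
     * Apply the PMEVTYPER/PMCCFILTR filter for the current EL: P filters
     * EL1 and U filters EL0, while NSK/NSU/NSH/M adjust the behaviour for
     * the other security state, for EL2 and for EL3 respectively.
     */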
    if (el == 0) {
        filtered = secure ? u : u != nsu;
    } else if (el == 1) {
        filtered = secure ? p : p != nsk;
    } else if (el == 2) {
        filtered = !nsh;
    } else { /* EL3 */
        filtered = m != p;
    }

    if (counter != 31) {
        /*
         * If not checking PMCCNTR, ensure the counter is setup to an event we
         * support
         */
        uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
        if (!event_supported(event)) {
            return false;
        }
    }

    return enabled && !prohibited && !filtered;
}

static void pmu_update_irq(CPUARMState *env)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
            (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
}

/*
 * Ensure c15_ccnt is the guest-visible count so that operations such as
 * enabling/disabling the counter or filtering, modifying the count itself,
 * etc. can be done logically. This is essentially a no-op if the counter is
 * not enabled at the time of the call.
 */
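/*
 * Illustrative example of the delta scheme: if the counter is enabled while
 * the underlying cycle count is 1000, c15_ccnt_delta becomes 1000; a later
 * call when the count has reached 1500 makes the guest-visible PMCCNTR 500.
 */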
void pmccntr_op_start(CPUARMState *env)
{
    uint64_t cycles = cycles_get_count(env);

    if (pmu_counter_enabled(env, 31)) {
        uint64_t eff_cycles = cycles;
        if (env->cp15.c9_pmcr & PMCRD) {
            /* Increment once every 64 processor clock cycles */
            eff_cycles /= 64;
        }

        uint64_t new_pmccntr = eff_cycles - env->cp15.c15_ccnt_delta;

        uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ?
                                 1ull << 63 : 1ull << 31;
        if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) {
            env->cp15.c9_pmovsr |= (1 << 31);
            pmu_update_irq(env);
        }

        env->cp15.c15_ccnt = new_pmccntr;
    }
    env->cp15.c15_ccnt_delta = cycles;
}

/*
 * If PMCCNTR is enabled, recalculate the delta between the clock and the
 * guest-visible count. A call to pmccntr_op_finish should follow every call to
 * pmccntr_op_start.
 */
void pmccntr_op_finish(CPUARMState *env)
{
    if (pmu_counter_enabled(env, 31)) {
#ifndef CONFIG_USER_ONLY
        /* Calculate when the counter will next overflow */
        uint64_t remaining_cycles = -env->cp15.c15_ccnt;
        if (!(env->cp15.c9_pmcr & PMCRLC)) {
            remaining_cycles = (uint32_t)remaining_cycles;
        }
        int64_t overflow_in = cycles_ns_per(remaining_cycles);

        if (overflow_in > 0) {
            int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                overflow_in;
            ARMCPU *cpu = arm_env_get_cpu(env);
            timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
        }
#endif

        uint64_t prev_cycles = env->cp15.c15_ccnt_delta;
        if (env->cp15.c9_pmcr & PMCRD) {
            /* Increment once every 64 processor clock cycles */
            prev_cycles /= 64;
        }
        env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt;
    }
}

static void pmevcntr_op_start(CPUARMState *env, uint8_t counter)
{
    uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
    uint64_t count = 0;
    if (event_supported(event)) {
        uint16_t event_idx = supported_event_map[event];
        count = pm_events[event_idx].get_count(env);
    }

    if (pmu_counter_enabled(env, counter)) {
        uint32_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter];

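        /* Overflow is detected as bit 31 going from 1 to 0 across the update */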
        if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & INT32_MIN) {
            env->cp15.c9_pmovsr |= (1 << counter);
            pmu_update_irq(env);
        }
        env->cp15.c14_pmevcntr[counter] = new_pmevcntr;
    }
    env->cp15.c14_pmevcntr_delta[counter] = count;
}

static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter)
{
    if (pmu_counter_enabled(env, counter)) {
#ifndef CONFIG_USER_ONLY
        uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
        uint16_t event_idx = supported_event_map[event];
        uint64_t delta = UINT32_MAX -
            (uint32_t)env->cp15.c14_pmevcntr[counter] + 1;
        int64_t overflow_in = pm_events[event_idx].ns_per_count(delta);

        if (overflow_in > 0) {
            int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                overflow_in;
            ARMCPU *cpu = arm_env_get_cpu(env);
            timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
        }
#endif

        env->cp15.c14_pmevcntr_delta[counter] -=
            env->cp15.c14_pmevcntr[counter];
    }
}

void pmu_op_start(CPUARMState *env)
{
    unsigned int i;
    pmccntr_op_start(env);
    for (i = 0; i < pmu_num_counters(env); i++) {
        pmevcntr_op_start(env, i);
    }
}

void pmu_op_finish(CPUARMState *env)
{
    unsigned int i;
    pmccntr_op_finish(env);
    for (i = 0; i < pmu_num_counters(env); i++) {
        pmevcntr_op_finish(env, i);
    }
}

void pmu_pre_el_change(ARMCPU *cpu, void *ignored)
{
    pmu_op_start(&cpu->env);
}

void pmu_post_el_change(ARMCPU *cpu, void *ignored)
{
    pmu_op_finish(&cpu->env);
}

void arm_pmu_timer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    /*
     * Update all the counter values based on the current underlying counts,
     * triggering interrupts to be raised, if necessary. pmu_op_finish() also
     * has the effect of setting the cpu->pmu_timer to the next earliest time a
     * counter may expire.
     */
    pmu_op_start(&cpu->env);
    pmu_op_finish(&cpu->env);
}

static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    pmu_op_start(env);

    if (value & PMCRC) {
        /* The counter has been reset */
        env->cp15.c15_ccnt = 0;
    }

    if (value & PMCRP) {
        unsigned int i;
        for (i = 0; i < pmu_num_counters(env); i++) {
            env->cp15.c14_pmevcntr[i] = 0;
        }
    }

    /* only the DP, X, D and E bits are writable */
    env->cp15.c9_pmcr &= ~0x39;
    env->cp15.c9_pmcr |= (value & 0x39);

    pmu_op_finish(env);
}

static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    unsigned int i;
    for (i = 0; i < pmu_num_counters(env); i++) {
        /* Increment a counter's count iff: */
        if ((value & (1 << i)) && /* counter's bit is set */
                /* counter is enabled and not filtered */
                pmu_counter_enabled(env, i) &&
                /* counter is SW_INCR */
                (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
            pmevcntr_op_start(env, i);

            /*
             * Detect if this write causes an overflow since we can't predict
             * PMSWINC overflows like we can for other events
             */
            uint32_t new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;

            if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & INT32_MIN) {
                env->cp15.c9_pmovsr |= (1 << i);
                pmu_update_irq(env);
            }

            env->cp15.c14_pmevcntr[i] = new_pmswinc;

            pmevcntr_op_finish(env, i);
        }
    }
}

static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint64_t ret;
    pmccntr_op_start(env);
    ret = env->cp15.c15_ccnt;
    pmccntr_op_finish(env);
    return ret;
}

static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
     * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; we then
     * check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are actually accessed.
     */
1553     env->cp15.c9_pmselr = value & 0x1f;
1554 }
1555 
1556 static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1557                         uint64_t value)
1558 {
1559     pmccntr_op_start(env);
1560     env->cp15.c15_ccnt = value;
1561     pmccntr_op_finish(env);
1562 }
1563 
1564 static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
1565                             uint64_t value)
1566 {
1567     uint64_t cur_val = pmccntr_read(env, NULL);
1568 
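         /* deposit64() replaces only bits [31:0] of the 64-bit cycle
          * counter, so a 32-bit write preserves the upper half. */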
1569     pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
1570 }
1571 
1572 static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1573                             uint64_t value)
1574 {
1575     pmccntr_op_start(env);
1576     env->cp15.pmccfiltr_el0 = value & PMCCFILTR_EL0;
1577     pmccntr_op_finish(env);
1578 }
1579 
1580 static void pmccfiltr_write_a32(CPUARMState *env, const ARMCPRegInfo *ri,
1581                             uint64_t value)
1582 {
1583     pmccntr_op_start(env);
1584     /* M is not accessible from AArch32 */
1585     env->cp15.pmccfiltr_el0 = (env->cp15.pmccfiltr_el0 & PMCCFILTR_M) |
1586         (value & PMCCFILTR);
1587     pmccntr_op_finish(env);
1588 }
1589 
1590 static uint64_t pmccfiltr_read_a32(CPUARMState *env, const ARMCPRegInfo *ri)
1591 {
1592     /* M is not visible in AArch32 */
1593     return env->cp15.pmccfiltr_el0 & PMCCFILTR;
1594 }
1595 
1596 static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1597                             uint64_t value)
1598 {
1599     value &= pmu_counter_mask(env);
1600     env->cp15.c9_pmcnten |= value;
1601 }
1602 
1603 static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1604                              uint64_t value)
1605 {
1606     value &= pmu_counter_mask(env);
1607     env->cp15.c9_pmcnten &= ~value;
1608 }
1609 
1610 static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1611                          uint64_t value)
1612 {
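         /* PMOVSR/PMOVSCLR: each 1 bit written clears that counter's
          * overflow flag, after which the IRQ line is re-evaluated. */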
1613     value &= pmu_counter_mask(env);
1614     env->cp15.c9_pmovsr &= ~value;
1615     pmu_update_irq(env);
1616 }
1617 
1618 static void pmovsset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1619                          uint64_t value)
1620 {
1621     value &= pmu_counter_mask(env);
1622     env->cp15.c9_pmovsr |= value;
1623     pmu_update_irq(env);
1624 }
1625 
1626 static void pmevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
1627                              uint64_t value, const uint8_t counter)
1628 {
1629     if (counter == 31) {
1630         pmccfiltr_write(env, ri, value);
1631     } else if (counter < pmu_num_counters(env)) {
1632         pmevcntr_op_start(env, counter);
1633 
1634         /*
1635          * If this counter's event type is changing, store the current
1636          * underlying count for the new type in c14_pmevcntr_delta[counter] so
1637          * pmevcntr_op_finish has the correct baseline when it converts back to
1638          * a delta.
1639          */
1640         uint16_t old_event = env->cp15.c14_pmevtyper[counter] &
1641             PMXEVTYPER_EVTCOUNT;
1642         uint16_t new_event = value & PMXEVTYPER_EVTCOUNT;
1643         if (old_event != new_event) {
1644             uint64_t count = 0;
1645             if (event_supported(new_event)) {
1646                 uint16_t event_idx = supported_event_map[new_event];
1647                 count = pm_events[event_idx].get_count(env);
1648             }
1649             env->cp15.c14_pmevcntr_delta[counter] = count;
1650         }
1651 
1652         env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK;
1653         pmevcntr_op_finish(env, counter);
1654     }
1655     /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when the
1656      * PMSELR value is equal to or greater than the number of implemented
1657      * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
1658      */
1659 }
1660 
1661 static uint64_t pmevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri,
1662                                const uint8_t counter)
1663 {
1664     if (counter == 31) {
1665         return env->cp15.pmccfiltr_el0;
1666     } else if (counter < pmu_num_counters(env)) {
1667         return env->cp15.c14_pmevtyper[counter];
1668     } else {
1669         /*
1670          * We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
1671          * are CONSTRAINED UNPREDICTABLE. See comments in pmevtyper_write().
1672          */
1673         return 0;
1674     }
1675 }
1676 
1677 static void pmevtyper_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
1678                               uint64_t value)
1679 {
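         /* PMEVTYPER<n> encodes n as CRm[1:0]:opc2[2:0], so recover the
          * counter index from the register encoding. */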
1680     uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1681     pmevtyper_write(env, ri, value, counter);
1682 }
1683 
1684 static void pmevtyper_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
1685                                uint64_t value)
1686 {
1687     uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1688     env->cp15.c14_pmevtyper[counter] = value;
1689 
1690     /*
1691      * pmevtyper_rawwrite is called between a pair of pmu_op_start and
1692      * pmu_op_finish calls when loading saved state for a migration. Because
1693      * we're potentially updating the type of event here, the value written to
1694      * c14_pmevcntr_delta by the preceding pmu_op_start call may be for a
1695      * different counter type. Therefore, we need to set this value to the
1696      * current count for the counter type we're writing so that pmu_op_finish
1697      * has the correct count for its calculation.
1698      */
1699     uint16_t event = value & PMXEVTYPER_EVTCOUNT;
1700     if (event_supported(event)) {
1701         uint16_t event_idx = supported_event_map[event];
1702         env->cp15.c14_pmevcntr_delta[counter] =
1703             pm_events[event_idx].get_count(env);
1704     }
1705 }
1706 
1707 static uint64_t pmevtyper_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
1708 {
1709     uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1710     return pmevtyper_read(env, ri, counter);
1711 }
1712 
1713 static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
1714                              uint64_t value)
1715 {
1716     pmevtyper_write(env, ri, value, env->cp15.c9_pmselr & 31);
1717 }
1718 
1719 static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
1720 {
1721     return pmevtyper_read(env, ri, env->cp15.c9_pmselr & 31);
1722 }
1723 
1724 static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1725                              uint64_t value, uint8_t counter)
1726 {
1727     if (counter < pmu_num_counters(env)) {
1728         pmevcntr_op_start(env, counter);
1729         env->cp15.c14_pmevcntr[counter] = value;
1730         pmevcntr_op_finish(env, counter);
1731     }
1732     /*
1733      * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
1734      * are CONSTRAINED UNPREDICTABLE.
1735      */
1736 }
1737 
1738 static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri,
1739                               uint8_t counter)
1740 {
1741     if (counter < pmu_num_counters(env)) {
1742         uint64_t ret;
1743         pmevcntr_op_start(env, counter);
1744         ret = env->cp15.c14_pmevcntr[counter];
1745         pmevcntr_op_finish(env, counter);
1746         return ret;
1747     } else {
1748         /* We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
1749          * are CONSTRAINED UNPREDICTABLE. */
1750         return 0;
1751     }
1752 }
1753 
1754 static void pmevcntr_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
1755                              uint64_t value)
1756 {
1757     uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1758     pmevcntr_write(env, ri, value, counter);
1759 }
1760 
1761 static uint64_t pmevcntr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
1762 {
1763     uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1764     return pmevcntr_read(env, ri, counter);
1765 }
1766 
1767 static void pmevcntr_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
1768                              uint64_t value)
1769 {
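         /* Raw accesses are used only for state save/load (e.g. migration),
          * where the counter index is expected to be in range; hence the
          * assert rather than the usual RAZ/WI behavior. */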
1770     uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1771     assert(counter < pmu_num_counters(env));
1772     env->cp15.c14_pmevcntr[counter] = value;
1773     pmevcntr_write(env, ri, value, counter);
1774 }
1775 
1776 static uint64_t pmevcntr_rawread(CPUARMState *env, const ARMCPRegInfo *ri)
1777 {
1778     uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1779     assert(counter < pmu_num_counters(env));
1780     return env->cp15.c14_pmevcntr[counter];
1781 }
1782 
1783 static void pmxevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1784                              uint64_t value)
1785 {
1786     pmevcntr_write(env, ri, value, env->cp15.c9_pmselr & 31);
1787 }
1788 
1789 static uint64_t pmxevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1790 {
1791     return pmevcntr_read(env, ri, env->cp15.c9_pmselr & 31);
1792 }
1793 
1794 static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1795                             uint64_t value)
1796 {
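         /* v8 adds the SW, CR and ER enable bits alongside the original EN
          * bit, hence the wider mask. */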
1797     if (arm_feature(env, ARM_FEATURE_V8)) {
1798         env->cp15.c9_pmuserenr = value & 0xf;
1799     } else {
1800         env->cp15.c9_pmuserenr = value & 1;
1801     }
1802 }
1803 
1804 static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1805                              uint64_t value)
1806 {
1807     /* Only bits for implemented counters (including C) can be set */
1808     value &= pmu_counter_mask(env);
1809     env->cp15.c9_pminten |= value;
1810     pmu_update_irq(env);
1811 }
1812 
1813 static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1814                              uint64_t value)
1815 {
1816     value &= pmu_counter_mask(env);
1817     env->cp15.c9_pminten &= ~value;
1818     pmu_update_irq(env);
1819 }
1820 
1821 static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
1822                        uint64_t value)
1823 {
1824     /* Note that even though the AArch64 view of this register has bits
1825      * [10:0] all RES0 we can only mask the bottom 5, to comply with the
1826      * architectural requirements for bits which are RES0 only in some
1827      * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
1828      * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
1829      */
1830     raw_write(env, ri, value & ~0x1FULL);
1831 }
1832 
1833 static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
1834 {
1835     /* Begin with base v8.0 state.  */
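         /* 0x3fff covers bits [13:0] (NS up through TWE), the SCR bits
          * defined by the base v8.0 architecture. */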
1836     uint32_t valid_mask = 0x3fff;
1837     ARMCPU *cpu = arm_env_get_cpu(env);
1838 
1839     if (arm_el_is_aa64(env, 3)) {
1840         value |= SCR_FW | SCR_AW;   /* these two bits are RES1.  */
1841         valid_mask &= ~SCR_NET;
1842     } else {
1843         valid_mask &= ~(SCR_RW | SCR_ST);
1844     }
1845 
1846     if (!arm_feature(env, ARM_FEATURE_EL2)) {
1847         valid_mask &= ~SCR_HCE;
1848 
1849         /* On ARMv7, SMD (or SCD as it is called in v7) is only
1850          * supported if EL2 exists. The bit is UNK/SBZP when
1851          * EL2 is unavailable. In QEMU's ARMv7 emulation, we force it
1852          * to zero when EL2 is unavailable.
1853          * On ARMv8, this bit is always available.
1854          */
1855         if (arm_feature(env, ARM_FEATURE_V7) &&
1856             !arm_feature(env, ARM_FEATURE_V8)) {
1857             valid_mask &= ~SCR_SMD;
1858         }
1859     }
1860     if (cpu_isar_feature(aa64_lor, cpu)) {
1861         valid_mask |= SCR_TLOR;
1862     }
1863     if (cpu_isar_feature(aa64_pauth, cpu)) {
1864         valid_mask |= SCR_API | SCR_APK;
1865     }
1866 
1867     /* Clear all-context RES0 bits.  */
1868     value &= valid_mask;
1869     raw_write(env, ri, value);
1870 }
1871 
1872 static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1873 {
1874     ARMCPU *cpu = arm_env_get_cpu(env);
1875 
1876     /* Acquire the CSSELR index from the bank corresponding to the CCSIDR
1877      * bank
1878      */
1879     uint32_t index = A32_BANKED_REG_GET(env, csselr,
1880                                         ri->secure & ARM_CP_SECSTATE_S);
1881 
1882     return cpu->ccsidr[index];
1883 }
1884 
1885 static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1886                          uint64_t value)
1887 {
1888     raw_write(env, ri, value & 0xf);
1889 }
1890 
1891 static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1892 {
1893     CPUState *cs = ENV_GET_CPU(env);
1894     uint64_t hcr_el2 = arm_hcr_el2_eff(env);
1895     uint64_t ret = 0;
1896 
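         /* When HCR_EL2.IMO/FMO route IRQ/FIQ to EL2, ISR reports the
          * pending state of the corresponding virtual interrupt instead of
          * the physical one. */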
1897     if (hcr_el2 & HCR_IMO) {
1898         if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
1899             ret |= CPSR_I;
1900         }
1901     } else {
1902         if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
1903             ret |= CPSR_I;
1904         }
1905     }
1906 
1907     if (hcr_el2 & HCR_FMO) {
1908         if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) {
1909             ret |= CPSR_F;
1910         }
1911     } else {
1912         if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
1913             ret |= CPSR_F;
1914         }
1915     }
1916 
1917     /* External aborts are not possible in QEMU so A bit is always clear */
1918     return ret;
1919 }
1920 
1921 static const ARMCPRegInfo v7_cp_reginfo[] = {
1922     /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
1923     { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
1924       .access = PL1_W, .type = ARM_CP_NOP },
1925     /* Performance monitors are implementation defined in v7,
1926      * but with an ARM recommended set of registers, which we
1927      * follow.
1928      *
1929      * Performance registers fall into three categories:
1930      *  (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
1931      *  (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
1932      *  (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
1933      * For the cases controlled by PMUSERENR we must set .access to PL0_RW
1934      * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
1935      */
1936     { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
1937       .access = PL0_RW, .type = ARM_CP_ALIAS,
1938       .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
1939       .writefn = pmcntenset_write,
1940       .accessfn = pmreg_access,
1941       .raw_writefn = raw_write },
1942     { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64,
1943       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
1944       .access = PL0_RW, .accessfn = pmreg_access,
1945       .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
1946       .writefn = pmcntenset_write, .raw_writefn = raw_write },
1947     { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
1948       .access = PL0_RW,
1949       .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
1950       .accessfn = pmreg_access,
1951       .writefn = pmcntenclr_write,
1952       .type = ARM_CP_ALIAS },
1953     { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
1954       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
1955       .access = PL0_RW, .accessfn = pmreg_access,
1956       .type = ARM_CP_ALIAS,
1957       .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
1958       .writefn = pmcntenclr_write },
1959     { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
1960       .access = PL0_RW, .type = ARM_CP_IO,
1961       .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
1962       .accessfn = pmreg_access,
1963       .writefn = pmovsr_write,
1964       .raw_writefn = raw_write },
1965     { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
1966       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
1967       .access = PL0_RW, .accessfn = pmreg_access,
1968       .type = ARM_CP_ALIAS | ARM_CP_IO,
1969       .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
1970       .writefn = pmovsr_write,
1971       .raw_writefn = raw_write },
1972     { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
1973       .access = PL0_W, .accessfn = pmreg_access_swinc,
1974       .type = ARM_CP_NO_RAW | ARM_CP_IO,
1975       .writefn = pmswinc_write },
1976     { .name = "PMSWINC_EL0", .state = ARM_CP_STATE_AA64,
1977       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 4,
1978       .access = PL0_W, .accessfn = pmreg_access_swinc,
1979       .type = ARM_CP_NO_RAW | ARM_CP_IO,
1980       .writefn = pmswinc_write },
1981     { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
1982       .access = PL0_RW, .type = ARM_CP_ALIAS,
1983       .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr),
1984       .accessfn = pmreg_access_selr, .writefn = pmselr_write,
1985       .raw_writefn = raw_write},
1986     { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64,
1987       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5,
1988       .access = PL0_RW, .accessfn = pmreg_access_selr,
1989       .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr),
1990       .writefn = pmselr_write, .raw_writefn = raw_write, },
1991     { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
1992       .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO,
1993       .readfn = pmccntr_read, .writefn = pmccntr_write32,
1994       .accessfn = pmreg_access_ccntr },
1995     { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
1996       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
1997       .access = PL0_RW, .accessfn = pmreg_access_ccntr,
1998       .type = ARM_CP_IO,
1999       .fieldoffset = offsetof(CPUARMState, cp15.c15_ccnt),
2000       .readfn = pmccntr_read, .writefn = pmccntr_write,
2001       .raw_readfn = raw_read, .raw_writefn = raw_write, },
2002     { .name = "PMCCFILTR", .cp = 15, .opc1 = 0, .crn = 14, .crm = 15, .opc2 = 7,
2003       .writefn = pmccfiltr_write_a32, .readfn = pmccfiltr_read_a32,
2004       .access = PL0_RW, .accessfn = pmreg_access,
2005       .type = ARM_CP_ALIAS | ARM_CP_IO,
2006       .resetvalue = 0, },
2007     { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
2008       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
2009       .writefn = pmccfiltr_write, .raw_writefn = raw_write,
2010       .access = PL0_RW, .accessfn = pmreg_access,
2011       .type = ARM_CP_IO,
2012       .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
2013       .resetvalue = 0, },
2014     { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
2015       .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2016       .accessfn = pmreg_access,
2017       .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
2018     { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
2019       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1,
2020       .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2021       .accessfn = pmreg_access,
2022       .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
2023     { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
2024       .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2025       .accessfn = pmreg_access_xevcntr,
2026       .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
2027     { .name = "PMXEVCNTR_EL0", .state = ARM_CP_STATE_AA64,
2028       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 2,
2029       .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2030       .accessfn = pmreg_access_xevcntr,
2031       .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
2032     { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
2033       .access = PL0_R | PL1_RW, .accessfn = access_tpm,
2034       .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr),
2035       .resetvalue = 0,
2036       .writefn = pmuserenr_write, .raw_writefn = raw_write },
2037     { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
2038       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
2039       .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
2040       .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
2041       .resetvalue = 0,
2042       .writefn = pmuserenr_write, .raw_writefn = raw_write },
2043     { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
2044       .access = PL1_RW, .accessfn = access_tpm,
2045       .type = ARM_CP_ALIAS | ARM_CP_IO,
2046       .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten),
2047       .resetvalue = 0,
2048       .writefn = pmintenset_write, .raw_writefn = raw_write },
2049     { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64,
2050       .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1,
2051       .access = PL1_RW, .accessfn = access_tpm,
2052       .type = ARM_CP_IO,
2053       .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
2054       .writefn = pmintenset_write, .raw_writefn = raw_write,
2055       .resetvalue = 0x0 },
2056     { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
2057       .access = PL1_RW, .accessfn = access_tpm,
2058       .type = ARM_CP_ALIAS | ARM_CP_IO,
2059       .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
2060       .writefn = pmintenclr_write, },
2061     { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
2062       .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
2063       .access = PL1_RW, .accessfn = access_tpm,
2064       .type = ARM_CP_ALIAS | ARM_CP_IO,
2065       .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
2066       .writefn = pmintenclr_write },
2067     { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
2068       .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
2069       .access = PL1_R, .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
2070     { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
2071       .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
2072       .access = PL1_RW, .writefn = csselr_write, .resetvalue = 0,
2073       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
2074                              offsetof(CPUARMState, cp15.csselr_ns) } },
2075     /* Auxiliary ID register: this actually has an IMPDEF value but for now
2076      * we just RAZ for all cores.
2077      */
2078     { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
2079       .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
2080       .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
2081     /* Auxiliary fault status registers: these also are IMPDEF, and we
2082      * choose to RAZ/WI for all cores.
2083      */
2084     { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
2085       .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
2086       .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
2087     { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
2088       .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
2089       .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
2090     /* MAIR can just read-as-written because we don't implement caches
2091      * and so don't need to care about memory attributes.
2092      */
2093     { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
2094       .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
2095       .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
2096       .resetvalue = 0 },
2097     { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
2098       .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
2099       .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
2100       .resetvalue = 0 },
2101     /* For non-long-descriptor page tables these are PRRR and NMRR;
2102      * either way they still act as reads-as-written for QEMU.
2103      */
2104     /* MAIR0/1 are defined separately from their 64-bit counterpart so
2105      * that each can assign the correct fieldoffset based on the
2106      * endianness handled in the field definitions.
2107      */
2108     { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
2109       .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, .access = PL1_RW,
2110       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
2111                              offsetof(CPUARMState, cp15.mair0_ns) },
2112       .resetfn = arm_cp_reset_ignore },
2113     { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
2114       .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1, .access = PL1_RW,
2115       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
2116                              offsetof(CPUARMState, cp15.mair1_ns) },
2117       .resetfn = arm_cp_reset_ignore },
2118     { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
2119       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
2120       .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
2121     /* 32 bit ITLB invalidates */
2122     { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
2123       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
2124     { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
2125       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
2126     { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
2127       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
2128     /* 32 bit DTLB invalidates */
2129     { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
2130       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
2131     { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
2132       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
2133     { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
2134       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
2135     /* 32 bit TLB invalidates */
2136     { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
2137       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
2138     { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
2139       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
2140     { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
2141       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
2142     { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
2143       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
2144     REGINFO_SENTINEL
2145 };
2146 
2147 static const ARMCPRegInfo v7mp_cp_reginfo[] = {
2148     /* 32 bit TLB invalidates, Inner Shareable */
2149     { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
2150       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_is_write },
2151     { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
2152       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
2153     { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
2154       .type = ARM_CP_NO_RAW, .access = PL1_W,
2155       .writefn = tlbiasid_is_write },
2156     { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
2157       .type = ARM_CP_NO_RAW, .access = PL1_W,
2158       .writefn = tlbimvaa_is_write },
2159     REGINFO_SENTINEL
2160 };
2161 
2162 static const ARMCPRegInfo pmovsset_cp_reginfo[] = {
2163     /* PMOVSSET is not implemented in v7 before v7ve */
2164     { .name = "PMOVSSET", .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 3,
2165       .access = PL0_RW, .accessfn = pmreg_access,
2166       .type = ARM_CP_ALIAS | ARM_CP_IO,
2167       .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
2168       .writefn = pmovsset_write,
2169       .raw_writefn = raw_write },
2170     { .name = "PMOVSSET_EL0", .state = ARM_CP_STATE_AA64,
2171       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 3,
2172       .access = PL0_RW, .accessfn = pmreg_access,
2173       .type = ARM_CP_ALIAS | ARM_CP_IO,
2174       .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
2175       .writefn = pmovsset_write,
2176       .raw_writefn = raw_write },
2177     REGINFO_SENTINEL
2178 };
2179 
2180 static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2181                         uint64_t value)
2182 {
2183     value &= 1;
2184     env->teecr = value;
2185 }
2186 
2187 static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
2188                                     bool isread)
2189 {
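         /* TEECR.XED (bit 0) set means unprivileged (EL0) accesses to
          * TEEHBR are trapped. */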
2190     if (arm_current_el(env) == 0 && (env->teecr & 1)) {
2191         return CP_ACCESS_TRAP;
2192     }
2193     return CP_ACCESS_OK;
2194 }
2195 
2196 static const ARMCPRegInfo t2ee_cp_reginfo[] = {
2197     { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
2198       .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
2199       .resetvalue = 0,
2200       .writefn = teecr_write },
2201     { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
2202       .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
2203       .accessfn = teehbr_access, .resetvalue = 0 },
2204     REGINFO_SENTINEL
2205 };
2206 
2207 static const ARMCPRegInfo v6k_cp_reginfo[] = {
2208     { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
2209       .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
2210       .access = PL0_RW,
2211       .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
2212     { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
2213       .access = PL0_RW,
2214       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
2215                              offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
2216       .resetfn = arm_cp_reset_ignore },
2217     { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
2218       .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
2219       .access = PL0_R|PL1_W,
2220       .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
2221       .resetvalue = 0},
2222     { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
2223       .access = PL0_R|PL1_W,
2224       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
2225                              offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
2226       .resetfn = arm_cp_reset_ignore },
2227     { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
2228       .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
2229       .access = PL1_RW,
2230       .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
2231     { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
2232       .access = PL1_RW,
2233       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
2234                              offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
2235       .resetvalue = 0 },
2236     REGINFO_SENTINEL
2237 };
2238 
2239 #ifndef CONFIG_USER_ONLY
2240 
2241 static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
2242                                        bool isread)
2243 {
2244     /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
2245      * Writable only at the highest implemented exception level.
2246      */
2247     int el = arm_current_el(env);
2248 
2249     switch (el) {
2250     case 0:
2251         if (!extract32(env->cp15.c14_cntkctl, 0, 2)) {
2252             return CP_ACCESS_TRAP;
2253         }
2254         break;
2255     case 1:
2256         if (!isread && ri->state == ARM_CP_STATE_AA32 &&
2257             arm_is_secure_below_el3(env)) {
2258             /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
2259             return CP_ACCESS_TRAP_UNCATEGORIZED;
2260         }
2261         break;
2262     case 2:
2263     case 3:
2264         break;
2265     }
2266 
2267     if (!isread && el < arm_highest_el(env)) {
2268         return CP_ACCESS_TRAP_UNCATEGORIZED;
2269     }
2270 
2271     return CP_ACCESS_OK;
2272 }
2273 
2274 static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
2275                                         bool isread)
2276 {
2277     unsigned int cur_el = arm_current_el(env);
2278     bool secure = arm_is_secure(env);
2279 
2280     /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
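         /* EL0PCTEN is CNTKCTL bit 0 and EL0VCTEN is bit 1; with
          * GTIMER_PHYS == 0 and GTIMER_VIRT == 1, timeridx is the bit
          * number to test. */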
2281     if (cur_el == 0 &&
2282         !extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
2283         return CP_ACCESS_TRAP;
2284     }
2285 
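         /* CNTHCTL_EL2.EL1PCTEN (bit 0) gates Non-secure EL0/EL1 access to
          * the physical counter; when it is clear, the access traps to
          * EL2. */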
2286     if (arm_feature(env, ARM_FEATURE_EL2) &&
2287         timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
2288         !extract32(env->cp15.cnthctl_el2, 0, 1)) {
2289         return CP_ACCESS_TRAP_EL2;
2290     }
2291     return CP_ACCESS_OK;
2292 }
2293 
2294 static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
2295                                       bool isread)
2296 {
2297     unsigned int cur_el = arm_current_el(env);
2298     bool secure = arm_is_secure(env);
2299 
2300     /* CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from PL0 if
2301      * EL0[PV]TEN is zero.
2302      */
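         /* CNTKCTL.EL0PTEN is bit 9 and EL0VTEN is bit 8; since GTIMER_PHYS
          * is 0 and GTIMER_VIRT is 1, "9 - timeridx" selects the right
          * bit. */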
2303     if (cur_el == 0 &&
2304         !extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
2305         return CP_ACCESS_TRAP;
2306     }
2307 
2308     if (arm_feature(env, ARM_FEATURE_EL2) &&
2309         timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
2310         !extract32(env->cp15.cnthctl_el2, 1, 1)) {
2311         return CP_ACCESS_TRAP_EL2;
2312     }
2313     return CP_ACCESS_OK;
2314 }
2315 
2316 static CPAccessResult gt_pct_access(CPUARMState *env,
2317                                     const ARMCPRegInfo *ri,
2318                                     bool isread)
2319 {
2320     return gt_counter_access(env, GTIMER_PHYS, isread);
2321 }
2322 
2323 static CPAccessResult gt_vct_access(CPUARMState *env,
2324                                     const ARMCPRegInfo *ri,
2325                                     bool isread)
2326 {
2327     return gt_counter_access(env, GTIMER_VIRT, isread);
2328 }
2329 
2330 static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
2331                                        bool isread)
2332 {
2333     return gt_timer_access(env, GTIMER_PHYS, isread);
2334 }
2335 
2336 static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
2337                                        bool isread)
2338 {
2339     return gt_timer_access(env, GTIMER_VIRT, isread);
2340 }
2341 
2342 static CPAccessResult gt_stimer_access(CPUARMState *env,
2343                                        const ARMCPRegInfo *ri,
2344                                        bool isread)
2345 {
2346     /* The AArch64 register view of the secure physical timer is
2347      * always accessible from EL3, and configurably accessible from
2348      * Secure EL1.
2349      */
2350     switch (arm_current_el(env)) {
2351     case 1:
2352         if (!arm_is_secure(env)) {
2353             return CP_ACCESS_TRAP;
2354         }
2355         if (!(env->cp15.scr_el3 & SCR_ST)) {
2356             return CP_ACCESS_TRAP_EL3;
2357         }
2358         return CP_ACCESS_OK;
2359     case 0:
2360     case 2:
2361         return CP_ACCESS_TRAP;
2362     case 3:
2363         return CP_ACCESS_OK;
2364     default:
2365         g_assert_not_reached();
2366     }
2367 }
2368 
2369 static uint64_t gt_get_countervalue(CPUARMState *env)
2370 {
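         /* GTIMER_SCALE is the number of nanoseconds per counter tick,
          * matching the fixed CNTFRQ value we advertise to the guest. */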
2371     return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / GTIMER_SCALE;
2372 }
2373 
2374 static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
2375 {
2376     ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];
2377 
2378     if (gt->ctl & 1) {
2379         /* Timer enabled: calculate and set current ISTATUS, irq, and
2380          * reset timer to when ISTATUS next has to change
2381          */
2382         uint64_t offset = timeridx == GTIMER_VIRT ?
2383                                       cpu->env.cp15.cntvoff_el2 : 0;
2384         uint64_t count = gt_get_countervalue(&cpu->env);
2385         /* Note that this must be unsigned 64 bit arithmetic: */
2386         int istatus = count - offset >= gt->cval;
2387         uint64_t nexttick;
2388         int irqstate;
2389 
2390         gt->ctl = deposit32(gt->ctl, 2, 1, istatus);
2391 
2392         irqstate = (istatus && !(gt->ctl & 2));
2393         qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
2394 
2395         if (istatus) {
2396             /* Next transition is when count rolls back over to zero */
2397             nexttick = UINT64_MAX;
2398         } else {
2399             /* Next transition is when we hit cval */
2400             nexttick = gt->cval + offset;
2401         }
2402         /* Note that the desired next expiry time might be beyond the
2403          * signed-64-bit range of a QEMUTimer -- in this case we just
2404          * set the timer for as far in the future as possible. When the
2405          * timer expires we will reset the timer for any remaining period.
2406          */
2407         if (nexttick > INT64_MAX / GTIMER_SCALE) {
2408             nexttick = INT64_MAX / GTIMER_SCALE;
2409         }
2410         timer_mod(cpu->gt_timer[timeridx], nexttick);
2411         trace_arm_gt_recalc(timeridx, irqstate, nexttick);
2412     } else {
2413         /* Timer disabled: ISTATUS and timer output always clear */
2414         gt->ctl &= ~4;
2415         qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
2416         timer_del(cpu->gt_timer[timeridx]);
2417         trace_arm_gt_recalc_disabled(timeridx);
2418     }
2419 }
2420 
2421 static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
2422                            int timeridx)
2423 {
2424     ARMCPU *cpu = arm_env_get_cpu(env);
2425 
2426     timer_del(cpu->gt_timer[timeridx]);
2427 }
2428 
2429 static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
2430 {
2431     return gt_get_countervalue(env);
2432 }
2433 
2434 static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
2435 {
2436     return gt_get_countervalue(env) - env->cp15.cntvoff_el2;
2437 }
2438 
2439 static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2440                           int timeridx,
2441                           uint64_t value)
2442 {
2443     trace_arm_gt_cval_write(timeridx, value);
2444     env->cp15.c14_timer[timeridx].cval = value;
2445     gt_recalc_timer(arm_env_get_cpu(env), timeridx);
2446 }
2447 
2448 static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
2449                              int timeridx)
2450 {
2451     uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0;
2452 
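         /* TVAL reads as the signed 32-bit distance from the
          * (offset-adjusted) counter to the compare value: a downcounter
          * that reaches zero when the timer fires. */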
2453     return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
2454                       (gt_get_countervalue(env) - offset));
2455 }
2456 
2457 static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2458                           int timeridx,
2459                           uint64_t value)
2460 {
2461     uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0;
2462 
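         /* Writing TVAL sets CVAL = counter + TVAL; the value is
          * sign-extended, so a negative TVAL places CVAL in the past and
          * the timer fires immediately. */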
2463     trace_arm_gt_tval_write(timeridx, value);
2464     env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
2465                                          sextract64(value, 0, 32);
2466     gt_recalc_timer(arm_env_get_cpu(env), timeridx);
2467 }
2468 
2469 static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2470                          int timeridx,
2471                          uint64_t value)
2472 {
2473     ARMCPU *cpu = arm_env_get_cpu(env);
2474     uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;
2475 
2476     trace_arm_gt_ctl_write(timeridx, value);
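         /* Only ENABLE (bit 0) and IMASK (bit 1) are writable here;
          * deposit64 preserves the read-only ISTATUS bit (bit 2). */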
2477     env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
2478     if ((oldval ^ value) & 1) {
2479         /* Enable toggled */
2480         gt_recalc_timer(cpu, timeridx);
2481     } else if ((oldval ^ value) & 2) {
2482         /* IMASK toggled: don't need to recalculate,
2483          * just set the interrupt line based on ISTATUS
2484          */
2485         int irqstate = (oldval & 4) && !(value & 2);
2486 
2487         trace_arm_gt_imask_toggle(timeridx, irqstate);
2488         qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
2489     }
2490 }
2491 
2492 static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2493 {
2494     gt_timer_reset(env, ri, GTIMER_PHYS);
2495 }
2496 
2497 static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2498                                uint64_t value)
2499 {
2500     gt_cval_write(env, ri, GTIMER_PHYS, value);
2501 }
2502 
2503 static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
2504 {
2505     return gt_tval_read(env, ri, GTIMER_PHYS);
2506 }
2507 
2508 static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2509                                uint64_t value)
2510 {
2511     gt_tval_write(env, ri, GTIMER_PHYS, value);
2512 }
2513 
2514 static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2515                               uint64_t value)
2516 {
2517     gt_ctl_write(env, ri, GTIMER_PHYS, value);
2518 }
2519 
2520 static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2521 {
2522     gt_timer_reset(env, ri, GTIMER_VIRT);
2523 }
2524 
2525 static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2526                                uint64_t value)
2527 {
2528     gt_cval_write(env, ri, GTIMER_VIRT, value);
2529 }
2530 
2531 static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
2532 {
2533     return gt_tval_read(env, ri, GTIMER_VIRT);
2534 }
2535 
2536 static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2537                                uint64_t value)
2538 {
2539     gt_tval_write(env, ri, GTIMER_VIRT, value);
2540 }
2541 
2542 static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2543                               uint64_t value)
2544 {
2545     gt_ctl_write(env, ri, GTIMER_VIRT, value);
2546 }
2547 
2548 static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
2549                               uint64_t value)
2550 {
2551     ARMCPU *cpu = arm_env_get_cpu(env);
2552 
2553     trace_arm_gt_cntvoff_write(value);
2554     raw_write(env, ri, value);
2555     gt_recalc_timer(cpu, GTIMER_VIRT);
2556 }
2557 
2558 static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2559 {
2560     gt_timer_reset(env, ri, GTIMER_HYP);
2561 }
2562 
2563 static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2564                               uint64_t value)
2565 {
2566     gt_cval_write(env, ri, GTIMER_HYP, value);
2567 }
2568 
2569 static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
2570 {
2571     return gt_tval_read(env, ri, GTIMER_HYP);
2572 }
2573 
2574 static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2575                               uint64_t value)
2576 {
2577     gt_tval_write(env, ri, GTIMER_HYP, value);
2578 }
2579 
2580 static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2581                               uint64_t value)
2582 {
2583     gt_ctl_write(env, ri, GTIMER_HYP, value);
2584 }
2585 
2586 static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2587 {
2588     gt_timer_reset(env, ri, GTIMER_SEC);
2589 }
2590 
2591 static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2592                               uint64_t value)
2593 {
2594     gt_cval_write(env, ri, GTIMER_SEC, value);
2595 }
2596 
2597 static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
2598 {
2599     return gt_tval_read(env, ri, GTIMER_SEC);
2600 }
2601 
2602 static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2603                               uint64_t value)
2604 {
2605     gt_tval_write(env, ri, GTIMER_SEC, value);
2606 }
2607 
2608 static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2609                               uint64_t value)
2610 {
2611     gt_ctl_write(env, ri, GTIMER_SEC, value);
2612 }
2613 
2614 void arm_gt_ptimer_cb(void *opaque)
2615 {
2616     ARMCPU *cpu = opaque;
2617 
2618     gt_recalc_timer(cpu, GTIMER_PHYS);
2619 }
2620 
2621 void arm_gt_vtimer_cb(void *opaque)
2622 {
2623     ARMCPU *cpu = opaque;
2624 
2625     gt_recalc_timer(cpu, GTIMER_VIRT);
2626 }
2627 
2628 void arm_gt_htimer_cb(void *opaque)
2629 {
2630     ARMCPU *cpu = opaque;
2631 
2632     gt_recalc_timer(cpu, GTIMER_HYP);
2633 }
2634 
2635 void arm_gt_stimer_cb(void *opaque)
2636 {
2637     ARMCPU *cpu = opaque;
2638 
2639     gt_recalc_timer(cpu, GTIMER_SEC);
2640 }
2641 
2642 static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
2643     /* Note that CNTFRQ is purely reads-as-written for the benefit
2644      * of software; writing it doesn't actually change the timer frequency.
2645      * Our reset value matches the fixed frequency we implement the timer at.
2646      */
2647     { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
2648       .type = ARM_CP_ALIAS,
2649       .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
2650       .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
2651     },
2652     { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
2653       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
2654       .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
2655       .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
2656       .resetvalue = (1000 * 1000 * 1000) / GTIMER_SCALE,
2657     },
2658     /* overall control: mostly access permissions */
2659     { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
2660       .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
2661       .access = PL1_RW,
2662       .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
2663       .resetvalue = 0,
2664     },
2665     /* per-timer control */
2666     { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
2667       .secure = ARM_CP_SECSTATE_NS,
2668       .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
2669       .accessfn = gt_ptimer_access,
2670       .fieldoffset = offsetoflow32(CPUARMState,
2671                                    cp15.c14_timer[GTIMER_PHYS].ctl),
2672       .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
2673     },
2674     { .name = "CNTP_CTL_S",
2675       .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
2676       .secure = ARM_CP_SECSTATE_S,
2677       .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
2678       .accessfn = gt_ptimer_access,
2679       .fieldoffset = offsetoflow32(CPUARMState,
2680                                    cp15.c14_timer[GTIMER_SEC].ctl),
2681       .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
2682     },
2683     { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
2684       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
2685       .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
2686       .accessfn = gt_ptimer_access,
2687       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
2688       .resetvalue = 0,
2689       .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
2690     },
2691     { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
2692       .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
2693       .accessfn = gt_vtimer_access,
2694       .fieldoffset = offsetoflow32(CPUARMState,
2695                                    cp15.c14_timer[GTIMER_VIRT].ctl),
2696       .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
2697     },
2698     { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
2699       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
2700       .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
2701       .accessfn = gt_vtimer_access,
2702       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
2703       .resetvalue = 0,
2704       .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
2705     },
2706     /* TimerValue views: a 32 bit downcounting view of the underlying state */
2707     { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
2708       .secure = ARM_CP_SECSTATE_NS,
2709       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
2710       .accessfn = gt_ptimer_access,
2711       .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
2712     },
2713     { .name = "CNTP_TVAL_S",
2714       .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
2715       .secure = ARM_CP_SECSTATE_S,
2716       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
2717       .accessfn = gt_ptimer_access,
2718       .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write,
2719     },
2720     { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
2721       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
2722       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
2723       .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset,
2724       .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
2725     },
2726     { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
2727       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
2728       .accessfn = gt_vtimer_access,
2729       .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
2730     },
2731     { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
2732       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
2733       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
2734       .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset,
2735       .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
2736     },
2737     /* The counter itself */
2738     { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
2739       .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
2740       .accessfn = gt_pct_access,
2741       .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
2742     },
2743     { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
2744       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
2745       .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2746       .accessfn = gt_pct_access, .readfn = gt_cnt_read,
2747     },
2748     { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
2749       .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
2750       .accessfn = gt_vct_access,
2751       .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
2752     },
2753     { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
2754       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
2755       .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2756       .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
2757     },
2758     /* Comparison value, indicating when the timer goes off */
2759     { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
2760       .secure = ARM_CP_SECSTATE_NS,
2761       .access = PL1_RW | PL0_R,
2762       .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
2763       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
2764       .accessfn = gt_ptimer_access,
2765       .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
2766     },
2767     { .name = "CNTP_CVAL_S", .cp = 15, .crm = 14, .opc1 = 2,
2768       .secure = ARM_CP_SECSTATE_S,
2769       .access = PL1_RW | PL0_R,
2770       .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
2771       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
2772       .accessfn = gt_ptimer_access,
2773       .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
2774     },
2775     { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
2776       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
2777       .access = PL1_RW | PL0_R,
2778       .type = ARM_CP_IO,
2779       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
2780       .resetvalue = 0, .accessfn = gt_ptimer_access,
2781       .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
2782     },
2783     { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
2784       .access = PL1_RW | PL0_R,
2785       .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
2786       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
2787       .accessfn = gt_vtimer_access,
2788       .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
2789     },
2790     { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
2791       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
2792       .access = PL1_RW | PL0_R,
2793       .type = ARM_CP_IO,
2794       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
2795       .resetvalue = 0, .accessfn = gt_vtimer_access,
2796       .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
2797     },
2798     /* Secure timer -- this is actually restricted to only EL3
2799      * and configurably Secure-EL1 via the accessfn.
2800      */
2801     { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64,
2802       .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0,
2803       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW,
2804       .accessfn = gt_stimer_access,
2805       .readfn = gt_sec_tval_read,
2806       .writefn = gt_sec_tval_write,
2807       .resetfn = gt_sec_timer_reset,
2808     },
2809     { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64,
2810       .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1,
2811       .type = ARM_CP_IO, .access = PL1_RW,
2812       .accessfn = gt_stimer_access,
2813       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl),
2814       .resetvalue = 0,
2815       .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
2816     },
2817     { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64,
2818       .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2,
2819       .type = ARM_CP_IO, .access = PL1_RW,
2820       .accessfn = gt_stimer_access,
2821       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
2822       .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
2823     },
2824     REGINFO_SENTINEL
2825 };
2826 
2827 #else
2828 
2829 /* In user-mode most of the generic timer registers are inaccessible;
2830  * however, modern kernels (4.12+) allow access to cntvct_el0.
2831  */
2832 
2833 static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
2834 {
2835     /* Currently we have no support for QEMUTimer in linux-user, so we
2836      * can't call gt_get_countervalue(env); instead we directly call the
2837      * lower-level functions.
2838      */
2839     return cpu_get_clock() / GTIMER_SCALE;
2840 }
2841 
2842 static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
2843     { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
2844       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
2845       .type = ARM_CP_CONST, .access = PL0_R /* no PL1_RW in linux-user */,
2846       .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
2847       .resetvalue = NANOSECONDS_PER_SECOND / GTIMER_SCALE,
2848     },
2849     { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
2850       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
2851       .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2852       .readfn = gt_virt_cnt_read,
2853     },
2854     REGINFO_SENTINEL
2855 };
2856 
2857 #endif
2858 
2859 static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
2860 {
2861     if (arm_feature(env, ARM_FEATURE_LPAE)) {
2862         raw_write(env, ri, value);
2863     } else if (arm_feature(env, ARM_FEATURE_V7)) {
2864         raw_write(env, ri, value & 0xfffff6ff);
2865     } else {
2866         raw_write(env, ri, value & 0xfffff1ff);
2867     }
2868 }
2869 
2870 #ifndef CONFIG_USER_ONLY
2871 /* get_phys_addr() isn't present for user-mode-only targets */
2872 
2873 static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
2874                                  bool isread)
2875 {
2876     if (ri->opc2 & 4) {
2877         /* The ATS12NSO* operations must trap to EL3 if executed in
2878          * Secure EL1 (which can only happen if EL3 is AArch64).
2879          * They are simply UNDEF if executed from NS EL1.
2880          * They function normally from EL2 or EL3.
2881          */
2882         if (arm_current_el(env) == 1) {
2883             if (arm_is_secure_below_el3(env)) {
2884                 return CP_ACCESS_TRAP_UNCATEGORIZED_EL3;
2885             }
2886             return CP_ACCESS_TRAP_UNCATEGORIZED;
2887         }
2888     }
2889     return CP_ACCESS_OK;
2890 }
2891 
2892 static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
2893                              MMUAccessType access_type, ARMMMUIdx mmu_idx)
2894 {
2895     hwaddr phys_addr;
2896     target_ulong page_size;
2897     int prot;
2898     bool ret;
2899     uint64_t par64;
2900     bool format64 = false;
2901     MemTxAttrs attrs = {};
2902     ARMMMUFaultInfo fi = {};
2903     ARMCacheAttrs cacheattrs = {};
2904 
2905     ret = get_phys_addr(env, value, access_type, mmu_idx, &phys_addr, &attrs,
2906                         &prot, &page_size, &fi, &cacheattrs);
2907 
2908     if (is_a64(env)) {
2909         format64 = true;
2910     } else if (arm_feature(env, ARM_FEATURE_LPAE)) {
2911         /*
2912          * ATS1Cxx:
2913          * * TTBCR.EAE determines whether the result is returned using the
2914          *   32-bit or the 64-bit PAR format
2915          * * Instructions executed in Hyp mode always use the 64-bit format
2916          *
2917          * ATS1S2NSOxx uses the 64-bit format if any of the following is true:
2918          * * The Non-secure TTBCR.EAE bit is set to 1
2919          * * The implementation includes EL2, and the value of HCR.VM is 1
2920          *
2921          * (Note that HCR.DC makes HCR.VM behave as if it is 1.)
2922          *
2923          * ATS1Hx always uses the 64-bit format.
2924          */
2925         format64 = arm_s1_regime_using_lpae_format(env, mmu_idx);
2926 
2927         if (arm_feature(env, ARM_FEATURE_EL2)) {
2928             if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
2929                 format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC);
2930             } else {
2931                 format64 |= arm_current_el(env) == 2;
2932             }
2933         }
2934     }
2935 
2936     if (format64) {
2937         /* Create a 64-bit PAR */
2938         par64 = (1 << 11); /* LPAE bit always set */
2939         if (!ret) {
2940             par64 |= phys_addr & ~0xfffULL;
2941             if (!attrs.secure) {
2942                 par64 |= (1 << 9); /* NS */
2943             }
2944             par64 |= (uint64_t)cacheattrs.attrs << 56; /* ATTR */
2945             par64 |= cacheattrs.shareability << 7; /* SH */
2946         } else {
2947             uint32_t fsr = arm_fi_to_lfsc(&fi);
2948 
2949             par64 |= 1; /* F */
2950             par64 |= (fsr & 0x3f) << 1; /* FS */
2951             if (fi.stage2) {
2952                 par64 |= (1 << 9); /* S */
2953             }
2954             if (fi.s1ptw) {
2955                 par64 |= (1 << 8); /* PTW */
2956             }
2957         }
2958     } else {
2959         /* fsr is a DFSR/IFSR value for the short descriptor
2960          * translation table format (with WnR always clear).
2961          * Convert it to a 32-bit PAR.
2962          */
2963         if (!ret) {
2964             /* We do not set any attribute bits in the PAR */
2965             if (page_size == (1 << 24)
2966                 && arm_feature(env, ARM_FEATURE_V7)) {
2967                 par64 = (phys_addr & 0xff000000) | (1 << 1);
2968             } else {
2969                 par64 = phys_addr & 0xfffff000;
2970             }
2971             if (!attrs.secure) {
2972                 par64 |= (1 << 9); /* NS */
2973             }
2974         } else {
2975             uint32_t fsr = arm_fi_to_sfsc(&fi);
2976 
2977             par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) |
2978                     ((fsr & 0xf) << 1) | 1;
2979         }
2980     }
2981     return par64;
2982 }
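/* Worked example (illustrative): a successful AArch64 translation to
 * physical address 0x80004000 in Non-secure memory with MAIR attrs 0xff
 * (Normal, write-back) and SH == 3 (Inner Shareable) produces:
 *   par64 = (1 << 11)            LPAE format
 *         | 0x80004000           PA[47:12]
 *         | (1 << 9)             NS
 *         | (3 << 7)             SH
 *         | (0xffULL << 56)      ATTR
 *         = 0xff00000080004b80
 * On a fault, bit 0 (F) is set instead and bits [6:1] carry the
 * long-descriptor fault status code from arm_fi_to_lfsc().
 */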
2983 
2984 static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
2985 {
2986     MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
2987     uint64_t par64;
2988     ARMMMUIdx mmu_idx;
2989     int el = arm_current_el(env);
2990     bool secure = arm_is_secure_below_el3(env);
2991 
2992     switch (ri->opc2 & 6) {
2993     case 0:
2994         /* stage 1 current state PL1: ATS1CPR, ATS1CPW */
2995         switch (el) {
2996         case 3:
2997             mmu_idx = ARMMMUIdx_S1E3;
2998             break;
2999         case 2:
3000             mmu_idx = ARMMMUIdx_S1NSE1;
3001             break;
3002         case 1:
3003             mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
3004             break;
3005         default:
3006             g_assert_not_reached();
3007         }
3008         break;
3009     case 2:
3010         /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
3011         switch (el) {
3012         case 3:
3013             mmu_idx = ARMMMUIdx_S1SE0;
3014             break;
3015         case 2:
3016             mmu_idx = ARMMMUIdx_S1NSE0;
3017             break;
3018         case 1:
3019             mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
3020             break;
3021         default:
3022             g_assert_not_reached();
3023         }
3024         break;
3025     case 4:
3026         /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
3027         mmu_idx = ARMMMUIdx_S12NSE1;
3028         break;
3029     case 6:
3030         /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
3031         mmu_idx = ARMMMUIdx_S12NSE0;
3032         break;
3033     default:
3034         g_assert_not_reached();
3035     }
3036 
3037     par64 = do_ats_write(env, value, access_type, mmu_idx);
3038 
3039     A32_BANKED_CURRENT_REG_SET(env, par, par64);
3040 }
3041 
3042 static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
3043                         uint64_t value)
3044 {
3045     MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
3046     uint64_t par64;
3047 
3048     par64 = do_ats_write(env, value, access_type, ARMMMUIdx_S1E2);
3049 
3050     A32_BANKED_CURRENT_REG_SET(env, par, par64);
3051 }
3052 
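/* Informal note on the check below: executing AT S1E2* from EL3 with
 * SCR_EL3.NS == 0 is rejected, since in Secure state there is no EL2
 * translation regime to interrogate; the access traps rather than
 * performing a meaningless walk.
 */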
3053 static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
3054                                      bool isread)
3055 {
3056     if (arm_current_el(env) == 3 && !(env->cp15.scr_el3 & SCR_NS)) {
3057         return CP_ACCESS_TRAP;
3058     }
3059     return CP_ACCESS_OK;
3060 }
3061 
3062 static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
3063                         uint64_t value)
3064 {
3065     MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
3066     ARMMMUIdx mmu_idx;
3067     int secure = arm_is_secure_below_el3(env);
3068 
3069     switch (ri->opc2 & 6) {
3070     case 0:
3071         switch (ri->opc1) {
3072         case 0: /* AT S1E1R, AT S1E1W */
3073             mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
3074             break;
3075         case 4: /* AT S1E2R, AT S1E2W */
3076             mmu_idx = ARMMMUIdx_S1E2;
3077             break;
3078         case 6: /* AT S1E3R, AT S1E3W */
3079             mmu_idx = ARMMMUIdx_S1E3;
3080             break;
3081         default:
3082             g_assert_not_reached();
3083         }
3084         break;
3085     case 2: /* AT S1E0R, AT S1E0W */
3086         mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
3087         break;
3088     case 4: /* AT S12E1R, AT S12E1W */
3089         mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S12NSE1;
3090         break;
3091     case 6: /* AT S12E0R, AT S12E0W */
3092         mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S12NSE0;
3093         break;
3094     default:
3095         g_assert_not_reached();
3096     }
3097 
3098     env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx);
3099 }
3100 #endif
3101 
3102 static const ARMCPRegInfo vapa_cp_reginfo[] = {
3103     { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
3104       .access = PL1_RW, .resetvalue = 0,
3105       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s),
3106                              offsetoflow32(CPUARMState, cp15.par_ns) },
3107       .writefn = par_write },
3108 #ifndef CONFIG_USER_ONLY
3109     /* This underdecoding is safe because the reginfo is NO_RAW. */
3110     { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
3111       .access = PL1_W, .accessfn = ats_access,
3112       .writefn = ats_write, .type = ARM_CP_NO_RAW },
3113 #endif
3114     REGINFO_SENTINEL
3115 };
3116 
3117 /* Return basic MPU access permission bits.  */
3118 static uint32_t simple_mpu_ap_bits(uint32_t val)
3119 {
3120     uint32_t ret;
3121     uint32_t mask;
3122     int i;
3123     ret = 0;
3124     mask = 3;
3125     for (i = 0; i < 16; i += 2) {
3126         ret |= (val >> i) & mask;
3127         mask <<= 2;
3128     }
3129     return ret;
3130 }
3131 
3132 /* Pad basic MPU access permission bits to extended format.  */
3133 static uint32_t extended_mpu_ap_bits(uint32_t val)
3134 {
3135     uint32_t ret;
3136     uint32_t mask;
3137     int i;
3138     ret = 0;
3139     mask = 3;
3140     for (i = 0; i < 16; i += 2) {
3141         ret |= (val & mask) << i;
3142         mask <<= 2;
3143     }
3144     return ret;
3145 }
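/* Worked example (illustrative): the two helpers above are inverses for
 * the bits they preserve.  A "simple" value of 0xe4 (regions 0..3 with
 * AP values 0, 1, 2, 3) round-trips as:
 *   extended_mpu_ap_bits(0xe4)   == 0x3210
 *   simple_mpu_ap_bits(0x3210)   == 0xe4
 * i.e. each 2-bit AP field is widened to a 4-bit slot and back.
 */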
3146 
3147 static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
3148                                  uint64_t value)
3149 {
3150     env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value);
3151 }
3152 
3153 static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
3154 {
3155     return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap);
3156 }
3157 
3158 static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
3159                                  uint64_t value)
3160 {
3161     env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value);
3162 }
3163 
3164 static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
3165 {
3166     return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap);
3167 }
3168 
3169 static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri)
3170 {
3171     uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
3172 
3173     if (!u32p) {
3174         return 0;
3175     }
3176 
3177     u32p += env->pmsav7.rnr[M_REG_NS];
3178     return *u32p;
3179 }
3180 
3181 static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri,
3182                          uint64_t value)
3183 {
3184     ARMCPU *cpu = arm_env_get_cpu(env);
3185     uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
3186 
3187     if (!u32p) {
3188         return;
3189     }
3190 
3191     u32p += env->pmsav7.rnr[M_REG_NS];
3192     tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
3193     *u32p = value;
3194 }
3195 
3196 static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3197                               uint64_t value)
3198 {
3199     ARMCPU *cpu = arm_env_get_cpu(env);
3200     uint32_t nrgs = cpu->pmsav7_dregion;
3201 
3202     if (value >= nrgs) {
3203         qemu_log_mask(LOG_GUEST_ERROR,
3204                       "PMSAv7 RGNR write >= # supported regions, %" PRIu32
3205                       " >= %" PRIu32 "\n", (uint32_t)value, nrgs);
3206         return;
3207     }
3208 
3209     raw_write(env, ri, value);
3210 }
3211 
3212 static const ARMCPRegInfo pmsav7_cp_reginfo[] = {
3213     /* Reset for all these registers is handled in arm_cpu_reset(),
3214      * because the PMSAv7 is also used by M-profile CPUs, which do
3215      * not register cpregs but still need the state to be reset.
3216      */
3217     { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0,
3218       .access = PL1_RW, .type = ARM_CP_NO_RAW,
3219       .fieldoffset = offsetof(CPUARMState, pmsav7.drbar),
3220       .readfn = pmsav7_read, .writefn = pmsav7_write,
3221       .resetfn = arm_cp_reset_ignore },
3222     { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2,
3223       .access = PL1_RW, .type = ARM_CP_NO_RAW,
3224       .fieldoffset = offsetof(CPUARMState, pmsav7.drsr),
3225       .readfn = pmsav7_read, .writefn = pmsav7_write,
3226       .resetfn = arm_cp_reset_ignore },
3227     { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4,
3228       .access = PL1_RW, .type = ARM_CP_NO_RAW,
3229       .fieldoffset = offsetof(CPUARMState, pmsav7.dracr),
3230       .readfn = pmsav7_read, .writefn = pmsav7_write,
3231       .resetfn = arm_cp_reset_ignore },
3232     { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0,
3233       .access = PL1_RW,
3234       .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]),
3235       .writefn = pmsav7_rgnr_write,
3236       .resetfn = arm_cp_reset_ignore },
3237     REGINFO_SENTINEL
3238 };
3239 
3240 static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
3241     { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
3242       .access = PL1_RW, .type = ARM_CP_ALIAS,
3243       .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
3244       .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
3245     { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
3246       .access = PL1_RW, .type = ARM_CP_ALIAS,
3247       .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
3248       .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
3249     { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
3250       .access = PL1_RW,
3251       .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
3252       .resetvalue = 0, },
3253     { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
3254       .access = PL1_RW,
3255       .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
3256       .resetvalue = 0, },
3257     { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
3258       .access = PL1_RW,
3259       .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
3260     { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
3261       .access = PL1_RW,
3262       .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
3263     /* Protection region base and size registers */
3264     { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
3265       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3266       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) },
3267     { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
3268       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3269       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) },
3270     { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
3271       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3272       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
3273     { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
3274       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3275       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
3276     { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
3277       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3278       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
3279     { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
3280       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3281       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
3282     { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
3283       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3284       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
3285     { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
3286       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3287       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
3288     REGINFO_SENTINEL
3289 };
3290 
3291 static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
3292                                  uint64_t value)
3293 {
3294     TCR *tcr = raw_ptr(env, ri);
3295     int maskshift = extract32(value, 0, 3);
3296 
3297     if (!arm_feature(env, ARM_FEATURE_V8)) {
3298         if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
3299             /* Pre-ARMv8, bits [21:19], [15:14] and [6:3] are UNK/SBZP when
3300              * using the Long-descriptor translation table format */
3301             value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
3302         } else if (arm_feature(env, ARM_FEATURE_EL3)) {
3303             /* In an implementation that includes the Security Extensions
3304              * TTBCR has additional fields PD0 [4] and PD1 [5] for
3305              * Short-descriptor translation table format.
3306              */
3307             value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N;
3308         } else {
3309             value &= TTBCR_N;
3310         }
3311     }
3312 
3313     /* Update the masks corresponding to the TCR bank being written
3314      * Note that we always calculate mask and base_mask, but
3315      * they are only used for short-descriptor tables (ie if EAE is 0);
3316      * for long-descriptor tables the TCR fields are used differently
3317      * and the mask and base_mask values are meaningless.
3318      */
3319     tcr->raw_tcr = value;
3320     tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift);
3321     tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift);
3322 }
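/* Worked example (illustrative): a short-descriptor write with
 * TTBCR.N == 2 gives maskshift == 2, hence
 *   tcr->mask      == ~(0xffffffffu >> 2) == 0xc0000000
 *   tcr->base_mask == ~(0x3fffu >> 2)     == 0xfffff000
 * so VAs with either of the top two bits set translate via TTBR1, and
 * the TTBR0 table base is masked down to a 4KB-aligned address (the
 * table shrinks as N grows).
 */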
3323 
3324 static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3325                              uint64_t value)
3326 {
3327     ARMCPU *cpu = arm_env_get_cpu(env);
3328     TCR *tcr = raw_ptr(env, ri);
3329 
3330     if (arm_feature(env, ARM_FEATURE_LPAE)) {
3331         /* With LPAE a write to TTBCR could result in a change of ASID
3332          * via the TTBCR.A1 bit, so do a TLB flush.
3333          */
3334         tlb_flush(CPU(cpu));
3335     }
3336     /* Preserve the high half of TCR_EL1, set via TTBCR2.  */
3337     value = deposit64(tcr->raw_tcr, 0, 32, value);
3338     vmsa_ttbcr_raw_write(env, ri, value);
3339 }
3340 
3341 static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
3342 {
3343     TCR *tcr = raw_ptr(env, ri);
3344 
3345     /* Reset both the TCR and the masks corresponding to the bank of
3346      * the TCR being reset.
3347      */
3348     tcr->raw_tcr = 0;
3349     tcr->mask = 0;
3350     tcr->base_mask = 0xffffc000u;
3351 }
3352 
3353 static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
3354                                uint64_t value)
3355 {
3356     ARMCPU *cpu = arm_env_get_cpu(env);
3357     TCR *tcr = raw_ptr(env, ri);
3358 
3359     /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
3360     tlb_flush(CPU(cpu));
3361     tcr->raw_tcr = value;
3362 }
3363 
3364 static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3365                             uint64_t value)
3366 {
3367     /* If the ASID changes (with a 64-bit write), we must flush the TLB.  */
3368     if (cpreg_field_is_64bit(ri) &&
3369         extract64(raw_read(env, ri) ^ value, 48, 16) != 0) {
3370         ARMCPU *cpu = arm_env_get_cpu(env);
3371         tlb_flush(CPU(cpu));
3372     }
3373     raw_write(env, ri, value);
3374 }
3375 
3376 static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3377                         uint64_t value)
3378 {
3379     ARMCPU *cpu = arm_env_get_cpu(env);
3380     CPUState *cs = CPU(cpu);
3381 
3382     /* Accesses to VTTBR may change the VMID so we must flush the TLB.  */
3383     if (raw_read(env, ri) != value) {
3384         tlb_flush_by_mmuidx(cs,
3385                             ARMMMUIdxBit_S12NSE1 |
3386                             ARMMMUIdxBit_S12NSE0 |
3387                             ARMMMUIdxBit_S2NS);
3388         raw_write(env, ri, value);
3389     }
3390 }
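/* Informal note: the VMID lives in VTTBR[55:48] (or [63:48] with 16-bit
 * VMIDs), but rather than comparing just that field we conservatively
 * flush the NS EL1&0 and stage-2 TLBs on any change of the register.
 */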
3391 
3392 static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = {
3393     { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
3394       .access = PL1_RW, .type = ARM_CP_ALIAS,
3395       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s),
3396                              offsetoflow32(CPUARMState, cp15.dfsr_ns) }, },
3397     { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
3398       .access = PL1_RW, .resetvalue = 0,
3399       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s),
3400                              offsetoflow32(CPUARMState, cp15.ifsr_ns) } },
3401     { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0,
3402       .access = PL1_RW, .resetvalue = 0,
3403       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s),
3404                              offsetof(CPUARMState, cp15.dfar_ns) } },
3405     { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64,
3406       .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
3407       .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
3408       .resetvalue = 0, },
3409     REGINFO_SENTINEL
3410 };
3411 
3412 static const ARMCPRegInfo vmsa_cp_reginfo[] = {
3413     { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
3414       .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
3415       .access = PL1_RW,
3416       .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, },
3417     { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
3418       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0,
3419       .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
3420       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
3421                              offsetof(CPUARMState, cp15.ttbr0_ns) } },
3422     { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
3423       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1,
3424       .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
3425       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
3426                              offsetof(CPUARMState, cp15.ttbr1_ns) } },
3427     { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
3428       .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
3429       .access = PL1_RW, .writefn = vmsa_tcr_el1_write,
3430       .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
3431       .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) },
3432     { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
3433       .access = PL1_RW, .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write,
3434       .raw_writefn = vmsa_ttbcr_raw_write,
3435       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]),
3436                              offsetoflow32(CPUARMState, cp15.tcr_el[1])} },
3437     REGINFO_SENTINEL
3438 };
3439 
3440 /* Note that unlike TTBCR, writing to TTBCR2 does not require flushing
3441  * the QEMU TLBs or adjusting the cached masks.
3442  */
3443 static const ARMCPRegInfo ttbcr2_reginfo = {
3444     .name = "TTBCR2", .cp = 15, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 3,
3445     .access = PL1_RW, .type = ARM_CP_ALIAS,
3446     .bank_fieldoffsets = { offsetofhigh32(CPUARMState, cp15.tcr_el[3]),
3447                            offsetofhigh32(CPUARMState, cp15.tcr_el[1]) },
3448 };
3449 
3450 static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
3451                                 uint64_t value)
3452 {
3453     env->cp15.c15_ticonfig = value & 0xe7;
3454     /* The OS_TYPE bit in this register changes the reported CPUID! */
3455     env->cp15.c0_cpuid = (value & (1 << 5)) ?
3456         ARM_CPUID_TI915T : ARM_CPUID_TI925T;
3457 }
3458 
3459 static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
3460                                 uint64_t value)
3461 {
3462     env->cp15.c15_threadid = value & 0xffff;
3463 }
3464 
3465 static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
3466                            uint64_t value)
3467 {
3468     /* Wait-for-interrupt (deprecated) */
3469     cpu_interrupt(CPU(arm_env_get_cpu(env)), CPU_INTERRUPT_HALT);
3470 }
3471 
3472 static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
3473                                   uint64_t value)
3474 {
3475     /* On OMAP there are registers indicating the max/min index of dcache lines
3476      * containing a dirty line; cache flush operations have to reset these.
3477      */
3478     env->cp15.c15_i_max = 0x000;
3479     env->cp15.c15_i_min = 0xff0;
3480 }
3481 
3482 static const ARMCPRegInfo omap_cp_reginfo[] = {
3483     { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
3484       .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
3485       .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
3486       .resetvalue = 0, },
3487     { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
3488       .access = PL1_RW, .type = ARM_CP_NOP },
3489     { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
3490       .access = PL1_RW,
3491       .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
3492       .writefn = omap_ticonfig_write },
3493     { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
3494       .access = PL1_RW,
3495       .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
3496     { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
3497       .access = PL1_RW, .resetvalue = 0xff0,
3498       .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
3499     { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
3500       .access = PL1_RW,
3501       .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
3502       .writefn = omap_threadid_write },
3503     { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
3504       .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
3505       .type = ARM_CP_NO_RAW,
3506       .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
3507     /* TODO: Peripheral port remap register:
3508      * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
3509      * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
3510      * when MMU is off.
3511      */
3512     { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
3513       .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
3514       .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW,
3515       .writefn = omap_cachemaint_write },
3516     { .name = "C9", .cp = 15, .crn = 9,
3517       .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
3518       .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
3519     REGINFO_SENTINEL
3520 };
3521 
3522 static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
3523                               uint64_t value)
3524 {
3525     env->cp15.c15_cpar = value & 0x3fff;
3526 }
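/* Informal note: the 0x3fff mask keeps one access-enable bit per
 * coprocessor, cp0..cp13; XScale CPAR does not gate cp14/cp15.
 */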
3527 
3528 static const ARMCPRegInfo xscale_cp_reginfo[] = {
3529     { .name = "XSCALE_CPAR",
3530       .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
3531       .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
3532       .writefn = xscale_cpar_write, },
3533     { .name = "XSCALE_AUXCR",
3534       .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
3535       .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
3536       .resetvalue = 0, },
3537     /* XScale-specific cache lockdown: since we have no cache we NOP these
3538      * and hope the guest does not really rely on cache behaviour.
3539      */
3540     { .name = "XSCALE_LOCK_ICACHE_LINE",
3541       .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
3542       .access = PL1_W, .type = ARM_CP_NOP },
3543     { .name = "XSCALE_UNLOCK_ICACHE",
3544       .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
3545       .access = PL1_W, .type = ARM_CP_NOP },
3546     { .name = "XSCALE_DCACHE_LOCK",
3547       .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0,
3548       .access = PL1_RW, .type = ARM_CP_NOP },
3549     { .name = "XSCALE_UNLOCK_DCACHE",
3550       .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1,
3551       .access = PL1_W, .type = ARM_CP_NOP },
3552     REGINFO_SENTINEL
3553 };
3554 
3555 static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
3556     /* RAZ/WI the whole crn=15 space when we don't have a more specific
3557      * implementation of this implementation-defined space.
3558      * Ideally this should eventually disappear in favour of actually
3559      * implementing the correct behaviour for all cores.
3560      */
3561     { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
3562       .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
3563       .access = PL1_RW,
3564       .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
3565       .resetvalue = 0 },
3566     REGINFO_SENTINEL
3567 };
3568 
3569 static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
3570     /* Cache status: RAZ because we have no cache so it's always clean */
3571     { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
3572       .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
3573       .resetvalue = 0 },
3574     REGINFO_SENTINEL
3575 };
3576 
3577 static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
3578     /* We never have a block transfer operation in progress */
3579     { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
3580       .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
3581       .resetvalue = 0 },
3582     /* The cache ops themselves: these all NOP for QEMU */
3583     { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
3584     { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
3585       .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
3586     { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
3587       .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
3588     { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
3589       .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
3590     { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
3591       .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
3592     { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
3593       .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
3594     { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
3595       .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
3596 };
3597 
3598 static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
3599     /* The cache test-and-clean instructions always return (1 << 30)
3600      * to indicate that there are no dirty cache lines.
3601      */
3602     { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
3603       .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
3604       .resetvalue = (1 << 30) },
3605     { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
3606       .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
3607       .resetvalue = (1 << 30) },
3608     REGINFO_SENTINEL
3609 };
3610 
3611 static const ARMCPRegInfo strongarm_cp_reginfo[] = {
3612     /* Ignore ReadBuffer accesses */
3613     { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
3614       .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
3615       .access = PL1_RW, .resetvalue = 0,
3616       .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW },
3617     REGINFO_SENTINEL
3618 };
3619 
3620 static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri)
3621 {
3622     ARMCPU *cpu = arm_env_get_cpu(env);
3623     unsigned int cur_el = arm_current_el(env);
3624     bool secure = arm_is_secure(env);
3625 
3626     if (arm_feature(&cpu->env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
3627         return env->cp15.vpidr_el2;
3628     }
3629     return raw_read(env, ri);
3630 }
3631 
3632 static uint64_t mpidr_read_val(CPUARMState *env)
3633 {
3634     ARMCPU *cpu = ARM_CPU(arm_env_get_cpu(env));
3635     uint64_t mpidr = cpu->mp_affinity;
3636 
3637     if (arm_feature(env, ARM_FEATURE_V7MP)) {
3638         mpidr |= (1U << 31);
3639         /* Cores which are uniprocessor (non-coherent)
3640          * but still implement the MP extensions set
3641          * bit 30 (for instance, Cortex-R5).
3642          */
3643         if (cpu->mp_is_up) {
3644             mpidr |= (1u << 30);
3645         }
3646     }
3647     return mpidr;
3648 }
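/* Worked example (illustrative): core 2 of a v7MP cluster with
 * mp_affinity == 2 reads MPIDR as 0x80000002 (bit 31 set for cores with
 * the MP extensions); a uniprocessor Cortex-R5 additionally sets the
 * U bit, giving 0xc0000000 plus its affinity fields.
 */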
3649 
3650 static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
3651 {
3652     unsigned int cur_el = arm_current_el(env);
3653     bool secure = arm_is_secure(env);
3654 
3655     if (arm_feature(env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
3656         return env->cp15.vmpidr_el2;
3657     }
3658     return mpidr_read_val(env);
3659 }
3660 
3661 static const ARMCPRegInfo lpae_cp_reginfo[] = {
3662     /* NOP AMAIR0/1 */
3663     { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
3664       .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
3665       .access = PL1_RW, .type = ARM_CP_CONST,
3666       .resetvalue = 0 },
3667     /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
3668     { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
3669       .access = PL1_RW, .type = ARM_CP_CONST,
3670       .resetvalue = 0 },
3671     { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
3672       .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0,
3673       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s),
3674                              offsetof(CPUARMState, cp15.par_ns)} },
3675     { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
3676       .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
3677       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
3678                              offsetof(CPUARMState, cp15.ttbr0_ns) },
3679       .writefn = vmsa_ttbr_write, },
3680     { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
3681       .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
3682       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
3683                              offsetof(CPUARMState, cp15.ttbr1_ns) },
3684       .writefn = vmsa_ttbr_write, },
3685     REGINFO_SENTINEL
3686 };
3687 
3688 static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
3689 {
3690     return vfp_get_fpcr(env);
3691 }
3692 
3693 static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3694                             uint64_t value)
3695 {
3696     vfp_set_fpcr(env, value);
3697 }
3698 
3699 static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
3700 {
3701     return vfp_get_fpsr(env);
3702 }
3703 
3704 static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3705                             uint64_t value)
3706 {
3707     vfp_set_fpsr(env, value);
3708 }
3709 
3710 static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri,
3711                                        bool isread)
3712 {
3713     if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
3714         return CP_ACCESS_TRAP;
3715     }
3716     return CP_ACCESS_OK;
3717 }
3718 
3719 static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
3720                             uint64_t value)
3721 {
3722     env->daif = value & PSTATE_DAIF;
3723 }
3724 
3725 static CPAccessResult aa64_cacheop_access(CPUARMState *env,
3726                                           const ARMCPRegInfo *ri,
3727                                           bool isread)
3728 {
3729     /* Cache invalidate/clean: NOP, but EL0 must UNDEF unless
3730      * SCTLR_EL1.UCI is set.
3731      */
3732     if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCI)) {
3733         return CP_ACCESS_TRAP;
3734     }
3735     return CP_ACCESS_OK;
3736 }
3737 
3738 /* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions
3739  * Page D4-1736 (DDI0487A.b)
3740  */
3741 
3742 static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3743                                       uint64_t value)
3744 {
3745     CPUState *cs = ENV_GET_CPU(env);
3746     bool sec = arm_is_secure_below_el3(env);
3747 
3748     if (sec) {
3749         tlb_flush_by_mmuidx_all_cpus_synced(cs,
3750                                             ARMMMUIdxBit_S1SE1 |
3751                                             ARMMMUIdxBit_S1SE0);
3752     } else {
3753         tlb_flush_by_mmuidx_all_cpus_synced(cs,
3754                                             ARMMMUIdxBit_S12NSE1 |
3755                                             ARMMMUIdxBit_S12NSE0);
3756     }
3757 }
3758 
3759 static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
3760                                     uint64_t value)
3761 {
3762     CPUState *cs = ENV_GET_CPU(env);
3763 
3764     if (tlb_force_broadcast(env)) {
3765         tlbi_aa64_vmalle1is_write(env, NULL, value);
3766         return;
3767     }
3768 
3769     if (arm_is_secure_below_el3(env)) {
3770         tlb_flush_by_mmuidx(cs,
3771                             ARMMMUIdxBit_S1SE1 |
3772                             ARMMMUIdxBit_S1SE0);
3773     } else {
3774         tlb_flush_by_mmuidx(cs,
3775                             ARMMMUIdxBit_S12NSE1 |
3776                             ARMMMUIdxBit_S12NSE0);
3777     }
3778 }
3779 
3780 static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
3781                                   uint64_t value)
3782 {
3783     /* Note that the 'ALL' scope must invalidate both stage 1 and
3784      * stage 2 translations, whereas most other scopes only invalidate
3785      * stage 1 translations.
3786      */
3787     ARMCPU *cpu = arm_env_get_cpu(env);
3788     CPUState *cs = CPU(cpu);
3789 
3790     if (arm_is_secure_below_el3(env)) {
3791         tlb_flush_by_mmuidx(cs,
3792                             ARMMMUIdxBit_S1SE1 |
3793                             ARMMMUIdxBit_S1SE0);
3794     } else {
3795         if (arm_feature(env, ARM_FEATURE_EL2)) {
3796             tlb_flush_by_mmuidx(cs,
3797                                 ARMMMUIdxBit_S12NSE1 |
3798                                 ARMMMUIdxBit_S12NSE0 |
3799                                 ARMMMUIdxBit_S2NS);
3800         } else {
3801             tlb_flush_by_mmuidx(cs,
3802                                 ARMMMUIdxBit_S12NSE1 |
3803                                 ARMMMUIdxBit_S12NSE0);
3804         }
3805     }
3806 }
3807 
3808 static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
3809                                   uint64_t value)
3810 {
3811     ARMCPU *cpu = arm_env_get_cpu(env);
3812     CPUState *cs = CPU(cpu);
3813 
3814     tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2);
3815 }
3816 
3817 static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
3818                                   uint64_t value)
3819 {
3820     ARMCPU *cpu = arm_env_get_cpu(env);
3821     CPUState *cs = CPU(cpu);
3822 
3823     tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E3);
3824 }
3825 
3826 static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3827                                     uint64_t value)
3828 {
3829     /* Note that the 'ALL' scope must invalidate both stage 1 and
3830      * stage 2 translations, whereas most other scopes only invalidate
3831      * stage 1 translations.
3832      */
3833     CPUState *cs = ENV_GET_CPU(env);
3834     bool sec = arm_is_secure_below_el3(env);
3835     bool has_el2 = arm_feature(env, ARM_FEATURE_EL2);
3836 
3837     if (sec) {
3838         tlb_flush_by_mmuidx_all_cpus_synced(cs,
3839                                             ARMMMUIdxBit_S1SE1 |
3840                                             ARMMMUIdxBit_S1SE0);
3841     } else if (has_el2) {
3842         tlb_flush_by_mmuidx_all_cpus_synced(cs,
3843                                             ARMMMUIdxBit_S12NSE1 |
3844                                             ARMMMUIdxBit_S12NSE0 |
3845                                             ARMMMUIdxBit_S2NS);
3846     } else {
3847         tlb_flush_by_mmuidx_all_cpus_synced(cs,
3848                                             ARMMMUIdxBit_S12NSE1 |
3849                                             ARMMMUIdxBit_S12NSE0);
3850     }
3851 }
3852 
3853 static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3854                                     uint64_t value)
3855 {
3856     CPUState *cs = ENV_GET_CPU(env);
3857 
3858     tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2);
3859 }
3860 
3861 static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3862                                     uint64_t value)
3863 {
3864     CPUState *cs = ENV_GET_CPU(env);
3865 
3866     tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E3);
3867 }
3868 
3869 static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
3870                                  uint64_t value)
3871 {
3872     /* Invalidate by VA, EL2
3873      * Currently handles both VAE2 and VALE2, since we don't support
3874      * flush-last-level-only.
3875      */
3876     ARMCPU *cpu = arm_env_get_cpu(env);
3877     CPUState *cs = CPU(cpu);
3878     uint64_t pageaddr = sextract64(value << 12, 0, 56);
3879 
3880     tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2);
3881 }
3882 
3883 static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
3884                                  uint64_t value)
3885 {
3886     /* Invalidate by VA, EL3
3887      * Currently handles both VAE3 and VALE3, since we don't support
3888      * flush-last-level-only.
3889      */
3890     ARMCPU *cpu = arm_env_get_cpu(env);
3891     CPUState *cs = CPU(cpu);
3892     uint64_t pageaddr = sextract64(value << 12, 0, 56);
3893 
3894     tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E3);
3895 }
3896 
3897 static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3898                                    uint64_t value)
3899 {
3900     ARMCPU *cpu = arm_env_get_cpu(env);
3901     CPUState *cs = CPU(cpu);
3902     bool sec = arm_is_secure_below_el3(env);
3903     uint64_t pageaddr = sextract64(value << 12, 0, 56);
3904 
3905     if (sec) {
3906         tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
3907                                                  ARMMMUIdxBit_S1SE1 |
3908                                                  ARMMMUIdxBit_S1SE0);
3909     } else {
3910         tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
3911                                                  ARMMMUIdxBit_S12NSE1 |
3912                                                  ARMMMUIdxBit_S12NSE0);
3913     }
3914 }
3915 
3916 static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
3917                                  uint64_t value)
3918 {
3919     /* Invalidate by VA, EL1&0 (AArch64 version).
3920      * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1,
3921      * since we don't support flush-for-specific-ASID-only or
3922      * flush-last-level-only.
3923      */
3924     ARMCPU *cpu = arm_env_get_cpu(env);
3925     CPUState *cs = CPU(cpu);
3926     uint64_t pageaddr = sextract64(value << 12, 0, 56);
3927 
3928     if (tlb_force_broadcast(env)) {
3929         tlbi_aa64_vae1is_write(env, NULL, value);
3930         return;
3931     }
3932 
3933     if (arm_is_secure_below_el3(env)) {
3934         tlb_flush_page_by_mmuidx(cs, pageaddr,
3935                                  ARMMMUIdxBit_S1SE1 |
3936                                  ARMMMUIdxBit_S1SE0);
3937     } else {
3938         tlb_flush_page_by_mmuidx(cs, pageaddr,
3939                                  ARMMMUIdxBit_S12NSE1 |
3940                                  ARMMMUIdxBit_S12NSE0);
3941     }
3942 }
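/* Informal note on the address extraction used by the VA-based TLBI ops:
 * the register value carries VA[55:12] in bits [43:0], so shifting left
 * by 12 and sign-extracting 56 bits reconstructs the page address, e.g.
 * value 0xfffffffffff (44 set bits) yields pageaddr 0xfffffffffffff000.
 * The ASID in the value's top bits is ignored, per the comment above.
 */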
3943 
3944 static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3945                                    uint64_t value)
3946 {
3947     CPUState *cs = ENV_GET_CPU(env);
3948     uint64_t pageaddr = sextract64(value << 12, 0, 56);
3949 
3950     tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
3951                                              ARMMMUIdxBit_S1E2);
3952 }
3953 
3954 static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3955                                    uint64_t value)
3956 {
3957     CPUState *cs = ENV_GET_CPU(env);
3958     uint64_t pageaddr = sextract64(value << 12, 0, 56);
3959 
3960     tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
3961                                              ARMMMUIdxBit_S1E3);
3962 }
3963 
3964 static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
3965                                     uint64_t value)
3966 {
3967     /* Invalidate by IPA. This has to invalidate any structures that
3968      * contain only stage 2 translation information, but does not need
3969      * to apply to structures that contain combined stage 1 and stage 2
3970      * translation information.
3971      * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
3972      */
3973     ARMCPU *cpu = arm_env_get_cpu(env);
3974     CPUState *cs = CPU(cpu);
3975     uint64_t pageaddr;
3976 
3977     if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
3978         return;
3979     }
3980 
3981     pageaddr = sextract64(value << 12, 0, 48);
3982 
3983     tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS);
3984 }
3985 
3986 static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3987                                       uint64_t value)
3988 {
3989     CPUState *cs = ENV_GET_CPU(env);
3990     uint64_t pageaddr;
3991 
3992     if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
3993         return;
3994     }
3995 
3996     pageaddr = sextract64(value << 12, 0, 48);
3997 
3998     tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
3999                                              ARMMMUIdxBit_S2NS);
4000 }
4001 
4002 static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
4003                                       bool isread)
4004 {
4005     /* We don't implement EL2, so the only control on DC ZVA is the
4006      * SCTLR_EL1.DZE bit, which can prohibit access for EL0.
4007      */
4008     if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_DZE)) {
4009         return CP_ACCESS_TRAP;
4010     }
4011     return CP_ACCESS_OK;
4012 }
4013 
4014 static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri)
4015 {
4016     ARMCPU *cpu = arm_env_get_cpu(env);
4017     int dzp_bit = 1 << 4;
4018 
4019     /* DZP indicates whether DC ZVA access is allowed */
4020     if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) {
4021         dzp_bit = 0;
4022     }
4023     return cpu->dcz_blocksize | dzp_bit;
4024 }
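/* Worked example (illustrative): dcz_blocksize is log2(words) per DC ZVA
 * block, so a typical value of 4 reports 2^4 words == 64 bytes; when ZVA
 * is currently forbidden the DZP bit (bit 4) is also set, giving 0x14
 * rather than 0x04.
 */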
4025 
4026 static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
4027                                     bool isread)
4028 {
4029     if (!(env->pstate & PSTATE_SP)) {
4030         /* Accesses to SP_EL0 are UNDEFINED when SP_EL0 is the stack
4031          * pointer currently in use (i.e. when PSTATE.SP is 0).
4032          */
4033         return CP_ACCESS_TRAP_UNCATEGORIZED;
4034     }
4035     return CP_ACCESS_OK;
4036 }
4037 
4038 static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri)
4039 {
4040     return env->pstate & PSTATE_SP;
4041 }
4042 
4043 static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
4044 {
4045     update_spsel(env, val);
4046 }
4047 
4048 static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4049                         uint64_t value)
4050 {
4051     ARMCPU *cpu = arm_env_get_cpu(env);
4052 
4053     if (raw_read(env, ri) == value) {
4054         /* Skip the TLB flush if nothing actually changed; Linux likes
4055          * to do a lot of pointless SCTLR writes.
4056          */
4057         return;
4058     }
4059 
4060     if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) {
4061         /* M bit is RAZ/WI for PMSA with no MPU implemented */
4062         value &= ~SCTLR_M;
4063     }
4064 
4065     raw_write(env, ri, value);
4066     /* ??? Lots of these bits are not implemented.  */
4067     /* This may enable/disable the MMU, so do a TLB flush.  */
4068     tlb_flush(CPU(cpu));
4069 }
4070 
4071 static CPAccessResult fpexc32_access(CPUARMState *env, const ARMCPRegInfo *ri,
4072                                      bool isread)
4073 {
4074     if ((env->cp15.cptr_el[2] & CPTR_TFP) && arm_current_el(env) == 2) {
4075         return CP_ACCESS_TRAP_FP_EL2;
4076     }
4077     if (env->cp15.cptr_el[3] & CPTR_TFP) {
4078         return CP_ACCESS_TRAP_FP_EL3;
4079     }
4080     return CP_ACCESS_OK;
4081 }
4082 
4083 static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4084                        uint64_t value)
4085 {
4086     env->cp15.mdcr_el3 = value & SDCR_VALID_MASK;
4087 }
4088 
4089 static const ARMCPRegInfo v8_cp_reginfo[] = {
4090     /* Minimal set of EL0-visible registers. This will need to be expanded
4091      * significantly for system emulation of AArch64 CPUs.
4092      */
4093     { .name = "NZCV", .state = ARM_CP_STATE_AA64,
4094       .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2,
4095       .access = PL0_RW, .type = ARM_CP_NZCV },
4096     { .name = "DAIF", .state = ARM_CP_STATE_AA64,
4097       .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2,
4098       .type = ARM_CP_NO_RAW,
4099       .access = PL0_RW, .accessfn = aa64_daif_access,
4100       .fieldoffset = offsetof(CPUARMState, daif),
4101       .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore },
4102     { .name = "FPCR", .state = ARM_CP_STATE_AA64,
4103       .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4,
4104       .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
4105       .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write },
4106     { .name = "FPSR", .state = ARM_CP_STATE_AA64,
4107       .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4,
4108       .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
4109       .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write },
4110     { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64,
4111       .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0,
4112       .access = PL0_R, .type = ARM_CP_NO_RAW,
4113       .readfn = aa64_dczid_read },
4114     { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64,
4115       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1,
4116       .access = PL0_W, .type = ARM_CP_DC_ZVA,
4117 #ifndef CONFIG_USER_ONLY
4118       /* Avoid overhead of an access check that always passes in user-mode */
4119       .accessfn = aa64_zva_access,
4120 #endif
4121     },
4122     { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64,
4123       .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2,
4124       .access = PL1_R, .type = ARM_CP_CURRENTEL },
4125     /* Cache ops: all NOPs since we don't emulate caches */
4126     { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64,
4127       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
4128       .access = PL1_W, .type = ARM_CP_NOP },
4129     { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64,
4130       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
4131       .access = PL1_W, .type = ARM_CP_NOP },
4132     { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64,
4133       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1,
4134       .access = PL0_W, .type = ARM_CP_NOP,
4135       .accessfn = aa64_cacheop_access },
4136     { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64,
4137       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
4138       .access = PL1_W, .type = ARM_CP_NOP },
4139     { .name = "DC_ISW", .state = ARM_CP_STATE_AA64,
4140       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
4141       .access = PL1_W, .type = ARM_CP_NOP },
4142     { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64,
4143       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1,
4144       .access = PL0_W, .type = ARM_CP_NOP,
4145       .accessfn = aa64_cacheop_access },
4146     { .name = "DC_CSW", .state = ARM_CP_STATE_AA64,
4147       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
4148       .access = PL1_W, .type = ARM_CP_NOP },
4149     { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64,
4150       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1,
4151       .access = PL0_W, .type = ARM_CP_NOP,
4152       .accessfn = aa64_cacheop_access },
4153     { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64,
4154       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1,
4155       .access = PL0_W, .type = ARM_CP_NOP,
4156       .accessfn = aa64_cacheop_access },
4157     { .name = "DC_CISW", .state = ARM_CP_STATE_AA64,
4158       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
4159       .access = PL1_W, .type = ARM_CP_NOP },
4160     /* TLBI operations */
4161     { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64,
4162       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
4163       .access = PL1_W, .type = ARM_CP_NO_RAW,
4164       .writefn = tlbi_aa64_vmalle1is_write },
4165     { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64,
4166       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
4167       .access = PL1_W, .type = ARM_CP_NO_RAW,
4168       .writefn = tlbi_aa64_vae1is_write },
4169     { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64,
4170       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
4171       .access = PL1_W, .type = ARM_CP_NO_RAW,
4172       .writefn = tlbi_aa64_vmalle1is_write },
4173     { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64,
4174       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
4175       .access = PL1_W, .type = ARM_CP_NO_RAW,
4176       .writefn = tlbi_aa64_vae1is_write },
4177     { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64,
4178       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
4179       .access = PL1_W, .type = ARM_CP_NO_RAW,
4180       .writefn = tlbi_aa64_vae1is_write },
4181     { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64,
4182       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
4183       .access = PL1_W, .type = ARM_CP_NO_RAW,
4184       .writefn = tlbi_aa64_vae1is_write },
4185     { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64,
4186       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
4187       .access = PL1_W, .type = ARM_CP_NO_RAW,
4188       .writefn = tlbi_aa64_vmalle1_write },
4189     { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64,
4190       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
4191       .access = PL1_W, .type = ARM_CP_NO_RAW,
4192       .writefn = tlbi_aa64_vae1_write },
4193     { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64,
4194       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
4195       .access = PL1_W, .type = ARM_CP_NO_RAW,
4196       .writefn = tlbi_aa64_vmalle1_write },
4197     { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64,
4198       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
4199       .access = PL1_W, .type = ARM_CP_NO_RAW,
4200       .writefn = tlbi_aa64_vae1_write },
4201     { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64,
4202       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
4203       .access = PL1_W, .type = ARM_CP_NO_RAW,
4204       .writefn = tlbi_aa64_vae1_write },
4205     { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64,
4206       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
4207       .access = PL1_W, .type = ARM_CP_NO_RAW,
4208       .writefn = tlbi_aa64_vae1_write },
4209     { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64,
4210       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
4211       .access = PL2_W, .type = ARM_CP_NO_RAW,
4212       .writefn = tlbi_aa64_ipas2e1is_write },
4213     { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64,
4214       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
4215       .access = PL2_W, .type = ARM_CP_NO_RAW,
4216       .writefn = tlbi_aa64_ipas2e1is_write },
4217     { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64,
4218       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
4219       .access = PL2_W, .type = ARM_CP_NO_RAW,
4220       .writefn = tlbi_aa64_alle1is_write },
4221     { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64,
4222       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6,
4223       .access = PL2_W, .type = ARM_CP_NO_RAW,
4224       .writefn = tlbi_aa64_alle1is_write },
4225     { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64,
4226       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
4227       .access = PL2_W, .type = ARM_CP_NO_RAW,
4228       .writefn = tlbi_aa64_ipas2e1_write },
4229     { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64,
4230       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
4231       .access = PL2_W, .type = ARM_CP_NO_RAW,
4232       .writefn = tlbi_aa64_ipas2e1_write },
4233     { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64,
4234       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
4235       .access = PL2_W, .type = ARM_CP_NO_RAW,
4236       .writefn = tlbi_aa64_alle1_write },
4237     { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64,
4238       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6,
4239       .access = PL2_W, .type = ARM_CP_NO_RAW,
4240       .writefn = tlbi_aa64_alle1is_write },
4241 #ifndef CONFIG_USER_ONLY
4242     /* 64 bit address translation operations */
4243     { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
4244       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0,
4245       .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
4246     { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
4247       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1,
4248       .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
4249     { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64,
4250       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2,
4251       .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
4252     { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
4253       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
4254       .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
4255     { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64,
4256       .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4,
4257       .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
4258     { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64,
4259       .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5,
4260       .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
4261     { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64,
4262       .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6,
4263       .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
4264     { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64,
4265       .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7,
4266       .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
4267     /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */
4268     { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64,
4269       .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0,
4270       .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
4271     { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64,
4272       .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1,
4273       .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
4274     { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64,
4275       .type = ARM_CP_ALIAS,
4276       .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0,
4277       .access = PL1_RW, .resetvalue = 0,
4278       .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]),
4279       .writefn = par_write },
4280 #endif
4281     /* TLB invalidate last level of translation table walk */
4282     { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
4283       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
4284     { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
4285       .type = ARM_CP_NO_RAW, .access = PL1_W,
4286       .writefn = tlbimvaa_is_write },
4287     { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
4288       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
4289     { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
4290       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
4291     { .name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
4292       .type = ARM_CP_NO_RAW, .access = PL2_W,
4293       .writefn = tlbimva_hyp_write },
4294     { .name = "TLBIMVALHIS",
4295       .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
4296       .type = ARM_CP_NO_RAW, .access = PL2_W,
4297       .writefn = tlbimva_hyp_is_write },
4298     { .name = "TLBIIPAS2",
4299       .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
4300       .type = ARM_CP_NO_RAW, .access = PL2_W,
4301       .writefn = tlbiipas2_write },
4302     { .name = "TLBIIPAS2IS",
4303       .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
4304       .type = ARM_CP_NO_RAW, .access = PL2_W,
4305       .writefn = tlbiipas2_is_write },
4306     { .name = "TLBIIPAS2L",
4307       .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
4308       .type = ARM_CP_NO_RAW, .access = PL2_W,
4309       .writefn = tlbiipas2_write },
4310     { .name = "TLBIIPAS2LIS",
4311       .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
4312       .type = ARM_CP_NO_RAW, .access = PL2_W,
4313       .writefn = tlbiipas2_is_write },
4314     /* 32 bit cache operations */
4315     { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
4316       .type = ARM_CP_NOP, .access = PL1_W },
4317     { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6,
4318       .type = ARM_CP_NOP, .access = PL1_W },
4319     { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
4320       .type = ARM_CP_NOP, .access = PL1_W },
4321     { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1,
4322       .type = ARM_CP_NOP, .access = PL1_W },
4323     { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6,
4324       .type = ARM_CP_NOP, .access = PL1_W },
4325     { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7,
4326       .type = ARM_CP_NOP, .access = PL1_W },
4327     { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
4328       .type = ARM_CP_NOP, .access = PL1_W },
4329     { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
4330       .type = ARM_CP_NOP, .access = PL1_W },
4331     { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1,
4332       .type = ARM_CP_NOP, .access = PL1_W },
4333     { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
4334       .type = ARM_CP_NOP, .access = PL1_W },
4335     { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1,
4336       .type = ARM_CP_NOP, .access = PL1_W },
4337     { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1,
4338       .type = ARM_CP_NOP, .access = PL1_W },
4339     { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
4340       .type = ARM_CP_NOP, .access = PL1_W },
4341     /* MMU Domain access control / MPU write buffer control */
4342     { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0,
4343       .access = PL1_RW, .resetvalue = 0,
4344       .writefn = dacr_write, .raw_writefn = raw_write,
4345       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
4346                              offsetoflow32(CPUARMState, cp15.dacr_ns) } },
4347     { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
4348       .type = ARM_CP_ALIAS,
4349       .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
4350       .access = PL1_RW,
4351       .fieldoffset = offsetof(CPUARMState, elr_el[1]) },
4352     { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
4353       .type = ARM_CP_ALIAS,
4354       .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
4355       .access = PL1_RW,
4356       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) },
4357     /* We rely on the access checks not allowing the guest to write to the
4358      * state field when SPSel indicates that it's being used as the stack
4359      * pointer.
4360      */
4361     { .name = "SP_EL0", .state = ARM_CP_STATE_AA64,
4362       .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0,
4363       .access = PL1_RW, .accessfn = sp_el0_access,
4364       .type = ARM_CP_ALIAS,
4365       .fieldoffset = offsetof(CPUARMState, sp_el[0]) },
4366     { .name = "SP_EL1", .state = ARM_CP_STATE_AA64,
4367       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0,
4368       .access = PL2_RW, .type = ARM_CP_ALIAS,
4369       .fieldoffset = offsetof(CPUARMState, sp_el[1]) },
4370     { .name = "SPSel", .state = ARM_CP_STATE_AA64,
4371       .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0,
4372       .type = ARM_CP_NO_RAW,
4373       .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
4374     { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64,
4375       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0,
4376       .type = ARM_CP_ALIAS,
4377       .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]),
4378       .access = PL2_RW, .accessfn = fpexc32_access },
4379     { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64,
4380       .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0,
4381       .access = PL2_RW, .resetvalue = 0,
4382       .writefn = dacr_write, .raw_writefn = raw_write,
4383       .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) },
4384     { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64,
4385       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1,
4386       .access = PL2_RW, .resetvalue = 0,
4387       .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) },
4388     { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64,
4389       .type = ARM_CP_ALIAS,
4390       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0,
4391       .access = PL2_RW,
4392       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) },
4393     { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64,
4394       .type = ARM_CP_ALIAS,
4395       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1,
4396       .access = PL2_RW,
4397       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) },
4398     { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64,
4399       .type = ARM_CP_ALIAS,
4400       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2,
4401       .access = PL2_RW,
4402       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) },
4403     { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64,
4404       .type = ARM_CP_ALIAS,
4405       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3,
4406       .access = PL2_RW,
4407       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) },
4408     { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64,
4409       .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1,
4410       .resetvalue = 0,
4411       .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) },
4412     { .name = "SDCR", .type = ARM_CP_ALIAS,
4413       .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1,
4414       .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
4415       .writefn = sdcr_write,
4416       .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) },
4417     REGINFO_SENTINEL
4418 };
4419 
4420 /* Used to describe the behaviour of EL2 regs when EL2 does not exist.  */
4421 static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = {
4422     { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
4423       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
4424       .access = PL2_RW,
4425       .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
4426     { .name = "HCR_EL2", .state = ARM_CP_STATE_BOTH,
4428       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
4429       .access = PL2_RW,
4430       .type = ARM_CP_CONST, .resetvalue = 0 },
4431     { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH,
4432       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7,
4433       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4434     { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
4435       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
4436       .access = PL2_RW,
4437       .type = ARM_CP_CONST, .resetvalue = 0 },
4438     { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
4439       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
4440       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4441     { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
4442       .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
4443       .access = PL2_RW, .type = ARM_CP_CONST,
4444       .resetvalue = 0 },
4445     { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
4446       .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
4447       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4448     { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
4449       .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
4450       .access = PL2_RW, .type = ARM_CP_CONST,
4451       .resetvalue = 0 },
4452     { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
4453       .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
4454       .access = PL2_RW, .type = ARM_CP_CONST,
4455       .resetvalue = 0 },
4456     { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
4457       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
4458       .access = PL2_RW, .type = ARM_CP_CONST,
4459       .resetvalue = 0 },
4460     { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
4461       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
4462       .access = PL2_RW, .type = ARM_CP_CONST,
4463       .resetvalue = 0 },
4464     { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
4465       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
4466       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4467     { .name = "VTCR_EL2", .state = ARM_CP_STATE_BOTH,
4468       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
4469       .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
4470       .type = ARM_CP_CONST, .resetvalue = 0 },
4471     { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
4472       .cp = 15, .opc1 = 6, .crm = 2,
4473       .access = PL2_RW, .accessfn = access_el3_aa32ns,
4474       .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
4475     { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
4476       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
4477       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4478     { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
4479       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
4480       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4481     { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
4482       .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
4483       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4484     { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
4485       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
4486       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4487     { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
4488       .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
4489       .resetvalue = 0 },
4490     { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
4491       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
4492       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4493     { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
4494       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
4495       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4496     { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
4497       .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
4498       .resetvalue = 0 },
4499     { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
4500       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
4501       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4502     { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
4503       .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
4504       .resetvalue = 0 },
4505     { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
4506       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
4507       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4508     { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
4509       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
4510       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4511     { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
4512       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
4513       .access = PL2_RW, .accessfn = access_tda,
4514       .type = ARM_CP_CONST, .resetvalue = 0 },
4515     { .name = "HPFAR_EL2", .state = ARM_CP_STATE_BOTH,
4516       .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
4517       .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
4518       .type = ARM_CP_CONST, .resetvalue = 0 },
4519     { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
4520       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
4521       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4522     { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
4523       .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
4524       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4525     { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
4526       .type = ARM_CP_CONST,
4527       .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
4528       .access = PL2_RW, .resetvalue = 0 },
4529     REGINFO_SENTINEL
4530 };
4531 
4532 /* Ditto, but for registers which exist in ARMv8 but not v7 */
4533 static const ARMCPRegInfo el3_no_el2_v8_cp_reginfo[] = {
4534     { .name = "HCR2", .state = ARM_CP_STATE_AA32,
4535       .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
4536       .access = PL2_RW,
4537       .type = ARM_CP_CONST, .resetvalue = 0 },
4538     REGINFO_SENTINEL
4539 };
4540 
4541 static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
4542 {
4543     ARMCPU *cpu = arm_env_get_cpu(env);
4544     uint64_t valid_mask = HCR_MASK;
4545 
4546     if (arm_feature(env, ARM_FEATURE_EL3)) {
4547         valid_mask &= ~HCR_HCD;
4548     } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
4549         /* Architecturally HCR.TSC is RES0 if EL3 is not implemented.
4550          * However, if we're using the SMC PSCI conduit then QEMU is
4551          * effectively acting like EL3 firmware and so the guest at
4552          * EL2 should retain the ability to prevent EL1 from making
4553          * SMC calls into the ersatz firmware, so in
4554          * that case HCR.TSC should be read/write.
4555          */
4556         valid_mask &= ~HCR_TSC;
4557     }
4558     if (cpu_isar_feature(aa64_lor, cpu)) {
4559         valid_mask |= HCR_TLOR;
4560     }
4561     if (cpu_isar_feature(aa64_pauth, cpu)) {
4562         valid_mask |= HCR_API | HCR_APK;
4563     }
4564 
4565     /* Clear RES0 bits.  */
4566     value &= valid_mask;
4567 
4568     /* These bits change the MMU setup:
4569      * HCR_VM enables stage 2 translation
4570      * HCR_PTW forbids certain page-table setups
4571      * HCR_DC disables stage 1 and enables stage 2 translation
4572      */
4573     if ((env->cp15.hcr_el2 ^ value) & (HCR_VM | HCR_PTW | HCR_DC)) {
4574         tlb_flush(CPU(cpu));
4575     }
4576     env->cp15.hcr_el2 = value;
4577 
4578     /*
4579      * Updates to VI and VF require us to update the status of
4580      * virtual interrupts, which are the logical OR of these bits
4581      * and the state of the input lines from the GIC. (This requires
4582      * that we have the iothread lock, which is done by marking the
4583      * reginfo structs as ARM_CP_IO.)
4584      * Note that if a write to HCR pends a VIRQ or VFIQ it is never
4585      * possible for it to be taken immediately, because VIRQ and
4586      * VFIQ are masked unless running at EL0 or EL1, and HCR
4587      * can only be written at EL2.
4588      */
4589     g_assert(qemu_mutex_iothread_locked());
4590     arm_cpu_update_virq(cpu);
4591     arm_cpu_update_vfiq(cpu);
4592 }
4593 
4594 static void hcr_writehigh(CPUARMState *env, const ARMCPRegInfo *ri,
4595                           uint64_t value)
4596 {
4597     /* Handle HCR2 write, i.e. write to high half of HCR_EL2 */
4598     value = deposit64(env->cp15.hcr_el2, 32, 32, value);
4599     hcr_write(env, NULL, value);
4600 }
4601 
4602 static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri,
4603                          uint64_t value)
4604 {
4605     /* Handle HCR write, i.e. write to low half of HCR_EL2 */
4606     value = deposit64(env->cp15.hcr_el2, 0, 32, value);
4607     hcr_write(env, NULL, value);
4608 }
4609 
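/*
 * Illustrative sketch of the split write path (values assumed, not
 * architecturally significant): hcr_writelow() deposits its argument
 * into bits [31:0] and hcr_writehigh() into bits [63:32], each leaving
 * the other half intact, e.g.
 *
 *   deposit64(0x0000000000000038, 32, 32, 0x1) == 0x0000000100000038
 *
 * The combined 64-bit value is then passed through hcr_write(), which
 * clears any RES0 bits before it lands in env->cp15.hcr_el2.
 */
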
4610 /*
4611  * Return the effective value of HCR_EL2.
4612  * Bits that are not included here:
4613  * RW       (read from SCR_EL3.RW as needed)
4614  */
4615 uint64_t arm_hcr_el2_eff(CPUARMState *env)
4616 {
4617     uint64_t ret = env->cp15.hcr_el2;
4618 
4619     if (arm_is_secure_below_el3(env)) {
4620         /*
4621          * "This register has no effect if EL2 is not enabled in the
4622          * current Security state".  This is ARMv8.4-SecEL2 speak for
4623          * !(SCR_EL3.NS==1 || SCR_EL3.EEL2==1).
4624          *
4625          * Prior to that, the language was "In an implementation that
4626          * includes EL3, when the value of SCR_EL3.NS is 0 the PE behaves
4627          * as if this field is 0 for all purposes other than a direct
4628          * read or write access of HCR_EL2".  With lots of enumeration
4629          * on a per-field basis.  In current QEMU, this is condition
4630          * is arm_is_secure_below_el3.
4631          *
4632          * Since the v8.4 language applies to the entire register, and
4633          * appears to be backward compatible, use that.
4634          */
4635         ret = 0;
4636     } else if (ret & HCR_TGE) {
4637         /* These bits are up-to-date as of ARMv8.4.  */
4638         if (ret & HCR_E2H) {
4639             ret &= ~(HCR_VM | HCR_FMO | HCR_IMO | HCR_AMO |
4640                      HCR_BSU_MASK | HCR_DC | HCR_TWI | HCR_TWE |
4641                      HCR_TID0 | HCR_TID2 | HCR_TPCP | HCR_TPU |
4642                      HCR_TDZ | HCR_CD | HCR_ID | HCR_MIOCNCE);
4643         } else {
4644             ret |= HCR_FMO | HCR_IMO | HCR_AMO;
4645         }
4646         ret &= ~(HCR_SWIO | HCR_PTW | HCR_VF | HCR_VI | HCR_VSE |
4647                  HCR_FB | HCR_TID1 | HCR_TID3 | HCR_TSC | HCR_TACR |
4648                  HCR_TSW | HCR_TTLB | HCR_TVM | HCR_HCD | HCR_TRVM |
4649                  HCR_TLOR);
4650     }
4651 
4652     return ret;
4653 }
4654 
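/*
 * Worked example (bit values illustrative): if the guest has written
 * HCR_TGE | HCR_VI and E2H is clear, the effective value computed above
 * is HCR_TGE | HCR_FMO | HCR_IMO | HCR_AMO: the TGE branch forces the
 * physical interrupt routing bits to 1, and the final mask clears VI,
 * so a pending virtual IRQ is ignored while TGE is set.
 */
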
4655 static const ARMCPRegInfo el2_cp_reginfo[] = {
4656     { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
4657       .type = ARM_CP_IO,
4658       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
4659       .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
4660       .writefn = hcr_write },
4661     { .name = "HCR", .state = ARM_CP_STATE_AA32,
4662       .type = ARM_CP_ALIAS | ARM_CP_IO,
4663       .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
4664       .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
4665       .writefn = hcr_writelow },
4666     { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH,
4667       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7,
4668       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4669     { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
4670       .type = ARM_CP_ALIAS,
4671       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
4672       .access = PL2_RW,
4673       .fieldoffset = offsetof(CPUARMState, elr_el[2]) },
4674     { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
4675       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
4676       .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) },
4677     { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
4678       .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
4679       .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) },
4680     { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
4681       .type = ARM_CP_ALIAS,
4682       .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
4683       .access = PL2_RW,
4684       .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el[2]) },
4685     { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
4686       .type = ARM_CP_ALIAS,
4687       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
4688       .access = PL2_RW,
4689       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) },
4690     { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
4691       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
4692       .access = PL2_RW, .writefn = vbar_write,
4693       .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]),
4694       .resetvalue = 0 },
4695     { .name = "SP_EL2", .state = ARM_CP_STATE_AA64,
4696       .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0,
4697       .access = PL3_RW, .type = ARM_CP_ALIAS,
4698       .fieldoffset = offsetof(CPUARMState, sp_el[2]) },
4699     { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
4700       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
4701       .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0,
4702       .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]) },
4703     { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
4704       .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
4705       .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]),
4706       .resetvalue = 0 },
4707     { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
4708       .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
4709       .access = PL2_RW, .type = ARM_CP_ALIAS,
4710       .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) },
4711     { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
4712       .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
4713       .access = PL2_RW, .type = ARM_CP_CONST,
4714       .resetvalue = 0 },
4715     /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */
4716     { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
4717       .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
4718       .access = PL2_RW, .type = ARM_CP_CONST,
4719       .resetvalue = 0 },
4720     { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
4721       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
4722       .access = PL2_RW, .type = ARM_CP_CONST,
4723       .resetvalue = 0 },
4724     { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
4725       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
4726       .access = PL2_RW, .type = ARM_CP_CONST,
4727       .resetvalue = 0 },
4728     { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
4729       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
4730       .access = PL2_RW,
4731       /* no .writefn needed as this can't cause an ASID change;
4732        * no .raw_writefn or .resetfn needed as we never use mask/base_mask
4733        */
4734       .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) },
4735     { .name = "VTCR", .state = ARM_CP_STATE_AA32,
4736       .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
4737       .type = ARM_CP_ALIAS,
4738       .access = PL2_RW, .accessfn = access_el3_aa32ns,
4739       .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
4740     { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64,
4741       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
4742       .access = PL2_RW,
4743       /* no .writefn needed as this can't cause an ASID change;
4744        * no .raw_writefn or .resetfn needed as we never use mask/base_mask
4745        */
4746       .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
4747     { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
4748       .cp = 15, .opc1 = 6, .crm = 2,
4749       .type = ARM_CP_64BIT | ARM_CP_ALIAS,
4750       .access = PL2_RW, .accessfn = access_el3_aa32ns,
4751       .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2),
4752       .writefn = vttbr_write },
4753     { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
4754       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
4755       .access = PL2_RW, .writefn = vttbr_write,
4756       .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) },
4757     { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
4758       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
4759       .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write,
4760       .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) },
4761     { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
4762       .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
4763       .access = PL2_RW, .resetvalue = 0,
4764       .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) },
4765     { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
4766       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
4767       .access = PL2_RW, .resetvalue = 0,
4768       .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
4769     { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
4770       .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
4771       .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
4772     { .name = "TLBIALLNSNH",
4773       .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
4774       .type = ARM_CP_NO_RAW, .access = PL2_W,
4775       .writefn = tlbiall_nsnh_write },
4776     { .name = "TLBIALLNSNHIS",
4777       .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
4778       .type = ARM_CP_NO_RAW, .access = PL2_W,
4779       .writefn = tlbiall_nsnh_is_write },
4780     { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
4781       .type = ARM_CP_NO_RAW, .access = PL2_W,
4782       .writefn = tlbiall_hyp_write },
4783     { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
4784       .type = ARM_CP_NO_RAW, .access = PL2_W,
4785       .writefn = tlbiall_hyp_is_write },
4786     { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
4787       .type = ARM_CP_NO_RAW, .access = PL2_W,
4788       .writefn = tlbimva_hyp_write },
4789     { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
4790       .type = ARM_CP_NO_RAW, .access = PL2_W,
4791       .writefn = tlbimva_hyp_is_write },
4792     { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64,
4793       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
4794       .type = ARM_CP_NO_RAW, .access = PL2_W,
4795       .writefn = tlbi_aa64_alle2_write },
4796     { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64,
4797       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
4798       .type = ARM_CP_NO_RAW, .access = PL2_W,
4799       .writefn = tlbi_aa64_vae2_write },
4800     { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64,
4801       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
4802       .access = PL2_W, .type = ARM_CP_NO_RAW,
4803       .writefn = tlbi_aa64_vae2_write },
4804     { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64,
4805       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
4806       .access = PL2_W, .type = ARM_CP_NO_RAW,
4807       .writefn = tlbi_aa64_alle2is_write },
4808     { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64,
4809       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
4810       .type = ARM_CP_NO_RAW, .access = PL2_W,
4811       .writefn = tlbi_aa64_vae2is_write },
4812     { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64,
4813       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
4814       .access = PL2_W, .type = ARM_CP_NO_RAW,
4815       .writefn = tlbi_aa64_vae2is_write },
4816 #ifndef CONFIG_USER_ONLY
4817     /* Unlike the other EL2-related AT operations, these must
4818      * UNDEF from EL3 if EL2 is not implemented, which is why we
4819      * define them here rather than with the rest of the AT ops.
4820      */
4821     { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64,
4822       .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
4823       .access = PL2_W, .accessfn = at_s1e2_access,
4824       .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
4825     { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64,
4826       .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
4827       .access = PL2_W, .accessfn = at_s1e2_access,
4828       .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
4829     /* The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE
4830      * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3
4831      * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose
4832      * to behave as if SCR.NS was 1.
4833      */
4834     { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
4835       .access = PL2_W,
4836       .writefn = ats1h_write, .type = ARM_CP_NO_RAW },
4837     { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
4838       .access = PL2_W,
4839       .writefn = ats1h_write, .type = ARM_CP_NO_RAW },
4840     { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
4841       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
4842       /* ARMv7 requires bits 0 and 1 to reset to 1. ARMv8 defines the
4843        * reset values as IMPDEF. We choose to reset to 3 to comply with
4844        * both ARMv7 and ARMv8.
4845        */
4846       .access = PL2_RW, .resetvalue = 3,
4847       .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) },
4848     { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
4849       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
4850       .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0,
4851       .writefn = gt_cntvoff_write,
4852       .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
4853     { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
4854       .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO,
4855       .writefn = gt_cntvoff_write,
4856       .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
4857     { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
4858       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
4859       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
4860       .type = ARM_CP_IO, .access = PL2_RW,
4861       .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
4862     { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
4863       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
4864       .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO,
4865       .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
4866     { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
4867       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
4868       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
4869       .resetfn = gt_hyp_timer_reset,
4870       .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write },
4871     { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
4872       .type = ARM_CP_IO,
4873       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
4874       .access = PL2_RW,
4875       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl),
4876       .resetvalue = 0,
4877       .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write },
4878 #endif
4879     /* The only field of MDCR_EL2 that has a defined architectural reset value
4880      * is MDCR_EL2.HPMN, which should reset to the value of PMCR_EL0.N; but we
4881      * don't implement any PMU event counters, so using zero as a reset
4882      * value for MDCR_EL2 is okay.
4883      */
4884     { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
4885       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
4886       .access = PL2_RW, .resetvalue = 0,
4887       .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2), },
4888     { .name = "HPFAR", .state = ARM_CP_STATE_AA32,
4889       .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
4890       .access = PL2_RW, .accessfn = access_el3_aa32ns,
4891       .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
4892     { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64,
4893       .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
4894       .access = PL2_RW,
4895       .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
4896     { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
4897       .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
4898       .access = PL2_RW,
4899       .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) },
4900     REGINFO_SENTINEL
4901 };
4902 
4903 static const ARMCPRegInfo el2_v8_cp_reginfo[] = {
4904     { .name = "HCR2", .state = ARM_CP_STATE_AA32,
4905       .type = ARM_CP_ALIAS | ARM_CP_IO,
4906       .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
4907       .access = PL2_RW,
4908       .fieldoffset = offsetofhigh32(CPUARMState, cp15.hcr_el2),
4909       .writefn = hcr_writehigh },
4910     REGINFO_SENTINEL
4911 };
4912 
4913 static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
4914                                    bool isread)
4915 {
4916     /* The NSACR is RW at EL3, and RO for NS EL1 and NS EL2.
4917      * At Secure EL1 it traps to EL3.
4918      */
4919     if (arm_current_el(env) == 3) {
4920         return CP_ACCESS_OK;
4921     }
4922     if (arm_is_secure_below_el3(env)) {
4923         return CP_ACCESS_TRAP_EL3;
4924     }
4925     /* From NS EL1 and NS EL2, writes are UNDEF but reads are allowed. */
4926     if (isread) {
4927         return CP_ACCESS_OK;
4928     }
4929     return CP_ACCESS_TRAP_UNCATEGORIZED;
4930 }
4931 
4932 static const ARMCPRegInfo el3_cp_reginfo[] = {
4933     { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64,
4934       .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
4935       .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3),
4936       .resetvalue = 0, .writefn = scr_write },
4937     { .name = "SCR",  .type = ARM_CP_ALIAS,
4938       .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0,
4939       .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
4940       .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3),
4941       .writefn = scr_write },
4942     { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64,
4943       .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1,
4944       .access = PL3_RW, .resetvalue = 0,
4945       .fieldoffset = offsetof(CPUARMState, cp15.sder) },
4946     { .name = "SDER",
4947       .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1,
4948       .access = PL3_RW, .resetvalue = 0,
4949       .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) },
4950     { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
4951       .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
4952       .writefn = vbar_write, .resetvalue = 0,
4953       .fieldoffset = offsetof(CPUARMState, cp15.mvbar) },
4954     { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64,
4955       .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0,
4956       .access = PL3_RW, .resetvalue = 0,
4957       .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) },
4958     { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64,
4959       .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2,
4960       .access = PL3_RW,
4961       /* no .writefn needed as this can't cause an ASID change;
4962        * we must provide a .raw_writefn and .resetfn because we handle
4963        * reset and migration for the AArch32 TTBCR(S), which might be
4964        * using mask and base_mask.
4965        */
4966       .resetfn = vmsa_ttbcr_reset, .raw_writefn = vmsa_ttbcr_raw_write,
4967       .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) },
4968     { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
4969       .type = ARM_CP_ALIAS,
4970       .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1,
4971       .access = PL3_RW,
4972       .fieldoffset = offsetof(CPUARMState, elr_el[3]) },
4973     { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64,
4974       .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0,
4975       .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) },
4976     { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64,
4977       .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0,
4978       .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) },
4979     { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64,
4980       .type = ARM_CP_ALIAS,
4981       .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0,
4982       .access = PL3_RW,
4983       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) },
4984     { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64,
4985       .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0,
4986       .access = PL3_RW, .writefn = vbar_write,
4987       .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]),
4988       .resetvalue = 0 },
4989     { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64,
4990       .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2,
4991       .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0,
4992       .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) },
4993     { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64,
4994       .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2,
4995       .access = PL3_RW, .resetvalue = 0,
4996       .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) },
4997     { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64,
4998       .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0,
4999       .access = PL3_RW, .type = ARM_CP_CONST,
5000       .resetvalue = 0 },
5001     { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH,
5002       .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0,
5003       .access = PL3_RW, .type = ARM_CP_CONST,
5004       .resetvalue = 0 },
5005     { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH,
5006       .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1,
5007       .access = PL3_RW, .type = ARM_CP_CONST,
5008       .resetvalue = 0 },
5009     { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64,
5010       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0,
5011       .access = PL3_W, .type = ARM_CP_NO_RAW,
5012       .writefn = tlbi_aa64_alle3is_write },
5013     { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64,
5014       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1,
5015       .access = PL3_W, .type = ARM_CP_NO_RAW,
5016       .writefn = tlbi_aa64_vae3is_write },
5017     { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64,
5018       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5,
5019       .access = PL3_W, .type = ARM_CP_NO_RAW,
5020       .writefn = tlbi_aa64_vae3is_write },
5021     { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64,
5022       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0,
5023       .access = PL3_W, .type = ARM_CP_NO_RAW,
5024       .writefn = tlbi_aa64_alle3_write },
5025     { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64,
5026       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1,
5027       .access = PL3_W, .type = ARM_CP_NO_RAW,
5028       .writefn = tlbi_aa64_vae3_write },
5029     { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64,
5030       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5,
5031       .access = PL3_W, .type = ARM_CP_NO_RAW,
5032       .writefn = tlbi_aa64_vae3_write },
5033     REGINFO_SENTINEL
5034 };
5035 
5036 static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
5037                                      bool isread)
5038 {
5039     /* Only accessible from EL0 if SCTLR.UCT is set. This applies only to
5040      * the AArch64 CTR_EL0; the AArch32 CTR has its own reginfo struct.
5041      */
5042     if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCT)) {
5043         return CP_ACCESS_TRAP;
5044     }
5045     return CP_ACCESS_OK;
5046 }
5047 
5048 static void oslar_write(CPUARMState *env, const ARMCPRegInfo *ri,
5049                         uint64_t value)
5050 {
5051     /* Writes to OSLAR_EL1 may update the OS lock status, which can be
5052      * read via a bit in OSLSR_EL1.
5053      */
5054     int oslock;
5055 
5056     if (ri->state == ARM_CP_STATE_AA32) {
5057         oslock = (value == 0xC5ACCE55);
5058     } else {
5059         oslock = value & 1;
5060     }
5061 
5062     env->cp15.oslsr_el1 = deposit32(env->cp15.oslsr_el1, 1, 1, oslock);
5063 }
5064 
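/*
 * Example of the two encodings (key value from the architecture): an
 * AArch32 write of the key 0xC5ACCE55 to OSLAR sets the OS lock, and a
 * write of any other value clears it; an AArch64 write to OSLAR_EL1
 * uses only bit 0 of the written value. Either way the result is
 * reflected in OSLSR_EL1.OSLK (bit 1), which is all that oslar_write()
 * updates.
 */
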
5065 static const ARMCPRegInfo debug_cp_reginfo[] = {
5066     /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped
5067      * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1;
5068      * unlike DBGDRAR it is never accessible from EL0.
5069      * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64
5070      * accessor.
5071      */
5072     { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
5073       .access = PL0_R, .accessfn = access_tdra,
5074       .type = ARM_CP_CONST, .resetvalue = 0 },
5075     { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64,
5076       .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
5077       .access = PL1_R, .accessfn = access_tdra,
5078       .type = ARM_CP_CONST, .resetvalue = 0 },
5079     { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
5080       .access = PL0_R, .accessfn = access_tdra,
5081       .type = ARM_CP_CONST, .resetvalue = 0 },
5082     /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */
5083     { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH,
5084       .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
5085       .access = PL1_RW, .accessfn = access_tda,
5086       .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1),
5087       .resetvalue = 0 },
5088     /* MDCCSR_EL0, aka DBGDSCRint. This is a read-only mirror of MDSCR_EL1.
5089      * We don't implement the configurable EL0 access.
5090      */
5091     { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_BOTH,
5092       .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
5093       .type = ARM_CP_ALIAS,
5094       .access = PL1_R, .accessfn = access_tda,
5095       .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), },
5096     { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH,
5097       .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4,
5098       .access = PL1_W, .type = ARM_CP_NO_RAW,
5099       .accessfn = access_tdosa,
5100       .writefn = oslar_write },
5101     { .name = "OSLSR_EL1", .state = ARM_CP_STATE_BOTH,
5102       .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 4,
5103       .access = PL1_R, .resetvalue = 10,
5104       .accessfn = access_tdosa,
5105       .fieldoffset = offsetof(CPUARMState, cp15.oslsr_el1) },
5106     /* Dummy OSDLR_EL1: 32-bit Linux will read this */
5107     { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH,
5108       .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4,
5109       .access = PL1_RW, .accessfn = access_tdosa,
5110       .type = ARM_CP_NOP },
5111     /* Dummy DBGVCR: Linux wants to clear this on startup, but we don't
5112      * implement vector catch debug events yet.
5113      */
5114     { .name = "DBGVCR",
5115       .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
5116       .access = PL1_RW, .accessfn = access_tda,
5117       .type = ARM_CP_NOP },
5118     /* Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor
5119      * to save and restore a 32-bit guest's DBGVCR)
5120      */
5121     { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64,
5122       .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0,
5123       .access = PL2_RW, .accessfn = access_tda,
5124       .type = ARM_CP_NOP },
5125     /* Dummy MDCCINT_EL1, since we don't implement the Debug Communications
5126      * Channel but Linux may try to access this register. The 32-bit
5127      * alias is DBGDCCINT.
5128      */
5129     { .name = "MDCCINT_EL1", .state = ARM_CP_STATE_BOTH,
5130       .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
5131       .access = PL1_RW, .accessfn = access_tda,
5132       .type = ARM_CP_NOP },
5133     REGINFO_SENTINEL
5134 };
5135 
5136 static const ARMCPRegInfo debug_lpae_cp_reginfo[] = {
5137     /* 64 bit access versions of the (dummy) debug registers */
5138     { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0,
5139       .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
5140     { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0,
5141       .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
5142     REGINFO_SENTINEL
5143 };
5144 
5145 /* Return the exception level to which exceptions should be taken
5146  * via SVEAccessTrap.  If an exception should be routed through
5147  * AArch64.AdvSIMDFPAccessTrap, return 0; fp_exception_el should
5148  * take care of raising that exception.
5149  * Cf. the ARM pseudocode function CheckSVEEnabled.
5150  */
5151 int sve_exception_el(CPUARMState *env, int el)
5152 {
5153 #ifndef CONFIG_USER_ONLY
5154     if (el <= 1) {
5155         bool disabled = false;
5156 
5157         /* CPACR.ZEN controls traps to EL1:
5158          * 0, 2 : trap EL0 and EL1 accesses
5159          * 1    : trap only EL0 accesses
5160          * 3    : trap no accesses
5161          */
5162         if (!extract32(env->cp15.cpacr_el1, 16, 1)) {
5163             disabled = true;
5164         } else if (!extract32(env->cp15.cpacr_el1, 17, 1)) {
5165             disabled = el == 0;
5166         }
5167         if (disabled) {
5168             /* route_to_el2 */
5169             return (arm_feature(env, ARM_FEATURE_EL2)
5170                     && (arm_hcr_el2_eff(env) & HCR_TGE) ? 2 : 1);
5171         }
5172 
5173         /* Check CPACR.FPEN.  */
5174         if (!extract32(env->cp15.cpacr_el1, 20, 1)) {
5175             disabled = true;
5176         } else if (!extract32(env->cp15.cpacr_el1, 21, 1)) {
5177             disabled = el == 0;
5178         }
5179         if (disabled) {
5180             return 0;
5181         }
5182     }
5183 
5184     /* CPTR_EL2.  Since TZ and TFP are positive,
5185      * they will be zero when EL2 is not present.
5186      */
5187     if (el <= 2 && !arm_is_secure_below_el3(env)) {
5188         if (env->cp15.cptr_el[2] & CPTR_TZ) {
5189             return 2;
5190         }
5191         if (env->cp15.cptr_el[2] & CPTR_TFP) {
5192             return 0;
5193         }
5194     }
5195 
5196     /* CPTR_EL3.  Since EZ is negative we must check for EL3.  */
5197     if (arm_feature(env, ARM_FEATURE_EL3)
5198         && !(env->cp15.cptr_el[3] & CPTR_EZ)) {
5199         return 3;
5200     }
5201 #endif
5202     return 0;
5203 }
5204 
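/*
 * Worked example (field values assumed): with CPACR_EL1.ZEN == 0b01
 * (bit 16 set, bit 17 clear), an SVE access from EL0 is disabled and
 * traps to EL1, or to EL2 when HCR_EL2.TGE routes it there; the same
 * access from EL1 is allowed, and checking falls through to the
 * CPACR.FPEN and CPTR_EL2/CPTR_EL3 tests above.
 */
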
5205 /*
5206  * Given that SVE is enabled, return the vector length for EL.
5207  */
5208 uint32_t sve_zcr_len_for_el(CPUARMState *env, int el)
5209 {
5210     ARMCPU *cpu = arm_env_get_cpu(env);
5211     uint32_t zcr_len = cpu->sve_max_vq - 1;
5212 
5213     if (el <= 1) {
5214         zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[1]);
5215     }
5216     if (el < 2 && arm_feature(env, ARM_FEATURE_EL2)) {
5217         zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[2]);
5218     }
5219     if (el < 3 && arm_feature(env, ARM_FEATURE_EL3)) {
5220         zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[3]);
5221     }
5222     return zcr_len;
5223 }
5224 
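/*
 * Worked example (CPU configuration assumed): on a CPU with
 * sve_max_vq == 4, zcr_len starts at 3. With ZCR_EL1.LEN == 1 and no
 * EL2 or EL3 implemented, a call with el <= 1 returns MIN(3, 1) == 1,
 * i.e. vectors of (1 + 1) * 128 == 256 bits. Each higher implemented
 * EL can only shrink the result, never grow it.
 */
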
5225 static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
5226                       uint64_t value)
5227 {
5228     int cur_el = arm_current_el(env);
5229     int old_len = sve_zcr_len_for_el(env, cur_el);
5230     int new_len;
5231 
5232     /* Bits other than [3:0] are RAZ/WI.  */
5233     raw_write(env, ri, value & 0xf);
5234 
5235     /*
5236      * Because we arrived here, we know both FP and SVE are enabled;
5237      * otherwise we would have trapped access to the ZCR_ELn register.
5238      */
5239     new_len = sve_zcr_len_for_el(env, cur_el);
5240     if (new_len < old_len) {
5241         aarch64_sve_narrow_vq(env, new_len + 1);
5242     }
5243 }
5244 
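/*
 * Usage sketch (values illustrative): if the current effective length
 * was 3 (512-bit vectors) and the guest writes ZCR_ELx.LEN == 1,
 * new_len becomes 1 and aarch64_sve_narrow_vq(env, 2) zeroes the
 * now-inaccessible high portion of the Z, P and FFR registers, so a
 * later re-widening observes zeroes. Growing the length never calls
 * the narrow hook.
 */
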
5245 static const ARMCPRegInfo zcr_el1_reginfo = {
5246     .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64,
5247     .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0,
5248     .access = PL1_RW, .type = ARM_CP_SVE,
5249     .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]),
5250     .writefn = zcr_write, .raw_writefn = raw_write
5251 };
5252 
5253 static const ARMCPRegInfo zcr_el2_reginfo = {
5254     .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
5255     .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
5256     .access = PL2_RW, .type = ARM_CP_SVE,
5257     .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]),
5258     .writefn = zcr_write, .raw_writefn = raw_write
5259 };
5260 
5261 static const ARMCPRegInfo zcr_no_el2_reginfo = {
5262     .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
5263     .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
5264     .access = PL2_RW, .type = ARM_CP_SVE,
5265     .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore
5266 };
5267 
5268 static const ARMCPRegInfo zcr_el3_reginfo = {
5269     .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64,
5270     .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0,
5271     .access = PL3_RW, .type = ARM_CP_SVE,
5272     .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]),
5273     .writefn = zcr_write, .raw_writefn = raw_write
5274 };
5275 
5276 void hw_watchpoint_update(ARMCPU *cpu, int n)
5277 {
5278     CPUARMState *env = &cpu->env;
5279     vaddr len = 0;
5280     vaddr wvr = env->cp15.dbgwvr[n];
5281     uint64_t wcr = env->cp15.dbgwcr[n];
5282     int mask;
5283     int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;
5284 
5285     if (env->cpu_watchpoint[n]) {
5286         cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]);
5287         env->cpu_watchpoint[n] = NULL;
5288     }
5289 
5290     if (!extract64(wcr, 0, 1)) {
5291         /* E bit clear : watchpoint disabled */
5292         return;
5293     }
5294 
5295     switch (extract64(wcr, 3, 2)) {
5296     case 0:
5297         /* LSC 00 is reserved and must behave as if the wp is disabled */
5298         return;
5299     case 1:
5300         flags |= BP_MEM_READ;
5301         break;
5302     case 2:
5303         flags |= BP_MEM_WRITE;
5304         break;
5305     case 3:
5306         flags |= BP_MEM_ACCESS;
5307         break;
5308     }
5309 
5310     /* Attempts to use both MASK and BAS fields simultaneously are
5311      * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case,
5312      * thus generating a watchpoint for every byte in the masked region.
5313      */
5314     mask = extract64(wcr, 24, 4);
5315     if (mask == 1 || mask == 2) {
5316         /* Reserved values of MASK; we must act as if the mask value was
5317          * some non-reserved value, or as if the watchpoint were disabled.
5318          * We choose the latter.
5319          */
5320         return;
5321     } else if (mask) {
5322         /* Watchpoint covers an aligned area up to 2GB in size */
5323         len = 1ULL << mask;
5324         /* If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE
5325          * whether the watchpoint fires when the unmasked bits match; we opt
5326          * to generate the exceptions.
5327          */
5328         wvr &= ~(len - 1);
5329     } else {
5330         /* Watchpoint covers bytes defined by the byte address select bits */
5331         int bas = extract64(wcr, 5, 8);
5332         int basstart;
5333 
5334         if (bas == 0) {
5335             /* This must act as if the watchpoint is disabled */
5336             return;
5337         }
5338 
5339         if (extract64(wvr, 2, 1)) {
5340             /* Deprecated case of an address that is only 4-aligned. BAS[7:4] are
5341              * ignored, and BAS[3:0] define which bytes to watch.
5342              */
5343             bas &= 0xf;
5344         }
5345         /* The BAS bits are supposed to be programmed to indicate a contiguous
5346          * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether
5347          * we fire for each byte in the word/doubleword addressed by the WVR.
5348          * We choose to ignore any non-zero bits after the first range of 1s.
5349          */
5350         basstart = ctz32(bas);
5351         len = cto32(bas >> basstart);
5352         wvr += basstart;
5353     }
5354 
5355     cpu_watchpoint_insert(CPU(cpu), wvr, len, flags,
5356                           &env->cpu_watchpoint[n]);
5357 }
5358 
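/*
 * Worked examples (register values assumed): with WCR.MASK == 3 the
 * watchpoint covers a 2^3 == 8 byte region and the low three bits of
 * WVR are ignored (wvr &= ~7). With MASK == 0 and BAS == 0b00111100,
 * basstart == ctz32(0x3c) == 2 and len == cto32(0xf) == 4, so the
 * watchpoint covers the four bytes at wvr + 2 .. wvr + 5.
 */
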
5359 void hw_watchpoint_update_all(ARMCPU *cpu)
5360 {
5361     int i;
5362     CPUARMState *env = &cpu->env;
5363 
5364     /* Completely clear out existing QEMU watchpoints and our array, to
5365      * avoid possible stale entries following migration load.
5366      */
5367     cpu_watchpoint_remove_all(CPU(cpu), BP_CPU);
5368     memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint));
5369 
5370     for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) {
5371         hw_watchpoint_update(cpu, i);
5372     }
5373 }
5374 
5375 static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
5376                          uint64_t value)
5377 {
5378     ARMCPU *cpu = arm_env_get_cpu(env);
5379     int i = ri->crm;
5380 
5381     /* Bits [63:49] are hardwired to the value of bit [48]; that is, the
5382      * register reads and behaves as if values written are sign extended.
5383      * Bits [1:0] are RES0.
5384      */
5385     value = sextract64(value, 0, 49) & ~3ULL;
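         /* Illustrative example: writing 0x0001000000000003 stores
          * 0xffff000000000000, since bit [48] is set (so bits [63:49]
          * read as ones) and the RES0 bits [1:0] are cleared.
          */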
5386 
5387     raw_write(env, ri, value);
5388     hw_watchpoint_update(cpu, i);
5389 }
5390 
5391 static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
5392                          uint64_t value)
5393 {
5394     ARMCPU *cpu = arm_env_get_cpu(env);
5395     int i = ri->crm;
5396 
5397     raw_write(env, ri, value);
5398     hw_watchpoint_update(cpu, i);
5399 }
5400 
5401 void hw_breakpoint_update(ARMCPU *cpu, int n)
5402 {
5403     CPUARMState *env = &cpu->env;
5404     uint64_t bvr = env->cp15.dbgbvr[n];
5405     uint64_t bcr = env->cp15.dbgbcr[n];
5406     vaddr addr;
5407     int bt;
5408     int flags = BP_CPU;
5409 
5410     if (env->cpu_breakpoint[n]) {
5411         cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]);
5412         env->cpu_breakpoint[n] = NULL;
5413     }
5414 
5415     if (!extract64(bcr, 0, 1)) {
5416         /* E bit clear : breakpoint disabled */
5417         return;
5418     }
5419 
5420     bt = extract64(bcr, 20, 4);
5421 
5422     switch (bt) {
5423     case 4: /* unlinked address mismatch (reserved if AArch64) */
5424     case 5: /* linked address mismatch (reserved if AArch64) */
5425         qemu_log_mask(LOG_UNIMP,
5426                       "arm: address mismatch breakpoint types not implemented\n");
5427         return;
5428     case 0: /* unlinked address match */
5429     case 1: /* linked address match */
5430     {
5431         /* Bits [63:49] are hardwired to the value of bit [48]; that is,
5432          * we behave as if the register was sign extended. Bits [1:0] are
5433          * RES0. The BAS field is used to allow setting breakpoints on 16
5434          * bit wide instructions; it is CONSTRAINED UNPREDICTABLE whether
5435          * a bp will fire if the addresses covered by the bp and the addresses
5436          * covered by the insn overlap but the insn doesn't start at the
5437          * start of the bp address range. We choose to require the insn and
5438          * the bp to have the same address. The constraints on writing to
5439          * BAS enforced in dbgbcr_write mean we have only four cases:
5440          *  0b0000  => no breakpoint
5441          *  0b0011  => breakpoint on addr
5442          *  0b1100  => breakpoint on addr + 2
5443          *  0b1111  => breakpoint on addr
5444          * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c).
5445          */
5446         int bas = extract64(bcr, 5, 4);
5447         addr = sextract64(bvr, 0, 49) & ~3ULL;
5448         if (bas == 0) {
5449             return;
5450         }
5451         if (bas == 0xc) {
5452             addr += 2;
5453         }
5454         break;
5455     }
5456     case 2: /* unlinked context ID match */
5457     case 8: /* unlinked VMID match (reserved if no EL2) */
5458     case 10: /* unlinked context ID and VMID match (reserved if no EL2) */
5459         qemu_log_mask(LOG_UNIMP,
5460                       "arm: unlinked context breakpoint types not implemented\n");
5461         return;
5462     case 9: /* linked VMID match (reserved if no EL2) */
5463     case 11: /* linked context ID and VMID match (reserved if no EL2) */
5464     case 3: /* linked context ID match */
5465     default:
5466         /* We must generate no events for linked context matches (unless
5467          * they are linked to by some other bp/wp, which is handled in
5468          * updates for the linking bp/wp). We choose to also generate no events
5469          * for reserved values.
5470          */
5471         return;
5472     }
5473 
5474     cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]);
5475 }
5476 
5477 void hw_breakpoint_update_all(ARMCPU *cpu)
5478 {
5479     int i;
5480     CPUARMState *env = &cpu->env;
5481 
5482     /* Completely clear out existing QEMU breakpoints and our array, to
5483      * avoid possible stale entries following migration load.
5484      */
5485     cpu_breakpoint_remove_all(CPU(cpu), BP_CPU);
5486     memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint));
5487 
5488     for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) {
5489         hw_breakpoint_update(cpu, i);
5490     }
5491 }
5492 
5493 static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
5494                          uint64_t value)
5495 {
5496     ARMCPU *cpu = arm_env_get_cpu(env);
5497     int i = ri->crm;
5498 
5499     raw_write(env, ri, value);
5500     hw_breakpoint_update(cpu, i);
5501 }
5502 
5503 static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
5504                          uint64_t value)
5505 {
5506     ARMCPU *cpu = arm_env_get_cpu(env);
5507     int i = ri->crm;
5508 
5509     /* BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only
5510      * copy of BAS[0].
5511      */
5512     value = deposit64(value, 6, 1, extract64(value, 5, 1));
5513     value = deposit64(value, 8, 1, extract64(value, 7, 1));
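         /* Illustrative example: a write of BAS = 0b0110 is stored as
          * BAS = 0b1100, one of the four values hw_breakpoint_update
          * relies on.
          */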
5514 
5515     raw_write(env, ri, value);
5516     hw_breakpoint_update(cpu, i);
5517 }
5518 
5519 static void define_debug_regs(ARMCPU *cpu)
5520 {
5521     /* Define v7 and v8 architectural debug registers.
5522      * These are just dummy implementations for now.
5523      */
5524     int i;
5525     int wrps, brps, ctx_cmps;
5526     ARMCPRegInfo dbgdidr = {
5527         .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
5528         .access = PL0_R, .accessfn = access_tda,
5529         .type = ARM_CP_CONST, .resetvalue = cpu->dbgdidr,
5530     };
5531 
5532     /* Note that all these register fields hold "number of Xs minus 1". */
5533     brps = extract32(cpu->dbgdidr, 24, 4);
5534     wrps = extract32(cpu->dbgdidr, 28, 4);
5535     ctx_cmps = extract32(cpu->dbgdidr, 20, 4);
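         /* For instance, a BRPs field of 5 means six breakpoints, which
          * is why the define loops below run to brps + 1 and wrps + 1.
          */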
5536 
5537     assert(ctx_cmps <= brps);
5538 
5539     /* The DBGDIDR and ID_AA64DFR0_EL1 define various properties
5540      * of the debug registers such as number of breakpoints;
5541      * check that if they both exist then they agree.
5542      */
5543     if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
5544         assert(extract32(cpu->id_aa64dfr0, 12, 4) == brps);
5545         assert(extract32(cpu->id_aa64dfr0, 20, 4) == wrps);
5546         assert(extract32(cpu->id_aa64dfr0, 28, 4) == ctx_cmps);
5547     }
5548 
5549     define_one_arm_cp_reg(cpu, &dbgdidr);
5550     define_arm_cp_regs(cpu, debug_cp_reginfo);
5551 
5552     if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) {
5553         define_arm_cp_regs(cpu, debug_lpae_cp_reginfo);
5554     }
5555 
5556     for (i = 0; i < brps + 1; i++) {
5557         ARMCPRegInfo dbgregs[] = {
5558             { .name = "DBGBVR", .state = ARM_CP_STATE_BOTH,
5559               .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4,
5560               .access = PL1_RW, .accessfn = access_tda,
5561               .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]),
5562               .writefn = dbgbvr_write, .raw_writefn = raw_write
5563             },
5564             { .name = "DBGBCR", .state = ARM_CP_STATE_BOTH,
5565               .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5,
5566               .access = PL1_RW, .accessfn = access_tda,
5567               .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]),
5568               .writefn = dbgbcr_write, .raw_writefn = raw_write
5569             },
5570             REGINFO_SENTINEL
5571         };
5572         define_arm_cp_regs(cpu, dbgregs);
5573     }
5574 
5575     for (i = 0; i < wrps + 1; i++) {
5576         ARMCPRegInfo dbgregs[] = {
5577             { .name = "DBGWVR", .state = ARM_CP_STATE_BOTH,
5578               .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6,
5579               .access = PL1_RW, .accessfn = access_tda,
5580               .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]),
5581               .writefn = dbgwvr_write, .raw_writefn = raw_write
5582             },
5583             { .name = "DBGWCR", .state = ARM_CP_STATE_BOTH,
5584               .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7,
5585               .access = PL1_RW, .accessfn = access_tda,
5586               .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]),
5587               .writefn = dbgwcr_write, .raw_writefn = raw_write
5588             },
5589             REGINFO_SENTINEL
5590         };
5591         define_arm_cp_regs(cpu, dbgregs);
5592     }
5593 }
5594 
5595 /* We don't know until after realize whether there's a GICv3
5596  * attached, and that is what registers the gicv3 sysregs.
5597  * So we have to fill in the GIC fields in ID_PFR1/ID_PFR1_EL1 and
5598  * ID_AA64PFR0_EL1 at runtime.
5599  */
5600 static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri)
5601 {
5602     ARMCPU *cpu = arm_env_get_cpu(env);
5603     uint64_t pfr1 = cpu->id_pfr1;
5604 
5605     if (env->gicv3state) {
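             /* ID_PFR1.GIC is bits [31:28]; setting it to 1 advertises
              * the GIC system register interface.
              */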
5606         pfr1 |= 1 << 28;
5607     }
5608     return pfr1;
5609 }
5610 
5611 static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri)
5612 {
5613     ARMCPU *cpu = arm_env_get_cpu(env);
5614     uint64_t pfr0 = cpu->isar.id_aa64pfr0;
5615 
5616     if (env->gicv3state) {
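             /* ID_AA64PFR0_EL1.GIC is bits [27:24]; setting it to 1
              * advertises the GIC system register interface.
              */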
5617         pfr0 |= 1 << 24;
5618     }
5619     return pfr0;
5620 }
5621 
5622 /* Shared logic between LORID and the rest of the LOR* registers.
5623  * Secure state has already been dealt with.
5624  */
5625 static CPAccessResult access_lor_ns(CPUARMState *env)
5626 {
5627     int el = arm_current_el(env);
5628 
5629     if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TLOR)) {
5630         return CP_ACCESS_TRAP_EL2;
5631     }
5632     if (el < 3 && (env->cp15.scr_el3 & SCR_TLOR)) {
5633         return CP_ACCESS_TRAP_EL3;
5634     }
5635     return CP_ACCESS_OK;
5636 }
5637 
5638 static CPAccessResult access_lorid(CPUARMState *env, const ARMCPRegInfo *ri,
5639                                    bool isread)
5640 {
5641     if (arm_is_secure_below_el3(env)) {
5642         /* Access ok in secure mode.  */
5643         return CP_ACCESS_OK;
5644     }
5645     return access_lor_ns(env);
5646 }
5647 
5648 static CPAccessResult access_lor_other(CPUARMState *env,
5649                                        const ARMCPRegInfo *ri, bool isread)
5650 {
5651     if (arm_is_secure_below_el3(env)) {
5652         /* Access denied in secure mode.  */
5653         return CP_ACCESS_TRAP;
5654     }
5655     return access_lor_ns(env);
5656 }
5657 
5658 #ifdef TARGET_AARCH64
5659 static CPAccessResult access_pauth(CPUARMState *env, const ARMCPRegInfo *ri,
5660                                    bool isread)
5661 {
5662     int el = arm_current_el(env);
5663 
5664     if (el < 2 &&
5665         arm_feature(env, ARM_FEATURE_EL2) &&
5666         !(arm_hcr_el2_eff(env) & HCR_APK)) {
5667         return CP_ACCESS_TRAP_EL2;
5668     }
5669     if (el < 3 &&
5670         arm_feature(env, ARM_FEATURE_EL3) &&
5671         !(env->cp15.scr_el3 & SCR_APK)) {
5672         return CP_ACCESS_TRAP_EL3;
5673     }
5674     return CP_ACCESS_OK;
5675 }
5676 
5677 static const ARMCPRegInfo pauth_reginfo[] = {
5678     { .name = "APDAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
5679       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 0,
5680       .access = PL1_RW, .accessfn = access_pauth,
5681       .fieldoffset = offsetof(CPUARMState, apda_key.lo) },
5682     { .name = "APDAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
5683       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 1,
5684       .access = PL1_RW, .accessfn = access_pauth,
5685       .fieldoffset = offsetof(CPUARMState, apda_key.hi) },
5686     { .name = "APDBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
5687       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 2,
5688       .access = PL1_RW, .accessfn = access_pauth,
5689       .fieldoffset = offsetof(CPUARMState, apdb_key.lo) },
5690     { .name = "APDBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
5691       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 3,
5692       .access = PL1_RW, .accessfn = access_pauth,
5693       .fieldoffset = offsetof(CPUARMState, apdb_key.hi) },
5694     { .name = "APGAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
5695       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 0,
5696       .access = PL1_RW, .accessfn = access_pauth,
5697       .fieldoffset = offsetof(CPUARMState, apga_key.lo) },
5698     { .name = "APGAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
5699       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 1,
5700       .access = PL1_RW, .accessfn = access_pauth,
5701       .fieldoffset = offsetof(CPUARMState, apga_key.hi) },
5702     { .name = "APIAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
5703       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 0,
5704       .access = PL1_RW, .accessfn = access_pauth,
5705       .fieldoffset = offsetof(CPUARMState, apia_key.lo) },
5706     { .name = "APIAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
5707       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 1,
5708       .access = PL1_RW, .accessfn = access_pauth,
5709       .fieldoffset = offsetof(CPUARMState, apia_key.hi) },
5710     { .name = "APIBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
5711       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 2,
5712       .access = PL1_RW, .accessfn = access_pauth,
5713       .fieldoffset = offsetof(CPUARMState, apib_key.lo) },
5714     { .name = "APIBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
5715       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 3,
5716       .access = PL1_RW, .accessfn = access_pauth,
5717       .fieldoffset = offsetof(CPUARMState, apib_key.hi) },
5718     REGINFO_SENTINEL
5719 };
5720 #endif
5721 
5722 static CPAccessResult access_predinv(CPUARMState *env, const ARMCPRegInfo *ri,
5723                                      bool isread)
5724 {
5725     int el = arm_current_el(env);
5726 
5727     if (el == 0) {
5728         uint64_t sctlr = arm_sctlr(env, el);
5729         if (!(sctlr & SCTLR_EnRCTX)) {
5730             return CP_ACCESS_TRAP;
5731         }
5732     } else if (el == 1) {
5733         uint64_t hcr = arm_hcr_el2_eff(env);
5734         if (hcr & HCR_NV) {
5735             return CP_ACCESS_TRAP_EL2;
5736         }
5737     }
5738     return CP_ACCESS_OK;
5739 }
5740 
5741 static const ARMCPRegInfo predinv_reginfo[] = {
5742     { .name = "CFP_RCTX", .state = ARM_CP_STATE_AA64,
5743       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 4,
5744       .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
5745     { .name = "DVP_RCTX", .state = ARM_CP_STATE_AA64,
5746       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 5,
5747       .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
5748     { .name = "CPP_RCTX", .state = ARM_CP_STATE_AA64,
5749       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 7,
5750       .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
5751     /*
5752      * Note the AArch32 opcodes have a different OPC1.
5753      */
5754     { .name = "CFPRCTX", .state = ARM_CP_STATE_AA32,
5755       .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 4,
5756       .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
5757     { .name = "DVPRCTX", .state = ARM_CP_STATE_AA32,
5758       .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 5,
5759       .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
5760     { .name = "CPPRCTX", .state = ARM_CP_STATE_AA32,
5761       .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 7,
5762       .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
5763     REGINFO_SENTINEL
5764 };
5765 
5766 void register_cp_regs_for_features(ARMCPU *cpu)
5767 {
5768     /* Register all the coprocessor registers based on feature bits */
5769     CPUARMState *env = &cpu->env;
5770     if (arm_feature(env, ARM_FEATURE_M)) {
5771         /* M profile has no coprocessor registers */
5772         return;
5773     }
5774 
5775     define_arm_cp_regs(cpu, cp_reginfo);
5776     if (!arm_feature(env, ARM_FEATURE_V8)) {
5777         /* Must go early as it is full of wildcards that may be
5778          * overridden by later definitions.
5779          */
5780         define_arm_cp_regs(cpu, not_v8_cp_reginfo);
5781     }
5782 
5783     if (arm_feature(env, ARM_FEATURE_V6)) {
5784         /* The ID registers all have impdef reset values */
5785         ARMCPRegInfo v6_idregs[] = {
5786             { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH,
5787               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
5788               .access = PL1_R, .type = ARM_CP_CONST,
5789               .resetvalue = cpu->id_pfr0 },
5790             /* ID_PFR1 is not a plain ARM_CP_CONST because we don't know
5791              * the value of the GIC field until after we define these regs.
5792              */
5793             { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH,
5794               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1,
5795               .access = PL1_R, .type = ARM_CP_NO_RAW,
5796               .readfn = id_pfr1_read,
5797               .writefn = arm_cp_write_ignore },
5798             { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH,
5799               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2,
5800               .access = PL1_R, .type = ARM_CP_CONST,
5801               .resetvalue = cpu->id_dfr0 },
5802             { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH,
5803               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3,
5804               .access = PL1_R, .type = ARM_CP_CONST,
5805               .resetvalue = cpu->id_afr0 },
5806             { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH,
5807               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4,
5808               .access = PL1_R, .type = ARM_CP_CONST,
5809               .resetvalue = cpu->id_mmfr0 },
5810             { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH,
5811               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5,
5812               .access = PL1_R, .type = ARM_CP_CONST,
5813               .resetvalue = cpu->id_mmfr1 },
5814             { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH,
5815               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6,
5816               .access = PL1_R, .type = ARM_CP_CONST,
5817               .resetvalue = cpu->id_mmfr2 },
5818             { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH,
5819               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7,
5820               .access = PL1_R, .type = ARM_CP_CONST,
5821               .resetvalue = cpu->id_mmfr3 },
5822             { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH,
5823               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
5824               .access = PL1_R, .type = ARM_CP_CONST,
5825               .resetvalue = cpu->isar.id_isar0 },
5826             { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH,
5827               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1,
5828               .access = PL1_R, .type = ARM_CP_CONST,
5829               .resetvalue = cpu->isar.id_isar1 },
5830             { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH,
5831               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
5832               .access = PL1_R, .type = ARM_CP_CONST,
5833               .resetvalue = cpu->isar.id_isar2 },
5834             { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH,
5835               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3,
5836               .access = PL1_R, .type = ARM_CP_CONST,
5837               .resetvalue = cpu->isar.id_isar3 },
5838             { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH,
5839               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4,
5840               .access = PL1_R, .type = ARM_CP_CONST,
5841               .resetvalue = cpu->isar.id_isar4 },
5842             { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH,
5843               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5,
5844               .access = PL1_R, .type = ARM_CP_CONST,
5845               .resetvalue = cpu->isar.id_isar5 },
5846             { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH,
5847               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6,
5848               .access = PL1_R, .type = ARM_CP_CONST,
5849               .resetvalue = cpu->id_mmfr4 },
5850             { .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH,
5851               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7,
5852               .access = PL1_R, .type = ARM_CP_CONST,
5853               .resetvalue = cpu->isar.id_isar6 },
5854             REGINFO_SENTINEL
5855         };
5856         define_arm_cp_regs(cpu, v6_idregs);
5857         define_arm_cp_regs(cpu, v6_cp_reginfo);
5858     } else {
5859         define_arm_cp_regs(cpu, not_v6_cp_reginfo);
5860     }
5861     if (arm_feature(env, ARM_FEATURE_V6K)) {
5862         define_arm_cp_regs(cpu, v6k_cp_reginfo);
5863     }
5864     if (arm_feature(env, ARM_FEATURE_V7MP) &&
5865         !arm_feature(env, ARM_FEATURE_PMSA)) {
5866         define_arm_cp_regs(cpu, v7mp_cp_reginfo);
5867     }
5868     if (arm_feature(env, ARM_FEATURE_V7VE)) {
5869         define_arm_cp_regs(cpu, pmovsset_cp_reginfo);
5870     }
5871     if (arm_feature(env, ARM_FEATURE_V7)) {
5872         /* v7 performance monitor control register: same implementor
5873          * field as main ID register, and we implement four counters in
5874          * addition to the cycle count register.
5875          */
5876         unsigned int i, pmcrn = 4;
5877         ARMCPRegInfo pmcr = {
5878             .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
5879             .access = PL0_RW,
5880             .type = ARM_CP_IO | ARM_CP_ALIAS,
5881             .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr),
5882             .accessfn = pmreg_access, .writefn = pmcr_write,
5883             .raw_writefn = raw_write,
5884         };
5885         ARMCPRegInfo pmcr64 = {
5886             .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64,
5887             .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0,
5888             .access = PL0_RW, .accessfn = pmreg_access,
5889             .type = ARM_CP_IO,
5890             .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
5891             .resetvalue = (cpu->midr & 0xff000000) | (pmcrn << PMCRN_SHIFT),
5892             .writefn = pmcr_write, .raw_writefn = raw_write,
5893         };
5894         define_one_arm_cp_reg(cpu, &pmcr);
5895         define_one_arm_cp_reg(cpu, &pmcr64);
5896         for (i = 0; i < pmcrn; i++) {
5897             char *pmevcntr_name = g_strdup_printf("PMEVCNTR%d", i);
5898             char *pmevcntr_el0_name = g_strdup_printf("PMEVCNTR%d_EL0", i);
5899             char *pmevtyper_name = g_strdup_printf("PMEVTYPER%d", i);
5900             char *pmevtyper_el0_name = g_strdup_printf("PMEVTYPER%d_EL0", i);
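                 /* The encoding below packs the counter index n as
                  * CRm[1:0] = n[4:3] and opc2 = n[2:0]; e.g. (illustratively)
                  * n = 3 gives crm = 8, opc2 = 3, and n = 11 would give
                  * crm = 9, opc2 = 3.
                  */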
5901             ARMCPRegInfo pmev_regs[] = {
5902                 { .name = pmevcntr_name, .cp = 15, .crn = 14,
5903                   .crm = 8 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
5904                   .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
5905                   .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
5906                   .accessfn = pmreg_access },
5907                 { .name = pmevcntr_el0_name, .state = ARM_CP_STATE_AA64,
5908                   .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 8 | (3 & (i >> 3)),
5909                   .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
5910                   .type = ARM_CP_IO,
5911                   .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
5912                   .raw_readfn = pmevcntr_rawread,
5913                   .raw_writefn = pmevcntr_rawwrite },
5914                 { .name = pmevtyper_name, .cp = 15, .crn = 14,
5915                   .crm = 12 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
5916                   .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
5917                   .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
5918                   .accessfn = pmreg_access },
5919                 { .name = pmevtyper_el0_name, .state = ARM_CP_STATE_AA64,
5920                   .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 12 | (3 & (i >> 3)),
5921                   .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
5922                   .type = ARM_CP_IO,
5923                   .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
5924                   .raw_writefn = pmevtyper_rawwrite },
5925                 REGINFO_SENTINEL
5926             };
5927             define_arm_cp_regs(cpu, pmev_regs);
5928             g_free(pmevcntr_name);
5929             g_free(pmevcntr_el0_name);
5930             g_free(pmevtyper_name);
5931             g_free(pmevtyper_el0_name);
5932         }
5933         ARMCPRegInfo clidr = {
5934             .name = "CLIDR", .state = ARM_CP_STATE_BOTH,
5935             .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
5936             .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->clidr
5937         };
5938         define_one_arm_cp_reg(cpu, &clidr);
5939         define_arm_cp_regs(cpu, v7_cp_reginfo);
5940         define_debug_regs(cpu);
5941     } else {
5942         define_arm_cp_regs(cpu, not_v7_cp_reginfo);
5943     }
5944     if (FIELD_EX32(cpu->id_dfr0, ID_DFR0, PERFMON) >= 4 &&
5945             FIELD_EX32(cpu->id_dfr0, ID_DFR0, PERFMON) != 0xf) {
5946         ARMCPRegInfo v81_pmu_regs[] = {
5947             { .name = "PMCEID2", .state = ARM_CP_STATE_AA32,
5948               .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 4,
5949               .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
5950               .resetvalue = extract64(cpu->pmceid0, 32, 32) },
5951             { .name = "PMCEID3", .state = ARM_CP_STATE_AA32,
5952               .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 5,
5953               .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
5954               .resetvalue = extract64(cpu->pmceid1, 32, 32) },
5955             REGINFO_SENTINEL
5956         };
5957         define_arm_cp_regs(cpu, v81_pmu_regs);
5958     }
5959     if (arm_feature(env, ARM_FEATURE_V8)) {
5960         /* AArch64 ID registers, which all have impdef reset values.
5961          * Note that within the ID register ranges the unused slots
5962          * must all RAZ, not UNDEF; future architecture versions may
5963          * define new registers here.
5964          */
5965         ARMCPRegInfo v8_idregs[] = {
5966             /* ID_AA64PFR0_EL1 is not a plain ARM_CP_CONST because we don't
5967              * know the right value for the GIC field until after we
5968              * define these regs.
5969              */
5970             { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64,
5971               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0,
5972               .access = PL1_R, .type = ARM_CP_NO_RAW,
5973               .readfn = id_aa64pfr0_read,
5974               .writefn = arm_cp_write_ignore },
5975             { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64,
5976               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
5977               .access = PL1_R, .type = ARM_CP_CONST,
5978               .resetvalue = cpu->isar.id_aa64pfr1},
5979             { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5980               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2,
5981               .access = PL1_R, .type = ARM_CP_CONST,
5982               .resetvalue = 0 },
5983             { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5984               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3,
5985               .access = PL1_R, .type = ARM_CP_CONST,
5986               .resetvalue = 0 },
5987             { .name = "ID_AA64ZFR0_EL1", .state = ARM_CP_STATE_AA64,
5988               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4,
5989               .access = PL1_R, .type = ARM_CP_CONST,
5990               /* At present, only SVEver == 0 is defined anyway.  */
5991               .resetvalue = 0 },
5992             { .name = "ID_AA64PFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5993               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5,
5994               .access = PL1_R, .type = ARM_CP_CONST,
5995               .resetvalue = 0 },
5996             { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5997               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6,
5998               .access = PL1_R, .type = ARM_CP_CONST,
5999               .resetvalue = 0 },
6000             { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6001               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7,
6002               .access = PL1_R, .type = ARM_CP_CONST,
6003               .resetvalue = 0 },
6004             { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64,
6005               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0,
6006               .access = PL1_R, .type = ARM_CP_CONST,
6007               .resetvalue = cpu->id_aa64dfr0 },
6008             { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64,
6009               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1,
6010               .access = PL1_R, .type = ARM_CP_CONST,
6011               .resetvalue = cpu->id_aa64dfr1 },
6012             { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6013               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2,
6014               .access = PL1_R, .type = ARM_CP_CONST,
6015               .resetvalue = 0 },
6016             { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6017               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3,
6018               .access = PL1_R, .type = ARM_CP_CONST,
6019               .resetvalue = 0 },
6020             { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64,
6021               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4,
6022               .access = PL1_R, .type = ARM_CP_CONST,
6023               .resetvalue = cpu->id_aa64afr0 },
6024             { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64,
6025               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5,
6026               .access = PL1_R, .type = ARM_CP_CONST,
6027               .resetvalue = cpu->id_aa64afr1 },
6028             { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6029               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6,
6030               .access = PL1_R, .type = ARM_CP_CONST,
6031               .resetvalue = 0 },
6032             { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6033               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7,
6034               .access = PL1_R, .type = ARM_CP_CONST,
6035               .resetvalue = 0 },
6036             { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64,
6037               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
6038               .access = PL1_R, .type = ARM_CP_CONST,
6039               .resetvalue = cpu->isar.id_aa64isar0 },
6040             { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
6041               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
6042               .access = PL1_R, .type = ARM_CP_CONST,
6043               .resetvalue = cpu->isar.id_aa64isar1 },
6044             { .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6045               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
6046               .access = PL1_R, .type = ARM_CP_CONST,
6047               .resetvalue = 0 },
6048             { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6049               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3,
6050               .access = PL1_R, .type = ARM_CP_CONST,
6051               .resetvalue = 0 },
6052             { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6053               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4,
6054               .access = PL1_R, .type = ARM_CP_CONST,
6055               .resetvalue = 0 },
6056             { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6057               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5,
6058               .access = PL1_R, .type = ARM_CP_CONST,
6059               .resetvalue = 0 },
6060             { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6061               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6,
6062               .access = PL1_R, .type = ARM_CP_CONST,
6063               .resetvalue = 0 },
6064             { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6065               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7,
6066               .access = PL1_R, .type = ARM_CP_CONST,
6067               .resetvalue = 0 },
6068             { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64,
6069               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
6070               .access = PL1_R, .type = ARM_CP_CONST,
6071               .resetvalue = cpu->isar.id_aa64mmfr0 },
6072             { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64,
6073               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
6074               .access = PL1_R, .type = ARM_CP_CONST,
6075               .resetvalue = cpu->isar.id_aa64mmfr1 },
6076             { .name = "ID_AA64MMFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6077               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2,
6078               .access = PL1_R, .type = ARM_CP_CONST,
6079               .resetvalue = 0 },
6080             { .name = "ID_AA64MMFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6081               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3,
6082               .access = PL1_R, .type = ARM_CP_CONST,
6083               .resetvalue = 0 },
6084             { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6085               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4,
6086               .access = PL1_R, .type = ARM_CP_CONST,
6087               .resetvalue = 0 },
6088             { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6089               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5,
6090               .access = PL1_R, .type = ARM_CP_CONST,
6091               .resetvalue = 0 },
6092             { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6093               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6,
6094               .access = PL1_R, .type = ARM_CP_CONST,
6095               .resetvalue = 0 },
6096             { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6097               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7,
6098               .access = PL1_R, .type = ARM_CP_CONST,
6099               .resetvalue = 0 },
6100             { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64,
6101               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
6102               .access = PL1_R, .type = ARM_CP_CONST,
6103               .resetvalue = cpu->isar.mvfr0 },
6104             { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64,
6105               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
6106               .access = PL1_R, .type = ARM_CP_CONST,
6107               .resetvalue = cpu->isar.mvfr1 },
6108             { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64,
6109               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
6110               .access = PL1_R, .type = ARM_CP_CONST,
6111               .resetvalue = cpu->isar.mvfr2 },
6112             { .name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6113               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3,
6114               .access = PL1_R, .type = ARM_CP_CONST,
6115               .resetvalue = 0 },
6116             { .name = "MVFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6117               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4,
6118               .access = PL1_R, .type = ARM_CP_CONST,
6119               .resetvalue = 0 },
6120             { .name = "MVFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6121               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5,
6122               .access = PL1_R, .type = ARM_CP_CONST,
6123               .resetvalue = 0 },
6124             { .name = "MVFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6125               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6,
6126               .access = PL1_R, .type = ARM_CP_CONST,
6127               .resetvalue = 0 },
6128             { .name = "MVFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6129               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7,
6130               .access = PL1_R, .type = ARM_CP_CONST,
6131               .resetvalue = 0 },
6132             { .name = "PMCEID0", .state = ARM_CP_STATE_AA32,
6133               .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6,
6134               .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
6135               .resetvalue = extract64(cpu->pmceid0, 0, 32) },
6136             { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64,
6137               .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6,
6138               .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
6139               .resetvalue = cpu->pmceid0 },
6140             { .name = "PMCEID1", .state = ARM_CP_STATE_AA32,
6141               .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7,
6142               .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
6143               .resetvalue = extract64(cpu->pmceid1, 0, 32) },
6144             { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64,
6145               .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7,
6146               .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
6147               .resetvalue = cpu->pmceid1 },
6148             REGINFO_SENTINEL
6149         };
6150 #ifdef CONFIG_USER_ONLY
6151         ARMCPRegUserSpaceInfo v8_user_idregs[] = {
6152             { .name = "ID_AA64PFR0_EL1",
6153               .exported_bits = 0x000f000f00ff0000,
6154               .fixed_bits    = 0x0000000000000011 },
6155             { .name = "ID_AA64PFR1_EL1",
6156               .exported_bits = 0x00000000000000f0 },
6157             { .name = "ID_AA64PFR*_EL1_RESERVED",
6158               .is_glob = true                     },
6159             { .name = "ID_AA64ZFR0_EL1"           },
6160             { .name = "ID_AA64MMFR0_EL1",
6161               .fixed_bits    = 0x00000000ff000000 },
6162             { .name = "ID_AA64MMFR1_EL1"          },
6163             { .name = "ID_AA64MMFR*_EL1_RESERVED",
6164               .is_glob = true                     },
6165             { .name = "ID_AA64DFR0_EL1",
6166               .fixed_bits    = 0x0000000000000006 },
6167             { .name = "ID_AA64DFR1_EL1"           },
6168             { .name = "ID_AA64DFR*_EL1_RESERVED",
6169               .is_glob = true                     },
6170             { .name = "ID_AA64AFR*",
6171               .is_glob = true                     },
6172             { .name = "ID_AA64ISAR0_EL1",
6173               .exported_bits = 0x00fffffff0fffff0 },
6174             { .name = "ID_AA64ISAR1_EL1",
6175               .exported_bits = 0x000000f0ffffffff },
6176             { .name = "ID_AA64ISAR*_EL1_RESERVED",
6177               .is_glob = true                     },
6178             REGUSERINFO_SENTINEL
6179         };
6180         modify_arm_cp_regs(v8_idregs, v8_user_idregs);
6181 #endif
6182         /* RVBAR_EL1 is only implemented if EL1 is the highest EL */
6183         if (!arm_feature(env, ARM_FEATURE_EL3) &&
6184             !arm_feature(env, ARM_FEATURE_EL2)) {
6185             ARMCPRegInfo rvbar = {
6186                 .name = "RVBAR_EL1", .state = ARM_CP_STATE_AA64,
6187                 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
6188                 .type = ARM_CP_CONST, .access = PL1_R, .resetvalue = cpu->rvbar
6189             };
6190             define_one_arm_cp_reg(cpu, &rvbar);
6191         }
6192         define_arm_cp_regs(cpu, v8_idregs);
6193         define_arm_cp_regs(cpu, v8_cp_reginfo);
6194     }
6195     if (arm_feature(env, ARM_FEATURE_EL2)) {
6196         uint64_t vmpidr_def = mpidr_read_val(env);
6197         ARMCPRegInfo vpidr_regs[] = {
6198             { .name = "VPIDR", .state = ARM_CP_STATE_AA32,
6199               .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
6200               .access = PL2_RW, .accessfn = access_el3_aa32ns,
6201               .resetvalue = cpu->midr, .type = ARM_CP_ALIAS,
6202               .fieldoffset = offsetoflow32(CPUARMState, cp15.vpidr_el2) },
6203             { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64,
6204               .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
6205               .access = PL2_RW, .resetvalue = cpu->midr,
6206               .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
6207             { .name = "VMPIDR", .state = ARM_CP_STATE_AA32,
6208               .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
6209               .access = PL2_RW, .accessfn = access_el3_aa32ns,
6210               .resetvalue = vmpidr_def, .type = ARM_CP_ALIAS,
6211               .fieldoffset = offsetoflow32(CPUARMState, cp15.vmpidr_el2) },
6212             { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64,
6213               .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
6214               .access = PL2_RW,
6215               .resetvalue = vmpidr_def,
6216               .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) },
6217             REGINFO_SENTINEL
6218         };
6219         define_arm_cp_regs(cpu, vpidr_regs);
6220         define_arm_cp_regs(cpu, el2_cp_reginfo);
6221         if (arm_feature(env, ARM_FEATURE_V8)) {
6222             define_arm_cp_regs(cpu, el2_v8_cp_reginfo);
6223         }
6224         /* RVBAR_EL2 is only implemented if EL2 is the highest EL */
6225         if (!arm_feature(env, ARM_FEATURE_EL3)) {
6226             ARMCPRegInfo rvbar = {
6227                 .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64,
6228                 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1,
6229                 .type = ARM_CP_CONST, .access = PL2_R, .resetvalue = cpu->rvbar
6230             };
6231             define_one_arm_cp_reg(cpu, &rvbar);
6232         }
6233     } else {
6234         /* If EL2 is missing but higher ELs are enabled, we need to
6235          * register the no_el2 reginfos.
6236          */
6237         if (arm_feature(env, ARM_FEATURE_EL3)) {
6238             /* When EL3 exists but not EL2, VPIDR and VMPIDR take the value
6239              * of MIDR_EL1 and MPIDR_EL1.
6240              */
6241             ARMCPRegInfo vpidr_regs[] = {
6242                 { .name = "VPIDR_EL2", .state = ARM_CP_STATE_BOTH,
6243                   .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
6244                   .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
6245                   .type = ARM_CP_CONST, .resetvalue = cpu->midr,
6246                   .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
6247                 { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_BOTH,
6248                   .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
6249                   .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
6250                   .type = ARM_CP_NO_RAW,
6251                   .writefn = arm_cp_write_ignore, .readfn = mpidr_read },
6252                 REGINFO_SENTINEL
6253             };
6254             define_arm_cp_regs(cpu, vpidr_regs);
6255             define_arm_cp_regs(cpu, el3_no_el2_cp_reginfo);
6256             if (arm_feature(env, ARM_FEATURE_V8)) {
6257                 define_arm_cp_regs(cpu, el3_no_el2_v8_cp_reginfo);
6258             }
6259         }
6260     }
6261     if (arm_feature(env, ARM_FEATURE_EL3)) {
6262         define_arm_cp_regs(cpu, el3_cp_reginfo);
6263         ARMCPRegInfo el3_regs[] = {
6264             { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64,
6265               .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1,
6266               .type = ARM_CP_CONST, .access = PL3_R, .resetvalue = cpu->rvbar },
6267             { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64,
6268               .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0,
6269               .access = PL3_RW,
6270               .raw_writefn = raw_write, .writefn = sctlr_write,
6271               .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]),
6272               .resetvalue = cpu->reset_sctlr },
6273             REGINFO_SENTINEL
6274         };
6275 
6276         define_arm_cp_regs(cpu, el3_regs);
6277     }
6278     /* The behaviour of NSACR is sufficiently various that we don't
6279      * try to describe it in a single reginfo:
6280      *  if EL3 is 64 bit, then trap to EL3 from S EL1,
6281      *     reads as constant 0xc00 from NS EL1 and NS EL2
6282      *  if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2
6283      *  if v7 without EL3, register doesn't exist
6284      *  if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2
6285      */
6286     if (arm_feature(env, ARM_FEATURE_EL3)) {
6287         if (arm_feature(env, ARM_FEATURE_AARCH64)) {
6288             ARMCPRegInfo nsacr = {
6289                 .name = "NSACR", .type = ARM_CP_CONST,
6290                 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
6291                 .access = PL1_RW, .accessfn = nsacr_access,
6292                 .resetvalue = 0xc00
6293             };
6294             define_one_arm_cp_reg(cpu, &nsacr);
6295         } else {
6296             ARMCPRegInfo nsacr = {
6297                 .name = "NSACR",
6298                 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
6299                 .access = PL3_RW | PL1_R,
6300                 .resetvalue = 0,
6301                 .fieldoffset = offsetof(CPUARMState, cp15.nsacr)
6302             };
6303             define_one_arm_cp_reg(cpu, &nsacr);
6304         }
6305     } else {
6306         if (arm_feature(env, ARM_FEATURE_V8)) {
6307             ARMCPRegInfo nsacr = {
6308                 .name = "NSACR", .type = ARM_CP_CONST,
6309                 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
6310                 .access = PL1_R,
6311                 .resetvalue = 0xc00
6312             };
6313             define_one_arm_cp_reg(cpu, &nsacr);
6314         }
6315     }
6316 
6317     if (arm_feature(env, ARM_FEATURE_PMSA)) {
6318         if (arm_feature(env, ARM_FEATURE_V6)) {
6319             /* PMSAv6 not implemented */
6320             assert(arm_feature(env, ARM_FEATURE_V7));
6321             define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
6322             define_arm_cp_regs(cpu, pmsav7_cp_reginfo);
6323         } else {
6324             define_arm_cp_regs(cpu, pmsav5_cp_reginfo);
6325         }
6326     } else {
6327         define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
6328         define_arm_cp_regs(cpu, vmsa_cp_reginfo);
6329         /* TTBCR2 is introduced with ARMv8.2-A32HPD.  */
6330         if (FIELD_EX32(cpu->id_mmfr4, ID_MMFR4, HPDS) != 0) {
6331             define_one_arm_cp_reg(cpu, &ttbcr2_reginfo);
6332         }
6333     }
6334     if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
6335         define_arm_cp_regs(cpu, t2ee_cp_reginfo);
6336     }
6337     if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
6338         define_arm_cp_regs(cpu, generic_timer_cp_reginfo);
6339     }
6340     if (arm_feature(env, ARM_FEATURE_VAPA)) {
6341         define_arm_cp_regs(cpu, vapa_cp_reginfo);
6342     }
6343     if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) {
6344         define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo);
6345     }
6346     if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) {
6347         define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo);
6348     }
6349     if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) {
6350         define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo);
6351     }
6352     if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
6353         define_arm_cp_regs(cpu, omap_cp_reginfo);
6354     }
6355     if (arm_feature(env, ARM_FEATURE_STRONGARM)) {
6356         define_arm_cp_regs(cpu, strongarm_cp_reginfo);
6357     }
6358     if (arm_feature(env, ARM_FEATURE_XSCALE)) {
6359         define_arm_cp_regs(cpu, xscale_cp_reginfo);
6360     }
6361     if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) {
6362         define_arm_cp_regs(cpu, dummy_c15_cp_reginfo);
6363     }
6364     if (arm_feature(env, ARM_FEATURE_LPAE)) {
6365         define_arm_cp_regs(cpu, lpae_cp_reginfo);
6366     }
6367     /* Slightly awkwardly, the OMAP and StrongARM cores need all of
6368      * cp15 crn=0 to be writes-ignored, whereas for other cores they should
6369      * be read-only (ie write causes UNDEF exception).
6370      */
6371     {
6372         ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = {
6373             /* Pre-v8 MIDR space.
6374              * Note that the MIDR isn't a simple constant register because
6375              * of the TI925 behaviour where writes to another register can
6376              * cause the MIDR value to change.
6377              *
6378              * Unimplemented registers in the c15 0 0 0 space default to
6379              * MIDR. Define MIDR first as this entire space, then CTR, TCMTR
6380              * and friends override accordingly.
6381              */
6382             { .name = "MIDR",
6383               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY,
6384               .access = PL1_R, .resetvalue = cpu->midr,
6385               .writefn = arm_cp_write_ignore, .raw_writefn = raw_write,
6386               .readfn = midr_read,
6387               .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
6388               .type = ARM_CP_OVERRIDE },
6389             /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */
6390             { .name = "DUMMY",
6391               .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY,
6392               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
6393             { .name = "DUMMY",
6394               .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY,
6395               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
6396             { .name = "DUMMY",
6397               .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY,
6398               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
6399             { .name = "DUMMY",
6400               .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY,
6401               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
6402             { .name = "DUMMY",
6403               .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY,
6404               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
6405             REGINFO_SENTINEL
6406         };
6407         ARMCPRegInfo id_v8_midr_cp_reginfo[] = {
6408             { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH,
6409               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0,
6410               .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr,
6411               .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
6412               .readfn = midr_read },
6413             /* crn = 0 op1 = 0 crm = 0 op2 = 4,7 : AArch32 aliases of MIDR */
6414             { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
6415               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
6416               .access = PL1_R, .resetvalue = cpu->midr },
6417             { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
6418               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7,
6419               .access = PL1_R, .resetvalue = cpu->midr },
6420             { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH,
6421               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6,
6422               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->revidr },
6423             REGINFO_SENTINEL
6424         };
6425         ARMCPRegInfo id_cp_reginfo[] = {
6426             /* These are common to v8 and pre-v8 */
6427             { .name = "CTR",
6428               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1,
6429               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
6430             { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64,
6431               .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0,
6432               .access = PL0_R, .accessfn = ctr_el0_access,
6433               .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
6434             /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */
6435             { .name = "TCMTR",
6436               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2,
6437               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
6438             REGINFO_SENTINEL
6439         };
6440         /* TLBTR is specific to VMSA */
6441         ARMCPRegInfo id_tlbtr_reginfo = {
6442               .name = "TLBTR",
6443               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
6444               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0,
6445         };
6446         /* MPUIR is specific to PMSA V6+ */
6447         ARMCPRegInfo id_mpuir_reginfo = {
6448               .name = "MPUIR",
6449               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
6450               .access = PL1_R, .type = ARM_CP_CONST,
6451               .resetvalue = cpu->pmsav7_dregion << 8
6452         };
6453         ARMCPRegInfo crn0_wi_reginfo = {
6454             .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY,
6455             .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
6456             .type = ARM_CP_NOP | ARM_CP_OVERRIDE
6457         };
6458 #ifdef CONFIG_USER_ONLY
6459         ARMCPRegUserSpaceInfo id_v8_user_midr_cp_reginfo[] = {
6460             { .name = "MIDR_EL1",
6461               .exported_bits = 0x00000000ffffffff },
6462             { .name = "REVIDR_EL1"                },
6463             REGUSERINFO_SENTINEL
6464         };
6465         modify_arm_cp_regs(id_v8_midr_cp_reginfo, id_v8_user_midr_cp_reginfo);
6466 #endif
6467         if (arm_feature(env, ARM_FEATURE_OMAPCP) ||
6468             arm_feature(env, ARM_FEATURE_STRONGARM)) {
6469             ARMCPRegInfo *r;
6470             /* Register the blanket "writes ignored" value first to cover the
6471              * whole space. Then update the specific ID registers to allow write
6472              * access, so that they ignore writes rather than causing them to
6473              * UNDEF.
6474              */
6475             define_one_arm_cp_reg(cpu, &crn0_wi_reginfo);
6476             for (r = id_pre_v8_midr_cp_reginfo;
6477                  r->type != ARM_CP_SENTINEL; r++) {
6478                 r->access = PL1_RW;
6479             }
6480             for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) {
6481                 r->access = PL1_RW;
6482             }
6483             id_mpuir_reginfo.access = PL1_RW;
6484             id_tlbtr_reginfo.access = PL1_RW;
6485         }
6486         if (arm_feature(env, ARM_FEATURE_V8)) {
6487             define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo);
6488         } else {
6489             define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo);
6490         }
6491         define_arm_cp_regs(cpu, id_cp_reginfo);
6492         if (!arm_feature(env, ARM_FEATURE_PMSA)) {
6493             define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo);
6494         } else if (arm_feature(env, ARM_FEATURE_V7)) {
6495             define_one_arm_cp_reg(cpu, &id_mpuir_reginfo);
6496         }
6497     }
6498 
6499     if (arm_feature(env, ARM_FEATURE_MPIDR)) {
6500         ARMCPRegInfo mpidr_cp_reginfo[] = {
6501             { .name = "MPIDR_EL1", .state = ARM_CP_STATE_BOTH,
6502               .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
6503               .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW },
6504             REGINFO_SENTINEL
6505         };
6506 #ifdef CONFIG_USER_ONLY
6507         ARMCPRegUserSpaceInfo mpidr_user_cp_reginfo[] = {
6508             { .name = "MPIDR_EL1",
6509               .fixed_bits = 0x0000000080000000 },
6510             REGUSERINFO_SENTINEL
6511         };
6512         modify_arm_cp_regs(mpidr_cp_reginfo, mpidr_user_cp_reginfo);
6513 #endif
6514         define_arm_cp_regs(cpu, mpidr_cp_reginfo);
6515     }
6516 
6517     if (arm_feature(env, ARM_FEATURE_AUXCR)) {
6518         ARMCPRegInfo auxcr_reginfo[] = {
6519             { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH,
6520               .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1,
6521               .access = PL1_RW, .type = ARM_CP_CONST,
6522               .resetvalue = cpu->reset_auxcr },
6523             { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH,
6524               .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1,
6525               .access = PL2_RW, .type = ARM_CP_CONST,
6526               .resetvalue = 0 },
6527             { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64,
6528               .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1,
6529               .access = PL3_RW, .type = ARM_CP_CONST,
6530               .resetvalue = 0 },
6531             REGINFO_SENTINEL
6532         };
6533         define_arm_cp_regs(cpu, auxcr_reginfo);
6534         if (arm_feature(env, ARM_FEATURE_V8)) {
6535             /* HACTLR2 maps to ACTLR_EL2[63:32] and is not in ARMv7 */
6536             ARMCPRegInfo hactlr2_reginfo = {
6537                 .name = "HACTLR2", .state = ARM_CP_STATE_AA32,
6538                 .cp = 15, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 3,
6539                 .access = PL2_RW, .type = ARM_CP_CONST,
6540                 .resetvalue = 0
6541             };
6542             define_one_arm_cp_reg(cpu, &hactlr2_reginfo);
6543         }
6544     }
6545 
6546     if (arm_feature(env, ARM_FEATURE_CBAR)) {
6547         if (arm_feature(env, ARM_FEATURE_AARCH64)) {
6548             /* 32 bit view is [31:18] 0...0 [43:32]. */
6549             uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18)
6550                 | extract64(cpu->reset_cbar, 32, 12);
6551             ARMCPRegInfo cbar_reginfo[] = {
6552                 { .name = "CBAR",
6553                   .type = ARM_CP_CONST,
6554                   .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
6555               .access = PL1_R, .resetvalue = cbar32 },
6556                 { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64,
6557                   .type = ARM_CP_CONST,
6558                   .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0,
6559               .access = PL1_R, .resetvalue = cpu->reset_cbar },
6560                 REGINFO_SENTINEL
6561             };
6562             /* We don't implement an r/w 64-bit CBAR currently */
6563             assert(arm_feature(env, ARM_FEATURE_CBAR_RO));
6564             define_arm_cp_regs(cpu, cbar_reginfo);
6565         } else {
6566             ARMCPRegInfo cbar = {
6567                 .name = "CBAR",
6568                 .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
6569                 .access = PL1_R|PL3_W, .resetvalue = cpu->reset_cbar,
6570                 .fieldoffset = offsetof(CPUARMState,
6571                                         cp15.c15_config_base_address)
6572             };
6573             if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
6574                 cbar.access = PL1_R;
6575                 cbar.fieldoffset = 0;
6576                 cbar.type = ARM_CP_CONST;
6577             }
6578             define_one_arm_cp_reg(cpu, &cbar);
6579         }
6580     }
6581 
6582     if (arm_feature(env, ARM_FEATURE_VBAR)) {
6583         ARMCPRegInfo vbar_cp_reginfo[] = {
6584             { .name = "VBAR", .state = ARM_CP_STATE_BOTH,
6585               .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
6586               .access = PL1_RW, .writefn = vbar_write,
6587               .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s),
6588                                      offsetof(CPUARMState, cp15.vbar_ns) },
6589               .resetvalue = 0 },
6590             REGINFO_SENTINEL
6591         };
6592         define_arm_cp_regs(cpu, vbar_cp_reginfo);
6593     }
6594 
6595     /* Generic registers whose values depend on the implementation */
6596     {
6597         ARMCPRegInfo sctlr = {
6598             .name = "SCTLR", .state = ARM_CP_STATE_BOTH,
6599             .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
6600             .access = PL1_RW,
6601             .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s),
6602                                    offsetof(CPUARMState, cp15.sctlr_ns) },
6603             .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
6604             .raw_writefn = raw_write,
6605         };
6606         if (arm_feature(env, ARM_FEATURE_XSCALE)) {
6607             /* Normally we would always end the TB on an SCTLR write, but Linux
6608              * arch/arm/mach-pxa/sleep.S expects two instructions following
6609              * an MMU enable to execute from cache.  Imitate this behaviour.
6610              */
6611             sctlr.type |= ARM_CP_SUPPRESS_TB_END;
6612         }
6613         define_one_arm_cp_reg(cpu, &sctlr);
6614     }
6615 
6616     if (cpu_isar_feature(aa64_lor, cpu)) {
6617         /*
6618          * A trivial implementation of ARMv8.1-LOR leaves all of these
6619          * registers fixed at 0, which indicates that there are zero
6620          * supported Limited Ordering regions.
6621          */
6622         static const ARMCPRegInfo lor_reginfo[] = {
6623             { .name = "LORSA_EL1", .state = ARM_CP_STATE_AA64,
6624               .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 0,
6625               .access = PL1_RW, .accessfn = access_lor_other,
6626               .type = ARM_CP_CONST, .resetvalue = 0 },
6627             { .name = "LOREA_EL1", .state = ARM_CP_STATE_AA64,
6628               .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 1,
6629               .access = PL1_RW, .accessfn = access_lor_other,
6630               .type = ARM_CP_CONST, .resetvalue = 0 },
6631             { .name = "LORN_EL1", .state = ARM_CP_STATE_AA64,
6632               .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 2,
6633               .access = PL1_RW, .accessfn = access_lor_other,
6634               .type = ARM_CP_CONST, .resetvalue = 0 },
6635             { .name = "LORC_EL1", .state = ARM_CP_STATE_AA64,
6636               .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 3,
6637               .access = PL1_RW, .accessfn = access_lor_other,
6638               .type = ARM_CP_CONST, .resetvalue = 0 },
6639             { .name = "LORID_EL1", .state = ARM_CP_STATE_AA64,
6640               .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 7,
6641               .access = PL1_R, .accessfn = access_lorid,
6642               .type = ARM_CP_CONST, .resetvalue = 0 },
6643             REGINFO_SENTINEL
6644         };
6645         define_arm_cp_regs(cpu, lor_reginfo);
6646     }
6647 
6648     if (cpu_isar_feature(aa64_sve, cpu)) {
6649         define_one_arm_cp_reg(cpu, &zcr_el1_reginfo);
6650         if (arm_feature(env, ARM_FEATURE_EL2)) {
6651             define_one_arm_cp_reg(cpu, &zcr_el2_reginfo);
6652         } else {
6653             define_one_arm_cp_reg(cpu, &zcr_no_el2_reginfo);
6654         }
6655         if (arm_feature(env, ARM_FEATURE_EL3)) {
6656             define_one_arm_cp_reg(cpu, &zcr_el3_reginfo);
6657         }
6658     }
6659 
6660 #ifdef TARGET_AARCH64
6661     if (cpu_isar_feature(aa64_pauth, cpu)) {
6662         define_arm_cp_regs(cpu, pauth_reginfo);
6663     }
6664 #endif
6665 
6666     /*
6667      * While all v8.0 CPUs support AArch64, QEMU does have configurations
6668      * that do not set ID_AA64ISAR1, e.g. user-only qemu-arm -cpu max,
6669      * which sets ID_ISAR6 instead.
6670      */
6671     if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)
6672         ? cpu_isar_feature(aa64_predinv, cpu)
6673         : cpu_isar_feature(aa32_predinv, cpu)) {
6674         define_arm_cp_regs(cpu, predinv_reginfo);
6675     }
6676 }
6677 
6678 void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
6679 {
6680     CPUState *cs = CPU(cpu);
6681     CPUARMState *env = &cpu->env;
6682 
6683     if (arm_feature(env, ARM_FEATURE_AARCH64)) {
6684         gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg,
6685                                  aarch64_fpu_gdb_set_reg,
6686                                  34, "aarch64-fpu.xml", 0);
6687     } else if (arm_feature(env, ARM_FEATURE_NEON)) {
6688         gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
6689                                  51, "arm-neon.xml", 0);
6690     } else if (arm_feature(env, ARM_FEATURE_VFP3)) {
6691         gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
6692                                  35, "arm-vfp3.xml", 0);
6693     } else if (arm_feature(env, ARM_FEATURE_VFP)) {
6694         gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
6695                                  19, "arm-vfp.xml", 0);
6696     }
6697     gdb_register_coprocessor(cs, arm_gdb_get_sysreg, arm_gdb_set_sysreg,
6698                              arm_gen_dynamic_xml(cs),
6699                              "system-registers.xml", 0);
6700 }
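
/*
 * The register counts passed above mirror the XML descriptions: for
 * instance, 34 for aarch64-fpu.xml is the 32 V registers plus FPSR and
 * FPCR, while 51 for arm-neon.xml covers d0-d31, the q0-q15 aliases
 * and the three control registers (FPSID, FPSCR, FPEXC).
 */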
6701 
6702 /* Sort alphabetically by type name, except for "any". */
6703 static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b)
6704 {
6705     ObjectClass *class_a = (ObjectClass *)a;
6706     ObjectClass *class_b = (ObjectClass *)b;
6707     const char *name_a, *name_b;
6708 
6709     name_a = object_class_get_name(class_a);
6710     name_b = object_class_get_name(class_b);
6711     if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) {
6712         return 1;
6713     } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) {
6714         return -1;
6715     } else {
6716         return strcmp(name_a, name_b);
6717     }
6718 }
6719 
6720 static void arm_cpu_list_entry(gpointer data, gpointer user_data)
6721 {
6722     ObjectClass *oc = data;
6723     CPUListState *s = user_data;
6724     const char *typename;
6725     char *name;
6726 
6727     typename = object_class_get_name(oc);
6728     name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU));
6729     (*s->cpu_fprintf)(s->file, "  %s\n",
6730                       name);
6731     g_free(name);
6732 }
6733 
6734 void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf)
6735 {
6736     CPUListState s = {
6737         .file = f,
6738         .cpu_fprintf = cpu_fprintf,
6739     };
6740     GSList *list;
6741 
6742     list = object_class_get_list(TYPE_ARM_CPU, false);
6743     list = g_slist_sort(list, arm_cpu_list_compare);
6744     (*cpu_fprintf)(f, "Available CPUs:\n");
6745     g_slist_foreach(list, arm_cpu_list_entry, &s);
6746     g_slist_free(list);
6747 }
6748 
6749 static void arm_cpu_add_definition(gpointer data, gpointer user_data)
6750 {
6751     ObjectClass *oc = data;
6752     CpuDefinitionInfoList **cpu_list = user_data;
6753     CpuDefinitionInfoList *entry;
6754     CpuDefinitionInfo *info;
6755     const char *typename;
6756 
6757     typename = object_class_get_name(oc);
6758     info = g_malloc0(sizeof(*info));
6759     info->name = g_strndup(typename,
6760                            strlen(typename) - strlen("-" TYPE_ARM_CPU));
6761     info->q_typename = g_strdup(typename);
6762 
6763     entry = g_malloc0(sizeof(*entry));
6764     entry->value = info;
6765     entry->next = *cpu_list;
6766     *cpu_list = entry;
6767 }
6768 
6769 CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
6770 {
6771     CpuDefinitionInfoList *cpu_list = NULL;
6772     GSList *list;
6773 
6774     list = object_class_get_list(TYPE_ARM_CPU, false);
6775     g_slist_foreach(list, arm_cpu_add_definition, &cpu_list);
6776     g_slist_free(list);
6777 
6778     return cpu_list;
6779 }
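
/*
 * For illustration, a QMP client would drive this with something like
 * the following (reply fields beyond name/typename vary by QEMU version):
 *
 *   -> { "execute": "query-cpu-definitions" }
 *   <- { "return": [ { "name": "cortex-a57",
 *                      "typename": "cortex-a57-arm-cpu", ... }, ... ] }
 */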
6780 
6781 static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
6782                                    void *opaque, int state, int secstate,
6783                                    int crm, int opc1, int opc2,
6784                                    const char *name)
6785 {
6786     /* Private utility function for define_one_arm_cp_reg_with_opaque():
6787      * add a single reginfo struct to the hash table.
6788      */
6789     uint32_t *key = g_new(uint32_t, 1);
6790     ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo));
6791     int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0;
6792     int ns = (secstate & ARM_CP_SECSTATE_NS) ? 1 : 0;
6793 
6794     r2->name = g_strdup(name);
6795     /* Reset the secure state to the specific incoming state.  This is
6796      * necessary as the register may have been defined with both states.
6797      */
6798     r2->secure = secstate;
6799 
6800     if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
6801         /* Register is banked (using both entries in the array).
6802          * Overwrite fieldoffset: the array is only used to define
6803          * banked registers, and from here on only fieldoffset is used.
6804          */
6805         r2->fieldoffset = r->bank_fieldoffsets[ns];
6806     }
6807 
6808     if (state == ARM_CP_STATE_AA32) {
6809         if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
6810             /* If the register is banked then we don't need to migrate or
6811              * reset the 32-bit instance in certain cases:
6812              *
6813              * 1) If the register has both 32-bit and 64-bit instances then we
6814              *    can count on the 64-bit instance taking care of the
6815              *    non-secure bank.
6816              * 2) If ARMv8 is enabled then we can count on a 64-bit version
6817              *    taking care of the secure bank.  This requires that separate
6818              *    32 and 64-bit definitions are provided.
6819              */
6820             if ((r->state == ARM_CP_STATE_BOTH && ns) ||
6821                 (arm_feature(&cpu->env, ARM_FEATURE_V8) && !ns)) {
6822                 r2->type |= ARM_CP_ALIAS;
6823             }
6824         } else if ((secstate != r->secure) && !ns) {
6825             /* The register is not banked so we only want to allow migration of
6826              * the non-secure instance.
6827              */
6828             r2->type |= ARM_CP_ALIAS;
6829         }
6830 
6831         if (r->state == ARM_CP_STATE_BOTH) {
6832             /* We assume it is a cp15 register if the .cp field is left unset.
6833              */
6834             if (r2->cp == 0) {
6835                 r2->cp = 15;
6836             }
6837 
6838 #ifdef HOST_WORDS_BIGENDIAN
6839             if (r2->fieldoffset) {
6840                 r2->fieldoffset += sizeof(uint32_t);
6841             }
6842 #endif
6843         }
6844     }
6845     if (state == ARM_CP_STATE_AA64) {
6846         /* To allow abbreviation of ARMCPRegInfo
6847          * definitions, we treat cp == 0 as equivalent to
6848          * the value for "standard guest-visible sysreg".
6849          * STATE_BOTH definitions are also always "standard
6850          * sysreg" in their AArch64 view (the .cp value may
6851          * be non-zero for the benefit of the AArch32 view).
6852          */
6853         if (r->cp == 0 || r->state == ARM_CP_STATE_BOTH) {
6854             r2->cp = CP_REG_ARM64_SYSREG_CP;
6855         }
6856         *key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm,
6857                                   r2->opc0, opc1, opc2);
6858     } else {
6859         *key = ENCODE_CP_REG(r2->cp, is64, ns, r2->crn, crm, opc1, opc2);
6860     }
6861     if (opaque) {
6862         r2->opaque = opaque;
6863     }
6864     /* reginfo passed to helpers is correct for the actual access,
6865      * and is never ARM_CP_STATE_BOTH:
6866      */
6867     r2->state = state;
6868     /* Make sure reginfo passed to helpers for wildcarded regs
6869      * has the correct crm/opc1/opc2 for this reg, not CP_ANY:
6870      */
6871     r2->crm = crm;
6872     r2->opc1 = opc1;
6873     r2->opc2 = opc2;
6874     /* By convention, for wildcarded registers only the first
6875      * entry is used for migration; the others are marked as
6876      * ALIAS so we don't try to transfer the register
6877      * multiple times. Special registers (ie NOP/WFI) are
6878      * never migratable and not even raw-accessible.
6879      */
6880     if ((r->type & ARM_CP_SPECIAL)) {
6881         r2->type |= ARM_CP_NO_RAW;
6882     }
6883     if (((r->crm == CP_ANY) && crm != 0) ||
6884         ((r->opc1 == CP_ANY) && opc1 != 0) ||
6885         ((r->opc2 == CP_ANY) && opc2 != 0)) {
6886         r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB;
6887     }
6888 
6889     /* Check that raw accesses are either forbidden or handled. Note that
6890      * we can't assert this earlier because the setup of fieldoffset for
6891      * banked registers has to be done first.
6892      */
6893     if (!(r2->type & ARM_CP_NO_RAW)) {
6894         assert(!raw_accessors_invalid(r2));
6895     }
6896 
6897     /* Overriding of an existing definition must be explicitly
6898      * requested.
6899      */
6900     if (!(r->type & ARM_CP_OVERRIDE)) {
6901         ARMCPRegInfo *oldreg;
6902         oldreg = g_hash_table_lookup(cpu->cp_regs, key);
6903         if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) {
6904             fprintf(stderr, "Register redefined: cp=%d %d bit "
6905                     "crn=%d crm=%d opc1=%d opc2=%d, "
6906                     "was %s, now %s\n", r2->cp, 32 + 32 * is64,
6907                     r2->crn, r2->crm, r2->opc1, r2->opc2,
6908                     oldreg->name, r2->name);
6909             g_assert_not_reached();
6910         }
6911     }
6912     g_hash_table_insert(cpu->cp_regs, key, r2);
6913 }
6914 
6915 
6916 void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
6917                                        const ARMCPRegInfo *r, void *opaque)
6918 {
6919     /* Define implementations of coprocessor registers.
6920      * We store these in a hashtable because typically
6921      * there are fewer than 150 registers in a space which
6922      * is 16*16*16*8*8 = 262144 in size.
6923      * Wildcarding is supported for the crm, opc1 and opc2 fields.
6924      * If a register is defined twice then the second definition is
6925      * used, so this can be used to define some generic registers and
6926      * then override them with implementation specific variations.
6927      * At least one of the original and the second definition should
6928      * include ARM_CP_OVERRIDE in its type bits -- this is just a guard
6929      * against accidental use.
6930      *
6931      * The state field defines whether the register is to be
6932      * visible in the AArch32 or AArch64 execution state. If the
6933      * state is set to ARM_CP_STATE_BOTH then we synthesise a
6934      * reginfo structure for the AArch32 view, which sees the lower
6935      * 32 bits of the 64 bit register.
6936      *
6937      * Only registers visible in AArch64 may set r->opc0; opc0 cannot
6938      * be wildcarded. AArch64 registers are always considered to be 64
6939      * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of
6940      * the register, if any.
6941      */
6942     int crm, opc1, opc2, state;
6943     int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
6944     int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
6945     int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
6946     int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
6947     int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
6948     int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
6949     /* 64 bit registers have only CRm and Opc1 fields */
6950     assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
6951     /* op0 only exists in the AArch64 encodings */
6952     assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0));
6953     /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */
6954     assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT));
6955     /* The AArch64 pseudocode CheckSystemAccess() specifies that op1
6956      * encodes a minimum access level for the register. We roll this
6957      * runtime check into our general permission check code, so check
6958      * here that the reginfo's specified permissions are strict enough
6959      * to encompass the generic architectural permission check.
6960      */
6961     if (r->state != ARM_CP_STATE_AA32) {
6962         int mask = 0;
6963         switch (r->opc1) {
6964         case 0:
6965             /* min_EL EL1, but some accessible to EL0 via kernel ABI */
6966             mask = PL0U_R | PL1_RW;
6967             break;
6968         case 1: case 2:
6969             /* min_EL EL1 */
6970             mask = PL1_RW;
6971             break;
6972         case 3:
6973             /* min_EL EL0 */
6974             mask = PL0_RW;
6975             break;
6976         case 4:
6977             /* min_EL EL2 */
6978             mask = PL2_RW;
6979             break;
6980         case 5:
6981             /* unallocated encoding, so not possible */
6982             assert(false);
6983             break;
6984         case 6:
6985             /* min_EL EL3 */
6986             mask = PL3_RW;
6987             break;
6988         case 7:
6989             /* min_EL EL1, secure mode only (we don't check the latter) */
6990             mask = PL1_RW;
6991             break;
6992         default:
6993             /* broken reginfo with out-of-range opc1 */
6994             assert(false);
6995             break;
6996         }
6997         /* assert our permissions are not too lax (stricter is fine) */
6998         assert((r->access & ~mask) == 0);
6999     }
7000 
7001     /* Check that the register definition has enough info to handle
7002      * reads and writes if they are permitted.
7003      */
7004     if (!(r->type & (ARM_CP_SPECIAL|ARM_CP_CONST))) {
7005         if (r->access & PL3_R) {
7006             assert((r->fieldoffset ||
7007                    (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
7008                    r->readfn);
7009         }
7010         if (r->access & PL3_W) {
7011             assert((r->fieldoffset ||
7012                    (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
7013                    r->writefn);
7014         }
7015     }
7016     /* Bad type field probably means missing sentinel at end of reg list */
7017     assert(cptype_valid(r->type));
7018     for (crm = crmmin; crm <= crmmax; crm++) {
7019         for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
7020             for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
7021                 for (state = ARM_CP_STATE_AA32;
7022                      state <= ARM_CP_STATE_AA64; state++) {
7023                     if (r->state != state && r->state != ARM_CP_STATE_BOTH) {
7024                         continue;
7025                     }
7026                     if (state == ARM_CP_STATE_AA32) {
7027                         /* Under AArch32 CP registers can be common
7028                          * (same for secure and non-secure world) or banked.
7029                          */
7030                         char *name;
7031 
7032                         switch (r->secure) {
7033                         case ARM_CP_SECSTATE_S:
7034                         case ARM_CP_SECSTATE_NS:
7035                             add_cpreg_to_hashtable(cpu, r, opaque, state,
7036                                                    r->secure, crm, opc1, opc2,
7037                                                    r->name);
7038                             break;
7039                         default:
7040                             name = g_strdup_printf("%s_S", r->name);
7041                             add_cpreg_to_hashtable(cpu, r, opaque, state,
7042                                                    ARM_CP_SECSTATE_S,
7043                                                    crm, opc1, opc2, name);
7044                             g_free(name);
7045                             add_cpreg_to_hashtable(cpu, r, opaque, state,
7046                                                    ARM_CP_SECSTATE_NS,
7047                                                    crm, opc1, opc2, r->name);
7048                             break;
7049                         }
7050                     } else {
7051                         /* AArch64 registers get mapped to the non-secure
7052                          * instance of AArch32 */
7053                         add_cpreg_to_hashtable(cpu, r, opaque, state,
7054                                                ARM_CP_SECSTATE_NS,
7055                                                crm, opc1, opc2, r->name);
7056                     }
7057                 }
7058             }
7059         }
7060     }
7061 }
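
/*
 * For illustration (a hypothetical register, not one defined in this
 * file), a wildcarded definition such as:
 *
 *     ARMCPRegInfo example = {
 *         .name = "EXAMPLE", .cp = 15, .crn = 9, .crm = CP_ANY,
 *         .opc1 = 0, .opc2 = CP_ANY, .access = PL1_RW,
 *         .type = ARM_CP_NOP,
 *     };
 *     define_one_arm_cp_reg(cpu, &example);
 *
 * expands to one hashtable entry per (crm, opc2) pair in 0..15 x 0..7;
 * add_cpreg_to_hashtable() marks every instance other than the all-zero
 * one as ARM_CP_ALIAS | ARM_CP_NO_GDB so it is neither migrated nor
 * exposed to gdb more than once.
 */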
7062 
7063 void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
7064                                     const ARMCPRegInfo *regs, void *opaque)
7065 {
7066     /* Define a whole list of registers */
7067     const ARMCPRegInfo *r;
7068     for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
7069         define_one_arm_cp_reg_with_opaque(cpu, r, opaque);
7070     }
7071 }
7072 
7073 /*
7074  * Modify ARMCPRegInfo for access from userspace.
7075  *
7076  * This is a data driven modification directed by
7077  * ARMCPRegUserSpaceInfo. All registers become ARM_CP_CONST as
7078  * user-space cannot alter any values and dynamic values pertaining to
7079  * execution state are hidden from user space view anyway.
7080  */
7081 void modify_arm_cp_regs(ARMCPRegInfo *regs, const ARMCPRegUserSpaceInfo *mods)
7082 {
7083     const ARMCPRegUserSpaceInfo *m;
7084     ARMCPRegInfo *r;
7085 
7086     for (m = mods; m->name; m++) {
7087         GPatternSpec *pat = NULL;
7088         if (m->is_glob) {
7089             pat = g_pattern_spec_new(m->name);
7090         }
7091         for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
7092             if (pat && g_pattern_match_string(pat, r->name)) {
7093                 r->type = ARM_CP_CONST;
7094                 r->access = PL0U_R;
7095                 r->resetvalue = 0;
7096                 /* continue */
7097             } else if (strcmp(r->name, m->name) == 0) {
7098                 r->type = ARM_CP_CONST;
7099                 r->access = PL0U_R;
7100                 r->resetvalue &= m->exported_bits;
7101                 r->resetvalue |= m->fixed_bits;
7102                 break;
7103             }
7104         }
7105         if (pat) {
7106             g_pattern_spec_free(pat);
7107         }
7108     }
7109 }
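
/*
 * A hypothetical example of both match styles: the glob entry turns
 * every matching register into a RAZ constant, while the exact-name
 * entry masks in only the exported bits:
 *
 *     ARMCPRegUserSpaceInfo mods[] = {
 *         { .name = "ID_AA64*_EL1_RESERVED", .is_glob = true },
 *         { .name = "MIDR_EL1", .exported_bits = 0x00000000ffffffff },
 *         REGUSERINFO_SENTINEL
 *     };
 *     modify_arm_cp_regs(some_reginfo_list, mods);
 *
 * where some_reginfo_list stands in for whatever ARMCPRegInfo array is
 * being exposed to user space.
 */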
7110 
7111 const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
7112 {
7113     return g_hash_table_lookup(cpregs, &encoded_cp);
7114 }
7115 
7116 void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
7117                          uint64_t value)
7118 {
7119     /* Helper coprocessor write function for write-ignore registers */
7120 }
7121 
7122 uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
7123 {
7124     /* Helper coprocessor read function for read-as-zero registers */
7125     return 0;
7126 }
7127 
7128 void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
7129 {
7130     /* Helper coprocessor reset function for do-nothing-on-reset registers */
7131 }
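
/*
 * Typical (hypothetical) use of these helpers: a RAZ/WI register that
 * cannot simply be ARM_CP_CONST because it must stay writable:
 *
 *     { .name = "EXAMPLE_RAZ_WI", .cp = 15, .crn = 9, .crm = 0,
 *       .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NO_RAW,
 *       .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore,
 *       .resetfn = arm_cp_reset_ignore },
 */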
7132 
7133 static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type)
7134 {
7135     /* Return true if it is not valid for us to switch to
7136      * this CPU mode (ie all the UNPREDICTABLE cases in
7137      * the ARM ARM CPSRWriteByInstr pseudocode).
7138      */
7139 
7140     /* Changes to or from Hyp via MSR and CPS are illegal. */
7141     if (write_type == CPSRWriteByInstr &&
7142         ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP ||
7143          mode == ARM_CPU_MODE_HYP)) {
7144         return 1;
7145     }
7146 
7147     switch (mode) {
7148     case ARM_CPU_MODE_USR:
7149         return 0;
7150     case ARM_CPU_MODE_SYS:
7151     case ARM_CPU_MODE_SVC:
7152     case ARM_CPU_MODE_ABT:
7153     case ARM_CPU_MODE_UND:
7154     case ARM_CPU_MODE_IRQ:
7155     case ARM_CPU_MODE_FIQ:
7156         /* Note that we don't implement the IMPDEF NSACR.RFR which in v7
7157          * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.)
7158          */
7159         /* If HCR.TGE is set then changes from Monitor to NS PL1 via MSR
7160          * and CPS are treated as illegal mode changes.
7161          */
7162         if (write_type == CPSRWriteByInstr &&
7163             (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON &&
7164             (arm_hcr_el2_eff(env) & HCR_TGE)) {
7165             return 1;
7166         }
7167         return 0;
7168     case ARM_CPU_MODE_HYP:
7169         return !arm_feature(env, ARM_FEATURE_EL2)
7170             || arm_current_el(env) < 2 || arm_is_secure_below_el3(env);
7171     case ARM_CPU_MODE_MON:
7172         return arm_current_el(env) < 3;
7173     default:
7174         return 1;
7175     }
7176 }
7177 
7178 uint32_t cpsr_read(CPUARMState *env)
7179 {
7180     int ZF;
7181     ZF = (env->ZF == 0);
7182     return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
7183         (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
7184         | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
7185         | ((env->condexec_bits & 0xfc) << 8)
7186         | (env->GE << 16) | (env->daif & CPSR_AIF);
7187 }
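
/*
 * Worked example: with env->NF bit 31 set, env->ZF == 0, env->CF == 1
 * and everything else zero, cpsr_read() returns 0xe0000000 (N, Z and C
 * set). Note the IT bits are split to match the A32 encoding: IT[1:0]
 * sit at CPSR[26:25] and IT[7:2] at CPSR[15:10].
 */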
7188 
7189 void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
7190                 CPSRWriteType write_type)
7191 {
7192     uint32_t changed_daif;
7193 
7194     if (mask & CPSR_NZCV) {
7195         env->ZF = (~val) & CPSR_Z;
7196         env->NF = val;
7197         env->CF = (val >> 29) & 1;
7198         env->VF = (val << 3) & 0x80000000;
7199     }
7200     if (mask & CPSR_Q)
7201         env->QF = ((val & CPSR_Q) != 0);
7202     if (mask & CPSR_T)
7203         env->thumb = ((val & CPSR_T) != 0);
7204     if (mask & CPSR_IT_0_1) {
7205         env->condexec_bits &= ~3;
7206         env->condexec_bits |= (val >> 25) & 3;
7207     }
7208     if (mask & CPSR_IT_2_7) {
7209         env->condexec_bits &= 3;
7210         env->condexec_bits |= (val >> 8) & 0xfc;
7211     }
7212     if (mask & CPSR_GE) {
7213         env->GE = (val >> 16) & 0xf;
7214     }
7215 
7216     /* In a V7 implementation that includes the security extensions but does
7217      * not include Virtualization Extensions the SCR.FW and SCR.AW bits control
7218      * whether non-secure software is allowed to change the CPSR_F and CPSR_A
7219      * bits respectively.
7220      *
7221      * In a V8 implementation, it is permitted for privileged software to
7222      * change the CPSR A/F bits regardless of the SCR.AW/FW bits.
7223      */
7224     if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) &&
7225         arm_feature(env, ARM_FEATURE_EL3) &&
7226         !arm_feature(env, ARM_FEATURE_EL2) &&
7227         !arm_is_secure(env)) {
7228 
7229         changed_daif = (env->daif ^ val) & mask;
7230 
7231         if (changed_daif & CPSR_A) {
7232             /* Check to see if we are allowed to change the masking of async
7233              * abort exceptions from a non-secure state.
7234              */
7235             if (!(env->cp15.scr_el3 & SCR_AW)) {
7236                 qemu_log_mask(LOG_GUEST_ERROR,
7237                               "Ignoring attempt to switch CPSR_A flag from "
7238                               "non-secure world with SCR.AW bit clear\n");
7239                 mask &= ~CPSR_A;
7240             }
7241         }
7242 
7243         if (changed_daif & CPSR_F) {
7244             /* Check to see if we are allowed to change the masking of FIQ
7245              * exceptions from a non-secure state.
7246              */
7247             if (!(env->cp15.scr_el3 & SCR_FW)) {
7248                 qemu_log_mask(LOG_GUEST_ERROR,
7249                               "Ignoring attempt to switch CPSR_F flag from "
7250                               "non-secure world with SCR.FW bit clear\n");
7251                 mask &= ~CPSR_F;
7252             }
7253 
7254             /* Check whether non-maskable FIQ (NMFI) support is enabled.
7255              * If this bit is set software is not allowed to mask
7256              * FIQs, but is allowed to set CPSR_F to 0.
7257              */
7258             if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) &&
7259                 (val & CPSR_F)) {
7260                 qemu_log_mask(LOG_GUEST_ERROR,
7261                               "Ignoring attempt to enable CPSR_F flag "
7262                               "(non-maskable FIQ [NMFI] support enabled)\n");
7263                 mask &= ~CPSR_F;
7264             }
7265         }
7266     }
7267 
7268     env->daif &= ~(CPSR_AIF & mask);
7269     env->daif |= val & CPSR_AIF & mask;
7270 
7271     if (write_type != CPSRWriteRaw &&
7272         ((env->uncached_cpsr ^ val) & mask & CPSR_M)) {
7273         if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) {
7274             /* Note that we can only get here in USR mode if this is a
7275              * gdb stub write; for this case we follow the architectural
7276              * behaviour for guest writes in USR mode of ignoring an attempt
7277              * to switch mode. (Those are caught by translate.c for writes
7278              * triggered by guest instructions.)
7279              */
7280             mask &= ~CPSR_M;
7281         } else if (bad_mode_switch(env, val & CPSR_M, write_type)) {
7282             /* Attempt to switch to an invalid mode: this is UNPREDICTABLE in
7283              * v7, and has defined behaviour in v8:
7284              *  + leave CPSR.M untouched
7285              *  + allow changes to the other CPSR fields
7286              *  + set PSTATE.IL
7287              * For user changes via the GDB stub, we don't set PSTATE.IL,
7288              * as this would be unnecessarily harsh for a user error.
7289              */
7290             mask &= ~CPSR_M;
7291             if (write_type != CPSRWriteByGDBStub &&
7292                 arm_feature(env, ARM_FEATURE_V8)) {
7293                 mask |= CPSR_IL;
7294                 val |= CPSR_IL;
7295             }
7296             qemu_log_mask(LOG_GUEST_ERROR,
7297                           "Illegal AArch32 mode switch attempt from %s to %s\n",
7298                           aarch32_mode_name(env->uncached_cpsr),
7299                           aarch32_mode_name(val));
7300         } else {
7301             qemu_log_mask(CPU_LOG_INT, "%s %s to %s PC 0x%" PRIx32 "\n",
7302                           write_type == CPSRWriteExceptionReturn ?
7303                           "Exception return from AArch32" :
7304                           "AArch32 mode switch from",
7305                           aarch32_mode_name(env->uncached_cpsr),
7306                           aarch32_mode_name(val), env->regs[15]);
7307             switch_mode(env, val & CPSR_M);
7308         }
7309     }
7310     mask &= ~CACHED_CPSR_BITS;
7311     env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
7312 }
7313 
7314 /* Sign/zero extend */
7315 uint32_t HELPER(sxtb16)(uint32_t x)
7316 {
7317     uint32_t res;
7318     res = (uint16_t)(int8_t)x;
7319     res |= (uint32_t)(int8_t)(x >> 16) << 16;
7320     return res;
7321 }
7322 
7323 uint32_t HELPER(uxtb16)(uint32_t x)
7324 {
7325     uint32_t res;
7326     res = (uint16_t)(uint8_t)x;
7327     res |= (uint32_t)(uint8_t)(x >> 16) << 16;
7328     return res;
7329 }
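
/*
 * Example: sxtb16(0x007f00ff) sign-extends the low byte of each
 * halfword, giving 0x007fffff, whereas uxtb16(0x007f00ff) zero-extends
 * them, giving 0x007f00ff.
 */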
7330 
7331 int32_t HELPER(sdiv)(int32_t num, int32_t den)
7332 {
7333     if (den == 0)
7334       return 0;
7335     if (num == INT_MIN && den == -1)
7336       return INT_MIN;
7337     return num / den;
7338 }
7339 
7340 uint32_t HELPER(udiv)(uint32_t num, uint32_t den)
7341 {
7342     if (den == 0)
7343       return 0;
7344     return num / den;
7345 }
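
/*
 * Both helpers follow the A32 SDIV/UDIV rules: division by zero yields
 * 0 rather than trapping, and the only overflowing signed case wraps,
 * so HELPER(sdiv)(INT_MIN, -1) == INT_MIN.
 */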
7346 
7347 uint32_t HELPER(rbit)(uint32_t x)
7348 {
7349     return revbit32(x);
7350 }
7351 
7352 #ifdef CONFIG_USER_ONLY
7353 
7354 /* These should probably raise undefined insn exceptions.  */
7355 void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
7356 {
7357     ARMCPU *cpu = arm_env_get_cpu(env);
7358 
7359     cpu_abort(CPU(cpu), "v7m_msr %d\n", reg);
7360 }
7361 
7362 uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
7363 {
7364     ARMCPU *cpu = arm_env_get_cpu(env);
7365 
7366     cpu_abort(CPU(cpu), "v7m_mrs %d\n", reg);
7367     return 0;
7368 }
7369 
7370 void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
7371 {
7372     /* translate.c should never generate calls here in user-only mode */
7373     g_assert_not_reached();
7374 }
7375 
7376 void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
7377 {
7378     /* translate.c should never generate calls here in user-only mode */
7379     g_assert_not_reached();
7380 }
7381 
7382 uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
7383 {
7384     /* The TT instructions can be used by unprivileged code, but in
7385      * user-only emulation we don't have the MPU.
7386      * Luckily since we know we are NonSecure unprivileged (and that in
7387      * turn means that the A flag wasn't specified), all the bits in the
7388      * register must be zero:
7389      *  IREGION: 0 because IRVALID is 0
7390      *  IRVALID: 0 because NS
7391      *  S: 0 because NS
7392      *  NSRW: 0 because NS
7393      *  NSR: 0 because NS
7394      *  RW: 0 because unpriv and A flag not set
7395      *  R: 0 because unpriv and A flag not set
7396      *  SRVALID: 0 because NS
7397      *  MRVALID: 0 because unpriv and A flag not set
7398      *  SREGION: 0 because SRVALID is 0
7399      *  MREGION: 0 because MRVALID is 0
7400      */
7401     return 0;
7402 }
7403 
7404 static void switch_mode(CPUARMState *env, int mode)
7405 {
7406     ARMCPU *cpu = arm_env_get_cpu(env);
7407 
7408     if (mode != ARM_CPU_MODE_USR) {
7409         cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
7410     }
7411 }
7412 
7413 uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
7414                                  uint32_t cur_el, bool secure)
7415 {
7416     return 1;
7417 }
7418 
7419 void aarch64_sync_64_to_32(CPUARMState *env)
7420 {
7421     g_assert_not_reached();
7422 }
7423 
7424 #else
7425 
7426 static void switch_mode(CPUARMState *env, int mode)
7427 {
7428     int old_mode;
7429     int i;
7430 
7431     old_mode = env->uncached_cpsr & CPSR_M;
7432     if (mode == old_mode)
7433         return;
7434 
7435     if (old_mode == ARM_CPU_MODE_FIQ) {
7436         memcpy(env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
7437         memcpy(env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
7438     } else if (mode == ARM_CPU_MODE_FIQ) {
7439         memcpy(env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
7440         memcpy(env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
7441     }
7442 
7443     i = bank_number(old_mode);
7444     env->banked_r13[i] = env->regs[13];
7445     env->banked_spsr[i] = env->spsr;
7446 
7447     i = bank_number(mode);
7448     env->regs[13] = env->banked_r13[i];
7449     env->spsr = env->banked_spsr[i];
7450 
7451     env->banked_r14[r14_bank_number(old_mode)] = env->regs[14];
7452     env->regs[14] = env->banked_r14[r14_bank_number(mode)];
7453 }
7454 
7455 /* Physical Interrupt Target EL Lookup Table
7456  *
7457  * [ From ARM ARM section G1.13.4 (Table G1-15) ]
7458  *
7459  * The below multi-dimensional table is used for looking up the target
7460  * exception level given numerous condition criteria.  Specifically, the
7461  * target EL is based on SCR and HCR routing controls as well as the
7462  * currently executing EL and secure state.
7463  *
7464  *    Dimensions:
7465  *    target_el_table[2][2][2][2][2][4]
7466  *                    |  |  |  |  |  +--- Current EL
7467  *                    |  |  |  |  +------ Non-secure(0)/Secure(1)
7468  *                    |  |  |  +--------- HCR mask override
7469  *                    |  |  +------------ SCR exec state control
7470  *                    |  +--------------- SCR mask override
7471  *                    +------------------ 32-bit(0)/64-bit(1) EL3
7472  *
7473  *    The table values are as such:
7474  *    0-3 = EL0-EL3
7475  *     -1 = Cannot occur
7476  *
7477  * The ARM ARM target EL table includes entries indicating that an "exception
7478  * is not taken".  The two cases where this is applicable are:
7479  *    1) An exception is taken from EL3 but the SCR does not have the exception
7480  *    routed to EL3.
7481  *    2) An exception is taken from EL2 but the HCR does not have the exception
7482  *    routed to EL2.
7483  * In these two cases, the table below contains a target of EL1.  This value is
7484  * returned as it is expected that the consumer of the table data will check
7485  * for "target EL >= current EL" to ensure the exception is not taken.
7486  *
7487  *            SCR     HCR
7488  *         64  EA     AMO                 From
7489  *        BIT IRQ     IMO      Non-secure         Secure
7490  *        EL3 FIQ  RW FMO   EL0 EL1 EL2 EL3   EL0 EL1 EL2 EL3
7491  */
7492 static const int8_t target_el_table[2][2][2][2][2][4] = {
7493     {{{{/* 0   0   0   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
7494        {/* 0   0   0   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},
7495       {{/* 0   0   1   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
7496        {/* 0   0   1   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},},
7497      {{{/* 0   1   0   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
7498        {/* 0   1   0   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},
7499       {{/* 0   1   1   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
7500        {/* 0   1   1   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},},},
7501     {{{{/* 1   0   0   0 */{ 1,  1,  2, -1 },{ 1,  1, -1,  1 },},
7502        {/* 1   0   0   1 */{ 2,  2,  2, -1 },{ 1,  1, -1,  1 },},},
7503       {{/* 1   0   1   0 */{ 1,  1,  1, -1 },{ 1,  1, -1,  1 },},
7504        {/* 1   0   1   1 */{ 2,  2,  2, -1 },{ 1,  1, -1,  1 },},},},
7505      {{{/* 1   1   0   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
7506        {/* 1   1   0   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},
7507       {{/* 1   1   1   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
7508        {/* 1   1   1   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},},},
7509 };
7510 
7511 /*
7512  * Determine the target EL for physical exceptions
7513  */
7514 uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
7515                                  uint32_t cur_el, bool secure)
7516 {
7517     CPUARMState *env = cs->env_ptr;
7518     bool rw;
7519     bool scr;
7520     bool hcr;
7521     int target_el;
7522     /* Is the highest EL AArch64? */
7523     bool is64 = arm_feature(env, ARM_FEATURE_AARCH64);
7524     uint64_t hcr_el2;
7525 
7526     if (arm_feature(env, ARM_FEATURE_EL3)) {
7527         rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW);
7528     } else {
7529         /* Either EL2 is the highest EL (and so the EL2 register width
7530          * is given by is64); or there is no EL2 or EL3, in which case
7531          * the value of 'rw' does not affect the table lookup anyway.
7532          */
7533         rw = is64;
7534     }
7535 
7536     hcr_el2 = arm_hcr_el2_eff(env);
7537     switch (excp_idx) {
7538     case EXCP_IRQ:
7539         scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ);
7540         hcr = hcr_el2 & HCR_IMO;
7541         break;
7542     case EXCP_FIQ:
7543         scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ);
7544         hcr = hcr_el2 & HCR_FMO;
7545         break;
7546     default:
7547         scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA);
7548         hcr = hcr_el2 & HCR_AMO;
7549         break;
7550     }
7551 
7552     /* Perform a table-lookup for the target EL given the current state */
7553     target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el];
7554 
7555     assert(target_el > 0);
7556 
7557     return target_el;
7558 }
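
/*
 * Worked example: on a CPU whose EL3 is AArch64, an IRQ taken from
 * non-secure EL0 with SCR_EL3.IRQ == 0, SCR_EL3.RW == 1 and
 * HCR_EL2.IMO == 0 indexes target_el_table[1][0][1][0][0][0] == 1,
 * so the exception is taken to EL1.
 */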
7559 
7560 static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value,
7561                             ARMMMUIdx mmu_idx, bool ignfault)
7562 {
7563     CPUState *cs = CPU(cpu);
7564     CPUARMState *env = &cpu->env;
7565     MemTxAttrs attrs = {};
7566     MemTxResult txres;
7567     target_ulong page_size;
7568     hwaddr physaddr;
7569     int prot;
7570     ARMMMUFaultInfo fi = {};
7571     bool secure = mmu_idx & ARM_MMU_IDX_M_S;
7572     int exc;
7573     bool exc_secure;
7574 
7575     if (get_phys_addr(env, addr, MMU_DATA_STORE, mmu_idx, &physaddr,
7576                       &attrs, &prot, &page_size, &fi, NULL)) {
7577         /* MPU/SAU lookup failed */
7578         if (fi.type == ARMFault_QEMU_SFault) {
7579             qemu_log_mask(CPU_LOG_INT,
7580                           "...SecureFault with SFSR.AUVIOL during stacking\n");
7581             env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
7582             env->v7m.sfar = addr;
7583             exc = ARMV7M_EXCP_SECURE;
7584             exc_secure = false;
7585         } else {
7586             qemu_log_mask(CPU_LOG_INT, "...MemManageFault with CFSR.MSTKERR\n");
7587             env->v7m.cfsr[secure] |= R_V7M_CFSR_MSTKERR_MASK;
7588             exc = ARMV7M_EXCP_MEM;
7589             exc_secure = secure;
7590         }
7591         goto pend_fault;
7592     }
7593     address_space_stl_le(arm_addressspace(cs, attrs), physaddr, value,
7594                          attrs, &txres);
7595     if (txres != MEMTX_OK) {
7596         /* BusFault trying to write the data */
7597         qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.STKERR\n");
7598         env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_STKERR_MASK;
7599         exc = ARMV7M_EXCP_BUS;
7600         exc_secure = false;
7601         goto pend_fault;
7602     }
7603     return true;
7604 
7605 pend_fault:
7606     /* By pending the exception at this point we are making
7607      * the IMPDEF choice "overridden exceptions pended" (see the
7608      * MergeExcInfo() pseudocode). The other choice would be to not
7609      * pend them now and then make a choice about which to throw away
7610      * later if we have two derived exceptions.
7611      * The only case when we must not pend the exception but instead
7612      * throw it away is if we are doing the push of the callee registers
7613      * and we've already generated a derived exception. Even in this
7614      * case we will still update the fault status registers.
7615      */
7616     if (!ignfault) {
7617         armv7m_nvic_set_pending_derived(env->nvic, exc, exc_secure);
7618     }
7619     return false;
7620 }
7621 
7622 static bool v7m_stack_read(ARMCPU *cpu, uint32_t *dest, uint32_t addr,
7623                            ARMMMUIdx mmu_idx)
7624 {
7625     CPUState *cs = CPU(cpu);
7626     CPUARMState *env = &cpu->env;
7627     MemTxAttrs attrs = {};
7628     MemTxResult txres;
7629     target_ulong page_size;
7630     hwaddr physaddr;
7631     int prot;
7632     ARMMMUFaultInfo fi = {};
7633     bool secure = mmu_idx & ARM_MMU_IDX_M_S;
7634     int exc;
7635     bool exc_secure;
7636     uint32_t value;
7637 
7638     if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &physaddr,
7639                       &attrs, &prot, &page_size, &fi, NULL)) {
7640         /* MPU/SAU lookup failed */
7641         if (fi.type == ARMFault_QEMU_SFault) {
7642             qemu_log_mask(CPU_LOG_INT,
7643                           "...SecureFault with SFSR.AUVIOL during unstack\n");
7644             env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
7645             env->v7m.sfar = addr;
7646             exc = ARMV7M_EXCP_SECURE;
7647             exc_secure = false;
7648         } else {
7649             qemu_log_mask(CPU_LOG_INT,
7650                           "...MemManageFault with CFSR.MUNSTKERR\n");
7651             env->v7m.cfsr[secure] |= R_V7M_CFSR_MUNSTKERR_MASK;
7652             exc = ARMV7M_EXCP_MEM;
7653             exc_secure = secure;
7654         }
7655         goto pend_fault;
7656     }
7657 
7658     value = address_space_ldl(arm_addressspace(cs, attrs), physaddr,
7659                               attrs, &txres);
7660     if (txres != MEMTX_OK) {
7661         /* BusFault trying to read the data */
7662         qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.UNSTKERR\n");
7663         env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_UNSTKERR_MASK;
7664         exc = ARMV7M_EXCP_BUS;
7665         exc_secure = false;
7666         goto pend_fault;
7667     }
7668 
7669     *dest = value;
7670     return true;
7671 
7672 pend_fault:
7673     /* By pending the exception at this point we are making
7674      * the IMPDEF choice "overridden exceptions pended" (see the
7675      * MergeExcInfo() pseudocode). The other choice would be to not
7676      * pend them now and then make a choice about which to throw away
7677      * later if we have two derived exceptions.
7678      */
7679     armv7m_nvic_set_pending(env->nvic, exc, exc_secure);
7680     return false;
7681 }
7682 
7683 /* Write to v7M CONTROL.SPSEL bit for the specified security bank.
7684  * This may change the current stack pointer between Main and Process
7685  * stack pointers if it is done for the CONTROL register for the current
7686  * security state.
7687  */
7688 static void write_v7m_control_spsel_for_secstate(CPUARMState *env,
7689                                                  bool new_spsel,
7690                                                  bool secstate)
7691 {
7692     bool old_is_psp = v7m_using_psp(env);
7693 
7694     env->v7m.control[secstate] =
7695         deposit32(env->v7m.control[secstate],
7696                   R_V7M_CONTROL_SPSEL_SHIFT,
7697                   R_V7M_CONTROL_SPSEL_LENGTH, new_spsel);
7698 
7699     if (secstate == env->v7m.secure) {
7700         bool new_is_psp = v7m_using_psp(env);
7701         uint32_t tmp;
7702 
7703         if (old_is_psp != new_is_psp) {
7704             tmp = env->v7m.other_sp;
7705             env->v7m.other_sp = env->regs[13];
7706             env->regs[13] = tmp;
7707         }
7708     }
7709 }
7710 
7711 /* Write to v7M CONTROL.SPSEL bit. This may change the current
7712  * stack pointer between Main and Process stack pointers.
7713  */
7714 static void write_v7m_control_spsel(CPUARMState *env, bool new_spsel)
7715 {
7716     write_v7m_control_spsel_for_secstate(env, new_spsel, env->v7m.secure);
7717 }
7718 
7719 void write_v7m_exception(CPUARMState *env, uint32_t new_exc)
7720 {
7721     /* Write a new value to v7m.exception, thus transitioning into or out
7722      * of Handler mode; this may result in a change of active stack pointer.
7723      */
7724     bool new_is_psp, old_is_psp = v7m_using_psp(env);
7725     uint32_t tmp;
7726 
7727     env->v7m.exception = new_exc;
7728 
7729     new_is_psp = v7m_using_psp(env);
7730 
7731     if (old_is_psp != new_is_psp) {
7732         tmp = env->v7m.other_sp;
7733         env->v7m.other_sp = env->regs[13];
7734         env->regs[13] = tmp;
7735     }
7736 }
7737 
7738 /* Switch M profile security state between NS and S */
7739 static void switch_v7m_security_state(CPUARMState *env, bool new_secstate)
7740 {
7741     uint32_t new_ss_msp, new_ss_psp;
7742 
7743     if (env->v7m.secure == new_secstate) {
7744         return;
7745     }
7746 
7747     /* All the banked state is accessed by looking at env->v7m.secure
7748      * except for the stack pointer; rearrange the SP appropriately.
7749      */
7750     new_ss_msp = env->v7m.other_ss_msp;
7751     new_ss_psp = env->v7m.other_ss_psp;
7752 
7753     if (v7m_using_psp(env)) {
7754         env->v7m.other_ss_psp = env->regs[13];
7755         env->v7m.other_ss_msp = env->v7m.other_sp;
7756     } else {
7757         env->v7m.other_ss_msp = env->regs[13];
7758         env->v7m.other_ss_psp = env->v7m.other_sp;
7759     }
7760 
7761     env->v7m.secure = new_secstate;
7762 
7763     if (v7m_using_psp(env)) {
7764         env->regs[13] = new_ss_psp;
7765         env->v7m.other_sp = new_ss_msp;
7766     } else {
7767         env->regs[13] = new_ss_msp;
7768         env->v7m.other_sp = new_ss_psp;
7769     }
7770 }
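
/*
 * Example: switching from Secure (currently running on MSP_S) to
 * Non-secure saves MSP_S into v7m.other_ss_msp and PSP_S into
 * v7m.other_ss_psp, then loads regs[13] and v7m.other_sp from the
 * previously saved NS pair according to the NS SPSEL setting.
 */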
7771 
7772 void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
7773 {
7774     /* Handle v7M BXNS:
7775      *  - if the return value is a magic value, do exception return (like BX)
7776      *  - otherwise bit 0 of the return value is the target security state
7777      */
7778     uint32_t min_magic;
7779 
7780     if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
7781         /* Covers FNC_RETURN and EXC_RETURN magic */
7782         min_magic = FNC_RETURN_MIN_MAGIC;
7783     } else {
7784         /* EXC_RETURN magic only */
7785         min_magic = EXC_RETURN_MIN_MAGIC;
7786     }
7787 
7788     if (dest >= min_magic) {
7789         /* This is an exception return magic value; put it where
7790          * do_v7m_exception_exit() expects and raise EXCEPTION_EXIT.
7791          * Note that if we ever add gen_ss_advance() singlestep support to
7792          * M profile this should count as an "instruction execution complete"
7793          * event (compare gen_bx_excret_final_code()).
7794          */
7795         env->regs[15] = dest & ~1;
7796         env->thumb = dest & 1;
7797         HELPER(exception_internal)(env, EXCP_EXCEPTION_EXIT);
7798         /* notreached */
7799     }
7800 
7801     /* translate.c should have made BXNS UNDEF unless we're secure */
7802     assert(env->v7m.secure);
7803 
7804     switch_v7m_security_state(env, dest & 1);
7805     env->thumb = 1;
7806     env->regs[15] = dest & ~1;
7807 }
7808 
7809 void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
7810 {
7811     /* Handle v7M BLXNS:
7812      *  - bit 0 of the destination address is the target security state
7813      */
7814 
7815     /* At this point regs[15] is the address just after the BLXNS */
7816     uint32_t nextinst = env->regs[15] | 1;
7817     uint32_t sp = env->regs[13] - 8;
7818     uint32_t saved_psr;
7819 
7820     /* translate.c will have made BLXNS UNDEF unless we're secure */
7821     assert(env->v7m.secure);
7822 
7823     if (dest & 1) {
7824         /* target is Secure, so this is just a normal BLX,
7825          * except that the low bit doesn't indicate Thumb/not.
7826          */
7827         env->regs[14] = nextinst;
7828         env->thumb = 1;
7829         env->regs[15] = dest & ~1;
7830         return;
7831     }
7832 
7833     /* Target is non-secure: first push a stack frame */
7834     if (!QEMU_IS_ALIGNED(sp, 8)) {
7835         qemu_log_mask(LOG_GUEST_ERROR,
7836                       "BLXNS with misaligned SP is UNPREDICTABLE\n");
7837     }
7838 
7839     if (sp < v7m_sp_limit(env)) {
7840         raise_exception(env, EXCP_STKOF, 0, 1);
7841     }
7842 
7843     saved_psr = env->v7m.exception;
7844     if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK) {
7845         saved_psr |= XPSR_SFPA;
7846     }
7847 
7848     /* Note that these stores can throw exceptions on MPU faults */
7849     cpu_stl_data(env, sp, nextinst);
7850     cpu_stl_data(env, sp + 4, saved_psr);
7851 
7852     env->regs[13] = sp;
7853     env->regs[14] = 0xfeffffff;
7854     if (arm_v7m_is_handler_mode(env)) {
7855         /* Write a dummy value to IPSR, to avoid leaking the current secure
7856          * exception number to non-secure code. This is guaranteed not
7857          * to cause write_v7m_exception() to actually change stacks.
7858          */
7859         write_v7m_exception(env, 1);
7860     }
7861     switch_v7m_security_state(env, 0);
7862     env->thumb = 1;
7863     env->regs[15] = dest;
7864 }
7865 
7866 static uint32_t *get_v7m_sp_ptr(CPUARMState *env, bool secure, bool threadmode,
7867                                 bool spsel)
7868 {
7869     /* Return a pointer to the location where we currently store the
7870      * stack pointer for the requested security state and thread mode.
7871      * This pointer will become invalid if the CPU state is updated
7872      * such that the stack pointers are switched around (eg changing
7873      * the SPSEL control bit).
7874      * Compare the v8M ARM ARM pseudocode LookUpSP_with_security_mode().
7875      * Unlike that pseudocode, we require the caller to pass us in the
7876      * SPSEL control bit value; this is because we also use this
7877      * function in handling of pushing of the callee-saves registers
7878      * part of the v8M stack frame (pseudocode PushCalleeStack()),
7879      * and in the tailchain codepath the SPSEL bit comes from the exception
7880      * return magic LR value from the previous exception. The pseudocode
7881      * opencodes the stack-selection in PushCalleeStack(), but we prefer
7882      * to make this utility function generic enough to do the job.
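          * The resulting selection is:
          *   same security state, want_psp == v7m_using_psp()  -> &regs[13]
          *   same security state, otherwise                    -> &v7m.other_sp
          *   other security state, want PSP                    -> &v7m.other_ss_psp
          *   other security state, want MSP                    -> &v7m.other_ss_msp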
7883      */
7884     bool want_psp = threadmode && spsel;
7885 
7886     if (secure == env->v7m.secure) {
7887         if (want_psp == v7m_using_psp(env)) {
7888             return &env->regs[13];
7889         } else {
7890             return &env->v7m.other_sp;
7891         }
7892     } else {
7893         if (want_psp) {
7894             return &env->v7m.other_ss_psp;
7895         } else {
7896             return &env->v7m.other_ss_msp;
7897         }
7898     }
7899 }
7900 
7901 static bool arm_v7m_load_vector(ARMCPU *cpu, int exc, bool targets_secure,
7902                                 uint32_t *pvec)
7903 {
7904     CPUState *cs = CPU(cpu);
7905     CPUARMState *env = &cpu->env;
7906     MemTxResult result;
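     /* Each vector table entry is 4 bytes; vecbase[] holds the banked VTOR */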
7907     uint32_t addr = env->v7m.vecbase[targets_secure] + exc * 4;
7908     uint32_t vector_entry;
7909     MemTxAttrs attrs = {};
7910     ARMMMUIdx mmu_idx;
7911     bool exc_secure;
7912 
7913     mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targets_secure, true);
7914 
7915     /* We don't do a get_phys_addr() here because the rules for vector
7916      * loads are special: they always use the default memory map, and
7917      * the default memory map permits reads from all addresses.
7918      * Since there's no easy way to tell pmsav8_mpu_lookup()
7919      * that we want this special case (which would always say "yes"),
7920      * we just do the SAU lookup here followed by a direct physical load.
7921      */
7922     attrs.secure = targets_secure;
7923     attrs.user = false;
7924 
7925     if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
7926         V8M_SAttributes sattrs = {};
7927 
7928         v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
7929         if (sattrs.ns) {
7930             attrs.secure = false;
7931         } else if (!targets_secure) {
7932             /* NS access to S memory */
7933             goto load_fail;
7934         }
7935     }
7936 
7937     vector_entry = address_space_ldl(arm_addressspace(cs, attrs), addr,
7938                                      attrs, &result);
7939     if (result != MEMTX_OK) {
7940         goto load_fail;
7941     }
7942     *pvec = vector_entry;
7943     return true;
7944 
7945 load_fail:
7946     /* All vector table fetch fails are reported as HardFault, with
7947      * HFSR.VECTTBL and .FORCED set. (FORCED is set because
7948      * technically the underlying exception is a MemManage or BusFault
7949      * that is escalated to HardFault.) This is a terminal exception,
7950      * so we will either take the HardFault immediately or else enter
7951      * lockup (the latter case is handled in armv7m_nvic_set_pending_derived()).
7952      */
7953     exc_secure = targets_secure ||
7954         !(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK);
7955     env->v7m.hfsr |= R_V7M_HFSR_VECTTBL_MASK | R_V7M_HFSR_FORCED_MASK;
7956     armv7m_nvic_set_pending_derived(env->nvic, ARMV7M_EXCP_HARD, exc_secure);
7957     return false;
7958 }
7959 
7960 static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain,
7961                                   bool ignore_faults)
7962 {
7963     /* For v8M, push the callee-saves register part of the stack frame.
7964      * Compare the v8M pseudocode PushCalleeStack().
7965      * In the tailchaining case this may not be the current stack.
7966      */
7967     CPUARMState *env = &cpu->env;
7968     uint32_t *frame_sp_p;
7969     uint32_t frameptr;
7970     ARMMMUIdx mmu_idx;
7971     bool stacked_ok;
7972     uint32_t limit;
7973     bool want_psp;
7974 
7975     if (dotailchain) {
7976         bool mode = lr & R_V7M_EXCRET_MODE_MASK;
7977         bool priv = !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_NPRIV_MASK) ||
7978             !mode;
7979 
7980         mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, M_REG_S, priv);
7981         frame_sp_p = get_v7m_sp_ptr(env, M_REG_S, mode,
7982                                     lr & R_V7M_EXCRET_SPSEL_MASK);
7983         want_psp = mode && (lr & R_V7M_EXCRET_SPSEL_MASK);
7984         if (want_psp) {
7985             limit = env->v7m.psplim[M_REG_S];
7986         } else {
7987             limit = env->v7m.msplim[M_REG_S];
7988         }
7989     } else {
7990         mmu_idx = arm_mmu_idx(env);
7991         frame_sp_p = &env->regs[13];
7992         limit = v7m_sp_limit(env);
7993     }
7994 
7995     frameptr = *frame_sp_p - 0x28;
7996     if (frameptr < limit) {
7997         /*
7998          * Stack limit failure: set SP to the limit value, and generate
7999          * STKOF UsageFault. Stack pushes below the limit must not be
8000          * performed. It is IMPDEF whether pushes above the limit are
8001          * performed; we choose not to.
8002          */
8003         qemu_log_mask(CPU_LOG_INT,
8004                       "...STKOF during callee-saves register stacking\n");
8005         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
8006         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
8007                                 env->v7m.secure);
8008         *frame_sp_p = limit;
8009         return true;
8010     }
8011 
8012     /* Write as much of the stack frame as we can. A write failure may
8013      * cause us to pend a derived exception.
8014      */
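     /* Callee-saves frame layout (0x28 bytes; the word at offset 0x4 is
      * reserved and not written):
      *   0x00: integrity signature 0xfefa125b
      *   0x08 .. 0x24: r4 .. r11
      */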
8015     stacked_ok =
8016         v7m_stack_write(cpu, frameptr, 0xfefa125b, mmu_idx, ignore_faults) &&
8017         v7m_stack_write(cpu, frameptr + 0x8, env->regs[4], mmu_idx,
8018                         ignore_faults) &&
8019         v7m_stack_write(cpu, frameptr + 0xc, env->regs[5], mmu_idx,
8020                         ignore_faults) &&
8021         v7m_stack_write(cpu, frameptr + 0x10, env->regs[6], mmu_idx,
8022                         ignore_faults) &&
8023         v7m_stack_write(cpu, frameptr + 0x14, env->regs[7], mmu_idx,
8024                         ignore_faults) &&
8025         v7m_stack_write(cpu, frameptr + 0x18, env->regs[8], mmu_idx,
8026                         ignore_faults) &&
8027         v7m_stack_write(cpu, frameptr + 0x1c, env->regs[9], mmu_idx,
8028                         ignore_faults) &&
8029         v7m_stack_write(cpu, frameptr + 0x20, env->regs[10], mmu_idx,
8030                         ignore_faults) &&
8031         v7m_stack_write(cpu, frameptr + 0x24, env->regs[11], mmu_idx,
8032                         ignore_faults);
8033 
8034     /* Update SP regardless of whether any of the stack accesses failed. */
8035     *frame_sp_p = frameptr;
8036 
8037     return !stacked_ok;
8038 }
8039 
8040 static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
8041                                 bool ignore_stackfaults)
8042 {
8043     /* Do the "take the exception" parts of exception entry,
8044      * but not the pushing of state to the stack. This is
8045      * similar to the pseudocode ExceptionTaken() function.
8046      */
8047     CPUARMState *env = &cpu->env;
8048     uint32_t addr;
8049     bool targets_secure;
8050     int exc;
8051     bool push_failed = false;
8052 
8053     armv7m_nvic_get_pending_irq_info(env->nvic, &exc, &targets_secure);
8054     qemu_log_mask(CPU_LOG_INT, "...taking pending %s exception %d\n",
8055                   targets_secure ? "secure" : "nonsecure", exc);
8056 
8057     if (arm_feature(env, ARM_FEATURE_V8)) {
8058         if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
8059             (lr & R_V7M_EXCRET_S_MASK)) {
8060             /* The background code (the owner of the registers in the
8061              * exception frame) is Secure. This means it may either already
8062              * have or now needs to push callee-saves registers.
8063              */
8064             if (targets_secure) {
8065                 if (dotailchain && !(lr & R_V7M_EXCRET_ES_MASK)) {
8066                     /* We took an exception from Secure to NonSecure
8067                      * (which means the callee-saved registers got stacked)
8068                      * and are now tailchaining to a Secure exception.
8069                      * Clear DCRS so eventual return from this Secure
8070                      * exception unstacks the callee-saved registers.
8071                      */
8072                     lr &= ~R_V7M_EXCRET_DCRS_MASK;
8073                 }
8074             } else {
8075                 /* We're going to a non-secure exception; push the
8076                  * callee-saves registers to the stack now, if they're
8077                  * not already saved.
8078                  */
8079                 if (lr & R_V7M_EXCRET_DCRS_MASK &&
8080                     !(dotailchain && !(lr & R_V7M_EXCRET_ES_MASK))) {
8081                     push_failed = v7m_push_callee_stack(cpu, lr, dotailchain,
8082                                                         ignore_stackfaults);
8083                 }
8084                 lr |= R_V7M_EXCRET_DCRS_MASK;
8085             }
8086         }
8087 
8088         lr &= ~R_V7M_EXCRET_ES_MASK;
8089         if (targets_secure || !arm_feature(env, ARM_FEATURE_M_SECURITY)) {
8090             lr |= R_V7M_EXCRET_ES_MASK;
8091         }
8092         lr &= ~R_V7M_EXCRET_SPSEL_MASK;
8093         if (env->v7m.control[targets_secure] & R_V7M_CONTROL_SPSEL_MASK) {
8094             lr |= R_V7M_EXCRET_SPSEL_MASK;
8095         }
8096 
8097         /* Clear registers if necessary to prevent non-secure exception
8098          * code being able to see register values from secure code.
8099          * Where register values become architecturally UNKNOWN we leave
8100          * them with their previous values.
8101          */
8102         if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
8103             if (!targets_secure) {
8104                 /* Always clear the caller-saved registers (they have been
8105                  * pushed to the stack earlier in v7m_push_stack()).
8106                  * Clear callee-saved registers if the background code is
8107                  * Secure (in which case these regs were saved in
8108                  * v7m_push_callee_stack()).
8109                  */
8110                 int i;
8111 
8112                 for (i = 0; i < 13; i++) {
8113                     /* r4..r11 are callee-saves, zero only if EXCRET.S == 1 */
8114                     if (i < 4 || i > 11 || (lr & R_V7M_EXCRET_S_MASK)) {
8115                         env->regs[i] = 0;
8116                     }
8117                 }
8118                 /* Clear EAPSR */
8119                 xpsr_write(env, 0, XPSR_NZCV | XPSR_Q | XPSR_GE | XPSR_IT);
8120             }
8121         }
8122     }
8123 
8124     if (push_failed && !ignore_stackfaults) {
8125         /* Derived exception on callee-saves register stacking:
8126          * we might now want to take a different exception which
8127          * targets a different security state, so try again from the top.
8128          */
8129         qemu_log_mask(CPU_LOG_INT,
8130                       "...derived exception on callee-saves register stacking\n");
8131         v7m_exception_taken(cpu, lr, true, true);
8132         return;
8133     }
8134 
8135     if (!arm_v7m_load_vector(cpu, exc, targets_secure, &addr)) {
8136         /* Vector load failed: derived exception */
8137         qemu_log_mask(CPU_LOG_INT, "...derived exception on vector table load\n");
8138         v7m_exception_taken(cpu, lr, true, true);
8139         return;
8140     }
8141 
8142     /* Now we've done everything that might cause a derived exception
8143      * we can go ahead and activate whichever exception we're going to
8144      * take (which might now be the derived exception).
8145      */
8146     armv7m_nvic_acknowledge_irq(env->nvic);
8147 
8148     /* Switch to target security state -- must do this before writing SPSEL */
8149     switch_v7m_security_state(env, targets_secure);
8150     write_v7m_control_spsel(env, 0);
8151     arm_clear_exclusive(env);
8152     /* Clear IT bits */
8153     env->condexec_bits = 0;
8154     env->regs[14] = lr;
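     /* The vector table entry holds the handler address, with bit 0
      * supplying the new Thumb state bit.
      */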
8155     env->regs[15] = addr & 0xfffffffe;
8156     env->thumb = addr & 1;
8157 }
8158 
8159 static bool v7m_push_stack(ARMCPU *cpu)
8160 {
8161     /* Do the "set up stack frame" part of exception entry,
8162      * similar to pseudocode PushStack().
8163      * Return true if we generate a derived exception (and so
8164      * should ignore further stack faults trying to process
8165      * that derived exception.)
8166      */
8167     bool stacked_ok;
8168     CPUARMState *env = &cpu->env;
8169     uint32_t xpsr = xpsr_read(env);
8170     uint32_t frameptr = env->regs[13];
8171     ARMMMUIdx mmu_idx = arm_mmu_idx(env);
8172 
8173     /* Align stack pointer if the guest wants that */
8174     if ((frameptr & 4) &&
8175         (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKALIGN_MASK)) {
8176         frameptr -= 4;
8177         xpsr |= XPSR_SPREALIGN;
8178     }
8179 
8180     frameptr -= 0x20;
8181 
8182     if (arm_feature(env, ARM_FEATURE_V8)) {
8183         uint32_t limit = v7m_sp_limit(env);
8184 
8185         if (frameptr < limit) {
8186             /*
8187              * Stack limit failure: set SP to the limit value, and generate
8188              * STKOF UsageFault. Stack pushes below the limit must not be
8189              * performed. It is IMPDEF whether pushes above the limit are
8190              * performed; we choose not to.
8191              */
8192             qemu_log_mask(CPU_LOG_INT,
8193                           "...STKOF during stacking\n");
8194             env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
8195             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
8196                                     env->v7m.secure);
8197             env->regs[13] = limit;
8198             return true;
8199         }
8200     }
8201 
8202     /* Write as much of the stack frame as we can. If we fail a stack
8203      * write this will result in a derived exception being pended
8204      * (which may be taken in preference to the one we started with
8205      * if it has higher priority).
8206      */
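     /* Basic frame layout (0x20 bytes):
      *   0x00: r0    0x04: r1    0x08: r2          0x0c: r3
      *   0x10: r12   0x14: lr    0x18: return PC   0x1c: xPSR
      */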
8207     stacked_ok =
8208         v7m_stack_write(cpu, frameptr, env->regs[0], mmu_idx, false) &&
8209         v7m_stack_write(cpu, frameptr + 4, env->regs[1], mmu_idx, false) &&
8210         v7m_stack_write(cpu, frameptr + 8, env->regs[2], mmu_idx, false) &&
8211         v7m_stack_write(cpu, frameptr + 12, env->regs[3], mmu_idx, false) &&
8212         v7m_stack_write(cpu, frameptr + 16, env->regs[12], mmu_idx, false) &&
8213         v7m_stack_write(cpu, frameptr + 20, env->regs[14], mmu_idx, false) &&
8214         v7m_stack_write(cpu, frameptr + 24, env->regs[15], mmu_idx, false) &&
8215         v7m_stack_write(cpu, frameptr + 28, xpsr, mmu_idx, false);
8216 
8217     /* Update SP regardless of whether any of the stack accesses failed. */
8218     env->regs[13] = frameptr;
8219 
8220     return !stacked_ok;
8221 }
8222 
8223 static void do_v7m_exception_exit(ARMCPU *cpu)
8224 {
8225     CPUARMState *env = &cpu->env;
8226     uint32_t excret;
8227     uint32_t xpsr;
8228     bool ufault = false;
8229     bool sfault = false;
8230     bool return_to_sp_process;
8231     bool return_to_handler;
8232     bool rettobase = false;
8233     bool exc_secure = false;
8234     bool return_to_secure;
8235 
8236     /* If we're not in Handler mode then jumps to magic exception-exit
8237      * addresses don't have magic behaviour. However for the v8M
8238      * security extensions the magic secure-function-return has to
8239      * work in thread mode too, so to avoid doing an extra check in
8240      * the generated code we allow exception-exit magic to also cause the
8241      * internal exception and bring us here in thread mode. Correct code
8242      * will never try to do this (the following insn fetch will always
8243      * fault) so the overhead of having taken an unnecessary exception
8244      * doesn't matter.
8245      */
8246     if (!arm_v7m_is_handler_mode(env)) {
8247         return;
8248     }
8249 
8250     /* In the spec pseudocode ExceptionReturn() is called directly
8251      * from BXWritePC() and gets the full target PC value including
8252      * bit zero. In QEMU's implementation we treat it as a normal
8253      * jump-to-register (which is then caught later on), and so split
8254      * the target value up between env->regs[15] and env->thumb in
8255      * gen_bx(). Reconstitute it.
8256      */
8257     excret = env->regs[15];
8258     if (env->thumb) {
8259         excret |= 1;
8260     }
8261 
8262     qemu_log_mask(CPU_LOG_INT, "Exception return: magic PC %" PRIx32
8263                   " previous exception %d\n",
8264                   excret, env->v7m.exception);
8265 
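     /* EXC_RETURN bit assignments (see the R_V7M_EXCRET_* field definitions
      * in internals.h): bit 0 ES, bit 1 RES0, bit 2 SPSEL, bit 3 MODE,
      * bit 4 FTYPE, bit 5 DCRS, bit 6 S; bits [31:7] are RES1.
      */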
8266     if ((excret & R_V7M_EXCRET_RES1_MASK) != R_V7M_EXCRET_RES1_MASK) {
8267         qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero high bits in exception "
8268                       "exit PC value 0x%" PRIx32 " are UNPREDICTABLE\n",
8269                       excret);
8270     }
8271 
8272     if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
8273         /* EXC_RETURN.ES validation check (R_SMFL). We must do this before
8274          * we pick which FAULTMASK to clear.
8275          */
8276         if (!env->v7m.secure &&
8277             ((excret & R_V7M_EXCRET_ES_MASK) ||
8278              !(excret & R_V7M_EXCRET_DCRS_MASK))) {
8279             sfault = true;
8280             /* For all other purposes, treat ES as 0 (R_HXSR) */
8281             excret &= ~R_V7M_EXCRET_ES_MASK;
8282         }
8283         exc_secure = excret & R_V7M_EXCRET_ES_MASK;
8284     }
8285 
8286     if (env->v7m.exception != ARMV7M_EXCP_NMI) {
8287         /* Auto-clear FAULTMASK on return from other than NMI.
8288          * If the security extension is implemented then this only
8289          * happens if the raw execution priority is >= 0; the
8290          * value of the ES bit in the exception return value indicates
8291          * which security state's faultmask to clear. (v8M ARM ARM R_KBNF.)
8292          */
8293         if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
8294             if (armv7m_nvic_raw_execution_priority(env->nvic) >= 0) {
8295                 env->v7m.faultmask[exc_secure] = 0;
8296             }
8297         } else {
8298             env->v7m.faultmask[M_REG_NS] = 0;
8299         }
8300     }
8301 
8302     switch (armv7m_nvic_complete_irq(env->nvic, env->v7m.exception,
8303                                      exc_secure)) {
8304     case -1:
8305         /* attempt to exit an exception that isn't active */
8306         ufault = true;
8307         break;
8308     case 0:
8309         /* still an irq active now */
8310         break;
8311     case 1:
8312         /* we returned to base exception level, no nesting.
8313          * (In the pseudocode this is written using "NestedActivation != 1"
8314          * where we have 'rettobase == false'.)
8315          */
8316         rettobase = true;
8317         break;
8318     default:
8319         g_assert_not_reached();
8320     }
8321 
8322     return_to_handler = !(excret & R_V7M_EXCRET_MODE_MASK);
8323     return_to_sp_process = excret & R_V7M_EXCRET_SPSEL_MASK;
8324     return_to_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
8325         (excret & R_V7M_EXCRET_S_MASK);
8326 
8327     if (arm_feature(env, ARM_FEATURE_V8)) {
8328         if (!arm_feature(env, ARM_FEATURE_M_SECURITY)) {
8329             /* UNPREDICTABLE if S == 1 or DCRS == 0 or ES == 1 (R_XLCP);
8330              * we choose to take the UsageFault.
8331              */
8332             if ((excret & R_V7M_EXCRET_S_MASK) ||
8333                 (excret & R_V7M_EXCRET_ES_MASK) ||
8334                 !(excret & R_V7M_EXCRET_DCRS_MASK)) {
8335                 ufault = true;
8336             }
8337         }
8338         if (excret & R_V7M_EXCRET_RES0_MASK) {
8339             ufault = true;
8340         }
8341     } else {
8342         /* For v7M we only recognize certain combinations of the low bits */
8343         switch (excret & 0xf) {
8344         case 1: /* Return to Handler */
8345             break;
8346         case 13: /* Return to Thread using Process stack */
8347         case 9: /* Return to Thread using Main stack */
8348             /* We only need to check NONBASETHRDENA for v7M, because in
8349              * v8M this bit does not exist (it is RES1).
8350              */
8351             if (!rettobase &&
8352                 !(env->v7m.ccr[env->v7m.secure] &
8353                   R_V7M_CCR_NONBASETHRDENA_MASK)) {
8354                 ufault = true;
8355             }
8356             break;
8357         default:
8358             ufault = true;
8359         }
8360     }
8361 
8362     /*
8363      * Set CONTROL.SPSEL from excret.SPSEL. Since we're still in
8364      * Handler mode (and will be until we write the new XPSR.Interrupt
8365      * field) this does not switch around the current stack pointer.
8366      * We must do this before we do any kind of tailchaining, including
8367      * for the derived exceptions on integrity check failures, or we will
8368      * give the guest an incorrect EXCRET.SPSEL value on exception entry.
8369      */
8370     write_v7m_control_spsel_for_secstate(env, return_to_sp_process, exc_secure);
8371 
8372     if (sfault) {
8373         env->v7m.sfsr |= R_V7M_SFSR_INVER_MASK;
8374         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
8375         qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
8376                       "stackframe: failed EXC_RETURN.ES validity check\n");
8377         v7m_exception_taken(cpu, excret, true, false);
8378         return;
8379     }
8380 
8381     if (ufault) {
8382         /* Bad exception return: instead of popping the exception
8383          * stack, directly take a usage fault on the current stack.
8384          */
8385         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
8386         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
8387         qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
8388                       "stackframe: failed exception return integrity check\n");
8389         v7m_exception_taken(cpu, excret, true, false);
8390         return;
8391     }
8392 
8393     /*
8394      * Tailchaining: if there is currently a pending exception that
8395      * is high enough priority to preempt execution at the level we're
8396      * about to return to, then just directly take that exception now,
8397      * avoiding an unstack-and-then-stack. Note that now that we have
8398      * deactivated the previous exception by calling armv7m_nvic_complete_irq(),
8399      * our current execution priority is already the execution priority we are
8400      * returning to -- none of the state we would unstack or set based on
8401      * the EXCRET value affects it.
8402      */
8403     if (armv7m_nvic_can_take_pending_exception(env->nvic)) {
8404         qemu_log_mask(CPU_LOG_INT, "...tailchaining to pending exception\n");
8405         v7m_exception_taken(cpu, excret, true, false);
8406         return;
8407     }
8408 
8409     switch_v7m_security_state(env, return_to_secure);
8410 
8411     {
8412         /* The stack pointer we should be reading the exception frame from
8413          * depends on bits in the magic exception return type value (and
8414          * for v8M isn't necessarily the stack pointer we will eventually
8415          * end up resuming execution with). Get a pointer to the location
8416          * in the CPU state struct where the SP we need is currently being
8417          * stored; we will use and modify it in place.
8418          * We use this limited C variable scope so we don't accidentally
8419          * use 'frame_sp_p' after we do something that makes it invalid.
8420          */
8421         uint32_t *frame_sp_p = get_v7m_sp_ptr(env,
8422                                               return_to_secure,
8423                                               !return_to_handler,
8424                                               return_to_sp_process);
8425         uint32_t frameptr = *frame_sp_p;
8426         bool pop_ok = true;
8427         ARMMMUIdx mmu_idx;
8428         bool return_to_priv = return_to_handler ||
8429             !(env->v7m.control[return_to_secure] & R_V7M_CONTROL_NPRIV_MASK);
8430 
8431         mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, return_to_secure,
8432                                                         return_to_priv);
8433 
8434         if (!QEMU_IS_ALIGNED(frameptr, 8) &&
8435             arm_feature(env, ARM_FEATURE_V8)) {
8436             qemu_log_mask(LOG_GUEST_ERROR,
8437                           "M profile exception return with non-8-aligned SP "
8438                           "for destination state is UNPREDICTABLE\n");
8439         }
8440 
8441         /* Do we need to pop callee-saved registers? */
8442         if (return_to_secure &&
8443             ((excret & R_V7M_EXCRET_ES_MASK) == 0 ||
8444              (excret & R_V7M_EXCRET_DCRS_MASK) == 0)) {
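                 /* The signature must match what v7m_push_callee_stack() pushed */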
8445             uint32_t expected_sig = 0xfefa125b;
8446             uint32_t actual_sig;
8447 
8448             pop_ok = v7m_stack_read(cpu, &actual_sig, frameptr, mmu_idx);
8449 
8450             if (pop_ok && expected_sig != actual_sig) {
8451                 /* Take a SecureFault on the current stack */
8452                 env->v7m.sfsr |= R_V7M_SFSR_INVIS_MASK;
8453                 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
8454                 qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
8455                               "stackframe: failed exception return integrity "
8456                               "signature check\n");
8457                 v7m_exception_taken(cpu, excret, true, false);
8458                 return;
8459             }
8460 
8461             pop_ok = pop_ok &&
8462                 v7m_stack_read(cpu, &env->regs[4], frameptr + 0x8, mmu_idx) &&
8463                 v7m_stack_read(cpu, &env->regs[5], frameptr + 0xc, mmu_idx) &&
8464                 v7m_stack_read(cpu, &env->regs[6], frameptr + 0x10, mmu_idx) &&
8465                 v7m_stack_read(cpu, &env->regs[7], frameptr + 0x14, mmu_idx) &&
8466                 v7m_stack_read(cpu, &env->regs[8], frameptr + 0x18, mmu_idx) &&
8467                 v7m_stack_read(cpu, &env->regs[9], frameptr + 0x1c, mmu_idx) &&
8468                 v7m_stack_read(cpu, &env->regs[10], frameptr + 0x20, mmu_idx) &&
8469                 v7m_stack_read(cpu, &env->regs[11], frameptr + 0x24, mmu_idx);
8470 
8471             frameptr += 0x28;
8472         }
8473 
8474         /* Pop registers */
8475         pop_ok = pop_ok &&
8476             v7m_stack_read(cpu, &env->regs[0], frameptr, mmu_idx) &&
8477             v7m_stack_read(cpu, &env->regs[1], frameptr + 0x4, mmu_idx) &&
8478             v7m_stack_read(cpu, &env->regs[2], frameptr + 0x8, mmu_idx) &&
8479             v7m_stack_read(cpu, &env->regs[3], frameptr + 0xc, mmu_idx) &&
8480             v7m_stack_read(cpu, &env->regs[12], frameptr + 0x10, mmu_idx) &&
8481             v7m_stack_read(cpu, &env->regs[14], frameptr + 0x14, mmu_idx) &&
8482             v7m_stack_read(cpu, &env->regs[15], frameptr + 0x18, mmu_idx) &&
8483             v7m_stack_read(cpu, &xpsr, frameptr + 0x1c, mmu_idx);
8484 
8485         if (!pop_ok) {
8486             /* v7m_stack_read() pended a fault, so take it (as a tail
8487              * chained exception on the same stack frame)
8488              */
8489             qemu_log_mask(CPU_LOG_INT, "...derived exception on unstacking\n");
8490             v7m_exception_taken(cpu, excret, true, false);
8491             return;
8492         }
8493 
8494         /* Returning from an exception with a PC with bit 0 set is defined
8495          * behaviour on v8M (bit 0 is ignored), but for v7M it was specified
8496          * to be UNPREDICTABLE. In practice actual v7M hardware seems to ignore
8497          * the lsbit, and there are several RTOSes out there which incorrectly
8498          * assume the r15 in the stack frame should be a Thumb-style "lsbit
8499          * indicates ARM/Thumb" value, so ignore the bit on v7M as well, but
8500          * complain about the badly behaved guest.
8501          */
8502         if (env->regs[15] & 1) {
8503             env->regs[15] &= ~1U;
8504             if (!arm_feature(env, ARM_FEATURE_V8)) {
8505                 qemu_log_mask(LOG_GUEST_ERROR,
8506                               "M profile return from interrupt with misaligned "
8507                               "PC is UNPREDICTABLE on v7M\n");
8508             }
8509         }
8510 
8511         if (arm_feature(env, ARM_FEATURE_V8)) {
8512             /* For v8M we have to check whether the xPSR exception field
8513              * matches the EXCRET value for return to handler/thread
8514              * before we commit to changing the SP and xPSR.
8515              */
8516             bool will_be_handler = (xpsr & XPSR_EXCP) != 0;
8517             if (return_to_handler != will_be_handler) {
8518                 /* Take an INVPC UsageFault on the current stack.
8519                  * By this point we will have switched to the security state
8520                  * for the background state, so this UsageFault will target
8521                  * that state.
8522                  */
8523                 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
8524                                         env->v7m.secure);
8525                 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
8526                 qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
8527                               "stackframe: failed exception return integrity "
8528                               "check\n");
8529                 v7m_exception_taken(cpu, excret, true, false);
8530                 return;
8531             }
8532         }
8533 
8534         /* Commit to consuming the stack frame */
8535         frameptr += 0x20;
8536         /* Undo stack alignment: the SPREALIGN bit indicates that the original
8537          * pre-exception SP was not 8-aligned and we added a padding word to
8538          * align it, so we undo this by ORing in the bit that increases it
8539          * from the current 8-aligned value to the 8-unaligned value. (Adding 4
8540          * would work too but a logical OR is how the pseudocode specifies it.)
8541          */
8542         if (xpsr & XPSR_SPREALIGN) {
8543             frameptr |= 4;
8544         }
8545         *frame_sp_p = frameptr;
8546     }
8547     /* This xpsr_write() will invalidate frame_sp_p as it may switch stack */
8548     xpsr_write(env, xpsr, ~XPSR_SPREALIGN);
8549 
8550     /* The restored xPSR exception field will be zero if we're
8551      * resuming in Thread mode. If that doesn't match what the
8552      * exception return excret specified then this is a UsageFault.
8553      * v7M requires we make this check here; v8M did it earlier.
8554      */
8555     if (return_to_handler != arm_v7m_is_handler_mode(env)) {
8556         /* Take an INVPC UsageFault by pushing the stack again;
8557          * we know we're v7M so this is never a Secure UsageFault.
8558          */
8559         bool ignore_stackfaults;
8560 
8561         assert(!arm_feature(env, ARM_FEATURE_V8));
8562         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, false);
8563         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
8564         ignore_stackfaults = v7m_push_stack(cpu);
8565         qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on new stackframe: "
8566                       "failed exception return integrity check\n");
8567         v7m_exception_taken(cpu, excret, false, ignore_stackfaults);
8568         return;
8569     }
8570 
8571     /* Otherwise, we have a successful exception exit. */
8572     arm_clear_exclusive(env);
8573     qemu_log_mask(CPU_LOG_INT, "...successful exception return\n");
8574 }
8575 
8576 static bool do_v7m_function_return(ARMCPU *cpu)
8577 {
8578     /* v8M security extensions magic function return.
8579      * We may either:
8580      *  (1) throw an exception (longjump)
8581      *  (2) return true if we successfully handled the function return
8582      *  (3) return false if we failed a consistency check and have
8583      *      pended a UsageFault that needs to be taken now
8584      *
8585      * At this point the magic return value is split between env->regs[15]
8586      * and env->thumb. We don't bother to reconstitute it because we don't
8587      * need it (all values are handled the same way).
8588      */
8589     CPUARMState *env = &cpu->env;
8590     uint32_t newpc, newpsr, newpsr_exc;
8591 
8592     qemu_log_mask(CPU_LOG_INT, "...really v7M secure function return\n");
8593 
8594     {
8595         bool threadmode, spsel;
8596         TCGMemOpIdx oi;
8597         ARMMMUIdx mmu_idx;
8598         uint32_t *frame_sp_p;
8599         uint32_t frameptr;
8600 
8601         /* Pull the return address and IPSR from the Secure stack */
8602         threadmode = !arm_v7m_is_handler_mode(env);
8603         spsel = env->v7m.control[M_REG_S] & R_V7M_CONTROL_SPSEL_MASK;
8604 
8605         frame_sp_p = get_v7m_sp_ptr(env, true, threadmode, spsel);
8606         frameptr = *frame_sp_p;
8607 
8608         /* These loads may throw an exception (for MPU faults). We want to
8609          * do them as secure, so work out what MMU index that is.
8610          */
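         /* The frame layout matches the push in HELPER(v7m_blxns):
          * [sp] = return address, [sp + 4] = partial saved xPSR.
          */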
8611         mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
8612         oi = make_memop_idx(MO_LE, arm_to_core_mmu_idx(mmu_idx));
8613         newpc = helper_le_ldul_mmu(env, frameptr, oi, 0);
8614         newpsr = helper_le_ldul_mmu(env, frameptr + 4, oi, 0);
8615 
8616         /* Consistency checks on new IPSR */
8617         newpsr_exc = newpsr & XPSR_EXCP;
8618         if (!((env->v7m.exception == 0 && newpsr_exc == 0) ||
8619               (env->v7m.exception == 1 && newpsr_exc != 0))) {
8620             /* Pend the fault and tell our caller to take it */
8621             env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
8622             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
8623                                     env->v7m.secure);
8624             qemu_log_mask(CPU_LOG_INT,
8625                           "...taking INVPC UsageFault: "
8626                           "IPSR consistency check failed\n");
8627             return false;
8628         }
8629 
8630         *frame_sp_p = frameptr + 8;
8631     }
8632 
8633     /* This invalidates frame_sp_p */
8634     switch_v7m_security_state(env, true);
8635     env->v7m.exception = newpsr_exc;
8636     env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
8637     if (newpsr & XPSR_SFPA) {
8638         env->v7m.control[M_REG_S] |= R_V7M_CONTROL_SFPA_MASK;
8639     }
8640     xpsr_write(env, 0, XPSR_IT);
8641     env->thumb = newpc & 1;
8642     env->regs[15] = newpc & ~1;
8643 
8644     qemu_log_mask(CPU_LOG_INT, "...function return successful\n");
8645     return true;
8646 }
8647 
8648 static void arm_log_exception(int idx)
8649 {
8650     if (qemu_loglevel_mask(CPU_LOG_INT)) {
8651         const char *exc = NULL;
8652         static const char * const excnames[] = {
8653             [EXCP_UDEF] = "Undefined Instruction",
8654             [EXCP_SWI] = "SVC",
8655             [EXCP_PREFETCH_ABORT] = "Prefetch Abort",
8656             [EXCP_DATA_ABORT] = "Data Abort",
8657             [EXCP_IRQ] = "IRQ",
8658             [EXCP_FIQ] = "FIQ",
8659             [EXCP_BKPT] = "Breakpoint",
8660             [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
8661             [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
8662             [EXCP_HVC] = "Hypervisor Call",
8663             [EXCP_HYP_TRAP] = "Hypervisor Trap",
8664             [EXCP_SMC] = "Secure Monitor Call",
8665             [EXCP_VIRQ] = "Virtual IRQ",
8666             [EXCP_VFIQ] = "Virtual FIQ",
8667             [EXCP_SEMIHOST] = "Semihosting call",
8668             [EXCP_NOCP] = "v7M NOCP UsageFault",
8669             [EXCP_INVSTATE] = "v7M INVSTATE UsageFault",
8670             [EXCP_STKOF] = "v8M STKOF UsageFault",
8671         };
8672 
8673         if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
8674             exc = excnames[idx];
8675         }
8676         if (!exc) {
8677             exc = "unknown";
8678         }
8679         qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s]\n", idx, exc);
8680     }
8681 }
8682 
8683 static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx,
8684                                uint32_t addr, uint16_t *insn)
8685 {
8686     /* Load a 16-bit portion of a v7M instruction, returning true on success,
8687      * or false on failure (in which case we will have pended the appropriate
8688      * exception).
8689      * We need to do the instruction fetch's MPU and SAU checks
8690      * like this because there is no MMU index that would allow
8691      * doing the load with a single function call. Instead we must
8692      * first check that the security attributes permit the load
8693      * and that they don't mismatch on the two halves of the instruction,
8694      * and then we do the load as a secure load (ie using the security
8695      * attributes of the address, not the CPU, as architecturally required).
8696      */
8697     CPUState *cs = CPU(cpu);
8698     CPUARMState *env = &cpu->env;
8699     V8M_SAttributes sattrs = {};
8700     MemTxAttrs attrs = {};
8701     ARMMMUFaultInfo fi = {};
8702     MemTxResult txres;
8703     target_ulong page_size;
8704     hwaddr physaddr;
8705     int prot;
8706 
8707     v8m_security_lookup(env, addr, MMU_INST_FETCH, mmu_idx, &sattrs);
8708     if (!sattrs.nsc || sattrs.ns) {
8709         /* This must be the second half of the insn, and it straddles a
8710          * region boundary with the second half not being S&NSC.
8711          */
8712         env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
8713         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
8714         qemu_log_mask(CPU_LOG_INT,
8715                       "...really SecureFault with SFSR.INVEP\n");
8716         return false;
8717     }
8718     if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx,
8719                       &physaddr, &attrs, &prot, &page_size, &fi, NULL)) {
8720         /* the MPU lookup failed */
8721         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
8722         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure);
8723         qemu_log_mask(CPU_LOG_INT, "...really MemManage with CFSR.IACCVIOL\n");
8724         return false;
8725     }
8726     *insn = address_space_lduw_le(arm_addressspace(cs, attrs), physaddr,
8727                                  attrs, &txres);
8728     if (txres != MEMTX_OK) {
8729         env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
8730         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
8731         qemu_log_mask(CPU_LOG_INT, "...really BusFault with CFSR.IBUSERR\n");
8732         return false;
8733     }
8734     return true;
8735 }
8736 
8737 static bool v7m_handle_execute_nsc(ARMCPU *cpu)
8738 {
8739     /* Check whether this attempt to execute code in a Secure & NS-Callable
8740      * memory region is for an SG instruction; if so, then emulate the
8741      * effect of the SG instruction and return true. Otherwise pend
8742      * the correct kind of exception and return false.
8743      */
8744     CPUARMState *env = &cpu->env;
8745     ARMMMUIdx mmu_idx;
8746     uint16_t insn;
8747 
8748     /* We should never get here unless get_phys_addr_pmsav8() caused
8749      * an exception for NS executing in S&NSC memory.
8750      */
8751     assert(!env->v7m.secure);
8752     assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
8753 
8754     /* We want to do the MPU lookup as secure; work out what mmu_idx that is */
8755     mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
8756 
8757     if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15], &insn)) {
8758         return false;
8759     }
8760 
8761     if (!env->thumb) {
8762         goto gen_invep;
8763     }
8764 
8765     if (insn != 0xe97f) {
8766         /* Not an SG instruction first half (we choose the IMPDEF
8767          * early-SG-check option).
8768          */
8769         goto gen_invep;
8770     }
8771 
8772     if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15] + 2, &insn)) {
8773         return false;
8774     }
8775 
8776     if (insn != 0xe97f) {
8777         /* Not an SG instruction second half (yes, both halves of the SG
8778          * insn have the same hex value)
8779          */
8780         goto gen_invep;
8781     }
8782 
8783     /* OK, we have confirmed that we really have an SG instruction.
8784      * We know we're NS in S memory so don't need to repeat those checks.
8785      */
8786     qemu_log_mask(CPU_LOG_INT, "...really an SG instruction at 0x%08" PRIx32
8787                   ", executing it\n", env->regs[15]);
8788     env->regs[14] &= ~1;
8789     switch_v7m_security_state(env, true);
8790     xpsr_write(env, 0, XPSR_IT);
8791     env->regs[15] += 4;
8792     return true;
8793 
8794 gen_invep:
8795     env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
8796     armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
8797     qemu_log_mask(CPU_LOG_INT,
8798                   "...really SecureFault with SFSR.INVEP\n");
8799     return false;
8800 }
8801 
8802 void arm_v7m_cpu_do_interrupt(CPUState *cs)
8803 {
8804     ARMCPU *cpu = ARM_CPU(cs);
8805     CPUARMState *env = &cpu->env;
8806     uint32_t lr;
8807     bool ignore_stackfaults;
8808 
8809     arm_log_exception(cs->exception_index);
8810 
8811     /* For exceptions we just mark as pending on the NVIC, and let that
8812        handle it.  */
8813     switch (cs->exception_index) {
8814     case EXCP_UDEF:
8815         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
8816         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNDEFINSTR_MASK;
8817         break;
8818     case EXCP_NOCP:
8819         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
8820         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_NOCP_MASK;
8821         break;
8822     case EXCP_INVSTATE:
8823         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
8824         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVSTATE_MASK;
8825         break;
8826     case EXCP_STKOF:
8827         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
8828         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
8829         break;
8830     case EXCP_SWI:
8831         /* The PC already points to the next instruction.  */
8832         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC, env->v7m.secure);
8833         break;
8834     case EXCP_PREFETCH_ABORT:
8835     case EXCP_DATA_ABORT:
8836         /* Note that for M profile we don't have a guest facing FSR, but
8837          * the env->exception.fsr will be populated by the code that
8838          * raises the fault, in the A profile short-descriptor format.
8839          */
8840         switch (env->exception.fsr & 0xf) {
8841         case M_FAKE_FSR_NSC_EXEC:
8842             /* Exception generated when we try to execute code at an address
8843              * which is marked as Secure & Non-Secure Callable and the CPU
8844              * is in the Non-Secure state. The only instruction which can
8845              * be executed like this is SG (and that only if both halves of
8846              * the SG instruction have the same security attributes.)
8847              * Everything else must generate an INVEP SecureFault, so we
8848              * emulate the SG instruction here.
8849              */
8850             if (v7m_handle_execute_nsc(cpu)) {
8851                 return;
8852             }
8853             break;
8854         case M_FAKE_FSR_SFAULT:
8855             /* Various flavours of SecureFault for attempts to execute or
8856              * access data in the wrong security state.
8857              */
8858             switch (cs->exception_index) {
8859             case EXCP_PREFETCH_ABORT:
8860                 if (env->v7m.secure) {
8861                     env->v7m.sfsr |= R_V7M_SFSR_INVTRAN_MASK;
8862                     qemu_log_mask(CPU_LOG_INT,
8863                                   "...really SecureFault with SFSR.INVTRAN\n");
8864                 } else {
8865                     env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
8866                     qemu_log_mask(CPU_LOG_INT,
8867                                   "...really SecureFault with SFSR.INVEP\n");
8868                 }
8869                 break;
8870             case EXCP_DATA_ABORT:
8871                 /* This must be an NS access to S memory */
8872                 env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
8873                 qemu_log_mask(CPU_LOG_INT,
8874                               "...really SecureFault with SFSR.AUVIOL\n");
8875                 break;
8876             }
8877             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
8878             break;
8879         case 0x8: /* External Abort */
8880             switch (cs->exception_index) {
8881             case EXCP_PREFETCH_ABORT:
8882                 env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
8883                 qemu_log_mask(CPU_LOG_INT, "...with CFSR.IBUSERR\n");
8884                 break;
8885             case EXCP_DATA_ABORT:
8886                 env->v7m.cfsr[M_REG_NS] |=
8887                     (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK);
8888                 env->v7m.bfar = env->exception.vaddress;
8889                 qemu_log_mask(CPU_LOG_INT,
8890                               "...with CFSR.PRECISERR and BFAR 0x%x\n",
8891                               env->v7m.bfar);
8892                 break;
8893             }
8894             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
8895             break;
8896         default:
8897             /* All other FSR values are either MPU faults or "can't happen
8898              * for M profile" cases.
8899              */
8900             switch (cs->exception_index) {
8901             case EXCP_PREFETCH_ABORT:
8902                 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
8903                 qemu_log_mask(CPU_LOG_INT, "...with CFSR.IACCVIOL\n");
8904                 break;
8905             case EXCP_DATA_ABORT:
8906                 env->v7m.cfsr[env->v7m.secure] |=
8907                     (R_V7M_CFSR_DACCVIOL_MASK | R_V7M_CFSR_MMARVALID_MASK);
8908                 env->v7m.mmfar[env->v7m.secure] = env->exception.vaddress;
8909                 qemu_log_mask(CPU_LOG_INT,
8910                               "...with CFSR.DACCVIOL and MMFAR 0x%x\n",
8911                               env->v7m.mmfar[env->v7m.secure]);
8912                 break;
8913             }
8914             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM,
8915                                     env->v7m.secure);
8916             break;
8917         }
8918         break;
8919     case EXCP_BKPT:
8920         if (semihosting_enabled()) {
8921             int nr;
8922             nr = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env)) & 0xff;
8923             if (nr == 0xab) {
8924                 env->regs[15] += 2;
8925                 qemu_log_mask(CPU_LOG_INT,
8926                               "...handling as semihosting call 0x%x\n",
8927                               env->regs[0]);
8928                 env->regs[0] = do_arm_semihosting(env);
8929                 return;
8930             }
8931         }
8932         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG, false);
8933         break;
8934     case EXCP_IRQ:
8935         break;
8936     case EXCP_EXCEPTION_EXIT:
8937         if (env->regs[15] < EXC_RETURN_MIN_MAGIC) {
8938             /* Must be v8M security extension function return */
8939             assert(env->regs[15] >= FNC_RETURN_MIN_MAGIC);
8940             assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
8941             if (do_v7m_function_return(cpu)) {
8942                 return;
8943             }
8944         } else {
8945             do_v7m_exception_exit(cpu);
8946             return;
8947         }
8948         break;
8949     default:
8950         cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
8951         return; /* Never happens.  Keep compiler happy.  */
8952     }
8953 
8954     if (arm_feature(env, ARM_FEATURE_V8)) {
8955         lr = R_V7M_EXCRET_RES1_MASK |
8956             R_V7M_EXCRET_DCRS_MASK |
8957             R_V7M_EXCRET_FTYPE_MASK;
8958         /* The S bit indicates whether we should return to Secure
8959          * or NonSecure (ie our current state).
8960          * The ES bit indicates whether we're taking this exception
8961          * to Secure or NonSecure (ie our target state). We set it
8962          * later, in v7m_exception_taken().
8963          * The SPSEL bit is also set in v7m_exception_taken() for v8M.
8964          * This corresponds to the ARM ARM pseudocode for v8M setting
8965          * some LR bits in PushStack() and some in ExceptionTaken();
8966          * the distinction matters for the tailchain cases where we
8967          * can take an exception without pushing the stack.
8968          */
8969         if (env->v7m.secure) {
8970             lr |= R_V7M_EXCRET_S_MASK;
8971         }
8972     } else {
8973         lr = R_V7M_EXCRET_RES1_MASK |
8974             R_V7M_EXCRET_S_MASK |
8975             R_V7M_EXCRET_DCRS_MASK |
8976             R_V7M_EXCRET_FTYPE_MASK |
8977             R_V7M_EXCRET_ES_MASK;
8978         if (env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK) {
8979             lr |= R_V7M_EXCRET_SPSEL_MASK;
8980         }
8981     }
8982     if (!arm_v7m_is_handler_mode(env)) {
8983         lr |= R_V7M_EXCRET_MODE_MASK;
8984     }
8985 
8986     ignore_stackfaults = v7m_push_stack(cpu);
8987     v7m_exception_taken(cpu, lr, false, ignore_stackfaults);
8988 }
8989 
8990 /* Function used to synchronize QEMU's AArch64 register set with AArch32
8991  * register set.  This is necessary when switching between AArch32 and AArch64
8992  * execution state.
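  * The AArch32 banked registers map onto x13-x30 as follows:
  *   x13/x14: USR/SYS SP and LR     x15: HYP SP
  *   x16/x17: IRQ LR and SP         x18/x19: SVC LR and SP
  *   x20/x21: ABT LR and SP         x22/x23: UND LR and SP
  *   x24-x30: FIQ r8-r12, SP and LR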
8993  */
8994 void aarch64_sync_32_to_64(CPUARMState *env)
8995 {
8996     int i;
8997     uint32_t mode = env->uncached_cpsr & CPSR_M;
8998 
8999     /* We can blanket copy R[0:7] to X[0:7] */
9000     for (i = 0; i < 8; i++) {
9001         env->xregs[i] = env->regs[i];
9002     }
9003 
9004     /* Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12.
9005      * Otherwise, they come from the banked user regs.
9006      */
9007     if (mode == ARM_CPU_MODE_FIQ) {
9008         for (i = 8; i < 13; i++) {
9009             env->xregs[i] = env->usr_regs[i - 8];
9010         }
9011     } else {
9012         for (i = 8; i < 13; i++) {
9013             env->xregs[i] = env->regs[i];
9014         }
9015     }
9016 
9017     /* Registers x13-x23 are the various mode SP and LR registers. Registers
9018      * r13 and r14 are only copied if we are in that mode, otherwise we copy
9019      * from the mode banked register.
9020      */
9021     if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
9022         env->xregs[13] = env->regs[13];
9023         env->xregs[14] = env->regs[14];
9024     } else {
9025         env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)];
9026         /* HYP is an exception in that it is copied from r14 */
9027         if (mode == ARM_CPU_MODE_HYP) {
9028             env->xregs[14] = env->regs[14];
9029         } else {
9030             env->xregs[14] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)];
9031         }
9032     }
9033 
9034     if (mode == ARM_CPU_MODE_HYP) {
9035         env->xregs[15] = env->regs[13];
9036     } else {
9037         env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)];
9038     }
9039 
9040     if (mode == ARM_CPU_MODE_IRQ) {
9041         env->xregs[16] = env->regs[14];
9042         env->xregs[17] = env->regs[13];
9043     } else {
9044         env->xregs[16] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)];
9045         env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)];
9046     }
9047 
9048     if (mode == ARM_CPU_MODE_SVC) {
9049         env->xregs[18] = env->regs[14];
9050         env->xregs[19] = env->regs[13];
9051     } else {
9052         env->xregs[18] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)];
9053         env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)];
9054     }
9055 
9056     if (mode == ARM_CPU_MODE_ABT) {
9057         env->xregs[20] = env->regs[14];
9058         env->xregs[21] = env->regs[13];
9059     } else {
9060         env->xregs[20] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)];
9061         env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)];
9062     }
9063 
9064     if (mode == ARM_CPU_MODE_UND) {
9065         env->xregs[22] = env->regs[14];
9066         env->xregs[23] = env->regs[13];
9067     } else {
9068         env->xregs[22] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)];
9069         env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)];
9070     }
9071 
9072     /* Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
9073      * mode, then we can copy from r8-r14.  Otherwise, we copy from the
9074      * FIQ bank for r8-r14.
9075      */
9076     if (mode == ARM_CPU_MODE_FIQ) {
9077         for (i = 24; i < 31; i++) {
9078             env->xregs[i] = env->regs[i - 16];   /* X[24:30] <- R[8:14] */
9079         }
9080     } else {
9081         for (i = 24; i < 29; i++) {
9082             env->xregs[i] = env->fiq_regs[i - 24];
9083         }
9084         env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)];
9085         env->xregs[30] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)];
9086     }
9087 
9088     env->pc = env->regs[15];
9089 }
9090 
9091 /* Function used to synchronize QEMU's AArch32 register set with AArch64
9092  * register set.  This is necessary when switching between AArch32 and AArch64
9093  * execution state.
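  * This is the inverse of aarch64_sync_32_to_64(); see the register
  * mapping described there.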
9094  */
9095 void aarch64_sync_64_to_32(CPUARMState *env)
9096 {
9097     int i;
9098     uint32_t mode = env->uncached_cpsr & CPSR_M;
9099 
9100     /* We can blanket copy X[0:7] to R[0:7] */
9101     for (i = 0; i < 8; i++) {
9102         env->regs[i] = env->xregs[i];
9103     }
9104 
9105     /* Unless we are in FIQ mode, r8-r12 come directly from x8-x12.
9106      * Otherwise, we copy x8-x12 into the banked user regs.
9107      */
9108     if (mode == ARM_CPU_MODE_FIQ) {
9109         for (i = 8; i < 13; i++) {
9110             env->usr_regs[i - 8] = env->xregs[i];
9111         }
9112     } else {
9113         for (i = 8; i < 13; i++) {
9114             env->regs[i] = env->xregs[i];
9115         }
9116     }
9117 
9118     /* Registers r13 & r14 depend on the current mode.
9119      * If we are in a given mode, we copy the corresponding x registers to r13
9120      * and r14.  Otherwise, we copy the x register to the banked r13 and r14
9121      * for the mode.
9122      */
9123     if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
9124         env->regs[13] = env->xregs[13];
9125         env->regs[14] = env->xregs[14];
9126     } else {
9127         env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13];
9128 
9129         /* HYP is an exception in that it does not have its own banked r14 but
9130          * shares the USR r14
9131          */
9132         if (mode == ARM_CPU_MODE_HYP) {
9133             env->regs[14] = env->xregs[14];
9134         } else {
9135             env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)] = env->xregs[14];
9136         }
9137     }
9138 
9139     if (mode == ARM_CPU_MODE_HYP) {
9140         env->regs[13] = env->xregs[15];
9141     } else {
9142         env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15];
9143     }
9144 
9145     if (mode == ARM_CPU_MODE_IRQ) {
9146         env->regs[14] = env->xregs[16];
9147         env->regs[13] = env->xregs[17];
9148     } else {
9149         env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16];
9150         env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17];
9151     }
9152 
9153     if (mode == ARM_CPU_MODE_SVC) {
9154         env->regs[14] = env->xregs[18];
9155         env->regs[13] = env->xregs[19];
9156     } else {
9157         env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18];
9158         env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19];
9159     }
9160 
9161     if (mode == ARM_CPU_MODE_ABT) {
9162         env->regs[14] = env->xregs[20];
9163         env->regs[13] = env->xregs[21];
9164     } else {
9165         env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20];
9166         env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21];
9167     }
9168 
9169     if (mode == ARM_CPU_MODE_UND) {
9170         env->regs[14] = env->xregs[22];
9171         env->regs[13] = env->xregs[23];
9172     } else {
9173         env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)] = env->xregs[22];
9174         env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23];
9175     }
9176 
9177     /* Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
9178      * mode, then we can copy to r8-r14.  Otherwise, we copy to the
9179      * FIQ bank for r8-r14.
9180      */
9181     if (mode == ARM_CPU_MODE_FIQ) {
9182         for (i = 24; i < 31; i++) {
9183             env->regs[i - 16] = env->xregs[i];   /* X[24:30] -> R[8:14] */
9184         }
9185     } else {
9186         for (i = 24; i < 29; i++) {
9187             env->fiq_regs[i - 24] = env->xregs[i];
9188         }
9189         env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29];
9190         env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30];
9191     }
9192 
9193     env->regs[15] = env->pc;
9194 }
9195 
9196 static void take_aarch32_exception(CPUARMState *env, int new_mode,
9197                                    uint32_t mask, uint32_t offset,
9198                                    uint32_t newpc)
9199 {
9200     /* Change the CPU state so as to actually take the exception. */
9201     switch_mode(env, new_mode);
9202     /*
9203      * For exceptions taken to AArch32 we must clear the SS bit in both
9204      * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now.
9205      */
9206     env->uncached_cpsr &= ~PSTATE_SS;
9207     env->spsr = cpsr_read(env);
9208     /* Clear IT bits.  */
9209     env->condexec_bits = 0;
9210     /* Switch to the new mode, and to the correct instruction set.  */
9211     env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
9212     /* Set new mode endianness */
9213     env->uncached_cpsr &= ~CPSR_E;
9214     if (env->cp15.sctlr_el[arm_current_el(env)] & SCTLR_EE) {
9215         env->uncached_cpsr |= CPSR_E;
9216     }
9217     /* J and IL must always be cleared for exception entry */
9218     env->uncached_cpsr &= ~(CPSR_IL | CPSR_J);
9219     env->daif |= mask;
9220 
9221     if (new_mode == ARM_CPU_MODE_HYP) {
9222         env->thumb = (env->cp15.sctlr_el[2] & SCTLR_TE) != 0;
9223         env->elr_el[2] = env->regs[15];
9224     } else {
        /*
         * Strictly speaking this is inaccurate: SCTLR (c1_sys) did not
         * exist on ARMv4T/v5, but reading it here is harmless in practice,
         * and the V4T check keeps pre-Thumb CPUs in ARM state on entry.
         */
9229         if (arm_feature(env, ARM_FEATURE_V4T)) {
9230             env->thumb =
9231                 (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0;
9232         }
9233         env->regs[14] = env->regs[15] + offset;
9234     }
9235     env->regs[15] = newpc;
9236 }
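
/*
 * Illustration (not a real call site): an IRQ taken to AArch32 IRQ mode
 * with low vectors and SCR.IRQ clear ends up, via
 * arm_cpu_do_interrupt_aarch32() below, as roughly
 *
 *     take_aarch32_exception(env, ARM_CPU_MODE_IRQ, CPSR_A | CPSR_I, 4,
 *                            A32_BANKED_CURRENT_REG_GET(env, vbar) + 0x18);
 *
 * i.e. mask selects which of A/I/F get set in DAIF, offset fixes up the
 * preferred return address saved into r14, and newpc is the vector address.
 */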
9237 
9238 static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs)
9239 {
9240     /*
9241      * Handle exception entry to Hyp mode; this is sufficiently
9242      * different to entry to other AArch32 modes that we handle it
9243      * separately here.
9244      *
9245      * The vector table entry used is always the 0x14 Hyp mode entry point,
9246      * unless this is an UNDEF/HVC/abort taken from Hyp to Hyp.
9247      * The offset applied to the preferred return address is always zero
9248      * (see DDI0487C.a section G1.12.3).
9249      * PSTATE A/I/F masks are set based only on the SCR.EA/IRQ/FIQ values.
9250      */
9251     uint32_t addr, mask;
9252     ARMCPU *cpu = ARM_CPU(cs);
9253     CPUARMState *env = &cpu->env;
9254 
9255     switch (cs->exception_index) {
9256     case EXCP_UDEF:
9257         addr = 0x04;
9258         break;
9259     case EXCP_SWI:
9260         addr = 0x14;
9261         break;
9262     case EXCP_BKPT:
9263         /* Fall through to prefetch abort.  */
9264     case EXCP_PREFETCH_ABORT:
9265         env->cp15.ifar_s = env->exception.vaddress;
9266         qemu_log_mask(CPU_LOG_INT, "...with HIFAR 0x%x\n",
9267                       (uint32_t)env->exception.vaddress);
9268         addr = 0x0c;
9269         break;
9270     case EXCP_DATA_ABORT:
9271         env->cp15.dfar_s = env->exception.vaddress;
9272         qemu_log_mask(CPU_LOG_INT, "...with HDFAR 0x%x\n",
9273                       (uint32_t)env->exception.vaddress);
9274         addr = 0x10;
9275         break;
9276     case EXCP_IRQ:
9277         addr = 0x18;
9278         break;
9279     case EXCP_FIQ:
9280         addr = 0x1c;
9281         break;
9282     case EXCP_HVC:
9283         addr = 0x08;
9284         break;
    case EXCP_HYP_TRAP:
        addr = 0x14;
        break;
    default:
9288         cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
9289     }
9290 
9291     if (cs->exception_index != EXCP_IRQ && cs->exception_index != EXCP_FIQ) {
9292         if (!arm_feature(env, ARM_FEATURE_V8)) {
9293             /*
9294              * QEMU syndrome values are v8-style. v7 has the IL bit
9295              * UNK/SBZP for "field not valid" cases, where v8 uses RES1.
9296              * If this is a v7 CPU, squash the IL bit in those cases.
9297              */
9298             if (cs->exception_index == EXCP_PREFETCH_ABORT ||
9299                 (cs->exception_index == EXCP_DATA_ABORT &&
9300                  !(env->exception.syndrome & ARM_EL_ISV)) ||
9301                 syn_get_ec(env->exception.syndrome) == EC_UNCATEGORIZED) {
9302                 env->exception.syndrome &= ~ARM_EL_IL;
9303             }
9304         }
9305         env->cp15.esr_el[2] = env->exception.syndrome;
9306     }
9307 
9308     if (arm_current_el(env) != 2 && addr < 0x14) {
9309         addr = 0x14;
9310     }
9311 
9312     mask = 0;
9313     if (!(env->cp15.scr_el3 & SCR_EA)) {
9314         mask |= CPSR_A;
9315     }
9316     if (!(env->cp15.scr_el3 & SCR_IRQ)) {
9317         mask |= CPSR_I;
9318     }
9319     if (!(env->cp15.scr_el3 & SCR_FIQ)) {
9320         mask |= CPSR_F;
9321     }
9322 
9323     addr += env->cp15.hvbar;
9324 
9325     take_aarch32_exception(env, ARM_CPU_MODE_HYP, mask, 0, addr);
9326 }
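
/*
 * For reference, the Hyp vector offsets used above are:
 *   0x04 Undefined, 0x08 HVC, 0x0c Prefetch Abort, 0x10 Data Abort,
 *   0x14 Hyp Trap (and any exception taken from below EL2),
 *   0x18 IRQ, 0x1c FIQ;
 * the final vector address is that offset plus HVBAR.
 */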
9327 
9328 static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
9329 {
9330     ARMCPU *cpu = ARM_CPU(cs);
9331     CPUARMState *env = &cpu->env;
9332     uint32_t addr;
9333     uint32_t mask;
9334     int new_mode;
9335     uint32_t offset;
9336     uint32_t moe;
9337 
9338     /* If this is a debug exception we must update the DBGDSCR.MOE bits */
9339     switch (syn_get_ec(env->exception.syndrome)) {
9340     case EC_BREAKPOINT:
9341     case EC_BREAKPOINT_SAME_EL:
9342         moe = 1;
9343         break;
9344     case EC_WATCHPOINT:
9345     case EC_WATCHPOINT_SAME_EL:
9346         moe = 10;
9347         break;
9348     case EC_AA32_BKPT:
9349         moe = 3;
9350         break;
9351     case EC_VECTORCATCH:
9352         moe = 5;
9353         break;
9354     default:
9355         moe = 0;
9356         break;
9357     }
9358 
9359     if (moe) {
9360         env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe);
9361     }
9362 
9363     if (env->exception.target_el == 2) {
9364         arm_cpu_do_interrupt_aarch32_hyp(cs);
9365         return;
9366     }
9367 
9368     switch (cs->exception_index) {
9369     case EXCP_UDEF:
9370         new_mode = ARM_CPU_MODE_UND;
9371         addr = 0x04;
9372         mask = CPSR_I;
        if (env->thumb) {
            offset = 2;
        } else {
            offset = 4;
        }
9377         break;
9378     case EXCP_SWI:
9379         new_mode = ARM_CPU_MODE_SVC;
9380         addr = 0x08;
9381         mask = CPSR_I;
9382         /* The PC already points to the next instruction.  */
9383         offset = 0;
9384         break;
9385     case EXCP_BKPT:
9386         /* Fall through to prefetch abort.  */
9387     case EXCP_PREFETCH_ABORT:
9388         A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr);
9389         A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress);
9390         qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n",
9391                       env->exception.fsr, (uint32_t)env->exception.vaddress);
9392         new_mode = ARM_CPU_MODE_ABT;
9393         addr = 0x0c;
9394         mask = CPSR_A | CPSR_I;
9395         offset = 4;
9396         break;
9397     case EXCP_DATA_ABORT:
9398         A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
9399         A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress);
9400         qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
9401                       env->exception.fsr,
9402                       (uint32_t)env->exception.vaddress);
9403         new_mode = ARM_CPU_MODE_ABT;
9404         addr = 0x10;
9405         mask = CPSR_A | CPSR_I;
9406         offset = 8;
9407         break;
9408     case EXCP_IRQ:
9409         new_mode = ARM_CPU_MODE_IRQ;
9410         addr = 0x18;
9411         /* Disable IRQ and imprecise data aborts.  */
9412         mask = CPSR_A | CPSR_I;
9413         offset = 4;
9414         if (env->cp15.scr_el3 & SCR_IRQ) {
9415             /* IRQ routed to monitor mode */
9416             new_mode = ARM_CPU_MODE_MON;
9417             mask |= CPSR_F;
9418         }
9419         break;
9420     case EXCP_FIQ:
9421         new_mode = ARM_CPU_MODE_FIQ;
9422         addr = 0x1c;
9423         /* Disable FIQ, IRQ and imprecise data aborts.  */
9424         mask = CPSR_A | CPSR_I | CPSR_F;
9425         if (env->cp15.scr_el3 & SCR_FIQ) {
9426             /* FIQ routed to monitor mode */
9427             new_mode = ARM_CPU_MODE_MON;
9428         }
9429         offset = 4;
9430         break;
9431     case EXCP_VIRQ:
9432         new_mode = ARM_CPU_MODE_IRQ;
9433         addr = 0x18;
9434         /* Disable IRQ and imprecise data aborts.  */
9435         mask = CPSR_A | CPSR_I;
9436         offset = 4;
9437         break;
9438     case EXCP_VFIQ:
9439         new_mode = ARM_CPU_MODE_FIQ;
9440         addr = 0x1c;
9441         /* Disable FIQ, IRQ and imprecise data aborts.  */
9442         mask = CPSR_A | CPSR_I | CPSR_F;
9443         offset = 4;
9444         break;
9445     case EXCP_SMC:
9446         new_mode = ARM_CPU_MODE_MON;
9447         addr = 0x08;
9448         mask = CPSR_A | CPSR_I | CPSR_F;
9449         offset = 0;
9450         break;
9451     default:
9452         cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
9453         return; /* Never happens.  Keep compiler happy.  */
9454     }
9455 
9456     if (new_mode == ARM_CPU_MODE_MON) {
9457         addr += env->cp15.mvbar;
9458     } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
9459         /* High vectors. When enabled, base address cannot be remapped. */
9460         addr += 0xffff0000;
9461     } else {
        /* The ARMv7 architecture provides a vector base address register
         * (VBAR) to remap the interrupt vector table.
         * This register is only used in non-monitor mode, and is banked.
         * Note: only bits 31:5 are valid.
         */
9467         addr += A32_BANKED_CURRENT_REG_GET(env, vbar);
9468     }
9469 
9470     if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
9471         env->cp15.scr_el3 &= ~SCR_NS;
9472     }
9473 
9474     take_aarch32_exception(env, new_mode, mask, offset, addr);
9475 }
9476 
9477 /* Handle exception entry to a target EL which is using AArch64 */
9478 static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
9479 {
9480     ARMCPU *cpu = ARM_CPU(cs);
9481     CPUARMState *env = &cpu->env;
9482     unsigned int new_el = env->exception.target_el;
9483     target_ulong addr = env->cp15.vbar_el[new_el];
9484     unsigned int new_mode = aarch64_pstate_mode(new_el, true);
9485     unsigned int cur_el = arm_current_el(env);
9486 
    /*
     * Note that new_el can never be 0.  If cur_el is 0, then the el0_a64
     * argument to aarch64_sve_change_el() is is_a64(env); otherwise it is
     * ignored.
     */
9491     aarch64_sve_change_el(env, cur_el, new_el, is_a64(env));
9492 
9493     if (cur_el < new_el) {
9494         /* Entry vector offset depends on whether the implemented EL
9495          * immediately lower than the target level is using AArch32 or AArch64
9496          */
9497         bool is_aa64;
9498 
9499         switch (new_el) {
9500         case 3:
9501             is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0;
9502             break;
9503         case 2:
9504             is_aa64 = (env->cp15.hcr_el2 & HCR_RW) != 0;
9505             break;
9506         case 1:
9507             is_aa64 = is_a64(env);
9508             break;
9509         default:
9510             g_assert_not_reached();
9511         }
9512 
9513         if (is_aa64) {
9514             addr += 0x400;
9515         } else {
9516             addr += 0x600;
9517         }
9518     } else if (pstate_read(env) & PSTATE_SP) {
9519         addr += 0x200;
9520     }
9521 
9522     switch (cs->exception_index) {
9523     case EXCP_PREFETCH_ABORT:
9524     case EXCP_DATA_ABORT:
9525         env->cp15.far_el[new_el] = env->exception.vaddress;
9526         qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
9527                       env->cp15.far_el[new_el]);
9528         /* fall through */
9529     case EXCP_BKPT:
9530     case EXCP_UDEF:
9531     case EXCP_SWI:
9532     case EXCP_HVC:
9533     case EXCP_HYP_TRAP:
9534     case EXCP_SMC:
9535         if (syn_get_ec(env->exception.syndrome) == EC_ADVSIMDFPACCESSTRAP) {
9536             /*
9537              * QEMU internal FP/SIMD syndromes from AArch32 include the
9538              * TA and coproc fields which are only exposed if the exception
9539              * is taken to AArch32 Hyp mode. Mask them out to get a valid
9540              * AArch64 format syndrome.
9541              */
9542             env->exception.syndrome &= ~MAKE_64BIT_MASK(0, 20);
9543         }
9544         env->cp15.esr_el[new_el] = env->exception.syndrome;
9545         break;
9546     case EXCP_IRQ:
9547     case EXCP_VIRQ:
9548         addr += 0x80;
9549         break;
9550     case EXCP_FIQ:
9551     case EXCP_VFIQ:
9552         addr += 0x100;
9553         break;
9554     case EXCP_SEMIHOST:
9555         qemu_log_mask(CPU_LOG_INT,
9556                       "...handling as semihosting call 0x%" PRIx64 "\n",
9557                       env->xregs[0]);
9558         env->xregs[0] = do_arm_semihosting(env);
9559         return;
9560     default:
9561         cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
9562     }
9563 
9564     if (is_a64(env)) {
9565         env->banked_spsr[aarch64_banked_spsr_index(new_el)] = pstate_read(env);
9566         aarch64_save_sp(env, arm_current_el(env));
9567         env->elr_el[new_el] = env->pc;
9568     } else {
9569         env->banked_spsr[aarch64_banked_spsr_index(new_el)] = cpsr_read(env);
9570         env->elr_el[new_el] = env->regs[15];
9571 
9572         aarch64_sync_32_to_64(env);
9573 
9574         env->condexec_bits = 0;
9575     }
9576     qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n",
9577                   env->elr_el[new_el]);
9578 
9579     pstate_write(env, PSTATE_DAIF | new_mode);
9580     env->aarch64 = 1;
9581     aarch64_restore_sp(env, new_el);
9582 
9583     env->pc = addr;
9584 
9585     qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n",
9586                   new_el, env->pc, pstate_read(env));
9587 }
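
/*
 * Worked example of the vector arithmetic above (illustrative only): an
 * IRQ taken from AArch64 EL0 to EL1 uses the "lower EL using AArch64"
 * base, so it vectors to VBAR_EL1 + 0x400 + 0x80; the same IRQ taken
 * from EL1 itself with SP_EL1 selected uses VBAR_EL1 + 0x200 + 0x80.
 */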
9588 
9589 static inline bool check_for_semihosting(CPUState *cs)
9590 {
9591     /* Check whether this exception is a semihosting call; if so
9592      * then handle it and return true; otherwise return false.
9593      */
9594     ARMCPU *cpu = ARM_CPU(cs);
9595     CPUARMState *env = &cpu->env;
9596 
9597     if (is_a64(env)) {
9598         if (cs->exception_index == EXCP_SEMIHOST) {
9599             /* This is always the 64-bit semihosting exception.
9600              * The "is this usermode" and "is semihosting enabled"
9601              * checks have been done at translate time.
9602              */
9603             qemu_log_mask(CPU_LOG_INT,
9604                           "...handling as semihosting call 0x%" PRIx64 "\n",
9605                           env->xregs[0]);
9606             env->xregs[0] = do_arm_semihosting(env);
9607             return true;
9608         }
9609         return false;
9610     } else {
9611         uint32_t imm;
9612 
9613         /* Only intercept calls from privileged modes, to provide some
9614          * semblance of security.
9615          */
9616         if (cs->exception_index != EXCP_SEMIHOST &&
9617             (!semihosting_enabled() ||
9618              ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR))) {
9619             return false;
9620         }
9621 
9622         switch (cs->exception_index) {
9623         case EXCP_SEMIHOST:
9624             /* This is always a semihosting call; the "is this usermode"
9625              * and "is semihosting enabled" checks have been done at
9626              * translate time.
9627              */
9628             break;
9629         case EXCP_SWI:
9630             /* Check for semihosting interrupt.  */
9631             if (env->thumb) {
9632                 imm = arm_lduw_code(env, env->regs[15] - 2, arm_sctlr_b(env))
9633                     & 0xff;
9634                 if (imm == 0xab) {
9635                     break;
9636                 }
9637             } else {
9638                 imm = arm_ldl_code(env, env->regs[15] - 4, arm_sctlr_b(env))
9639                     & 0xffffff;
9640                 if (imm == 0x123456) {
9641                     break;
9642                 }
9643             }
9644             return false;
9645         case EXCP_BKPT:
9646             /* See if this is a semihosting syscall.  */
9647             if (env->thumb) {
9648                 imm = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env))
9649                     & 0xff;
9650                 if (imm == 0xab) {
9651                     env->regs[15] += 2;
9652                     break;
9653                 }
9654             }
9655             return false;
9656         default:
9657             return false;
9658         }
9659 
9660         qemu_log_mask(CPU_LOG_INT,
9661                       "...handling as semihosting call 0x%x\n",
9662                       env->regs[0]);
9663         env->regs[0] = do_arm_semihosting(env);
9664         return true;
9665     }
9666 }
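
/*
 * For reference, the AArch32 semihosting immediates recognised above are
 * "svc 0xab" in Thumb state, "svc 0x123456" in ARM state, and "bkpt 0xab"
 * in Thumb state; the operation number is passed in r0 (x0 for AArch64)
 * and the result is returned in the same register.
 */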
9667 
9668 /* Handle a CPU exception for A and R profile CPUs.
9669  * Do any appropriate logging, handle PSCI calls, and then hand off
9670  * to the AArch64-entry or AArch32-entry function depending on the
9671  * target exception level's register width.
9672  */
9673 void arm_cpu_do_interrupt(CPUState *cs)
9674 {
9675     ARMCPU *cpu = ARM_CPU(cs);
9676     CPUARMState *env = &cpu->env;
9677     unsigned int new_el = env->exception.target_el;
9678 
9679     assert(!arm_feature(env, ARM_FEATURE_M));
9680 
9681     arm_log_exception(cs->exception_index);
9682     qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env),
9683                   new_el);
9684     if (qemu_loglevel_mask(CPU_LOG_INT)
9685         && !excp_is_internal(cs->exception_index)) {
9686         qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n",
9687                       syn_get_ec(env->exception.syndrome),
9688                       env->exception.syndrome);
9689     }
9690 
9691     if (arm_is_psci_call(cpu, cs->exception_index)) {
9692         arm_handle_psci_call(cpu);
9693         qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
9694         return;
9695     }
9696 
9697     /* Semihosting semantics depend on the register width of the
9698      * code that caused the exception, not the target exception level,
9699      * so must be handled here.
9700      */
9701     if (check_for_semihosting(cs)) {
9702         return;
9703     }
9704 
    /* Hooks may change global state, so the BQL should be held; it must
     * also be held for any modification of cs->interrupt_request.
     */
9709     g_assert(qemu_mutex_iothread_locked());
9710 
9711     arm_call_pre_el_change_hook(cpu);
9712 
9713     assert(!excp_is_internal(cs->exception_index));
9714     if (arm_el_is_aa64(env, new_el)) {
9715         arm_cpu_do_interrupt_aarch64(cs);
9716     } else {
9717         arm_cpu_do_interrupt_aarch32(cs);
9718     }
9719 
9720     arm_call_el_change_hook(cpu);
9721 
9722     if (!kvm_enabled()) {
9723         cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
9724     }
9725 }
9726 #endif /* !CONFIG_USER_ONLY */
9727 
9728 /* Return the exception level which controls this address translation regime */
9729 static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
9730 {
9731     switch (mmu_idx) {
9732     case ARMMMUIdx_S2NS:
9733     case ARMMMUIdx_S1E2:
9734         return 2;
9735     case ARMMMUIdx_S1E3:
9736         return 3;
9737     case ARMMMUIdx_S1SE0:
9738         return arm_el_is_aa64(env, 3) ? 1 : 3;
9739     case ARMMMUIdx_S1SE1:
9740     case ARMMMUIdx_S1NSE0:
9741     case ARMMMUIdx_S1NSE1:
9742     case ARMMMUIdx_MPrivNegPri:
9743     case ARMMMUIdx_MUserNegPri:
9744     case ARMMMUIdx_MPriv:
9745     case ARMMMUIdx_MUser:
9746     case ARMMMUIdx_MSPrivNegPri:
9747     case ARMMMUIdx_MSUserNegPri:
9748     case ARMMMUIdx_MSPriv:
9749     case ARMMMUIdx_MSUser:
9750         return 1;
9751     default:
9752         g_assert_not_reached();
9753     }
9754 }
9755 
9756 #ifndef CONFIG_USER_ONLY
9757 
9758 /* Return the SCTLR value which controls this address translation regime */
9759 static inline uint32_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
9760 {
9761     return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
9762 }
9763 
9764 /* Return true if the specified stage of address translation is disabled */
9765 static inline bool regime_translation_disabled(CPUARMState *env,
9766                                                ARMMMUIdx mmu_idx)
9767 {
9768     if (arm_feature(env, ARM_FEATURE_M)) {
9769         switch (env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] &
9770                 (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
9771         case R_V7M_MPU_CTRL_ENABLE_MASK:
9772             /* Enabled, but not for HardFault and NMI */
9773             return mmu_idx & ARM_MMU_IDX_M_NEGPRI;
9774         case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK:
9775             /* Enabled for all cases */
9776             return false;
9777         case 0:
9778         default:
9779             /* HFNMIENA set and ENABLE clear is UNPREDICTABLE, but
9780              * we warned about that in armv7m_nvic.c when the guest set it.
9781              */
9782             return true;
9783         }
9784     }
9785 
9786     if (mmu_idx == ARMMMUIdx_S2NS) {
9787         /* HCR.DC means HCR.VM behaves as 1 */
9788         return (env->cp15.hcr_el2 & (HCR_DC | HCR_VM)) == 0;
9789     }
9790 
9791     if (env->cp15.hcr_el2 & HCR_TGE) {
9792         /* TGE means that NS EL0/1 act as if SCTLR_EL1.M is zero */
9793         if (!regime_is_secure(env, mmu_idx) && regime_el(env, mmu_idx) == 1) {
9794             return true;
9795         }
9796     }
9797 
9798     if ((env->cp15.hcr_el2 & HCR_DC) &&
9799         (mmu_idx == ARMMMUIdx_S1NSE0 || mmu_idx == ARMMMUIdx_S1NSE1)) {
9800         /* HCR.DC means SCTLR_EL1.M behaves as 0 */
9801         return true;
9802     }
9803 
9804     return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
9805 }
9806 
9807 static inline bool regime_translation_big_endian(CPUARMState *env,
9808                                                  ARMMMUIdx mmu_idx)
9809 {
9810     return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
9811 }
9812 
9813 /* Return the TTBR associated with this translation regime */
9814 static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx,
9815                                    int ttbrn)
9816 {
9817     if (mmu_idx == ARMMMUIdx_S2NS) {
9818         return env->cp15.vttbr_el2;
9819     }
9820     if (ttbrn == 0) {
9821         return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
9822     } else {
9823         return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
9824     }
9825 }
9826 
9827 #endif /* !CONFIG_USER_ONLY */
9828 
9829 /* Return the TCR controlling this translation regime */
9830 static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
9831 {
9832     if (mmu_idx == ARMMMUIdx_S2NS) {
9833         return &env->cp15.vtcr_el2;
9834     }
9835     return &env->cp15.tcr_el[regime_el(env, mmu_idx)];
9836 }
9837 
9838 /* Convert a possible stage1+2 MMU index into the appropriate
9839  * stage 1 MMU index
9840  */
9841 static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
9842 {
9843     if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
9844         mmu_idx += (ARMMMUIdx_S1NSE0 - ARMMMUIdx_S12NSE0);
9845     }
9846     return mmu_idx;
9847 }
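
/*
 * E.g. stage_1_mmu_idx(ARMMMUIdx_S12NSE1) == ARMMMUIdx_S1NSE1: the
 * arithmetic above relies on the S1NSE* indexes being laid out at a
 * fixed offset from the corresponding S12NSE* indexes.
 */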
9848 
9849 /* Return true if the translation regime is using LPAE format page tables */
9850 static inline bool regime_using_lpae_format(CPUARMState *env,
9851                                             ARMMMUIdx mmu_idx)
9852 {
9853     int el = regime_el(env, mmu_idx);
9854     if (el == 2 || arm_el_is_aa64(env, el)) {
9855         return true;
9856     }
9857     if (arm_feature(env, ARM_FEATURE_LPAE)
9858         && (regime_tcr(env, mmu_idx)->raw_tcr & TTBCR_EAE)) {
9859         return true;
9860     }
9861     return false;
9862 }
9863 
9864 /* Returns true if the stage 1 translation regime is using LPAE format page
9865  * tables. Used when raising alignment exceptions, whose FSR changes depending
9866  * on whether the long or short descriptor format is in use. */
9867 bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
9868 {
9869     mmu_idx = stage_1_mmu_idx(mmu_idx);
9870 
9871     return regime_using_lpae_format(env, mmu_idx);
9872 }
9873 
9874 #ifndef CONFIG_USER_ONLY
9875 static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
9876 {
9877     switch (mmu_idx) {
9878     case ARMMMUIdx_S1SE0:
9879     case ARMMMUIdx_S1NSE0:
9880     case ARMMMUIdx_MUser:
9881     case ARMMMUIdx_MSUser:
9882     case ARMMMUIdx_MUserNegPri:
9883     case ARMMMUIdx_MSUserNegPri:
9884         return true;
9885     default:
9886         return false;
9887     case ARMMMUIdx_S12NSE0:
9888     case ARMMMUIdx_S12NSE1:
9889         g_assert_not_reached();
9890     }
9891 }
9892 
9893 /* Translate section/page access permissions to page
9894  * R/W protection flags
9895  *
9896  * @env:         CPUARMState
9897  * @mmu_idx:     MMU index indicating required translation regime
9898  * @ap:          The 3-bit access permissions (AP[2:0])
9899  * @domain_prot: The 2-bit domain access permissions
9900  */
9901 static inline int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx,
9902                                 int ap, int domain_prot)
9903 {
9904     bool is_user = regime_is_user(env, mmu_idx);
9905 
9906     if (domain_prot == 3) {
9907         return PAGE_READ | PAGE_WRITE;
9908     }
9909 
9910     switch (ap) {
9911     case 0:
9912         if (arm_feature(env, ARM_FEATURE_V7)) {
9913             return 0;
9914         }
9915         switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) {
9916         case SCTLR_S:
9917             return is_user ? 0 : PAGE_READ;
9918         case SCTLR_R:
9919             return PAGE_READ;
9920         default:
9921             return 0;
9922         }
9923     case 1:
9924         return is_user ? 0 : PAGE_READ | PAGE_WRITE;
9925     case 2:
9926         if (is_user) {
9927             return PAGE_READ;
9928         } else {
9929             return PAGE_READ | PAGE_WRITE;
9930         }
9931     case 3:
9932         return PAGE_READ | PAGE_WRITE;
9933     case 4: /* Reserved.  */
9934         return 0;
9935     case 5:
9936         return is_user ? 0 : PAGE_READ;
9937     case 6:
9938         return PAGE_READ;
9939     case 7:
9940         if (!arm_feature(env, ARM_FEATURE_V6K)) {
9941             return 0;
9942         }
9943         return PAGE_READ;
9944     default:
9945         g_assert_not_reached();
9946     }
9947 }
9948 
9949 /* Translate section/page access permissions to page
9950  * R/W protection flags.
9951  *
9952  * @ap:      The 2-bit simple AP (AP[2:1])
9953  * @is_user: TRUE if accessing from PL0
9954  */
9955 static inline int simple_ap_to_rw_prot_is_user(int ap, bool is_user)
9956 {
9957     switch (ap) {
9958     case 0:
9959         return is_user ? 0 : PAGE_READ | PAGE_WRITE;
9960     case 1:
9961         return PAGE_READ | PAGE_WRITE;
9962     case 2:
9963         return is_user ? 0 : PAGE_READ;
9964     case 3:
9965         return PAGE_READ;
9966     default:
9967         g_assert_not_reached();
9968     }
9969 }
9970 
9971 static inline int
9972 simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
9973 {
9974     return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx));
9975 }
9976 
9977 /* Translate S2 section/page access permissions to protection flags
9978  *
9979  * @env:     CPUARMState
9980  * @s2ap:    The 2-bit stage2 access permissions (S2AP)
9981  * @xn:      XN (execute-never) bit
9982  */
9983 static int get_S2prot(CPUARMState *env, int s2ap, int xn)
9984 {
9985     int prot = 0;
9986 
9987     if (s2ap & 1) {
9988         prot |= PAGE_READ;
9989     }
9990     if (s2ap & 2) {
9991         prot |= PAGE_WRITE;
9992     }
9993     if (!xn) {
9994         if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
9995             prot |= PAGE_EXEC;
9996         }
9997     }
9998     return prot;
9999 }
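
/*
 * E.g. S2AP = 3 (read/write) with XN clear yields
 * PAGE_READ | PAGE_WRITE | PAGE_EXEC, while S2AP = 2 (write-only) with
 * XN clear under an AArch32 EL2 yields only PAGE_WRITE: execute
 * permission additionally requires read unless EL2 is AArch64.
 */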
10000 
10001 /* Translate section/page access permissions to protection flags
10002  *
10003  * @env:     CPUARMState
10004  * @mmu_idx: MMU index indicating required translation regime
10005  * @is_aa64: TRUE if AArch64
10006  * @ap:      The 2-bit simple AP (AP[2:1])
10007  * @ns:      NS (non-secure) bit
10008  * @xn:      XN (execute-never) bit
10009  * @pxn:     PXN (privileged execute-never) bit
10010  */
10011 static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
10012                       int ap, int ns, int xn, int pxn)
10013 {
10014     bool is_user = regime_is_user(env, mmu_idx);
10015     int prot_rw, user_rw;
10016     bool have_wxn;
10017     int wxn = 0;
10018 
10019     assert(mmu_idx != ARMMMUIdx_S2NS);
10020 
10021     user_rw = simple_ap_to_rw_prot_is_user(ap, true);
10022     if (is_user) {
10023         prot_rw = user_rw;
10024     } else {
10025         prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
10026     }
10027 
10028     if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) {
10029         return prot_rw;
10030     }
10031 
10032     /* TODO have_wxn should be replaced with
10033      *   ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2)
10034      * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE
10035      * compatible processors have EL2, which is required for [U]WXN.
10036      */
10037     have_wxn = arm_feature(env, ARM_FEATURE_LPAE);
10038 
10039     if (have_wxn) {
10040         wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN;
10041     }
10042 
10043     if (is_aa64) {
10044         switch (regime_el(env, mmu_idx)) {
10045         case 1:
10046             if (!is_user) {
10047                 xn = pxn || (user_rw & PAGE_WRITE);
10048             }
10049             break;
10050         case 2:
10051         case 3:
10052             break;
10053         }
10054     } else if (arm_feature(env, ARM_FEATURE_V7)) {
10055         switch (regime_el(env, mmu_idx)) {
10056         case 1:
10057         case 3:
10058             if (is_user) {
10059                 xn = xn || !(user_rw & PAGE_READ);
10060             } else {
10061                 int uwxn = 0;
10062                 if (have_wxn) {
10063                     uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN;
10064                 }
10065                 xn = xn || !(prot_rw & PAGE_READ) || pxn ||
10066                      (uwxn && (user_rw & PAGE_WRITE));
10067             }
10068             break;
10069         case 2:
10070             break;
10071         }
10072     } else {
10073         xn = wxn = 0;
10074     }
10075 
10076     if (xn || (wxn && (prot_rw & PAGE_WRITE))) {
10077         return prot_rw;
10078     }
10079     return prot_rw | PAGE_EXEC;
10080 }
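
/*
 * Worked example (assuming SCTLR.WXN is clear): in an AArch64 EL1 regime,
 * a page with AP[2:1] = 01 (read/write at any privilege) is never
 * privileged-executable, because user_rw includes PAGE_WRITE and the
 * "writable by EL0" clause above forces xn for the privileged lookup;
 * the EL0 lookup still gets PAGE_READ | PAGE_WRITE | PAGE_EXEC when the
 * descriptor's xn and pxn bits are clear.
 */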
10081 
10082 static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
10083                                      uint32_t *table, uint32_t address)
10084 {
10085     /* Note that we can only get here for an AArch32 PL0/PL1 lookup */
10086     TCR *tcr = regime_tcr(env, mmu_idx);
10087 
10088     if (address & tcr->mask) {
10089         if (tcr->raw_tcr & TTBCR_PD1) {
10090             /* Translation table walk disabled for TTBR1 */
10091             return false;
10092         }
10093         *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000;
10094     } else {
10095         if (tcr->raw_tcr & TTBCR_PD0) {
10096             /* Translation table walk disabled for TTBR0 */
10097             return false;
10098         }
10099         *table = regime_ttbr(env, mmu_idx, 0) & tcr->base_mask;
10100     }
10101     *table |= (address >> 18) & 0x3ffc;
10102     return true;
10103 }
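
/*
 * The final OR above indexes the 4-byte L1 entries by VA[31:20]: e.g.
 * for address 0x00100000 the index is 1, so (address >> 18) & 0x3ffc
 * == 0x4 is added to the table base.
 */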
10104 
10105 /* Translate a S1 pagetable walk through S2 if needed.  */
10106 static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
10107                                hwaddr addr, MemTxAttrs txattrs,
10108                                ARMMMUFaultInfo *fi)
10109 {
10110     if ((mmu_idx == ARMMMUIdx_S1NSE0 || mmu_idx == ARMMMUIdx_S1NSE1) &&
10111         !regime_translation_disabled(env, ARMMMUIdx_S2NS)) {
10112         target_ulong s2size;
10113         hwaddr s2pa;
10114         int s2prot;
10115         int ret;
10116         ARMCacheAttrs cacheattrs = {};
10117         ARMCacheAttrs *pcacheattrs = NULL;
10118 
10119         if (env->cp15.hcr_el2 & HCR_PTW) {
10120             /*
10121              * PTW means we must fault if this S1 walk touches S2 Device
10122              * memory; otherwise we don't care about the attributes and can
10123              * save the S2 translation the effort of computing them.
10124              */
10125             pcacheattrs = &cacheattrs;
10126         }
10127 
10128         ret = get_phys_addr_lpae(env, addr, 0, ARMMMUIdx_S2NS, &s2pa,
10129                                  &txattrs, &s2prot, &s2size, fi, pcacheattrs);
10130         if (ret) {
10131             assert(fi->type != ARMFault_None);
10132             fi->s2addr = addr;
10133             fi->stage2 = true;
10134             fi->s1ptw = true;
10135             return ~0;
10136         }
10137         if (pcacheattrs && (pcacheattrs->attrs & 0xf0) == 0) {
10138             /* Access was to Device memory: generate Permission fault */
10139             fi->type = ARMFault_Permission;
10140             fi->s2addr = addr;
10141             fi->stage2 = true;
10142             fi->s1ptw = true;
10143             return ~0;
10144         }
10145         addr = s2pa;
10146     }
10147     return addr;
10148 }
10149 
10150 /* All loads done in the course of a page table walk go through here. */
10151 static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure,
10152                             ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
10153 {
10154     ARMCPU *cpu = ARM_CPU(cs);
10155     CPUARMState *env = &cpu->env;
10156     MemTxAttrs attrs = {};
10157     MemTxResult result = MEMTX_OK;
10158     AddressSpace *as;
10159     uint32_t data;
10160 
10161     attrs.secure = is_secure;
10162     as = arm_addressspace(cs, attrs);
10163     addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi);
10164     if (fi->s1ptw) {
10165         return 0;
10166     }
10167     if (regime_translation_big_endian(env, mmu_idx)) {
10168         data = address_space_ldl_be(as, addr, attrs, &result);
10169     } else {
10170         data = address_space_ldl_le(as, addr, attrs, &result);
10171     }
10172     if (result == MEMTX_OK) {
10173         return data;
10174     }
10175     fi->type = ARMFault_SyncExternalOnWalk;
10176     fi->ea = arm_extabort_type(result);
10177     return 0;
10178 }
10179 
10180 static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure,
10181                             ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
10182 {
10183     ARMCPU *cpu = ARM_CPU(cs);
10184     CPUARMState *env = &cpu->env;
10185     MemTxAttrs attrs = {};
10186     MemTxResult result = MEMTX_OK;
10187     AddressSpace *as;
10188     uint64_t data;
10189 
10190     attrs.secure = is_secure;
10191     as = arm_addressspace(cs, attrs);
10192     addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi);
10193     if (fi->s1ptw) {
10194         return 0;
10195     }
10196     if (regime_translation_big_endian(env, mmu_idx)) {
10197         data = address_space_ldq_be(as, addr, attrs, &result);
10198     } else {
10199         data = address_space_ldq_le(as, addr, attrs, &result);
10200     }
10201     if (result == MEMTX_OK) {
10202         return data;
10203     }
10204     fi->type = ARMFault_SyncExternalOnWalk;
10205     fi->ea = arm_extabort_type(result);
10206     return 0;
10207 }
10208 
10209 static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
10210                              MMUAccessType access_type, ARMMMUIdx mmu_idx,
10211                              hwaddr *phys_ptr, int *prot,
10212                              target_ulong *page_size,
10213                              ARMMMUFaultInfo *fi)
10214 {
10215     CPUState *cs = CPU(arm_env_get_cpu(env));
10216     int level = 1;
10217     uint32_t table;
10218     uint32_t desc;
10219     int type;
10220     int ap;
10221     int domain = 0;
10222     int domain_prot;
10223     hwaddr phys_addr;
10224     uint32_t dacr;
10225 
10226     /* Pagetable walk.  */
10227     /* Lookup l1 descriptor.  */
10228     if (!get_level1_table_address(env, mmu_idx, &table, address)) {
10229         /* Section translation fault if page walk is disabled by PD0 or PD1 */
10230         fi->type = ARMFault_Translation;
10231         goto do_fault;
10232     }
10233     desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
10234                        mmu_idx, fi);
10235     if (fi->type != ARMFault_None) {
10236         goto do_fault;
10237     }
10238     type = (desc & 3);
10239     domain = (desc >> 5) & 0x0f;
10240     if (regime_el(env, mmu_idx) == 1) {
10241         dacr = env->cp15.dacr_ns;
10242     } else {
10243         dacr = env->cp15.dacr_s;
10244     }
10245     domain_prot = (dacr >> (domain * 2)) & 3;
10246     if (type == 0) {
10247         /* Section translation fault.  */
10248         fi->type = ARMFault_Translation;
10249         goto do_fault;
10250     }
10251     if (type != 2) {
10252         level = 2;
10253     }
10254     if (domain_prot == 0 || domain_prot == 2) {
10255         fi->type = ARMFault_Domain;
10256         goto do_fault;
10257     }
10258     if (type == 2) {
10259         /* 1Mb section.  */
10260         phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
10261         ap = (desc >> 10) & 3;
10262         *page_size = 1024 * 1024;
10263     } else {
10264         /* Lookup l2 entry.  */
10265         if (type == 1) {
10266             /* Coarse pagetable.  */
10267             table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
10268         } else {
10269             /* Fine pagetable.  */
10270             table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
10271         }
10272         desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
10273                            mmu_idx, fi);
10274         if (fi->type != ARMFault_None) {
10275             goto do_fault;
10276         }
10277         switch (desc & 3) {
10278         case 0: /* Page translation fault.  */
10279             fi->type = ARMFault_Translation;
10280             goto do_fault;
10281         case 1: /* 64k page.  */
10282             phys_addr = (desc & 0xffff0000) | (address & 0xffff);
10283             ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
10284             *page_size = 0x10000;
10285             break;
10286         case 2: /* 4k page.  */
10287             phys_addr = (desc & 0xfffff000) | (address & 0xfff);
10288             ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
10289             *page_size = 0x1000;
10290             break;
10291         case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */
10292             if (type == 1) {
10293                 /* ARMv6/XScale extended small page format */
10294                 if (arm_feature(env, ARM_FEATURE_XSCALE)
10295                     || arm_feature(env, ARM_FEATURE_V6)) {
10296                     phys_addr = (desc & 0xfffff000) | (address & 0xfff);
10297                     *page_size = 0x1000;
10298                 } else {
10299                     /* UNPREDICTABLE in ARMv5; we choose to take a
10300                      * page translation fault.
10301                      */
10302                     fi->type = ARMFault_Translation;
10303                     goto do_fault;
10304                 }
10305             } else {
10306                 phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
10307                 *page_size = 0x400;
10308             }
10309             ap = (desc >> 4) & 3;
10310             break;
10311         default:
10312             /* Never happens, but compiler isn't smart enough to tell.  */
10313             abort();
10314         }
10315     }
10316     *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
10317     *prot |= *prot ? PAGE_EXEC : 0;
10318     if (!(*prot & (1 << access_type))) {
10319         /* Access permission fault.  */
10320         fi->type = ARMFault_Permission;
10321         goto do_fault;
10322     }
10323     *phys_ptr = phys_addr;
10324     return false;
10325 do_fault:
10326     fi->domain = domain;
10327     fi->level = level;
10328     return true;
10329 }
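
/*
 * Worked example (illustrative values): an L1 descriptor of 0x10000c02
 * is a 1MB section (type 2) with PA base 0x10000000, domain 0 and
 * AP = 3, so for VA 0x00023456 the code above returns phys_addr
 * 0x10023456 with PAGE_READ | PAGE_WRITE | PAGE_EXEC, assuming the DACR
 * grants at least client access to domain 0.
 */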
10330 
10331 static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
10332                              MMUAccessType access_type, ARMMMUIdx mmu_idx,
10333                              hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
10334                              target_ulong *page_size, ARMMMUFaultInfo *fi)
10335 {
10336     CPUState *cs = CPU(arm_env_get_cpu(env));
10337     int level = 1;
10338     uint32_t table;
10339     uint32_t desc;
10340     uint32_t xn;
10341     uint32_t pxn = 0;
10342     int type;
10343     int ap;
10344     int domain = 0;
10345     int domain_prot;
10346     hwaddr phys_addr;
10347     uint32_t dacr;
10348     bool ns;
10349 
10350     /* Pagetable walk.  */
10351     /* Lookup l1 descriptor.  */
10352     if (!get_level1_table_address(env, mmu_idx, &table, address)) {
10353         /* Section translation fault if page walk is disabled by PD0 or PD1 */
10354         fi->type = ARMFault_Translation;
10355         goto do_fault;
10356     }
10357     desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
10358                        mmu_idx, fi);
10359     if (fi->type != ARMFault_None) {
10360         goto do_fault;
10361     }
10362     type = (desc & 3);
10363     if (type == 0 || (type == 3 && !arm_feature(env, ARM_FEATURE_PXN))) {
10364         /* Section translation fault, or attempt to use the encoding
10365          * which is Reserved on implementations without PXN.
10366          */
10367         fi->type = ARMFault_Translation;
10368         goto do_fault;
10369     }
10370     if ((type == 1) || !(desc & (1 << 18))) {
10371         /* Page or Section.  */
10372         domain = (desc >> 5) & 0x0f;
10373     }
10374     if (regime_el(env, mmu_idx) == 1) {
10375         dacr = env->cp15.dacr_ns;
10376     } else {
10377         dacr = env->cp15.dacr_s;
10378     }
10379     if (type == 1) {
10380         level = 2;
10381     }
10382     domain_prot = (dacr >> (domain * 2)) & 3;
10383     if (domain_prot == 0 || domain_prot == 2) {
10384         /* Section or Page domain fault */
10385         fi->type = ARMFault_Domain;
10386         goto do_fault;
10387     }
10388     if (type != 1) {
10389         if (desc & (1 << 18)) {
10390             /* Supersection.  */
10391             phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
10392             phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32;
10393             phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36;
10394             *page_size = 0x1000000;
10395         } else {
10396             /* Section.  */
10397             phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
10398             *page_size = 0x100000;
10399         }
10400         ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
10401         xn = desc & (1 << 4);
10402         pxn = desc & 1;
10403         ns = extract32(desc, 19, 1);
10404     } else {
10405         if (arm_feature(env, ARM_FEATURE_PXN)) {
10406             pxn = (desc >> 2) & 1;
10407         }
10408         ns = extract32(desc, 3, 1);
10409         /* Lookup l2 entry.  */
10410         table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
10411         desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
10412                            mmu_idx, fi);
10413         if (fi->type != ARMFault_None) {
10414             goto do_fault;
10415         }
10416         ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
10417         switch (desc & 3) {
10418         case 0: /* Page translation fault.  */
10419             fi->type = ARMFault_Translation;
10420             goto do_fault;
10421         case 1: /* 64k page.  */
10422             phys_addr = (desc & 0xffff0000) | (address & 0xffff);
10423             xn = desc & (1 << 15);
10424             *page_size = 0x10000;
10425             break;
10426         case 2: case 3: /* 4k page.  */
10427             phys_addr = (desc & 0xfffff000) | (address & 0xfff);
10428             xn = desc & 1;
10429             *page_size = 0x1000;
10430             break;
10431         default:
10432             /* Never happens, but compiler isn't smart enough to tell.  */
10433             abort();
10434         }
10435     }
10436     if (domain_prot == 3) {
10437         *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
10438     } else {
10439         if (pxn && !regime_is_user(env, mmu_idx)) {
10440             xn = 1;
10441         }
10442         if (xn && access_type == MMU_INST_FETCH) {
10443             fi->type = ARMFault_Permission;
10444             goto do_fault;
10445         }
10446 
10447         if (arm_feature(env, ARM_FEATURE_V6K) &&
10448                 (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
10449             /* The simplified model uses AP[0] as an access control bit.  */
10450             if ((ap & 1) == 0) {
10451                 /* Access flag fault.  */
10452                 fi->type = ARMFault_AccessFlag;
10453                 goto do_fault;
10454             }
10455             *prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
10456         } else {
10457             *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
10458         }
10459         if (*prot && !xn) {
10460             *prot |= PAGE_EXEC;
10461         }
10462         if (!(*prot & (1 << access_type))) {
10463             /* Access permission fault.  */
10464             fi->type = ARMFault_Permission;
10465             goto do_fault;
10466         }
10467     }
10468     if (ns) {
10469         /* The NS bit will (as required by the architecture) have no effect if
10470          * the CPU doesn't support TZ or this is a non-secure translation
10471          * regime, because the attribute will already be non-secure.
10472          */
10473         attrs->secure = false;
10474     }
10475     *phys_ptr = phys_addr;
10476     return false;
10477 do_fault:
10478     fi->domain = domain;
10479     fi->level = level;
10480     return true;
10481 }
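
/*
 * Worked example of the supersection address composition above
 * (illustrative values, address bits only): desc = 0x40240c22 is a 16MB
 * supersection (type 2, bit 18 set) with PA[39:36] = 1 from desc[8:5]
 * and PA[35:32] = 2 from desc[23:20], so VA 0x00abcdef maps to the
 * 40-bit PA 0x12_40ab_cdef.
 */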
10482 
10483 /*
10484  * check_s2_mmu_setup
10485  * @cpu:        ARMCPU
10486  * @is_aa64:    True if the translation regime is in AArch64 state
 * @level:      Suggested starting level
10488  * @inputsize:  Bitsize of IPAs
10489  * @stride:     Page-table stride (See the ARM ARM)
10490  *
10491  * Returns true if the suggested S2 translation parameters are OK and
10492  * false otherwise.
10493  */
10494 static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
10495                                int inputsize, int stride)
10496 {
10497     const int grainsize = stride + 3;
10498     int startsizecheck;
10499 
10500     /* Negative levels are never allowed.  */
10501     if (level < 0) {
10502         return false;
10503     }
10504 
10505     startsizecheck = inputsize - ((3 - level) * stride + grainsize);
10506     if (startsizecheck < 1 || startsizecheck > stride + 4) {
10507         return false;
10508     }
10509 
10510     if (is_aa64) {
10511         CPUARMState *env = &cpu->env;
10512         unsigned int pamax = arm_pamax(cpu);
10513 
10514         switch (stride) {
10515         case 13: /* 64KB Pages.  */
10516             if (level == 0 || (level == 1 && pamax <= 42)) {
10517                 return false;
10518             }
10519             break;
10520         case 11: /* 16KB Pages.  */
10521             if (level == 0 || (level == 1 && pamax <= 40)) {
10522                 return false;
10523             }
10524             break;
10525         case 9: /* 4KB Pages.  */
10526             if (level == 0 && pamax <= 42) {
10527                 return false;
10528             }
10529             break;
10530         default:
10531             g_assert_not_reached();
10532         }
10533 
10534         /* Inputsize checks.  */
10535         if (inputsize > pamax &&
10536             (arm_el_is_aa64(env, 1) || inputsize > 40)) {
10537             /* This is CONSTRAINED UNPREDICTABLE and we choose to fault.  */
10538             return false;
10539         }
10540     } else {
10541         /* AArch32 only supports 4KB pages. Assert on that.  */
10542         assert(stride == 9);
10543 
10544         if (level == 0) {
10545             return false;
10546         }
10547     }
10548     return true;
10549 }
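
/*
 * E.g. with 4KB pages (stride 9, grainsize 12) a suggested start level
 * of 1 for a 40-bit IPA space gives startsizecheck = 40 - (2 * 9 + 12)
 * = 10, which lies within [1, stride + 4], so the check passes (subject
 * to the PAMax constraints above).
 */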
10550 
10551 /* Translate from the 4-bit stage 2 representation of
10552  * memory attributes (without cache-allocation hints) to
10553  * the 8-bit representation of the stage 1 MAIR registers
10554  * (which includes allocation hints).
10555  *
10556  * ref: shared/translation/attrs/S2AttrDecode()
10557  *      .../S2ConvertAttrsHints()
10558  */
10559 static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs)
10560 {
10561     uint8_t hiattr = extract32(s2attrs, 2, 2);
10562     uint8_t loattr = extract32(s2attrs, 0, 2);
10563     uint8_t hihint = 0, lohint = 0;
10564 
10565     if (hiattr != 0) { /* normal memory */
10566         if ((env->cp15.hcr_el2 & HCR_CD) != 0) { /* cache disabled */
10567             hiattr = loattr = 1; /* non-cacheable */
10568         } else {
10569             if (hiattr != 1) { /* Write-through or write-back */
10570                 hihint = 3; /* RW allocate */
10571             }
10572             if (loattr != 1) { /* Write-through or write-back */
10573                 lohint = 3; /* RW allocate */
10574             }
10575         }
10576     }
10577 
10578     return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint;
10579 }
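
/*
 * E.g. s2attrs 0xf (outer and inner write-back) becomes MAIR byte 0xff
 * (write-back, RW-allocate in both halves), while any s2attrs with the
 * high two bits zero is device memory and maps to 0x00..0x0c with no
 * allocation hints added.
 */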
10580 #endif /* !CONFIG_USER_ONLY */
10581 
10582 ARMVAParameters aa64_va_parameters_both(CPUARMState *env, uint64_t va,
10583                                         ARMMMUIdx mmu_idx)
10584 {
10585     uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
10586     uint32_t el = regime_el(env, mmu_idx);
10587     bool tbi, tbid, epd, hpd, using16k, using64k;
10588     int select, tsz;
10589 
10590     /*
10591      * Bit 55 is always between the two regions, and is canonical for
10592      * determining if address tagging is enabled.
10593      */
10594     select = extract64(va, 55, 1);
10595 
10596     if (el > 1) {
10597         tsz = extract32(tcr, 0, 6);
10598         using64k = extract32(tcr, 14, 1);
10599         using16k = extract32(tcr, 15, 1);
10600         if (mmu_idx == ARMMMUIdx_S2NS) {
10601             /* VTCR_EL2 */
10602             tbi = tbid = hpd = false;
10603         } else {
10604             tbi = extract32(tcr, 20, 1);
10605             hpd = extract32(tcr, 24, 1);
10606             tbid = extract32(tcr, 29, 1);
10607         }
10608         epd = false;
10609     } else if (!select) {
10610         tsz = extract32(tcr, 0, 6);
10611         epd = extract32(tcr, 7, 1);
10612         using64k = extract32(tcr, 14, 1);
10613         using16k = extract32(tcr, 15, 1);
10614         tbi = extract64(tcr, 37, 1);
10615         hpd = extract64(tcr, 41, 1);
10616         tbid = extract64(tcr, 51, 1);
10617     } else {
10618         int tg = extract32(tcr, 30, 2);
10619         using16k = tg == 1;
10620         using64k = tg == 3;
10621         tsz = extract32(tcr, 16, 6);
10622         epd = extract32(tcr, 23, 1);
10623         tbi = extract64(tcr, 38, 1);
10624         hpd = extract64(tcr, 42, 1);
10625         tbid = extract64(tcr, 52, 1);
10626     }
10627     tsz = MIN(tsz, 39);  /* TODO: ARMv8.4-TTST */
10628     tsz = MAX(tsz, 16);  /* TODO: ARMv8.2-LVA  */
10629 
10630     return (ARMVAParameters) {
10631         .tsz = tsz,
10632         .select = select,
10633         .tbi = tbi,
10634         .tbid = tbid,
10635         .epd = epd,
10636         .hpd = hpd,
10637         .using16k = using16k,
10638         .using64k = using64k,
10639     };
10640 }
10641 
10642 ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
10643                                    ARMMMUIdx mmu_idx, bool data)
10644 {
10645     ARMVAParameters ret = aa64_va_parameters_both(env, va, mmu_idx);
10646 
10647     /* Present TBI as a composite with TBID.  */
10648     ret.tbi &= (data || !ret.tbid);
10649     return ret;
10650 }
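
/*
 * E.g. with TCR_EL1.T0SZ = 16, TG0 = 0 and TBI0 clear, a VA with bit 55
 * clear selects the TTBR0 region and the code above yields tsz = 16
 * (a 48-bit region), using16k = using64k = false (4KB granule) and
 * tbi = false, so the top address byte is never ignored.
 */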
10651 
10652 #ifndef CONFIG_USER_ONLY
10653 static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va,
10654                                           ARMMMUIdx mmu_idx)
10655 {
10656     uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
10657     uint32_t el = regime_el(env, mmu_idx);
10658     int select, tsz;
10659     bool epd, hpd;
10660 
10661     if (mmu_idx == ARMMMUIdx_S2NS) {
10662         /* VTCR */
10663         bool sext = extract32(tcr, 4, 1);
10664         bool sign = extract32(tcr, 3, 1);
10665 
10666         /*
10667          * If the sign-extend bit is not the same as t0sz[3], the result
10668          * is unpredictable. Flag this as a guest error.
10669          */
10670         if (sign != sext) {
10671             qemu_log_mask(LOG_GUEST_ERROR,
10672                           "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n");
10673         }
10674         tsz = sextract32(tcr, 0, 4) + 8;
10675         select = 0;
10676         hpd = false;
10677         epd = false;
10678     } else if (el == 2) {
10679         /* HTCR */
10680         tsz = extract32(tcr, 0, 3);
10681         select = 0;
10682         hpd = extract64(tcr, 24, 1);
10683         epd = false;
10684     } else {
10685         int t0sz = extract32(tcr, 0, 3);
10686         int t1sz = extract32(tcr, 16, 3);
10687 
10688         if (t1sz == 0) {
10689             select = va > (0xffffffffu >> t0sz);
10690         } else {
10691             /* Note that we will detect errors later.  */
10692             select = va >= ~(0xffffffffu >> t1sz);
10693         }
10694         if (!select) {
10695             tsz = t0sz;
10696             epd = extract32(tcr, 7, 1);
10697             hpd = extract64(tcr, 41, 1);
10698         } else {
10699             tsz = t1sz;
10700             epd = extract32(tcr, 23, 1);
10701             hpd = extract64(tcr, 42, 1);
10702         }
10703         /* For aarch32, hpd0 is not enabled without t2e as well.  */
10704         hpd &= extract32(tcr, 6, 1);
10705     }
10706 
10707     return (ARMVAParameters) {
10708         .tsz = tsz,
10709         .select = select,
10710         .epd = epd,
10711         .hpd = hpd,
10712     };
10713 }
10714 
10715 static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
10716                                MMUAccessType access_type, ARMMMUIdx mmu_idx,
10717                                hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
10718                                target_ulong *page_size_ptr,
10719                                ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
10720 {
10721     ARMCPU *cpu = arm_env_get_cpu(env);
10722     CPUState *cs = CPU(cpu);
10723     /* Read an LPAE long-descriptor translation table. */
10724     ARMFaultType fault_type = ARMFault_Translation;
10725     uint32_t level;
10726     ARMVAParameters param;
10727     uint64_t ttbr;
10728     hwaddr descaddr, indexmask, indexmask_grainsize;
10729     uint32_t tableattrs;
10730     target_ulong page_size;
10731     uint32_t attrs;
10732     int32_t stride;
10733     int addrsize, inputsize;
10734     TCR *tcr = regime_tcr(env, mmu_idx);
10735     int ap, ns, xn, pxn;
10736     uint32_t el = regime_el(env, mmu_idx);
10737     bool ttbr1_valid;
10738     uint64_t descaddrmask;
10739     bool aarch64 = arm_el_is_aa64(env, el);
10740     bool guarded = false;
10741 
10742     /* TODO:
10743      * This code does not handle the different format TCR for VTCR_EL2.
10744      * This code also does not support shareability levels.
10745      * Attribute and permission bit handling should also be checked when adding
10746      * support for those page table walks.
10747      */
10748     if (aarch64) {
10749         param = aa64_va_parameters(env, address, mmu_idx,
10750                                    access_type != MMU_INST_FETCH);
10751         level = 0;
10752         /* If we are in 64-bit EL2 or EL3 then there is no TTBR1, so mark it
10753          * invalid.
10754          */
10755         ttbr1_valid = (el < 2);
10756         addrsize = 64 - 8 * param.tbi;
10757         inputsize = 64 - param.tsz;
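              /* For example, with TBI set and TSZ == 16 this gives
               * addrsize 56 and inputsize 48: bits [55:48] must then
               * match the select bit in the check below.
               */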
10758     } else {
10759         param = aa32_va_parameters(env, address, mmu_idx);
10760         level = 1;
10761         /* There is no TTBR1 for EL2 */
10762         ttbr1_valid = (el != 2);
10763         addrsize = (mmu_idx == ARMMMUIdx_S2NS ? 40 : 32);
10764         inputsize = addrsize - param.tsz;
10765     }
10766 
10767     /*
10768      * We determined the region when collecting the parameters, but we
10769      * have not yet validated that the address is valid for the region.
10770      * Extract the top bits and verify that they all match select.
10771      *
10772      * For aa32, if inputsize == addrsize, then we have selected the
10773      * region by exclusion in aa32_va_parameters and there is no more
10774      * validation to do here.
10775      */
10776     if (inputsize < addrsize) {
10777         target_ulong top_bits = sextract64(address, inputsize,
10778                                            addrsize - inputsize);
10779         if (-top_bits != param.select || (param.select && !ttbr1_valid)) {
10780             /* The gap between the two regions is a Translation fault */
10781             fault_type = ARMFault_Translation;
10782             goto do_fault;
10783         }
10784     }
10785 
10786     if (param.using64k) {
10787         stride = 13;
10788     } else if (param.using16k) {
10789         stride = 11;
10790     } else {
10791         stride = 9;
10792     }
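          /* stride is log2 of the number of descriptors per table: a
           * 4KB granule holds 512 8-byte descriptors (9 bits of index),
           * 16KB holds 2048 (11 bits) and 64KB holds 8192 (13 bits).
           */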
10793 
10794     /* Note that QEMU ignores shareability and cacheability attributes,
10795      * so we don't need to do anything with the SH, ORGN, IRGN fields
10796      * in the TTBCR.  Similarly, TTBCR:A1 selects whether we get the
10797      * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
10798      * implement any ASID-like capability so we can ignore it (instead
10799      * we will always flush the TLB any time the ASID is changed).
10800      */
10801     ttbr = regime_ttbr(env, mmu_idx, param.select);
10802 
10803     /* Here we should have set up all the parameters for the translation:
10804      * inputsize, ttbr, epd, stride, tbi
10805      */
10806 
10807     if (param.epd) {
10808         /* Translation table walk disabled => Translation fault on TLB miss
10809          * Note: This is always 0 on 64-bit EL2 and EL3.
10810          */
10811         goto do_fault;
10812     }
10813 
10814     if (mmu_idx != ARMMMUIdx_S2NS) {
10815         /* The starting level depends on the virtual address size (which can
10816          * be up to 48 bits) and the translation granule size. It indicates
10817          * the number of strides (stride bits at a time) needed to
10818          * consume the bits of the input address. In the pseudocode this is:
10819          *  level = 4 - RoundUp((inputsize - grainsize) / stride)
10820          * where their 'inputsize' is our 'inputsize', 'grainsize' is
10821          * our 'stride + 3' and 'stride' is our 'stride'.
10822          * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying:
10823          * = 4 - (inputsize - stride - 3 + stride - 1) / stride
10824          * = 4 - (inputsize - 4) / stride;
10825          */
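              /* For example, with a 4KB granule (stride 9) inputsize 48
               * gives level 0 and inputsize 30 gives level 2.
               */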
10826         level = 4 - (inputsize - 4) / stride;
10827     } else {
10828         /* For stage 2 translations the starting level is specified by the
10829          * VTCR_EL2.SL0 field (whose interpretation depends on the page size)
10830          */
10831         uint32_t sl0 = extract32(tcr->raw_tcr, 6, 2);
10832         uint32_t startlevel;
10833         bool ok;
10834 
10835         if (!aarch64 || stride == 9) {
10836             /* AArch32 or 4KB pages */
10837             startlevel = 2 - sl0;
10838         } else {
10839             /* 16KB or 64KB pages */
10840             startlevel = 3 - sl0;
10841         }
10842 
10843         /* Check that the starting level is valid. */
10844         ok = check_s2_mmu_setup(cpu, aarch64, startlevel,
10845                                 inputsize, stride);
10846         if (!ok) {
10847             fault_type = ARMFault_Translation;
10848             goto do_fault;
10849         }
10850         level = startlevel;
10851     }
10852 
10853     indexmask_grainsize = (1ULL << (stride + 3)) - 1;
10854     indexmask = (1ULL << (inputsize - (stride * (4 - level)))) - 1;
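          /* For example, with a 4KB granule, inputsize 38 and level 1:
           * indexmask_grainsize is 0xfff and indexmask is 0x7ff, so the
           * walk below uses (address >> 27) & 0x7f8, i.e. va[37:30] * 8,
           * to index a 2KB top-level table of 256 descriptors.
           */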
10855 
10856     /* Now we can extract the actual base address from the TTBR */
10857     descaddr = extract64(ttbr, 0, 48);
10858     descaddr &= ~indexmask;
10859 
10860     /* The address field in the descriptor goes up to bit 39 for ARMv7
10861      * but up to bit 47 for ARMv8. For AArch32 we only use descaddrmask
10862      * up to bit 39, since the higher bits are not needed to construct
10863      * the next descriptor address (and should all be zeroes anyway).
10864      */
10865     descaddrmask = ((1ull << (aarch64 ? 48 : 40)) - 1) &
10866                    ~indexmask_grainsize;
10867 
10868     /* Secure accesses start with the page table in secure memory and
10869      * can be downgraded to non-secure at any step. Non-secure accesses
10870      * remain non-secure. We implement this by just ORing in the NSTable/NS
10871      * bits at each step.
10872      */
10873     tableattrs = regime_is_secure(env, mmu_idx) ? 0 : (1 << 4);
10874     for (;;) {
10875         uint64_t descriptor;
10876         bool nstable;
10877 
10878         descaddr |= (address >> (stride * (4 - level))) & indexmask;
10879         descaddr &= ~7ULL;
10880         nstable = extract32(tableattrs, 4, 1);
10881         descriptor = arm_ldq_ptw(cs, descaddr, !nstable, mmu_idx, fi);
10882         if (fi->type != ARMFault_None) {
10883             goto do_fault;
10884         }
10885 
10886         if (!(descriptor & 1) ||
10887             (!(descriptor & 2) && (level == 3))) {
10888             /* Invalid, or the Reserved level 3 encoding */
10889             goto do_fault;
10890         }
10891         descaddr = descriptor & descaddrmask;
10892 
10893         if ((descriptor & 2) && (level < 3)) {
10894             /* Table entry. The top five bits are attributes which may
10895              * propagate down through lower levels of the table (and
10896              * which are all arranged so that 0 means "no effect", so
10897              * we can gather them up by ORing in the bits at each level).
10898              */
10899             tableattrs |= extract64(descriptor, 59, 5);
10900             level++;
10901             indexmask = indexmask_grainsize;
10902             continue;
10903         }
10904         /* Block entry at level 1 or 2, or page entry at level 3.
10905          * These are basically the same thing, although the number
10906          * of bits we pull in from the vaddr varies.
10907          */
10908         page_size = (1ULL << ((stride * (4 - level)) + 3));
10909         descaddr |= (address & (page_size - 1));
10910         /* Extract attributes from the descriptor */
10911         attrs = extract64(descriptor, 2, 10)
10912             | (extract64(descriptor, 52, 12) << 10);
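              /* attrs[9:0] is descriptor[11:2] and attrs[21:10] is
               * descriptor[63:52]: AttrIndx is attrs[2:0], NS is attrs[3],
               * AP[2:1] is attrs[5:4], SH is attrs[7:6], AF is attrs[8],
               * PXN is attrs[11] and XN is attrs[12], matching the
               * extractions below.
               */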
10913 
10914         if (mmu_idx == ARMMMUIdx_S2NS) {
10915             /* Stage 2 table descriptors do not include any attribute fields */
10916             break;
10917         }
10918         /* Merge in attributes from table descriptors */
10919         attrs |= nstable << 3; /* NS */
10920         guarded = extract64(descriptor, 50, 1);  /* GP */
10921         if (param.hpd) {
10922             /* HPD disables all the table attributes except NSTable.  */
10923             break;
10924         }
10925         attrs |= extract32(tableattrs, 0, 2) << 11;     /* XN, PXN */
10926         /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
10927          * means "force PL1 access only", which means forcing AP[1] to 0.
10928          */
10929         attrs &= ~(extract32(tableattrs, 2, 1) << 4);   /* !APT[0] => AP[1] */
10930         attrs |= extract32(tableattrs, 3, 1) << 5;      /* APT[1] => AP[2] */
10931         break;
10932     }
10933     /* Here descaddr is the final physical address, and attributes
10934      * are all in attrs.
10935      */
10936     fault_type = ARMFault_AccessFlag;
10937     if ((attrs & (1 << 8)) == 0) {
10938         /* Access flag */
10939         goto do_fault;
10940     }
10941 
10942     ap = extract32(attrs, 4, 2);
10943     xn = extract32(attrs, 12, 1);
10944 
10945     if (mmu_idx == ARMMMUIdx_S2NS) {
10946         ns = true;
10947         *prot = get_S2prot(env, ap, xn);
10948     } else {
10949         ns = extract32(attrs, 3, 1);
10950         pxn = extract32(attrs, 11, 1);
10951         *prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn);
10952     }
10953 
10954     fault_type = ARMFault_Permission;
10955     if (!(*prot & (1 << access_type))) {
10956         goto do_fault;
10957     }
10958 
10959     if (ns) {
10960         /* The NS bit will (as required by the architecture) have no effect if
10961          * the CPU doesn't support TZ or this is a non-secure translation
10962          * regime, because the attribute will already be non-secure.
10963          */
10964         txattrs->secure = false;
10965     }
10966     /* When in aarch64 mode, and BTI is enabled, remember GP in the IOTLB.  */
10967     if (aarch64 && guarded && cpu_isar_feature(aa64_bti, cpu)) {
10968         txattrs->target_tlb_bit0 = true;
10969     }
10970 
10971     if (cacheattrs != NULL) {
10972         if (mmu_idx == ARMMMUIdx_S2NS) {
10973             cacheattrs->attrs = convert_stage2_attrs(env,
10974                                                      extract32(attrs, 0, 4));
10975         } else {
10976             /* Index into MAIR registers for cache attributes */
10977             uint8_t attrindx = extract32(attrs, 0, 3);
10978             uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
10979             assert(attrindx <= 7);
10980             cacheattrs->attrs = extract64(mair, attrindx * 8, 8);
10981         }
10982         cacheattrs->shareability = extract32(attrs, 6, 2);
10983     }
10984 
10985     *phys_ptr = descaddr;
10986     *page_size_ptr = page_size;
10987     return false;
10988 
10989 do_fault:
10990     fi->type = fault_type;
10991     fi->level = level;
10992     /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2.  */
10993     fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_S2NS);
10994     return true;
10995 }
10996 
10997 static inline void get_phys_addr_pmsav7_default(CPUARMState *env,
10998                                                 ARMMMUIdx mmu_idx,
10999                                                 int32_t address, int *prot)
11000 {
11001     if (!arm_feature(env, ARM_FEATURE_M)) {
11002         *prot = PAGE_READ | PAGE_WRITE;
11003         switch (address) {
11004         case 0xF0000000 ... 0xFFFFFFFF:
11005             if (regime_sctlr(env, mmu_idx) & SCTLR_V) {
11006                 /* hivecs execing is ok */
11007                 *prot |= PAGE_EXEC;
11008             }
11009             break;
11010         case 0x00000000 ... 0x7FFFFFFF:
11011             *prot |= PAGE_EXEC;
11012             break;
11013         }
11014     } else {
11015         /* Default system address map for M profile cores.
11016          * The architecture specifies which regions are execute-never;
11017          * at the MPU level no other checks are defined.
11018          */
11019         switch (address) {
11020         case 0x00000000 ... 0x1fffffff: /* ROM */
11021         case 0x20000000 ... 0x3fffffff: /* SRAM */
11022         case 0x60000000 ... 0x7fffffff: /* RAM */
11023         case 0x80000000 ... 0x9fffffff: /* RAM */
11024             *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
11025             break;
11026         case 0x40000000 ... 0x5fffffff: /* Peripheral */
11027         case 0xa0000000 ... 0xbfffffff: /* Device */
11028         case 0xc0000000 ... 0xdfffffff: /* Device */
11029         case 0xe0000000 ... 0xffffffff: /* System */
11030             *prot = PAGE_READ | PAGE_WRITE;
11031             break;
11032         default:
11033             g_assert_not_reached();
11034         }
11035     }
11036 }
11037 
11038 static bool pmsav7_use_background_region(ARMCPU *cpu,
11039                                          ARMMMUIdx mmu_idx, bool is_user)
11040 {
11041     /* Return true if we should use the default memory map as a
11042      * "background" region if there are no hits against any MPU regions.
11043      */
11044     CPUARMState *env = &cpu->env;
11045 
11046     if (is_user) {
11047         return false;
11048     }
11049 
11050     if (arm_feature(env, ARM_FEATURE_M)) {
11051         return env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)]
11052             & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
11053     } else {
11054         return regime_sctlr(env, mmu_idx) & SCTLR_BR;
11055     }
11056 }
11057 
11058 static inline bool m_is_ppb_region(CPUARMState *env, uint32_t address)
11059 {
11060     /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */
11061     return arm_feature(env, ARM_FEATURE_M) &&
11062         extract32(address, 20, 12) == 0xe00;
11063 }
11064 
11065 static inline bool m_is_system_region(CPUARMState *env, uint32_t address)
11066 {
11067     /* True if address is in the M profile system region
11068      * 0xe0000000 - 0xffffffff
11069      */
11070     return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7;
11071 }
11072 
11073 static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
11074                                  MMUAccessType access_type, ARMMMUIdx mmu_idx,
11075                                  hwaddr *phys_ptr, int *prot,
11076                                  target_ulong *page_size,
11077                                  ARMMMUFaultInfo *fi)
11078 {
11079     ARMCPU *cpu = arm_env_get_cpu(env);
11080     int n;
11081     bool is_user = regime_is_user(env, mmu_idx);
11082 
11083     *phys_ptr = address;
11084     *page_size = TARGET_PAGE_SIZE;
11085     *prot = 0;
11086 
11087     if (regime_translation_disabled(env, mmu_idx) ||
11088         m_is_ppb_region(env, address)) {
11089         /* MPU disabled or M profile PPB access: use default memory map.
11090          * The other case which uses the default memory map in the
11091          * v7M ARM ARM pseudocode is exception vector reads from the vector
11092          * table. In QEMU those accesses are done in arm_v7m_load_vector(),
11093          * which always does a direct read using address_space_ldl(), rather
11094          * than going via this function, so we don't need to check that here.
11095          */
11096         get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
11097     } else { /* MPU enabled */
11098         for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
11099             /* region search */
11100             uint32_t base = env->pmsav7.drbar[n];
11101             uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5);
11102             uint32_t rmask;
11103             bool srdis = false;
11104 
11105             if (!(env->pmsav7.drsr[n] & 0x1)) {
11106                 continue;
11107             }
11108 
11109             if (!rsize) {
11110                 qemu_log_mask(LOG_GUEST_ERROR,
11111                               "DRSR[%d]: Rsize field cannot be 0\n", n);
11112                 continue;
11113             }
11114             rsize++;
11115             rmask = (1ull << rsize) - 1;
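                  /* For example, DRSR.SIZE == 7 gives rsize 8: a
                   * 256-byte region with rmask 0xff.
                   */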
11116 
11117             if (base & rmask) {
11118                 qemu_log_mask(LOG_GUEST_ERROR,
11119                               "DRBAR[%d]: 0x%" PRIx32 " misaligned "
11120                               "to DRSR region size, mask = 0x%" PRIx32 "\n",
11121                               n, base, rmask);
11122                 continue;
11123             }
11124 
11125             if (address < base || address > base + rmask) {
11126                 /*
11127                  * Address not in this region. We must check whether the
11128                  * region covers addresses in the same page as our address.
11129                  * In that case we must not report a size that covers the
11130                  * whole page for a subsequent hit against a different MPU
11131                  * region or the background region, because it would result in
11132                  * incorrect TLB hits for subsequent accesses to addresses that
11133                  * are in this MPU region.
11134                  */
11135                 if (ranges_overlap(base, rmask,
11136                                    address & TARGET_PAGE_MASK,
11137                                    TARGET_PAGE_SIZE)) {
11138                     *page_size = 1;
11139                 }
11140                 continue;
11141             }
11142 
11143             /* Region matched */
11144 
11145             if (rsize >= 8) { /* no subregions for regions < 256 bytes */
11146                 int i, snd;
11147                 uint32_t srdis_mask;
11148 
11149                 rsize -= 3; /* sub region size (power of 2) */
11150                 snd = ((address - base) >> rsize) & 0x7;
11151                 srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1);
11152 
11153                 srdis_mask = srdis ? 0x3 : 0x0;
11154                 for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) {
11155                     /* This checks, in groups of 2, 4 and then 8, whether
11156                      * the subregion bits are consistent. rsize is incremented
11157                      * back up to give the region size, considering consistent
11158                      * adjacent subregions as one region. Stop testing if rsize
11159                      * is already big enough for an entire QEMU page.
11160                      */
11161                     int snd_rounded = snd & ~(i - 1);
11162                     uint32_t srdis_multi = extract32(env->pmsav7.drsr[n],
11163                                                      snd_rounded + 8, i);
11164                     if (srdis_mask ^ srdis_multi) {
11165                         break;
11166                     }
11167                     srdis_mask = (srdis_mask << i) | srdis_mask;
11168                     rsize++;
11169                 }
11170             }
11171             if (srdis) {
11172                 continue;
11173             }
11174             if (rsize < TARGET_PAGE_BITS) {
11175                 *page_size = 1 << rsize;
11176             }
11177             break;
11178         }
11179 
11180         if (n == -1) { /* no hits */
11181             if (!pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
11182                 /* background fault */
11183                 fi->type = ARMFault_Background;
11184                 return true;
11185             }
11186             get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
11187         } else { /* an MPU hit! */
11188             uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3);
11189             uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1);
11190 
11191             if (m_is_system_region(env, address)) {
11192                 /* System space is always execute never */
11193                 xn = 1;
11194             }
11195 
11196             if (is_user) { /* User mode AP bit decoding */
11197                 switch (ap) {
11198                 case 0:
11199                 case 1:
11200                 case 5:
11201                     break; /* no access */
11202                 case 3:
11203                     *prot |= PAGE_WRITE;
11204                     /* fall through */
11205                 case 2:
11206                 case 6:
11207                     *prot |= PAGE_READ | PAGE_EXEC;
11208                     break;
11209                 case 7:
11210                     /* for v7M, same as 6; for R profile a reserved value */
11211                     if (arm_feature(env, ARM_FEATURE_M)) {
11212                         *prot |= PAGE_READ | PAGE_EXEC;
11213                         break;
11214                     }
11215                     /* fall through */
11216                 default:
11217                     qemu_log_mask(LOG_GUEST_ERROR,
11218                                   "DRACR[%d]: Bad value for AP bits: 0x%"
11219                                   PRIx32 "\n", n, ap);
11220                 }
11221             } else { /* Priv. mode AP bits decoding */
11222                 switch (ap) {
11223                 case 0:
11224                     break; /* no access */
11225                 case 1:
11226                 case 2:
11227                 case 3:
11228                     *prot |= PAGE_WRITE;
11229                     /* fall through */
11230                 case 5:
11231                 case 6:
11232                     *prot |= PAGE_READ | PAGE_EXEC;
11233                     break;
11234                 case 7:
11235                     /* for v7M, same as 6; for R profile a reserved value */
11236                     if (arm_feature(env, ARM_FEATURE_M)) {
11237                         *prot |= PAGE_READ | PAGE_EXEC;
11238                         break;
11239                     }
11240                     /* fall through */
11241                 default:
11242                     qemu_log_mask(LOG_GUEST_ERROR,
11243                                   "DRACR[%d]: Bad value for AP bits: 0x%"
11244                                   PRIx32 "\n", n, ap);
11245                 }
11246             }
11247 
11248             /* execute never */
11249             if (xn) {
11250                 *prot &= ~PAGE_EXEC;
11251             }
11252         }
11253     }
11254 
11255     fi->type = ARMFault_Permission;
11256     fi->level = 1;
11257     return !(*prot & (1 << access_type));
11258 }
11259 
11260 static bool v8m_is_sau_exempt(CPUARMState *env,
11261                               uint32_t address, MMUAccessType access_type)
11262 {
11263     /* The architecture specifies that certain address ranges are
11264      * exempt from v8M SAU/IDAU checks.
11265      */
11266     return
11267         (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) ||
11268         (address >= 0xe0000000 && address <= 0xe0002fff) ||
11269         (address >= 0xe000e000 && address <= 0xe000efff) ||
11270         (address >= 0xe002e000 && address <= 0xe002efff) ||
11271         (address >= 0xe0040000 && address <= 0xe0041fff) ||
11272         (address >= 0xe00ff000 && address <= 0xe00fffff);
11273 }
11274 
11275 static void v8m_security_lookup(CPUARMState *env, uint32_t address,
11276                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
11277                                 V8M_SAttributes *sattrs)
11278 {
11279     /* Look up the security attributes for this address. Compare the
11280      * pseudocode SecurityCheck() function.
11281      * We assume the caller has zero-initialized *sattrs.
11282      */
11283     ARMCPU *cpu = arm_env_get_cpu(env);
11284     int r;
11285     bool idau_exempt = false, idau_ns = true, idau_nsc = true;
11286     int idau_region = IREGION_NOTVALID;
11287     uint32_t addr_page_base = address & TARGET_PAGE_MASK;
11288     uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);
11289 
11290     if (cpu->idau) {
11291         IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau);
11292         IDAUInterface *ii = IDAU_INTERFACE(cpu->idau);
11293 
11294         iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns,
11295                    &idau_nsc);
11296     }
11297 
11298     if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) {
11299         /* 0xf0000000..0xffffffff is always S for insn fetches */
11300         return;
11301     }
11302 
11303     if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) {
11304         sattrs->ns = !regime_is_secure(env, mmu_idx);
11305         return;
11306     }
11307 
11308     if (idau_region != IREGION_NOTVALID) {
11309         sattrs->irvalid = true;
11310         sattrs->iregion = idau_region;
11311     }
11312 
11313     switch (env->sau.ctrl & 3) {
11314     case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */
11315         break;
11316     case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */
11317         sattrs->ns = true;
11318         break;
11319     default: /* SAU.ENABLE == 1 */
11320         for (r = 0; r < cpu->sau_sregion; r++) {
11321             if (env->sau.rlar[r] & 1) {
11322                 uint32_t base = env->sau.rbar[r] & ~0x1f;
11323                 uint32_t limit = env->sau.rlar[r] | 0x1f;
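                      /* SAU regions have 32-byte granularity: e.g.
                       * RBAR 0x20000000 with RLAR.LIMIT 0x2000ffe0
                       * covers 0x20000000..0x2000ffff.
                       */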
11324 
11325                 if (base <= address && limit >= address) {
11326                     if (base > addr_page_base || limit < addr_page_limit) {
11327                         sattrs->subpage = true;
11328                     }
11329                     if (sattrs->srvalid) {
11330                         /* If we hit in more than one region then we must report
11331                          * as Secure, not NS-Callable, with no valid region
11332                          * number info.
11333                          */
11334                         sattrs->ns = false;
11335                         sattrs->nsc = false;
11336                         sattrs->sregion = 0;
11337                         sattrs->srvalid = false;
11338                         break;
11339                     } else {
11340                         if (env->sau.rlar[r] & 2) {
11341                             sattrs->nsc = true;
11342                         } else {
11343                             sattrs->ns = true;
11344                         }
11345                         sattrs->srvalid = true;
11346                         sattrs->sregion = r;
11347                     }
11348                 } else {
11349                     /*
11350                      * Address not in this region. We must check whether the
11351                      * region covers addresses in the same page as our address.
11352                      * In that case we must not report a size that covers the
11353                      * whole page for a subsequent hit against a different MPU
11354                      * region or the background region, because it would result
11355                      * in incorrect TLB hits for subsequent accesses to
11356                      * addresses that are in this MPU region.
11357                      */
11358                     if (limit >= base &&
11359                         ranges_overlap(base, limit - base + 1,
11360                                        addr_page_base,
11361                                        TARGET_PAGE_SIZE)) {
11362                         sattrs->subpage = true;
11363                     }
11364                 }
11365             }
11366         }
11367         break;
11368     }
11369 
11370     /*
11371      * The IDAU will override the SAU lookup results if it specifies
11372      * higher security than the SAU does.
11373      */
11374     if (!idau_ns) {
11375         if (sattrs->ns || (!idau_nsc && sattrs->nsc)) {
11376             sattrs->ns = false;
11377             sattrs->nsc = idau_nsc;
11378         }
11379     }
11380 }
11381 
11382 static bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
11383                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
11384                               hwaddr *phys_ptr, MemTxAttrs *txattrs,
11385                               int *prot, bool *is_subpage,
11386                               ARMMMUFaultInfo *fi, uint32_t *mregion)
11387 {
11388     /* Perform a PMSAv8 MPU lookup (without also doing the SAU check
11389      * that a full virt-to-phys translation does).
11390      * mregion is (if not NULL) set to the region number which matched,
11391      * or -1 if no region number is returned (MPU off, address did not
11392      * hit a region, address hit in multiple regions).
11393      * We set is_subpage to true if the region hit doesn't cover the
11394      * entire TARGET_PAGE the address is within.
11395      */
11396     ARMCPU *cpu = arm_env_get_cpu(env);
11397     bool is_user = regime_is_user(env, mmu_idx);
11398     uint32_t secure = regime_is_secure(env, mmu_idx);
11399     int n;
11400     int matchregion = -1;
11401     bool hit = false;
11402     uint32_t addr_page_base = address & TARGET_PAGE_MASK;
11403     uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);
11404 
11405     *is_subpage = false;
11406     *phys_ptr = address;
11407     *prot = 0;
11408     if (mregion) {
11409         *mregion = -1;
11410     }
11411 
11412     /* Unlike the ARM ARM pseudocode, we don't need to check whether this
11413      * was an exception vector read from the vector table (which is always
11414      * done using the default system address map), because those accesses
11415      * are done in arm_v7m_load_vector(), which always does a direct
11416      * read using address_space_ldl(), rather than going via this function.
11417      */
11418     if (regime_translation_disabled(env, mmu_idx)) { /* MPU disabled */
11419         hit = true;
11420     } else if (m_is_ppb_region(env, address)) {
11421         hit = true;
11422     } else {
11423         if (pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
11424             hit = true;
11425         }
11426 
11427         for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
11428             /* region search */
11429             /* Note that the base address is bits [31:5] from the register
11430              * with bits [4:0] all zeroes, but the limit address is bits
11431              * [31:5] from the register with bits [4:0] all ones.
11432              */
11433             uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f;
11434             uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f;
11435 
11436             if (!(env->pmsav8.rlar[secure][n] & 0x1)) {
11437                 /* Region disabled */
11438                 continue;
11439             }
11440 
11441             if (address < base || address > limit) {
11442                 /*
11443                  * Address not in this region. We must check whether the
11444                  * region covers addresses in the same page as our address.
11445                  * In that case we must not report a size that covers the
11446                  * whole page for a subsequent hit against a different MPU
11447                  * region or the background region, because it would result in
11448                  * incorrect TLB hits for subsequent accesses to addresses that
11449                  * are in this MPU region.
11450                  */
11451                 if (limit >= base &&
11452                     ranges_overlap(base, limit - base + 1,
11453                                    addr_page_base,
11454                                    TARGET_PAGE_SIZE)) {
11455                     *is_subpage = true;
11456                 }
11457                 continue;
11458             }
11459 
11460             if (base > addr_page_base || limit < addr_page_limit) {
11461                 *is_subpage = true;
11462             }
11463 
11464             if (matchregion != -1) {
11465                 /* Multiple regions match -- always a failure (unlike
11466                  * PMSAv7 where highest-numbered-region wins)
11467                  */
11468                 fi->type = ARMFault_Permission;
11469                 fi->level = 1;
11470                 return true;
11471             }
11472 
11473             matchregion = n;
11474             hit = true;
11475         }
11476     }
11477 
11478     if (!hit) {
11479         /* background fault */
11480         fi->type = ARMFault_Background;
11481         return true;
11482     }
11483 
11484     if (matchregion == -1) {
11485         /* hit using the background region */
11486         get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
11487     } else {
11488         uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2);
11489         uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1);
11490 
11491         if (m_is_system_region(env, address)) {
11492             /* System space is always execute never */
11493             xn = 1;
11494         }
11495 
11496         *prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
11497         if (*prot && !xn) {
11498             *prot |= PAGE_EXEC;
11499         }
11500         /* We don't need to look the attribute up in the MAIR0/MAIR1
11501          * registers because that only tells us about cacheability.
11502          */
11503         if (mregion) {
11504             *mregion = matchregion;
11505         }
11506     }
11507 
11508     fi->type = ARMFault_Permission;
11509     fi->level = 1;
11510     return !(*prot & (1 << access_type));
11511 }
11512 
11513 
11514 static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
11515                                  MMUAccessType access_type, ARMMMUIdx mmu_idx,
11516                                  hwaddr *phys_ptr, MemTxAttrs *txattrs,
11517                                  int *prot, target_ulong *page_size,
11518                                  ARMMMUFaultInfo *fi)
11519 {
11520     uint32_t secure = regime_is_secure(env, mmu_idx);
11521     V8M_SAttributes sattrs = {};
11522     bool ret;
11523     bool mpu_is_subpage;
11524 
11525     if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
11526         v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs);
11527         if (access_type == MMU_INST_FETCH) {
11528             /* Instruction fetches always use the MMU bank and the
11529              * transaction attribute determined by the fetch address,
11530              * regardless of CPU state. This is painful for QEMU
11531              * to handle, because it would mean we need to encode
11532              * into the mmu_idx not just the (user, negpri) information
11533              * for the current security state but also that for the
11534              * other security state, which would balloon the number
11535              * of mmu_idx values needed alarmingly.
11536              * Fortunately we can avoid this because it's not actually
11537              * possible to arbitrarily execute code from memory with
11538              * the wrong security attribute: it will always generate
11539              * an exception of some kind or another, apart from the
11540              * special case of an NS CPU executing an SG instruction
11541              * in S&NSC memory. So we always just fail the translation
11542              * here and sort things out in the exception handler
11543              * (including possibly emulating an SG instruction).
11544              */
11545             if (sattrs.ns != !secure) {
11546                 if (sattrs.nsc) {
11547                     fi->type = ARMFault_QEMU_NSCExec;
11548                 } else {
11549                     fi->type = ARMFault_QEMU_SFault;
11550                 }
11551                 *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
11552                 *phys_ptr = address;
11553                 *prot = 0;
11554                 return true;
11555             }
11556         } else {
11557             /* For data accesses we always use the MMU bank indicated
11558              * by the current CPU state, but the security attributes
11559              * might downgrade a secure access to nonsecure.
11560              */
11561             if (sattrs.ns) {
11562                 txattrs->secure = false;
11563             } else if (!secure) {
11564                 /* NS access to S memory must fault.
11565                  * Architecturally we should first check whether the
11566                  * MPU information for this address indicates that we
11567                  * are doing an unaligned access to Device memory, which
11568                  * should generate a UsageFault instead. QEMU does not
11569                  * currently check for that kind of unaligned access though.
11570                  * If we added it we would need to do so as a special case
11571                  * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt().
11572                  */
11573                 fi->type = ARMFault_QEMU_SFault;
11574                 *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
11575                 *phys_ptr = address;
11576                 *prot = 0;
11577                 return true;
11578             }
11579         }
11580     }
11581 
11582     ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, phys_ptr,
11583                             txattrs, prot, &mpu_is_subpage, fi, NULL);
11584     *page_size = sattrs.subpage || mpu_is_subpage ? 1 : TARGET_PAGE_SIZE;
11585     return ret;
11586 }
11587 
11588 static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
11589                                  MMUAccessType access_type, ARMMMUIdx mmu_idx,
11590                                  hwaddr *phys_ptr, int *prot,
11591                                  ARMMMUFaultInfo *fi)
11592 {
11593     int n;
11594     uint32_t mask;
11595     uint32_t base;
11596     bool is_user = regime_is_user(env, mmu_idx);
11597 
11598     if (regime_translation_disabled(env, mmu_idx)) {
11599         /* MPU disabled.  */
11600         *phys_ptr = address;
11601         *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
11602         return false;
11603     }
11604 
11605     *phys_ptr = address;
11606     for (n = 7; n >= 0; n--) {
11607         base = env->cp15.c6_region[n];
11608         if ((base & 1) == 0) {
11609             continue;
11610         }
11611         mask = 1 << ((base >> 1) & 0x1f);
11612         /* Keep this shift separate from the above to avoid an
11613            (undefined) << 32.  */
11614         mask = (mask << 1) - 1;
11615         if (((base ^ address) & ~mask) == 0) {
11616             break;
11617         }
11618     }
11619     if (n < 0) {
11620         fi->type = ARMFault_Background;
11621         return true;
11622     }
11623 
11624     if (access_type == MMU_INST_FETCH) {
11625         mask = env->cp15.pmsav5_insn_ap;
11626     } else {
11627         mask = env->cp15.pmsav5_data_ap;
11628     }
11629     mask = (mask >> (n * 4)) & 0xf;
11630     switch (mask) {
11631     case 0:
11632         fi->type = ARMFault_Permission;
11633         fi->level = 1;
11634         return true;
11635     case 1:
11636         if (is_user) {
11637             fi->type = ARMFault_Permission;
11638             fi->level = 1;
11639             return true;
11640         }
11641         *prot = PAGE_READ | PAGE_WRITE;
11642         break;
11643     case 2:
11644         *prot = PAGE_READ;
11645         if (!is_user) {
11646             *prot |= PAGE_WRITE;
11647         }
11648         break;
11649     case 3:
11650         *prot = PAGE_READ | PAGE_WRITE;
11651         break;
11652     case 5:
11653         if (is_user) {
11654             fi->type = ARMFault_Permission;
11655             fi->level = 1;
11656             return true;
11657         }
11658         *prot = PAGE_READ;
11659         break;
11660     case 6:
11661         *prot = PAGE_READ;
11662         break;
11663     default:
11664         /* Bad permission.  */
11665         fi->type = ARMFault_Permission;
11666         fi->level = 1;
11667         return true;
11668     }
11669     *prot |= PAGE_EXEC;
11670     return false;
11671 }
11672 
11673 /* Combine either inner or outer cacheability attributes for normal
11674  * memory, according to table D4-42 and pseudocode procedure
11675  * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM).
11676  *
11677  * NB: only stage 1 includes allocation hints (RW bits), leading to
11678  * some asymmetry.
11679  */
11680 static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2)
11681 {
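          /* Worked example: s1 == 0xf (write-back, RW-allocate) combined
           * with s2 == 0xa (write-through) takes the s2 write-through
           * case and yields (2 << 2) | 3 == 0xb, i.e. write-through
           * with the stage 1 allocation hints.
           */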
11682     if (s1 == 4 || s2 == 4) {
11683         /* non-cacheable has precedence */
11684         return 4;
11685     } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) {
11686         /* stage 1 write-through takes precedence */
11687         return s1;
11688     } else if (extract32(s2, 2, 2) == 2) {
11689         /* stage 2 write-through takes precedence, but the allocation hint
11690          * is still taken from stage 1
11691          */
11692         return (2 << 2) | extract32(s1, 0, 2);
11693     } else { /* write-back */
11694         return s1;
11695     }
11696 }
11697 
11698 /* Combine S1 and S2 cacheability/shareability attributes, per D4.5.4
11699  * and CombineS1S2Desc()
11700  *
11701  * @s1:      Attributes from stage 1 walk
11702  * @s2:      Attributes from stage 2 walk
11703  */
11704 static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2)
11705 {
11706     uint8_t s1lo = extract32(s1.attrs, 0, 4), s2lo = extract32(s2.attrs, 0, 4);
11707     uint8_t s1hi = extract32(s1.attrs, 4, 4), s2hi = extract32(s2.attrs, 4, 4);
11708     ARMCacheAttrs ret;
11709 
11710     /* Combine shareability attributes (table D4-43) */
11711     if (s1.shareability == 2 || s2.shareability == 2) {
11712         /* if either are outer-shareable, the result is outer-shareable */
11713         ret.shareability = 2;
11714     } else if (s1.shareability == 3 || s2.shareability == 3) {
11715         /* if either are inner-shareable, the result is inner-shareable */
11716         ret.shareability = 3;
11717     } else {
11718         /* both non-shareable */
11719         ret.shareability = 0;
11720     }
11721 
11722     /* Combine memory type and cacheability attributes */
11723     if (s1hi == 0 || s2hi == 0) {
11724         /* Device has precedence over normal */
11725         if (s1lo == 0 || s2lo == 0) {
11726             /* nGnRnE has precedence over anything */
11727             ret.attrs = 0;
11728         } else if (s1lo == 4 || s2lo == 4) {
11729             /* non-Reordering has precedence over Reordering */
11730             ret.attrs = 4;  /* nGnRE */
11731         } else if (s1lo == 8 || s2lo == 8) {
11732             /* non-Gathering has precedence over Gathering */
11733             ret.attrs = 8;  /* nGRE */
11734         } else {
11735             ret.attrs = 0xc; /* GRE */
11736         }
11737 
11738         /* Any location for which the resultant memory type is any
11739          * type of Device memory is always treated as Outer Shareable.
11740          */
11741         ret.shareability = 2;
11742     } else { /* Normal memory */
11743         /* Outer/inner cacheability combine independently */
11744         ret.attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4
11745                   | combine_cacheattr_nibble(s1lo, s2lo);
11746 
11747         if (ret.attrs == 0x44) {
11748             /* Any location for which the resultant memory type is Normal
11749              * Inner Non-cacheable, Outer Non-cacheable is always treated
11750              * as Outer Shareable.
11751              */
11752             ret.shareability = 2;
11753         }
11754     }
11755 
11756     return ret;
11757 }
11758 
11759 
11760 /* get_phys_addr - get the physical address for this virtual address
11761  *
11762  * Find the physical address corresponding to the given virtual address,
11763  * by doing a translation table walk on MMU based systems or using the
11764  * MPU state on MPU based systems.
11765  *
11766  * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
11767  * prot and page_size may not be filled in, and the populated fault info
11768  * provides the reason why the translation aborted, expressed in the
11769  * format of a DFSR/IFSR fault register, with the following caveats:
11770  *  * we honour the short vs long DFSR format differences.
11771  *  * the WnR bit is never set (the caller must do this).
11772  *  * for PMSAv5 based systems we don't bother to return a full FSR format
11773  *    value.
11774  *
11775  * @env: CPUARMState
11776  * @address: virtual address to get physical address for
11777  * @access_type: MMU_DATA_LOAD for read, MMU_DATA_STORE for write, MMU_INST_FETCH for execute
11778  * @mmu_idx: MMU index indicating required translation regime
11779  * @phys_ptr: set to the physical address corresponding to the virtual address
11780  * @attrs: set to the memory transaction attributes to use
11781  * @prot: set to the permissions for the page containing phys_ptr
11782  * @page_size: set to the size of the page containing phys_ptr
11783  * @fi: set to fault info if the translation fails
11784  * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
11785  */
11786 static bool get_phys_addr(CPUARMState *env, target_ulong address,
11787                           MMUAccessType access_type, ARMMMUIdx mmu_idx,
11788                           hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
11789                           target_ulong *page_size,
11790                           ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
11791 {
11792     if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
11793         /* Call ourselves recursively to do the stage 1 and then stage 2
11794          * translations.
11795          */
11796         if (arm_feature(env, ARM_FEATURE_EL2)) {
11797             hwaddr ipa;
11798             int s2_prot;
11799             int ret;
11800             ARMCacheAttrs cacheattrs2 = {};
11801 
11802             ret = get_phys_addr(env, address, access_type,
11803                                 stage_1_mmu_idx(mmu_idx), &ipa, attrs,
11804                                 prot, page_size, fi, cacheattrs);
11805 
11806             /* If S1 fails or S2 is disabled, return early.  */
11807             if (ret || regime_translation_disabled(env, ARMMMUIdx_S2NS)) {
11808                 *phys_ptr = ipa;
11809                 return ret;
11810             }
11811 
11812             /* S1 is done. Now do S2 translation.  */
11813             ret = get_phys_addr_lpae(env, ipa, access_type, ARMMMUIdx_S2NS,
11814                                      phys_ptr, attrs, &s2_prot,
11815                                      page_size, fi,
11816                                      cacheattrs != NULL ? &cacheattrs2 : NULL);
11817             fi->s2addr = ipa;
11818             /* Combine the S1 and S2 perms.  */
11819             *prot &= s2_prot;
11820 
11821             /* Combine the S1 and S2 cache attributes, if needed */
11822             if (!ret && cacheattrs != NULL) {
11823                 if (env->cp15.hcr_el2 & HCR_DC) {
11824                     /*
11825                      * HCR.DC forces the first stage attributes to
11826                      *  Normal Non-Shareable,
11827                      *  Inner Write-Back Read-Allocate Write-Allocate,
11828                      *  Outer Write-Back Read-Allocate Write-Allocate.
11829                      */
11830                     cacheattrs->attrs = 0xff;
11831                     cacheattrs->shareability = 0;
11832                 }
11833                 *cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2);
11834             }
11835 
11836             return ret;
11837         } else {
11838             /*
11839              * For non-EL2 CPUs a stage1+stage2 translation is just stage 1.
11840              */
11841             mmu_idx = stage_1_mmu_idx(mmu_idx);
11842         }
11843     }
11844 
11845     /* The page table entries may downgrade secure to non-secure, but
11846      * cannot upgrade a non-secure translation regime's attributes
11847      * to secure.
11848      */
11849     attrs->secure = regime_is_secure(env, mmu_idx);
11850     attrs->user = regime_is_user(env, mmu_idx);
11851 
11852     /* Fast Context Switch Extension. This doesn't exist at all in v8.
11853      * In v7 and earlier it affects all stage 1 translations.
11854      */
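          /* FCSEIDR holds the 7-bit PID in bits [31:25] (low bits are
           * zero), so this addition relocates VAs below 32MB into that
           * PID's 32MB slot of the modified virtual address space.
           */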
11855     if (address < 0x02000000 && mmu_idx != ARMMMUIdx_S2NS
11856         && !arm_feature(env, ARM_FEATURE_V8)) {
11857         if (regime_el(env, mmu_idx) == 3) {
11858             address += env->cp15.fcseidr_s;
11859         } else {
11860             address += env->cp15.fcseidr_ns;
11861         }
11862     }
11863 
11864     if (arm_feature(env, ARM_FEATURE_PMSA)) {
11865         bool ret;
11866         *page_size = TARGET_PAGE_SIZE;
11867 
11868         if (arm_feature(env, ARM_FEATURE_V8)) {
11869             /* PMSAv8 */
11870             ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
11871                                        phys_ptr, attrs, prot, page_size, fi);
11872         } else if (arm_feature(env, ARM_FEATURE_V7)) {
11873             /* PMSAv7 */
11874             ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
11875                                        phys_ptr, prot, page_size, fi);
11876         } else {
11877             /* Pre-v7 MPU */
11878             ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
11879                                        phys_ptr, prot, fi);
11880         }
11881         qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
11882                       " mmu_idx %u -> %s (prot %c%c%c)\n",
11883                       access_type == MMU_DATA_LOAD ? "reading" :
11884                       (access_type == MMU_DATA_STORE ? "writing" : "executing"),
11885                       (uint32_t)address, mmu_idx,
11886                       ret ? "Miss" : "Hit",
11887                       *prot & PAGE_READ ? 'r' : '-',
11888                       *prot & PAGE_WRITE ? 'w' : '-',
11889                       *prot & PAGE_EXEC ? 'x' : '-');
11890 
11891         return ret;
11892     }
11893 
11894     /* Definitely a real MMU, not an MPU */
11895 
11896     if (regime_translation_disabled(env, mmu_idx)) {
11897         /* MMU disabled. */
11898         *phys_ptr = address;
11899         *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
11900         *page_size = TARGET_PAGE_SIZE;
11901         return false;
11902     }
11903 
11904     if (regime_using_lpae_format(env, mmu_idx)) {
11905         return get_phys_addr_lpae(env, address, access_type, mmu_idx,
11906                                   phys_ptr, attrs, prot, page_size,
11907                                   fi, cacheattrs);
11908     } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
11909         return get_phys_addr_v6(env, address, access_type, mmu_idx,
11910                                 phys_ptr, attrs, prot, page_size, fi);
11911     } else {
11912         return get_phys_addr_v5(env, address, access_type, mmu_idx,
11913                                 phys_ptr, prot, page_size, fi);
11914     }
11915 }
11916 
11917 /* Walk the page table and (if the mapping exists) add the page
11918  * to the TLB. Return false on success, or true on failure. Populate
11919  * the fault info with the details of the failure.
11920  */
11921 bool arm_tlb_fill(CPUState *cs, vaddr address,
11922                   MMUAccessType access_type, int mmu_idx,
11923                   ARMMMUFaultInfo *fi)
11924 {
11925     ARMCPU *cpu = ARM_CPU(cs);
11926     CPUARMState *env = &cpu->env;
11927     hwaddr phys_addr;
11928     target_ulong page_size;
11929     int prot;
11930     int ret;
11931     MemTxAttrs attrs = {};
11932 
11933     ret = get_phys_addr(env, address, access_type,
11934                         core_to_arm_mmu_idx(env, mmu_idx), &phys_addr,
11935                         &attrs, &prot, &page_size, fi, NULL);
11936     if (!ret) {
11937         /*
11938          * Map a single [sub]page. Regions smaller than our declared
11939          * target page size are handled specially, so for those we
11940          * pass in the exact addresses.
11941          */
11942         if (page_size >= TARGET_PAGE_SIZE) {
11943             phys_addr &= TARGET_PAGE_MASK;
11944             address &= TARGET_PAGE_MASK;
11945         }
11946         tlb_set_page_with_attrs(cs, address, phys_addr, attrs,
11947                                 prot, mmu_idx, page_size);
11948         return false;
11949     }
11950 
11951     return ret;
11952 }
11953 
11954 hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
11955                                          MemTxAttrs *attrs)
11956 {
11957     ARMCPU *cpu = ARM_CPU(cs);
11958     CPUARMState *env = &cpu->env;
11959     hwaddr phys_addr;
11960     target_ulong page_size;
11961     int prot;
11962     bool ret;
11963     ARMMMUFaultInfo fi = {};
11964     ARMMMUIdx mmu_idx = arm_mmu_idx(env);
11965 
11966     *attrs = (MemTxAttrs) {};
11967 
11968     ret = get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &phys_addr,
11969                         attrs, &prot, &page_size, &fi, NULL);
11970 
11971     if (ret) {
11972         return -1;
11973     }
11974     return phys_addr;
11975 }
11976 
11977 uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
11978 {
11979     uint32_t mask;
11980     unsigned el = arm_current_el(env);
11981 
11982     /* First handle registers which unprivileged code can read */
11983 
11984     switch (reg) {
11985     case 0 ... 7: /* xPSR sub-fields */
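              /* SYSm 0..7: APSR, IAPSR, EAPSR, xPSR, (reserved), IPSR,
               * EPSR, IEPSR. Bit 0 selects IPSR, bit 1 EPSR (which reads
               * as zero here) and a clear bit 2 selects APSR.
               */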
11986         mask = 0;
11987         if ((reg & 1) && el) {
11988             mask |= XPSR_EXCP; /* IPSR (unpriv. reads as zero) */
11989         }
11990         if (!(reg & 4)) {
11991             mask |= XPSR_NZCV | XPSR_Q; /* APSR */
11992         }
11993         /* EPSR reads as zero */
11994         return xpsr_read(env) & mask;
11996     case 20: /* CONTROL */
11997         return env->v7m.control[env->v7m.secure];
11998     case 0x94: /* CONTROL_NS */
11999         /* We have to handle this here because unprivileged Secure code
12000          * can read the NS CONTROL register.
12001          */
12002         if (!env->v7m.secure) {
12003             return 0;
12004         }
12005         return env->v7m.control[M_REG_NS];
12006     }
12007 
12008     if (el == 0) {
12009         return 0; /* unprivileged reads others as zero */
12010     }
12011 
12012     if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
12013         switch (reg) {
12014         case 0x88: /* MSP_NS */
12015             if (!env->v7m.secure) {
12016                 return 0;
12017             }
12018             return env->v7m.other_ss_msp;
12019         case 0x89: /* PSP_NS */
12020             if (!env->v7m.secure) {
12021                 return 0;
12022             }
12023             return env->v7m.other_ss_psp;
12024         case 0x8a: /* MSPLIM_NS */
12025             if (!env->v7m.secure) {
12026                 return 0;
12027             }
12028             return env->v7m.msplim[M_REG_NS];
12029         case 0x8b: /* PSPLIM_NS */
12030             if (!env->v7m.secure) {
12031                 return 0;
12032             }
12033             return env->v7m.psplim[M_REG_NS];
12034         case 0x90: /* PRIMASK_NS */
12035             if (!env->v7m.secure) {
12036                 return 0;
12037             }
12038             return env->v7m.primask[M_REG_NS];
12039         case 0x91: /* BASEPRI_NS */
12040             if (!env->v7m.secure) {
12041                 return 0;
12042             }
12043             return env->v7m.basepri[M_REG_NS];
12044         case 0x93: /* FAULTMASK_NS */
12045             if (!env->v7m.secure) {
12046                 return 0;
12047             }
12048             return env->v7m.faultmask[M_REG_NS];
12049         case 0x98: /* SP_NS */
12050         {
12051             /* This gives the non-secure SP selected based on whether we're
12052              * currently in handler mode or not, using the NS CONTROL.SPSEL.
12053              */
12054             bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
12055 
12056             if (!env->v7m.secure) {
12057                 return 0;
12058             }
12059             if (!arm_v7m_is_handler_mode(env) && spsel) {
12060                 return env->v7m.other_ss_psp;
12061             } else {
12062                 return env->v7m.other_ss_msp;
12063             }
12064         }
12065         default:
12066             break;
12067         }
12068     }
12069 
12070     switch (reg) {
12071     case 8: /* MSP */
12072         return v7m_using_psp(env) ? env->v7m.other_sp : env->regs[13];
12073     case 9: /* PSP */
12074         return v7m_using_psp(env) ? env->regs[13] : env->v7m.other_sp;
12075     case 10: /* MSPLIM */
12076         if (!arm_feature(env, ARM_FEATURE_V8)) {
12077             goto bad_reg;
12078         }
12079         return env->v7m.msplim[env->v7m.secure];
12080     case 11: /* PSPLIM */
12081         if (!arm_feature(env, ARM_FEATURE_V8)) {
12082             goto bad_reg;
12083         }
12084         return env->v7m.psplim[env->v7m.secure];
12085     case 16: /* PRIMASK */
12086         return env->v7m.primask[env->v7m.secure];
12087     case 17: /* BASEPRI */
12088     case 18: /* BASEPRI_MAX */
12089         return env->v7m.basepri[env->v7m.secure];
12090     case 19: /* FAULTMASK */
12091         return env->v7m.faultmask[env->v7m.secure];
12092     default:
12093     bad_reg:
12094         qemu_log_mask(LOG_GUEST_ERROR, "Attempt to read unknown special"
12095                                        " register %d\n", reg);
12096         return 0;
12097     }
12098 }
12099 
12100 void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
12101 {
12102     /* We're passed bits [11..0] of the instruction; extract
12103      * SYSm and the mask bits.
12104      * Invalid combinations of SYSm and mask are UNPREDICTABLE;
12105      * we choose to treat them as if the mask bits were valid.
12106      * NB that the pseudocode 'mask' variable is bits [11..10],
12107      * whereas ours is [11..8].
12108      */
12109     uint32_t mask = extract32(maskreg, 8, 4);
12110     uint32_t reg = extract32(maskreg, 0, 8);
12111 
12112     if (arm_current_el(env) == 0 && reg > 7) {
12113         /* only xPSR sub-fields may be written by unprivileged code */
12114         return;
12115     }
12116 
12117     if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
12118         switch (reg) {
12119         case 0x88: /* MSP_NS */
12120             if (!env->v7m.secure) {
12121                 return;
12122             }
12123             env->v7m.other_ss_msp = val;
12124             return;
12125         case 0x89: /* PSP_NS */
12126             if (!env->v7m.secure) {
12127                 return;
12128             }
12129             env->v7m.other_ss_psp = val;
12130             return;
12131         case 0x8a: /* MSPLIM_NS */
12132             if (!env->v7m.secure) {
12133                 return;
12134             }
12135             env->v7m.msplim[M_REG_NS] = val & ~7;
12136             return;
12137         case 0x8b: /* PSPLIM_NS */
12138             if (!env->v7m.secure) {
12139                 return;
12140             }
12141             env->v7m.psplim[M_REG_NS] = val & ~7;
12142             return;
12143         case 0x90: /* PRIMASK_NS */
12144             if (!env->v7m.secure) {
12145                 return;
12146             }
12147             env->v7m.primask[M_REG_NS] = val & 1;
12148             return;
12149         case 0x91: /* BASEPRI_NS */
12150             if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) {
12151                 return;
12152             }
12153             env->v7m.basepri[M_REG_NS] = val & 0xff;
12154             return;
12155         case 0x93: /* FAULTMASK_NS */
12156             if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) {
12157                 return;
12158             }
12159             env->v7m.faultmask[M_REG_NS] = val & 1;
12160             return;
12161         case 0x94: /* CONTROL_NS */
12162             if (!env->v7m.secure) {
12163                 return;
12164             }
12165             write_v7m_control_spsel_for_secstate(env,
12166                                                  val & R_V7M_CONTROL_SPSEL_MASK,
12167                                                  M_REG_NS);
12168             if (arm_feature(env, ARM_FEATURE_M_MAIN)) {
12169                 env->v7m.control[M_REG_NS] &= ~R_V7M_CONTROL_NPRIV_MASK;
12170                 env->v7m.control[M_REG_NS] |= val & R_V7M_CONTROL_NPRIV_MASK;
12171             }
12172             return;
12173         case 0x98: /* SP_NS */
12174         {
12175             /* This gives the non-secure SP selected based on whether we're
12176              * currently in handler mode or not, using the NS CONTROL.SPSEL.
12177              */
12178             bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
12179             bool is_psp = !arm_v7m_is_handler_mode(env) && spsel;
12180             uint32_t limit;
12181 
12182             if (!env->v7m.secure) {
12183                 return;
12184             }
12185 
12186             limit = is_psp ? env->v7m.psplim[M_REG_NS] : env->v7m.msplim[M_REG_NS];
12187 
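            /* v8M stack limit check: an MSR to SP_NS below the limit faults */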
12188             if (val < limit) {
12189                 CPUState *cs = CPU(arm_env_get_cpu(env));
12190 
12191                 cpu_restore_state(cs, GETPC(), true);
12192                 raise_exception(env, EXCP_STKOF, 0, 1);
12193             }
12194 
12195             if (is_psp) {
12196                 env->v7m.other_ss_psp = val;
12197             } else {
12198                 env->v7m.other_ss_msp = val;
12199             }
12200             return;
12201         }
12202         default:
12203             break;
12204         }
12205     }
12206 
12207     switch (reg) {
12208     case 0 ... 7: /* xPSR sub-fields */
12209         /* only APSR is actually writable */
12210         if (!(reg & 4)) {
12211             uint32_t apsrmask = 0;
12212 
12213             if (mask & 8) {
12214                 apsrmask |= XPSR_NZCV | XPSR_Q;
12215             }
12216             if ((mask & 4) && arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
12217                 apsrmask |= XPSR_GE;
12218             }
12219             xpsr_write(env, val, apsrmask);
12220         }
12221         break;
12222     case 8: /* MSP */
12223         if (v7m_using_psp(env)) {
12224             env->v7m.other_sp = val;
12225         } else {
12226             env->regs[13] = val;
12227         }
12228         break;
12229     case 9: /* PSP */
12230         if (v7m_using_psp(env)) {
12231             env->regs[13] = val;
12232         } else {
12233             env->v7m.other_sp = val;
12234         }
12235         break;
12236     case 10: /* MSPLIM */
12237         if (!arm_feature(env, ARM_FEATURE_V8)) {
12238             goto bad_reg;
12239         }
12240         env->v7m.msplim[env->v7m.secure] = val & ~7;
12241         break;
12242     case 11: /* PSPLIM */
12243         if (!arm_feature(env, ARM_FEATURE_V8)) {
12244             goto bad_reg;
12245         }
12246         env->v7m.psplim[env->v7m.secure] = val & ~7;
12247         break;
12248     case 16: /* PRIMASK */
12249         env->v7m.primask[env->v7m.secure] = val & 1;
12250         break;
12251     case 17: /* BASEPRI */
12252         if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
12253             goto bad_reg;
12254         }
12255         env->v7m.basepri[env->v7m.secure] = val & 0xff;
12256         break;
12257     case 18: /* BASEPRI_MAX */
12258         if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
12259             goto bad_reg;
12260         }
12261         val &= 0xff;
12262         if (val != 0 && (val < env->v7m.basepri[env->v7m.secure]
12263                          || env->v7m.basepri[env->v7m.secure] == 0)) {
12264             env->v7m.basepri[env->v7m.secure] = val;
12265         }
12266         break;
12267     case 19: /* FAULTMASK */
12268         if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
12269             goto bad_reg;
12270         }
12271         env->v7m.faultmask[env->v7m.secure] = val & 1;
12272         break;
12273     case 20: /* CONTROL */
12274         /* Writing to the SPSEL bit only has an effect if we are in
12275          * thread mode; other bits can be updated by any privileged code.
12276          * write_v7m_control_spsel() deals with updating the SPSEL bit in
12277          * env->v7m.control, so we only need to update the others.
12278          * For v7M, we must just ignore explicit writes to SPSEL in handler
12279          * mode; for v8M the write is permitted but will have no effect.
12280          */
12281         if (arm_feature(env, ARM_FEATURE_V8) ||
12282             !arm_v7m_is_handler_mode(env)) {
12283             write_v7m_control_spsel(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0);
12284         }
12285         if (arm_feature(env, ARM_FEATURE_M_MAIN)) {
12286             env->v7m.control[env->v7m.secure] &= ~R_V7M_CONTROL_NPRIV_MASK;
12287             env->v7m.control[env->v7m.secure] |= val & R_V7M_CONTROL_NPRIV_MASK;
12288         }
12289         break;
12290     default:
12291     bad_reg:
12292         qemu_log_mask(LOG_GUEST_ERROR, "Attempt to write unknown special"
12293                                        " register %d\n", reg);
12294         return;
12295     }
12296 }
12297 
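/*
 * op encodes the TT variant: bit 0 selects the unprivileged form
 * (TTT/TTAT) and bit 1 the alternate-security form (TTA/TTAT).
 */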
12298 uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
12299 {
12300     /* Implement the TT instruction. op is bits [7:6] of the insn. */
12301     bool forceunpriv = op & 1;
12302     bool alt = op & 2;
12303     V8M_SAttributes sattrs = {};
12304     uint32_t tt_resp;
12305     bool r, rw, nsr, nsrw, mrvalid;
12306     int prot;
12307     ARMMMUFaultInfo fi = {};
12308     MemTxAttrs attrs = {};
12309     hwaddr phys_addr;
12310     ARMMMUIdx mmu_idx;
12311     uint32_t mregion;
12312     bool targetpriv;
12313     bool targetsec = env->v7m.secure;
12314     bool is_subpage;
12315 
12316     /* Work out which security state and privilege level we're
12317      * interested in...
12318      */
12319     if (alt) {
12320         targetsec = !targetsec;
12321     }
12322 
12323     if (forceunpriv) {
12324         targetpriv = false;
12325     } else {
12326         targetpriv = arm_v7m_is_handler_mode(env) ||
12327             !(env->v7m.control[targetsec] & R_V7M_CONTROL_NPRIV_MASK);
12328     }
12329 
12330     /* ...and then figure out which MMU index this is */
12331     mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targetsec, targetpriv);
12332 
12333     /* The MPU and SAU don't care about the access type for our
12334      * purposes, except that we must not claim to be an insn fetch,
12335      * so we arbitrarily call this a read.
12336      */
12337 
12338     /* MPU region info only available for privileged or if
12339      * inspecting the other MPU state.
12340      */
12341     if (arm_current_el(env) != 0 || alt) {
12342         /* We can ignore the return value as prot is always set */
12343         pmsav8_mpu_lookup(env, addr, MMU_DATA_LOAD, mmu_idx,
12344                           &phys_addr, &attrs, &prot, &is_subpage,
12345                           &fi, &mregion);
12346         if (mregion == -1) {
12347             mrvalid = false;
12348             mregion = 0;
12349         } else {
12350             mrvalid = true;
12351         }
12352         r = prot & PAGE_READ;
12353         rw = prot & PAGE_WRITE;
12354     } else {
12355         r = false;
12356         rw = false;
12357         mrvalid = false;
12358         mregion = 0;
12359     }
12360 
12361     if (env->v7m.secure) {
12362         v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
12363         nsr = sattrs.ns && r;
12364         nsrw = sattrs.ns && rw;
12365     } else {
12366         sattrs.ns = true;
12367         nsr = false;
12368         nsrw = false;
12369     }
12370 
12371     tt_resp = (sattrs.iregion << 24) |
12372         (sattrs.irvalid << 23) |
12373         ((!sattrs.ns) << 22) |
12374         (nsrw << 21) |
12375         (nsr << 20) |
12376         (rw << 19) |
12377         (r << 18) |
12378         (sattrs.srvalid << 17) |
12379         (mrvalid << 16) |
12380         (sattrs.sregion << 8) |
12381         mregion;
12382 
12383     return tt_resp;
12384 }
12385 
12386 #endif
12387 
12388 void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)
12389 {
12390     /* Implement DC ZVA, which zeroes a fixed-length block of memory.
12391      * Note that we do not implement the (architecturally mandated)
12392      * alignment fault for attempts to use this on Device memory
12393      * (which matches the usual QEMU behaviour of not implementing either
12394      * alignment faults or any memory attribute handling).
12395      */
12396 
12397     ARMCPU *cpu = arm_env_get_cpu(env);
12398     uint64_t blocklen = 4 << cpu->dcz_blocksize;
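    /* e.g. a dcz_blocksize of 4 gives 4 << 4 = 64-byte blocks */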
12399     uint64_t vaddr = vaddr_in & ~(blocklen - 1);
12400 
12401 #ifndef CONFIG_USER_ONLY
12402     {
12403         /* Slightly awkwardly, QEMU's TARGET_PAGE_SIZE may be less than
12404          * the block size so we might have to do more than one TLB lookup.
12405          * We know that in fact for any v8 CPU the page size is at least 4K
12406          * and the block size must be 2K or less, but TARGET_PAGE_SIZE is only
12407          * 1K as an artefact of legacy v5 subpage support being present in the
12408          * same QEMU executable.
12409          */
12410         int maxidx = DIV_ROUND_UP(blocklen, TARGET_PAGE_SIZE);
12411         void *hostaddr[maxidx];
12412         int try, i;
12413         unsigned mmu_idx = cpu_mmu_index(env, false);
12414         TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
12415 
12416         for (try = 0; try < 2; try++) {
12417 
12418             for (i = 0; i < maxidx; i++) {
12419                 hostaddr[i] = tlb_vaddr_to_host(env,
12420                                                 vaddr + TARGET_PAGE_SIZE * i,
12421                                                 1, mmu_idx);
12422                 if (!hostaddr[i]) {
12423                     break;
12424                 }
12425             }
12426             if (i == maxidx) {
12427                 /* If it's all in the TLB it's fair game for just writing to;
12428                  * we know we don't need to update dirty status, etc.
12429                  */
12430                 for (i = 0; i < maxidx - 1; i++) {
12431                     memset(hostaddr[i], 0, TARGET_PAGE_SIZE);
12432                 }
12433                 memset(hostaddr[i], 0, blocklen - (i * TARGET_PAGE_SIZE));
12434                 return;
12435             }
12436             /* OK, try a store and see if we can populate the TLB. This
12437              * might cause an exception if the memory isn't writable,
12438              * in which case we will longjmp out of here. We must for
12439              * this purpose use the actual register value passed to us
12440              * so that we get the fault address right.
12441              */
12442             helper_ret_stb_mmu(env, vaddr_in, 0, oi, GETPC());
12443             /* Now we can populate the other TLB entries, if any */
12444             for (i = 0; i < maxidx; i++) {
12445                 uint64_t va = vaddr + TARGET_PAGE_SIZE * i;
12446                 if (va != (vaddr_in & TARGET_PAGE_MASK)) {
12447                     helper_ret_stb_mmu(env, va, 0, oi, GETPC());
12448                 }
12449             }
12450         }
12451 
12452         /* Slow path (probably attempt to do this to an I/O device or
12453          * similar, or clearing of a block of code we have translations
12454          * cached for). Just do a series of byte writes as the architecture
12455          * demands. It's not worth trying to use a cpu_physical_memory_map(),
12456          * memset(), unmap() sequence here because:
12457          *  + we'd need to account for the blocksize being larger than a page
12458          *  + the direct-RAM access case is almost always going to be dealt
12459          *    with in the fastpath code above, so there's no speed benefit
12460          *  + we would have to deal with the map returning NULL because the
12461          *    bounce buffer was in use
12462          */
12463         for (i = 0; i < blocklen; i++) {
12464             helper_ret_stb_mmu(env, vaddr + i, 0, oi, GETPC());
12465         }
12466     }
12467 #else
12468     memset(g2h(vaddr), 0, blocklen);
12469 #endif
12470 }
12471 
12472 /* Note that signed overflow is undefined in C.  The following routines are
12473    careful to use unsigned types where modulo arithmetic is required.
12474    Failure to do so _will_ break on newer gcc.  */
12475 
12476 /* Signed saturating arithmetic.  */
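/*
 * The overflow tests below use the standard two's-complement trick:
 * addition overflowed iff the operands have the same sign but the
 * result's sign differs from a's; subtraction overflowed iff the
 * operands differ in sign and the result's sign differs from a's.
 * On overflow, saturate towards a's original sign.
 */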
12477 
12478 /* Perform 16-bit signed saturating addition.  */
12479 static inline uint16_t add16_sat(uint16_t a, uint16_t b)
12480 {
12481     uint16_t res;
12482 
12483     res = a + b;
12484     if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
12485         res = (a & 0x8000) ? 0x8000 : 0x7fff;
12489     }
12490     return res;
12491 }
12492 
12493 /* Perform 8-bit signed saturating addition.  */
12494 static inline uint8_t add8_sat(uint8_t a, uint8_t b)
12495 {
12496     uint8_t res;
12497 
12498     res = a + b;
12499     if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
12500         res = (a & 0x80) ? 0x80 : 0x7f;
12504     }
12505     return res;
12506 }
12507 
12508 /* Perform 16-bit signed saturating subtraction.  */
12509 static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
12510 {
12511     uint16_t res;
12512 
12513     res = a - b;
12514     if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
12515         res = (a & 0x8000) ? 0x8000 : 0x7fff;
12519     }
12520     return res;
12521 }
12522 
12523 /* Perform 8-bit signed saturating subtraction.  */
12524 static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
12525 {
12526     uint8_t res;
12527 
12528     res = a - b;
12529     if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
12530         res = (a & 0x80) ? 0x80 : 0x7f;
12534     }
12535     return res;
12536 }
12537 
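/*
 * op_addsub.h is a template: each inclusion below instantiates one
 * family of parallel add/subtract helpers from the ADD16/SUB16/ADD8/
 * SUB8 macros defined just before it, named with the given PFX
 * ('q' here, for the signed saturating forms).
 */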
12538 #define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
12539 #define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
12540 #define ADD8(a, b, n)  RESULT(add8_sat(a, b), n, 8);
12541 #define SUB8(a, b, n)  RESULT(sub8_sat(a, b), n, 8);
12542 #define PFX q
12543 
12544 #include "op_addsub.h"
12545 
12546 /* Unsigned saturating arithmetic.  */
12547 static inline uint16_t add16_usat(uint16_t a, uint16_t b)
12548 {
12549     uint16_t res = a + b;
12550     return (res < a) ? 0xffff : res;
12554 }
12555 
12556 static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
12557 {
12558     return (a > b) ? a - b : 0;
12562 }
12563 
12564 static inline uint8_t add8_usat(uint8_t a, uint8_t b)
12565 {
12566     uint8_t res = a + b;
12567     return (res < a) ? 0xff : res;
12571 }
12572 
12573 static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
12574 {
12575     return (a > b) ? a - b : 0;
12579 }
12580 
12581 #define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
12582 #define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
12583 #define ADD8(a, b, n)  RESULT(add8_usat(a, b), n, 8);
12584 #define SUB8(a, b, n)  RESULT(sub8_usat(a, b), n, 8);
12585 #define PFX uq
12586 
12587 #include "op_addsub.h"
12588 
12589 /* Signed modulo arithmetic.  */
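/*
 * These also compute the CPSR.GE bits, accumulated into the 'ge'
 * variable that op_addsub.h provides when ARITH_GE is defined: for
 * the signed ops a lane's GE bits are set when its result is >= 0;
 * the unsigned variants below set them on carry-out (add) or
 * no-borrow (subtract).
 */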
12590 #define SARITH16(a, b, n, op) do { \
12591     int32_t sum; \
12592     sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
12593     RESULT(sum, n, 16); \
12594     if (sum >= 0) \
12595         ge |= 3 << (n * 2); \
12596     } while (0)
12597 
12598 #define SARITH8(a, b, n, op) do { \
12599     int32_t sum; \
12600     sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
12601     RESULT(sum, n, 8); \
12602     if (sum >= 0) \
12603         ge |= 1 << n; \
12604     } while (0)
12605 
12606 
12607 #define ADD16(a, b, n) SARITH16(a, b, n, +)
12608 #define SUB16(a, b, n) SARITH16(a, b, n, -)
12609 #define ADD8(a, b, n)  SARITH8(a, b, n, +)
12610 #define SUB8(a, b, n)  SARITH8(a, b, n, -)
12611 #define PFX s
12612 #define ARITH_GE
12613 
12614 #include "op_addsub.h"
12615 
12616 /* Unsigned modulo arithmetic.  */
12617 #define ADD16(a, b, n) do { \
12618     uint32_t sum; \
12619     sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
12620     RESULT(sum, n, 16); \
12621     if ((sum >> 16) == 1) \
12622         ge |= 3 << (n * 2); \
12623     } while (0)
12624 
12625 #define ADD8(a, b, n) do { \
12626     uint32_t sum; \
12627     sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
12628     RESULT(sum, n, 8); \
12629     if ((sum >> 8) == 1) \
12630         ge |= 1 << n; \
12631     } while (0)
12632 
12633 #define SUB16(a, b, n) do { \
12634     uint32_t sum; \
12635     sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
12636     RESULT(sum, n, 16); \
12637     if ((sum >> 16) == 0) \
12638         ge |= 3 << (n * 2); \
12639     } while (0)
12640 
12641 #define SUB8(a, b, n) do { \
12642     uint32_t sum; \
12643     sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
12644     RESULT(sum, n, 8); \
12645     if ((sum >> 8) == 0) \
12646         ge |= 1 << n; \
12647     } while (0)
12648 
12649 #define PFX u
12650 #define ARITH_GE
12651 
12652 #include "op_addsub.h"
12653 
12654 /* Halved signed arithmetic.  */
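/*
 * The halving forms compute (a op b) >> 1 in a wider type, which
 * cannot overflow, so no saturation or GE flags are needed.
 */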
12655 #define ADD16(a, b, n) \
12656   RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
12657 #define SUB16(a, b, n) \
12658   RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
12659 #define ADD8(a, b, n) \
12660   RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
12661 #define SUB8(a, b, n) \
12662   RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
12663 #define PFX sh
12664 
12665 #include "op_addsub.h"
12666 
12667 /* Halved unsigned arithmetic.  */
12668 #define ADD16(a, b, n) \
12669   RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
12670 #define SUB16(a, b, n) \
12671   RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
12672 #define ADD8(a, b, n) \
12673   RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
12674 #define SUB8(a, b, n) \
12675   RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
12676 #define PFX uh
12677 
12678 #include "op_addsub.h"
12679 
12680 static inline uint8_t do_usad(uint8_t a, uint8_t b)
12681 {
12682     return (a > b) ? a - b : b - a;
12686 }
12687 
12688 /* Unsigned sum of absolute byte differences.  */
12689 uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
12690 {
12691     uint32_t sum;
12692     sum = do_usad(a, b);
12693     sum += do_usad(a >> 8, b >> 8);
12694     sum += do_usad(a >> 16, b >> 16);
12695     sum += do_usad(a >> 24, b >> 24);
12696     return sum;
12697 }
12698 
12699 /* For ARMv6 SEL instruction.  */
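/* Each set bit of 'flags' (the GE flags) selects that byte from a, else from b. */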
12700 uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
12701 {
12702     uint32_t mask;
12703 
12704     mask = 0;
12705     if (flags & 1)
12706         mask |= 0xff;
12707     if (flags & 2)
12708         mask |= 0xff00;
12709     if (flags & 4)
12710         mask |= 0xff0000;
12711     if (flags & 8)
12712         mask |= 0xff000000;
12713     return (a & mask) | (b & ~mask);
12714 }
12715 
12716 /* CRC helpers.
12717  * The upper bytes of val (above the number specified by 'bytes') must have
12718  * been zeroed out by the caller.
12719  */
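/*
 * These back the 32-bit CRC32{B,H,W} and CRC32C{B,H,W} instructions;
 * 'bytes' is the operand size: 1, 2 or 4.
 */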
12720 uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes)
12721 {
12722     uint8_t buf[4];
12723 
12724     stl_le_p(buf, val);
12725 
12726     /* zlib crc32 converts the accumulator and output to one's complement.  */
12727     return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
12728 }
12729 
12730 uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
12731 {
12732     uint8_t buf[4];
12733 
12734     stl_le_p(buf, val);
12735 
12736     /* Linux crc32c converts the output to one's complement.  */
12737     return crc32c(acc, buf, bytes) ^ 0xffffffff;
12738 }
12739 
12740 /* Return the exception level to which FP-disabled exceptions should
12741  * be taken, or 0 if FP is enabled.
12742  */
12743 int fp_exception_el(CPUARMState *env, int cur_el)
12744 {
12745 #ifndef CONFIG_USER_ONLY
12746     int fpen;
12747 
12748     /* CPACR and the CPTR registers don't exist before v6, so FP is
12749      * always accessible
12750      */
12751     if (!arm_feature(env, ARM_FEATURE_V6)) {
12752         return 0;
12753     }
12754 
12755     /* The CPACR controls traps to EL1, or PL1 if we're 32 bit:
12756      * 0, 2 : trap EL0 and EL1/PL1 accesses
12757      * 1    : trap only EL0 accesses
12758      * 3    : trap no accesses
12759      */
12760     fpen = extract32(env->cp15.cpacr_el1, 20, 2);
12761     switch (fpen) {
12762     case 0:
12763     case 2:
12764         if (cur_el == 0 || cur_el == 1) {
12765             /* Trap to PL1, which might be EL1 or EL3 */
12766             if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
12767                 return 3;
12768             }
12769             return 1;
12770         }
12771         if (cur_el == 3 && !is_a64(env)) {
12772             /* Secure PL1 running at EL3 */
12773             return 3;
12774         }
12775         break;
12776     case 1:
12777         if (cur_el == 0) {
12778             return 1;
12779         }
12780         break;
12781     case 3:
12782         break;
12783     }
12784 
12785     /* For the CPTR registers we don't need to guard with an ARM_FEATURE
12786      * check because zero bits in the registers mean "don't trap".
12787      */
12788 
12789     /* CPTR_EL2 : present in v7VE or v8 */
12790     if (cur_el <= 2 && extract32(env->cp15.cptr_el[2], 10, 1)
12791         && !arm_is_secure_below_el3(env)) {
12792         /* Trap FP ops at EL2, NS-EL1 or NS-EL0 to EL2 */
12793         return 2;
12794     }
12795 
12796     /* CPTR_EL3 : present in v8 */
12797     if (extract32(env->cp15.cptr_el[3], 10, 1)) {
12798         /* Trap all FP ops to EL3 */
12799         return 3;
12800     }
12801 #endif
12802     return 0;
12803 }
12804 
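/*
 * Return the MMU index for an M-profile CPU in the given security
 * state and privilege level, composed from ARM_MMU_IDX_M plus the
 * PRIV, NEGPRI and S flag bits.
 */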
12805 ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
12806                                                 bool secstate, bool priv)
12807 {
12808     ARMMMUIdx mmu_idx = ARM_MMU_IDX_M;
12809 
12810     if (priv) {
12811         mmu_idx |= ARM_MMU_IDX_M_PRIV;
12812     }
12813 
12814     if (armv7m_nvic_neg_prio_requested(env->nvic, secstate)) {
12815         mmu_idx |= ARM_MMU_IDX_M_NEGPRI;
12816     }
12817 
12818     if (secstate) {
12819         mmu_idx |= ARM_MMU_IDX_M_S;
12820     }
12821 
12822     return mmu_idx;
12823 }
12824 
12825 /* Return the MMU index for a v7M CPU in the specified security state */
12826 ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
12827 {
12828     bool priv = arm_current_el(env) != 0;
12829 
12830     return arm_v7m_mmu_idx_for_secstate_and_priv(env, secstate, priv);
12831 }
12832 
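/* Return the ARMMMUIdx for the current CPU state. */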
12833 ARMMMUIdx arm_mmu_idx(CPUARMState *env)
12834 {
12835     int el;
12836 
12837     if (arm_feature(env, ARM_FEATURE_M)) {
12838         return arm_v7m_mmu_idx_for_secstate(env, env->v7m.secure);
12839     }
12840 
12841     el = arm_current_el(env);
12842     if (el < 2 && arm_is_secure_below_el3(env)) {
12843         return ARMMMUIdx_S1SE0 + el;
12844     } else {
12845         return ARMMMUIdx_S12NSE0 + el;
12846     }
12847 }
12848 
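/* Return the core (TLB) index corresponding to the current MMU index. */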
12849 int cpu_mmu_index(CPUARMState *env, bool ifetch)
12850 {
12851     return arm_to_core_mmu_idx(arm_mmu_idx(env));
12852 }
12853 
12854 #ifndef CONFIG_USER_ONLY
12855 ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
12856 {
12857     return stage_1_mmu_idx(arm_mmu_idx(env));
12858 }
12859 #endif
12860 
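/*
 * Fill in the pc, cs_base and flags words used to look up or build a
 * TB for the current CPU state; cs_base is always 0 on Arm.
 */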
12861 void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
12862                           target_ulong *cs_base, uint32_t *pflags)
12863 {
12864     ARMMMUIdx mmu_idx = arm_mmu_idx(env);
12865     int current_el = arm_current_el(env);
12866     int fp_el = fp_exception_el(env, current_el);
12867     uint32_t flags = 0;
12868 
12869     if (is_a64(env)) {
12870         ARMCPU *cpu = arm_env_get_cpu(env);
12871         uint64_t sctlr;
12872 
12873         *pc = env->pc;
12874         flags = FIELD_DP32(flags, TBFLAG_ANY, AARCH64_STATE, 1);
12875 
12876         /* Get control bits for tagged addresses.  */
12877         {
12878             ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx);
12879             ARMVAParameters p0 = aa64_va_parameters_both(env, 0, stage1);
12880             int tbii, tbid;
12881 
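            /*
             * tbid: one bit per TTBR half, set when the top address
             * byte is ignored for data accesses; tbii: set when it is
             * ignored for insn fetches too (TBI set, TBID clear).
             */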
12882             /* FIXME: ARMv8.1-VHE S2 translation regime.  */
12883             if (regime_el(env, stage1) < 2) {
12884                 ARMVAParameters p1 = aa64_va_parameters_both(env, -1, stage1);
12885                 tbid = (p1.tbi << 1) | p0.tbi;
12886                 tbii = tbid & ~((p1.tbid << 1) | p0.tbid);
12887             } else {
12888                 tbid = p0.tbi;
12889                 tbii = tbid & !p0.tbid;
12890             }
12891 
12892             flags = FIELD_DP32(flags, TBFLAG_A64, TBII, tbii);
12893             flags = FIELD_DP32(flags, TBFLAG_A64, TBID, tbid);
12894         }
12895 
12896         if (cpu_isar_feature(aa64_sve, cpu)) {
12897             int sve_el = sve_exception_el(env, current_el);
12898             uint32_t zcr_len;
12899 
12900             /* If SVE is disabled, but FP is enabled,
12901              * then the effective len is 0.
12902              */
12903             if (sve_el != 0 && fp_el == 0) {
12904                 zcr_len = 0;
12905             } else {
12906                 zcr_len = sve_zcr_len_for_el(env, current_el);
12907             }
12908             flags = FIELD_DP32(flags, TBFLAG_A64, SVEEXC_EL, sve_el);
12909             flags = FIELD_DP32(flags, TBFLAG_A64, ZCR_LEN, zcr_len);
12910         }
12911 
12912         sctlr = arm_sctlr(env, current_el);
12913 
12914         if (cpu_isar_feature(aa64_pauth, cpu)) {
12915             /*
12916              * In order to save space in flags, we record only whether
12917              * pauth is "inactive", meaning all insns are implemented as
12918              * a nop, or "active" when some action must be performed.
12919              * The decision of which action to take is left to a helper.
12920              */
12921             if (sctlr & (SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)) {
12922                 flags = FIELD_DP32(flags, TBFLAG_A64, PAUTH_ACTIVE, 1);
12923             }
12924         }
12925 
12926         if (cpu_isar_feature(aa64_bti, cpu)) {
12927             /* Note that SCTLR_EL[23].BT == SCTLR_BT1.  */
12928             if (sctlr & (current_el == 0 ? SCTLR_BT0 : SCTLR_BT1)) {
12929                 flags = FIELD_DP32(flags, TBFLAG_A64, BT, 1);
12930             }
12931             flags = FIELD_DP32(flags, TBFLAG_A64, BTYPE, env->btype);
12932         }
12933     } else {
12934         *pc = env->regs[15];
12935         flags = FIELD_DP32(flags, TBFLAG_A32, THUMB, env->thumb);
12936         flags = FIELD_DP32(flags, TBFLAG_A32, VECLEN, env->vfp.vec_len);
12937         flags = FIELD_DP32(flags, TBFLAG_A32, VECSTRIDE, env->vfp.vec_stride);
12938         flags = FIELD_DP32(flags, TBFLAG_A32, CONDEXEC, env->condexec_bits);
12939         flags = FIELD_DP32(flags, TBFLAG_A32, SCTLR_B, arm_sctlr_b(env));
12940         flags = FIELD_DP32(flags, TBFLAG_A32, NS, !access_secure_reg(env));
12941         if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)
12942             || arm_el_is_aa64(env, 1)) {
12943             flags = FIELD_DP32(flags, TBFLAG_A32, VFPEN, 1);
12944         }
12945         flags = FIELD_DP32(flags, TBFLAG_A32, XSCALE_CPAR, env->cp15.c15_cpar);
12946     }
12947 
12948     flags = FIELD_DP32(flags, TBFLAG_ANY, MMUIDX, arm_to_core_mmu_idx(mmu_idx));
12949 
12950     /* The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
12951      * states defined in the ARM ARM for software singlestep:
12952      *  SS_ACTIVE   PSTATE.SS   State
12953      *     0            x       Inactive (the TB flag for SS is always 0)
12954      *     1            0       Active-pending
12955      *     1            1       Active-not-pending
12956      */
12957     if (arm_singlestep_active(env)) {
12958         flags = FIELD_DP32(flags, TBFLAG_ANY, SS_ACTIVE, 1);
12959         if (is_a64(env)) {
12960             if (env->pstate & PSTATE_SS) {
12961                 flags = FIELD_DP32(flags, TBFLAG_ANY, PSTATE_SS, 1);
12962             }
12963         } else {
12964             if (env->uncached_cpsr & PSTATE_SS) {
12965                 flags = FIELD_DP32(flags, TBFLAG_ANY, PSTATE_SS, 1);
12966             }
12967         }
12968     }
12969     if (arm_cpu_data_is_big_endian(env)) {
12970         flags = FIELD_DP32(flags, TBFLAG_ANY, BE_DATA, 1);
12971     }
12972     flags = FIELD_DP32(flags, TBFLAG_ANY, FPEXC_EL, fp_el);
12973 
12974     if (arm_v7m_is_handler_mode(env)) {
12975         flags = FIELD_DP32(flags, TBFLAG_A32, HANDLER, 1);
12976     }
12977 
12978     /* v8M always applies stack limit checks unless CCR.STKOFHFNMIGN
12979      * suppresses them while the requested execution priority is negative.
12980      */
12981     if (arm_feature(env, ARM_FEATURE_V8) &&
12982         arm_feature(env, ARM_FEATURE_M) &&
12983         !((mmu_idx & ARM_MMU_IDX_M_NEGPRI) &&
12984           (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKOFHFNMIGN_MASK))) {
12985         flags = FIELD_DP32(flags, TBFLAG_A32, STACKCHECK, 1);
12986     }
12987 
12988     *pflags = flags;
12989     *cs_base = 0;
12990 }
12991 
12992 #ifdef TARGET_AARCH64
12993 /*
12994  * The manual says that when SVE is enabled and VQ is widened the
12995  * implementation is allowed to zero the previously inaccessible
12996  * portion of the registers.  The corollary to that is that when
12997  * SVE is enabled and VQ is narrowed we are also allowed to zero
12998  * the now inaccessible portion of the registers.
12999  *
13000  * The intent of this is that no predicate bit beyond VQ is ever set.
13001  * Which means that some operations on predicate registers themselves
13002  * may operate on full uint64_t or even unrolled across the maximum
13003  * uint64_t[4].  Performing 4 bits of host arithmetic unconditionally
13004  * may well be cheaper than conditionals to restrict the operation
13005  * to the relevant portion of a uint16_t[16].
13006  */
13007 void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq)
13008 {
13009     int i, j;
13010     uint64_t pmask;
13011 
13012     assert(vq >= 1 && vq <= ARM_MAX_VQ);
13013     assert(vq <= arm_env_get_cpu(env)->sve_max_vq);
13014 
13015     /* Zap the high bits of the zregs.  */
13016     for (i = 0; i < 32; i++) {
13017         memset(&env->vfp.zregs[i].d[2 * vq], 0, 16 * (ARM_MAX_VQ - vq));
13018     }
13019 
13020     /* Zap the high bits of the pregs and ffr.  */
13021     pmask = 0;
13022     if (vq & 3) {
13023         pmask = ~(-1ULL << (16 * (vq & 3)));
13024     }
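    /*
     * Each p[] word holds 4 VQs' worth (4 * 16 bits) of predicate
     * state: e.g. vq == 5 keeps the low 16 bits of p[1] and zeroes
     * p[2] onwards.
     */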
13025     for (j = vq / 4; j < ARM_MAX_VQ / 4; j++) {
13026         for (i = 0; i < 17; ++i) {
13027             env->vfp.pregs[i].p[j] &= pmask;
13028         }
13029         pmask = 0;
13030     }
13031 }
13032 
13033 /*
13034  * Notice a change in SVE vector size when changing EL.
13035  */
13036 void aarch64_sve_change_el(CPUARMState *env, int old_el,
13037                            int new_el, bool el0_a64)
13038 {
13039     ARMCPU *cpu = arm_env_get_cpu(env);
13040     int old_len, new_len;
13041     bool old_a64, new_a64;
13042 
13043     /* Nothing to do if no SVE.  */
13044     if (!cpu_isar_feature(aa64_sve, cpu)) {
13045         return;
13046     }
13047 
13048     /* Nothing to do if FP is disabled in either EL.  */
13049     if (fp_exception_el(env, old_el) || fp_exception_el(env, new_el)) {
13050         return;
13051     }
13052 
13053     /*
13054      * DDI0584A.d sec 3.2: "If SVE instructions are disabled or trapped
13055      * at ELx, or not available because the EL is in AArch32 state, then
13056      * for all purposes other than a direct read, the ZCR_ELx.LEN field
13057      * has an effective value of 0".
13058      *
13059      * Consider EL2 (aa64, vq=4) -> EL0 (aa32) -> EL1 (aa64, vq=0).
13060      * If we ignore aa32 state, we would fail to see the vq4->vq0 transition
13061      * from EL2->EL1.  Thus we go ahead and narrow when entering aa32 so that
13062      * we already have the correct register contents when encountering the
13063      * vq0->vq0 transition between EL0->EL1.
13064      */
13065     old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64;
13066     old_len = (old_a64 && !sve_exception_el(env, old_el)
13067                ? sve_zcr_len_for_el(env, old_el) : 0);
13068     new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64;
13069     new_len = (new_a64 && !sve_exception_el(env, new_el)
13070                ? sve_zcr_len_for_el(env, new_el) : 0);
13071 
13072     /* When changing vector length, clear inaccessible state.  */
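    /* zcr_len values are vq - 1, hence the + 1 to convert back to a VQ. */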
13073     if (new_len < old_len) {
13074         aarch64_sve_narrow_vq(env, new_len + 1);
13075     }
13076 }
13077 #endif
13078