xref: /openbmc/qemu/target/arm/helper.c (revision ae3c12a0)
1 #include "qemu/osdep.h"
2 #include "qemu/units.h"
3 #include "target/arm/idau.h"
4 #include "trace.h"
5 #include "cpu.h"
6 #include "internals.h"
7 #include "exec/gdbstub.h"
8 #include "exec/helper-proto.h"
9 #include "qemu/host-utils.h"
10 #include "sysemu/arch_init.h"
11 #include "sysemu/sysemu.h"
12 #include "qemu/bitops.h"
13 #include "qemu/crc32c.h"
14 #include "qemu/qemu-print.h"
15 #include "exec/exec-all.h"
16 #include "exec/cpu_ldst.h"
17 #include "arm_ldst.h"
18 #include <zlib.h> /* For crc32 */
19 #include "exec/semihost.h"
20 #include "sysemu/cpus.h"
21 #include "sysemu/kvm.h"
22 #include "fpu/softfloat.h"
23 #include "qemu/range.h"
24 #include "qapi/qapi-commands-target.h"
25 
26 #define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
27 
28 #ifndef CONFIG_USER_ONLY
29 /* Cacheability and shareability attributes for a memory access */
30 typedef struct ARMCacheAttrs {
31     unsigned int attrs:8; /* as in the MAIR register encoding */
32     unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
33 } ARMCacheAttrs;
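
/*
 * For illustration, using the encodings named above: attrs follows the
 * MAIR byte format (e.g. 0x00 is Device-nGnRnE, 0xff is Normal memory,
 * Inner/Outer Write-Back cacheable) and shareability follows the
 * VMSAv8-64 SH field (0b00 Non-shareable, 0b10 Outer Shareable,
 * 0b11 Inner Shareable), so a typical cacheable RAM mapping might come
 * back from get_phys_addr() as
 *
 *     ARMCacheAttrs ram_attrs = { .attrs = 0xff, .shareability = 3 };
 */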
34 
35 static bool get_phys_addr(CPUARMState *env, target_ulong address,
36                           MMUAccessType access_type, ARMMMUIdx mmu_idx,
37                           hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
38                           target_ulong *page_size,
39                           ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);
40 
41 static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
42                                MMUAccessType access_type, ARMMMUIdx mmu_idx,
43                                hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
44                                target_ulong *page_size_ptr,
45                                ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);
46 
47 /* Security attributes for an address, as returned by v8m_security_lookup. */
48 typedef struct V8M_SAttributes {
49     bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */
50     bool ns;
51     bool nsc;
52     uint8_t sregion;
53     bool srvalid;
54     uint8_t iregion;
55     bool irvalid;
56 } V8M_SAttributes;
57 
58 static void v8m_security_lookup(CPUARMState *env, uint32_t address,
59                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
60                                 V8M_SAttributes *sattrs);
61 #endif
62 
63 static void switch_mode(CPUARMState *env, int mode);
64 
65 static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
66 {
67     int nregs;
68 
69     /* VFP data registers are always little-endian.  */
70     nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
71     if (reg < nregs) {
72         stq_le_p(buf, *aa32_vfp_dreg(env, reg));
73         return 8;
74     }
75     if (arm_feature(env, ARM_FEATURE_NEON)) {
76         /* Aliases for Q regs.  */
77         nregs += 16;
78         if (reg < nregs) {
79             uint64_t *q = aa32_vfp_qreg(env, reg - 32);
80             stq_le_p(buf, q[0]);
81             stq_le_p(buf + 8, q[1]);
82             return 16;
83         }
84     }
85     switch (reg - nregs) {
86     case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4;
87     case 1: stl_p(buf, vfp_get_fpscr(env)); return 4;
88     case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4;
89     }
90     return 0;
91 }
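
/*
 * For illustration, on a CPU with VFPv3 and Neon the gdbstub register
 * numbering handled above and in vfp_gdb_set_reg() below works out as:
 *
 *   reg  0..31  -> D0..D31  (8 bytes each, little-endian)
 *   reg 32..47  -> Q0..Q15  (16 bytes each, aliases of the D registers)
 *   reg 48      -> FPSID    (4 bytes)
 *   reg 49      -> FPSCR    (4 bytes)
 *   reg 50      -> FPEXC    (4 bytes)
 *
 * so e.g. vfp_gdb_get_reg(env, buf, 33) returns 16 and fills buf with
 * Q1, i.e. the D2/D3 pair.
 */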
92 
93 static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
94 {
95     int nregs;
96 
97     nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
98     if (reg < nregs) {
99         *aa32_vfp_dreg(env, reg) = ldq_le_p(buf);
100         return 8;
101     }
102     if (arm_feature(env, ARM_FEATURE_NEON)) {
103         nregs += 16;
104         if (reg < nregs) {
105             uint64_t *q = aa32_vfp_qreg(env, reg - 32);
106             q[0] = ldq_le_p(buf);
107             q[1] = ldq_le_p(buf + 8);
108             return 16;
109         }
110     }
111     switch (reg - nregs) {
112     case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
113     case 1: vfp_set_fpscr(env, ldl_p(buf)); return 4;
114     case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
115     }
116     return 0;
117 }
118 
119 static int aarch64_fpu_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
120 {
121     switch (reg) {
122     case 0 ... 31:
123         /* 128 bit FP register */
124         {
125             uint64_t *q = aa64_vfp_qreg(env, reg);
126             stq_le_p(buf, q[0]);
127             stq_le_p(buf + 8, q[1]);
128             return 16;
129         }
130     case 32:
131         /* FPSR */
132         stl_p(buf, vfp_get_fpsr(env));
133         return 4;
134     case 33:
135         /* FPCR */
136         stl_p(buf, vfp_get_fpcr(env));
137         return 4;
138     default:
139         return 0;
140     }
141 }
142 
143 static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
144 {
145     switch (reg) {
146     case 0 ... 31:
147         /* 128 bit FP register */
148         {
149             uint64_t *q = aa64_vfp_qreg(env, reg);
150             q[0] = ldq_le_p(buf);
151             q[1] = ldq_le_p(buf + 8);
152             return 16;
153         }
154     case 32:
155         /* FPSR */
156         vfp_set_fpsr(env, ldl_p(buf));
157         return 4;
158     case 33:
159         /* FPCR */
160         vfp_set_fpcr(env, ldl_p(buf));
161         return 4;
162     default:
163         return 0;
164     }
165 }
166 
167 static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
168 {
169     assert(ri->fieldoffset);
170     if (cpreg_field_is_64bit(ri)) {
171         return CPREG_FIELD64(env, ri);
172     } else {
173         return CPREG_FIELD32(env, ri);
174     }
175 }
176 
177 static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
178                       uint64_t value)
179 {
180     assert(ri->fieldoffset);
181     if (cpreg_field_is_64bit(ri)) {
182         CPREG_FIELD64(env, ri) = value;
183     } else {
184         CPREG_FIELD32(env, ri) = value;
185     }
186 }
187 
188 static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
189 {
190     return (char *)env + ri->fieldoffset;
191 }
192 
193 uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
194 {
195     /* Raw read of a coprocessor register (as needed for migration, etc). */
196     if (ri->type & ARM_CP_CONST) {
197         return ri->resetvalue;
198     } else if (ri->raw_readfn) {
199         return ri->raw_readfn(env, ri);
200     } else if (ri->readfn) {
201         return ri->readfn(env, ri);
202     } else {
203         return raw_read(env, ri);
204     }
205 }
206 
207 static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
208                              uint64_t v)
209 {
210     /* Raw write of a coprocessor register (as needed for migration, etc).
211      * Note that constant registers are treated as write-ignored; the
212      * caller should check for success by whether a readback gives the
213      * value written.
214      */
215     if (ri->type & ARM_CP_CONST) {
216         return;
217     } else if (ri->raw_writefn) {
218         ri->raw_writefn(env, ri, v);
219     } else if (ri->writefn) {
220         ri->writefn(env, ri, v);
221     } else {
222         raw_write(env, ri, v);
223     }
224 }
225 
226 static int arm_gdb_get_sysreg(CPUARMState *env, uint8_t *buf, int reg)
227 {
228     ARMCPU *cpu = arm_env_get_cpu(env);
229     const ARMCPRegInfo *ri;
230     uint32_t key;
231 
232     key = cpu->dyn_xml.cpregs_keys[reg];
233     ri = get_arm_cp_reginfo(cpu->cp_regs, key);
234     if (ri) {
235         if (cpreg_field_is_64bit(ri)) {
236             return gdb_get_reg64(buf, (uint64_t)read_raw_cp_reg(env, ri));
237         } else {
238             return gdb_get_reg32(buf, (uint32_t)read_raw_cp_reg(env, ri));
239         }
240     }
241     return 0;
242 }
243 
244 static int arm_gdb_set_sysreg(CPUARMState *env, uint8_t *buf, int reg)
245 {
246     return 0;
247 }
248 
249 static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
250 {
251    /* Return true if the regdef would cause an assertion if you called
252     * read_raw_cp_reg() or write_raw_cp_reg() on it (i.e. if it is a
253     * program bug for it not to have the NO_RAW flag).
254     * NB that returning false here doesn't necessarily mean that calling
255     * read/write_raw_cp_reg() is safe, because we can't distinguish "has
256     * read/write access functions which are safe for raw use" from "has
257     * read/write access functions which have side effects but has forgotten
258     * to provide raw access functions".
259     * The tests here line up with the conditions in read/write_raw_cp_reg()
260     * and assertions in raw_read()/raw_write().
261     */
262     if ((ri->type & ARM_CP_CONST) ||
263         ri->fieldoffset ||
264         ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
265         return false;
266     }
267     return true;
268 }
269 
270 bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync)
271 {
272     /* Write the coprocessor state from cpu->env to the (index,value) list. */
273     int i;
274     bool ok = true;
275 
276     for (i = 0; i < cpu->cpreg_array_len; i++) {
277         uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
278         const ARMCPRegInfo *ri;
279         uint64_t newval;
280 
281         ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
282         if (!ri) {
283             ok = false;
284             continue;
285         }
286         if (ri->type & ARM_CP_NO_RAW) {
287             continue;
288         }
289 
290         newval = read_raw_cp_reg(&cpu->env, ri);
291         if (kvm_sync) {
292             /*
293              * Only sync if the previous list->cpustate sync succeeded.
294              * Rather than tracking the success/failure state for every
295              * item in the list, we just recheck "does the raw write we must
296              * have made in write_list_to_cpustate() read back OK" here.
297              */
298             uint64_t oldval = cpu->cpreg_values[i];
299 
300             if (oldval == newval) {
301                 continue;
302             }
303 
304             write_raw_cp_reg(&cpu->env, ri, oldval);
305             if (read_raw_cp_reg(&cpu->env, ri) != oldval) {
306                 continue;
307             }
308 
309             write_raw_cp_reg(&cpu->env, ri, newval);
310         }
311         cpu->cpreg_values[i] = newval;
312     }
313     return ok;
314 }
315 
316 bool write_list_to_cpustate(ARMCPU *cpu)
317 {
318     int i;
319     bool ok = true;
320 
321     for (i = 0; i < cpu->cpreg_array_len; i++) {
322         uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
323         uint64_t v = cpu->cpreg_values[i];
324         const ARMCPRegInfo *ri;
325 
326         ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
327         if (!ri) {
328             ok = false;
329             continue;
330         }
331         if (ri->type & ARM_CP_NO_RAW) {
332             continue;
333         }
334         /* Write value and confirm it reads back as written
335          * (to catch read-only registers and partially read-only
336          * registers where the incoming migration value doesn't match)
337          */
338         write_raw_cp_reg(&cpu->env, ri, v);
339         if (read_raw_cp_reg(&cpu->env, ri) != v) {
340             ok = false;
341         }
342     }
343     return ok;
344 }
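
/*
 * A rough sketch of how these list helpers fit together: the
 * (index, value) pairs in cpu->cpreg_indexes[] / cpu->cpreg_values[]
 * are the canonical form used for migration and for syncing with KVM.
 * The outgoing side does something like
 *
 *     write_cpustate_to_list(cpu, false);
 *
 * to snapshot the emulated registers into the list, and the incoming
 * side calls
 *
 *     write_list_to_cpustate(cpu);
 *
 * to push the received values back, relying on the read-back check
 * above to catch read-only or mismatched registers.  The kvm_sync
 * argument covers the KVM case, where the list also has to stay in
 * step with the in-kernel register state.
 */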
345 
346 static void add_cpreg_to_list(gpointer key, gpointer opaque)
347 {
348     ARMCPU *cpu = opaque;
349     uint64_t regidx;
350     const ARMCPRegInfo *ri;
351 
352     regidx = *(uint32_t *)key;
353     ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
354 
355     if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
356         cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
357         /* The value array need not be initialized at this point */
358         cpu->cpreg_array_len++;
359     }
360 }
361 
362 static void count_cpreg(gpointer key, gpointer opaque)
363 {
364     ARMCPU *cpu = opaque;
365     uint64_t regidx;
366     const ARMCPRegInfo *ri;
367 
368     regidx = *(uint32_t *)key;
369     ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
370 
371     if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
372         cpu->cpreg_array_len++;
373     }
374 }
375 
376 static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
377 {
378     uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
379     uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);
380 
381     if (aidx > bidx) {
382         return 1;
383     }
384     if (aidx < bidx) {
385         return -1;
386     }
387     return 0;
388 }
389 
390 void init_cpreg_list(ARMCPU *cpu)
391 {
392     /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
393      * Note that we require cpreg_tuples[] to be sorted by key ID.
394      */
395     GList *keys;
396     int arraylen;
397 
398     keys = g_hash_table_get_keys(cpu->cp_regs);
399     keys = g_list_sort(keys, cpreg_key_compare);
400 
401     cpu->cpreg_array_len = 0;
402 
403     g_list_foreach(keys, count_cpreg, cpu);
404 
405     arraylen = cpu->cpreg_array_len;
406     cpu->cpreg_indexes = g_new(uint64_t, arraylen);
407     cpu->cpreg_values = g_new(uint64_t, arraylen);
408     cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
409     cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
410     cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
411     cpu->cpreg_array_len = 0;
412 
413     g_list_foreach(keys, add_cpreg_to_list, cpu);
414 
415     assert(cpu->cpreg_array_len == arraylen);
416 
417     g_list_free(keys);
418 }
419 
420 /*
421  * Some registers are not accessible if EL3.NS=0 and EL3 is using AArch32 but
422  * they are accessible when EL3 is using AArch64 regardless of EL3.NS.
423  *
424  * access_el3_aa32ns: Used to check AArch32 register views.
425  * access_el3_aa32ns_aa64any: Used to check both AArch32/64 register views.
426  */
427 static CPAccessResult access_el3_aa32ns(CPUARMState *env,
428                                         const ARMCPRegInfo *ri,
429                                         bool isread)
430 {
431     bool secure = arm_is_secure_below_el3(env);
432 
433     assert(!arm_el_is_aa64(env, 3));
434     if (secure) {
435         return CP_ACCESS_TRAP_UNCATEGORIZED;
436     }
437     return CP_ACCESS_OK;
438 }
439 
440 static CPAccessResult access_el3_aa32ns_aa64any(CPUARMState *env,
441                                                 const ARMCPRegInfo *ri,
442                                                 bool isread)
443 {
444     if (!arm_el_is_aa64(env, 3)) {
445         return access_el3_aa32ns(env, ri, isread);
446     }
447     return CP_ACCESS_OK;
448 }
449 
450 /* Some secure-only AArch32 registers trap to EL3 if used from
451  * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
452  * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
453  * We assume that the .access field is set to PL1_RW.
454  */
455 static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
456                                             const ARMCPRegInfo *ri,
457                                             bool isread)
458 {
459     if (arm_current_el(env) == 3) {
460         return CP_ACCESS_OK;
461     }
462     if (arm_is_secure_below_el3(env)) {
463         return CP_ACCESS_TRAP_EL3;
464     }
465     /* This will be EL1 NS and EL2 NS, which just UNDEF */
466     return CP_ACCESS_TRAP_UNCATEGORIZED;
467 }
468 
469 /* Check for traps to "powerdown debug" registers, which are controlled
470  * by MDCR.TDOSA
471  */
472 static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
473                                    bool isread)
474 {
475     int el = arm_current_el(env);
476     bool mdcr_el2_tdosa = (env->cp15.mdcr_el2 & MDCR_TDOSA) ||
477         (env->cp15.mdcr_el2 & MDCR_TDE) ||
478         (arm_hcr_el2_eff(env) & HCR_TGE);
479 
480     if (el < 2 && mdcr_el2_tdosa && !arm_is_secure_below_el3(env)) {
481         return CP_ACCESS_TRAP_EL2;
482     }
483     if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) {
484         return CP_ACCESS_TRAP_EL3;
485     }
486     return CP_ACCESS_OK;
487 }
488 
489 /* Check for traps to "debug ROM" registers, which are controlled
490  * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3.
491  */
492 static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri,
493                                   bool isread)
494 {
495     int el = arm_current_el(env);
496     bool mdcr_el2_tdra = (env->cp15.mdcr_el2 & MDCR_TDRA) ||
497         (env->cp15.mdcr_el2 & MDCR_TDE) ||
498         (arm_hcr_el2_eff(env) & HCR_TGE);
499 
500     if (el < 2 && mdcr_el2_tdra && !arm_is_secure_below_el3(env)) {
501         return CP_ACCESS_TRAP_EL2;
502     }
503     if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
504         return CP_ACCESS_TRAP_EL3;
505     }
506     return CP_ACCESS_OK;
507 }
508 
509 /* Check for traps to general debug registers, which are controlled
510  * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3.
511  */
512 static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
513                                   bool isread)
514 {
515     int el = arm_current_el(env);
516     bool mdcr_el2_tda = (env->cp15.mdcr_el2 & MDCR_TDA) ||
517         (env->cp15.mdcr_el2 & MDCR_TDE) ||
518         (arm_hcr_el2_eff(env) & HCR_TGE);
519 
520     if (el < 2 && mdcr_el2_tda && !arm_is_secure_below_el3(env)) {
521         return CP_ACCESS_TRAP_EL2;
522     }
523     if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
524         return CP_ACCESS_TRAP_EL3;
525     }
526     return CP_ACCESS_OK;
527 }
528 
529 /* Check for traps to performance monitor registers, which are controlled
530  * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
531  */
532 static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
533                                  bool isread)
534 {
535     int el = arm_current_el(env);
536 
537     if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
538         && !arm_is_secure_below_el3(env)) {
539         return CP_ACCESS_TRAP_EL2;
540     }
541     if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
542         return CP_ACCESS_TRAP_EL3;
543     }
544     return CP_ACCESS_OK;
545 }
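
/*
 * The four accessfns above share one shape: an access from EL0/EL1 in
 * Non-secure state traps to EL2 when the relevant MDCR_EL2 bit (TDOSA,
 * TDRA, TDA or TPM) is set, or, for the debug ones, when MDCR_EL2.TDE
 * or HCR_EL2.TGE is in force; an access from below EL3 traps to EL3
 * when the corresponding MDCR_EL3 bit (TDOSA, TDA or TPM) is set;
 * otherwise the access is allowed.  For example, an EL1 read of a
 * performance monitor register with MDCR_EL2.TPM set in Non-secure
 * state returns CP_ACCESS_TRAP_EL2.
 */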
546 
547 static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
548 {
549     ARMCPU *cpu = arm_env_get_cpu(env);
550 
551     raw_write(env, ri, value);
552     tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */
553 }
554 
555 static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
556 {
557     ARMCPU *cpu = arm_env_get_cpu(env);
558 
559     if (raw_read(env, ri) != value) {
560         /* Unlike real hardware the qemu TLB uses virtual addresses,
561          * not modified virtual addresses, so this causes a TLB flush.
562          */
563         tlb_flush(CPU(cpu));
564         raw_write(env, ri, value);
565     }
566 }
567 
568 static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
569                              uint64_t value)
570 {
571     ARMCPU *cpu = arm_env_get_cpu(env);
572 
573     if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA)
574         && !extended_addresses_enabled(env)) {
575         /* For VMSA (when not using the LPAE long descriptor page table
576          * format) this register includes the ASID, so do a TLB flush.
577          * For PMSA it is purely a process ID and no action is needed.
578          */
579         tlb_flush(CPU(cpu));
580     }
581     raw_write(env, ri, value);
582 }
583 
584 /* IS variants of TLB operations must affect all cores */
585 static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
586                              uint64_t value)
587 {
588     CPUState *cs = ENV_GET_CPU(env);
589 
590     tlb_flush_all_cpus_synced(cs);
591 }
592 
593 static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
594                              uint64_t value)
595 {
596     CPUState *cs = ENV_GET_CPU(env);
597 
598     tlb_flush_all_cpus_synced(cs);
599 }
600 
601 static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
602                              uint64_t value)
603 {
604     CPUState *cs = ENV_GET_CPU(env);
605 
606     tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
607 }
608 
609 static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
610                              uint64_t value)
611 {
612     CPUState *cs = ENV_GET_CPU(env);
613 
614     tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
615 }
616 
617 /*
618  * Non-IS variants of TLB operations are upgraded to
619  * IS versions if we are at NS EL1 and HCR_EL2.FB is set to
620  * force broadcast of these operations.
621  */
622 static bool tlb_force_broadcast(CPUARMState *env)
623 {
624     return (env->cp15.hcr_el2 & HCR_FB) &&
625         arm_current_el(env) == 1 && arm_is_secure_below_el3(env);
626 }
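
/*
 * For example, when tlb_force_broadcast() returns true a TLBIALL
 * issued at EL1 is handled below exactly as if the guest had executed
 * TLBIALLIS: tlbiall_write() hands the operation to tlbiall_is_write(),
 * which flushes the TLBs of all CPUs rather than just the local one.
 */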
627 
628 static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
629                           uint64_t value)
630 {
631     /* Invalidate all (TLBIALL) */
632     ARMCPU *cpu = arm_env_get_cpu(env);
633 
634     if (tlb_force_broadcast(env)) {
635         tlbiall_is_write(env, NULL, value);
636         return;
637     }
638 
639     tlb_flush(CPU(cpu));
640 }
641 
642 static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
643                           uint64_t value)
644 {
645     /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
646     ARMCPU *cpu = arm_env_get_cpu(env);
647 
648     if (tlb_force_broadcast(env)) {
649         tlbimva_is_write(env, NULL, value);
650         return;
651     }
652 
653     tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
654 }
655 
656 static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
657                            uint64_t value)
658 {
659     /* Invalidate by ASID (TLBIASID) */
660     ARMCPU *cpu = arm_env_get_cpu(env);
661 
662     if (tlb_force_broadcast(env)) {
663         tlbiasid_is_write(env, NULL, value);
664         return;
665     }
666 
667     tlb_flush(CPU(cpu));
668 }
669 
670 static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
671                            uint64_t value)
672 {
673     /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
674     ARMCPU *cpu = arm_env_get_cpu(env);
675 
676     if (tlb_force_broadcast(env)) {
677         tlbimvaa_is_write(env, NULL, value);
678         return;
679     }
680 
681     tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
682 }
683 
684 static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
685                                uint64_t value)
686 {
687     CPUState *cs = ENV_GET_CPU(env);
688 
689     tlb_flush_by_mmuidx(cs,
690                         ARMMMUIdxBit_S12NSE1 |
691                         ARMMMUIdxBit_S12NSE0 |
692                         ARMMMUIdxBit_S2NS);
693 }
694 
695 static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
696                                   uint64_t value)
697 {
698     CPUState *cs = ENV_GET_CPU(env);
699 
700     tlb_flush_by_mmuidx_all_cpus_synced(cs,
701                                         ARMMMUIdxBit_S12NSE1 |
702                                         ARMMMUIdxBit_S12NSE0 |
703                                         ARMMMUIdxBit_S2NS);
704 }
705 
706 static void tlbiipas2_write(CPUARMState *env, const ARMCPRegInfo *ri,
707                             uint64_t value)
708 {
709     /* Invalidate by IPA. This has to invalidate any structures that
710      * contain only stage 2 translation information, but does not need
711      * to apply to structures that contain combined stage 1 and stage 2
712      * translation information.
713      * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
714      */
715     CPUState *cs = ENV_GET_CPU(env);
716     uint64_t pageaddr;
717 
718     if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
719         return;
720     }
721 
722     pageaddr = sextract64(value << 12, 0, 40);
723 
724     tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS);
725 }
726 
727 static void tlbiipas2_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
728                                uint64_t value)
729 {
730     CPUState *cs = ENV_GET_CPU(env);
731     uint64_t pageaddr;
732 
733     if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
734         return;
735     }
736 
737     pageaddr = sextract64(value << 12, 0, 40);
738 
739     tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
740                                              ARMMMUIdxBit_S2NS);
741 }
742 
743 static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
744                               uint64_t value)
745 {
746     CPUState *cs = ENV_GET_CPU(env);
747 
748     tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2);
749 }
750 
751 static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
752                                  uint64_t value)
753 {
754     CPUState *cs = ENV_GET_CPU(env);
755 
756     tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2);
757 }
758 
759 static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
760                               uint64_t value)
761 {
762     CPUState *cs = ENV_GET_CPU(env);
763     uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);
764 
765     tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2);
766 }
767 
768 static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
769                                  uint64_t value)
770 {
771     CPUState *cs = ENV_GET_CPU(env);
772     uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);
773 
774     tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
775                                              ARMMMUIdxBit_S1E2);
776 }
777 
778 static const ARMCPRegInfo cp_reginfo[] = {
779     /* Define the secure and non-secure FCSE identifier CP registers
780      * separately because there is no secure bank in V8 (no _EL3).  This allows
781      * the secure register to be properly reset and migrated. There is also no
782      * v8 EL1 version of the register so the non-secure instance stands alone.
783      */
784     { .name = "FCSEIDR",
785       .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
786       .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
787       .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
788       .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
789     { .name = "FCSEIDR_S",
790       .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
791       .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
792       .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
793       .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
794     /* Define the secure and non-secure context identifier CP registers
795      * separately because there is no secure bank in V8 (no _EL3).  This allows
796      * the secure register to be properly reset and migrated.  In the
797      * non-secure case, the 32-bit register will have reset and migration
798      * disabled during registration as it is handled by the 64-bit instance.
799      */
800     { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
801       .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
802       .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
803       .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
804       .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
805     { .name = "CONTEXTIDR_S", .state = ARM_CP_STATE_AA32,
806       .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
807       .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
808       .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
809       .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
810     REGINFO_SENTINEL
811 };
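
/*
 * For illustration, the encoding fields above map directly onto the
 * guest instruction: the FCSEIDR entry (cp = 15, opc1 = 0, crn = 13,
 * crm = 0, opc2 = 0) is what an AArch32 guest reaches with
 * "MRC p15, 0, <Rt>, c13, c0, 0", and CONTEXTIDR_EL1, being
 * ARM_CP_STATE_BOTH, is also reachable from AArch64 as
 * "MRS <Xt>, CONTEXTIDR_EL1" through opc0 = 3, opc1 = 0, crn = 13,
 * crm = 0, opc2 = 1.
 */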
812 
813 static const ARMCPRegInfo not_v8_cp_reginfo[] = {
814     /* NB: Some of these registers exist in v8 but with more precise
815      * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
816      */
817     /* MMU Domain access control / MPU write buffer control */
818     { .name = "DACR",
819       .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
820       .access = PL1_RW, .resetvalue = 0,
821       .writefn = dacr_write, .raw_writefn = raw_write,
822       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
823                              offsetoflow32(CPUARMState, cp15.dacr_ns) } },
824     /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
825      * For v6 and v5, these mappings are overly broad.
826      */
827     { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
828       .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
829     { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
830       .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
831     { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
832       .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
833     { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
834       .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
835     /* Cache maintenance ops; some of this space may be overridden later. */
836     { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
837       .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
838       .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
839     REGINFO_SENTINEL
840 };
841 
842 static const ARMCPRegInfo not_v6_cp_reginfo[] = {
843     /* Not all pre-v6 cores implemented this WFI, so this is slightly
844      * over-broad.
845      */
846     { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
847       .access = PL1_W, .type = ARM_CP_WFI },
848     REGINFO_SENTINEL
849 };
850 
851 static const ARMCPRegInfo not_v7_cp_reginfo[] = {
852     /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
853      * is UNPREDICTABLE; we choose to NOP as most implementations do).
854      */
855     { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
856       .access = PL1_W, .type = ARM_CP_WFI },
857     /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
858      * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
859      * OMAPCP will override this space.
860      */
861     { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
862       .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
863       .resetvalue = 0 },
864     { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
865       .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
866       .resetvalue = 0 },
867     /* v6 doesn't have the cache ID registers but Linux reads them anyway */
868     { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
869       .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
870       .resetvalue = 0 },
871     /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
872      * implementing it as RAZ means the "debug architecture version" bits
873      * will read as a reserved value, which should cause Linux to not try
874      * to use the debug hardware.
875      */
876     { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
877       .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
878     /* MMU TLB control. Note that the wildcarding means we cover not just
879      * the unified TLB ops but also the dside/iside/inner-shareable variants.
880      */
881     { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
882       .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
883       .type = ARM_CP_NO_RAW },
884     { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
885       .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
886       .type = ARM_CP_NO_RAW },
887     { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
888       .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
889       .type = ARM_CP_NO_RAW },
890     { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
891       .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
892       .type = ARM_CP_NO_RAW },
893     { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
894       .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
895     { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
896       .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
897     REGINFO_SENTINEL
898 };
899 
900 static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
901                         uint64_t value)
902 {
903     uint32_t mask = 0;
904 
905     /* In ARMv8 most bits of CPACR_EL1 are RES0. */
906     if (!arm_feature(env, ARM_FEATURE_V8)) {
907         /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
908          * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
909          * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
910          */
911         if (arm_feature(env, ARM_FEATURE_VFP)) {
912             /* VFP coprocessor: cp10 & cp11 [23:20] */
913             mask |= (1 << 31) | (1 << 30) | (0xf << 20);
914 
915             if (!arm_feature(env, ARM_FEATURE_NEON)) {
916                 /* ASEDIS [31] bit is RAO/WI */
917                 value |= (1 << 31);
918             }
919 
920             /* VFPv3 and upwards with NEON implement 32 double precision
921              * registers (D0-D31).
922              */
923             if (!arm_feature(env, ARM_FEATURE_NEON) ||
924                     !arm_feature(env, ARM_FEATURE_VFP3)) {
925                 /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
926                 value |= (1 << 30);
927             }
928         }
929         value &= mask;
930     }
931     env->cp15.cpacr_el1 = value;
932 }
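
/*
 * Worked example of the masking above: on a v7 CPU with VFP, Neon and
 * VFPv3 the writable bits are ASEDIS [31], D32DIS [30] and the cp10/11
 * access fields [23:20], i.e. mask == 0xc0f00000, so writing 0xffffffff
 * leaves cpacr_el1 == 0xc0f00000.  With VFP but no Neon, ASEDIS and
 * D32DIS read back as one whatever was written, and with no VFP at all
 * the mask stays 0 and the whole register reads as zero.
 */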
933 
934 static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
935 {
936     /* Call cpacr_write() so that we reset with the correct RAO bits set
937      * for our CPU features.
938      */
939     cpacr_write(env, ri, 0);
940 }
941 
942 static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
943                                    bool isread)
944 {
945     if (arm_feature(env, ARM_FEATURE_V8)) {
946         /* Check if CPACR accesses are to be trapped to EL2 */
947         if (arm_current_el(env) == 1 &&
948             (env->cp15.cptr_el[2] & CPTR_TCPAC) && !arm_is_secure(env)) {
949             return CP_ACCESS_TRAP_EL2;
950         /* Check if CPACR accesses are to be trapped to EL3 */
951         } else if (arm_current_el(env) < 3 &&
952                    (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
953             return CP_ACCESS_TRAP_EL3;
954         }
955     }
956 
957     return CP_ACCESS_OK;
958 }
959 
960 static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
961                                   bool isread)
962 {
963     /* Check if CPTR accesses are set to trap to EL3 */
964     if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
965         return CP_ACCESS_TRAP_EL3;
966     }
967 
968     return CP_ACCESS_OK;
969 }
970 
971 static const ARMCPRegInfo v6_cp_reginfo[] = {
972     /* prefetch by MVA in v6, NOP in v7 */
973     { .name = "MVA_prefetch",
974       .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
975       .access = PL1_W, .type = ARM_CP_NOP },
976     /* We need to break the TB after ISB to execute self-modifying code
977      * correctly and also to take any pending interrupts immediately.
978      * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag.
979      */
980     { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
981       .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
982     { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
983       .access = PL0_W, .type = ARM_CP_NOP },
984     { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
985       .access = PL0_W, .type = ARM_CP_NOP },
986     { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
987       .access = PL1_RW,
988       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
989                              offsetof(CPUARMState, cp15.ifar_ns) },
990       .resetvalue = 0, },
991     /* Watchpoint Fault Address Register : should actually only be present
992      * for 1136, 1176, 11MPCore.
993      */
994     { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
995       .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
996     { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
997       .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
998       .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
999       .resetfn = cpacr_reset, .writefn = cpacr_write },
1000     REGINFO_SENTINEL
1001 };
1002 
1003 /* Definitions for the PMU registers */
1004 #define PMCRN_MASK  0xf800
1005 #define PMCRN_SHIFT 11
1006 #define PMCRLC  0x40
1007 #define PMCRDP  0x10
1008 #define PMCRD   0x8
1009 #define PMCRC   0x4
1010 #define PMCRP   0x2
1011 #define PMCRE   0x1
1012 
1013 #define PMXEVTYPER_P          0x80000000
1014 #define PMXEVTYPER_U          0x40000000
1015 #define PMXEVTYPER_NSK        0x20000000
1016 #define PMXEVTYPER_NSU        0x10000000
1017 #define PMXEVTYPER_NSH        0x08000000
1018 #define PMXEVTYPER_M          0x04000000
1019 #define PMXEVTYPER_MT         0x02000000
1020 #define PMXEVTYPER_EVTCOUNT   0x0000ffff
1021 #define PMXEVTYPER_MASK       (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
1022                                PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
1023                                PMXEVTYPER_M | PMXEVTYPER_MT | \
1024                                PMXEVTYPER_EVTCOUNT)
1025 
1026 #define PMCCFILTR             0xf8000000
1027 #define PMCCFILTR_M           PMXEVTYPER_M
1028 #define PMCCFILTR_EL0         (PMCCFILTR | PMCCFILTR_M)
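
/*
 * For illustration, a PMEVTYPER value of 0x88000011 decodes with the
 * masks above as P = 1 (do not count at EL1), NSH = 1 (count at
 * Non-secure EL2) and evtCount = 0x11 (the CPU_CYCLES event), all
 * other filter bits clear.  PMCCFILTR_EL0 keeps only the filter bits,
 * since the cycle counter has no event field.
 */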
1029 
1030 static inline uint32_t pmu_num_counters(CPUARMState *env)
1031 {
1032     return (env->cp15.c9_pmcr & PMCRN_MASK) >> PMCRN_SHIFT;
1033 }
1034 
1035 /* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
1036 static inline uint64_t pmu_counter_mask(CPUARMState *env)
1037 {
1038     return (1 << 31) | ((1 << pmu_num_counters(env)) - 1);
1039 }
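
/*
 * Worked example: a PMCR value with N == 4 (PMCR[15:11] == 4) gives
 * pmu_num_counters() == 4 and pmu_counter_mask() == 0x8000000f, i.e.
 * bit 31 for the cycle counter plus one bit per implemented event
 * counter.
 */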
1040 
1041 typedef struct pm_event {
1042     uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */
1043     /* If the event is supported on this CPU (used to generate PMCEID[01]) */
1044     bool (*supported)(CPUARMState *);
1045     /*
1046      * Retrieve the current count of the underlying event. The programmed
1047      * counters hold a difference from the return value from this function
1048      */
1049     uint64_t (*get_count)(CPUARMState *);
1050     /*
1051      * Return how many nanoseconds it will take (at a minimum) for count events
1052      * to occur. A negative value indicates the counter will never overflow, or
1053      * that the counter has otherwise arranged for the overflow bit to be set
1054      * and the PMU interrupt to be raised on overflow.
1055      */
1056     int64_t (*ns_per_count)(uint64_t);
1057 } pm_event;
1058 
1059 static bool event_always_supported(CPUARMState *env)
1060 {
1061     return true;
1062 }
1063 
1064 static uint64_t swinc_get_count(CPUARMState *env)
1065 {
1066     /*
1067      * SW_INCR events are written directly to the pmevcntr's by writes to
1068      * PMSWINC, so there is no underlying count maintained by the PMU itself
1069      */
1070     return 0;
1071 }
1072 
1073 static int64_t swinc_ns_per(uint64_t ignored)
1074 {
1075     return -1;
1076 }
1077 
1078 /*
1079  * Return the underlying cycle count for the PMU cycle counters. If we're in
1080  * usermode, simply return 0.
1081  */
1082 static uint64_t cycles_get_count(CPUARMState *env)
1083 {
1084 #ifndef CONFIG_USER_ONLY
1085     return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
1086                    ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
1087 #else
1088     return cpu_get_host_ticks();
1089 #endif
1090 }
1091 
1092 #ifndef CONFIG_USER_ONLY
1093 static int64_t cycles_ns_per(uint64_t cycles)
1094 {
1095     return (ARM_CPU_FREQ / NANOSECONDS_PER_SECOND) * cycles;
1096 }
1097 
1098 static bool instructions_supported(CPUARMState *env)
1099 {
1100     return use_icount == 1 /* Precise instruction counting */;
1101 }
1102 
1103 static uint64_t instructions_get_count(CPUARMState *env)
1104 {
1105     return (uint64_t)cpu_get_icount_raw();
1106 }
1107 
1108 static int64_t instructions_ns_per(uint64_t icount)
1109 {
1110     return cpu_icount_to_ns((int64_t)icount);
1111 }
1112 #endif
1113 
1114 static const pm_event pm_events[] = {
1115     { .number = 0x000, /* SW_INCR */
1116       .supported = event_always_supported,
1117       .get_count = swinc_get_count,
1118       .ns_per_count = swinc_ns_per,
1119     },
1120 #ifndef CONFIG_USER_ONLY
1121     { .number = 0x008, /* INST_RETIRED, Instruction architecturally executed */
1122       .supported = instructions_supported,
1123       .get_count = instructions_get_count,
1124       .ns_per_count = instructions_ns_per,
1125     },
1126     { .number = 0x011, /* CPU_CYCLES, Cycle */
1127       .supported = event_always_supported,
1128       .get_count = cycles_get_count,
1129       .ns_per_count = cycles_ns_per,
1130     }
1131 #endif
1132 };
1133 
1134 /*
1135  * Note: Before increasing MAX_EVENT_ID beyond 0x3f into the 0x40xx range of
1136  * events (i.e. the statistical profiling extension), this implementation
1137  * should first be updated to something sparse instead of the current
1138  * supported_event_map[] array.
1139  */
1140 #define MAX_EVENT_ID 0x11
1141 #define UNSUPPORTED_EVENT UINT16_MAX
1142 static uint16_t supported_event_map[MAX_EVENT_ID + 1];
1143 
1144 /*
1145  * Called upon CPU initialization to initialize PMCEID[01]_EL0 and build a map
1146  * of ARM event numbers to indices in our pm_events array.
1147  *
1148  * Note: Events in the 0x40XX range are not currently supported.
1149  */
1150 void pmu_init(ARMCPU *cpu)
1151 {
1152     unsigned int i;
1153 
1154     /*
1155      * Empty supported_event_map and cpu->pmceid[01] before adding supported
1156      * events to them
1157      */
1158     for (i = 0; i < ARRAY_SIZE(supported_event_map); i++) {
1159         supported_event_map[i] = UNSUPPORTED_EVENT;
1160     }
1161     cpu->pmceid0 = 0;
1162     cpu->pmceid1 = 0;
1163 
1164     for (i = 0; i < ARRAY_SIZE(pm_events); i++) {
1165         const pm_event *cnt = &pm_events[i];
1166         assert(cnt->number <= MAX_EVENT_ID);
1167         /* We do not currently support events in the 0x40xx range */
1168         assert(cnt->number <= 0x3f);
1169 
1170         if (cnt->supported(&cpu->env)) {
1171             supported_event_map[cnt->number] = i;
1172             uint64_t event_mask = 1ULL << (cnt->number & 0x1f);
1173             if (cnt->number & 0x20) {
1174                 cpu->pmceid1 |= event_mask;
1175             } else {
1176                 cpu->pmceid0 |= event_mask;
1177             }
1178         }
1179     }
1180 }
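
/*
 * For illustration, with the pm_events[] table above in a system
 * emulation build this ends up with supported_event_map[0x000] == 0,
 * supported_event_map[0x011] == 2 and PMCEID0 bits 0 (SW_INCR) and 17
 * (CPU_CYCLES) set; event 0x008 (INST_RETIRED) is added, as index 1
 * and PMCEID0 bit 8, only when precise instruction counting (icount)
 * is in use.  All other map entries stay UNSUPPORTED_EVENT and PMCEID1
 * stays 0, since no supported event number is above 0x1f.
 */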
1181 
1182 /*
1183  * Check at runtime whether a PMU event is supported for the current machine
1184  */
1185 static bool event_supported(uint16_t number)
1186 {
1187     if (number > MAX_EVENT_ID) {
1188         return false;
1189     }
1190     return supported_event_map[number] != UNSUPPORTED_EVENT;
1191 }
1192 
1193 static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
1194                                    bool isread)
1195 {
1196     /* Performance monitor registers user accessibility is controlled
1197      * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
1198      * trapping to EL2 or EL3 for other accesses.
1199      */
1200     int el = arm_current_el(env);
1201 
1202     if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
1203         return CP_ACCESS_TRAP;
1204     }
1205     if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
1206         && !arm_is_secure_below_el3(env)) {
1207         return CP_ACCESS_TRAP_EL2;
1208     }
1209     if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
1210         return CP_ACCESS_TRAP_EL3;
1211     }
1212 
1213     return CP_ACCESS_OK;
1214 }
1215 
1216 static CPAccessResult pmreg_access_xevcntr(CPUARMState *env,
1217                                            const ARMCPRegInfo *ri,
1218                                            bool isread)
1219 {
1220     /* ER: event counter read trap control */
1221     if (arm_feature(env, ARM_FEATURE_V8)
1222         && arm_current_el(env) == 0
1223         && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
1224         && isread) {
1225         return CP_ACCESS_OK;
1226     }
1227 
1228     return pmreg_access(env, ri, isread);
1229 }
1230 
1231 static CPAccessResult pmreg_access_swinc(CPUARMState *env,
1232                                          const ARMCPRegInfo *ri,
1233                                          bool isread)
1234 {
1235     /* SW: software increment write trap control */
1236     if (arm_feature(env, ARM_FEATURE_V8)
1237         && arm_current_el(env) == 0
1238         && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
1239         && !isread) {
1240         return CP_ACCESS_OK;
1241     }
1242 
1243     return pmreg_access(env, ri, isread);
1244 }
1245 
1246 static CPAccessResult pmreg_access_selr(CPUARMState *env,
1247                                         const ARMCPRegInfo *ri,
1248                                         bool isread)
1249 {
1250     /* ER: event counter read trap control */
1251     if (arm_feature(env, ARM_FEATURE_V8)
1252         && arm_current_el(env) == 0
1253         && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
1254         return CP_ACCESS_OK;
1255     }
1256 
1257     return pmreg_access(env, ri, isread);
1258 }
1259 
1260 static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
1261                                          const ARMCPRegInfo *ri,
1262                                          bool isread)
1263 {
1264     /* CR: cycle counter read trap control */
1265     if (arm_feature(env, ARM_FEATURE_V8)
1266         && arm_current_el(env) == 0
1267         && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
1268         && isread) {
1269         return CP_ACCESS_OK;
1270     }
1271 
1272     return pmreg_access(env, ri, isread);
1273 }
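
/*
 * For example, on a v8 CPU an EL0 read of PMCCNTR with
 * PMUSERENR.EN == 0 but PMUSERENR.CR == 1 is allowed by
 * pmreg_access_ccntr(), whereas an EL0 write with the same settings
 * falls through to pmreg_access() and traps.
 */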
1274 
1275 /* Returns true if the counter (pass 31 for PMCCNTR) should count events using
1276  * the current EL, security state, and register configuration.
1277  */
1278 static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
1279 {
1280     uint64_t filter;
1281     bool e, p, u, nsk, nsu, nsh, m;
1282     bool enabled, prohibited, filtered;
1283     bool secure = arm_is_secure(env);
1284     int el = arm_current_el(env);
1285     uint8_t hpmn = env->cp15.mdcr_el2 & MDCR_HPMN;
1286 
1287     if (!arm_feature(env, ARM_FEATURE_PMU)) {
1288         return false;
1289     }
1290 
1291     if (!arm_feature(env, ARM_FEATURE_EL2) ||
1292             (counter < hpmn || counter == 31)) {
1293         e = env->cp15.c9_pmcr & PMCRE;
1294     } else {
1295         e = env->cp15.mdcr_el2 & MDCR_HPME;
1296     }
1297     enabled = e && (env->cp15.c9_pmcnten & (1 << counter));
1298 
1299     if (!secure) {
1300         if (el == 2 && (counter < hpmn || counter == 31)) {
1301             prohibited = env->cp15.mdcr_el2 & MDCR_HPMD;
1302         } else {
1303             prohibited = false;
1304         }
1305     } else {
1306         prohibited = arm_feature(env, ARM_FEATURE_EL3) &&
1307            (env->cp15.mdcr_el3 & MDCR_SPME);
1308     }
1309 
1310     if (prohibited && counter == 31) {
1311         prohibited = env->cp15.c9_pmcr & PMCRDP;
1312     }
1313 
1314     if (counter == 31) {
1315         filter = env->cp15.pmccfiltr_el0;
1316     } else {
1317         filter = env->cp15.c14_pmevtyper[counter];
1318     }
1319 
1320     p   = filter & PMXEVTYPER_P;
1321     u   = filter & PMXEVTYPER_U;
1322     nsk = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSK);
1323     nsu = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSU);
1324     nsh = arm_feature(env, ARM_FEATURE_EL2) && (filter & PMXEVTYPER_NSH);
1325     m   = arm_el_is_aa64(env, 1) &&
1326               arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_M);
1327 
1328     if (el == 0) {
1329         filtered = secure ? u : u != nsu;
1330     } else if (el == 1) {
1331         filtered = secure ? p : p != nsk;
1332     } else if (el == 2) {
1333         filtered = !nsh;
1334     } else { /* EL3 */
1335         filtered = m != p;
1336     }
1337 
1338     if (counter != 31) {
1339         /*
1340          * If not checking PMCCNTR, ensure the counter is setup to an event we
1341          * support
1342          */
1343         uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
1344         if (!event_supported(event)) {
1345             return false;
1346         }
1347     }
1348 
1349     return enabled && !prohibited && !filtered;
1350 }
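
/*
 * Worked example: for PMCCNTR (counter == 31) at Non-secure EL0 with
 * PMCR.E set and PMCNTEN bit 31 set, the counter is enabled; but if
 * PMCCFILTR_EL0.U == 1 and NSU == 0 then filtered == (u != nsu) is
 * true, so pmu_counter_enabled() returns false and cycles executed at
 * EL0 are not counted.
 */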
1351 
1352 static void pmu_update_irq(CPUARMState *env)
1353 {
1354     ARMCPU *cpu = arm_env_get_cpu(env);
1355     qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
1356             (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
1357 }
1358 
1359 /*
1360  * Ensure c15_ccnt is the guest-visible count so that operations such as
1361  * enabling/disabling the counter or filtering, modifying the count itself,
1362  * etc. can be done logically. This is essentially a no-op if the counter is
1363  * not enabled at the time of the call.
1364  */
1365 static void pmccntr_op_start(CPUARMState *env)
1366 {
1367     uint64_t cycles = cycles_get_count(env);
1368 
1369     if (pmu_counter_enabled(env, 31)) {
1370         uint64_t eff_cycles = cycles;
1371         if (env->cp15.c9_pmcr & PMCRD) {
1372             /* Increment once every 64 processor clock cycles */
1373             eff_cycles /= 64;
1374         }
1375 
1376         uint64_t new_pmccntr = eff_cycles - env->cp15.c15_ccnt_delta;
1377 
1378         uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ?
1379                                  1ull << 63 : 1ull << 31;
1380         if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) {
1381             env->cp15.c9_pmovsr |= (1 << 31);
1382             pmu_update_irq(env);
1383         }
1384 
1385         env->cp15.c15_ccnt = new_pmccntr;
1386     }
1387     env->cp15.c15_ccnt_delta = cycles;
1388 }
1389 
1390 /*
1391  * If PMCCNTR is enabled, recalculate the delta between the clock and the
1392  * guest-visible count. A call to pmccntr_op_finish should follow every call to
1393  * pmccntr_op_start.
1394  */
1395 static void pmccntr_op_finish(CPUARMState *env)
1396 {
1397     if (pmu_counter_enabled(env, 31)) {
1398 #ifndef CONFIG_USER_ONLY
1399         /* Calculate when the counter will next overflow */
1400         uint64_t remaining_cycles = -env->cp15.c15_ccnt;
1401         if (!(env->cp15.c9_pmcr & PMCRLC)) {
1402             remaining_cycles = (uint32_t)remaining_cycles;
1403         }
1404         int64_t overflow_in = cycles_ns_per(remaining_cycles);
1405 
1406         if (overflow_in > 0) {
1407             int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
1408                 overflow_in;
1409             ARMCPU *cpu = arm_env_get_cpu(env);
1410             timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
1411         }
1412 #endif
1413 
1414         uint64_t prev_cycles = env->cp15.c15_ccnt_delta;
1415         if (env->cp15.c9_pmcr & PMCRD) {
1416             /* Increment once every 64 processor clock cycles */
1417             prev_cycles /= 64;
1418         }
1419         env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt;
1420     }
1421 }
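
/*
 * Worked example of the delta scheme (ignoring PMCRD): if the
 * underlying cycle count is 1000 and c15_ccnt_delta is 400, then
 * pmccntr_op_start() exposes a guest-visible PMCCNTR of 600.  If the
 * guest then writes 100 to PMCCNTR, pmccntr_op_finish() records
 * c15_ccnt_delta = 1000 - 100 = 900, so a later pmccntr_op_start() at
 * an underlying count of 1500 shows 1500 - 900 = 600, i.e. the written
 * value plus the 500 cycles elapsed since the write.
 */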
1422 
1423 static void pmevcntr_op_start(CPUARMState *env, uint8_t counter)
1424 {
1425 
1426     uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
1427     uint64_t count = 0;
1428     if (event_supported(event)) {
1429         uint16_t event_idx = supported_event_map[event];
1430         count = pm_events[event_idx].get_count(env);
1431     }
1432 
1433     if (pmu_counter_enabled(env, counter)) {
1434         uint32_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter];
1435 
1436         if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & INT32_MIN) {
1437             env->cp15.c9_pmovsr |= (1 << counter);
1438             pmu_update_irq(env);
1439         }
1440         env->cp15.c14_pmevcntr[counter] = new_pmevcntr;
1441     }
1442     env->cp15.c14_pmevcntr_delta[counter] = count;
1443 }
1444 
1445 static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter)
1446 {
1447     if (pmu_counter_enabled(env, counter)) {
1448 #ifndef CONFIG_USER_ONLY
1449         uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
1450         uint16_t event_idx = supported_event_map[event];
1451         uint64_t delta = UINT32_MAX -
1452             (uint32_t)env->cp15.c14_pmevcntr[counter] + 1;
1453         int64_t overflow_in = pm_events[event_idx].ns_per_count(delta);
1454 
1455         if (overflow_in > 0) {
1456             int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
1457                 overflow_in;
1458             ARMCPU *cpu = arm_env_get_cpu(env);
1459             timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
1460         }
1461 #endif
1462 
1463         env->cp15.c14_pmevcntr_delta[counter] -=
1464             env->cp15.c14_pmevcntr[counter];
1465     }
1466 }
1467 
1468 void pmu_op_start(CPUARMState *env)
1469 {
1470     unsigned int i;
1471     pmccntr_op_start(env);
1472     for (i = 0; i < pmu_num_counters(env); i++) {
1473         pmevcntr_op_start(env, i);
1474     }
1475 }
1476 
1477 void pmu_op_finish(CPUARMState *env)
1478 {
1479     unsigned int i;
1480     pmccntr_op_finish(env);
1481     for (i = 0; i < pmu_num_counters(env); i++) {
1482         pmevcntr_op_finish(env, i);
1483     }
1484 }
1485 
1486 void pmu_pre_el_change(ARMCPU *cpu, void *ignored)
1487 {
1488     pmu_op_start(&cpu->env);
1489 }
1490 
1491 void pmu_post_el_change(ARMCPU *cpu, void *ignored)
1492 {
1493     pmu_op_finish(&cpu->env);
1494 }
1495 
1496 void arm_pmu_timer_cb(void *opaque)
1497 {
1498     ARMCPU *cpu = opaque;
1499 
1500     /*
1501      * Update all the counter values based on the current underlying counts,
1502      * triggering interrupts to be raised, if necessary. pmu_op_finish() also
1503      * has the effect of setting the cpu->pmu_timer to the next earliest time a
1504      * counter may expire.
1505      */
1506     pmu_op_start(&cpu->env);
1507     pmu_op_finish(&cpu->env);
1508 }
1509 
1510 static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1511                        uint64_t value)
1512 {
1513     pmu_op_start(env);
1514 
1515     if (value & PMCRC) {
1516         /* The counter has been reset */
1517         env->cp15.c15_ccnt = 0;
1518     }
1519 
1520     if (value & PMCRP) {
1521         unsigned int i;
1522         for (i = 0; i < pmu_num_counters(env); i++) {
1523             env->cp15.c14_pmevcntr[i] = 0;
1524         }
1525     }
1526 
1527     /* only the DP, X, D and E bits are writable */
1528     env->cp15.c9_pmcr &= ~0x39;
1529     env->cp15.c9_pmcr |= (value & 0x39);
1530 
1531     pmu_op_finish(env);
1532 }
1533 
1534 static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri,
1535                           uint64_t value)
1536 {
1537     unsigned int i;
1538     for (i = 0; i < pmu_num_counters(env); i++) {
1539         /* Increment a counter's count iff: */
1540         if ((value & (1 << i)) && /* counter's bit is set */
1541                 /* counter is enabled and not filtered */
1542                 pmu_counter_enabled(env, i) &&
1543                 /* counter is SW_INCR */
1544                 (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
1545             pmevcntr_op_start(env, i);
1546 
1547             /*
1548              * Detect if this write causes an overflow since we can't predict
1549              * PMSWINC overflows like we can for other events
1550              */
1551             uint32_t new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;
1552 
1553             if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & INT32_MIN) {
1554                 env->cp15.c9_pmovsr |= (1 << i);
1555                 pmu_update_irq(env);
1556             }
1557 
1558             env->cp15.c14_pmevcntr[i] = new_pmswinc;
1559 
1560             pmevcntr_op_finish(env, i);
1561         }
1562     }
1563 }
1564 
1565 static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1566 {
1567     uint64_t ret;
1568     pmccntr_op_start(env);
1569     ret = env->cp15.c15_ccnt;
1570     pmccntr_op_finish(env);
1571     return ret;
1572 }
1573 
1574 static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1575                          uint64_t value)
1576 {
1577     /* The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
1578      * PMXEVCNTR. We allow [0..31] to be written to PMSELR here;
1579      * PMSELR.SEL is then checked when PMXEVTYPER and PMXEVCNTR are
1580      * accessed.
1581      */
1582     env->cp15.c9_pmselr = value & 0x1f;
1583 }
1584 
1585 static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1586                         uint64_t value)
1587 {
1588     pmccntr_op_start(env);
1589     env->cp15.c15_ccnt = value;
1590     pmccntr_op_finish(env);
1591 }
1592 
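/* AArch32 view of PMCCNTR: writes update only the low 32 bits of the cycle counter */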
1593 static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
1594                             uint64_t value)
1595 {
1596     uint64_t cur_val = pmccntr_read(env, NULL);
1597 
1598     pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
1599 }
1600 
1601 static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1602                             uint64_t value)
1603 {
1604     pmccntr_op_start(env);
1605     env->cp15.pmccfiltr_el0 = value & PMCCFILTR_EL0;
1606     pmccntr_op_finish(env);
1607 }
1608 
1609 static void pmccfiltr_write_a32(CPUARMState *env, const ARMCPRegInfo *ri,
1610                             uint64_t value)
1611 {
1612     pmccntr_op_start(env);
1613     /* M is not accessible from AArch32 */
1614     env->cp15.pmccfiltr_el0 = (env->cp15.pmccfiltr_el0 & PMCCFILTR_M) |
1615         (value & PMCCFILTR);
1616     pmccntr_op_finish(env);
1617 }
1618 
1619 static uint64_t pmccfiltr_read_a32(CPUARMState *env, const ARMCPRegInfo *ri)
1620 {
1621     /* M is not visible in AArch32 */
1622     return env->cp15.pmccfiltr_el0 & PMCCFILTR;
1623 }
1624 
1625 static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1626                             uint64_t value)
1627 {
1628     value &= pmu_counter_mask(env);
1629     env->cp15.c9_pmcnten |= value;
1630 }
1631 
1632 static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1633                              uint64_t value)
1634 {
1635     value &= pmu_counter_mask(env);
1636     env->cp15.c9_pmcnten &= ~value;
1637 }
1638 
1639 static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1640                          uint64_t value)
1641 {
1642     value &= pmu_counter_mask(env);
1643     env->cp15.c9_pmovsr &= ~value;
1644     pmu_update_irq(env);
1645 }
1646 
1647 static void pmovsset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1648                          uint64_t value)
1649 {
1650     value &= pmu_counter_mask(env);
1651     env->cp15.c9_pmovsr |= value;
1652     pmu_update_irq(env);
1653 }
1654 
1655 static void pmevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
1656                              uint64_t value, const uint8_t counter)
1657 {
1658     if (counter == 31) {
1659         pmccfiltr_write(env, ri, value);
1660     } else if (counter < pmu_num_counters(env)) {
1661         pmevcntr_op_start(env, counter);
1662 
1663         /*
1664          * If this counter's event type is changing, store the current
1665          * underlying count for the new type in c14_pmevcntr_delta[counter] so
1666          * pmevcntr_op_finish has the correct baseline when it converts back to
1667          * a delta.
1668          */
1669         uint16_t old_event = env->cp15.c14_pmevtyper[counter] &
1670             PMXEVTYPER_EVTCOUNT;
1671         uint16_t new_event = value & PMXEVTYPER_EVTCOUNT;
1672         if (old_event != new_event) {
1673             uint64_t count = 0;
1674             if (event_supported(new_event)) {
1675                 uint16_t event_idx = supported_event_map[new_event];
1676                 count = pm_events[event_idx].get_count(env);
1677             }
1678             env->cp15.c14_pmevcntr_delta[counter] = count;
1679         }
1680 
1681         env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK;
1682         pmevcntr_op_finish(env, counter);
1683     }
1684     /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
1685      * PMSELR value is equal to or greater than the number of implemented
1686      * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
1687      */
1688 }
1689 
1690 static uint64_t pmevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri,
1691                                const uint8_t counter)
1692 {
1693     if (counter == 31) {
1694         return env->cp15.pmccfiltr_el0;
1695     } else if (counter < pmu_num_counters(env)) {
1696         return env->cp15.c14_pmevtyper[counter];
1697     } else {
1698         /*
1699          * We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
1700          * are CONSTRAINED UNPREDICTABLE. See comments in pmevtyper_write().
1701          */
1702         return 0;
1703     }
1704 }
1705 
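/*
 * The PMEVTYPER<n>/PMEVCNTR<n> encodings place the counter index n in
 * CRm[1:0]:opc2[2:0], hence the ((crm & 3) << 3) | (opc2 & 7) extraction below.
 */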
1706 static void pmevtyper_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
1707                               uint64_t value)
1708 {
1709     uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1710     pmevtyper_write(env, ri, value, counter);
1711 }
1712 
1713 static void pmevtyper_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
1714                                uint64_t value)
1715 {
1716     uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1717     env->cp15.c14_pmevtyper[counter] = value;
1718 
1719     /*
1720      * pmevtyper_rawwrite is called between a pair of pmu_op_start and
1721      * pmu_op_finish calls when loading saved state for a migration. Because
1722      * we're potentially updating the type of event here, the value written to
1723      * c14_pmevcntr_delta by the preceding pmu_op_start call may be for a
1724      * different counter type. Therefore, we need to set this value to the
1725      * current count for the counter type we're writing so that pmu_op_finish
1726      * has the correct count for its calculation.
1727      */
1728     uint16_t event = value & PMXEVTYPER_EVTCOUNT;
1729     if (event_supported(event)) {
1730         uint16_t event_idx = supported_event_map[event];
1731         env->cp15.c14_pmevcntr_delta[counter] =
1732             pm_events[event_idx].get_count(env);
1733     }
1734 }
1735 
1736 static uint64_t pmevtyper_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
1737 {
1738     uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1739     return pmevtyper_read(env, ri, counter);
1740 }
1741 
1742 static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
1743                              uint64_t value)
1744 {
1745     pmevtyper_write(env, ri, value, env->cp15.c9_pmselr & 31);
1746 }
1747 
1748 static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
1749 {
1750     return pmevtyper_read(env, ri, env->cp15.c9_pmselr & 31);
1751 }
1752 
1753 static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1754                              uint64_t value, uint8_t counter)
1755 {
1756     if (counter < pmu_num_counters(env)) {
1757         pmevcntr_op_start(env, counter);
1758         env->cp15.c14_pmevcntr[counter] = value;
1759         pmevcntr_op_finish(env, counter);
1760     }
1761     /*
1762      * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
1763      * are CONSTRAINED UNPREDICTABLE.
1764      */
1765 }
1766 
1767 static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri,
1768                               uint8_t counter)
1769 {
1770     if (counter < pmu_num_counters(env)) {
1771         uint64_t ret;
1772         pmevcntr_op_start(env, counter);
1773         ret = env->cp15.c14_pmevcntr[counter];
1774         pmevcntr_op_finish(env, counter);
1775         return ret;
1776     } else {
1777         /* We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
1778          * are CONSTRAINED UNPREDICTABLE. */
1779         return 0;
1780     }
1781 }
1782 
1783 static void pmevcntr_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
1784                              uint64_t value)
1785 {
1786     uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1787     pmevcntr_write(env, ri, value, counter);
1788 }
1789 
1790 static uint64_t pmevcntr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
1791 {
1792     uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1793     return pmevcntr_read(env, ri, counter);
1794 }
1795 
1796 static void pmevcntr_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
1797                              uint64_t value)
1798 {
1799     uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1800     assert(counter < pmu_num_counters(env));
1801     env->cp15.c14_pmevcntr[counter] = value;
1802     pmevcntr_write(env, ri, value, counter);
1803 }
1804 
1805 static uint64_t pmevcntr_rawread(CPUARMState *env, const ARMCPRegInfo *ri)
1806 {
1807     uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1808     assert(counter < pmu_num_counters(env));
1809     return env->cp15.c14_pmevcntr[counter];
1810 }
1811 
1812 static void pmxevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1813                              uint64_t value)
1814 {
1815     pmevcntr_write(env, ri, value, env->cp15.c9_pmselr & 31);
1816 }
1817 
1818 static uint64_t pmxevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1819 {
1820     return pmevcntr_read(env, ri, env->cp15.c9_pmselr & 31);
1821 }
1822 
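/* PMUSERENR: v8 adds the ER, CR and SW enable bits to the v7 EN bit */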
1823 static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1824                             uint64_t value)
1825 {
1826     if (arm_feature(env, ARM_FEATURE_V8)) {
1827         env->cp15.c9_pmuserenr = value & 0xf;
1828     } else {
1829         env->cp15.c9_pmuserenr = value & 1;
1830     }
1831 }
1832 
1833 static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1834                              uint64_t value)
1835 {
1836     /* Mask writes to the bits for counters which are actually implemented */
1837     value &= pmu_counter_mask(env);
1838     env->cp15.c9_pminten |= value;
1839     pmu_update_irq(env);
1840 }
1841 
1842 static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1843                              uint64_t value)
1844 {
1845     value &= pmu_counter_mask(env);
1846     env->cp15.c9_pminten &= ~value;
1847     pmu_update_irq(env);
1848 }
1849 
1850 static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
1851                        uint64_t value)
1852 {
1853     /* Note that even though the AArch64 view of this register has bits
1854      * [10:0] all RES0 we can only mask the bottom 5, to comply with the
1855      * architectural requirements for bits which are RES0 only in some
1856      * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
1857      * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
1858      */
1859     raw_write(env, ri, value & ~0x1FULL);
1860 }
1861 
1862 static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
1863 {
1864     /* Begin with base v8.0 state.  */
1865     uint32_t valid_mask = 0x3fff;
1866     ARMCPU *cpu = arm_env_get_cpu(env);
1867 
1868     if (arm_el_is_aa64(env, 3)) {
1869         value |= SCR_FW | SCR_AW;   /* these two bits are RES1.  */
1870         valid_mask &= ~SCR_NET;
1871     } else {
1872         valid_mask &= ~(SCR_RW | SCR_ST);
1873     }
1874 
1875     if (!arm_feature(env, ARM_FEATURE_EL2)) {
1876         valid_mask &= ~SCR_HCE;
1877 
1878         /* On ARMv7, SMD (or SCD as it is called in v7) is only
1879          * supported if EL2 exists. The bit is UNK/SBZP when
1880          * EL2 is unavailable. In QEMU's ARMv7 emulation, we force it to
1881          * zero when EL2 is unavailable.
1882          * On ARMv8, this bit is always available.
1883          */
1884         if (arm_feature(env, ARM_FEATURE_V7) &&
1885             !arm_feature(env, ARM_FEATURE_V8)) {
1886             valid_mask &= ~SCR_SMD;
1887         }
1888     }
1889     if (cpu_isar_feature(aa64_lor, cpu)) {
1890         valid_mask |= SCR_TLOR;
1891     }
1892     if (cpu_isar_feature(aa64_pauth, cpu)) {
1893         valid_mask |= SCR_API | SCR_APK;
1894     }
1895 
1896     /* Clear all-context RES0 bits.  */
1897     value &= valid_mask;
1898     raw_write(env, ri, value);
1899 }
1900 
1901 static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1902 {
1903     ARMCPU *cpu = arm_env_get_cpu(env);
1904 
1905     /* Acquire the CSSELR index from the CSSELR bank corresponding to the
1906      * CCSIDR bank being accessed
1907      */
1908     uint32_t index = A32_BANKED_REG_GET(env, csselr,
1909                                         ri->secure & ARM_CP_SECSTATE_S);
1910 
1911     return cpu->ccsidr[index];
1912 }
1913 
1914 static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1915                          uint64_t value)
1916 {
1917     raw_write(env, ri, value & 0xf);
1918 }
1919 
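/*
 * ISR_EL1/ISR: report pending IRQ and FIQ, taking the HCR_EL2.IMO/FMO
 * routing of virtual interrupts into account.
 */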
1920 static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1921 {
1922     CPUState *cs = ENV_GET_CPU(env);
1923     uint64_t hcr_el2 = arm_hcr_el2_eff(env);
1924     uint64_t ret = 0;
1925 
1926     if (hcr_el2 & HCR_IMO) {
1927         if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
1928             ret |= CPSR_I;
1929         }
1930     } else {
1931         if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
1932             ret |= CPSR_I;
1933         }
1934     }
1935 
1936     if (hcr_el2 & HCR_FMO) {
1937         if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) {
1938             ret |= CPSR_F;
1939         }
1940     } else {
1941         if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
1942             ret |= CPSR_F;
1943         }
1944     }
1945 
1946     /* External aborts are not possible in QEMU so A bit is always clear */
1947     return ret;
1948 }
1949 
1950 static const ARMCPRegInfo v7_cp_reginfo[] = {
1951     /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
1952     { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
1953       .access = PL1_W, .type = ARM_CP_NOP },
1954     /* Performance monitors are implementation defined in v7,
1955      * but with an ARM recommended set of registers, which we
1956      * follow.
1957      *
1958      * Performance registers fall into three categories:
1959      *  (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
1960      *  (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
1961      *  (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
1962      * For the cases controlled by PMUSERENR we must set .access to PL0_RW
1963      * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
1964      */
1965     { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
1966       .access = PL0_RW, .type = ARM_CP_ALIAS,
1967       .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
1968       .writefn = pmcntenset_write,
1969       .accessfn = pmreg_access,
1970       .raw_writefn = raw_write },
1971     { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64,
1972       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
1973       .access = PL0_RW, .accessfn = pmreg_access,
1974       .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
1975       .writefn = pmcntenset_write, .raw_writefn = raw_write },
1976     { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
1977       .access = PL0_RW,
1978       .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
1979       .accessfn = pmreg_access,
1980       .writefn = pmcntenclr_write,
1981       .type = ARM_CP_ALIAS },
1982     { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
1983       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
1984       .access = PL0_RW, .accessfn = pmreg_access,
1985       .type = ARM_CP_ALIAS,
1986       .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
1987       .writefn = pmcntenclr_write },
1988     { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
1989       .access = PL0_RW, .type = ARM_CP_IO,
1990       .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
1991       .accessfn = pmreg_access,
1992       .writefn = pmovsr_write,
1993       .raw_writefn = raw_write },
1994     { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
1995       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
1996       .access = PL0_RW, .accessfn = pmreg_access,
1997       .type = ARM_CP_ALIAS | ARM_CP_IO,
1998       .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
1999       .writefn = pmovsr_write,
2000       .raw_writefn = raw_write },
2001     { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
2002       .access = PL0_W, .accessfn = pmreg_access_swinc,
2003       .type = ARM_CP_NO_RAW | ARM_CP_IO,
2004       .writefn = pmswinc_write },
2005     { .name = "PMSWINC_EL0", .state = ARM_CP_STATE_AA64,
2006       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 4,
2007       .access = PL0_W, .accessfn = pmreg_access_swinc,
2008       .type = ARM_CP_NO_RAW | ARM_CP_IO,
2009       .writefn = pmswinc_write },
2010     { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
2011       .access = PL0_RW, .type = ARM_CP_ALIAS,
2012       .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr),
2013       .accessfn = pmreg_access_selr, .writefn = pmselr_write,
2014       .raw_writefn = raw_write},
2015     { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64,
2016       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5,
2017       .access = PL0_RW, .accessfn = pmreg_access_selr,
2018       .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr),
2019       .writefn = pmselr_write, .raw_writefn = raw_write, },
2020     { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
2021       .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO,
2022       .readfn = pmccntr_read, .writefn = pmccntr_write32,
2023       .accessfn = pmreg_access_ccntr },
2024     { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
2025       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
2026       .access = PL0_RW, .accessfn = pmreg_access_ccntr,
2027       .type = ARM_CP_IO,
2028       .fieldoffset = offsetof(CPUARMState, cp15.c15_ccnt),
2029       .readfn = pmccntr_read, .writefn = pmccntr_write,
2030       .raw_readfn = raw_read, .raw_writefn = raw_write, },
2031     { .name = "PMCCFILTR", .cp = 15, .opc1 = 0, .crn = 14, .crm = 15, .opc2 = 7,
2032       .writefn = pmccfiltr_write_a32, .readfn = pmccfiltr_read_a32,
2033       .access = PL0_RW, .accessfn = pmreg_access,
2034       .type = ARM_CP_ALIAS | ARM_CP_IO,
2035       .resetvalue = 0, },
2036     { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
2037       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
2038       .writefn = pmccfiltr_write, .raw_writefn = raw_write,
2039       .access = PL0_RW, .accessfn = pmreg_access,
2040       .type = ARM_CP_IO,
2041       .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
2042       .resetvalue = 0, },
2043     { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
2044       .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2045       .accessfn = pmreg_access,
2046       .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
2047     { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
2048       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1,
2049       .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2050       .accessfn = pmreg_access,
2051       .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
2052     { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
2053       .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2054       .accessfn = pmreg_access_xevcntr,
2055       .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
2056     { .name = "PMXEVCNTR_EL0", .state = ARM_CP_STATE_AA64,
2057       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 2,
2058       .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2059       .accessfn = pmreg_access_xevcntr,
2060       .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
2061     { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
2062       .access = PL0_R | PL1_RW, .accessfn = access_tpm,
2063       .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr),
2064       .resetvalue = 0,
2065       .writefn = pmuserenr_write, .raw_writefn = raw_write },
2066     { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
2067       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
2068       .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
2069       .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
2070       .resetvalue = 0,
2071       .writefn = pmuserenr_write, .raw_writefn = raw_write },
2072     { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
2073       .access = PL1_RW, .accessfn = access_tpm,
2074       .type = ARM_CP_ALIAS | ARM_CP_IO,
2075       .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten),
2076       .resetvalue = 0,
2077       .writefn = pmintenset_write, .raw_writefn = raw_write },
2078     { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64,
2079       .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1,
2080       .access = PL1_RW, .accessfn = access_tpm,
2081       .type = ARM_CP_IO,
2082       .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
2083       .writefn = pmintenset_write, .raw_writefn = raw_write,
2084       .resetvalue = 0x0 },
2085     { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
2086       .access = PL1_RW, .accessfn = access_tpm,
2087       .type = ARM_CP_ALIAS | ARM_CP_IO,
2088       .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
2089       .writefn = pmintenclr_write, },
2090     { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
2091       .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
2092       .access = PL1_RW, .accessfn = access_tpm,
2093       .type = ARM_CP_ALIAS | ARM_CP_IO,
2094       .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
2095       .writefn = pmintenclr_write },
2096     { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
2097       .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
2098       .access = PL1_R, .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
2099     { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
2100       .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
2101       .access = PL1_RW, .writefn = csselr_write, .resetvalue = 0,
2102       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
2103                              offsetof(CPUARMState, cp15.csselr_ns) } },
2104     /* Auxiliary ID register: this actually has an IMPDEF value but for now
2105      * we just RAZ for all cores.
2106      */
2107     { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
2108       .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
2109       .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
2110     /* Auxiliary fault status registers: these also are IMPDEF, and we
2111      * choose to RAZ/WI for all cores.
2112      */
2113     { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
2114       .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
2115       .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
2116     { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
2117       .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
2118       .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
2119     /* MAIR can just be read-as-written because we don't implement caches
2120      * and so don't need to care about memory attributes.
2121      */
2122     { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
2123       .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
2124       .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
2125       .resetvalue = 0 },
2126     { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
2127       .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
2128       .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
2129       .resetvalue = 0 },
2130     /* For non-long-descriptor page tables these are PRRR and NMRR;
2131      * regardless they still act as reads-as-written for QEMU.
2132      */
2133     /* MAIR0/1 are defined separately from their 64-bit counterpart so that
2134      * the correct fieldoffset can be assigned based on the endianness
2135      * handled in the field definitions.
2136      */
2137     { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
2138       .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, .access = PL1_RW,
2139       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
2140                              offsetof(CPUARMState, cp15.mair0_ns) },
2141       .resetfn = arm_cp_reset_ignore },
2142     { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
2143       .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1, .access = PL1_RW,
2144       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
2145                              offsetof(CPUARMState, cp15.mair1_ns) },
2146       .resetfn = arm_cp_reset_ignore },
2147     { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
2148       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
2149       .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
2150     /* 32 bit ITLB invalidates */
2151     { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
2152       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
2153     { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
2154       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
2155     { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
2156       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
2157     /* 32 bit DTLB invalidates */
2158     { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
2159       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
2160     { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
2161       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
2162     { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
2163       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
2164     /* 32 bit TLB invalidates */
2165     { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
2166       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
2167     { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
2168       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
2169     { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
2170       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
2171     { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
2172       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
2173     REGINFO_SENTINEL
2174 };
2175 
2176 static const ARMCPRegInfo v7mp_cp_reginfo[] = {
2177     /* 32 bit TLB invalidates, Inner Shareable */
2178     { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
2179       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_is_write },
2180     { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
2181       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
2182     { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
2183       .type = ARM_CP_NO_RAW, .access = PL1_W,
2184       .writefn = tlbiasid_is_write },
2185     { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
2186       .type = ARM_CP_NO_RAW, .access = PL1_W,
2187       .writefn = tlbimvaa_is_write },
2188     REGINFO_SENTINEL
2189 };
2190 
2191 static const ARMCPRegInfo pmovsset_cp_reginfo[] = {
2192     /* PMOVSSET is not implemented in v7 before v7ve */
2193     { .name = "PMOVSSET", .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 3,
2194       .access = PL0_RW, .accessfn = pmreg_access,
2195       .type = ARM_CP_ALIAS | ARM_CP_IO,
2196       .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
2197       .writefn = pmovsset_write,
2198       .raw_writefn = raw_write },
2199     { .name = "PMOVSSET_EL0", .state = ARM_CP_STATE_AA64,
2200       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 3,
2201       .access = PL0_RW, .accessfn = pmreg_access,
2202       .type = ARM_CP_ALIAS | ARM_CP_IO,
2203       .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
2204       .writefn = pmovsset_write,
2205       .raw_writefn = raw_write },
2206     REGINFO_SENTINEL
2207 };
2208 
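/* ThumbEE: only TEECR.XED (bit 0) is writable; when it is set, EL0 accesses
 * to TEEHBR trap (see teehbr_access below).
 */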
2209 static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2210                         uint64_t value)
2211 {
2212     value &= 1;
2213     env->teecr = value;
2214 }
2215 
2216 static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
2217                                     bool isread)
2218 {
2219     if (arm_current_el(env) == 0 && (env->teecr & 1)) {
2220         return CP_ACCESS_TRAP;
2221     }
2222     return CP_ACCESS_OK;
2223 }
2224 
2225 static const ARMCPRegInfo t2ee_cp_reginfo[] = {
2226     { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
2227       .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
2228       .resetvalue = 0,
2229       .writefn = teecr_write },
2230     { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
2231       .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
2232       .accessfn = teehbr_access, .resetvalue = 0 },
2233     REGINFO_SENTINEL
2234 };
2235 
2236 static const ARMCPRegInfo v6k_cp_reginfo[] = {
2237     { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
2238       .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
2239       .access = PL0_RW,
2240       .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
2241     { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
2242       .access = PL0_RW,
2243       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
2244                              offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
2245       .resetfn = arm_cp_reset_ignore },
2246     { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
2247       .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
2248       .access = PL0_R|PL1_W,
2249       .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
2250       .resetvalue = 0},
2251     { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
2252       .access = PL0_R|PL1_W,
2253       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
2254                              offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
2255       .resetfn = arm_cp_reset_ignore },
2256     { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
2257       .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
2258       .access = PL1_RW,
2259       .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
2260     { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
2261       .access = PL1_RW,
2262       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
2263                              offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
2264       .resetvalue = 0 },
2265     REGINFO_SENTINEL
2266 };
2267 
2268 #ifndef CONFIG_USER_ONLY
2269 
2270 static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
2271                                        bool isread)
2272 {
2273     /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
2274      * Writable only at the highest implemented exception level.
2275      */
2276     int el = arm_current_el(env);
2277 
2278     switch (el) {
2279     case 0:
2280         if (!extract32(env->cp15.c14_cntkctl, 0, 2)) {
2281             return CP_ACCESS_TRAP;
2282         }
2283         break;
2284     case 1:
2285         if (!isread && ri->state == ARM_CP_STATE_AA32 &&
2286             arm_is_secure_below_el3(env)) {
2287             /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
2288             return CP_ACCESS_TRAP_UNCATEGORIZED;
2289         }
2290         break;
2291     case 2:
2292     case 3:
2293         break;
2294     }
2295 
2296     if (!isread && el < arm_highest_el(env)) {
2297         return CP_ACCESS_TRAP_UNCATEGORIZED;
2298     }
2299 
2300     return CP_ACCESS_OK;
2301 }
2302 
2303 static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
2304                                         bool isread)
2305 {
2306     unsigned int cur_el = arm_current_el(env);
2307     bool secure = arm_is_secure(env);
2308 
2309     /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
2310     if (cur_el == 0 &&
2311         !extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
2312         return CP_ACCESS_TRAP;
2313     }
2314 
2315     if (arm_feature(env, ARM_FEATURE_EL2) &&
2316         timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
2317         !extract32(env->cp15.cnthctl_el2, 0, 1)) {
2318         return CP_ACCESS_TRAP_EL2;
2319     }
2320     return CP_ACCESS_OK;
2321 }
2322 
2323 static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
2324                                       bool isread)
2325 {
2326     unsigned int cur_el = arm_current_el(env);
2327     bool secure = arm_is_secure(env);
2328 
2329     /* CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from PL0 if
2330      * EL0[PV]TEN is zero.
2331      */
2332     if (cur_el == 0 &&
2333         !extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
2334         return CP_ACCESS_TRAP;
2335     }
2336 
2337     if (arm_feature(env, ARM_FEATURE_EL2) &&
2338         timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
2339         !extract32(env->cp15.cnthctl_el2, 1, 1)) {
2340         return CP_ACCESS_TRAP_EL2;
2341     }
2342     return CP_ACCESS_OK;
2343 }
2344 
2345 static CPAccessResult gt_pct_access(CPUARMState *env,
2346                                     const ARMCPRegInfo *ri,
2347                                     bool isread)
2348 {
2349     return gt_counter_access(env, GTIMER_PHYS, isread);
2350 }
2351 
2352 static CPAccessResult gt_vct_access(CPUARMState *env,
2353                                     const ARMCPRegInfo *ri,
2354                                     bool isread)
2355 {
2356     return gt_counter_access(env, GTIMER_VIRT, isread);
2357 }
2358 
2359 static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
2360                                        bool isread)
2361 {
2362     return gt_timer_access(env, GTIMER_PHYS, isread);
2363 }
2364 
2365 static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
2366                                        bool isread)
2367 {
2368     return gt_timer_access(env, GTIMER_VIRT, isread);
2369 }
2370 
2371 static CPAccessResult gt_stimer_access(CPUARMState *env,
2372                                        const ARMCPRegInfo *ri,
2373                                        bool isread)
2374 {
2375     /* The AArch64 register view of the secure physical timer is
2376      * always accessible from EL3, and configurably accessible from
2377      * Secure EL1.
2378      */
2379     switch (arm_current_el(env)) {
2380     case 1:
2381         if (!arm_is_secure(env)) {
2382             return CP_ACCESS_TRAP;
2383         }
2384         if (!(env->cp15.scr_el3 & SCR_ST)) {
2385             return CP_ACCESS_TRAP_EL3;
2386         }
2387         return CP_ACCESS_OK;
2388     case 0:
2389     case 2:
2390         return CP_ACCESS_TRAP;
2391     case 3:
2392         return CP_ACCESS_OK;
2393     default:
2394         g_assert_not_reached();
2395     }
2396 }
2397 
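/* Current system counter value, derived from QEMU_CLOCK_VIRTUAL at a fixed scale */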
2398 static uint64_t gt_get_countervalue(CPUARMState *env)
2399 {
2400     return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / GTIMER_SCALE;
2401 }
2402 
2403 static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
2404 {
2405     ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];
2406 
2407     if (gt->ctl & 1) {
2408         /* Timer enabled: calculate and set current ISTATUS, irq, and
2409          * reset timer to when ISTATUS next has to change
2410          */
2411         uint64_t offset = timeridx == GTIMER_VIRT ?
2412                                       cpu->env.cp15.cntvoff_el2 : 0;
2413         uint64_t count = gt_get_countervalue(&cpu->env);
2414         /* Note that this must be unsigned 64 bit arithmetic: */
2415         int istatus = count - offset >= gt->cval;
2416         uint64_t nexttick;
2417         int irqstate;
2418 
2419         gt->ctl = deposit32(gt->ctl, 2, 1, istatus);
2420 
2421         irqstate = (istatus && !(gt->ctl & 2));
2422         qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
2423 
2424         if (istatus) {
2425             /* Next transition is when count rolls back over to zero */
2426             nexttick = UINT64_MAX;
2427         } else {
2428             /* Next transition is when we hit cval */
2429             nexttick = gt->cval + offset;
2430         }
2431         /* Note that the desired next expiry time might be beyond the
2432          * signed-64-bit range of a QEMUTimer -- in this case we just
2433          * set the timer for as far in the future as possible. When the
2434          * timer expires we will reset the timer for any remaining period.
2435          */
2436         if (nexttick > INT64_MAX / GTIMER_SCALE) {
2437             nexttick = INT64_MAX / GTIMER_SCALE;
2438         }
2439         timer_mod(cpu->gt_timer[timeridx], nexttick);
2440         trace_arm_gt_recalc(timeridx, irqstate, nexttick);
2441     } else {
2442         /* Timer disabled: ISTATUS and timer output always clear */
2443         gt->ctl &= ~4;
2444         qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
2445         timer_del(cpu->gt_timer[timeridx]);
2446         trace_arm_gt_recalc_disabled(timeridx);
2447     }
2448 }
2449 
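/* Common reset handler for the TVAL views: just cancel any pending timer */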
2450 static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
2451                            int timeridx)
2452 {
2453     ARMCPU *cpu = arm_env_get_cpu(env);
2454 
2455     timer_del(cpu->gt_timer[timeridx]);
2456 }
2457 
2458 static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
2459 {
2460     return gt_get_countervalue(env);
2461 }
2462 
2463 static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
2464 {
2465     return gt_get_countervalue(env) - env->cp15.cntvoff_el2;
2466 }
2467 
2468 static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2469                           int timeridx,
2470                           uint64_t value)
2471 {
2472     trace_arm_gt_cval_write(timeridx, value);
2473     env->cp15.c14_timer[timeridx].cval = value;
2474     gt_recalc_timer(arm_env_get_cpu(env), timeridx);
2475 }
2476 
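/* TVAL is a signed 32-bit downcounter: CVAL minus the (offset-adjusted) current count */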
2477 static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
2478                              int timeridx)
2479 {
2480     uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0;
2481 
2482     return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
2483                       (gt_get_countervalue(env) - offset));
2484 }
2485 
2486 static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2487                           int timeridx,
2488                           uint64_t value)
2489 {
2490     uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0;
2491 
2492     trace_arm_gt_tval_write(timeridx, value);
2493     env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
2494                                          sextract64(value, 0, 32);
2495     gt_recalc_timer(arm_env_get_cpu(env), timeridx);
2496 }
2497 
2498 static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2499                          int timeridx,
2500                          uint64_t value)
2501 {
2502     ARMCPU *cpu = arm_env_get_cpu(env);
2503     uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;
2504 
2505     trace_arm_gt_ctl_write(timeridx, value);
2506     env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
2507     if ((oldval ^ value) & 1) {
2508         /* Enable toggled */
2509         gt_recalc_timer(cpu, timeridx);
2510     } else if ((oldval ^ value) & 2) {
2511         /* IMASK toggled: don't need to recalculate,
2512          * just set the interrupt line based on ISTATUS
2513          */
2514         int irqstate = (oldval & 4) && !(value & 2);
2515 
2516         trace_arm_gt_imask_toggle(timeridx, irqstate);
2517         qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
2518     }
2519 }
2520 
2521 static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2522 {
2523     gt_timer_reset(env, ri, GTIMER_PHYS);
2524 }
2525 
2526 static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2527                                uint64_t value)
2528 {
2529     gt_cval_write(env, ri, GTIMER_PHYS, value);
2530 }
2531 
2532 static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
2533 {
2534     return gt_tval_read(env, ri, GTIMER_PHYS);
2535 }
2536 
2537 static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2538                                uint64_t value)
2539 {
2540     gt_tval_write(env, ri, GTIMER_PHYS, value);
2541 }
2542 
2543 static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2544                               uint64_t value)
2545 {
2546     gt_ctl_write(env, ri, GTIMER_PHYS, value);
2547 }
2548 
2549 static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2550 {
2551     gt_timer_reset(env, ri, GTIMER_VIRT);
2552 }
2553 
2554 static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2555                                uint64_t value)
2556 {
2557     gt_cval_write(env, ri, GTIMER_VIRT, value);
2558 }
2559 
2560 static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
2561 {
2562     return gt_tval_read(env, ri, GTIMER_VIRT);
2563 }
2564 
2565 static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2566                                uint64_t value)
2567 {
2568     gt_tval_write(env, ri, GTIMER_VIRT, value);
2569 }
2570 
2571 static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2572                               uint64_t value)
2573 {
2574     gt_ctl_write(env, ri, GTIMER_VIRT, value);
2575 }
2576 
2577 static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
2578                               uint64_t value)
2579 {
2580     ARMCPU *cpu = arm_env_get_cpu(env);
2581 
2582     trace_arm_gt_cntvoff_write(value);
2583     raw_write(env, ri, value);
2584     gt_recalc_timer(cpu, GTIMER_VIRT);
2585 }
2586 
2587 static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2588 {
2589     gt_timer_reset(env, ri, GTIMER_HYP);
2590 }
2591 
2592 static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2593                               uint64_t value)
2594 {
2595     gt_cval_write(env, ri, GTIMER_HYP, value);
2596 }
2597 
2598 static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
2599 {
2600     return gt_tval_read(env, ri, GTIMER_HYP);
2601 }
2602 
2603 static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2604                               uint64_t value)
2605 {
2606     gt_tval_write(env, ri, GTIMER_HYP, value);
2607 }
2608 
2609 static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2610                               uint64_t value)
2611 {
2612     gt_ctl_write(env, ri, GTIMER_HYP, value);
2613 }
2614 
2615 static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2616 {
2617     gt_timer_reset(env, ri, GTIMER_SEC);
2618 }
2619 
2620 static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2621                               uint64_t value)
2622 {
2623     gt_cval_write(env, ri, GTIMER_SEC, value);
2624 }
2625 
2626 static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
2627 {
2628     return gt_tval_read(env, ri, GTIMER_SEC);
2629 }
2630 
2631 static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2632                               uint64_t value)
2633 {
2634     gt_tval_write(env, ri, GTIMER_SEC, value);
2635 }
2636 
2637 static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2638                               uint64_t value)
2639 {
2640     gt_ctl_write(env, ri, GTIMER_SEC, value);
2641 }
2642 
2643 void arm_gt_ptimer_cb(void *opaque)
2644 {
2645     ARMCPU *cpu = opaque;
2646 
2647     gt_recalc_timer(cpu, GTIMER_PHYS);
2648 }
2649 
2650 void arm_gt_vtimer_cb(void *opaque)
2651 {
2652     ARMCPU *cpu = opaque;
2653 
2654     gt_recalc_timer(cpu, GTIMER_VIRT);
2655 }
2656 
2657 void arm_gt_htimer_cb(void *opaque)
2658 {
2659     ARMCPU *cpu = opaque;
2660 
2661     gt_recalc_timer(cpu, GTIMER_HYP);
2662 }
2663 
2664 void arm_gt_stimer_cb(void *opaque)
2665 {
2666     ARMCPU *cpu = opaque;
2667 
2668     gt_recalc_timer(cpu, GTIMER_SEC);
2669 }
2670 
2671 static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
2672     /* Note that CNTFRQ is purely reads-as-written for the benefit
2673      * of software; writing it doesn't actually change the timer frequency.
2674      * Our reset value matches the fixed frequency we implement the timer at.
2675      */
2676     { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
2677       .type = ARM_CP_ALIAS,
2678       .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
2679       .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
2680     },
2681     { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
2682       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
2683       .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
2684       .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
2685       .resetvalue = (1000 * 1000 * 1000) / GTIMER_SCALE,
2686     },
2687     /* overall control: mostly access permissions */
2688     { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
2689       .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
2690       .access = PL1_RW,
2691       .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
2692       .resetvalue = 0,
2693     },
2694     /* per-timer control */
2695     { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
2696       .secure = ARM_CP_SECSTATE_NS,
2697       .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
2698       .accessfn = gt_ptimer_access,
2699       .fieldoffset = offsetoflow32(CPUARMState,
2700                                    cp15.c14_timer[GTIMER_PHYS].ctl),
2701       .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
2702     },
2703     { .name = "CNTP_CTL_S",
2704       .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
2705       .secure = ARM_CP_SECSTATE_S,
2706       .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
2707       .accessfn = gt_ptimer_access,
2708       .fieldoffset = offsetoflow32(CPUARMState,
2709                                    cp15.c14_timer[GTIMER_SEC].ctl),
2710       .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
2711     },
2712     { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
2713       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
2714       .type = ARM_CP_IO, .access = PL0_RW,
2715       .accessfn = gt_ptimer_access,
2716       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
2717       .resetvalue = 0,
2718       .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
2719     },
2720     { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
2721       .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
2722       .accessfn = gt_vtimer_access,
2723       .fieldoffset = offsetoflow32(CPUARMState,
2724                                    cp15.c14_timer[GTIMER_VIRT].ctl),
2725       .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
2726     },
2727     { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
2728       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
2729       .type = ARM_CP_IO, .access = PL0_RW,
2730       .accessfn = gt_vtimer_access,
2731       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
2732       .resetvalue = 0,
2733       .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
2734     },
2735     /* TimerValue views: a 32 bit downcounting view of the underlying state */
2736     { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
2737       .secure = ARM_CP_SECSTATE_NS,
2738       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
2739       .accessfn = gt_ptimer_access,
2740       .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
2741     },
2742     { .name = "CNTP_TVAL_S",
2743       .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
2744       .secure = ARM_CP_SECSTATE_S,
2745       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
2746       .accessfn = gt_ptimer_access,
2747       .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write,
2748     },
2749     { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
2750       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
2751       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
2752       .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset,
2753       .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
2754     },
2755     { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
2756       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
2757       .accessfn = gt_vtimer_access,
2758       .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
2759     },
2760     { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
2761       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
2762       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
2763       .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset,
2764       .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
2765     },
2766     /* The counter itself */
2767     { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
2768       .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
2769       .accessfn = gt_pct_access,
2770       .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
2771     },
2772     { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
2773       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
2774       .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2775       .accessfn = gt_pct_access, .readfn = gt_cnt_read,
2776     },
2777     { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
2778       .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
2779       .accessfn = gt_vct_access,
2780       .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
2781     },
2782     { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
2783       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
2784       .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2785       .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
2786     },
2787     /* Comparison value, indicating when the timer goes off */
2788     { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
2789       .secure = ARM_CP_SECSTATE_NS,
2790       .access = PL0_RW,
2791       .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
2792       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
2793       .accessfn = gt_ptimer_access,
2794       .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
2795     },
2796     { .name = "CNTP_CVAL_S", .cp = 15, .crm = 14, .opc1 = 2,
2797       .secure = ARM_CP_SECSTATE_S,
2798       .access = PL0_RW,
2799       .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
2800       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
2801       .accessfn = gt_ptimer_access,
2802       .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
2803     },
2804     { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
2805       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
2806       .access = PL0_RW,
2807       .type = ARM_CP_IO,
2808       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
2809       .resetvalue = 0, .accessfn = gt_ptimer_access,
2810       .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
2811     },
2812     { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
2813       .access = PL0_RW,
2814       .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
2815       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
2816       .accessfn = gt_vtimer_access,
2817       .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
2818     },
2819     { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
2820       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
2821       .access = PL0_RW,
2822       .type = ARM_CP_IO,
2823       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
2824       .resetvalue = 0, .accessfn = gt_vtimer_access,
2825       .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
2826     },
2827     /* Secure timer -- this is actually restricted to only EL3
2828      * and configurably Secure-EL1 via the accessfn.
2829      */
2830     { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64,
2831       .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0,
2832       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW,
2833       .accessfn = gt_stimer_access,
2834       .readfn = gt_sec_tval_read,
2835       .writefn = gt_sec_tval_write,
2836       .resetfn = gt_sec_timer_reset,
2837     },
2838     { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64,
2839       .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1,
2840       .type = ARM_CP_IO, .access = PL1_RW,
2841       .accessfn = gt_stimer_access,
2842       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl),
2843       .resetvalue = 0,
2844       .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
2845     },
2846     { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64,
2847       .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2,
2848       .type = ARM_CP_IO, .access = PL1_RW,
2849       .accessfn = gt_stimer_access,
2850       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
2851       .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
2852     },
2853     REGINFO_SENTINEL
2854 };
2855 
2856 #else
2857 
2858 /* In user-mode most of the generic timer registers are inaccessible;
2859  * however, modern kernels (4.12+) allow access to cntvct_el0.
2860  */
2861 
2862 static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
2863 {
2864     /* Currently we have no support for QEMUTimer in linux-user so we
2865      * can't call gt_get_countervalue(env), instead we directly
2866      * call the lower level functions.
2867      */
2868     return cpu_get_clock() / GTIMER_SCALE;
2869 }
2870 
2871 static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
2872     { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
2873       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
2874       .type = ARM_CP_CONST, .access = PL0_R /* no PL1_RW in linux-user */,
2875       .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
2876       .resetvalue = NANOSECONDS_PER_SECOND / GTIMER_SCALE,
2877     },
2878     { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
2879       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
2880       .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2881       .readfn = gt_virt_cnt_read,
2882     },
2883     REGINFO_SENTINEL
2884 };
2885 
2886 #endif
2887 
2888 static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
2889 {
2890     if (arm_feature(env, ARM_FEATURE_LPAE)) {
2891         raw_write(env, ri, value);
2892     } else if (arm_feature(env, ARM_FEATURE_V7)) {
2893         raw_write(env, ri, value & 0xfffff6ff);
2894     } else {
2895         raw_write(env, ri, value & 0xfffff1ff);
2896     }
2897 }
2898 
2899 #ifndef CONFIG_USER_ONLY
2900 /* get_phys_addr() isn't present for user-mode-only targets */
2901 
2902 static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
2903                                  bool isread)
2904 {
2905     if (ri->opc2 & 4) {
2906         /* The ATS12NSO* operations must trap to EL3 if executed in
2907          * Secure EL1 (which can only happen if EL3 is AArch64).
2908          * They are simply UNDEF if executed from NS EL1.
2909          * They function normally from EL2 or EL3.
2910          */
2911         if (arm_current_el(env) == 1) {
2912             if (arm_is_secure_below_el3(env)) {
2913                 return CP_ACCESS_TRAP_UNCATEGORIZED_EL3;
2914             }
2915             return CP_ACCESS_TRAP_UNCATEGORIZED;
2916         }
2917     }
2918     return CP_ACCESS_OK;
2919 }
2920 
2921 static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
2922                              MMUAccessType access_type, ARMMMUIdx mmu_idx)
2923 {
2924     hwaddr phys_addr;
2925     target_ulong page_size;
2926     int prot;
2927     bool ret;
2928     uint64_t par64;
2929     bool format64 = false;
2930     MemTxAttrs attrs = {};
2931     ARMMMUFaultInfo fi = {};
2932     ARMCacheAttrs cacheattrs = {};
2933 
2934     ret = get_phys_addr(env, value, access_type, mmu_idx, &phys_addr, &attrs,
2935                         &prot, &page_size, &fi, &cacheattrs);
2936 
2937     if (is_a64(env)) {
2938         format64 = true;
2939     } else if (arm_feature(env, ARM_FEATURE_LPAE)) {
2940         /*
2941          * ATS1Cxx:
2942          * * TTBCR.EAE determines whether the result is returned using the
2943          *   32-bit or the 64-bit PAR format
2944          * * Instructions executed in Hyp mode always use the 64-bit format
2945          *
2946          * ATS1S2NSOxx uses the 64-bit format if any of the following is true:
2947          * * The Non-secure TTBCR.EAE bit is set to 1
2948          * * The implementation includes EL2, and the value of HCR.VM is 1
2949          *
2950          * (Note that HCR.DC makes HCR.VM behave as if it is 1.)
2951          *
2952          * ATS1Hx always uses the 64-bit format.
2953          */
2954         format64 = arm_s1_regime_using_lpae_format(env, mmu_idx);
2955 
2956         if (arm_feature(env, ARM_FEATURE_EL2)) {
2957             if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
2958                 format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC);
2959             } else {
2960                 format64 |= arm_current_el(env) == 2;
2961             }
2962         }
2963     }
2964 
2965     if (format64) {
2966         /* Create a 64-bit PAR */
2967         par64 = (1 << 11); /* LPAE bit always set */
2968         if (!ret) {
2969             par64 |= phys_addr & ~0xfffULL;
2970             if (!attrs.secure) {
2971                 par64 |= (1 << 9); /* NS */
2972             }
2973             par64 |= (uint64_t)cacheattrs.attrs << 56; /* ATTR */
2974             par64 |= cacheattrs.shareability << 7; /* SH */
2975         } else {
2976             uint32_t fsr = arm_fi_to_lfsc(&fi);
2977 
2978             par64 |= 1; /* F */
2979             par64 |= (fsr & 0x3f) << 1; /* FS */
2980             if (fi.stage2) {
2981                 par64 |= (1 << 9); /* S */
2982             }
2983             if (fi.s1ptw) {
2984                 par64 |= (1 << 8); /* PTW */
2985             }
2986         }
2987     } else {
2988         /* fsr is a DFSR/IFSR value for the short descriptor
2989          * translation table format (with WnR always clear).
2990          * Convert it to a 32-bit PAR.
2991          */
2992         if (!ret) {
2993             /* We do not set any attribute bits in the PAR */
2994             if (page_size == (1 << 24)
2995                 && arm_feature(env, ARM_FEATURE_V7)) {
2996                 par64 = (phys_addr & 0xff000000) | (1 << 1);
2997             } else {
2998                 par64 = phys_addr & 0xfffff000;
2999             }
3000             if (!attrs.secure) {
3001                 par64 |= (1 << 9); /* NS */
3002             }
3003         } else {
3004             uint32_t fsr = arm_fi_to_sfsc(&fi);
3005 
3006             par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) |
3007                     ((fsr & 0xf) << 1) | 1;
3008         }
3009     }
3010     return par64;
3011 }
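
/* Worked example (illustrative values): a successful stage 1 translation
 * to phys_addr 0x80001000 with attrs.secure clear, cacheattrs.attrs 0xff
 * (Normal WB cacheable) and cacheattrs.shareability 3 (Inner Shareable)
 * produces a 64-bit PAR of
 *     (0xffULL << 56)      ATTR
 *   | (1 << 11)            LPAE
 *   | (1 << 9)             NS
 *   | (3 << 7)             SH
 *   | 0x80001000           PA
 *   = 0xff00000080001b80
 * with F (bit 0) clear to indicate success.
 */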
3012 
3013 static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
3014 {
3015     MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
3016     uint64_t par64;
3017     ARMMMUIdx mmu_idx;
3018     int el = arm_current_el(env);
3019     bool secure = arm_is_secure_below_el3(env);
3020 
3021     switch (ri->opc2 & 6) {
3022     case 0:
3023         /* stage 1 current state PL1: ATS1CPR, ATS1CPW */
3024         switch (el) {
3025         case 3:
3026             mmu_idx = ARMMMUIdx_S1E3;
3027             break;
3028         case 2:
3029             mmu_idx = ARMMMUIdx_S1NSE1;
3030             break;
3031         case 1:
3032             mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
3033             break;
3034         default:
3035             g_assert_not_reached();
3036         }
3037         break;
3038     case 2:
3039         /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
3040         switch (el) {
3041         case 3:
3042             mmu_idx = ARMMMUIdx_S1SE0;
3043             break;
3044         case 2:
3045             mmu_idx = ARMMMUIdx_S1NSE0;
3046             break;
3047         case 1:
3048             mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
3049             break;
3050         default:
3051             g_assert_not_reached();
3052         }
3053         break;
3054     case 4:
3055         /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
3056         mmu_idx = ARMMMUIdx_S12NSE1;
3057         break;
3058     case 6:
3059         /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
3060         mmu_idx = ARMMMUIdx_S12NSE0;
3061         break;
3062     default:
3063         g_assert_not_reached();
3064     }
3065 
3066     par64 = do_ats_write(env, value, access_type, mmu_idx);
3067 
3068     A32_BANKED_CURRENT_REG_SET(env, par, par64);
3069 }
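
/* As a concrete example of the decode above: ATS1CUR (stage 1, current
 * state, PL0 read) executed from Non-secure EL1 has (ri->opc2 & 6) == 2
 * and (ri->opc2 & 1) == 0, so it performs an MMU_DATA_LOAD lookup with
 * ARMMMUIdx_S1NSE0 and the result lands in the Non-secure banked PAR
 * via A32_BANKED_CURRENT_REG_SET().
 */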
3070 
3071 static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
3072                         uint64_t value)
3073 {
3074     MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
3075     uint64_t par64;
3076 
3077     par64 = do_ats_write(env, value, access_type, ARMMMUIdx_S1E2);
3078 
3079     A32_BANKED_CURRENT_REG_SET(env, par, par64);
3080 }
3081 
3082 static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
3083                                      bool isread)
3084 {
3085     if (arm_current_el(env) == 3 && !(env->cp15.scr_el3 & SCR_NS)) {
3086         return CP_ACCESS_TRAP;
3087     }
3088     return CP_ACCESS_OK;
3089 }
3090 
3091 static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
3092                         uint64_t value)
3093 {
3094     MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
3095     ARMMMUIdx mmu_idx;
3096     int secure = arm_is_secure_below_el3(env);
3097 
3098     switch (ri->opc2 & 6) {
3099     case 0:
3100         switch (ri->opc1) {
3101         case 0: /* AT S1E1R, AT S1E1W */
3102             mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
3103             break;
3104         case 4: /* AT S1E2R, AT S1E2W */
3105             mmu_idx = ARMMMUIdx_S1E2;
3106             break;
3107         case 6: /* AT S1E3R, AT S1E3W */
3108             mmu_idx = ARMMMUIdx_S1E3;
3109             break;
3110         default:
3111             g_assert_not_reached();
3112         }
3113         break;
3114     case 2: /* AT S1E0R, AT S1E0W */
3115         mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
3116         break;
3117     case 4: /* AT S12E1R, AT S12E1W */
3118         mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S12NSE1;
3119         break;
3120     case 6: /* AT S12E0R, AT S12E0W */
3121         mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S12NSE0;
3122         break;
3123     default:
3124         g_assert_not_reached();
3125     }
3126 
3127     env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx);
3128 }
3129 #endif
3130 
3131 static const ARMCPRegInfo vapa_cp_reginfo[] = {
3132     { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
3133       .access = PL1_RW, .resetvalue = 0,
3134       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s),
3135                              offsetoflow32(CPUARMState, cp15.par_ns) },
3136       .writefn = par_write },
3137 #ifndef CONFIG_USER_ONLY
3138     /* This underdecoding is safe because the reginfo is NO_RAW. */
3139     { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
3140       .access = PL1_W, .accessfn = ats_access,
3141       .writefn = ats_write, .type = ARM_CP_NO_RAW },
3142 #endif
3143     REGINFO_SENTINEL
3144 };
3145 
3146 /* Return basic MPU access permission bits.  */
3147 static uint32_t simple_mpu_ap_bits(uint32_t val)
3148 {
3149     uint32_t ret;
3150     uint32_t mask;
3151     int i;
3152     ret = 0;
3153     mask = 3;
3154     for (i = 0; i < 16; i += 2) {
3155         ret |= (val >> i) & mask;
3156         mask <<= 2;
3157     }
3158     return ret;
3159 }
3160 
3161 /* Pad basic MPU access permission bits to extended format.  */
3162 static uint32_t extended_mpu_ap_bits(uint32_t val)
3163 {
3164     uint32_t ret;
3165     uint32_t mask;
3166     int i;
3167     ret = 0;
3168     mask = 3;
3169     for (i = 0; i < 16; i += 2) {
3170         ret |= (val & mask) << i;
3171         mask <<= 2;
3172     }
3173     return ret;
3174 }
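
/* Worked example: the basic format holds one 2-bit AP field per region,
 * while the extended format spreads the same values out to one 4-bit
 * field per region. With regions 0 and 1 both set to AP = 0b01:
 *   extended_mpu_ap_bits(0x05) == 0x11
 *   simple_mpu_ap_bits(0x11)   == 0x05
 * so the two helpers are inverses for the bits we model.
 */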
3175 
3176 static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
3177                                  uint64_t value)
3178 {
3179     env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value);
3180 }
3181 
3182 static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
3183 {
3184     return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap);
3185 }
3186 
3187 static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
3188                                  uint64_t value)
3189 {
3190     env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value);
3191 }
3192 
3193 static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
3194 {
3195     return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap);
3196 }
3197 
3198 static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri)
3199 {
3200     uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
3201 
3202     if (!u32p) {
3203         return 0;
3204     }
3205 
3206     u32p += env->pmsav7.rnr[M_REG_NS];
3207     return *u32p;
3208 }
3209 
3210 static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri,
3211                          uint64_t value)
3212 {
3213     ARMCPU *cpu = arm_env_get_cpu(env);
3214     uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
3215 
3216     if (!u32p) {
3217         return;
3218     }
3219 
3220     u32p += env->pmsav7.rnr[M_REG_NS];
3221     tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
3222     *u32p = value;
3223 }
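
/* Note: for DRBAR/DRSR/DRACR the reginfo fieldoffset does not point at
 * the register value itself but at a uint32_t * referring to a
 * per-region array (sized by cpu->pmsav7_dregion and allocated when the
 * CPU is realized). pmsav7_read() and pmsav7_write() therefore
 * dereference that pointer and index it by the currently selected
 * region, env->pmsav7.rnr[M_REG_NS].
 */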
3224 
3225 static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3226                               uint64_t value)
3227 {
3228     ARMCPU *cpu = arm_env_get_cpu(env);
3229     uint32_t nrgs = cpu->pmsav7_dregion;
3230 
3231     if (value >= nrgs) {
3232         qemu_log_mask(LOG_GUEST_ERROR,
3233                       "PMSAv7 RGNR write >= # supported regions, %" PRIu32
3234                       " >= %" PRIu32 "\n", (uint32_t)value, nrgs);
3235         return;
3236     }
3237 
3238     raw_write(env, ri, value);
3239 }
3240 
3241 static const ARMCPRegInfo pmsav7_cp_reginfo[] = {
3242     /* Reset for all these registers is handled in arm_cpu_reset(),
3243      * because the PMSAv7 is also used by M-profile CPUs, which do
3244      * not register cpregs but still need the state to be reset.
3245      */
3246     { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0,
3247       .access = PL1_RW, .type = ARM_CP_NO_RAW,
3248       .fieldoffset = offsetof(CPUARMState, pmsav7.drbar),
3249       .readfn = pmsav7_read, .writefn = pmsav7_write,
3250       .resetfn = arm_cp_reset_ignore },
3251     { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2,
3252       .access = PL1_RW, .type = ARM_CP_NO_RAW,
3253       .fieldoffset = offsetof(CPUARMState, pmsav7.drsr),
3254       .readfn = pmsav7_read, .writefn = pmsav7_write,
3255       .resetfn = arm_cp_reset_ignore },
3256     { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4,
3257       .access = PL1_RW, .type = ARM_CP_NO_RAW,
3258       .fieldoffset = offsetof(CPUARMState, pmsav7.dracr),
3259       .readfn = pmsav7_read, .writefn = pmsav7_write,
3260       .resetfn = arm_cp_reset_ignore },
3261     { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0,
3262       .access = PL1_RW,
3263       .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]),
3264       .writefn = pmsav7_rgnr_write,
3265       .resetfn = arm_cp_reset_ignore },
3266     REGINFO_SENTINEL
3267 };
3268 
3269 static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
3270     { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
3271       .access = PL1_RW, .type = ARM_CP_ALIAS,
3272       .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
3273       .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
3274     { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
3275       .access = PL1_RW, .type = ARM_CP_ALIAS,
3276       .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
3277       .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
3278     { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
3279       .access = PL1_RW,
3280       .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
3281       .resetvalue = 0, },
3282     { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
3283       .access = PL1_RW,
3284       .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
3285       .resetvalue = 0, },
3286     { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
3287       .access = PL1_RW,
3288       .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
3289     { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
3290       .access = PL1_RW,
3291       .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
3292     /* Protection region base and size registers */
3293     { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
3294       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3295       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) },
3296     { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
3297       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3298       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) },
3299     { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
3300       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3301       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
3302     { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
3303       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3304       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
3305     { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
3306       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3307       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
3308     { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
3309       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3310       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
3311     { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
3312       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3313       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
3314     { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
3315       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3316       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
3317     REGINFO_SENTINEL
3318 };
3319 
3320 static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
3321                                  uint64_t value)
3322 {
3323     TCR *tcr = raw_ptr(env, ri);
3324     int maskshift = extract32(value, 0, 3);
3325 
3326     if (!arm_feature(env, ARM_FEATURE_V8)) {
3327         if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
3328             /* Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when
3329              * using Long-descriptor translation table format */
3330             value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
3331         } else if (arm_feature(env, ARM_FEATURE_EL3)) {
3332             /* In an implementation that includes the Security Extensions
3333              * TTBCR has additional fields PD0 [4] and PD1 [5] for
3334              * Short-descriptor translation table format.
3335              */
3336             value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N;
3337         } else {
3338             value &= TTBCR_N;
3339         }
3340     }
3341 
3342     /* Update the masks corresponding to the TCR bank being written
3343      * Note that we always calculate mask and base_mask, but
3344      * they are only used for short-descriptor tables (ie if EAE is 0);
3345      * for long-descriptor tables the TCR fields are used differently
3346      * and the mask and base_mask values are meaningless.
3347      */
3348     tcr->raw_tcr = value;
3349     tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift);
3350     tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift);
3351 }
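
/* Worked example: for the short-descriptor format the TTBR0/TTBR1 split
 * is controlled by TTBCR.N (the maskshift value above). With N = 2:
 *   tcr->mask      = ~(0xffffffffu >> 2) = 0xc0000000
 *   tcr->base_mask = ~(0x3fffu >> 2)     = 0xfffff000
 * i.e. a VA with either of its top two bits set is translated via TTBR1,
 * and the TTBR0 table base is taken from bits [31:12] (4K aligned).
 * With N = 0 this collapses to mask = 0 and base_mask = 0xffffc000,
 * which is exactly the reset state set up by vmsa_ttbcr_reset() below.
 */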
3352 
3353 static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3354                              uint64_t value)
3355 {
3356     ARMCPU *cpu = arm_env_get_cpu(env);
3357     TCR *tcr = raw_ptr(env, ri);
3358 
3359     if (arm_feature(env, ARM_FEATURE_LPAE)) {
3360         /* With LPAE the TTBCR could result in a change of ASID
3361          * via the TTBCR.A1 bit, so do a TLB flush.
3362          */
3363         tlb_flush(CPU(cpu));
3364     }
3365     /* Preserve the high half of TCR_EL1, set via TTBCR2.  */
3366     value = deposit64(tcr->raw_tcr, 0, 32, value);
3367     vmsa_ttbcr_raw_write(env, ri, value);
3368 }
3369 
3370 static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
3371 {
3372     TCR *tcr = raw_ptr(env, ri);
3373 
3374     /* Reset both the TCR as well as the masks corresponding to the bank of
3375      * the TCR being reset.
3376      */
3377     tcr->raw_tcr = 0;
3378     tcr->mask = 0;
3379     tcr->base_mask = 0xffffc000u;
3380 }
3381 
3382 static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
3383                                uint64_t value)
3384 {
3385     ARMCPU *cpu = arm_env_get_cpu(env);
3386     TCR *tcr = raw_ptr(env, ri);
3387 
3388     /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
3389     tlb_flush(CPU(cpu));
3390     tcr->raw_tcr = value;
3391 }
3392 
3393 static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3394                             uint64_t value)
3395 {
3396     /* If the ASID changes (with a 64-bit write), we must flush the TLB.  */
3397     if (cpreg_field_is_64bit(ri) &&
3398         extract64(raw_read(env, ri) ^ value, 48, 16) != 0) {
3399         ARMCPU *cpu = arm_env_get_cpu(env);
3400         tlb_flush(CPU(cpu));
3401     }
3402     raw_write(env, ri, value);
3403 }
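
/* Note: for the 64-bit TTBRs the ASID lives in bits [63:48], which is
 * what the extract64(old ^ new, 48, 16) test above isolates. A write
 * that only changes the translation table base address does not flush
 * the TLB; a write that changes the ASID field does.
 */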
3404 
3405 static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3406                         uint64_t value)
3407 {
3408     ARMCPU *cpu = arm_env_get_cpu(env);
3409     CPUState *cs = CPU(cpu);
3410 
3411     /* Accesses to VTTBR may change the VMID so we must flush the TLB.  */
3412     if (raw_read(env, ri) != value) {
3413         tlb_flush_by_mmuidx(cs,
3414                             ARMMMUIdxBit_S12NSE1 |
3415                             ARMMMUIdxBit_S12NSE0 |
3416                             ARMMMUIdxBit_S2NS);
3417         raw_write(env, ri, value);
3418     }
3419 }
3420 
3421 static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = {
3422     { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
3423       .access = PL1_RW, .type = ARM_CP_ALIAS,
3424       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s),
3425                              offsetoflow32(CPUARMState, cp15.dfsr_ns) }, },
3426     { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
3427       .access = PL1_RW, .resetvalue = 0,
3428       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s),
3429                              offsetoflow32(CPUARMState, cp15.ifsr_ns) } },
3430     { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0,
3431       .access = PL1_RW, .resetvalue = 0,
3432       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s),
3433                              offsetof(CPUARMState, cp15.dfar_ns) } },
3434     { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64,
3435       .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
3436       .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
3437       .resetvalue = 0, },
3438     REGINFO_SENTINEL
3439 };
3440 
3441 static const ARMCPRegInfo vmsa_cp_reginfo[] = {
3442     { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
3443       .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
3444       .access = PL1_RW,
3445       .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, },
3446     { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
3447       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0,
3448       .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
3449       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
3450                              offsetof(CPUARMState, cp15.ttbr0_ns) } },
3451     { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
3452       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1,
3453       .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
3454       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
3455                              offsetof(CPUARMState, cp15.ttbr1_ns) } },
3456     { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
3457       .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
3458       .access = PL1_RW, .writefn = vmsa_tcr_el1_write,
3459       .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
3460       .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) },
3461     { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
3462       .access = PL1_RW, .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write,
3463       .raw_writefn = vmsa_ttbcr_raw_write,
3464       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]),
3465                              offsetoflow32(CPUARMState, cp15.tcr_el[1])} },
3466     REGINFO_SENTINEL
3467 };
3468 
3469 /* Note that unlike TTBCR, writing to TTBCR2 does not require flushing
3470  * the QEMU TLBs or adjusting the cached masks.
3471  */
3472 static const ARMCPRegInfo ttbcr2_reginfo = {
3473     .name = "TTBCR2", .cp = 15, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 3,
3474     .access = PL1_RW, .type = ARM_CP_ALIAS,
3475     .bank_fieldoffsets = { offsetofhigh32(CPUARMState, cp15.tcr_el[3]),
3476                            offsetofhigh32(CPUARMState, cp15.tcr_el[1]) },
3477 };
3478 
3479 static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
3480                                 uint64_t value)
3481 {
3482     env->cp15.c15_ticonfig = value & 0xe7;
3483     /* The OS_TYPE bit in this register changes the reported CPUID! */
3484     env->cp15.c0_cpuid = (value & (1 << 5)) ?
3485         ARM_CPUID_TI915T : ARM_CPUID_TI925T;
3486 }
3487 
3488 static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
3489                                 uint64_t value)
3490 {
3491     env->cp15.c15_threadid = value & 0xffff;
3492 }
3493 
3494 static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
3495                            uint64_t value)
3496 {
3497     /* Wait-for-interrupt (deprecated) */
3498     cpu_interrupt(CPU(arm_env_get_cpu(env)), CPU_INTERRUPT_HALT);
3499 }
3500 
3501 static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
3502                                   uint64_t value)
3503 {
3504     /* On OMAP there are registers indicating the max/min index of dcache lines
3505      * containing a dirty line; cache flush operations have to reset these.
3506      */
3507     env->cp15.c15_i_max = 0x000;
3508     env->cp15.c15_i_min = 0xff0;
3509 }
3510 
3511 static const ARMCPRegInfo omap_cp_reginfo[] = {
3512     { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
3513       .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
3514       .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
3515       .resetvalue = 0, },
3516     { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
3517       .access = PL1_RW, .type = ARM_CP_NOP },
3518     { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
3519       .access = PL1_RW,
3520       .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
3521       .writefn = omap_ticonfig_write },
3522     { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
3523       .access = PL1_RW,
3524       .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
3525     { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
3526       .access = PL1_RW, .resetvalue = 0xff0,
3527       .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
3528     { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
3529       .access = PL1_RW,
3530       .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
3531       .writefn = omap_threadid_write },
3532     { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
3533       .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
3534       .type = ARM_CP_NO_RAW,
3535       .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
3536     /* TODO: Peripheral port remap register:
3537      * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
3538      * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
3539      * when MMU is off.
3540      */
3541     { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
3542       .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
3543       .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW,
3544       .writefn = omap_cachemaint_write },
3545     { .name = "C9", .cp = 15, .crn = 9,
3546       .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
3547       .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
3548     REGINFO_SENTINEL
3549 };
3550 
3551 static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
3552                               uint64_t value)
3553 {
3554     env->cp15.c15_cpar = value & 0x3fff;
3555 }
3556 
3557 static const ARMCPRegInfo xscale_cp_reginfo[] = {
3558     { .name = "XSCALE_CPAR",
3559       .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
3560       .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
3561       .writefn = xscale_cpar_write, },
3562     { .name = "XSCALE_AUXCR",
3563       .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
3564       .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
3565       .resetvalue = 0, },
3566     /* XScale-specific cache lockdown: since we have no cache, we NOP these
3567      * and hope the guest does not really rely on cache behaviour.
3568      */
3569     { .name = "XSCALE_LOCK_ICACHE_LINE",
3570       .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
3571       .access = PL1_W, .type = ARM_CP_NOP },
3572     { .name = "XSCALE_UNLOCK_ICACHE",
3573       .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
3574       .access = PL1_W, .type = ARM_CP_NOP },
3575     { .name = "XSCALE_DCACHE_LOCK",
3576       .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0,
3577       .access = PL1_RW, .type = ARM_CP_NOP },
3578     { .name = "XSCALE_UNLOCK_DCACHE",
3579       .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1,
3580       .access = PL1_W, .type = ARM_CP_NOP },
3581     REGINFO_SENTINEL
3582 };
3583 
3584 static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
3585     /* RAZ/WI the whole crn=15 space, when we don't have a more specific
3586      * implementation of this implementation-defined space.
3587      * Ideally this should eventually disappear in favour of actually
3588      * implementing the correct behaviour for all cores.
3589      */
3590     { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
3591       .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
3592       .access = PL1_RW,
3593       .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
3594       .resetvalue = 0 },
3595     REGINFO_SENTINEL
3596 };
3597 
3598 static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
3599     /* Cache status: RAZ because we have no cache so it's always clean */
3600     { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
3601       .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
3602       .resetvalue = 0 },
3603     REGINFO_SENTINEL
3604 };
3605 
3606 static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
3607     /* We never have a block transfer operation in progress */
3608     { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
3609       .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
3610       .resetvalue = 0 },
3611     /* The cache ops themselves: these all NOP for QEMU */
3612     { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
3613       .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
3614     { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
3615       .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
3616     { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
3617       .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
3618     { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
3619       .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
3620     { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
3621       .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
3622     { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
3623       .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
3624     REGINFO_SENTINEL
3625 };
3626 
3627 static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
3628     /* The cache test-and-clean instructions always return (1 << 30)
3629      * to indicate that there are no dirty cache lines.
3630      */
3631     { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
3632       .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
3633       .resetvalue = (1 << 30) },
3634     { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
3635       .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
3636       .resetvalue = (1 << 30) },
3637     REGINFO_SENTINEL
3638 };
3639 
3640 static const ARMCPRegInfo strongarm_cp_reginfo[] = {
3641     /* Ignore ReadBuffer accesses */
3642     { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
3643       .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
3644       .access = PL1_RW, .resetvalue = 0,
3645       .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW },
3646     REGINFO_SENTINEL
3647 };
3648 
3649 static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri)
3650 {
3651     ARMCPU *cpu = arm_env_get_cpu(env);
3652     unsigned int cur_el = arm_current_el(env);
3653     bool secure = arm_is_secure(env);
3654 
3655     if (arm_feature(&cpu->env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
3656         return env->cp15.vpidr_el2;
3657     }
3658     return raw_read(env, ri);
3659 }
3660 
3661 static uint64_t mpidr_read_val(CPUARMState *env)
3662 {
3663     ARMCPU *cpu = ARM_CPU(arm_env_get_cpu(env));
3664     uint64_t mpidr = cpu->mp_affinity;
3665 
3666     if (arm_feature(env, ARM_FEATURE_V7MP)) {
3667         mpidr |= (1U << 31);
3668         /* Cores which are uniprocessor (non-coherent)
3669          * but still implement the MP extensions set
3670          * bit 30. (For instance, Cortex-R5).
3671          */
3672         if (cpu->mp_is_up) {
3673             mpidr |= (1u << 30);
3674         }
3675     }
3676     return mpidr;
3677 }
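
/* Worked example: a CPU with mp_affinity 0x101 (Aff1 = 1, Aff0 = 1) and
 * ARM_FEATURE_V7MP reads MPIDR as 0x80000101 (bit 31, the "uses the
 * multiprocessing extensions format" bit, set). If the core is also
 * flagged as uniprocessor (mp_is_up, e.g. Cortex-R5) the U bit (30) is
 * set too and the value becomes 0xc0000101.
 */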
3678 
3679 static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
3680 {
3681     unsigned int cur_el = arm_current_el(env);
3682     bool secure = arm_is_secure(env);
3683 
3684     if (arm_feature(env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
3685         return env->cp15.vmpidr_el2;
3686     }
3687     return mpidr_read_val(env);
3688 }
3689 
3690 static const ARMCPRegInfo lpae_cp_reginfo[] = {
3691     /* NOP AMAIR0/1 */
3692     { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
3693       .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
3694       .access = PL1_RW, .type = ARM_CP_CONST,
3695       .resetvalue = 0 },
3696     /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
3697     { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
3698       .access = PL1_RW, .type = ARM_CP_CONST,
3699       .resetvalue = 0 },
3700     { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
3701       .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0,
3702       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s),
3703                              offsetof(CPUARMState, cp15.par_ns)} },
3704     { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
3705       .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
3706       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
3707                              offsetof(CPUARMState, cp15.ttbr0_ns) },
3708       .writefn = vmsa_ttbr_write, },
3709     { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
3710       .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
3711       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
3712                              offsetof(CPUARMState, cp15.ttbr1_ns) },
3713       .writefn = vmsa_ttbr_write, },
3714     REGINFO_SENTINEL
3715 };
3716 
3717 static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
3718 {
3719     return vfp_get_fpcr(env);
3720 }
3721 
3722 static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3723                             uint64_t value)
3724 {
3725     vfp_set_fpcr(env, value);
3726 }
3727 
3728 static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
3729 {
3730     return vfp_get_fpsr(env);
3731 }
3732 
3733 static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3734                             uint64_t value)
3735 {
3736     vfp_set_fpsr(env, value);
3737 }
3738 
3739 static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri,
3740                                        bool isread)
3741 {
3742     if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
3743         return CP_ACCESS_TRAP;
3744     }
3745     return CP_ACCESS_OK;
3746 }
3747 
3748 static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
3749                             uint64_t value)
3750 {
3751     env->daif = value & PSTATE_DAIF;
3752 }
3753 
3754 static CPAccessResult aa64_cacheop_access(CPUARMState *env,
3755                                           const ARMCPRegInfo *ri,
3756                                           bool isread)
3757 {
3758     /* Cache invalidate/clean: NOP, but EL0 must UNDEF unless
3759      * SCTLR_EL1.UCI is set.
3760      */
3761     if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCI)) {
3762         return CP_ACCESS_TRAP;
3763     }
3764     return CP_ACCESS_OK;
3765 }
3766 
3767 /* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions
3768  * Page D4-1736 (DDI0487A.b)
3769  */
3770 
3771 static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3772                                       uint64_t value)
3773 {
3774     CPUState *cs = ENV_GET_CPU(env);
3775     bool sec = arm_is_secure_below_el3(env);
3776 
3777     if (sec) {
3778         tlb_flush_by_mmuidx_all_cpus_synced(cs,
3779                                             ARMMMUIdxBit_S1SE1 |
3780                                             ARMMMUIdxBit_S1SE0);
3781     } else {
3782         tlb_flush_by_mmuidx_all_cpus_synced(cs,
3783                                             ARMMMUIdxBit_S12NSE1 |
3784                                             ARMMMUIdxBit_S12NSE0);
3785     }
3786 }
3787 
3788 static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
3789                                     uint64_t value)
3790 {
3791     CPUState *cs = ENV_GET_CPU(env);
3792 
3793     if (tlb_force_broadcast(env)) {
3794         tlbi_aa64_vmalle1is_write(env, NULL, value);
3795         return;
3796     }
3797 
3798     if (arm_is_secure_below_el3(env)) {
3799         tlb_flush_by_mmuidx(cs,
3800                             ARMMMUIdxBit_S1SE1 |
3801                             ARMMMUIdxBit_S1SE0);
3802     } else {
3803         tlb_flush_by_mmuidx(cs,
3804                             ARMMMUIdxBit_S12NSE1 |
3805                             ARMMMUIdxBit_S12NSE0);
3806     }
3807 }
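
/* Note the pattern above (also used by tlbi_aa64_vae1_write below): the
 * non-shareable EL1 TLBI operations fall back to their Inner Shareable
 * variants when tlb_force_broadcast() is true, i.e. when EL1 TLB
 * maintenance is being forced to broadcast (HCR_EL2.FB); otherwise only
 * the local CPU's TLB is flushed.
 */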
3808 
3809 static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
3810                                   uint64_t value)
3811 {
3812     /* Note that the 'ALL' scope must invalidate both stage 1 and
3813      * stage 2 translations, whereas most other scopes only invalidate
3814      * stage 1 translations.
3815      */
3816     ARMCPU *cpu = arm_env_get_cpu(env);
3817     CPUState *cs = CPU(cpu);
3818 
3819     if (arm_is_secure_below_el3(env)) {
3820         tlb_flush_by_mmuidx(cs,
3821                             ARMMMUIdxBit_S1SE1 |
3822                             ARMMMUIdxBit_S1SE0);
3823     } else {
3824         if (arm_feature(env, ARM_FEATURE_EL2)) {
3825             tlb_flush_by_mmuidx(cs,
3826                                 ARMMMUIdxBit_S12NSE1 |
3827                                 ARMMMUIdxBit_S12NSE0 |
3828                                 ARMMMUIdxBit_S2NS);
3829         } else {
3830             tlb_flush_by_mmuidx(cs,
3831                                 ARMMMUIdxBit_S12NSE1 |
3832                                 ARMMMUIdxBit_S12NSE0);
3833         }
3834     }
3835 }
3836 
3837 static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
3838                                   uint64_t value)
3839 {
3840     ARMCPU *cpu = arm_env_get_cpu(env);
3841     CPUState *cs = CPU(cpu);
3842 
3843     tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2);
3844 }
3845 
3846 static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
3847                                   uint64_t value)
3848 {
3849     ARMCPU *cpu = arm_env_get_cpu(env);
3850     CPUState *cs = CPU(cpu);
3851 
3852     tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E3);
3853 }
3854 
3855 static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3856                                     uint64_t value)
3857 {
3858     /* Note that the 'ALL' scope must invalidate both stage 1 and
3859      * stage 2 translations, whereas most other scopes only invalidate
3860      * stage 1 translations.
3861      */
3862     CPUState *cs = ENV_GET_CPU(env);
3863     bool sec = arm_is_secure_below_el3(env);
3864     bool has_el2 = arm_feature(env, ARM_FEATURE_EL2);
3865 
3866     if (sec) {
3867         tlb_flush_by_mmuidx_all_cpus_synced(cs,
3868                                             ARMMMUIdxBit_S1SE1 |
3869                                             ARMMMUIdxBit_S1SE0);
3870     } else if (has_el2) {
3871         tlb_flush_by_mmuidx_all_cpus_synced(cs,
3872                                             ARMMMUIdxBit_S12NSE1 |
3873                                             ARMMMUIdxBit_S12NSE0 |
3874                                             ARMMMUIdxBit_S2NS);
3875     } else {
3876         tlb_flush_by_mmuidx_all_cpus_synced(cs,
3877                                             ARMMMUIdxBit_S12NSE1 |
3878                                             ARMMMUIdxBit_S12NSE0);
3879     }
3880 }
3881 
3882 static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3883                                     uint64_t value)
3884 {
3885     CPUState *cs = ENV_GET_CPU(env);
3886 
3887     tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2);
3888 }
3889 
3890 static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3891                                     uint64_t value)
3892 {
3893     CPUState *cs = ENV_GET_CPU(env);
3894 
3895     tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E3);
3896 }
3897 
3898 static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
3899                                  uint64_t value)
3900 {
3901     /* Invalidate by VA, EL2
3902      * Currently handles both VAE2 and VALE2, since we don't support
3903      * flush-last-level-only.
3904      */
3905     ARMCPU *cpu = arm_env_get_cpu(env);
3906     CPUState *cs = CPU(cpu);
3907     uint64_t pageaddr = sextract64(value << 12, 0, 56);
3908 
3909     tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2);
3910 }
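
/* Worked example: the Xt value for TLBI-by-VA operations holds
 * VA[55:12] in bits [43:0], so "value << 12" followed by a 56-bit sign
 * extension reconstructs the page address. For the kernel address
 * 0xffff000000042000 the guest passes Xt = 0xff000000042 and
 * sextract64(value << 12, 0, 56) recovers 0xffff000000042000.
 */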
3911 
3912 static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
3913                                  uint64_t value)
3914 {
3915     /* Invalidate by VA, EL3
3916      * Currently handles both VAE3 and VALE3, since we don't support
3917      * flush-last-level-only.
3918      */
3919     ARMCPU *cpu = arm_env_get_cpu(env);
3920     CPUState *cs = CPU(cpu);
3921     uint64_t pageaddr = sextract64(value << 12, 0, 56);
3922 
3923     tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E3);
3924 }
3925 
3926 static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3927                                    uint64_t value)
3928 {
3929     ARMCPU *cpu = arm_env_get_cpu(env);
3930     CPUState *cs = CPU(cpu);
3931     bool sec = arm_is_secure_below_el3(env);
3932     uint64_t pageaddr = sextract64(value << 12, 0, 56);
3933 
3934     if (sec) {
3935         tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
3936                                                  ARMMMUIdxBit_S1SE1 |
3937                                                  ARMMMUIdxBit_S1SE0);
3938     } else {
3939         tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
3940                                                  ARMMMUIdxBit_S12NSE1 |
3941                                                  ARMMMUIdxBit_S12NSE0);
3942     }
3943 }
3944 
3945 static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
3946                                  uint64_t value)
3947 {
3948     /* Invalidate by VA, EL1&0 (AArch64 version).
3949      * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1,
3950      * since we don't support flush-for-specific-ASID-only or
3951      * flush-last-level-only.
3952      */
3953     ARMCPU *cpu = arm_env_get_cpu(env);
3954     CPUState *cs = CPU(cpu);
3955     uint64_t pageaddr = sextract64(value << 12, 0, 56);
3956 
3957     if (tlb_force_broadcast(env)) {
3958         tlbi_aa64_vae1is_write(env, NULL, value);
3959         return;
3960     }
3961 
3962     if (arm_is_secure_below_el3(env)) {
3963         tlb_flush_page_by_mmuidx(cs, pageaddr,
3964                                  ARMMMUIdxBit_S1SE1 |
3965                                  ARMMMUIdxBit_S1SE0);
3966     } else {
3967         tlb_flush_page_by_mmuidx(cs, pageaddr,
3968                                  ARMMMUIdxBit_S12NSE1 |
3969                                  ARMMMUIdxBit_S12NSE0);
3970     }
3971 }
3972 
3973 static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3974                                    uint64_t value)
3975 {
3976     CPUState *cs = ENV_GET_CPU(env);
3977     uint64_t pageaddr = sextract64(value << 12, 0, 56);
3978 
3979     tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
3980                                              ARMMMUIdxBit_S1E2);
3981 }
3982 
3983 static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3984                                    uint64_t value)
3985 {
3986     CPUState *cs = ENV_GET_CPU(env);
3987     uint64_t pageaddr = sextract64(value << 12, 0, 56);
3988 
3989     tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
3990                                              ARMMMUIdxBit_S1E3);
3991 }
3992 
3993 static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
3994                                     uint64_t value)
3995 {
3996     /* Invalidate by IPA. This has to invalidate any structures that
3997      * contain only stage 2 translation information, but does not need
3998      * to apply to structures that contain combined stage 1 and stage 2
3999      * translation information.
4000      * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
4001      */
4002     ARMCPU *cpu = arm_env_get_cpu(env);
4003     CPUState *cs = CPU(cpu);
4004     uint64_t pageaddr;
4005 
4006     if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
4007         return;
4008     }
4009 
4010     pageaddr = sextract64(value << 12, 0, 48);
4011 
4012     tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS);
4013 }
4014 
4015 static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4016                                       uint64_t value)
4017 {
4018     CPUState *cs = ENV_GET_CPU(env);
4019     uint64_t pageaddr;
4020 
4021     if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
4022         return;
4023     }
4024 
4025     pageaddr = sextract64(value << 12, 0, 48);
4026 
4027     tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
4028                                              ARMMMUIdxBit_S2NS);
4029 }
4030 
4031 static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
4032                                       bool isread)
4033 {
4034     /* We don't implement EL2, so the only control on DC ZVA is the
4035      * bit in the SCTLR which can prohibit access for EL0.
4036      */
4037     if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_DZE)) {
4038         return CP_ACCESS_TRAP;
4039     }
4040     return CP_ACCESS_OK;
4041 }
4042 
4043 static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri)
4044 {
4045     ARMCPU *cpu = arm_env_get_cpu(env);
4046     int dzp_bit = 1 << 4;
4047 
4048     /* DZP indicates whether DC ZVA access is allowed */
4049     if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) {
4050         dzp_bit = 0;
4051     }
4052     return cpu->dcz_blocksize | dzp_bit;
4053 }
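
/* Note: DCZID_EL0 reports the DC ZVA block size as log2(words) in BS
 * (bits [3:0]) and the prohibited status in DZP (bit 4). For example,
 * with the common dcz_blocksize of 4 (2^4 words = 64 bytes) this reads
 * as 0x4 when DC ZVA is permitted at the current EL and 0x14 when it is
 * prohibited.
 */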
4054 
4055 static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
4056                                     bool isread)
4057 {
4058     if (!(env->pstate & PSTATE_SP)) {
4059         /* Access to SP_EL0 is undefined if it's being used as
4060          * the stack pointer.
4061          */
4062         return CP_ACCESS_TRAP_UNCATEGORIZED;
4063     }
4064     return CP_ACCESS_OK;
4065 }
4066 
4067 static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri)
4068 {
4069     return env->pstate & PSTATE_SP;
4070 }
4071 
4072 static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
4073 {
4074     update_spsel(env, val);
4075 }
4076 
4077 static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4078                         uint64_t value)
4079 {
4080     ARMCPU *cpu = arm_env_get_cpu(env);
4081 
4082     if (raw_read(env, ri) == value) {
4083         /* Skip the TLB flush if nothing actually changed; Linux likes
4084          * to do a lot of pointless SCTLR writes.
4085          */
4086         return;
4087     }
4088 
4089     if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) {
4090         /* M bit is RAZ/WI for PMSA with no MPU implemented */
4091         value &= ~SCTLR_M;
4092     }
4093 
4094     raw_write(env, ri, value);
4095     /* ??? Lots of these bits are not implemented.  */
4096     /* This may enable/disable the MMU, so do a TLB flush.  */
4097     tlb_flush(CPU(cpu));
4098 }
4099 
4100 static CPAccessResult fpexc32_access(CPUARMState *env, const ARMCPRegInfo *ri,
4101                                      bool isread)
4102 {
4103     if ((env->cp15.cptr_el[2] & CPTR_TFP) && arm_current_el(env) == 2) {
4104         return CP_ACCESS_TRAP_FP_EL2;
4105     }
4106     if (env->cp15.cptr_el[3] & CPTR_TFP) {
4107         return CP_ACCESS_TRAP_FP_EL3;
4108     }
4109     return CP_ACCESS_OK;
4110 }
4111 
4112 static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4113                        uint64_t value)
4114 {
4115     env->cp15.mdcr_el3 = value & SDCR_VALID_MASK;
4116 }
4117 
4118 static const ARMCPRegInfo v8_cp_reginfo[] = {
4119     /* Minimal set of EL0-visible registers. This will need to be expanded
4120      * significantly for system emulation of AArch64 CPUs.
4121      */
4122     { .name = "NZCV", .state = ARM_CP_STATE_AA64,
4123       .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2,
4124       .access = PL0_RW, .type = ARM_CP_NZCV },
4125     { .name = "DAIF", .state = ARM_CP_STATE_AA64,
4126       .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2,
4127       .type = ARM_CP_NO_RAW,
4128       .access = PL0_RW, .accessfn = aa64_daif_access,
4129       .fieldoffset = offsetof(CPUARMState, daif),
4130       .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore },
4131     { .name = "FPCR", .state = ARM_CP_STATE_AA64,
4132       .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4,
4133       .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
4134       .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write },
4135     { .name = "FPSR", .state = ARM_CP_STATE_AA64,
4136       .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4,
4137       .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
4138       .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write },
4139     { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64,
4140       .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0,
4141       .access = PL0_R, .type = ARM_CP_NO_RAW,
4142       .readfn = aa64_dczid_read },
4143     { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64,
4144       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1,
4145       .access = PL0_W, .type = ARM_CP_DC_ZVA,
4146 #ifndef CONFIG_USER_ONLY
4147       /* Avoid overhead of an access check that always passes in user-mode */
4148       .accessfn = aa64_zva_access,
4149 #endif
4150     },
4151     { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64,
4152       .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2,
4153       .access = PL1_R, .type = ARM_CP_CURRENTEL },
4154     /* Cache ops: all NOPs since we don't emulate caches */
4155     { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64,
4156       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
4157       .access = PL1_W, .type = ARM_CP_NOP },
4158     { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64,
4159       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
4160       .access = PL1_W, .type = ARM_CP_NOP },
4161     { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64,
4162       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1,
4163       .access = PL0_W, .type = ARM_CP_NOP,
4164       .accessfn = aa64_cacheop_access },
4165     { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64,
4166       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
4167       .access = PL1_W, .type = ARM_CP_NOP },
4168     { .name = "DC_ISW", .state = ARM_CP_STATE_AA64,
4169       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
4170       .access = PL1_W, .type = ARM_CP_NOP },
4171     { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64,
4172       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1,
4173       .access = PL0_W, .type = ARM_CP_NOP,
4174       .accessfn = aa64_cacheop_access },
4175     { .name = "DC_CSW", .state = ARM_CP_STATE_AA64,
4176       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
4177       .access = PL1_W, .type = ARM_CP_NOP },
4178     { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64,
4179       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1,
4180       .access = PL0_W, .type = ARM_CP_NOP,
4181       .accessfn = aa64_cacheop_access },
4182     { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64,
4183       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1,
4184       .access = PL0_W, .type = ARM_CP_NOP,
4185       .accessfn = aa64_cacheop_access },
4186     { .name = "DC_CISW", .state = ARM_CP_STATE_AA64,
4187       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
4188       .access = PL1_W, .type = ARM_CP_NOP },
4189     /* TLBI operations */
4190     { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64,
4191       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
4192       .access = PL1_W, .type = ARM_CP_NO_RAW,
4193       .writefn = tlbi_aa64_vmalle1is_write },
4194     { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64,
4195       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
4196       .access = PL1_W, .type = ARM_CP_NO_RAW,
4197       .writefn = tlbi_aa64_vae1is_write },
4198     { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64,
4199       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
4200       .access = PL1_W, .type = ARM_CP_NO_RAW,
4201       .writefn = tlbi_aa64_vmalle1is_write },
4202     { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64,
4203       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
4204       .access = PL1_W, .type = ARM_CP_NO_RAW,
4205       .writefn = tlbi_aa64_vae1is_write },
4206     { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64,
4207       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
4208       .access = PL1_W, .type = ARM_CP_NO_RAW,
4209       .writefn = tlbi_aa64_vae1is_write },
4210     { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64,
4211       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
4212       .access = PL1_W, .type = ARM_CP_NO_RAW,
4213       .writefn = tlbi_aa64_vae1is_write },
4214     { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64,
4215       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
4216       .access = PL1_W, .type = ARM_CP_NO_RAW,
4217       .writefn = tlbi_aa64_vmalle1_write },
4218     { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64,
4219       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
4220       .access = PL1_W, .type = ARM_CP_NO_RAW,
4221       .writefn = tlbi_aa64_vae1_write },
4222     { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64,
4223       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
4224       .access = PL1_W, .type = ARM_CP_NO_RAW,
4225       .writefn = tlbi_aa64_vmalle1_write },
4226     { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64,
4227       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
4228       .access = PL1_W, .type = ARM_CP_NO_RAW,
4229       .writefn = tlbi_aa64_vae1_write },
4230     { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64,
4231       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
4232       .access = PL1_W, .type = ARM_CP_NO_RAW,
4233       .writefn = tlbi_aa64_vae1_write },
4234     { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64,
4235       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
4236       .access = PL1_W, .type = ARM_CP_NO_RAW,
4237       .writefn = tlbi_aa64_vae1_write },
4238     { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64,
4239       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
4240       .access = PL2_W, .type = ARM_CP_NO_RAW,
4241       .writefn = tlbi_aa64_ipas2e1is_write },
4242     { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64,
4243       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
4244       .access = PL2_W, .type = ARM_CP_NO_RAW,
4245       .writefn = tlbi_aa64_ipas2e1is_write },
4246     { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64,
4247       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
4248       .access = PL2_W, .type = ARM_CP_NO_RAW,
4249       .writefn = tlbi_aa64_alle1is_write },
4250     { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64,
4251       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6,
4252       .access = PL2_W, .type = ARM_CP_NO_RAW,
4253       .writefn = tlbi_aa64_alle1is_write },
4254     { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64,
4255       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
4256       .access = PL2_W, .type = ARM_CP_NO_RAW,
4257       .writefn = tlbi_aa64_ipas2e1_write },
4258     { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64,
4259       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
4260       .access = PL2_W, .type = ARM_CP_NO_RAW,
4261       .writefn = tlbi_aa64_ipas2e1_write },
4262     { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64,
4263       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
4264       .access = PL2_W, .type = ARM_CP_NO_RAW,
4265       .writefn = tlbi_aa64_alle1_write },
4266     { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64,
4267       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6,
4268       .access = PL2_W, .type = ARM_CP_NO_RAW,
4269       .writefn = tlbi_aa64_alle1is_write },
4270 #ifndef CONFIG_USER_ONLY
4271     /* 64 bit address translation operations */
4272     { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
4273       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0,
4274       .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
4275     { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
4276       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1,
4277       .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
4278     { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64,
4279       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2,
4280       .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
4281     { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
4282       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
4283       .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
4284     { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64,
4285       .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4,
4286       .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
4287     { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64,
4288       .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5,
4289       .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
4290     { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64,
4291       .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6,
4292       .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
4293     { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64,
4294       .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7,
4295       .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
4296     /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */
4297     { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64,
4298       .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0,
4299       .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
4300     { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64,
4301       .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1,
4302       .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
4303     { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64,
4304       .type = ARM_CP_ALIAS,
4305       .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0,
4306       .access = PL1_RW, .resetvalue = 0,
4307       .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]),
4308       .writefn = par_write },
4309 #endif
4310     /* TLB invalidate last level of translation table walk */
4311     { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
4312       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
4313     { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
4314       .type = ARM_CP_NO_RAW, .access = PL1_W,
4315       .writefn = tlbimvaa_is_write },
4316     { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
4317       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
4318     { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
4319       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
4320     { .name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
4321       .type = ARM_CP_NO_RAW, .access = PL2_W,
4322       .writefn = tlbimva_hyp_write },
4323     { .name = "TLBIMVALHIS",
4324       .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
4325       .type = ARM_CP_NO_RAW, .access = PL2_W,
4326       .writefn = tlbimva_hyp_is_write },
4327     { .name = "TLBIIPAS2",
4328       .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
4329       .type = ARM_CP_NO_RAW, .access = PL2_W,
4330       .writefn = tlbiipas2_write },
4331     { .name = "TLBIIPAS2IS",
4332       .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
4333       .type = ARM_CP_NO_RAW, .access = PL2_W,
4334       .writefn = tlbiipas2_is_write },
4335     { .name = "TLBIIPAS2L",
4336       .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
4337       .type = ARM_CP_NO_RAW, .access = PL2_W,
4338       .writefn = tlbiipas2_write },
4339     { .name = "TLBIIPAS2LIS",
4340       .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
4341       .type = ARM_CP_NO_RAW, .access = PL2_W,
4342       .writefn = tlbiipas2_is_write },
4343     /* 32 bit cache operations */
4344     { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
4345       .type = ARM_CP_NOP, .access = PL1_W },
4346     { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6,
4347       .type = ARM_CP_NOP, .access = PL1_W },
4348     { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
4349       .type = ARM_CP_NOP, .access = PL1_W },
4350     { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1,
4351       .type = ARM_CP_NOP, .access = PL1_W },
4352     { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6,
4353       .type = ARM_CP_NOP, .access = PL1_W },
4354     { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7,
4355       .type = ARM_CP_NOP, .access = PL1_W },
4356     { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
4357       .type = ARM_CP_NOP, .access = PL1_W },
4358     { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
4359       .type = ARM_CP_NOP, .access = PL1_W },
4360     { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1,
4361       .type = ARM_CP_NOP, .access = PL1_W },
4362     { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
4363       .type = ARM_CP_NOP, .access = PL1_W },
4364     { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1,
4365       .type = ARM_CP_NOP, .access = PL1_W },
4366     { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1,
4367       .type = ARM_CP_NOP, .access = PL1_W },
4368     { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
4369       .type = ARM_CP_NOP, .access = PL1_W },
4370     /* MMU Domain access control / MPU write buffer control */
4371     { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0,
4372       .access = PL1_RW, .resetvalue = 0,
4373       .writefn = dacr_write, .raw_writefn = raw_write,
4374       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
4375                              offsetoflow32(CPUARMState, cp15.dacr_ns) } },
4376     { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
4377       .type = ARM_CP_ALIAS,
4378       .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
4379       .access = PL1_RW,
4380       .fieldoffset = offsetof(CPUARMState, elr_el[1]) },
4381     { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
4382       .type = ARM_CP_ALIAS,
4383       .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
4384       .access = PL1_RW,
4385       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) },
4386     /* We rely on the access checks not allowing the guest to write to the
4387      * state field when SPSel indicates that it's being used as the stack
4388      * pointer.
4389      */
4390     { .name = "SP_EL0", .state = ARM_CP_STATE_AA64,
4391       .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0,
4392       .access = PL1_RW, .accessfn = sp_el0_access,
4393       .type = ARM_CP_ALIAS,
4394       .fieldoffset = offsetof(CPUARMState, sp_el[0]) },
4395     { .name = "SP_EL1", .state = ARM_CP_STATE_AA64,
4396       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0,
4397       .access = PL2_RW, .type = ARM_CP_ALIAS,
4398       .fieldoffset = offsetof(CPUARMState, sp_el[1]) },
4399     { .name = "SPSel", .state = ARM_CP_STATE_AA64,
4400       .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0,
4401       .type = ARM_CP_NO_RAW,
4402       .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
4403     { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64,
4404       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0,
4405       .type = ARM_CP_ALIAS,
4406       .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]),
4407       .access = PL2_RW, .accessfn = fpexc32_access },
4408     { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64,
4409       .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0,
4410       .access = PL2_RW, .resetvalue = 0,
4411       .writefn = dacr_write, .raw_writefn = raw_write,
4412       .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) },
4413     { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64,
4414       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1,
4415       .access = PL2_RW, .resetvalue = 0,
4416       .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) },
4417     { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64,
4418       .type = ARM_CP_ALIAS,
4419       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0,
4420       .access = PL2_RW,
4421       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) },
4422     { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64,
4423       .type = ARM_CP_ALIAS,
4424       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1,
4425       .access = PL2_RW,
4426       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) },
4427     { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64,
4428       .type = ARM_CP_ALIAS,
4429       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2,
4430       .access = PL2_RW,
4431       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) },
4432     { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64,
4433       .type = ARM_CP_ALIAS,
4434       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3,
4435       .access = PL2_RW,
4436       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) },
4437     { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64,
4438       .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1,
4439       .resetvalue = 0,
4440       .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) },
4441     { .name = "SDCR", .type = ARM_CP_ALIAS,
4442       .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1,
4443       .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
4444       .writefn = sdcr_write,
4445       .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) },
4446     REGINFO_SENTINEL
4447 };
4448 
4449 /* Used to describe the behaviour of EL2 regs when EL2 does not exist.  */
4450 static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = {
4451     { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
4452       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
4453       .access = PL2_RW,
4454       .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
4455     { .name = "HCR_EL2", .state = ARM_CP_STATE_BOTH,
4456       /* Dummy HCR_EL2 for when EL2 is not implemented: constant zero */
4457       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
4458       .access = PL2_RW,
4459       .type = ARM_CP_CONST, .resetvalue = 0 },
4460     { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH,
4461       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7,
4462       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4463     { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
4464       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
4465       .access = PL2_RW,
4466       .type = ARM_CP_CONST, .resetvalue = 0 },
4467     { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
4468       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
4469       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4470     { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
4471       .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
4472       .access = PL2_RW, .type = ARM_CP_CONST,
4473       .resetvalue = 0 },
4474     { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
4475       .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
4476       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4477     { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
4478       .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
4479       .access = PL2_RW, .type = ARM_CP_CONST,
4480       .resetvalue = 0 },
4481     { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
4482       .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
4483       .access = PL2_RW, .type = ARM_CP_CONST,
4484       .resetvalue = 0 },
4485     { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
4486       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
4487       .access = PL2_RW, .type = ARM_CP_CONST,
4488       .resetvalue = 0 },
4489     { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
4490       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
4491       .access = PL2_RW, .type = ARM_CP_CONST,
4492       .resetvalue = 0 },
4493     { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
4494       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
4495       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4496     { .name = "VTCR_EL2", .state = ARM_CP_STATE_BOTH,
4497       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
4498       .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
4499       .type = ARM_CP_CONST, .resetvalue = 0 },
4500     { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
4501       .cp = 15, .opc1 = 6, .crm = 2,
4502       .access = PL2_RW, .accessfn = access_el3_aa32ns,
4503       .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
4504     { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
4505       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
4506       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4507     { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
4508       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
4509       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4510     { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
4511       .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
4512       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4513     { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
4514       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
4515       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4516     { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
4517       .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
4518       .resetvalue = 0 },
4519     { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
4520       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
4521       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4522     { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
4523       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
4524       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4525     { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
4526       .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
4527       .resetvalue = 0 },
4528     { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
4529       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
4530       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4531     { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
4532       .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
4533       .resetvalue = 0 },
4534     { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
4535       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
4536       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4537     { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
4538       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
4539       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4540     { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
4541       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
4542       .access = PL2_RW, .accessfn = access_tda,
4543       .type = ARM_CP_CONST, .resetvalue = 0 },
4544     { .name = "HPFAR_EL2", .state = ARM_CP_STATE_BOTH,
4545       .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
4546       .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
4547       .type = ARM_CP_CONST, .resetvalue = 0 },
4548     { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
4549       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
4550       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4551     { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
4552       .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
4553       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4554     { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
4555       .type = ARM_CP_CONST,
4556       .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
4557       .access = PL2_RW, .resetvalue = 0 },
4558     REGINFO_SENTINEL
4559 };
4560 
4561 /* Ditto, but for registers which exist in ARMv8 but not v7 */
4562 static const ARMCPRegInfo el3_no_el2_v8_cp_reginfo[] = {
4563     { .name = "HCR2", .state = ARM_CP_STATE_AA32,
4564       .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
4565       .access = PL2_RW,
4566       .type = ARM_CP_CONST, .resetvalue = 0 },
4567     REGINFO_SENTINEL
4568 };
4569 
4570 static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
4571 {
4572     ARMCPU *cpu = arm_env_get_cpu(env);
4573     uint64_t valid_mask = HCR_MASK;
4574 
4575     if (arm_feature(env, ARM_FEATURE_EL3)) {
4576         valid_mask &= ~HCR_HCD;
4577     } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
4578         /* Architecturally HCR.TSC is RES0 if EL3 is not implemented.
4579          * However, if we're using the SMC PSCI conduit then QEMU is
4580          * effectively acting like EL3 firmware and so the guest at
4581          * EL2 should retain the ability to prevent EL1 from being
4582          * able to make SMC calls into the ersatz firmware, so in
4583          * that case HCR.TSC should be read/write.
4584          */
4585         valid_mask &= ~HCR_TSC;
4586     }
4587     if (cpu_isar_feature(aa64_lor, cpu)) {
4588         valid_mask |= HCR_TLOR;
4589     }
4590     if (cpu_isar_feature(aa64_pauth, cpu)) {
4591         valid_mask |= HCR_API | HCR_APK;
4592     }
4593 
4594     /* Clear RES0 bits.  */
4595     value &= valid_mask;
4596 
4597     /* These bits change the MMU setup:
4598      * HCR_VM enables stage 2 translation
4599      * HCR_PTW forbids certain page-table setups
4600      * HCR_DC disables stage 1 and enables stage 2 translation
4601      */
4602     if ((env->cp15.hcr_el2 ^ value) & (HCR_VM | HCR_PTW | HCR_DC)) {
4603         tlb_flush(CPU(cpu));
4604     }
4605     env->cp15.hcr_el2 = value;
4606 
4607     /*
4608      * Updates to VI and VF require us to update the status of
4609      * virtual interrupts, which are the logical OR of these bits
4610      * and the state of the input lines from the GIC. (This requires
4611      * that we have the iothread lock, which is done by marking the
4612      * reginfo structs as ARM_CP_IO.)
4613      * Note that if a write to HCR pends a VIRQ or VFIQ it is never
4614      * possible for it to be taken immediately, because VIRQ and
4615      * VFIQ are masked unless running at EL0 or EL1, and HCR
4616      * can only be written at EL2.
4617      */
4618     g_assert(qemu_mutex_iothread_locked());
4619     arm_cpu_update_virq(cpu);
4620     arm_cpu_update_vfiq(cpu);
4621 }
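
/*
 * Worked example of the masking above (illustrative): when EL3 is not
 * implemented and the PSCI conduit is not SMC, HCR.TSC is treated as
 * RES0 and writes to it are discarded; when the conduit is SMC, the
 * guest's EL2 keeps a writable TSC so it can still trap EL1 SMC calls
 * into the emulated firmware.  Separately, toggling HCR_VM, HCR_PTW or
 * HCR_DC forces a tlb_flush() because those bits change how stage 1
 * and stage 2 translation behave.
 */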
4622 
4623 static void hcr_writehigh(CPUARMState *env, const ARMCPRegInfo *ri,
4624                           uint64_t value)
4625 {
4626     /* Handle HCR2 write, i.e. write to high half of HCR_EL2 */
4627     value = deposit64(env->cp15.hcr_el2, 32, 32, value);
4628     hcr_write(env, NULL, value);
4629 }
4630 
4631 static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri,
4632                          uint64_t value)
4633 {
4634     /* Handle HCR write, i.e. write to low half of HCR_EL2 */
4635     value = deposit64(env->cp15.hcr_el2, 0, 32, value);
4636     hcr_write(env, NULL, value);
4637 }
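
/*
 * Illustrative example of the split write path above: deposit64(
 * env->cp15.hcr_el2, 32, 32, value) merges a 32-bit HCR2 write into
 * bits [63:32] while preserving the low half previously written via
 * HCR; e.g. with hcr_el2 = 0x80000000 and an HCR2 write of 0x1, the
 * merged value passed on to hcr_write() is 0x0000000180000000.
 */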
4638 
4639 /*
4640  * Return the effective value of HCR_EL2.
4641  * Bits that are not included here:
4642  * RW       (read from SCR_EL3.RW as needed)
4643  */
4644 uint64_t arm_hcr_el2_eff(CPUARMState *env)
4645 {
4646     uint64_t ret = env->cp15.hcr_el2;
4647 
4648     if (arm_is_secure_below_el3(env)) {
4649         /*
4650          * "This register has no effect if EL2 is not enabled in the
4651          * current Security state".  This is ARMv8.4-SecEL2 speak for
4652          * !(SCR_EL3.NS==1 || SCR_EL3.EEL2==1).
4653          *
4654          * Prior to that, the language was "In an implementation that
4655          * includes EL3, when the value of SCR_EL3.NS is 0 the PE behaves
4656          * as if this field is 0 for all purposes other than a direct
4657          * read or write access of HCR_EL2".  With lots of enumeration
4658          * on a per-field basis.  In current QEMU, this is condition
4659          * is arm_is_secure_below_el3.
4660          *
4661          * Since the v8.4 language applies to the entire register, and
4662          * appears to be backward compatible, use that.
4663          */
4664         ret = 0;
4665     } else if (ret & HCR_TGE) {
4666         /* These bits are up-to-date as of ARMv8.4.  */
4667         if (ret & HCR_E2H) {
4668             ret &= ~(HCR_VM | HCR_FMO | HCR_IMO | HCR_AMO |
4669                      HCR_BSU_MASK | HCR_DC | HCR_TWI | HCR_TWE |
4670                      HCR_TID0 | HCR_TID2 | HCR_TPCP | HCR_TPU |
4671                      HCR_TDZ | HCR_CD | HCR_ID | HCR_MIOCNCE);
4672         } else {
4673             ret |= HCR_FMO | HCR_IMO | HCR_AMO;
4674         }
4675         ret &= ~(HCR_SWIO | HCR_PTW | HCR_VF | HCR_VI | HCR_VSE |
4676                  HCR_FB | HCR_TID1 | HCR_TID3 | HCR_TSC | HCR_TACR |
4677                  HCR_TSW | HCR_TTLB | HCR_TVM | HCR_HCD | HCR_TRVM |
4678                  HCR_TLOR);
4679     }
4680 
4681     return ret;
4682 }
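
/*
 * Example of the TGE handling above (illustrative): with a stored
 * HCR_EL2 of HCR_TGE | HCR_VI and E2H clear, this returns
 * HCR_TGE | HCR_FMO | HCR_IMO | HCR_AMO: VI is removed by the final
 * clear and the physical interrupt routing bits are forced to 1,
 * per the v8.4 treatment noted in the comments above.
 */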
4683 
4684 static const ARMCPRegInfo el2_cp_reginfo[] = {
4685     { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
4686       .type = ARM_CP_IO,
4687       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
4688       .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
4689       .writefn = hcr_write },
4690     { .name = "HCR", .state = ARM_CP_STATE_AA32,
4691       .type = ARM_CP_ALIAS | ARM_CP_IO,
4692       .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
4693       .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
4694       .writefn = hcr_writelow },
4695     { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH,
4696       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7,
4697       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4698     { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
4699       .type = ARM_CP_ALIAS,
4700       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
4701       .access = PL2_RW,
4702       .fieldoffset = offsetof(CPUARMState, elr_el[2]) },
4703     { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
4704       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
4705       .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) },
4706     { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
4707       .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
4708       .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) },
4709     { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
4710       .type = ARM_CP_ALIAS,
4711       .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
4712       .access = PL2_RW,
4713       .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el[2]) },
4714     { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
4715       .type = ARM_CP_ALIAS,
4716       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
4717       .access = PL2_RW,
4718       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) },
4719     { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
4720       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
4721       .access = PL2_RW, .writefn = vbar_write,
4722       .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]),
4723       .resetvalue = 0 },
4724     { .name = "SP_EL2", .state = ARM_CP_STATE_AA64,
4725       .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0,
4726       .access = PL3_RW, .type = ARM_CP_ALIAS,
4727       .fieldoffset = offsetof(CPUARMState, sp_el[2]) },
4728     { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
4729       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
4730       .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0,
4731       .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]) },
4732     { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
4733       .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
4734       .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]),
4735       .resetvalue = 0 },
4736     { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
4737       .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
4738       .access = PL2_RW, .type = ARM_CP_ALIAS,
4739       .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) },
4740     { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
4741       .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
4742       .access = PL2_RW, .type = ARM_CP_CONST,
4743       .resetvalue = 0 },
4744     /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */
4745     { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
4746       .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
4747       .access = PL2_RW, .type = ARM_CP_CONST,
4748       .resetvalue = 0 },
4749     { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
4750       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
4751       .access = PL2_RW, .type = ARM_CP_CONST,
4752       .resetvalue = 0 },
4753     { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
4754       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
4755       .access = PL2_RW, .type = ARM_CP_CONST,
4756       .resetvalue = 0 },
4757     { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
4758       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
4759       .access = PL2_RW,
4760       /* no .writefn needed as this can't cause an ASID change;
4761        * no .raw_writefn or .resetfn needed as we never use mask/base_mask
4762        */
4763       .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) },
4764     { .name = "VTCR", .state = ARM_CP_STATE_AA32,
4765       .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
4766       .type = ARM_CP_ALIAS,
4767       .access = PL2_RW, .accessfn = access_el3_aa32ns,
4768       .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
4769     { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64,
4770       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
4771       .access = PL2_RW,
4772       /* no .writefn needed as this can't cause an ASID change;
4773        * no .raw_writefn or .resetfn needed as we never use mask/base_mask
4774        */
4775       .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
4776     { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
4777       .cp = 15, .opc1 = 6, .crm = 2,
4778       .type = ARM_CP_64BIT | ARM_CP_ALIAS,
4779       .access = PL2_RW, .accessfn = access_el3_aa32ns,
4780       .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2),
4781       .writefn = vttbr_write },
4782     { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
4783       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
4784       .access = PL2_RW, .writefn = vttbr_write,
4785       .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) },
4786     { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
4787       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
4788       .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write,
4789       .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) },
4790     { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
4791       .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
4792       .access = PL2_RW, .resetvalue = 0,
4793       .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) },
4794     { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
4795       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
4796       .access = PL2_RW, .resetvalue = 0,
4797       .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
4798     { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
4799       .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
4800       .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
4801     { .name = "TLBIALLNSNH",
4802       .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
4803       .type = ARM_CP_NO_RAW, .access = PL2_W,
4804       .writefn = tlbiall_nsnh_write },
4805     { .name = "TLBIALLNSNHIS",
4806       .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
4807       .type = ARM_CP_NO_RAW, .access = PL2_W,
4808       .writefn = tlbiall_nsnh_is_write },
4809     { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
4810       .type = ARM_CP_NO_RAW, .access = PL2_W,
4811       .writefn = tlbiall_hyp_write },
4812     { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
4813       .type = ARM_CP_NO_RAW, .access = PL2_W,
4814       .writefn = tlbiall_hyp_is_write },
4815     { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
4816       .type = ARM_CP_NO_RAW, .access = PL2_W,
4817       .writefn = tlbimva_hyp_write },
4818     { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
4819       .type = ARM_CP_NO_RAW, .access = PL2_W,
4820       .writefn = tlbimva_hyp_is_write },
4821     { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64,
4822       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
4823       .type = ARM_CP_NO_RAW, .access = PL2_W,
4824       .writefn = tlbi_aa64_alle2_write },
4825     { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64,
4826       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
4827       .type = ARM_CP_NO_RAW, .access = PL2_W,
4828       .writefn = tlbi_aa64_vae2_write },
4829     { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64,
4830       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
4831       .access = PL2_W, .type = ARM_CP_NO_RAW,
4832       .writefn = tlbi_aa64_vae2_write },
4833     { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64,
4834       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
4835       .access = PL2_W, .type = ARM_CP_NO_RAW,
4836       .writefn = tlbi_aa64_alle2is_write },
4837     { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64,
4838       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
4839       .type = ARM_CP_NO_RAW, .access = PL2_W,
4840       .writefn = tlbi_aa64_vae2is_write },
4841     { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64,
4842       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
4843       .access = PL2_W, .type = ARM_CP_NO_RAW,
4844       .writefn = tlbi_aa64_vae2is_write },
4845 #ifndef CONFIG_USER_ONLY
4846     /* Unlike the other EL2-related AT operations, these must
4847      * UNDEF from EL3 if EL2 is not implemented, which is why we
4848      * define them here rather than with the rest of the AT ops.
4849      */
4850     { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64,
4851       .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
4852       .access = PL2_W, .accessfn = at_s1e2_access,
4853       .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
4854     { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64,
4855       .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
4856       .access = PL2_W, .accessfn = at_s1e2_access,
4857       .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
4858     /* The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE
4859      * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3
4860      * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose
4861      * to behave as if SCR.NS was 1.
4862      */
4863     { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
4864       .access = PL2_W,
4865       .writefn = ats1h_write, .type = ARM_CP_NO_RAW },
4866     { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
4867       .access = PL2_W,
4868       .writefn = ats1h_write, .type = ARM_CP_NO_RAW },
4869     { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
4870       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
4871       /* ARMv7 requires bits 0 and 1 to reset to 1. ARMv8 defines the
4872        * reset values as IMPDEF. We choose to reset to 3 to comply with
4873        * both ARMv7 and ARMv8.
4874        */
4875       .access = PL2_RW, .resetvalue = 3,
4876       .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) },
4877     { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
4878       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
4879       .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0,
4880       .writefn = gt_cntvoff_write,
4881       .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
4882     { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
4883       .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO,
4884       .writefn = gt_cntvoff_write,
4885       .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
4886     { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
4887       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
4888       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
4889       .type = ARM_CP_IO, .access = PL2_RW,
4890       .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
4891     { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
4892       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
4893       .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO,
4894       .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
4895     { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
4896       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
4897       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
4898       .resetfn = gt_hyp_timer_reset,
4899       .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write },
4900     { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
4901       .type = ARM_CP_IO,
4902       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
4903       .access = PL2_RW,
4904       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl),
4905       .resetvalue = 0,
4906       .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write },
4907 #endif
4908     /* The only field of MDCR_EL2 that has a defined architectural reset value
4909      * is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N; but we
4910      * don't implement any PMU event counters, so using zero as a reset
4911      * value for MDCR_EL2 is okay
4912      */
4913     { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
4914       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
4915       .access = PL2_RW, .resetvalue = 0,
4916       .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2), },
4917     { .name = "HPFAR", .state = ARM_CP_STATE_AA32,
4918       .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
4919       .access = PL2_RW, .accessfn = access_el3_aa32ns,
4920       .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
4921     { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64,
4922       .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
4923       .access = PL2_RW,
4924       .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
4925     { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
4926       .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
4927       .access = PL2_RW,
4928       .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) },
4929     REGINFO_SENTINEL
4930 };
4931 
4932 static const ARMCPRegInfo el2_v8_cp_reginfo[] = {
4933     { .name = "HCR2", .state = ARM_CP_STATE_AA32,
4934       .type = ARM_CP_ALIAS | ARM_CP_IO,
4935       .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
4936       .access = PL2_RW,
4937       .fieldoffset = offsetofhigh32(CPUARMState, cp15.hcr_el2),
4938       .writefn = hcr_writehigh },
4939     REGINFO_SENTINEL
4940 };
4941 
4942 static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
4943                                    bool isread)
4944 {
4945     /* The NSACR is RW at EL3, and RO for NS EL1 and NS EL2.
4946      * At Secure EL1 it traps to EL3.
4947      */
4948     if (arm_current_el(env) == 3) {
4949         return CP_ACCESS_OK;
4950     }
4951     if (arm_is_secure_below_el3(env)) {
4952         return CP_ACCESS_TRAP_EL3;
4953     }
4954     /* Accesses from EL1 NS and EL2 NS are UNDEF for write but allow reads. */
4955     if (isread) {
4956         return CP_ACCESS_OK;
4957     }
4958     return CP_ACCESS_TRAP_UNCATEGORIZED;
4959 }
4960 
4961 static const ARMCPRegInfo el3_cp_reginfo[] = {
4962     { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64,
4963       .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
4964       .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3),
4965       .resetvalue = 0, .writefn = scr_write },
4966     { .name = "SCR",  .type = ARM_CP_ALIAS,
4967       .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0,
4968       .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
4969       .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3),
4970       .writefn = scr_write },
4971     { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64,
4972       .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1,
4973       .access = PL3_RW, .resetvalue = 0,
4974       .fieldoffset = offsetof(CPUARMState, cp15.sder) },
4975     { .name = "SDER",
4976       .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1,
4977       .access = PL3_RW, .resetvalue = 0,
4978       .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) },
4979     { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
4980       .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
4981       .writefn = vbar_write, .resetvalue = 0,
4982       .fieldoffset = offsetof(CPUARMState, cp15.mvbar) },
4983     { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64,
4984       .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0,
4985       .access = PL3_RW, .resetvalue = 0,
4986       .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) },
4987     { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64,
4988       .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2,
4989       .access = PL3_RW,
4990       /* no .writefn needed as this can't cause an ASID change;
4991        * we must provide a .raw_writefn and .resetfn because we handle
4992        * reset and migration for the AArch32 TTBCR(S), which might be
4993        * using mask and base_mask.
4994        */
4995       .resetfn = vmsa_ttbcr_reset, .raw_writefn = vmsa_ttbcr_raw_write,
4996       .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) },
4997     { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
4998       .type = ARM_CP_ALIAS,
4999       .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1,
5000       .access = PL3_RW,
5001       .fieldoffset = offsetof(CPUARMState, elr_el[3]) },
5002     { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64,
5003       .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0,
5004       .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) },
5005     { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64,
5006       .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0,
5007       .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) },
5008     { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64,
5009       .type = ARM_CP_ALIAS,
5010       .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0,
5011       .access = PL3_RW,
5012       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) },
5013     { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64,
5014       .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0,
5015       .access = PL3_RW, .writefn = vbar_write,
5016       .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]),
5017       .resetvalue = 0 },
5018     { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64,
5019       .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2,
5020       .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0,
5021       .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) },
5022     { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64,
5023       .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2,
5024       .access = PL3_RW, .resetvalue = 0,
5025       .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) },
5026     { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64,
5027       .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0,
5028       .access = PL3_RW, .type = ARM_CP_CONST,
5029       .resetvalue = 0 },
5030     { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH,
5031       .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0,
5032       .access = PL3_RW, .type = ARM_CP_CONST,
5033       .resetvalue = 0 },
5034     { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH,
5035       .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1,
5036       .access = PL3_RW, .type = ARM_CP_CONST,
5037       .resetvalue = 0 },
5038     { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64,
5039       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0,
5040       .access = PL3_W, .type = ARM_CP_NO_RAW,
5041       .writefn = tlbi_aa64_alle3is_write },
5042     { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64,
5043       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1,
5044       .access = PL3_W, .type = ARM_CP_NO_RAW,
5045       .writefn = tlbi_aa64_vae3is_write },
5046     { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64,
5047       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5,
5048       .access = PL3_W, .type = ARM_CP_NO_RAW,
5049       .writefn = tlbi_aa64_vae3is_write },
5050     { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64,
5051       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0,
5052       .access = PL3_W, .type = ARM_CP_NO_RAW,
5053       .writefn = tlbi_aa64_alle3_write },
5054     { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64,
5055       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1,
5056       .access = PL3_W, .type = ARM_CP_NO_RAW,
5057       .writefn = tlbi_aa64_vae3_write },
5058     { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64,
5059       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5,
5060       .access = PL3_W, .type = ARM_CP_NO_RAW,
5061       .writefn = tlbi_aa64_vae3_write },
5062     REGINFO_SENTINEL
5063 };
5064 
5065 static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
5066                                      bool isread)
5067 {
5068     /* Only accessible in EL0 if SCTLR.UCT is set (and only in AArch64,
5069      * but the AArch32 CTR has its own reginfo struct)
5070      */
5071     if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCT)) {
5072         return CP_ACCESS_TRAP;
5073     }
5074     return CP_ACCESS_OK;
5075 }
5076 
5077 static void oslar_write(CPUARMState *env, const ARMCPRegInfo *ri,
5078                         uint64_t value)
5079 {
5080     /* Writes to OSLAR_EL1 may update the OS lock status, which can be
5081      * read via a bit in OSLSR_EL1.
5082      */
5083     int oslock;
5084 
5085     if (ri->state == ARM_CP_STATE_AA32) {
5086         oslock = (value == 0xC5ACCE55);
5087     } else {
5088         oslock = value & 1;
5089     }
5090 
5091     env->cp15.oslsr_el1 = deposit32(env->cp15.oslsr_el1, 1, 1, oslock);
5092 }
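
/*
 * Example (illustrative): an AArch32 write of the key value 0xC5ACCE55
 * to OSLAR sets the OS lock, and an AArch64 write of 1 to OSLAR_EL1
 * does the same; in both cases only OSLSR_EL1.OSLK (bit 1) is updated
 * by the deposit32() above, so the other OSLSR fields are preserved.
 */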
5093 
5094 static const ARMCPRegInfo debug_cp_reginfo[] = {
5095     /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped
5096      * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1;
5097      * unlike DBGDRAR it is never accessible from EL0.
5098      * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64
5099      * accessor.
5100      */
5101     { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
5102       .access = PL0_R, .accessfn = access_tdra,
5103       .type = ARM_CP_CONST, .resetvalue = 0 },
5104     { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64,
5105       .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
5106       .access = PL1_R, .accessfn = access_tdra,
5107       .type = ARM_CP_CONST, .resetvalue = 0 },
5108     { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
5109       .access = PL0_R, .accessfn = access_tdra,
5110       .type = ARM_CP_CONST, .resetvalue = 0 },
5111     /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */
5112     { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH,
5113       .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
5114       .access = PL1_RW, .accessfn = access_tda,
5115       .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1),
5116       .resetvalue = 0 },
5117     /* MDCCSR_EL0, aka DBGDSCRint. This is a read-only mirror of MDSCR_EL1.
5118      * We don't implement the configurable EL0 access.
5119      */
5120     { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_BOTH,
5121       .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
5122       .type = ARM_CP_ALIAS,
5123       .access = PL1_R, .accessfn = access_tda,
5124       .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), },
5125     { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH,
5126       .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4,
5127       .access = PL1_W, .type = ARM_CP_NO_RAW,
5128       .accessfn = access_tdosa,
5129       .writefn = oslar_write },
5130     { .name = "OSLSR_EL1", .state = ARM_CP_STATE_BOTH,
5131       .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 4,
5132       .access = PL1_R, .resetvalue = 10,
5133       .accessfn = access_tdosa,
5134       .fieldoffset = offsetof(CPUARMState, cp15.oslsr_el1) },
5135     /* Dummy OSDLR_EL1: 32-bit Linux will read this */
5136     { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH,
5137       .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4,
5138       .access = PL1_RW, .accessfn = access_tdosa,
5139       .type = ARM_CP_NOP },
5140     /* Dummy DBGVCR: Linux wants to clear this on startup, but we don't
5141      * implement vector catch debug events yet.
5142      */
5143     { .name = "DBGVCR",
5144       .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
5145       .access = PL1_RW, .accessfn = access_tda,
5146       .type = ARM_CP_NOP },
5147     /* Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor
5148      * to save and restore a 32-bit guest's DBGVCR)
5149      */
5150     { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64,
5151       .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0,
5152       .access = PL2_RW, .accessfn = access_tda,
5153       .type = ARM_CP_NOP },
5154     /* Dummy MDCCINT_EL1, since we don't implement the Debug Communications
5155      * Channel but Linux may try to access this register. The 32-bit
5156      * alias is DBGDCCINT.
5157      */
5158     { .name = "MDCCINT_EL1", .state = ARM_CP_STATE_BOTH,
5159       .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
5160       .access = PL1_RW, .accessfn = access_tda,
5161       .type = ARM_CP_NOP },
5162     REGINFO_SENTINEL
5163 };
5164 
5165 static const ARMCPRegInfo debug_lpae_cp_reginfo[] = {
5166     /* 64 bit access versions of the (dummy) debug registers */
5167     { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0,
5168       .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
5169     { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0,
5170       .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
5171     REGINFO_SENTINEL
5172 };
5173 
5174 /* Return the exception level to which exceptions should be taken
5175  * via SVEAccessTrap.  If an exception should be routed through
5176  * AArch64.AdvSIMDFPAccessTrap, return 0; fp_exception_el should
5177  * take care of raising that exception.
5178  * C.f. the ARM pseudocode function CheckSVEEnabled.
5179  */
5180 int sve_exception_el(CPUARMState *env, int el)
5181 {
5182 #ifndef CONFIG_USER_ONLY
5183     if (el <= 1) {
5184         bool disabled = false;
5185 
5186         /* The CPACR.ZEN controls traps to EL1:
5187          * 0, 2 : trap EL0 and EL1 accesses
5188          * 1    : trap only EL0 accesses
5189          * 3    : trap no accesses
5190          */
5191         if (!extract32(env->cp15.cpacr_el1, 16, 1)) {
5192             disabled = true;
5193         } else if (!extract32(env->cp15.cpacr_el1, 17, 1)) {
5194             disabled = el == 0;
5195         }
5196         if (disabled) {
5197             /* route_to_el2 */
5198             return (arm_feature(env, ARM_FEATURE_EL2)
5199                     && (arm_hcr_el2_eff(env) & HCR_TGE) ? 2 : 1);
5200         }
5201 
5202         /* Check CPACR.FPEN.  */
5203         if (!extract32(env->cp15.cpacr_el1, 20, 1)) {
5204             disabled = true;
5205         } else if (!extract32(env->cp15.cpacr_el1, 21, 1)) {
5206             disabled = el == 0;
5207         }
5208         if (disabled) {
5209             return 0;
5210         }
5211     }
5212 
5213     /* CPTR_EL2.  Since TZ and TFP are positive,
5214      * they will be zero when EL2 is not present.
5215      */
5216     if (el <= 2 && !arm_is_secure_below_el3(env)) {
5217         if (env->cp15.cptr_el[2] & CPTR_TZ) {
5218             return 2;
5219         }
5220         if (env->cp15.cptr_el[2] & CPTR_TFP) {
5221             return 0;
5222         }
5223     }
5224 
5225     /* CPTR_EL3.  Since EZ is negative we must check for EL3.  */
5226     if (arm_feature(env, ARM_FEATURE_EL3)
5227         && !(env->cp15.cptr_el[3] & CPTR_EZ)) {
5228         return 3;
5229     }
5230 #endif
5231     return 0;
5232 }
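
/*
 * Worked example (illustrative): at EL0 with CPACR_EL1.ZEN = 0b01
 * (bit 16 set, bit 17 clear), SVE accesses from EL0 are disabled, so
 * the function returns 1 (or 2 if EL2 is present and HCR_EL2.TGE is
 * set); the same situation with FPEN = 0b01 instead returns 0 and
 * leaves fp_exception_el() to raise the AdvSIMD/FP trap.
 */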
5233 
5234 /*
5235  * Given that SVE is enabled, return the vector length for EL.
5236  */
5237 uint32_t sve_zcr_len_for_el(CPUARMState *env, int el)
5238 {
5239     ARMCPU *cpu = arm_env_get_cpu(env);
5240     uint32_t zcr_len = cpu->sve_max_vq - 1;
5241 
5242     if (el <= 1) {
5243         zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[1]);
5244     }
5245     if (el < 2 && arm_feature(env, ARM_FEATURE_EL2)) {
5246         zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[2]);
5247     }
5248     if (el < 3 && arm_feature(env, ARM_FEATURE_EL3)) {
5249         zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[3]);
5250     }
5251     return zcr_len;
5252 }
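
/*
 * Worked example (illustrative): for a CPU with sve_max_vq = 4
 * (512-bit maximum vectors), ZCR_EL1.LEN = 3 and ZCR_EL2.LEN = 1, a
 * caller at EL1 with EL2 implemented and EL3 not implemented gets
 * MIN(3, 3, 1) = 1, i.e. an effective vector length of
 * (1 + 1) * 128 = 256 bits.
 */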
5253 
5254 static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
5255                       uint64_t value)
5256 {
5257     int cur_el = arm_current_el(env);
5258     int old_len = sve_zcr_len_for_el(env, cur_el);
5259     int new_len;
5260 
5261     /* Bits other than [3:0] are RAZ/WI.  */
5262     raw_write(env, ri, value & 0xf);
5263 
5264     /*
5265      * Because we arrived here, we know both FP and SVE are enabled;
5266      * otherwise we would have trapped access to the ZCR_ELn register.
5267      */
5268     new_len = sve_zcr_len_for_el(env, cur_el);
5269     if (new_len < old_len) {
5270         aarch64_sve_narrow_vq(env, new_len + 1);
5271     }
5272 }
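
/*
 * Example (illustrative): if the old effective length was 3 (512-bit
 * vectors) and EL1 writes ZCR_EL1 = 1 with no tighter clamp from
 * ZCR_EL2/ZCR_EL3, new_len becomes 1 and aarch64_sve_narrow_vq(env, 2)
 * discards vector/predicate state beyond the new 256-bit length;
 * a widening write needs no such fixup.
 */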
5273 
5274 static const ARMCPRegInfo zcr_el1_reginfo = {
5275     .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64,
5276     .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0,
5277     .access = PL1_RW, .type = ARM_CP_SVE,
5278     .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]),
5279     .writefn = zcr_write, .raw_writefn = raw_write
5280 };
5281 
5282 static const ARMCPRegInfo zcr_el2_reginfo = {
5283     .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
5284     .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
5285     .access = PL2_RW, .type = ARM_CP_SVE,
5286     .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]),
5287     .writefn = zcr_write, .raw_writefn = raw_write
5288 };
5289 
5290 static const ARMCPRegInfo zcr_no_el2_reginfo = {
5291     .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
5292     .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
5293     .access = PL2_RW, .type = ARM_CP_SVE,
5294     .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore
5295 };
5296 
5297 static const ARMCPRegInfo zcr_el3_reginfo = {
5298     .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64,
5299     .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0,
5300     .access = PL3_RW, .type = ARM_CP_SVE,
5301     .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]),
5302     .writefn = zcr_write, .raw_writefn = raw_write
5303 };
5304 
5305 void hw_watchpoint_update(ARMCPU *cpu, int n)
5306 {
5307     CPUARMState *env = &cpu->env;
5308     vaddr len = 0;
5309     vaddr wvr = env->cp15.dbgwvr[n];
5310     uint64_t wcr = env->cp15.dbgwcr[n];
5311     int mask;
5312     int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;
5313 
5314     if (env->cpu_watchpoint[n]) {
5315         cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]);
5316         env->cpu_watchpoint[n] = NULL;
5317     }
5318 
5319     if (!extract64(wcr, 0, 1)) {
5320         /* E bit clear : watchpoint disabled */
5321         return;
5322     }
5323 
5324     switch (extract64(wcr, 3, 2)) {
5325     case 0:
5326         /* LSC 00 is reserved and must behave as if the wp is disabled */
5327         return;
5328     case 1:
5329         flags |= BP_MEM_READ;
5330         break;
5331     case 2:
5332         flags |= BP_MEM_WRITE;
5333         break;
5334     case 3:
5335         flags |= BP_MEM_ACCESS;
5336         break;
5337     }
5338 
5339     /* Attempts to use both MASK and BAS fields simultaneously are
5340      * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case,
5341      * thus generating a watchpoint for every byte in the masked region.
5342      */
5343     mask = extract64(wcr, 24, 4);
5344     if (mask == 1 || mask == 2) {
5345         /* Reserved values of MASK; we must act as if the mask value was
5346          * some non-reserved value, or as if the watchpoint were disabled.
5347          * We choose the latter.
5348          */
5349         return;
5350     } else if (mask) {
5351         /* Watchpoint covers an aligned area up to 2GB in size */
5352         len = 1ULL << mask;
5353         /* If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE
5354          * whether the watchpoint fires when the unmasked bits match; we opt
5355          * to generate the exceptions.
5356          */
5357         wvr &= ~(len - 1);
5358     } else {
5359         /* Watchpoint covers bytes defined by the byte address select bits */
5360         int bas = extract64(wcr, 5, 8);
5361         int basstart;
5362 
5363         if (bas == 0) {
5364             /* This must act as if the watchpoint is disabled */
5365             return;
5366         }
5367 
5368         if (extract64(wvr, 2, 1)) {
5369             /* Deprecated case of an address that is only 4-byte aligned:
5370              * BAS[7:4] are ignored, and BAS[3:0] define which bytes to watch.
5371              */
5372             bas &= 0xf;
5373         }
5374         /* The BAS bits are supposed to be programmed to indicate a contiguous
5375          * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether
5376          * we fire for each byte in the word/doubleword addressed by the WVR.
5377          * We choose to ignore any non-zero bits after the first range of 1s.
5378          */
5379         basstart = ctz32(bas);
5380         len = cto32(bas >> basstart);
5381         wvr += basstart;
5382     }
5383 
5384     cpu_watchpoint_insert(CPU(cpu), wvr, len, flags,
5385                           &env->cpu_watchpoint[n]);
5386 }
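
/* A minimal sketch (hypothetical values, not called by the emulation) of the
 * WCR decode above: the BAS bits pick a contiguous byte range within the
 * word/doubleword at WVR, while a non-zero MASK ignores BAS and covers an
 * aligned power-of-two region.
 */
static inline void hw_watchpoint_decode_example(void)
{
    vaddr wvr = 0x10000;                 /* hypothetical doubleword-aligned WVR */
    int bas = 0x3c;                      /* BAS == 0b00111100 */
    int basstart = ctz32(bas);           /* 2 */
    vaddr len = cto32(bas >> basstart);  /* cto32(0xf) == 4 */

    wvr += basstart;                     /* watch the 4 bytes at 0x10002..0x10005 */

    /* With a non-zero MASK, BAS is ignored and the region is 2^MASK bytes */
    vaddr masked_len = 1ULL << 3;        /* MASK == 3 -> 8 byte aligned region */

    assert(wvr == 0x10002 && len == 4 && masked_len == 8);
}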
5387 
5388 void hw_watchpoint_update_all(ARMCPU *cpu)
5389 {
5390     int i;
5391     CPUARMState *env = &cpu->env;
5392 
5393     /* Completely clear out existing QEMU watchpoints and our array, to
5394      * avoid possible stale entries following migration load.
5395      */
5396     cpu_watchpoint_remove_all(CPU(cpu), BP_CPU);
5397     memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint));
5398 
5399     for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) {
5400         hw_watchpoint_update(cpu, i);
5401     }
5402 }
5403 
5404 static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
5405                          uint64_t value)
5406 {
5407     ARMCPU *cpu = arm_env_get_cpu(env);
5408     int i = ri->crm;
5409 
5410     /* Bits [63:49] are hardwired to the value of bit [48]; that is, the
5411      * register reads and behaves as if values written are sign extended.
5412      * Bits [1:0] are RES0.
5413      */
5414     value = sextract64(value, 0, 49) & ~3ULL;
5415 
5416     raw_write(env, ri, value);
5417     hw_watchpoint_update(cpu, i);
5418 }
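
/* Illustrative sketch (hypothetical value, not called by the emulation) of
 * the DBGWVR write behaviour above: bit [48] is replicated into bits [63:49]
 * and bits [1:0] are cleared.
 */
static inline void dbgwvr_sign_extend_example(void)
{
    uint64_t written = 0x0001000000000007ULL;           /* bit 48 set, low bits dirty */
    uint64_t stored = sextract64(written, 0, 49) & ~3ULL;

    assert(stored == 0xffff000000000004ULL);
}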
5419 
5420 static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
5421                          uint64_t value)
5422 {
5423     ARMCPU *cpu = arm_env_get_cpu(env);
5424     int i = ri->crm;
5425 
5426     raw_write(env, ri, value);
5427     hw_watchpoint_update(cpu, i);
5428 }
5429 
5430 void hw_breakpoint_update(ARMCPU *cpu, int n)
5431 {
5432     CPUARMState *env = &cpu->env;
5433     uint64_t bvr = env->cp15.dbgbvr[n];
5434     uint64_t bcr = env->cp15.dbgbcr[n];
5435     vaddr addr;
5436     int bt;
5437     int flags = BP_CPU;
5438 
5439     if (env->cpu_breakpoint[n]) {
5440         cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]);
5441         env->cpu_breakpoint[n] = NULL;
5442     }
5443 
5444     if (!extract64(bcr, 0, 1)) {
5445         /* E bit clear : breakpoint disabled */
5446         return;
5447     }
5448 
5449     bt = extract64(bcr, 20, 4);
5450 
5451     switch (bt) {
5452     case 4: /* unlinked address mismatch (reserved if AArch64) */
5453     case 5: /* linked address mismatch (reserved if AArch64) */
5454         qemu_log_mask(LOG_UNIMP,
5455                       "arm: address mismatch breakpoint types not implemented\n");
5456         return;
5457     case 0: /* unlinked address match */
5458     case 1: /* linked address match */
5459     {
5460         /* Bits [63:49] are hardwired to the value of bit [48]; that is,
5461          * we behave as if the register was sign extended. Bits [1:0] are
5462          * RES0. The BAS field is used to allow setting breakpoints on 16
5463          * bit wide instructions; it is CONSTRAINED UNPREDICTABLE whether
5464          * a bp will fire if the addresses covered by the bp and the addresses
5465          * covered by the insn overlap but the insn doesn't start at the
5466          * start of the bp address range. We choose to require the insn and
5467          * the bp to have the same address. The constraints on writing to
5468          * BAS enforced in dbgbcr_write mean we have only four cases:
5469          *  0b0000  => no breakpoint
5470          *  0b0011  => breakpoint on addr
5471          *  0b1100  => breakpoint on addr + 2
5472          *  0b1111  => breakpoint on addr
5473          * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c).
5474          */
5475         int bas = extract64(bcr, 5, 4);
5476         addr = sextract64(bvr, 0, 49) & ~3ULL;
5477         if (bas == 0) {
5478             return;
5479         }
5480         if (bas == 0xc) {
5481             addr += 2;
5482         }
5483         break;
5484     }
5485     case 2: /* unlinked context ID match */
5486     case 8: /* unlinked VMID match (reserved if no EL2) */
5487     case 10: /* unlinked context ID and VMID match (reserved if no EL2) */
5488         qemu_log_mask(LOG_UNIMP,
5489                       "arm: unlinked context breakpoint types not implemented\n");
5490         return;
5491     case 9: /* linked VMID match (reserved if no EL2) */
5492     case 11: /* linked context ID and VMID match (reserved if no EL2) */
5493     case 3: /* linked context ID match */
5494     default:
5495         /* We must generate no events for Linked context matches (unless
5496          * they are linked to by some other bp/wp, which is handled in
5497          * updates for the linking bp/wp). We choose to also generate no events
5498          * for reserved values.
5499          */
5500         return;
5501     }
5502 
5503     cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]);
5504 }
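
/* Illustrative sketch (hypothetical values, not called by the emulation) of
 * the address-match BAS cases above: BAS == 0b0011 or 0b1111 break on the
 * word address itself, while BAS == 0b1100 breaks on the 16-bit insn at
 * word address + 2.
 */
static inline void hw_breakpoint_bas_example(void)
{
    uint64_t bvr = 0x8002;                        /* bits [1:0] are RES0 */
    vaddr addr = sextract64(bvr, 0, 49) & ~3ULL;  /* 0x8000 */
    int bas = 0xc;

    if (bas == 0xc) {
        addr += 2;                                /* breakpoint at 0x8002 */
    }
    assert(addr == 0x8002);
}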
5505 
5506 void hw_breakpoint_update_all(ARMCPU *cpu)
5507 {
5508     int i;
5509     CPUARMState *env = &cpu->env;
5510 
5511     /* Completely clear out existing QEMU breakpoints and our array, to
5512      * avoid possible stale entries following migration load.
5513      */
5514     cpu_breakpoint_remove_all(CPU(cpu), BP_CPU);
5515     memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint));
5516 
5517     for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) {
5518         hw_breakpoint_update(cpu, i);
5519     }
5520 }
5521 
5522 static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
5523                          uint64_t value)
5524 {
5525     ARMCPU *cpu = arm_env_get_cpu(env);
5526     int i = ri->crm;
5527 
5528     raw_write(env, ri, value);
5529     hw_breakpoint_update(cpu, i);
5530 }
5531 
5532 static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
5533                          uint64_t value)
5534 {
5535     ARMCPU *cpu = arm_env_get_cpu(env);
5536     int i = ri->crm;
5537 
5538     /* BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only
5539      * copy of BAS[0].
5540      */
5541     value = deposit64(value, 6, 1, extract64(value, 5, 1));
5542     value = deposit64(value, 8, 1, extract64(value, 7, 1));
5543 
5544     raw_write(env, ri, value);
5545     hw_breakpoint_update(cpu, i);
5546 }
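
/* Illustrative sketch (hypothetical value, not called by the emulation) of
 * the BAS mirroring above: a guest write of BAS == 0b0001 is stored back as
 * 0b0011, because BAS[1] and BAS[3] mirror BAS[0] and BAS[2].
 */
static inline void dbgbcr_bas_mirror_example(void)
{
    uint64_t bcr = 1ULL << 5;                         /* BAS == 0b0001 */

    bcr = deposit64(bcr, 6, 1, extract64(bcr, 5, 1)); /* BAS[1] <- BAS[0] */
    bcr = deposit64(bcr, 8, 1, extract64(bcr, 7, 1)); /* BAS[3] <- BAS[2] */

    assert(extract64(bcr, 5, 4) == 0x3);              /* stored BAS == 0b0011 */
}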
5547 
5548 static void define_debug_regs(ARMCPU *cpu)
5549 {
5550     /* Define v7 and v8 architectural debug registers.
5551      * These are just dummy implementations for now.
5552      */
5553     int i;
5554     int wrps, brps, ctx_cmps;
5555     ARMCPRegInfo dbgdidr = {
5556         .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
5557         .access = PL0_R, .accessfn = access_tda,
5558         .type = ARM_CP_CONST, .resetvalue = cpu->dbgdidr,
5559     };
5560 
5561     /* Note that all these register fields hold "number of Xs minus 1". */
5562     brps = extract32(cpu->dbgdidr, 24, 4);
5563     wrps = extract32(cpu->dbgdidr, 28, 4);
5564     ctx_cmps = extract32(cpu->dbgdidr, 20, 4);
5565 
5566     assert(ctx_cmps <= brps);
5567 
5568     /* The DBGDIDR and ID_AA64DFR0_EL1 define various properties
5569      * of the debug registers such as number of breakpoints;
5570      * check that if they both exist then they agree.
5571      */
5572     if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
5573         assert(extract32(cpu->id_aa64dfr0, 12, 4) == brps);
5574         assert(extract32(cpu->id_aa64dfr0, 20, 4) == wrps);
5575         assert(extract32(cpu->id_aa64dfr0, 28, 4) == ctx_cmps);
5576     }
5577 
5578     define_one_arm_cp_reg(cpu, &dbgdidr);
5579     define_arm_cp_regs(cpu, debug_cp_reginfo);
5580 
5581     if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) {
5582         define_arm_cp_regs(cpu, debug_lpae_cp_reginfo);
5583     }
5584 
5585     for (i = 0; i < brps + 1; i++) {
5586         ARMCPRegInfo dbgregs[] = {
5587             { .name = "DBGBVR", .state = ARM_CP_STATE_BOTH,
5588               .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4,
5589               .access = PL1_RW, .accessfn = access_tda,
5590               .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]),
5591               .writefn = dbgbvr_write, .raw_writefn = raw_write
5592             },
5593             { .name = "DBGBCR", .state = ARM_CP_STATE_BOTH,
5594               .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5,
5595               .access = PL1_RW, .accessfn = access_tda,
5596               .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]),
5597               .writefn = dbgbcr_write, .raw_writefn = raw_write
5598             },
5599             REGINFO_SENTINEL
5600         };
5601         define_arm_cp_regs(cpu, dbgregs);
5602     }
5603 
5604     for (i = 0; i < wrps + 1; i++) {
5605         ARMCPRegInfo dbgregs[] = {
5606             { .name = "DBGWVR", .state = ARM_CP_STATE_BOTH,
5607               .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6,
5608               .access = PL1_RW, .accessfn = access_tda,
5609               .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]),
5610               .writefn = dbgwvr_write, .raw_writefn = raw_write
5611             },
5612             { .name = "DBGWCR", .state = ARM_CP_STATE_BOTH,
5613               .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7,
5614               .access = PL1_RW, .accessfn = access_tda,
5615               .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]),
5616               .writefn = dbgwcr_write, .raw_writefn = raw_write
5617             },
5618             REGINFO_SENTINEL
5619         };
5620         define_arm_cp_regs(cpu, dbgregs);
5621     }
5622 }
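
/* Illustrative sketch (hypothetical DBGDIDR value, not any real CPU's) of
 * the "number of Xs minus 1" encoding used above: WRPs live in [31:28],
 * BRPs in [27:24] and CTX_CMPs in [23:20].
 */
static inline void dbgdidr_fields_example(void)
{
    uint32_t dbgdidr = 0x35160000;              /* hypothetical ID value */
    int brps = extract32(dbgdidr, 24, 4);       /* 5 -> six breakpoints */
    int wrps = extract32(dbgdidr, 28, 4);       /* 3 -> four watchpoints */
    int ctx_cmps = extract32(dbgdidr, 20, 4);   /* 1 -> two context comparators */

    assert(brps == 5 && wrps == 3 && ctx_cmps == 1);
}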
5623 
5624 /* We don't know until after realize whether there's a GICv3
5625  * attached, and that is what registers the gicv3 sysregs.
5626  * So we have to fill in the GIC fields in ID_PFR1/ID_PFR1_EL1 and ID_AA64PFR0_EL1
5627  * at runtime.
5628  */
5629 static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri)
5630 {
5631     ARMCPU *cpu = arm_env_get_cpu(env);
5632     uint64_t pfr1 = cpu->id_pfr1;
5633 
5634     if (env->gicv3state) {
5635         pfr1 |= 1 << 28;
5636     }
5637     return pfr1;
5638 }
5639 
5640 static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri)
5641 {
5642     ARMCPU *cpu = arm_env_get_cpu(env);
5643     uint64_t pfr0 = cpu->isar.id_aa64pfr0;
5644 
5645     if (env->gicv3state) {
5646         pfr0 |= 1 << 24;
5647     }
5648     return pfr0;
5649 }
5650 
5651 /* Shared logic between LORID and the rest of the LOR* registers.
5652  * Secure state has already been delt with.
5653  */
5654 static CPAccessResult access_lor_ns(CPUARMState *env)
5655 {
5656     int el = arm_current_el(env);
5657 
5658     if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TLOR)) {
5659         return CP_ACCESS_TRAP_EL2;
5660     }
5661     if (el < 3 && (env->cp15.scr_el3 & SCR_TLOR)) {
5662         return CP_ACCESS_TRAP_EL3;
5663     }
5664     return CP_ACCESS_OK;
5665 }
5666 
5667 static CPAccessResult access_lorid(CPUARMState *env, const ARMCPRegInfo *ri,
5668                                    bool isread)
5669 {
5670     if (arm_is_secure_below_el3(env)) {
5671         /* Access ok in secure mode.  */
5672         return CP_ACCESS_OK;
5673     }
5674     return access_lor_ns(env);
5675 }
5676 
5677 static CPAccessResult access_lor_other(CPUARMState *env,
5678                                        const ARMCPRegInfo *ri, bool isread)
5679 {
5680     if (arm_is_secure_below_el3(env)) {
5681         /* Access denied in secure mode.  */
5682         return CP_ACCESS_TRAP;
5683     }
5684     return access_lor_ns(env);
5685 }
5686 
5687 #ifdef TARGET_AARCH64
5688 static CPAccessResult access_pauth(CPUARMState *env, const ARMCPRegInfo *ri,
5689                                    bool isread)
5690 {
5691     int el = arm_current_el(env);
5692 
5693     if (el < 2 &&
5694         arm_feature(env, ARM_FEATURE_EL2) &&
5695         !(arm_hcr_el2_eff(env) & HCR_APK)) {
5696         return CP_ACCESS_TRAP_EL2;
5697     }
5698     if (el < 3 &&
5699         arm_feature(env, ARM_FEATURE_EL3) &&
5700         !(env->cp15.scr_el3 & SCR_APK)) {
5701         return CP_ACCESS_TRAP_EL3;
5702     }
5703     return CP_ACCESS_OK;
5704 }
5705 
5706 static const ARMCPRegInfo pauth_reginfo[] = {
5707     { .name = "APDAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
5708       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 0,
5709       .access = PL1_RW, .accessfn = access_pauth,
5710       .fieldoffset = offsetof(CPUARMState, apda_key.lo) },
5711     { .name = "APDAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
5712       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 1,
5713       .access = PL1_RW, .accessfn = access_pauth,
5714       .fieldoffset = offsetof(CPUARMState, apda_key.hi) },
5715     { .name = "APDBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
5716       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 2,
5717       .access = PL1_RW, .accessfn = access_pauth,
5718       .fieldoffset = offsetof(CPUARMState, apdb_key.lo) },
5719     { .name = "APDBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
5720       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 3,
5721       .access = PL1_RW, .accessfn = access_pauth,
5722       .fieldoffset = offsetof(CPUARMState, apdb_key.hi) },
5723     { .name = "APGAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
5724       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 0,
5725       .access = PL1_RW, .accessfn = access_pauth,
5726       .fieldoffset = offsetof(CPUARMState, apga_key.lo) },
5727     { .name = "APGAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
5728       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 1,
5729       .access = PL1_RW, .accessfn = access_pauth,
5730       .fieldoffset = offsetof(CPUARMState, apga_key.hi) },
5731     { .name = "APIAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
5732       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 0,
5733       .access = PL1_RW, .accessfn = access_pauth,
5734       .fieldoffset = offsetof(CPUARMState, apia_key.lo) },
5735     { .name = "APIAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
5736       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 1,
5737       .access = PL1_RW, .accessfn = access_pauth,
5738       .fieldoffset = offsetof(CPUARMState, apia_key.hi) },
5739     { .name = "APIBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
5740       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 2,
5741       .access = PL1_RW, .accessfn = access_pauth,
5742       .fieldoffset = offsetof(CPUARMState, apib_key.lo) },
5743     { .name = "APIBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
5744       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 3,
5745       .access = PL1_RW, .accessfn = access_pauth,
5746       .fieldoffset = offsetof(CPUARMState, apib_key.hi) },
5747     REGINFO_SENTINEL
5748 };
5749 #endif
5750 
5751 static CPAccessResult access_predinv(CPUARMState *env, const ARMCPRegInfo *ri,
5752                                      bool isread)
5753 {
5754     int el = arm_current_el(env);
5755 
5756     if (el == 0) {
5757         uint64_t sctlr = arm_sctlr(env, el);
5758         if (!(sctlr & SCTLR_EnRCTX)) {
5759             return CP_ACCESS_TRAP;
5760         }
5761     } else if (el == 1) {
5762         uint64_t hcr = arm_hcr_el2_eff(env);
5763         if (hcr & HCR_NV) {
5764             return CP_ACCESS_TRAP_EL2;
5765         }
5766     }
5767     return CP_ACCESS_OK;
5768 }
5769 
5770 static const ARMCPRegInfo predinv_reginfo[] = {
5771     { .name = "CFP_RCTX", .state = ARM_CP_STATE_AA64,
5772       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 4,
5773       .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
5774     { .name = "DVP_RCTX", .state = ARM_CP_STATE_AA64,
5775       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 5,
5776       .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
5777     { .name = "CPP_RCTX", .state = ARM_CP_STATE_AA64,
5778       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 7,
5779       .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
5780     /*
5781      * Note the AArch32 opcodes have a different OPC1.
5782      */
5783     { .name = "CFPRCTX", .state = ARM_CP_STATE_AA32,
5784       .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 4,
5785       .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
5786     { .name = "DVPRCTX", .state = ARM_CP_STATE_AA32,
5787       .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 5,
5788       .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
5789     { .name = "CPPRCTX", .state = ARM_CP_STATE_AA32,
5790       .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 7,
5791       .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
5792     REGINFO_SENTINEL
5793 };
5794 
5795 void register_cp_regs_for_features(ARMCPU *cpu)
5796 {
5797     /* Register all the coprocessor registers based on feature bits */
5798     CPUARMState *env = &cpu->env;
5799     if (arm_feature(env, ARM_FEATURE_M)) {
5800         /* M profile has no coprocessor registers */
5801         return;
5802     }
5803 
5804     define_arm_cp_regs(cpu, cp_reginfo);
5805     if (!arm_feature(env, ARM_FEATURE_V8)) {
5806         /* Must go early as it is full of wildcards that may be
5807          * overridden by later definitions.
5808          */
5809         define_arm_cp_regs(cpu, not_v8_cp_reginfo);
5810     }
5811 
5812     if (arm_feature(env, ARM_FEATURE_V6)) {
5813         /* The ID registers all have impdef reset values */
5814         ARMCPRegInfo v6_idregs[] = {
5815             { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH,
5816               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
5817               .access = PL1_R, .type = ARM_CP_CONST,
5818               .resetvalue = cpu->id_pfr0 },
5819             /* ID_PFR1 is not a plain ARM_CP_CONST because we don't know
5820              * the value of the GIC field until after we define these regs.
5821              */
5822             { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH,
5823               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1,
5824               .access = PL1_R, .type = ARM_CP_NO_RAW,
5825               .readfn = id_pfr1_read,
5826               .writefn = arm_cp_write_ignore },
5827             { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH,
5828               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2,
5829               .access = PL1_R, .type = ARM_CP_CONST,
5830               .resetvalue = cpu->id_dfr0 },
5831             { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH,
5832               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3,
5833               .access = PL1_R, .type = ARM_CP_CONST,
5834               .resetvalue = cpu->id_afr0 },
5835             { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH,
5836               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4,
5837               .access = PL1_R, .type = ARM_CP_CONST,
5838               .resetvalue = cpu->id_mmfr0 },
5839             { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH,
5840               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5,
5841               .access = PL1_R, .type = ARM_CP_CONST,
5842               .resetvalue = cpu->id_mmfr1 },
5843             { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH,
5844               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6,
5845               .access = PL1_R, .type = ARM_CP_CONST,
5846               .resetvalue = cpu->id_mmfr2 },
5847             { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH,
5848               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7,
5849               .access = PL1_R, .type = ARM_CP_CONST,
5850               .resetvalue = cpu->id_mmfr3 },
5851             { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH,
5852               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
5853               .access = PL1_R, .type = ARM_CP_CONST,
5854               .resetvalue = cpu->isar.id_isar0 },
5855             { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH,
5856               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1,
5857               .access = PL1_R, .type = ARM_CP_CONST,
5858               .resetvalue = cpu->isar.id_isar1 },
5859             { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH,
5860               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
5861               .access = PL1_R, .type = ARM_CP_CONST,
5862               .resetvalue = cpu->isar.id_isar2 },
5863             { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH,
5864               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3,
5865               .access = PL1_R, .type = ARM_CP_CONST,
5866               .resetvalue = cpu->isar.id_isar3 },
5867             { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH,
5868               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4,
5869               .access = PL1_R, .type = ARM_CP_CONST,
5870               .resetvalue = cpu->isar.id_isar4 },
5871             { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH,
5872               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5,
5873               .access = PL1_R, .type = ARM_CP_CONST,
5874               .resetvalue = cpu->isar.id_isar5 },
5875             { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH,
5876               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6,
5877               .access = PL1_R, .type = ARM_CP_CONST,
5878               .resetvalue = cpu->id_mmfr4 },
5879             { .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH,
5880               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7,
5881               .access = PL1_R, .type = ARM_CP_CONST,
5882               .resetvalue = cpu->isar.id_isar6 },
5883             REGINFO_SENTINEL
5884         };
5885         define_arm_cp_regs(cpu, v6_idregs);
5886         define_arm_cp_regs(cpu, v6_cp_reginfo);
5887     } else {
5888         define_arm_cp_regs(cpu, not_v6_cp_reginfo);
5889     }
5890     if (arm_feature(env, ARM_FEATURE_V6K)) {
5891         define_arm_cp_regs(cpu, v6k_cp_reginfo);
5892     }
5893     if (arm_feature(env, ARM_FEATURE_V7MP) &&
5894         !arm_feature(env, ARM_FEATURE_PMSA)) {
5895         define_arm_cp_regs(cpu, v7mp_cp_reginfo);
5896     }
5897     if (arm_feature(env, ARM_FEATURE_V7VE)) {
5898         define_arm_cp_regs(cpu, pmovsset_cp_reginfo);
5899     }
5900     if (arm_feature(env, ARM_FEATURE_V7)) {
5901         /* v7 performance monitor control register: same implementor
5902          * field as main ID register, and we implement four counters in
5903          * addition to the cycle count register.
5904          */
5905         unsigned int i, pmcrn = 4;
5906         ARMCPRegInfo pmcr = {
5907             .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
5908             .access = PL0_RW,
5909             .type = ARM_CP_IO | ARM_CP_ALIAS,
5910             .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr),
5911             .accessfn = pmreg_access, .writefn = pmcr_write,
5912             .raw_writefn = raw_write,
5913         };
5914         ARMCPRegInfo pmcr64 = {
5915             .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64,
5916             .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0,
5917             .access = PL0_RW, .accessfn = pmreg_access,
5918             .type = ARM_CP_IO,
5919             .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
5920             .resetvalue = (cpu->midr & 0xff000000) | (pmcrn << PMCRN_SHIFT),
5921             .writefn = pmcr_write, .raw_writefn = raw_write,
5922         };
5923         define_one_arm_cp_reg(cpu, &pmcr);
5924         define_one_arm_cp_reg(cpu, &pmcr64);
5925         for (i = 0; i < pmcrn; i++) {
5926             char *pmevcntr_name = g_strdup_printf("PMEVCNTR%d", i);
5927             char *pmevcntr_el0_name = g_strdup_printf("PMEVCNTR%d_EL0", i);
5928             char *pmevtyper_name = g_strdup_printf("PMEVTYPER%d", i);
5929             char *pmevtyper_el0_name = g_strdup_printf("PMEVTYPER%d_EL0", i);
5930             ARMCPRegInfo pmev_regs[] = {
5931                 { .name = pmevcntr_name, .cp = 15, .crn = 14,
5932                   .crm = 8 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
5933                   .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
5934                   .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
5935                   .accessfn = pmreg_access },
5936                 { .name = pmevcntr_el0_name, .state = ARM_CP_STATE_AA64,
5937                   .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 8 | (3 & (i >> 3)),
5938                   .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
5939                   .type = ARM_CP_IO,
5940                   .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
5941                   .raw_readfn = pmevcntr_rawread,
5942                   .raw_writefn = pmevcntr_rawwrite },
5943                 { .name = pmevtyper_name, .cp = 15, .crn = 14,
5944                   .crm = 12 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
5945                   .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
5946                   .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
5947                   .accessfn = pmreg_access },
5948                 { .name = pmevtyper_el0_name, .state = ARM_CP_STATE_AA64,
5949                   .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 12 | (3 & (i >> 3)),
5950                   .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
5951                   .type = ARM_CP_IO,
5952                   .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
5953                   .raw_writefn = pmevtyper_rawwrite },
5954                 REGINFO_SENTINEL
5955             };
5956             define_arm_cp_regs(cpu, pmev_regs);
5957             g_free(pmevcntr_name);
5958             g_free(pmevcntr_el0_name);
5959             g_free(pmevtyper_name);
5960             g_free(pmevtyper_el0_name);
5961         }
5962         ARMCPRegInfo clidr = {
5963             .name = "CLIDR", .state = ARM_CP_STATE_BOTH,
5964             .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
5965             .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->clidr
5966         };
5967         define_one_arm_cp_reg(cpu, &clidr);
5968         define_arm_cp_regs(cpu, v7_cp_reginfo);
5969         define_debug_regs(cpu);
5970     } else {
5971         define_arm_cp_regs(cpu, not_v7_cp_reginfo);
5972     }
5973     if (FIELD_EX32(cpu->id_dfr0, ID_DFR0, PERFMON) >= 4 &&
5974             FIELD_EX32(cpu->id_dfr0, ID_DFR0, PERFMON) != 0xf) {
5975         ARMCPRegInfo v81_pmu_regs[] = {
5976             { .name = "PMCEID2", .state = ARM_CP_STATE_AA32,
5977               .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 4,
5978               .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
5979               .resetvalue = extract64(cpu->pmceid0, 32, 32) },
5980             { .name = "PMCEID3", .state = ARM_CP_STATE_AA32,
5981               .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 5,
5982               .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
5983               .resetvalue = extract64(cpu->pmceid1, 32, 32) },
5984             REGINFO_SENTINEL
5985         };
5986         define_arm_cp_regs(cpu, v81_pmu_regs);
5987     }
5988     if (arm_feature(env, ARM_FEATURE_V8)) {
5989         /* AArch64 ID registers, which all have impdef reset values.
5990          * Note that within the ID register ranges the unused slots
5991          * must all be RAZ, not UNDEF; future architecture versions may
5992          * define new registers here.
5993          */
5994         ARMCPRegInfo v8_idregs[] = {
5995             /* ID_AA64PFR0_EL1 is not a plain ARM_CP_CONST because we don't
5996              * know the right value for the GIC field until after we
5997              * define these regs.
5998              */
5999             { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64,
6000               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0,
6001               .access = PL1_R, .type = ARM_CP_NO_RAW,
6002               .readfn = id_aa64pfr0_read,
6003               .writefn = arm_cp_write_ignore },
6004             { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64,
6005               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
6006               .access = PL1_R, .type = ARM_CP_CONST,
6007               .resetvalue = cpu->isar.id_aa64pfr1},
6008             { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6009               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2,
6010               .access = PL1_R, .type = ARM_CP_CONST,
6011               .resetvalue = 0 },
6012             { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6013               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3,
6014               .access = PL1_R, .type = ARM_CP_CONST,
6015               .resetvalue = 0 },
6016             { .name = "ID_AA64ZFR0_EL1", .state = ARM_CP_STATE_AA64,
6017               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4,
6018               .access = PL1_R, .type = ARM_CP_CONST,
6019               /* At present, only SVEver == 0 is defined anyway.  */
6020               .resetvalue = 0 },
6021             { .name = "ID_AA64PFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6022               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5,
6023               .access = PL1_R, .type = ARM_CP_CONST,
6024               .resetvalue = 0 },
6025             { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6026               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6,
6027               .access = PL1_R, .type = ARM_CP_CONST,
6028               .resetvalue = 0 },
6029             { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6030               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7,
6031               .access = PL1_R, .type = ARM_CP_CONST,
6032               .resetvalue = 0 },
6033             { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64,
6034               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0,
6035               .access = PL1_R, .type = ARM_CP_CONST,
6036               .resetvalue = cpu->id_aa64dfr0 },
6037             { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64,
6038               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1,
6039               .access = PL1_R, .type = ARM_CP_CONST,
6040               .resetvalue = cpu->id_aa64dfr1 },
6041             { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6042               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2,
6043               .access = PL1_R, .type = ARM_CP_CONST,
6044               .resetvalue = 0 },
6045             { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6046               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3,
6047               .access = PL1_R, .type = ARM_CP_CONST,
6048               .resetvalue = 0 },
6049             { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64,
6050               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4,
6051               .access = PL1_R, .type = ARM_CP_CONST,
6052               .resetvalue = cpu->id_aa64afr0 },
6053             { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64,
6054               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5,
6055               .access = PL1_R, .type = ARM_CP_CONST,
6056               .resetvalue = cpu->id_aa64afr1 },
6057             { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6058               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6,
6059               .access = PL1_R, .type = ARM_CP_CONST,
6060               .resetvalue = 0 },
6061             { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6062               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7,
6063               .access = PL1_R, .type = ARM_CP_CONST,
6064               .resetvalue = 0 },
6065             { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64,
6066               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
6067               .access = PL1_R, .type = ARM_CP_CONST,
6068               .resetvalue = cpu->isar.id_aa64isar0 },
6069             { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
6070               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
6071               .access = PL1_R, .type = ARM_CP_CONST,
6072               .resetvalue = cpu->isar.id_aa64isar1 },
6073             { .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6074               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
6075               .access = PL1_R, .type = ARM_CP_CONST,
6076               .resetvalue = 0 },
6077             { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6078               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3,
6079               .access = PL1_R, .type = ARM_CP_CONST,
6080               .resetvalue = 0 },
6081             { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6082               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4,
6083               .access = PL1_R, .type = ARM_CP_CONST,
6084               .resetvalue = 0 },
6085             { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6086               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5,
6087               .access = PL1_R, .type = ARM_CP_CONST,
6088               .resetvalue = 0 },
6089             { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6090               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6,
6091               .access = PL1_R, .type = ARM_CP_CONST,
6092               .resetvalue = 0 },
6093             { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6094               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7,
6095               .access = PL1_R, .type = ARM_CP_CONST,
6096               .resetvalue = 0 },
6097             { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64,
6098               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
6099               .access = PL1_R, .type = ARM_CP_CONST,
6100               .resetvalue = cpu->isar.id_aa64mmfr0 },
6101             { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64,
6102               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
6103               .access = PL1_R, .type = ARM_CP_CONST,
6104               .resetvalue = cpu->isar.id_aa64mmfr1 },
6105             { .name = "ID_AA64MMFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6106               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2,
6107               .access = PL1_R, .type = ARM_CP_CONST,
6108               .resetvalue = 0 },
6109             { .name = "ID_AA64MMFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6110               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3,
6111               .access = PL1_R, .type = ARM_CP_CONST,
6112               .resetvalue = 0 },
6113             { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6114               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4,
6115               .access = PL1_R, .type = ARM_CP_CONST,
6116               .resetvalue = 0 },
6117             { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6118               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5,
6119               .access = PL1_R, .type = ARM_CP_CONST,
6120               .resetvalue = 0 },
6121             { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6122               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6,
6123               .access = PL1_R, .type = ARM_CP_CONST,
6124               .resetvalue = 0 },
6125             { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6126               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7,
6127               .access = PL1_R, .type = ARM_CP_CONST,
6128               .resetvalue = 0 },
6129             { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64,
6130               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
6131               .access = PL1_R, .type = ARM_CP_CONST,
6132               .resetvalue = cpu->isar.mvfr0 },
6133             { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64,
6134               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
6135               .access = PL1_R, .type = ARM_CP_CONST,
6136               .resetvalue = cpu->isar.mvfr1 },
6137             { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64,
6138               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
6139               .access = PL1_R, .type = ARM_CP_CONST,
6140               .resetvalue = cpu->isar.mvfr2 },
6141             { .name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6142               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3,
6143               .access = PL1_R, .type = ARM_CP_CONST,
6144               .resetvalue = 0 },
6145             { .name = "MVFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6146               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4,
6147               .access = PL1_R, .type = ARM_CP_CONST,
6148               .resetvalue = 0 },
6149             { .name = "MVFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6150               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5,
6151               .access = PL1_R, .type = ARM_CP_CONST,
6152               .resetvalue = 0 },
6153             { .name = "MVFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6154               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6,
6155               .access = PL1_R, .type = ARM_CP_CONST,
6156               .resetvalue = 0 },
6157             { .name = "MVFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6158               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7,
6159               .access = PL1_R, .type = ARM_CP_CONST,
6160               .resetvalue = 0 },
6161             { .name = "PMCEID0", .state = ARM_CP_STATE_AA32,
6162               .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6,
6163               .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
6164               .resetvalue = extract64(cpu->pmceid0, 0, 32) },
6165             { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64,
6166               .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6,
6167               .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
6168               .resetvalue = cpu->pmceid0 },
6169             { .name = "PMCEID1", .state = ARM_CP_STATE_AA32,
6170               .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7,
6171               .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
6172               .resetvalue = extract64(cpu->pmceid1, 0, 32) },
6173             { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64,
6174               .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7,
6175               .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
6176               .resetvalue = cpu->pmceid1 },
6177             REGINFO_SENTINEL
6178         };
6179 #ifdef CONFIG_USER_ONLY
6180         ARMCPRegUserSpaceInfo v8_user_idregs[] = {
6181             { .name = "ID_AA64PFR0_EL1",
6182               .exported_bits = 0x000f000f00ff0000,
6183               .fixed_bits    = 0x0000000000000011 },
6184             { .name = "ID_AA64PFR1_EL1",
6185               .exported_bits = 0x00000000000000f0 },
6186             { .name = "ID_AA64PFR*_EL1_RESERVED",
6187               .is_glob = true                     },
6188             { .name = "ID_AA64ZFR0_EL1"           },
6189             { .name = "ID_AA64MMFR0_EL1",
6190               .fixed_bits    = 0x00000000ff000000 },
6191             { .name = "ID_AA64MMFR1_EL1"          },
6192             { .name = "ID_AA64MMFR*_EL1_RESERVED",
6193               .is_glob = true                     },
6194             { .name = "ID_AA64DFR0_EL1",
6195               .fixed_bits    = 0x0000000000000006 },
6196             { .name = "ID_AA64DFR1_EL1"           },
6197             { .name = "ID_AA64DFR*_EL1_RESERVED",
6198               .is_glob = true                     },
6199             { .name = "ID_AA64AFR*",
6200               .is_glob = true                     },
6201             { .name = "ID_AA64ISAR0_EL1",
6202               .exported_bits = 0x00fffffff0fffff0 },
6203             { .name = "ID_AA64ISAR1_EL1",
6204               .exported_bits = 0x000000f0ffffffff },
6205             { .name = "ID_AA64ISAR*_EL1_RESERVED",
6206               .is_glob = true                     },
6207             REGUSERINFO_SENTINEL
6208         };
6209         modify_arm_cp_regs(v8_idregs, v8_user_idregs);
6210 #endif
6211         /* RVBAR_EL1 is only implemented if EL1 is the highest EL */
6212         if (!arm_feature(env, ARM_FEATURE_EL3) &&
6213             !arm_feature(env, ARM_FEATURE_EL2)) {
6214             ARMCPRegInfo rvbar = {
6215                 .name = "RVBAR_EL1", .state = ARM_CP_STATE_AA64,
6216                 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
6217                 .type = ARM_CP_CONST, .access = PL1_R, .resetvalue = cpu->rvbar
6218             };
6219             define_one_arm_cp_reg(cpu, &rvbar);
6220         }
6221         define_arm_cp_regs(cpu, v8_idregs);
6222         define_arm_cp_regs(cpu, v8_cp_reginfo);
6223     }
6224     if (arm_feature(env, ARM_FEATURE_EL2)) {
6225         uint64_t vmpidr_def = mpidr_read_val(env);
6226         ARMCPRegInfo vpidr_regs[] = {
6227             { .name = "VPIDR", .state = ARM_CP_STATE_AA32,
6228               .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
6229               .access = PL2_RW, .accessfn = access_el3_aa32ns,
6230               .resetvalue = cpu->midr, .type = ARM_CP_ALIAS,
6231               .fieldoffset = offsetoflow32(CPUARMState, cp15.vpidr_el2) },
6232             { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64,
6233               .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
6234               .access = PL2_RW, .resetvalue = cpu->midr,
6235               .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
6236             { .name = "VMPIDR", .state = ARM_CP_STATE_AA32,
6237               .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
6238               .access = PL2_RW, .accessfn = access_el3_aa32ns,
6239               .resetvalue = vmpidr_def, .type = ARM_CP_ALIAS,
6240               .fieldoffset = offsetoflow32(CPUARMState, cp15.vmpidr_el2) },
6241             { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64,
6242               .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
6243               .access = PL2_RW,
6244               .resetvalue = vmpidr_def,
6245               .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) },
6246             REGINFO_SENTINEL
6247         };
6248         define_arm_cp_regs(cpu, vpidr_regs);
6249         define_arm_cp_regs(cpu, el2_cp_reginfo);
6250         if (arm_feature(env, ARM_FEATURE_V8)) {
6251             define_arm_cp_regs(cpu, el2_v8_cp_reginfo);
6252         }
6253         /* RVBAR_EL2 is only implemented if EL2 is the highest EL */
6254         if (!arm_feature(env, ARM_FEATURE_EL3)) {
6255             ARMCPRegInfo rvbar = {
6256                 .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64,
6257                 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1,
6258                 .type = ARM_CP_CONST, .access = PL2_R, .resetvalue = cpu->rvbar
6259             };
6260             define_one_arm_cp_reg(cpu, &rvbar);
6261         }
6262     } else {
6263         /* If EL2 is missing but higher ELs are enabled, we need to
6264          * register the no_el2 reginfos.
6265          */
6266         if (arm_feature(env, ARM_FEATURE_EL3)) {
6267             /* When EL3 exists but not EL2, VPIDR and VMPIDR take the value
6268              * of MIDR_EL1 and MPIDR_EL1.
6269              */
6270             ARMCPRegInfo vpidr_regs[] = {
6271                 { .name = "VPIDR_EL2", .state = ARM_CP_STATE_BOTH,
6272                   .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
6273                   .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
6274                   .type = ARM_CP_CONST, .resetvalue = cpu->midr,
6275                   .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
6276                 { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_BOTH,
6277                   .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
6278                   .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
6279                   .type = ARM_CP_NO_RAW,
6280                   .writefn = arm_cp_write_ignore, .readfn = mpidr_read },
6281                 REGINFO_SENTINEL
6282             };
6283             define_arm_cp_regs(cpu, vpidr_regs);
6284             define_arm_cp_regs(cpu, el3_no_el2_cp_reginfo);
6285             if (arm_feature(env, ARM_FEATURE_V8)) {
6286                 define_arm_cp_regs(cpu, el3_no_el2_v8_cp_reginfo);
6287             }
6288         }
6289     }
6290     if (arm_feature(env, ARM_FEATURE_EL3)) {
6291         define_arm_cp_regs(cpu, el3_cp_reginfo);
6292         ARMCPRegInfo el3_regs[] = {
6293             { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64,
6294               .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1,
6295               .type = ARM_CP_CONST, .access = PL3_R, .resetvalue = cpu->rvbar },
6296             { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64,
6297               .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0,
6298               .access = PL3_RW,
6299               .raw_writefn = raw_write, .writefn = sctlr_write,
6300               .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]),
6301               .resetvalue = cpu->reset_sctlr },
6302             REGINFO_SENTINEL
6303         };
6304 
6305         define_arm_cp_regs(cpu, el3_regs);
6306     }
6307     /* The behaviour of NSACR is sufficiently various that we don't
6308      * try to describe it in a single reginfo:
6309      *  if EL3 is 64 bit, then trap to EL3 from S EL1,
6310      *     reads as constant 0xc00 from NS EL1 and NS EL2
6311      *  if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2
6312      *  if v7 without EL3, register doesn't exist
6313      *  if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2
6314      */
6315     if (arm_feature(env, ARM_FEATURE_EL3)) {
6316         if (arm_feature(env, ARM_FEATURE_AARCH64)) {
6317             ARMCPRegInfo nsacr = {
6318                 .name = "NSACR", .type = ARM_CP_CONST,
6319                 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
6320                 .access = PL1_RW, .accessfn = nsacr_access,
6321                 .resetvalue = 0xc00
6322             };
6323             define_one_arm_cp_reg(cpu, &nsacr);
6324         } else {
6325             ARMCPRegInfo nsacr = {
6326                 .name = "NSACR",
6327                 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
6328                 .access = PL3_RW | PL1_R,
6329                 .resetvalue = 0,
6330                 .fieldoffset = offsetof(CPUARMState, cp15.nsacr)
6331             };
6332             define_one_arm_cp_reg(cpu, &nsacr);
6333         }
6334     } else {
6335         if (arm_feature(env, ARM_FEATURE_V8)) {
6336             ARMCPRegInfo nsacr = {
6337                 .name = "NSACR", .type = ARM_CP_CONST,
6338                 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
6339                 .access = PL1_R,
6340                 .resetvalue = 0xc00
6341             };
6342             define_one_arm_cp_reg(cpu, &nsacr);
6343         }
6344     }
6345 
6346     if (arm_feature(env, ARM_FEATURE_PMSA)) {
6347         if (arm_feature(env, ARM_FEATURE_V6)) {
6348             /* PMSAv6 not implemented */
6349             assert(arm_feature(env, ARM_FEATURE_V7));
6350             define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
6351             define_arm_cp_regs(cpu, pmsav7_cp_reginfo);
6352         } else {
6353             define_arm_cp_regs(cpu, pmsav5_cp_reginfo);
6354         }
6355     } else {
6356         define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
6357         define_arm_cp_regs(cpu, vmsa_cp_reginfo);
6358         /* TTBCR2 is introduced with ARMv8.2-A32HPD.  */
6359         if (FIELD_EX32(cpu->id_mmfr4, ID_MMFR4, HPDS) != 0) {
6360             define_one_arm_cp_reg(cpu, &ttbcr2_reginfo);
6361         }
6362     }
6363     if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
6364         define_arm_cp_regs(cpu, t2ee_cp_reginfo);
6365     }
6366     if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
6367         define_arm_cp_regs(cpu, generic_timer_cp_reginfo);
6368     }
6369     if (arm_feature(env, ARM_FEATURE_VAPA)) {
6370         define_arm_cp_regs(cpu, vapa_cp_reginfo);
6371     }
6372     if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) {
6373         define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo);
6374     }
6375     if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) {
6376         define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo);
6377     }
6378     if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) {
6379         define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo);
6380     }
6381     if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
6382         define_arm_cp_regs(cpu, omap_cp_reginfo);
6383     }
6384     if (arm_feature(env, ARM_FEATURE_STRONGARM)) {
6385         define_arm_cp_regs(cpu, strongarm_cp_reginfo);
6386     }
6387     if (arm_feature(env, ARM_FEATURE_XSCALE)) {
6388         define_arm_cp_regs(cpu, xscale_cp_reginfo);
6389     }
6390     if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) {
6391         define_arm_cp_regs(cpu, dummy_c15_cp_reginfo);
6392     }
6393     if (arm_feature(env, ARM_FEATURE_LPAE)) {
6394         define_arm_cp_regs(cpu, lpae_cp_reginfo);
6395     }
6396     /* Slightly awkwardly, the OMAP and StrongARM cores need all of
6397      * cp15 crn=0 to be writes-ignored, whereas for other cores they should
6398      * be read-only (i.e. a write causes an UNDEF exception).
6399      */
6400     {
6401         ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = {
6402             /* Pre-v8 MIDR space.
6403              * Note that the MIDR isn't a simple constant register because
6404              * of the TI925 behaviour where writes to another register can
6405              * cause the MIDR value to change.
6406              *
6407              * Unimplemented registers in the c15 0 0 0 space default to
6408              * MIDR. Define MIDR first as this entire space, then CTR, TCMTR
6409              * and friends override accordingly.
6410              */
6411             { .name = "MIDR",
6412               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY,
6413               .access = PL1_R, .resetvalue = cpu->midr,
6414               .writefn = arm_cp_write_ignore, .raw_writefn = raw_write,
6415               .readfn = midr_read,
6416               .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
6417               .type = ARM_CP_OVERRIDE },
6418             /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */
6419             { .name = "DUMMY",
6420               .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY,
6421               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
6422             { .name = "DUMMY",
6423               .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY,
6424               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
6425             { .name = "DUMMY",
6426               .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY,
6427               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
6428             { .name = "DUMMY",
6429               .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY,
6430               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
6431             { .name = "DUMMY",
6432               .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY,
6433               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
6434             REGINFO_SENTINEL
6435         };
6436         ARMCPRegInfo id_v8_midr_cp_reginfo[] = {
6437             { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH,
6438               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0,
6439               .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr,
6440               .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
6441               .readfn = midr_read },
6442             /* crn = 0 op1 = 0 crm = 0 op2 = 4,7 : AArch32 aliases of MIDR */
6443             { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
6444               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
6445               .access = PL1_R, .resetvalue = cpu->midr },
6446             { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
6447               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7,
6448               .access = PL1_R, .resetvalue = cpu->midr },
6449             { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH,
6450               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6,
6451               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->revidr },
6452             REGINFO_SENTINEL
6453         };
6454         ARMCPRegInfo id_cp_reginfo[] = {
6455             /* These are common to v8 and pre-v8 */
6456             { .name = "CTR",
6457               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1,
6458               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
6459             { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64,
6460               .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0,
6461               .access = PL0_R, .accessfn = ctr_el0_access,
6462               .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
6463             /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */
6464             { .name = "TCMTR",
6465               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2,
6466               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
6467             REGINFO_SENTINEL
6468         };
6469         /* TLBTR is specific to VMSA */
6470         ARMCPRegInfo id_tlbtr_reginfo = {
6471               .name = "TLBTR",
6472               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
6473               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0,
6474         };
6475         /* MPUIR is specific to PMSA V6+ */
6476         ARMCPRegInfo id_mpuir_reginfo = {
6477               .name = "MPUIR",
6478               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
6479               .access = PL1_R, .type = ARM_CP_CONST,
6480               .resetvalue = cpu->pmsav7_dregion << 8
6481         };
6482         ARMCPRegInfo crn0_wi_reginfo = {
6483             .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY,
6484             .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
6485             .type = ARM_CP_NOP | ARM_CP_OVERRIDE
6486         };
6487 #ifdef CONFIG_USER_ONLY
6488         ARMCPRegUserSpaceInfo id_v8_user_midr_cp_reginfo[] = {
6489             { .name = "MIDR_EL1",
6490               .exported_bits = 0x00000000ffffffff },
6491             { .name = "REVIDR_EL1"                },
6492             REGUSERINFO_SENTINEL
6493         };
6494         modify_arm_cp_regs(id_v8_midr_cp_reginfo, id_v8_user_midr_cp_reginfo);
6495 #endif
6496         if (arm_feature(env, ARM_FEATURE_OMAPCP) ||
6497             arm_feature(env, ARM_FEATURE_STRONGARM)) {
6498             ARMCPRegInfo *r;
6499             /* Register the blanket "writes ignored" value first to cover the
6500              * whole space. Then update the specific ID registers to allow write
6501              * access, so that they ignore writes rather than causing them to
6502              * UNDEF.
6503              */
6504             define_one_arm_cp_reg(cpu, &crn0_wi_reginfo);
6505             for (r = id_pre_v8_midr_cp_reginfo;
6506                  r->type != ARM_CP_SENTINEL; r++) {
6507                 r->access = PL1_RW;
6508             }
6509             for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) {
6510                 r->access = PL1_RW;
6511             }
6512             id_mpuir_reginfo.access = PL1_RW;
6513             id_tlbtr_reginfo.access = PL1_RW;
6514         }
6515         if (arm_feature(env, ARM_FEATURE_V8)) {
6516             define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo);
6517         } else {
6518             define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo);
6519         }
6520         define_arm_cp_regs(cpu, id_cp_reginfo);
6521         if (!arm_feature(env, ARM_FEATURE_PMSA)) {
6522             define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo);
6523         } else if (arm_feature(env, ARM_FEATURE_V7)) {
6524             define_one_arm_cp_reg(cpu, &id_mpuir_reginfo);
6525         }
6526     }
6527 
6528     if (arm_feature(env, ARM_FEATURE_MPIDR)) {
6529         ARMCPRegInfo mpidr_cp_reginfo[] = {
6530             { .name = "MPIDR_EL1", .state = ARM_CP_STATE_BOTH,
6531               .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
6532               .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW },
6533             REGINFO_SENTINEL
6534         };
6535 #ifdef CONFIG_USER_ONLY
6536         ARMCPRegUserSpaceInfo mpidr_user_cp_reginfo[] = {
6537             { .name = "MPIDR_EL1",
6538               .fixed_bits = 0x0000000080000000 },
6539             REGUSERINFO_SENTINEL
6540         };
6541         modify_arm_cp_regs(mpidr_cp_reginfo, mpidr_user_cp_reginfo);
6542 #endif
6543         define_arm_cp_regs(cpu, mpidr_cp_reginfo);
6544     }
6545 
6546     if (arm_feature(env, ARM_FEATURE_AUXCR)) {
6547         ARMCPRegInfo auxcr_reginfo[] = {
6548             { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH,
6549               .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1,
6550               .access = PL1_RW, .type = ARM_CP_CONST,
6551               .resetvalue = cpu->reset_auxcr },
6552             { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH,
6553               .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1,
6554               .access = PL2_RW, .type = ARM_CP_CONST,
6555               .resetvalue = 0 },
6556             { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64,
6557               .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1,
6558               .access = PL3_RW, .type = ARM_CP_CONST,
6559               .resetvalue = 0 },
6560             REGINFO_SENTINEL
6561         };
6562         define_arm_cp_regs(cpu, auxcr_reginfo);
6563         if (arm_feature(env, ARM_FEATURE_V8)) {
6564             /* HACTLR2 maps to ACTLR_EL2[63:32] and is not in ARMv7 */
6565             ARMCPRegInfo hactlr2_reginfo = {
6566                 .name = "HACTLR2", .state = ARM_CP_STATE_AA32,
6567                 .cp = 15, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 3,
6568                 .access = PL2_RW, .type = ARM_CP_CONST,
6569                 .resetvalue = 0
6570             };
6571             define_one_arm_cp_reg(cpu, &hactlr2_reginfo);
6572         }
6573     }
6574 
6575     if (arm_feature(env, ARM_FEATURE_CBAR)) {
6576         if (arm_feature(env, ARM_FEATURE_AARCH64)) {
6577             /* 32 bit view is [31:18] 0...0 [43:32]. */
6578             uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18)
6579                 | extract64(cpu->reset_cbar, 32, 12);
6580             ARMCPRegInfo cbar_reginfo[] = {
6581                 { .name = "CBAR",
6582                   .type = ARM_CP_CONST,
6583                   .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
6584                   .access = PL1_R, .resetvalue = cpu->reset_cbar },
6585                 { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64,
6586                   .type = ARM_CP_CONST,
6587                   .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0,
6588                   .access = PL1_R, .resetvalue = cbar32 },
6589                 REGINFO_SENTINEL
6590             };
6591             /* We don't implement a r/w 64 bit CBAR currently */
6592             assert(arm_feature(env, ARM_FEATURE_CBAR_RO));
6593             define_arm_cp_regs(cpu, cbar_reginfo);
6594         } else {
6595             ARMCPRegInfo cbar = {
6596                 .name = "CBAR",
6597                 .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
6598                 .access = PL1_R|PL3_W, .resetvalue = cpu->reset_cbar,
6599                 .fieldoffset = offsetof(CPUARMState,
6600                                         cp15.c15_config_base_address)
6601             };
6602             if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
6603                 cbar.access = PL1_R;
6604                 cbar.fieldoffset = 0;
6605                 cbar.type = ARM_CP_CONST;
6606             }
6607             define_one_arm_cp_reg(cpu, &cbar);
6608         }
6609     }
6610 
6611     if (arm_feature(env, ARM_FEATURE_VBAR)) {
6612         ARMCPRegInfo vbar_cp_reginfo[] = {
6613             { .name = "VBAR", .state = ARM_CP_STATE_BOTH,
6614               .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
6615               .access = PL1_RW, .writefn = vbar_write,
6616               .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s),
6617                                      offsetof(CPUARMState, cp15.vbar_ns) },
6618               .resetvalue = 0 },
6619             REGINFO_SENTINEL
6620         };
6621         define_arm_cp_regs(cpu, vbar_cp_reginfo);
6622     }
6623 
6624     /* Generic registers whose values depend on the implementation */
6625     {
6626         ARMCPRegInfo sctlr = {
6627             .name = "SCTLR", .state = ARM_CP_STATE_BOTH,
6628             .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
6629             .access = PL1_RW,
6630             .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s),
6631                                    offsetof(CPUARMState, cp15.sctlr_ns) },
6632             .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
6633             .raw_writefn = raw_write,
6634         };
6635         if (arm_feature(env, ARM_FEATURE_XSCALE)) {
6636             /* Normally we would always end the TB on an SCTLR write, but Linux
6637              * arch/arm/mach-pxa/sleep.S expects two instructions following
6638              * an MMU enable to execute from cache.  Imitate this behaviour.
6639              */
6640             sctlr.type |= ARM_CP_SUPPRESS_TB_END;
6641         }
6642         define_one_arm_cp_reg(cpu, &sctlr);
6643     }
6644 
6645     if (cpu_isar_feature(aa64_lor, cpu)) {
6646         /*
6647          * A trivial implementation of ARMv8.1-LOR leaves all of these
6648          * registers fixed at 0, which indicates that there are zero
6649          * supported Limited Ordering regions.
6650          */
6651         static const ARMCPRegInfo lor_reginfo[] = {
6652             { .name = "LORSA_EL1", .state = ARM_CP_STATE_AA64,
6653               .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 0,
6654               .access = PL1_RW, .accessfn = access_lor_other,
6655               .type = ARM_CP_CONST, .resetvalue = 0 },
6656             { .name = "LOREA_EL1", .state = ARM_CP_STATE_AA64,
6657               .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 1,
6658               .access = PL1_RW, .accessfn = access_lor_other,
6659               .type = ARM_CP_CONST, .resetvalue = 0 },
6660             { .name = "LORN_EL1", .state = ARM_CP_STATE_AA64,
6661               .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 2,
6662               .access = PL1_RW, .accessfn = access_lor_other,
6663               .type = ARM_CP_CONST, .resetvalue = 0 },
6664             { .name = "LORC_EL1", .state = ARM_CP_STATE_AA64,
6665               .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 3,
6666               .access = PL1_RW, .accessfn = access_lor_other,
6667               .type = ARM_CP_CONST, .resetvalue = 0 },
6668             { .name = "LORID_EL1", .state = ARM_CP_STATE_AA64,
6669               .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 7,
6670               .access = PL1_R, .accessfn = access_lorid,
6671               .type = ARM_CP_CONST, .resetvalue = 0 },
6672             REGINFO_SENTINEL
6673         };
6674         define_arm_cp_regs(cpu, lor_reginfo);
6675     }
6676 
6677     if (cpu_isar_feature(aa64_sve, cpu)) {
6678         define_one_arm_cp_reg(cpu, &zcr_el1_reginfo);
6679         if (arm_feature(env, ARM_FEATURE_EL2)) {
6680             define_one_arm_cp_reg(cpu, &zcr_el2_reginfo);
6681         } else {
6682             define_one_arm_cp_reg(cpu, &zcr_no_el2_reginfo);
6683         }
6684         if (arm_feature(env, ARM_FEATURE_EL3)) {
6685             define_one_arm_cp_reg(cpu, &zcr_el3_reginfo);
6686         }
6687     }
6688 
6689 #ifdef TARGET_AARCH64
6690     if (cpu_isar_feature(aa64_pauth, cpu)) {
6691         define_arm_cp_regs(cpu, pauth_reginfo);
6692     }
6693 #endif
6694 
6695     /*
6696      * While all v8.0 cpus support aarch64, QEMU does have configurations
6697      * that do not set ID_AA64ISAR1, e.g. user-only qemu-arm -cpu max,
6698      * which will set ID_ISAR6.
6699      */
6700     if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)
6701         ? cpu_isar_feature(aa64_predinv, cpu)
6702         : cpu_isar_feature(aa32_predinv, cpu)) {
6703         define_arm_cp_regs(cpu, predinv_reginfo);
6704     }
6705 }
6706 
6707 void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
6708 {
6709     CPUState *cs = CPU(cpu);
6710     CPUARMState *env = &cpu->env;
6711 
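    /*
     * The numeric argument to gdb_register_coprocessor() is the number of
     * registers described by the named XML feature file; it must match
     * what the corresponding get/set callbacks handle.
     */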
6712     if (arm_feature(env, ARM_FEATURE_AARCH64)) {
6713         gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg,
6714                                  aarch64_fpu_gdb_set_reg,
6715                                  34, "aarch64-fpu.xml", 0);
6716     } else if (arm_feature(env, ARM_FEATURE_NEON)) {
6717         gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
6718                                  51, "arm-neon.xml", 0);
6719     } else if (arm_feature(env, ARM_FEATURE_VFP3)) {
6720         gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
6721                                  35, "arm-vfp3.xml", 0);
6722     } else if (arm_feature(env, ARM_FEATURE_VFP)) {
6723         gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
6724                                  19, "arm-vfp.xml", 0);
6725     }
6726     gdb_register_coprocessor(cs, arm_gdb_get_sysreg, arm_gdb_set_sysreg,
6727                              arm_gen_dynamic_xml(cs),
6728                              "system-registers.xml", 0);
6729 }
6730 
6731 /* Sort alphabetically by type name, except for "any". */
6732 static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b)
6733 {
6734     ObjectClass *class_a = (ObjectClass *)a;
6735     ObjectClass *class_b = (ObjectClass *)b;
6736     const char *name_a, *name_b;
6737 
6738     name_a = object_class_get_name(class_a);
6739     name_b = object_class_get_name(class_b);
6740     if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) {
6741         return 1;
6742     } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) {
6743         return -1;
6744     } else {
6745         return strcmp(name_a, name_b);
6746     }
6747 }
6748 
6749 static void arm_cpu_list_entry(gpointer data, gpointer user_data)
6750 {
6751     ObjectClass *oc = data;
6752     const char *typename;
6753     char *name;
6754 
6755     typename = object_class_get_name(oc);
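    /* Strip the trailing "-" TYPE_ARM_CPU suffix to get the CPU model name */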
6756     name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU));
6757     qemu_printf("  %s\n", name);
6758     g_free(name);
6759 }
6760 
6761 void arm_cpu_list(void)
6762 {
6763     GSList *list;
6764 
6765     list = object_class_get_list(TYPE_ARM_CPU, false);
6766     list = g_slist_sort(list, arm_cpu_list_compare);
6767     qemu_printf("Available CPUs:\n");
6768     g_slist_foreach(list, arm_cpu_list_entry, NULL);
6769     g_slist_free(list);
6770 }
6771 
6772 static void arm_cpu_add_definition(gpointer data, gpointer user_data)
6773 {
6774     ObjectClass *oc = data;
6775     CpuDefinitionInfoList **cpu_list = user_data;
6776     CpuDefinitionInfoList *entry;
6777     CpuDefinitionInfo *info;
6778     const char *typename;
6779 
6780     typename = object_class_get_name(oc);
6781     info = g_malloc0(sizeof(*info));
6782     info->name = g_strndup(typename,
6783                            strlen(typename) - strlen("-" TYPE_ARM_CPU));
6784     info->q_typename = g_strdup(typename);
6785 
6786     entry = g_malloc0(sizeof(*entry));
6787     entry->value = info;
6788     entry->next = *cpu_list;
6789     *cpu_list = entry;
6790 }
6791 
6792 CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
6793 {
6794     CpuDefinitionInfoList *cpu_list = NULL;
6795     GSList *list;
6796 
6797     list = object_class_get_list(TYPE_ARM_CPU, false);
6798     g_slist_foreach(list, arm_cpu_add_definition, &cpu_list);
6799     g_slist_free(list);
6800 
6801     return cpu_list;
6802 }
6803 
6804 static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
6805                                    void *opaque, int state, int secstate,
6806                                    int crm, int opc1, int opc2,
6807                                    const char *name)
6808 {
6809     /* Private utility function for define_one_arm_cp_reg_with_opaque():
6810      * add a single reginfo struct to the hash table.
6811      */
6812     uint32_t *key = g_new(uint32_t, 1);
6813     ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo));
6814     int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0;
6815     int ns = (secstate & ARM_CP_SECSTATE_NS) ? 1 : 0;
6816 
6817     r2->name = g_strdup(name);
6818     /* Reset the secure state to the specific incoming state.  This is
6819      * necessary as the register may have been defined with both states.
6820      */
6821     r2->secure = secstate;
6822 
6823     if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
6824         /* Register is banked (using both entries in the array).
6825          * Overwrite fieldoffset with the offset for this security bank;
6826          * from here on only fieldoffset is used.
6827          */
6828         r2->fieldoffset = r->bank_fieldoffsets[ns];
6829     }
6830 
6831     if (state == ARM_CP_STATE_AA32) {
6832         if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
6833             /* If the register is banked then we don't need to migrate or
6834              * reset the 32-bit instance in certain cases:
6835              *
6836              * 1) If the register has both 32-bit and 64-bit instances then we
6837              *    can count on the 64-bit instance taking care of the
6838              *    non-secure bank.
6839              * 2) If ARMv8 is enabled then we can count on a 64-bit version
6840              *    taking care of the secure bank.  This requires that separate
6841              *    32 and 64-bit definitions are provided.
6842              */
6843             if ((r->state == ARM_CP_STATE_BOTH && ns) ||
6844                 (arm_feature(&cpu->env, ARM_FEATURE_V8) && !ns)) {
6845                 r2->type |= ARM_CP_ALIAS;
6846             }
6847         } else if ((secstate != r->secure) && !ns) {
6848             /* The register is not banked so we only want to allow migration of
6849              * the non-secure instance.
6850              */
6851             r2->type |= ARM_CP_ALIAS;
6852         }
6853 
6854         if (r->state == ARM_CP_STATE_BOTH) {
6855             /* We assume it is a cp15 register if the .cp field is left unset.
6856              */
6857             if (r2->cp == 0) {
6858                 r2->cp = 15;
6859             }
6860 
6861 #ifdef HOST_WORDS_BIGENDIAN
6862             if (r2->fieldoffset) {
6863                 r2->fieldoffset += sizeof(uint32_t);
6864             }
6865 #endif
6866         }
6867     }
6868     if (state == ARM_CP_STATE_AA64) {
6869         /* To allow abbreviation of ARMCPRegInfo
6870          * definitions, we treat cp == 0 as equivalent to
6871          * the value for "standard guest-visible sysreg".
6872          * STATE_BOTH definitions are also always "standard
6873          * sysreg" in their AArch64 view (the .cp value may
6874          * be non-zero for the benefit of the AArch32 view).
6875          */
6876         if (r->cp == 0 || r->state == ARM_CP_STATE_BOTH) {
6877             r2->cp = CP_REG_ARM64_SYSREG_CP;
6878         }
6879         *key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm,
6880                                   r2->opc0, opc1, opc2);
6881     } else {
6882         *key = ENCODE_CP_REG(r2->cp, is64, ns, r2->crn, crm, opc1, opc2);
6883     }
6884     if (opaque) {
6885         r2->opaque = opaque;
6886     }
6887     /* reginfo passed to helpers is correct for the actual access,
6888      * and is never ARM_CP_STATE_BOTH:
6889      */
6890     r2->state = state;
6891     /* Make sure reginfo passed to helpers for wildcarded regs
6892      * has the correct crm/opc1/opc2 for this reg, not CP_ANY:
6893      */
6894     r2->crm = crm;
6895     r2->opc1 = opc1;
6896     r2->opc2 = opc2;
6897     /* By convention, for wildcarded registers only the first
6898      * entry is used for migration; the others are marked as
6899      * ALIAS so we don't try to transfer the register
6900      * multiple times. Special registers (ie NOP/WFI) are
6901      * never migratable and not even raw-accessible.
6902      */
6903     if ((r->type & ARM_CP_SPECIAL)) {
6904         r2->type |= ARM_CP_NO_RAW;
6905     }
6906     if (((r->crm == CP_ANY) && crm != 0) ||
6907         ((r->opc1 == CP_ANY) && opc1 != 0) ||
6908         ((r->opc2 == CP_ANY) && opc2 != 0)) {
6909         r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB;
6910     }
6911 
6912     /* Check that raw accesses are either forbidden or handled. Note that
6913      * we can't assert this earlier because the setup of fieldoffset for
6914      * banked registers has to be done first.
6915      */
6916     if (!(r2->type & ARM_CP_NO_RAW)) {
6917         assert(!raw_accessors_invalid(r2));
6918     }
6919 
6920     /* Overriding of an existing definition must be explicitly
6921      * requested.
6922      */
6923     if (!(r->type & ARM_CP_OVERRIDE)) {
6924         ARMCPRegInfo *oldreg;
6925         oldreg = g_hash_table_lookup(cpu->cp_regs, key);
6926         if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) {
6927             fprintf(stderr, "Register redefined: cp=%d %d bit "
6928                     "crn=%d crm=%d opc1=%d opc2=%d, "
6929                     "was %s, now %s\n", r2->cp, 32 + 32 * is64,
6930                     r2->crn, r2->crm, r2->opc1, r2->opc2,
6931                     oldreg->name, r2->name);
6932             g_assert_not_reached();
6933         }
6934     }
6935     g_hash_table_insert(cpu->cp_regs, key, r2);
6936 }
6937 
6938 
6939 void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
6940                                        const ARMCPRegInfo *r, void *opaque)
6941 {
6942     /* Define implementations of coprocessor registers.
6943      * We store these in a hashtable because typically
6944      * there are fewer than 150 registers in a space which
6945      * is 16*16*16*8*8 = 262144 in size.
6946      * Wildcarding is supported for the crm, opc1 and opc2 fields.
6947      * If a register is defined twice then the second definition is
6948      * used, so this can be used to define some generic registers and
6949      * then override them with implementation specific variations.
6950      * At least one of the original and the second definition should
6951      * include ARM_CP_OVERRIDE in its type bits -- this is just a guard
6952      * against accidental use.
6953      *
6954      * The state field defines whether the register is to be
6955      * visible in the AArch32 or AArch64 execution state. If the
6956      * state is set to ARM_CP_STATE_BOTH then we synthesise a
6957      * reginfo structure for the AArch32 view, which sees the lower
6958      * 32 bits of the 64 bit register.
6959      *
6960      * Only registers visible in AArch64 may set r->opc0; opc0 cannot
6961      * be wildcarded. AArch64 registers are always considered to be 64
6962      * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of
6963      * the register, if any.
6964      */
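    /*
     * As an illustration: a reginfo defined with .crm = CP_ANY and
     * .opc2 = CP_ANY is expanded by the loops below into one hashtable
     * entry per (crm, opc2) combination; add_cpreg_to_hashtable() marks
     * every expansion except the crm == 0, opc2 == 0 one as ARM_CP_ALIAS
     * so that the register is only considered once for migration.
     */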
6965     int crm, opc1, opc2, state;
6966     int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
6967     int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
6968     int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
6969     int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
6970     int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
6971     int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
6972     /* 64 bit registers have only CRm and Opc1 fields */
6973     assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
6974     /* op0 only exists in the AArch64 encodings */
6975     assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0));
6976     /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */
6977     assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT));
6978     /* The AArch64 pseudocode CheckSystemAccess() specifies that op1
6979      * encodes a minimum access level for the register. We roll this
6980      * runtime check into our general permission check code, so check
6981      * here that the reginfo's specified permissions are strict enough
6982      * to encompass the generic architectural permission check.
6983      */
6984     if (r->state != ARM_CP_STATE_AA32) {
6985         int mask = 0;
6986         switch (r->opc1) {
6987         case 0:
6988             /* min_EL EL1, but some accessible to EL0 via kernel ABI */
6989             mask = PL0U_R | PL1_RW;
6990             break;
6991         case 1: case 2:
6992             /* min_EL EL1 */
6993             mask = PL1_RW;
6994             break;
6995         case 3:
6996             /* min_EL EL0 */
6997             mask = PL0_RW;
6998             break;
6999         case 4:
7000             /* min_EL EL2 */
7001             mask = PL2_RW;
7002             break;
7003         case 5:
7004             /* unallocated encoding, so not possible */
7005             assert(false);
7006             break;
7007         case 6:
7008             /* min_EL EL3 */
7009             mask = PL3_RW;
7010             break;
7011         case 7:
7012             /* min_EL EL1, secure mode only (we don't check the latter) */
7013             mask = PL1_RW;
7014             break;
7015         default:
7016             /* broken reginfo with out-of-range opc1 */
7017             assert(false);
7018             break;
7019         }
7020         /* assert our permissions are not too lax (stricter is fine) */
7021         assert((r->access & ~mask) == 0);
7022     }
7023 
7024     /* Check that the register definition has enough info to handle
7025      * reads and writes if they are permitted.
7026      */
7027     if (!(r->type & (ARM_CP_SPECIAL|ARM_CP_CONST))) {
7028         if (r->access & PL3_R) {
7029             assert((r->fieldoffset ||
7030                    (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
7031                    r->readfn);
7032         }
7033         if (r->access & PL3_W) {
7034             assert((r->fieldoffset ||
7035                    (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
7036                    r->writefn);
7037         }
7038     }
7039     /* Bad type field probably means missing sentinel at end of reg list */
7040     assert(cptype_valid(r->type));
7041     for (crm = crmmin; crm <= crmmax; crm++) {
7042         for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
7043             for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
7044                 for (state = ARM_CP_STATE_AA32;
7045                      state <= ARM_CP_STATE_AA64; state++) {
7046                     if (r->state != state && r->state != ARM_CP_STATE_BOTH) {
7047                         continue;
7048                     }
7049                     if (state == ARM_CP_STATE_AA32) {
7050                         /* Under AArch32 CP registers can be common
7051                          * (same for secure and non-secure world) or banked.
7052                          */
7053                         char *name;
7054 
7055                         switch (r->secure) {
7056                         case ARM_CP_SECSTATE_S:
7057                         case ARM_CP_SECSTATE_NS:
7058                             add_cpreg_to_hashtable(cpu, r, opaque, state,
7059                                                    r->secure, crm, opc1, opc2,
7060                                                    r->name);
7061                             break;
7062                         default:
7063                             name = g_strdup_printf("%s_S", r->name);
7064                             add_cpreg_to_hashtable(cpu, r, opaque, state,
7065                                                    ARM_CP_SECSTATE_S,
7066                                                    crm, opc1, opc2, name);
7067                             g_free(name);
7068                             add_cpreg_to_hashtable(cpu, r, opaque, state,
7069                                                    ARM_CP_SECSTATE_NS,
7070                                                    crm, opc1, opc2, r->name);
7071                             break;
7072                         }
7073                     } else {
7074                         /* AArch64 registers get mapped to the non-secure
7075                          * instance of AArch32 */
7076                         add_cpreg_to_hashtable(cpu, r, opaque, state,
7077                                                ARM_CP_SECSTATE_NS,
7078                                                crm, opc1, opc2, r->name);
7079                     }
7080                 }
7081             }
7082         }
7083     }
7084 }
7085 
7086 void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
7087                                     const ARMCPRegInfo *regs, void *opaque)
7088 {
7089     /* Define a whole list of registers */
7090     const ARMCPRegInfo *r;
7091     for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
7092         define_one_arm_cp_reg_with_opaque(cpu, r, opaque);
7093     }
7094 }
7095 
7096 /*
7097  * Modify ARMCPRegInfo for access from userspace.
7098  *
7099  * This is a data driven modification directed by
7100  * ARMCPRegUserSpaceInfo. All registers become ARM_CP_CONST as
7101  * user-space cannot alter any values and dynamic values pertaining to
7102  * execution state are hidden from user space view anyway.
7103  */
7104 void modify_arm_cp_regs(ARMCPRegInfo *regs, const ARMCPRegUserSpaceInfo *mods)
7105 {
7106     const ARMCPRegUserSpaceInfo *m;
7107     ARMCPRegInfo *r;
7108 
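    /*
     * A glob pattern (is_glob) downgrades every matching register to a
     * constant, RAZ, PL0-readable view; an exact name match additionally
     * applies exported_bits/fixed_bits to the reset value and stops at
     * the first matching register.
     */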
7109     for (m = mods; m->name; m++) {
7110         GPatternSpec *pat = NULL;
7111         if (m->is_glob) {
7112             pat = g_pattern_spec_new(m->name);
7113         }
7114         for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
7115             if (pat && g_pattern_match_string(pat, r->name)) {
7116                 r->type = ARM_CP_CONST;
7117                 r->access = PL0U_R;
7118                 r->resetvalue = 0;
7119                 /* continue */
7120             } else if (strcmp(r->name, m->name) == 0) {
7121                 r->type = ARM_CP_CONST;
7122                 r->access = PL0U_R;
7123                 r->resetvalue &= m->exported_bits;
7124                 r->resetvalue |= m->fixed_bits;
7125                 break;
7126             }
7127         }
7128         if (pat) {
7129             g_pattern_spec_free(pat);
7130         }
7131     }
7132 }
7133 
7134 const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
7135 {
7136     return g_hash_table_lookup(cpregs, &encoded_cp);
7137 }
7138 
7139 void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
7140                          uint64_t value)
7141 {
7142     /* Helper coprocessor write function for write-ignore registers */
7143 }
7144 
7145 uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
7146 {
7147     /* Helper coprocessor read function for read-as-zero registers */
7148     return 0;
7149 }
7150 
7151 void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
7152 {
7153     /* Helper coprocessor reset function for do-nothing-on-reset registers */
7154 }
7155 
7156 static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type)
7157 {
7158     /* Return true if it is not valid for us to switch to
7159      * this CPU mode (ie all the UNPREDICTABLE cases in
7160      * the ARM ARM CPSRWriteByInstr pseudocode).
7161      */
7162 
7163     /* Changes to or from Hyp via MSR and CPS are illegal. */
7164     if (write_type == CPSRWriteByInstr &&
7165         ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP ||
7166          mode == ARM_CPU_MODE_HYP)) {
7167         return 1;
7168     }
7169 
7170     switch (mode) {
7171     case ARM_CPU_MODE_USR:
7172         return 0;
7173     case ARM_CPU_MODE_SYS:
7174     case ARM_CPU_MODE_SVC:
7175     case ARM_CPU_MODE_ABT:
7176     case ARM_CPU_MODE_UND:
7177     case ARM_CPU_MODE_IRQ:
7178     case ARM_CPU_MODE_FIQ:
7179         /* Note that we don't implement the IMPDEF NSACR.RFR which in v7
7180          * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.)
7181          */
7182         /* If HCR.TGE is set then changes from Monitor to NS PL1 via MSR
7183          * and CPS are treated as illegal mode changes.
7184          */
7185         if (write_type == CPSRWriteByInstr &&
7186             (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON &&
7187             (arm_hcr_el2_eff(env) & HCR_TGE)) {
7188             return 1;
7189         }
7190         return 0;
7191     case ARM_CPU_MODE_HYP:
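        /*
         * Switching into Hyp mode is only valid if EL2 is implemented,
         * we are already at EL2 or higher, and we are not Secure
         * (below EL3).
         */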
7192         return !arm_feature(env, ARM_FEATURE_EL2)
7193             || arm_current_el(env) < 2 || arm_is_secure_below_el3(env);
7194     case ARM_CPU_MODE_MON:
7195         return arm_current_el(env) < 3;
7196     default:
7197         return 1;
7198     }
7199 }
7200 
7201 uint32_t cpsr_read(CPUARMState *env)
7202 {
7203     int ZF;
7204     ZF = (env->ZF == 0);
7205     return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
7206         (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
7207         | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
7208         | ((env->condexec_bits & 0xfc) << 8)
7209         | (env->GE << 16) | (env->daif & CPSR_AIF);
7210 }
7211 
7212 void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
7213                 CPSRWriteType write_type)
7214 {
7215     uint32_t changed_daif;
7216 
7217     if (mask & CPSR_NZCV) {
7218         env->ZF = (~val) & CPSR_Z;
7219         env->NF = val;
7220         env->CF = (val >> 29) & 1;
7221         env->VF = (val << 3) & 0x80000000;
7222     }
7223     if (mask & CPSR_Q)
7224         env->QF = ((val & CPSR_Q) != 0);
7225     if (mask & CPSR_T)
7226         env->thumb = ((val & CPSR_T) != 0);
7227     if (mask & CPSR_IT_0_1) {
7228         env->condexec_bits &= ~3;
7229         env->condexec_bits |= (val >> 25) & 3;
7230     }
7231     if (mask & CPSR_IT_2_7) {
7232         env->condexec_bits &= 3;
7233         env->condexec_bits |= (val >> 8) & 0xfc;
7234     }
7235     if (mask & CPSR_GE) {
7236         env->GE = (val >> 16) & 0xf;
7237     }
7238 
7239     /* In a V7 implementation that includes the security extensions but does
7240      * not include Virtualization Extensions the SCR.FW and SCR.AW bits control
7241      * whether non-secure software is allowed to change the CPSR_F and CPSR_A
7242      * bits respectively.
7243      *
7244      * In a V8 implementation, it is permitted for privileged software to
7245      * change the CPSR A/F bits regardless of the SCR.AW/FW bits.
7246      */
7247     if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) &&
7248         arm_feature(env, ARM_FEATURE_EL3) &&
7249         !arm_feature(env, ARM_FEATURE_EL2) &&
7250         !arm_is_secure(env)) {
7251 
7252         changed_daif = (env->daif ^ val) & mask;
7253 
7254         if (changed_daif & CPSR_A) {
7255             /* Check to see if we are allowed to change the masking of async
7256              * abort exceptions from a non-secure state.
7257              */
7258             if (!(env->cp15.scr_el3 & SCR_AW)) {
7259                 qemu_log_mask(LOG_GUEST_ERROR,
7260                               "Ignoring attempt to switch CPSR_A flag from "
7261                               "non-secure world with SCR.AW bit clear\n");
7262                 mask &= ~CPSR_A;
7263             }
7264         }
7265 
7266         if (changed_daif & CPSR_F) {
7267             /* Check to see if we are allowed to change the masking of FIQ
7268              * exceptions from a non-secure state.
7269              */
7270             if (!(env->cp15.scr_el3 & SCR_FW)) {
7271                 qemu_log_mask(LOG_GUEST_ERROR,
7272                               "Ignoring attempt to switch CPSR_F flag from "
7273                               "non-secure world with SCR.FW bit clear\n");
7274                 mask &= ~CPSR_F;
7275             }
7276 
7277             /* Check whether non-maskable FIQ (NMFI) support is enabled.
7278              * If this bit is set, software is not allowed to mask
7279              * FIQs, but is allowed to set CPSR_F to 0.
7280              */
7281             if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) &&
7282                 (val & CPSR_F)) {
7283                 qemu_log_mask(LOG_GUEST_ERROR,
7284                               "Ignoring attempt to enable CPSR_F flag "
7285                               "(non-maskable FIQ [NMFI] support enabled)\n");
7286                 mask &= ~CPSR_F;
7287             }
7288         }
7289     }
7290 
7291     env->daif &= ~(CPSR_AIF & mask);
7292     env->daif |= val & CPSR_AIF & mask;
7293 
7294     if (write_type != CPSRWriteRaw &&
7295         ((env->uncached_cpsr ^ val) & mask & CPSR_M)) {
7296         if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) {
7297             /* Note that we can only get here in USR mode if this is a
7298              * gdb stub write; for this case we follow the architectural
7299              * behaviour for guest writes in USR mode of ignoring an attempt
7300              * to switch mode. (Those are caught by translate.c for writes
7301              * triggered by guest instructions.)
7302              */
7303             mask &= ~CPSR_M;
7304         } else if (bad_mode_switch(env, val & CPSR_M, write_type)) {
7305             /* Attempt to switch to an invalid mode: this is UNPREDICTABLE in
7306              * v7, and has defined behaviour in v8:
7307              *  + leave CPSR.M untouched
7308              *  + allow changes to the other CPSR fields
7309              *  + set PSTATE.IL
7310              * For user changes via the GDB stub, we don't set PSTATE.IL,
7311              * as this would be unnecessarily harsh for a user error.
7312              */
7313             mask &= ~CPSR_M;
7314             if (write_type != CPSRWriteByGDBStub &&
7315                 arm_feature(env, ARM_FEATURE_V8)) {
7316                 mask |= CPSR_IL;
7317                 val |= CPSR_IL;
7318             }
7319             qemu_log_mask(LOG_GUEST_ERROR,
7320                           "Illegal AArch32 mode switch attempt from %s to %s\n",
7321                           aarch32_mode_name(env->uncached_cpsr),
7322                           aarch32_mode_name(val));
7323         } else {
7324             qemu_log_mask(CPU_LOG_INT, "%s %s to %s PC 0x%" PRIx32 "\n",
7325                           write_type == CPSRWriteExceptionReturn ?
7326                           "Exception return from AArch32" :
7327                           "AArch32 mode switch from",
7328                           aarch32_mode_name(env->uncached_cpsr),
7329                           aarch32_mode_name(val), env->regs[15]);
7330             switch_mode(env, val & CPSR_M);
7331         }
7332     }
7333     mask &= ~CACHED_CPSR_BITS;
7334     env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
7335 }
7336 
7337 /* Sign/zero extend */
7338 uint32_t HELPER(sxtb16)(uint32_t x)
7339 {
7340     uint32_t res;
7341     res = (uint16_t)(int8_t)x;
7342     res |= (uint32_t)(int8_t)(x >> 16) << 16;
7343     return res;
7344 }
7345 
7346 uint32_t HELPER(uxtb16)(uint32_t x)
7347 {
7348     uint32_t res;
7349     res = (uint16_t)(uint8_t)x;
7350     res |= (uint32_t)(uint8_t)(x >> 16) << 16;
7351     return res;
7352 }
7353 
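/*
 * The Arm architecture defines the result of integer division by zero as 0
 * (when divide-by-zero trapping is not enabled), and SDIV of INT_MIN by -1
 * as INT_MIN (the quotient truncated to 32 bits), so handle those cases
 * explicitly rather than relying on the host's division behaviour.
 */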
7354 int32_t HELPER(sdiv)(int32_t num, int32_t den)
7355 {
7356     if (den == 0)
7357       return 0;
7358     if (num == INT_MIN && den == -1)
7359       return INT_MIN;
7360     return num / den;
7361 }
7362 
7363 uint32_t HELPER(udiv)(uint32_t num, uint32_t den)
7364 {
7365     if (den == 0)
7366       return 0;
7367     return num / den;
7368 }
7369 
7370 uint32_t HELPER(rbit)(uint32_t x)
7371 {
7372     return revbit32(x);
7373 }
7374 
7375 #ifdef CONFIG_USER_ONLY
7376 
7377 /* These should probably raise undefined insn exceptions.  */
7378 void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
7379 {
7380     ARMCPU *cpu = arm_env_get_cpu(env);
7381 
7382     cpu_abort(CPU(cpu), "v7m_msr %d\n", reg);
7383 }
7384 
7385 uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
7386 {
7387     ARMCPU *cpu = arm_env_get_cpu(env);
7388 
7389     cpu_abort(CPU(cpu), "v7m_mrs %d\n", reg);
7390     return 0;
7391 }
7392 
7393 void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
7394 {
7395     /* translate.c should never generate calls here in user-only mode */
7396     g_assert_not_reached();
7397 }
7398 
7399 void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
7400 {
7401     /* translate.c should never generate calls here in user-only mode */
7402     g_assert_not_reached();
7403 }
7404 
7405 void HELPER(v7m_preserve_fp_state)(CPUARMState *env)
7406 {
7407     /* translate.c should never generate calls here in user-only mode */
7408     g_assert_not_reached();
7409 }
7410 
7411 void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr)
7412 {
7413     /* translate.c should never generate calls here in user-only mode */
7414     g_assert_not_reached();
7415 }
7416 
7417 void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr)
7418 {
7419     /* translate.c should never generate calls here in user-only mode */
7420     g_assert_not_reached();
7421 }
7422 
7423 uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
7424 {
7425     /* The TT instructions can be used by unprivileged code, but in
7426      * user-only emulation we don't have the MPU.
7427      * Luckily since we know we are NonSecure unprivileged (and that in
7428      * turn means that the A flag wasn't specified), all the bits in the
7429      * register must be zero:
7430      *  IREGION: 0 because IRVALID is 0
7431      *  IRVALID: 0 because NS
7432      *  S: 0 because NS
7433      *  NSRW: 0 because NS
7434      *  NSR: 0 because NS
7435      *  RW: 0 because unpriv and A flag not set
7436      *  R: 0 because unpriv and A flag not set
7437      *  SRVALID: 0 because NS
7438      *  MRVALID: 0 because unpriv and A flag not set
7439      *  SREGION: 0 because SRVALID is 0
7440      *  MREGION: 0 because MRVALID is 0
7441      */
7442     return 0;
7443 }
7444 
7445 static void switch_mode(CPUARMState *env, int mode)
7446 {
7447     ARMCPU *cpu = arm_env_get_cpu(env);
7448 
7449     if (mode != ARM_CPU_MODE_USR) {
7450         cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
7451     }
7452 }
7453 
7454 uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
7455                                  uint32_t cur_el, bool secure)
7456 {
7457     return 1;
7458 }
7459 
7460 void aarch64_sync_64_to_32(CPUARMState *env)
7461 {
7462     g_assert_not_reached();
7463 }
7464 
7465 #else
7466 
7467 static void switch_mode(CPUARMState *env, int mode)
7468 {
7469     int old_mode;
7470     int i;
7471 
7472     old_mode = env->uncached_cpsr & CPSR_M;
7473     if (mode == old_mode)
7474         return;
7475 
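    /*
     * r8-r12 are banked only for FIQ mode, so swap them between the FIQ
     * and USR copies here; r13, r14 and the SPSR are handled via the
     * banked_r13/banked_r14/banked_spsr arrays below.
     */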
7476     if (old_mode == ARM_CPU_MODE_FIQ) {
7477         memcpy(env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
7478         memcpy(env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
7479     } else if (mode == ARM_CPU_MODE_FIQ) {
7480         memcpy(env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
7481         memcpy(env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
7482     }
7483 
7484     i = bank_number(old_mode);
7485     env->banked_r13[i] = env->regs[13];
7486     env->banked_spsr[i] = env->spsr;
7487 
7488     i = bank_number(mode);
7489     env->regs[13] = env->banked_r13[i];
7490     env->spsr = env->banked_spsr[i];
7491 
7492     env->banked_r14[r14_bank_number(old_mode)] = env->regs[14];
7493     env->regs[14] = env->banked_r14[r14_bank_number(mode)];
7494 }
7495 
7496 /* Physical Interrupt Target EL Lookup Table
7497  *
7498  * [ From ARM ARM section G1.13.4 (Table G1-15) ]
7499  *
7500  * The below multi-dimensional table is used for looking up the target
7501  * exception level given numerous condition criteria.  Specifically, the
7502  * target EL is based on SCR and HCR routing controls as well as the
7503  * currently executing EL and secure state.
7504  *
7505  *    Dimensions:
7506  *    target_el_table[2][2][2][2][2][4]
7507  *                    |  |  |  |  |  +--- Current EL
7508  *                    |  |  |  |  +------ Non-secure(0)/Secure(1)
7509  *                    |  |  |  +--------- HCR mask override
7510  *                    |  |  +------------ SCR exec state control
7511  *                    |  +--------------- SCR mask override
7512  *                    +------------------ 32-bit(0)/64-bit(1) EL3
7513  *
7514  *    The table values are as such:
7515  *    0-3 = EL0-EL3
7516  *     -1 = Cannot occur
7517  *
7518  * The ARM ARM target EL table includes entries indicating that an "exception
7519  * is not taken".  The two cases where this is applicable are:
7520  *    1) An exception is taken from EL3 but the SCR does not have the exception
7521  *    routed to EL3.
7522  *    2) An exception is taken from EL2 but the HCR does not have the exception
7523  *    routed to EL2.
7524  * In these two cases, the table below contains a target of EL1.  This value is
7525  * returned as it is expected that the consumer of the table data will check
7526  * for "target EL >= current EL" to ensure the exception is not taken.
7527  *
7528  *            SCR     HCR
7529  *         64  EA     AMO                 From
7530  *        BIT IRQ     IMO      Non-secure         Secure
7531  *        EL3 FIQ  RW FMO   EL0 EL1 EL2 EL3   EL0 EL1 EL2 EL3
7532  */
7533 static const int8_t target_el_table[2][2][2][2][2][4] = {
7534     {{{{/* 0   0   0   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
7535        {/* 0   0   0   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},
7536       {{/* 0   0   1   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
7537        {/* 0   0   1   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},},
7538      {{{/* 0   1   0   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
7539        {/* 0   1   0   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},
7540       {{/* 0   1   1   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
7541        {/* 0   1   1   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},},},
7542     {{{{/* 1   0   0   0 */{ 1,  1,  2, -1 },{ 1,  1, -1,  1 },},
7543        {/* 1   0   0   1 */{ 2,  2,  2, -1 },{ 1,  1, -1,  1 },},},
7544       {{/* 1   0   1   0 */{ 1,  1,  1, -1 },{ 1,  1, -1,  1 },},
7545        {/* 1   0   1   1 */{ 2,  2,  2, -1 },{ 1,  1, -1,  1 },},},},
7546      {{{/* 1   1   0   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
7547        {/* 1   1   0   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},
7548       {{/* 1   1   1   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
7549        {/* 1   1   1   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},},},
7550 };
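/*
 * Worked example: an IRQ taken from non-secure EL0 with HCR_EL2.IMO set
 * and SCR_EL3.IRQ clear on a system with a 64-bit EL3 indexes
 * target_el_table[1][0][rw][1][0][0], which is 2 for either value of rw,
 * i.e. the IRQ is routed to EL2.
 */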
7551 
7552 /*
7553  * Determine the target EL for physical exceptions
7554  */
7555 uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
7556                                  uint32_t cur_el, bool secure)
7557 {
7558     CPUARMState *env = cs->env_ptr;
7559     bool rw;
7560     bool scr;
7561     bool hcr;
7562     int target_el;
7563     /* Is the highest EL AArch64? */
7564     bool is64 = arm_feature(env, ARM_FEATURE_AARCH64);
7565     uint64_t hcr_el2;
7566 
7567     if (arm_feature(env, ARM_FEATURE_EL3)) {
7568         rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW);
7569     } else {
7570         /* Either EL2 is the highest EL (and so the EL2 register width
7571          * is given by is64); or there is no EL2 or EL3, in which case
7572          * the value of 'rw' does not affect the table lookup anyway.
7573          */
7574         rw = is64;
7575     }
7576 
7577     hcr_el2 = arm_hcr_el2_eff(env);
7578     switch (excp_idx) {
7579     case EXCP_IRQ:
7580         scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ);
7581         hcr = hcr_el2 & HCR_IMO;
7582         break;
7583     case EXCP_FIQ:
7584         scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ);
7585         hcr = hcr_el2 & HCR_FMO;
7586         break;
7587     default:
7588         scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA);
7589         hcr = hcr_el2 & HCR_AMO;
7590         break;
7591     }
7592 
7593     /* Perform a table-lookup for the target EL given the current state */
7594     target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el];
7595 
7596     assert(target_el > 0);
7597 
7598     return target_el;
7599 }
7600 
7601 /*
7602  * Return true if the v7M CPACR permits access to the FPU for the specified
7603  * security state and privilege level.
7604  */
7605 static bool v7m_cpacr_pass(CPUARMState *env, bool is_secure, bool is_priv)
7606 {
7607     switch (extract32(env->v7m.cpacr[is_secure], 20, 2)) {
7608     case 0:
7609     case 2: /* UNPREDICTABLE: we treat like 0 */
7610         return false;
7611     case 1:
7612         return is_priv;
7613     case 3:
7614         return true;
7615     default:
7616         g_assert_not_reached();
7617     }
7618 }
7619 
7620 /*
7621  * What kind of stack write are we doing? This affects how exceptions
7622  * generated during the stacking are treated.
7623  */
7624 typedef enum StackingMode {
7625     STACK_NORMAL,
7626     STACK_IGNFAULTS,
7627     STACK_LAZYFP,
7628 } StackingMode;
7629 
7630 static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value,
7631                             ARMMMUIdx mmu_idx, StackingMode mode)
7632 {
7633     CPUState *cs = CPU(cpu);
7634     CPUARMState *env = &cpu->env;
7635     MemTxAttrs attrs = {};
7636     MemTxResult txres;
7637     target_ulong page_size;
7638     hwaddr physaddr;
7639     int prot;
7640     ARMMMUFaultInfo fi = {};
7641     bool secure = mmu_idx & ARM_MMU_IDX_M_S;
7642     int exc;
7643     bool exc_secure;
7644 
7645     if (get_phys_addr(env, addr, MMU_DATA_STORE, mmu_idx, &physaddr,
7646                       &attrs, &prot, &page_size, &fi, NULL)) {
7647         /* MPU/SAU lookup failed */
7648         if (fi.type == ARMFault_QEMU_SFault) {
7649             if (mode == STACK_LAZYFP) {
7650                 qemu_log_mask(CPU_LOG_INT,
7651                               "...SecureFault with SFSR.LSPERR "
7652                               "during lazy stacking\n");
7653                 env->v7m.sfsr |= R_V7M_SFSR_LSPERR_MASK;
7654             } else {
7655                 qemu_log_mask(CPU_LOG_INT,
7656                               "...SecureFault with SFSR.AUVIOL "
7657                               "during stacking\n");
7658                 env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
7659             }
7660             env->v7m.sfsr |= R_V7M_SFSR_SFARVALID_MASK;
7661             env->v7m.sfar = addr;
7662             exc = ARMV7M_EXCP_SECURE;
7663             exc_secure = false;
7664         } else {
7665             if (mode == STACK_LAZYFP) {
7666                 qemu_log_mask(CPU_LOG_INT,
7667                               "...MemManageFault with CFSR.MLSPERR\n");
7668                 env->v7m.cfsr[secure] |= R_V7M_CFSR_MLSPERR_MASK;
7669             } else {
7670                 qemu_log_mask(CPU_LOG_INT,
7671                               "...MemManageFault with CFSR.MSTKERR\n");
7672                 env->v7m.cfsr[secure] |= R_V7M_CFSR_MSTKERR_MASK;
7673             }
7674             exc = ARMV7M_EXCP_MEM;
7675             exc_secure = secure;
7676         }
7677         goto pend_fault;
7678     }
7679     address_space_stl_le(arm_addressspace(cs, attrs), physaddr, value,
7680                          attrs, &txres);
7681     if (txres != MEMTX_OK) {
7682         /* BusFault trying to write the data */
7683         if (mode == STACK_LAZYFP) {
7684             qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.LSPERR\n");
7685             env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_LSPERR_MASK;
7686         } else {
7687             qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.STKERR\n");
7688             env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_STKERR_MASK;
7689         }
7690         exc = ARMV7M_EXCP_BUS;
7691         exc_secure = false;
7692         goto pend_fault;
7693     }
7694     return true;
7695 
7696 pend_fault:
7697     /* By pending the exception at this point we are making
7698      * the IMPDEF choice "overridden exceptions pended" (see the
7699      * MergeExcInfo() pseudocode). The other choice would be to not
7700      * pend them now and then make a choice about which to throw away
7701      * later if we have two derived exceptions.
7702      * The only case when we must not pend the exception but instead
7703      * throw it away is if we are doing the push of the callee registers
7704      * and we've already generated a derived exception (this is indicated
7705      * by the caller passing STACK_IGNFAULTS). Even in this case we will
7706      * still update the fault status registers.
7707      */
7708     switch (mode) {
7709     case STACK_NORMAL:
7710         armv7m_nvic_set_pending_derived(env->nvic, exc, exc_secure);
7711         break;
7712     case STACK_LAZYFP:
7713         armv7m_nvic_set_pending_lazyfp(env->nvic, exc, exc_secure);
7714         break;
7715     case STACK_IGNFAULTS:
7716         break;
7717     }
7718     return false;
7719 }
7720 
7721 static bool v7m_stack_read(ARMCPU *cpu, uint32_t *dest, uint32_t addr,
7722                            ARMMMUIdx mmu_idx)
7723 {
7724     CPUState *cs = CPU(cpu);
7725     CPUARMState *env = &cpu->env;
7726     MemTxAttrs attrs = {};
7727     MemTxResult txres;
7728     target_ulong page_size;
7729     hwaddr physaddr;
7730     int prot;
7731     ARMMMUFaultInfo fi = {};
7732     bool secure = mmu_idx & ARM_MMU_IDX_M_S;
7733     int exc;
7734     bool exc_secure;
7735     uint32_t value;
7736 
7737     if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &physaddr,
7738                       &attrs, &prot, &page_size, &fi, NULL)) {
7739         /* MPU/SAU lookup failed */
7740         if (fi.type == ARMFault_QEMU_SFault) {
7741             qemu_log_mask(CPU_LOG_INT,
7742                           "...SecureFault with SFSR.AUVIOL during unstack\n");
7743             env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
7744             env->v7m.sfar = addr;
7745             exc = ARMV7M_EXCP_SECURE;
7746             exc_secure = false;
7747         } else {
7748             qemu_log_mask(CPU_LOG_INT,
7749                           "...MemManageFault with CFSR.MUNSTKERR\n");
7750             env->v7m.cfsr[secure] |= R_V7M_CFSR_MUNSTKERR_MASK;
7751             exc = ARMV7M_EXCP_MEM;
7752             exc_secure = secure;
7753         }
7754         goto pend_fault;
7755     }
7756 
7757     value = address_space_ldl(arm_addressspace(cs, attrs), physaddr,
7758                               attrs, &txres);
7759     if (txres != MEMTX_OK) {
7760         /* BusFault trying to read the data */
7761         qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.UNSTKERR\n");
7762         env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_UNSTKERR_MASK;
7763         exc = ARMV7M_EXCP_BUS;
7764         exc_secure = false;
7765         goto pend_fault;
7766     }
7767 
7768     *dest = value;
7769     return true;
7770 
7771 pend_fault:
7772     /* By pending the exception at this point we are making
7773      * the IMPDEF choice "overridden exceptions pended" (see the
7774      * MergeExcInfo() pseudocode). The other choice would be to not
7775      * pend them now and then make a choice about which to throw away
7776      * later if we have two derived exceptions.
7777      */
7778     armv7m_nvic_set_pending(env->nvic, exc, exc_secure);
7779     return false;
7780 }
7781 
7782 void HELPER(v7m_preserve_fp_state)(CPUARMState *env)
7783 {
7784     /*
7785      * Preserve FP state (because LSPACT was set and we are about
7786      * to execute an FP instruction). This corresponds to the
7787      * PreserveFPState() pseudocode.
7788      * We may throw an exception if the stacking fails.
7789      */
7790     ARMCPU *cpu = arm_env_get_cpu(env);
7791     bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
7792     bool negpri = !(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_HFRDY_MASK);
7793     bool is_priv = !(env->v7m.fpccr[is_secure] & R_V7M_FPCCR_USER_MASK);
7794     bool splimviol = env->v7m.fpccr[is_secure] & R_V7M_FPCCR_SPLIMVIOL_MASK;
7795     uint32_t fpcar = env->v7m.fpcar[is_secure];
7796     bool stacked_ok = true;
7797     bool ts = is_secure && (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK);
7798     bool take_exception;
7799 
7800     /* Take the iothread lock as we are going to touch the NVIC */
7801     qemu_mutex_lock_iothread();
7802 
7803     /* Check that the background context had access to the FPU */
7804     if (!v7m_cpacr_pass(env, is_secure, is_priv)) {
7805         armv7m_nvic_set_pending_lazyfp(env->nvic, ARMV7M_EXCP_USAGE, is_secure);
7806         env->v7m.cfsr[is_secure] |= R_V7M_CFSR_NOCP_MASK;
7807         stacked_ok = false;
7808     } else if (!is_secure && !extract32(env->v7m.nsacr, 10, 1)) {
7809         armv7m_nvic_set_pending_lazyfp(env->nvic, ARMV7M_EXCP_USAGE, M_REG_S);
7810         env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK;
7811         stacked_ok = false;
7812     }
7813 
7814     if (!splimviol && stacked_ok) {
7815         /* We only stack if the stack limit wasn't violated */
7816         int i;
7817         ARMMMUIdx mmu_idx;
7818 
7819         mmu_idx = arm_v7m_mmu_idx_all(env, is_secure, is_priv, negpri);
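        /*
         * Stack the FP registers a D register (two words) at a time:
         * s0..s15 always, and s16..s31 as well when TS is set.
         */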
7820         for (i = 0; i < (ts ? 32 : 16); i += 2) {
7821             uint64_t dn = *aa32_vfp_dreg(env, i / 2);
7822             uint32_t faddr = fpcar + 4 * i;
7823             uint32_t slo = extract64(dn, 0, 32);
7824             uint32_t shi = extract64(dn, 32, 32);
7825 
7826             if (i >= 16) {
7827                 faddr += 8; /* skip the slot for the FPSCR */
7828             }
7829             stacked_ok = stacked_ok &&
7830                 v7m_stack_write(cpu, faddr, slo, mmu_idx, STACK_LAZYFP) &&
7831                 v7m_stack_write(cpu, faddr + 4, shi, mmu_idx, STACK_LAZYFP);
7832         }
7833 
7834         stacked_ok = stacked_ok &&
7835             v7m_stack_write(cpu, fpcar + 0x40,
7836                             vfp_get_fpscr(env), mmu_idx, STACK_LAZYFP);
7837     }
7838 
7839     /*
7840      * We definitely pended an exception, but it's possible that it
7841      * might not be able to be taken now. If its priority permits us
7842      * to take it now, then we must not update the LSPACT or FP regs,
7843      * but instead jump out to take the exception immediately.
7844      * If it's just pending and won't be taken until the current
7845      * handler exits, then we do update LSPACT and the FP regs.
7846      */
7847     take_exception = !stacked_ok &&
7848         armv7m_nvic_can_take_pending_exception(env->nvic);
7849 
7850     qemu_mutex_unlock_iothread();
7851 
7852     if (take_exception) {
7853         raise_exception_ra(env, EXCP_LAZYFP, 0, 1, GETPC());
7854     }
7855 
7856     env->v7m.fpccr[is_secure] &= ~R_V7M_FPCCR_LSPACT_MASK;
7857 
7858     if (ts) {
7859         /* Clear s0 to s31 and the FPSCR */
7860         int i;
7861 
7862         for (i = 0; i < 32; i += 2) {
7863             *aa32_vfp_dreg(env, i / 2) = 0;
7864         }
7865         vfp_set_fpscr(env, 0);
7866     }
7867     /*
7868      * Otherwise s0 to s15 and FPSCR are UNKNOWN; we choose to leave them
7869      * unchanged.
7870      */
7871 }
7872 
7873 /* Write to v7M CONTROL.SPSEL bit for the specified security bank.
7874  * This may switch the current stack pointer between the Main and Process
7875  * stack pointers if it is done on the CONTROL register for the current
7876  * security state.
7877  */
7878 static void write_v7m_control_spsel_for_secstate(CPUARMState *env,
7879                                                  bool new_spsel,
7880                                                  bool secstate)
7881 {
7882     bool old_is_psp = v7m_using_psp(env);
7883 
7884     env->v7m.control[secstate] =
7885         deposit32(env->v7m.control[secstate],
7886                   R_V7M_CONTROL_SPSEL_SHIFT,
7887                   R_V7M_CONTROL_SPSEL_LENGTH, new_spsel);
7888 
7889     if (secstate == env->v7m.secure) {
7890         bool new_is_psp = v7m_using_psp(env);
7891         uint32_t tmp;
7892 
7893         if (old_is_psp != new_is_psp) {
7894             tmp = env->v7m.other_sp;
7895             env->v7m.other_sp = env->regs[13];
7896             env->regs[13] = tmp;
7897         }
7898     }
7899 }
7900 
7901 /* Write to v7M CONTROL.SPSEL bit. This may switch the current
7902  * stack pointer between the Main and Process stack pointers.
7903  */
7904 static void write_v7m_control_spsel(CPUARMState *env, bool new_spsel)
7905 {
7906     write_v7m_control_spsel_for_secstate(env, new_spsel, env->v7m.secure);
7907 }
7908 
7909 void write_v7m_exception(CPUARMState *env, uint32_t new_exc)
7910 {
7911     /* Write a new value to v7m.exception, thus transitioning into or out
7912      * of Handler mode; this may result in a change of active stack pointer.
7913      */
7914     bool new_is_psp, old_is_psp = v7m_using_psp(env);
7915     uint32_t tmp;
7916 
7917     env->v7m.exception = new_exc;
7918 
7919     new_is_psp = v7m_using_psp(env);
7920 
7921     if (old_is_psp != new_is_psp) {
7922         tmp = env->v7m.other_sp;
7923         env->v7m.other_sp = env->regs[13];
7924         env->regs[13] = tmp;
7925     }
7926 }
7927 
7928 /* Switch M profile security state between NS and S */
7929 static void switch_v7m_security_state(CPUARMState *env, bool new_secstate)
7930 {
7931     uint32_t new_ss_msp, new_ss_psp;
7932 
7933     if (env->v7m.secure == new_secstate) {
7934         return;
7935     }
7936 
7937     /* All the banked state is accessed by looking at env->v7m.secure
7938      * except for the stack pointer; rearrange the SP appropriately.
7939      */
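         /*
          * Four stack pointer slots are kept: regs[13] is the live SP,
          * v7m.other_sp is the other SP of the current security state, and
          * v7m.other_ss_msp/other_ss_psp are the MSP and PSP of the other
          * security state.
          */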
7940     new_ss_msp = env->v7m.other_ss_msp;
7941     new_ss_psp = env->v7m.other_ss_psp;
7942 
7943     if (v7m_using_psp(env)) {
7944         env->v7m.other_ss_psp = env->regs[13];
7945         env->v7m.other_ss_msp = env->v7m.other_sp;
7946     } else {
7947         env->v7m.other_ss_msp = env->regs[13];
7948         env->v7m.other_ss_psp = env->v7m.other_sp;
7949     }
7950 
7951     env->v7m.secure = new_secstate;
7952 
7953     if (v7m_using_psp(env)) {
7954         env->regs[13] = new_ss_psp;
7955         env->v7m.other_sp = new_ss_msp;
7956     } else {
7957         env->regs[13] = new_ss_msp;
7958         env->v7m.other_sp = new_ss_psp;
7959     }
7960 }
7961 
7962 void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
7963 {
7964     /* Handle v7M BXNS:
7965      *  - if the return value is a magic value, do exception return (like BX)
7966      *  - otherwise bit 0 of the return value is the target security state
7967      */
7968     uint32_t min_magic;
7969 
7970     if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
7971         /* Covers FNC_RETURN and EXC_RETURN magic */
7972         min_magic = FNC_RETURN_MIN_MAGIC;
7973     } else {
7974         /* EXC_RETURN magic only */
7975         min_magic = EXC_RETURN_MIN_MAGIC;
7976     }
7977 
7978     if (dest >= min_magic) {
7979         /* This is an exception return magic value; put it where
7980          * do_v7m_exception_exit() expects and raise EXCEPTION_EXIT.
7981          * Note that if we ever add gen_ss_advance() singlestep support to
7982          * M profile this should count as an "instruction execution complete"
7983          * event (compare gen_bx_excret_final_code()).
7984          */
7985         env->regs[15] = dest & ~1;
7986         env->thumb = dest & 1;
7987         HELPER(exception_internal)(env, EXCP_EXCEPTION_EXIT);
7988         /* notreached */
7989     }
7990 
7991     /* translate.c should have made BXNS UNDEF unless we're secure */
7992     assert(env->v7m.secure);
7993 
7994     if (!(dest & 1)) {
7995         env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
7996     }
7997     switch_v7m_security_state(env, dest & 1);
7998     env->thumb = 1;
7999     env->regs[15] = dest & ~1;
8000 }
8001 
8002 void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
8003 {
8004     /* Handle v7M BLXNS:
8005      *  - bit 0 of the destination address is the target security state
8006      */
8007 
8008     /* At this point regs[15] is the address just after the BLXNS */
8009     uint32_t nextinst = env->regs[15] | 1;
8010     uint32_t sp = env->regs[13] - 8;
8011     uint32_t saved_psr;
8012 
8013     /* translate.c will have made BLXNS UNDEF unless we're secure */
8014     assert(env->v7m.secure);
8015 
8016     if (dest & 1) {
8017         /* target is Secure, so this is just a normal BLX,
8018          * except that the low bit doesn't indicate Thumb/not.
8019          */
8020         env->regs[14] = nextinst;
8021         env->thumb = 1;
8022         env->regs[15] = dest & ~1;
8023         return;
8024     }
8025 
8026     /* Target is non-secure: first push a stack frame */
8027     if (!QEMU_IS_ALIGNED(sp, 8)) {
8028         qemu_log_mask(LOG_GUEST_ERROR,
8029                       "BLXNS with misaligned SP is UNPREDICTABLE\n");
8030     }
8031 
8032     if (sp < v7m_sp_limit(env)) {
8033         raise_exception(env, EXCP_STKOF, 0, 1);
8034     }
8035 
8036     saved_psr = env->v7m.exception;
8037     if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK) {
8038         saved_psr |= XPSR_SFPA;
8039     }
8040 
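         /*
          * The two-word frame holds the return address (with its Thumb bit)
          * at sp and the partial saved PSR (IPSR plus the SFPA flag) at
          * sp + 4; the eventual secure function return pops it back.
          */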
8041     /* Note that these stores can throw exceptions on MPU faults */
8042     cpu_stl_data(env, sp, nextinst);
8043     cpu_stl_data(env, sp + 4, saved_psr);
8044 
8045     env->regs[13] = sp;
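         /*
          * 0xfeffffff is the FNC_RETURN magic value: a later branch to this
          * address is handled as a secure function return, not a normal branch.
          */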
8046     env->regs[14] = 0xfeffffff;
8047     if (arm_v7m_is_handler_mode(env)) {
8048         /* Write a dummy value to IPSR, to avoid leaking the current secure
8049          * exception number to non-secure code. This is guaranteed not
8050          * to cause write_v7m_exception() to actually change stacks.
8051          */
8052         write_v7m_exception(env, 1);
8053     }
8054     env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
8055     switch_v7m_security_state(env, 0);
8056     env->thumb = 1;
8057     env->regs[15] = dest;
8058 }
8059 
8060 static uint32_t *get_v7m_sp_ptr(CPUARMState *env, bool secure, bool threadmode,
8061                                 bool spsel)
8062 {
8063     /* Return a pointer to the location where we currently store the
8064      * stack pointer for the requested security state and thread mode.
8065      * This pointer will become invalid if the CPU state is updated
8066      * such that the stack pointers are switched around (eg changing
8067      * the SPSEL control bit).
8068      * Compare the v8M ARM ARM pseudocode LookUpSP_with_security_mode().
8069      * Unlike that pseudocode, we require the caller to pass us in the
8070      * SPSEL control bit value; this is because we also use this
8071      * function in handling of pushing of the callee-saves registers
8072      * part of the v8M stack frame (pseudocode PushCalleeStack()),
8073      * and in the tailchain codepath the SPSEL bit comes from the exception
8074      * return magic LR value from the previous exception. The pseudocode
8075      * opencodes the stack-selection in PushCalleeStack(), but we prefer
8076      * to make this utility function generic enough to do the job.
8077      */
8078     bool want_psp = threadmode && spsel;
8079 
8080     if (secure == env->v7m.secure) {
8081         if (want_psp == v7m_using_psp(env)) {
8082             return &env->regs[13];
8083         } else {
8084             return &env->v7m.other_sp;
8085         }
8086     } else {
8087         if (want_psp) {
8088             return &env->v7m.other_ss_psp;
8089         } else {
8090             return &env->v7m.other_ss_msp;
8091         }
8092     }
8093 }
8094 
8095 static bool arm_v7m_load_vector(ARMCPU *cpu, int exc, bool targets_secure,
8096                                 uint32_t *pvec)
8097 {
8098     CPUState *cs = CPU(cpu);
8099     CPUARMState *env = &cpu->env;
8100     MemTxResult result;
8101     uint32_t addr = env->v7m.vecbase[targets_secure] + exc * 4;
8102     uint32_t vector_entry;
8103     MemTxAttrs attrs = {};
8104     ARMMMUIdx mmu_idx;
8105     bool exc_secure;
8106 
8107     mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targets_secure, true);
8108 
8109     /* We don't do a get_phys_addr() here because the rules for vector
8110      * loads are special: they always use the default memory map, and
8111      * the default memory map permits reads from all addresses.
8112      * Since there's no easy way to pass through to pmsav8_mpu_lookup()
8113      * that we want this special case which would always say "yes",
8114      * we just do the SAU lookup here followed by a direct physical load.
8115      */
8116     attrs.secure = targets_secure;
8117     attrs.user = false;
8118 
8119     if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
8120         V8M_SAttributes sattrs = {};
8121 
8122         v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
8123         if (sattrs.ns) {
8124             attrs.secure = false;
8125         } else if (!targets_secure) {
8126             /* NS access to S memory */
8127             goto load_fail;
8128         }
8129     }
8130 
8131     vector_entry = address_space_ldl(arm_addressspace(cs, attrs), addr,
8132                                      attrs, &result);
8133     if (result != MEMTX_OK) {
8134         goto load_fail;
8135     }
8136     *pvec = vector_entry;
8137     return true;
8138 
8139 load_fail:
8140     /* All vector table fetch failures are reported as HardFault, with
8141      * HFSR.VECTTBL and .FORCED set. (FORCED is set because
8142      * technically the underlying exception is a MemManage or BusFault
8143      * that is escalated to HardFault.) This is a terminal exception,
8144      * so we will either take the HardFault immediately or else enter
8145      * lockup (the latter case is handled in armv7m_nvic_set_pending_derived()).
8146      */
8147     exc_secure = targets_secure ||
8148         !(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK);
8149     env->v7m.hfsr |= R_V7M_HFSR_VECTTBL_MASK | R_V7M_HFSR_FORCED_MASK;
8150     armv7m_nvic_set_pending_derived(env->nvic, ARMV7M_EXCP_HARD, exc_secure);
8151     return false;
8152 }
8153 
8154 static uint32_t v7m_integrity_sig(CPUARMState *env, uint32_t lr)
8155 {
8156     /*
8157      * Return the integrity signature value for the callee-saves
8158      * stack frame section. @lr is the exception return payload/LR value
8159      * whose FType bit forms bit 0 of the signature if FP is present.
8160      */
8161     uint32_t sig = 0xfefa125a;
8162 
8163     if (!arm_feature(env, ARM_FEATURE_VFP) || (lr & R_V7M_EXCRET_FTYPE_MASK)) {
8164         sig |= 1;
8165     }
8166     return sig;
8167 }
8168 
8169 static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain,
8170                                   bool ignore_faults)
8171 {
8172     /* For v8M, push the callee-saves register part of the stack frame.
8173      * Compare the v8M pseudocode PushCalleeStack().
8174      * In the tailchaining case this may not be the current stack.
8175      */
8176     CPUARMState *env = &cpu->env;
8177     uint32_t *frame_sp_p;
8178     uint32_t frameptr;
8179     ARMMMUIdx mmu_idx;
8180     bool stacked_ok;
8181     uint32_t limit;
8182     bool want_psp;
8183     uint32_t sig;
8184     StackingMode smode = ignore_faults ? STACK_IGNFAULTS : STACK_NORMAL;
8185 
8186     if (dotailchain) {
8187         bool mode = lr & R_V7M_EXCRET_MODE_MASK;
8188         bool priv = !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_NPRIV_MASK) ||
8189             !mode;
8190 
8191         mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, M_REG_S, priv);
8192         frame_sp_p = get_v7m_sp_ptr(env, M_REG_S, mode,
8193                                     lr & R_V7M_EXCRET_SPSEL_MASK);
8194         want_psp = mode && (lr & R_V7M_EXCRET_SPSEL_MASK);
8195         if (want_psp) {
8196             limit = env->v7m.psplim[M_REG_S];
8197         } else {
8198             limit = env->v7m.msplim[M_REG_S];
8199         }
8200     } else {
8201         mmu_idx = arm_mmu_idx(env);
8202         frame_sp_p = &env->regs[13];
8203         limit = v7m_sp_limit(env);
8204     }
8205 
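         /*
          * The callee-saves frame is 0x28 bytes: the integrity signature at
          * offset 0, a reserved word at 0x4, then r4..r11 at 0x8..0x24.
          */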
8206     frameptr = *frame_sp_p - 0x28;
8207     if (frameptr < limit) {
8208         /*
8209          * Stack limit failure: set SP to the limit value, and generate
8210          * STKOF UsageFault. Stack pushes below the limit must not be
8211          * performed. It is IMPDEF whether pushes above the limit are
8212          * performed; we choose not to.
8213          */
8214         qemu_log_mask(CPU_LOG_INT,
8215                       "...STKOF during callee-saves register stacking\n");
8216         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
8217         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
8218                                 env->v7m.secure);
8219         *frame_sp_p = limit;
8220         return true;
8221     }
8222 
8223     /* Write as much of the stack frame as we can. A write failure may
8224      * cause us to pend a derived exception.
8225      */
8226     sig = v7m_integrity_sig(env, lr);
8227     stacked_ok =
8228         v7m_stack_write(cpu, frameptr, sig, mmu_idx, smode) &&
8229         v7m_stack_write(cpu, frameptr + 0x8, env->regs[4], mmu_idx, smode) &&
8230         v7m_stack_write(cpu, frameptr + 0xc, env->regs[5], mmu_idx, smode) &&
8231         v7m_stack_write(cpu, frameptr + 0x10, env->regs[6], mmu_idx, smode) &&
8232         v7m_stack_write(cpu, frameptr + 0x14, env->regs[7], mmu_idx, smode) &&
8233         v7m_stack_write(cpu, frameptr + 0x18, env->regs[8], mmu_idx, smode) &&
8234         v7m_stack_write(cpu, frameptr + 0x1c, env->regs[9], mmu_idx, smode) &&
8235         v7m_stack_write(cpu, frameptr + 0x20, env->regs[10], mmu_idx, smode) &&
8236         v7m_stack_write(cpu, frameptr + 0x24, env->regs[11], mmu_idx, smode);
8237 
8238     /* Update SP regardless of whether any of the stack accesses failed. */
8239     *frame_sp_p = frameptr;
8240 
8241     return !stacked_ok;
8242 }
8243 
8244 static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
8245                                 bool ignore_stackfaults)
8246 {
8247     /* Do the "take the exception" parts of exception entry,
8248      * but not the pushing of state to the stack. This is
8249      * similar to the pseudocode ExceptionTaken() function.
8250      */
8251     CPUARMState *env = &cpu->env;
8252     uint32_t addr;
8253     bool targets_secure;
8254     int exc;
8255     bool push_failed = false;
8256 
8257     armv7m_nvic_get_pending_irq_info(env->nvic, &exc, &targets_secure);
8258     qemu_log_mask(CPU_LOG_INT, "...taking pending %s exception %d\n",
8259                   targets_secure ? "secure" : "nonsecure", exc);
8260 
8261     if (dotailchain) {
8262         /* Sanitize LR FType and PREFIX bits */
8263         if (!arm_feature(env, ARM_FEATURE_VFP)) {
8264             lr |= R_V7M_EXCRET_FTYPE_MASK;
8265         }
8266         lr = deposit32(lr, 24, 8, 0xff);
8267     }
8268 
8269     if (arm_feature(env, ARM_FEATURE_V8)) {
8270         if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
8271             (lr & R_V7M_EXCRET_S_MASK)) {
8272             /* The background code (the owner of the registers in the
8273              * exception frame) is Secure. This means it may either already
8274              * have pushed the callee-saves registers, or need to push them now.
8275              */
8276             if (targets_secure) {
8277                 if (dotailchain && !(lr & R_V7M_EXCRET_ES_MASK)) {
8278                     /* We took an exception from Secure to NonSecure
8279                      * (which means the callee-saved registers got stacked)
8280                      * and are now tailchaining to a Secure exception.
8281                      * Clear DCRS so eventual return from this Secure
8282                      * exception unstacks the callee-saved registers.
8283                      */
8284                     lr &= ~R_V7M_EXCRET_DCRS_MASK;
8285                 }
8286             } else {
8287                 /* We're going to a non-secure exception; push the
8288                  * callee-saves registers to the stack now, if they're
8289                  * not already saved.
8290                  */
8291                 if (lr & R_V7M_EXCRET_DCRS_MASK &&
8292                     !(dotailchain && !(lr & R_V7M_EXCRET_ES_MASK))) {
8293                     push_failed = v7m_push_callee_stack(cpu, lr, dotailchain,
8294                                                         ignore_stackfaults);
8295                 }
8296                 lr |= R_V7M_EXCRET_DCRS_MASK;
8297             }
8298         }
8299 
8300         lr &= ~R_V7M_EXCRET_ES_MASK;
8301         if (targets_secure || !arm_feature(env, ARM_FEATURE_M_SECURITY)) {
8302             lr |= R_V7M_EXCRET_ES_MASK;
8303         }
8304         lr &= ~R_V7M_EXCRET_SPSEL_MASK;
8305         if (env->v7m.control[targets_secure] & R_V7M_CONTROL_SPSEL_MASK) {
8306             lr |= R_V7M_EXCRET_SPSEL_MASK;
8307         }
8308 
8309         /* Clear registers if necessary to prevent non-secure exception
8310          * code from being able to see register values from secure code.
8311          * Where register values become architecturally UNKNOWN we leave
8312          * them with their previous values.
8313          */
8314         if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
8315             if (!targets_secure) {
8316                 /* Always clear the caller-saved registers (they have been
8317                  * pushed to the stack earlier in v7m_push_stack()).
8318                  * Clear callee-saved registers if the background code is
8319                  * Secure (in which case these regs were saved in
8320                  * v7m_push_callee_stack()).
8321                  */
8322                 int i;
8323 
8324                 for (i = 0; i < 13; i++) {
8325                     /* r4..r11 are callee-saves, zero only if EXCRET.S == 1 */
8326                     if (i < 4 || i > 11 || (lr & R_V7M_EXCRET_S_MASK)) {
8327                         env->regs[i] = 0;
8328                     }
8329                 }
8330                 /* Clear EAPSR */
8331                 xpsr_write(env, 0, XPSR_NZCV | XPSR_Q | XPSR_GE | XPSR_IT);
8332             }
8333         }
8334     }
8335 
8336     if (push_failed && !ignore_stackfaults) {
8337         /* Derived exception on callee-saves register stacking:
8338          * we might now want to take a different exception which
8339          * targets a different security state, so try again from the top.
8340          */
8341         qemu_log_mask(CPU_LOG_INT,
8342                       "...derived exception on callee-saves register stacking");
8343         v7m_exception_taken(cpu, lr, true, true);
8344         return;
8345     }
8346 
8347     if (!arm_v7m_load_vector(cpu, exc, targets_secure, &addr)) {
8348         /* Vector load failed: derived exception */
8349         qemu_log_mask(CPU_LOG_INT, "...derived exception on vector table load");
8350         v7m_exception_taken(cpu, lr, true, true);
8351         return;
8352     }
8353 
8354     /* Now we've done everything that might cause a derived exception
8355      * we can go ahead and activate whichever exception we're going to
8356      * take (which might now be the derived exception).
8357      */
8358     armv7m_nvic_acknowledge_irq(env->nvic);
8359 
8360     /* Switch to target security state -- must do this before writing SPSEL */
8361     switch_v7m_security_state(env, targets_secure);
8362     write_v7m_control_spsel(env, 0);
8363     arm_clear_exclusive(env);
8364     /* Clear SFPA and FPCA (has no effect if no FPU) */
8365     env->v7m.control[M_REG_S] &=
8366         ~(R_V7M_CONTROL_FPCA_MASK | R_V7M_CONTROL_SFPA_MASK);
8367     /* Clear IT bits */
8368     env->condexec_bits = 0;
8369     env->regs[14] = lr;
8370     env->regs[15] = addr & 0xfffffffe;
8371     env->thumb = addr & 1;
8372 }
8373 
8374 static void v7m_update_fpccr(CPUARMState *env, uint32_t frameptr,
8375                              bool apply_splim)
8376 {
8377     /*
8378      * Like the pseudocode UpdateFPCCR: save state in FPCAR and FPCCR
8379      * that we will need later in order to do lazy FP reg stacking.
8380      */
8381     bool is_secure = env->v7m.secure;
8382     void *nvic = env->nvic;
8383     /*
8384      * Some bits are unbanked and always live in fpccr[M_REG_S]; some bits
8385      * are banked and we want to update the bit in the bank for the
8386      * current security state; and in one case we want to specifically
8387      * update the NS banked version of a bit even if we are secure.
8388      */
8389     uint32_t *fpccr_s = &env->v7m.fpccr[M_REG_S];
8390     uint32_t *fpccr_ns = &env->v7m.fpccr[M_REG_NS];
8391     uint32_t *fpccr = &env->v7m.fpccr[is_secure];
8392     bool hfrdy, bfrdy, mmrdy, ns_ufrdy, s_ufrdy, sfrdy, monrdy;
8393 
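         /*
          * FPCAR records the 8-aligned address of the FP save area, which
          * v7m_preserve_fp_state() will use if lazy stacking later triggers.
          */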
8394     env->v7m.fpcar[is_secure] = frameptr & ~0x7;
8395 
8396     if (apply_splim && arm_feature(env, ARM_FEATURE_V8)) {
8397         bool splimviol;
8398         uint32_t splim = v7m_sp_limit(env);
8399         bool ign = armv7m_nvic_neg_prio_requested(nvic, is_secure) &&
8400             (env->v7m.ccr[is_secure] & R_V7M_CCR_STKOFHFNMIGN_MASK);
8401 
8402         splimviol = !ign && frameptr < splim;
8403         *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, SPLIMVIOL, splimviol);
8404     }
8405 
8406     *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, LSPACT, 1);
8407 
8408     *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, S, is_secure);
8409 
8410     *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, USER, arm_current_el(env) == 0);
8411 
8412     *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, THREAD,
8413                         !arm_v7m_is_handler_mode(env));
8414 
8415     hfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_HARD, false);
8416     *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, HFRDY, hfrdy);
8417 
8418     bfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_BUS, false);
8419     *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, BFRDY, bfrdy);
8420 
8421     mmrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_MEM, is_secure);
8422     *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, MMRDY, mmrdy);
8423 
8424     ns_ufrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_USAGE, false);
8425     *fpccr_ns = FIELD_DP32(*fpccr_ns, V7M_FPCCR, UFRDY, ns_ufrdy);
8426 
8427     monrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_DEBUG, false);
8428     *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, MONRDY, monrdy);
8429 
8430     if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
8431         s_ufrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_USAGE, true);
8432         *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, UFRDY, s_ufrdy);
8433 
8434         sfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_SECURE, false);
8435         *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, SFRDY, sfrdy);
8436     }
8437 }
8438 
8439 void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr)
8440 {
8441     /* fptr is the value of Rn, the frame pointer we store the FP regs to */
8442     bool s = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
8443     bool lspact = env->v7m.fpccr[s] & R_V7M_FPCCR_LSPACT_MASK;
8444 
8445     assert(env->v7m.secure);
8446 
8447     if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
8448         return;
8449     }
8450 
8451     /* Check access to the coprocessor is permitted */
8452     if (!v7m_cpacr_pass(env, true, arm_current_el(env) != 0)) {
8453         raise_exception_ra(env, EXCP_NOCP, 0, 1, GETPC());
8454     }
8455 
8456     if (lspact) {
8457         /* LSPACT should not be active when there is active FP state */
8458         raise_exception_ra(env, EXCP_LSERR, 0, 1, GETPC());
8459     }
8460 
8461     if (fptr & 7) {
8462         raise_exception_ra(env, EXCP_UNALIGNED, 0, 1, GETPC());
8463     }
8464 
8465     /*
8466      * Note that we do not use v7m_stack_write() here, because the
8467      * accesses should not set the FSR bits for stacking errors if they
8468      * fail. (In pseudocode terms, they are AccType_NORMAL, not AccType_STACK
8469      * or AccType_LAZYFP). Faults in cpu_stl_data() will throw exceptions
8470      * and longjmp out.
8471      */
8472     if (!(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPEN_MASK)) {
8473         bool ts = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK;
8474         int i;
8475 
8476         for (i = 0; i < (ts ? 32 : 16); i += 2) {
8477             uint64_t dn = *aa32_vfp_dreg(env, i / 2);
8478             uint32_t faddr = fptr + 4 * i;
8479             uint32_t slo = extract64(dn, 0, 32);
8480             uint32_t shi = extract64(dn, 32, 32);
8481 
8482             if (i >= 16) {
8483                 faddr += 8; /* skip the slot for the FPSCR */
8484             }
8485             cpu_stl_data(env, faddr, slo);
8486             cpu_stl_data(env, faddr + 4, shi);
8487         }
8488         cpu_stl_data(env, fptr + 0x40, vfp_get_fpscr(env));
8489 
8490         /*
8491          * If TS is 0 then s0 to s15 and FPSCR are UNKNOWN; we choose to
8492          * leave them unchanged, matching our choice in v7m_preserve_fp_state.
8493          */
8494         if (ts) {
8495             for (i = 0; i < 32; i += 2) {
8496                 *aa32_vfp_dreg(env, i / 2) = 0;
8497             }
8498             vfp_set_fpscr(env, 0);
8499         }
8500     } else {
8501         v7m_update_fpccr(env, fptr, false);
8502     }
8503 
8504     env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
8505 }
8506 
8507 void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr)
8508 {
8509     /* fptr is the value of Rn, the frame pointer we load the FP regs from */
8510     assert(env->v7m.secure);
8511 
8512     if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
8513         return;
8514     }
8515 
8516     /* Check access to the coprocessor is permitted */
8517     if (!v7m_cpacr_pass(env, true, arm_current_el(env) != 0)) {
8518         raise_exception_ra(env, EXCP_NOCP, 0, 1, GETPC());
8519     }
8520 
8521     if (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK) {
8522         /* State in FP is still valid */
8523         env->v7m.fpccr[M_REG_S] &= ~R_V7M_FPCCR_LSPACT_MASK;
8524     } else {
8525         bool ts = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK;
8526         int i;
8527         uint32_t fpscr;
8528 
8529         if (fptr & 7) {
8530             raise_exception_ra(env, EXCP_UNALIGNED, 0, 1, GETPC());
8531         }
8532 
8533         for (i = 0; i < (ts ? 32 : 16); i += 2) {
8534             uint32_t slo, shi;
8535             uint64_t dn;
8536             uint32_t faddr = fptr + 4 * i;
8537 
8538             if (i >= 16) {
8539                 faddr += 8; /* skip the slot for the FPSCR */
8540             }
8541 
8542             slo = cpu_ldl_data(env, faddr);
8543             shi = cpu_ldl_data(env, faddr + 4);
8544 
8545             dn = (uint64_t) shi << 32 | slo;
8546             *aa32_vfp_dreg(env, i / 2) = dn;
8547         }
8548         fpscr = cpu_ldl_data(env, fptr + 0x40);
8549         vfp_set_fpscr(env, fpscr);
8550     }
8551 
8552     env->v7m.control[M_REG_S] |= R_V7M_CONTROL_FPCA_MASK;
8553 }
8554 
8555 static bool v7m_push_stack(ARMCPU *cpu)
8556 {
8557     /* Do the "set up stack frame" part of exception entry,
8558      * similar to pseudocode PushStack().
8559      * Return true if we generate a derived exception (and so
8560      * should ignore further stack faults trying to process
8561      * that derived exception).
8562      */
8563     bool stacked_ok = true, limitviol = false;
8564     CPUARMState *env = &cpu->env;
8565     uint32_t xpsr = xpsr_read(env);
8566     uint32_t frameptr = env->regs[13];
8567     ARMMMUIdx mmu_idx = arm_mmu_idx(env);
8568     uint32_t framesize;
8569     bool nsacr_cp10 = extract32(env->v7m.nsacr, 10, 1);
8570 
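         /*
          * Frame sizes: 0x20 for the basic frame (r0-r3, r12, lr, pc, xPSR),
          * 0x68 when FP context is stacked (adds s0-s15, FPSCR and a reserved
          * word), 0xa8 when the Secure TS bit also needs space for s16-s31.
          */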
8571     if ((env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) &&
8572         (env->v7m.secure || nsacr_cp10)) {
8573         if (env->v7m.secure &&
8574             env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK) {
8575             framesize = 0xa8;
8576         } else {
8577             framesize = 0x68;
8578         }
8579     } else {
8580         framesize = 0x20;
8581     }
8582 
8583     /* Align stack pointer if the guest wants that */
8584     if ((frameptr & 4) &&
8585         (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKALIGN_MASK)) {
8586         frameptr -= 4;
8587         xpsr |= XPSR_SPREALIGN;
8588     }
8589 
8590     xpsr &= ~XPSR_SFPA;
8591     if (env->v7m.secure &&
8592         (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
8593         xpsr |= XPSR_SFPA;
8594     }
8595 
8596     frameptr -= framesize;
8597 
8598     if (arm_feature(env, ARM_FEATURE_V8)) {
8599         uint32_t limit = v7m_sp_limit(env);
8600 
8601         if (frameptr < limit) {
8602             /*
8603              * Stack limit failure: set SP to the limit value, and generate
8604              * STKOF UsageFault. Stack pushes below the limit must not be
8605              * performed. It is IMPDEF whether pushes above the limit are
8606              * performed; we choose not to.
8607              */
8608             qemu_log_mask(CPU_LOG_INT,
8609                           "...STKOF during stacking\n");
8610             env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
8611             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
8612                                     env->v7m.secure);
8613             env->regs[13] = limit;
8614             /*
8615              * We won't try to perform any further memory accesses but
8616              * we must continue through the following code to check for
8617              * permission faults during FPU state preservation, and we
8618              * must update FPCCR if lazy stacking is enabled.
8619              */
8620             limitviol = true;
8621             stacked_ok = false;
8622         }
8623     }
8624 
8625     /* Write as much of the stack frame as we can. If we fail a stack
8626      * write this will result in a derived exception being pended
8627      * (which may be taken in preference to the one we started with
8628      * if it has higher priority).
8629      */
8630     stacked_ok = stacked_ok &&
8631         v7m_stack_write(cpu, frameptr, env->regs[0], mmu_idx, STACK_NORMAL) &&
8632         v7m_stack_write(cpu, frameptr + 4, env->regs[1],
8633                         mmu_idx, STACK_NORMAL) &&
8634         v7m_stack_write(cpu, frameptr + 8, env->regs[2],
8635                         mmu_idx, STACK_NORMAL) &&
8636         v7m_stack_write(cpu, frameptr + 12, env->regs[3],
8637                         mmu_idx, STACK_NORMAL) &&
8638         v7m_stack_write(cpu, frameptr + 16, env->regs[12],
8639                         mmu_idx, STACK_NORMAL) &&
8640         v7m_stack_write(cpu, frameptr + 20, env->regs[14],
8641                         mmu_idx, STACK_NORMAL) &&
8642         v7m_stack_write(cpu, frameptr + 24, env->regs[15],
8643                         mmu_idx, STACK_NORMAL) &&
8644         v7m_stack_write(cpu, frameptr + 28, xpsr, mmu_idx, STACK_NORMAL);
8645 
8646     if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) {
8647         /* FPU is active, try to save its registers */
8648         bool fpccr_s = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
8649         bool lspact = env->v7m.fpccr[fpccr_s] & R_V7M_FPCCR_LSPACT_MASK;
8650 
8651         if (lspact && arm_feature(env, ARM_FEATURE_M_SECURITY)) {
8652             qemu_log_mask(CPU_LOG_INT,
8653                           "...SecureFault because LSPACT and FPCA both set\n");
8654             env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
8655             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
8656         } else if (!env->v7m.secure && !nsacr_cp10) {
8657             qemu_log_mask(CPU_LOG_INT,
8658                           "...Secure UsageFault with CFSR.NOCP because "
8659                           "NSACR.CP10 prevents stacking FP regs\n");
8660             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, M_REG_S);
8661             env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK;
8662         } else {
8663             if (!(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPEN_MASK)) {
8664                 /* Lazy stacking disabled, save registers now */
8665                 int i;
8666                 bool cpacr_pass = v7m_cpacr_pass(env, env->v7m.secure,
8667                                                  arm_current_el(env) != 0);
8668 
8669                 if (stacked_ok && !cpacr_pass) {
8670                     /*
8671                      * Take UsageFault if CPACR forbids access. The pseudocode
8672                      * here does a full CheckCPEnabled() but we know the NSACR
8673                      * check can never fail as we have already handled that.
8674                      */
8675                     qemu_log_mask(CPU_LOG_INT,
8676                                   "...UsageFault with CFSR.NOCP because "
8677                                   "CPACR.CP10 prevents stacking FP regs\n");
8678                     armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
8679                                             env->v7m.secure);
8680                     env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_NOCP_MASK;
8681                     stacked_ok = false;
8682                 }
8683 
8684                 for (i = 0; i < ((framesize == 0xa8) ? 32 : 16); i += 2) {
8685                     uint64_t dn = *aa32_vfp_dreg(env, i / 2);
8686                     uint32_t faddr = frameptr + 0x20 + 4 * i;
8687                     uint32_t slo = extract64(dn, 0, 32);
8688                     uint32_t shi = extract64(dn, 32, 32);
8689 
8690                     if (i >= 16) {
8691                         faddr += 8; /* skip the slot for the FPSCR */
8692                     }
8693                     stacked_ok = stacked_ok &&
8694                         v7m_stack_write(cpu, faddr, slo,
8695                                         mmu_idx, STACK_NORMAL) &&
8696                         v7m_stack_write(cpu, faddr + 4, shi,
8697                                         mmu_idx, STACK_NORMAL);
8698                 }
8699                 stacked_ok = stacked_ok &&
8700                     v7m_stack_write(cpu, frameptr + 0x60,
8701                                     vfp_get_fpscr(env), mmu_idx, STACK_NORMAL);
8702                 if (cpacr_pass) {
8703                     for (i = 0; i < ((framesize == 0xa8) ? 32 : 16); i += 2) {
8704                         *aa32_vfp_dreg(env, i / 2) = 0;
8705                     }
8706                     vfp_set_fpscr(env, 0);
8707                 }
8708             } else {
8709                 /* Lazy stacking enabled, save necessary info to stack later */
8710                 v7m_update_fpccr(env, frameptr + 0x20, true);
8711             }
8712         }
8713     }
8714 
8715     /*
8716      * If we broke a stack limit then SP was already updated earlier;
8717      * otherwise we update SP regardless of whether any of the stack
8718      * accesses failed or we took some other kind of fault.
8719      */
8720     if (!limitviol) {
8721         env->regs[13] = frameptr;
8722     }
8723 
8724     return !stacked_ok;
8725 }
8726 
8727 static void do_v7m_exception_exit(ARMCPU *cpu)
8728 {
8729     CPUARMState *env = &cpu->env;
8730     uint32_t excret;
8731     uint32_t xpsr, xpsr_mask;
8732     bool ufault = false;
8733     bool sfault = false;
8734     bool return_to_sp_process;
8735     bool return_to_handler;
8736     bool rettobase = false;
8737     bool exc_secure = false;
8738     bool return_to_secure;
8739     bool ftype;
8740     bool restore_s16_s31;
8741 
8742     /* If we're not in Handler mode then jumps to magic exception-exit
8743      * addresses don't have magic behaviour. However for the v8M
8744      * security extensions the magic secure-function-return has to
8745      * work in thread mode too, so to avoid doing an extra check in
8746      * the generated code we allow exception-exit magic to also cause the
8747      * internal exception and bring us here in thread mode. Correct code
8748      * will never try to do this (the following insn fetch will always
8749      * fault) so the overhead of having taken an unnecessary exception
8750      * doesn't matter.
8751      */
8752     if (!arm_v7m_is_handler_mode(env)) {
8753         return;
8754     }
8755 
8756     /* In the spec pseudocode ExceptionReturn() is called directly
8757      * from BXWritePC() and gets the full target PC value including
8758      * bit zero. In QEMU's implementation we treat it as a normal
8759      * jump-to-register (which is then caught later on), and so split
8760      * the target value up between env->regs[15] and env->thumb in
8761      * gen_bx(). Reconstitute it.
8762      */
8763     excret = env->regs[15];
8764     if (env->thumb) {
8765         excret |= 1;
8766     }
8767 
8768     qemu_log_mask(CPU_LOG_INT, "Exception return: magic PC %" PRIx32
8769                   " previous exception %d\n",
8770                   excret, env->v7m.exception);
8771 
8772     if ((excret & R_V7M_EXCRET_RES1_MASK) != R_V7M_EXCRET_RES1_MASK) {
8773         qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero high bits in exception "
8774                       "exit PC value 0x%" PRIx32 " are UNPREDICTABLE\n",
8775                       excret);
8776     }
8777 
8778     ftype = excret & R_V7M_EXCRET_FTYPE_MASK;
8779 
8780     if (!arm_feature(env, ARM_FEATURE_VFP) && !ftype) {
8781         qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero FTYPE in exception "
8782                       "exit PC value 0x%" PRIx32 " is UNPREDICTABLE "
8783                       "if FPU not present\n",
8784                       excret);
8785         ftype = true;
8786     }
8787 
8788     if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
8789         /* EXC_RETURN.ES validation check (R_SMFL). We must do this before
8790          * we pick which FAULTMASK to clear.
8791          */
8792         if (!env->v7m.secure &&
8793             ((excret & R_V7M_EXCRET_ES_MASK) ||
8794              !(excret & R_V7M_EXCRET_DCRS_MASK))) {
8795             sfault = 1;
8796             /* For all other purposes, treat ES as 0 (R_HXSR) */
8797             excret &= ~R_V7M_EXCRET_ES_MASK;
8798         }
8799         exc_secure = excret & R_V7M_EXCRET_ES_MASK;
8800     }
8801 
8802     if (env->v7m.exception != ARMV7M_EXCP_NMI) {
8803         /* Auto-clear FAULTMASK on return from other than NMI.
8804          * If the security extension is implemented then this only
8805          * happens if the raw execution priority is >= 0; the
8806          * value of the ES bit in the exception return value indicates
8807          * which security state's faultmask to clear. (v8M ARM ARM R_KBNF.)
8808          */
8809         if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
8810             if (armv7m_nvic_raw_execution_priority(env->nvic) >= 0) {
8811                 env->v7m.faultmask[exc_secure] = 0;
8812             }
8813         } else {
8814             env->v7m.faultmask[M_REG_NS] = 0;
8815         }
8816     }
8817 
8818     switch (armv7m_nvic_complete_irq(env->nvic, env->v7m.exception,
8819                                      exc_secure)) {
8820     case -1:
8821         /* attempt to exit an exception that isn't active */
8822         ufault = true;
8823         break;
8824     case 0:
8825         /* still an irq active now */
8826         break;
8827     case 1:
8828         /* we returned to base exception level, no nesting.
8829          * (In the pseudocode this is written using "NestedActivation != 1"
8830          * where we have 'rettobase == false'.)
8831          */
8832         rettobase = true;
8833         break;
8834     default:
8835         g_assert_not_reached();
8836     }
8837 
8838     return_to_handler = !(excret & R_V7M_EXCRET_MODE_MASK);
8839     return_to_sp_process = excret & R_V7M_EXCRET_SPSEL_MASK;
8840     return_to_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
8841         (excret & R_V7M_EXCRET_S_MASK);
8842 
8843     if (arm_feature(env, ARM_FEATURE_V8)) {
8844         if (!arm_feature(env, ARM_FEATURE_M_SECURITY)) {
8845             /* UNPREDICTABLE if S == 1 or DCRS == 0 or ES == 1 (R_XLCP);
8846              * we choose to take the UsageFault.
8847              */
8848             if ((excret & R_V7M_EXCRET_S_MASK) ||
8849                 (excret & R_V7M_EXCRET_ES_MASK) ||
8850                 !(excret & R_V7M_EXCRET_DCRS_MASK)) {
8851                 ufault = true;
8852             }
8853         }
8854         if (excret & R_V7M_EXCRET_RES0_MASK) {
8855             ufault = true;
8856         }
8857     } else {
8858         /* For v7M we only recognize certain combinations of the low bits */
8859         switch (excret & 0xf) {
8860         case 1: /* Return to Handler */
8861             break;
8862         case 13: /* Return to Thread using Process stack */
8863         case 9: /* Return to Thread using Main stack */
8864             /* We only need to check NONBASETHRDENA for v7M, because in
8865              * v8M this bit does not exist (it is RES1).
8866              */
8867             if (!rettobase &&
8868                 !(env->v7m.ccr[env->v7m.secure] &
8869                   R_V7M_CCR_NONBASETHRDENA_MASK)) {
8870                 ufault = true;
8871             }
8872             break;
8873         default:
8874             ufault = true;
8875         }
8876     }
8877 
8878     /*
8879      * Set CONTROL.SPSEL from excret.SPSEL. Since we're still in
8880      * Handler mode (and will be until we write the new XPSR.Interrupt
8881      * field) this does not switch around the current stack pointer.
8882      * We must do this before we do any kind of tailchaining, including
8883      * for the derived exceptions on integrity check failures, or we will
8884      * give the guest an incorrect EXCRET.SPSEL value on exception entry.
8885      */
8886     write_v7m_control_spsel_for_secstate(env, return_to_sp_process, exc_secure);
8887 
8888     /*
8889      * Clear scratch FP values left in caller saved registers; this
8890      * must happen before any kind of tail chaining.
8891      */
8892     if ((env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_CLRONRET_MASK) &&
8893         (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK)) {
8894         if (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK) {
8895             env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
8896             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
8897             qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
8898                           "stackframe: error during lazy state deactivation\n");
8899             v7m_exception_taken(cpu, excret, true, false);
8900             return;
8901         } else {
8902             /* Clear s0..s15 and FPSCR */
8903             int i;
8904 
8905             for (i = 0; i < 16; i += 2) {
8906                 *aa32_vfp_dreg(env, i / 2) = 0;
8907             }
8908             vfp_set_fpscr(env, 0);
8909         }
8910     }
8911 
8912     if (sfault) {
8913         env->v7m.sfsr |= R_V7M_SFSR_INVER_MASK;
8914         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
8915         qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
8916                       "stackframe: failed EXC_RETURN.ES validity check\n");
8917         v7m_exception_taken(cpu, excret, true, false);
8918         return;
8919     }
8920 
8921     if (ufault) {
8922         /* Bad exception return: instead of popping the exception
8923          * stack, directly take a usage fault on the current stack.
8924          */
8925         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
8926         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
8927         qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
8928                       "stackframe: failed exception return integrity check\n");
8929         v7m_exception_taken(cpu, excret, true, false);
8930         return;
8931     }
8932 
8933     /*
8934      * Tailchaining: if there is currently a pending exception that
8935      * is high enough priority to preempt execution at the level we're
8936      * about to return to, then just directly take that exception now,
8937      * avoiding an unstack-and-then-stack. Note that now we have
8938      * deactivated the previous exception by calling armv7m_nvic_complete_irq()
8939      * our current execution priority is already the execution priority we are
8940      * returning to -- none of the state we would unstack or set based on
8941      * the EXCRET value affects it.
8942      */
8943     if (armv7m_nvic_can_take_pending_exception(env->nvic)) {
8944         qemu_log_mask(CPU_LOG_INT, "...tailchaining to pending exception\n");
8945         v7m_exception_taken(cpu, excret, true, false);
8946         return;
8947     }
8948 
8949     switch_v7m_security_state(env, return_to_secure);
8950 
8951     {
8952         /* The stack pointer we should be reading the exception frame from
8953          * depends on bits in the magic exception return type value (and
8954          * for v8M isn't necessarily the stack pointer we will eventually
8955          * end up resuming execution with). Get a pointer to the location
8956          * in the CPU state struct where the SP we need is currently being
8957          * stored; we will use and modify it in place.
8958          * We use this limited C variable scope so we don't accidentally
8959          * use 'frame_sp_p' after we do something that makes it invalid.
8960          */
8961         uint32_t *frame_sp_p = get_v7m_sp_ptr(env,
8962                                               return_to_secure,
8963                                               !return_to_handler,
8964                                               return_to_sp_process);
8965         uint32_t frameptr = *frame_sp_p;
8966         bool pop_ok = true;
8967         ARMMMUIdx mmu_idx;
8968         bool return_to_priv = return_to_handler ||
8969             !(env->v7m.control[return_to_secure] & R_V7M_CONTROL_NPRIV_MASK);
8970 
8971         mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, return_to_secure,
8972                                                         return_to_priv);
8973 
8974         if (!QEMU_IS_ALIGNED(frameptr, 8) &&
8975             arm_feature(env, ARM_FEATURE_V8)) {
8976             qemu_log_mask(LOG_GUEST_ERROR,
8977                           "M profile exception return with non-8-aligned SP "
8978                           "for destination state is UNPREDICTABLE\n");
8979         }
8980 
8981         /* Do we need to pop callee-saved registers? */
8982         if (return_to_secure &&
8983             ((excret & R_V7M_EXCRET_ES_MASK) == 0 ||
8984              (excret & R_V7M_EXCRET_DCRS_MASK) == 0)) {
8985             uint32_t actual_sig;
8986 
8987             pop_ok = v7m_stack_read(cpu, &actual_sig, frameptr, mmu_idx);
8988 
8989             if (pop_ok && v7m_integrity_sig(env, excret) != actual_sig) {
8990                 /* Take a SecureFault on the current stack */
8991                 env->v7m.sfsr |= R_V7M_SFSR_INVIS_MASK;
8992                 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
8993                 qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
8994                               "stackframe: failed exception return integrity "
8995                               "signature check\n");
8996                 v7m_exception_taken(cpu, excret, true, false);
8997                 return;
8998             }
8999 
9000             pop_ok = pop_ok &&
9001                 v7m_stack_read(cpu, &env->regs[4], frameptr + 0x8, mmu_idx) &&
9002                 v7m_stack_read(cpu, &env->regs[5], frameptr + 0xc, mmu_idx) &&
9003                 v7m_stack_read(cpu, &env->regs[6], frameptr + 0x10, mmu_idx) &&
9004                 v7m_stack_read(cpu, &env->regs[7], frameptr + 0x14, mmu_idx) &&
9005                 v7m_stack_read(cpu, &env->regs[8], frameptr + 0x18, mmu_idx) &&
9006                 v7m_stack_read(cpu, &env->regs[9], frameptr + 0x1c, mmu_idx) &&
9007                 v7m_stack_read(cpu, &env->regs[10], frameptr + 0x20, mmu_idx) &&
9008                 v7m_stack_read(cpu, &env->regs[11], frameptr + 0x24, mmu_idx);
9009 
9010             frameptr += 0x28;
9011         }
9012 
9013         /* Pop registers */
9014         pop_ok = pop_ok &&
9015             v7m_stack_read(cpu, &env->regs[0], frameptr, mmu_idx) &&
9016             v7m_stack_read(cpu, &env->regs[1], frameptr + 0x4, mmu_idx) &&
9017             v7m_stack_read(cpu, &env->regs[2], frameptr + 0x8, mmu_idx) &&
9018             v7m_stack_read(cpu, &env->regs[3], frameptr + 0xc, mmu_idx) &&
9019             v7m_stack_read(cpu, &env->regs[12], frameptr + 0x10, mmu_idx) &&
9020             v7m_stack_read(cpu, &env->regs[14], frameptr + 0x14, mmu_idx) &&
9021             v7m_stack_read(cpu, &env->regs[15], frameptr + 0x18, mmu_idx) &&
9022             v7m_stack_read(cpu, &xpsr, frameptr + 0x1c, mmu_idx);
9023 
9024         if (!pop_ok) {
9025             /* v7m_stack_read() pended a fault, so take it (as a tail
9026              * chained exception on the same stack frame)
9027              */
9028             qemu_log_mask(CPU_LOG_INT, "...derived exception on unstacking\n");
9029             v7m_exception_taken(cpu, excret, true, false);
9030             return;
9031         }
9032 
9033         /* Returning from an exception with a PC with bit 0 set is defined
9034          * behaviour on v8M (bit 0 is ignored), but for v7M it was specified
9035          * to be UNPREDICTABLE. In practice actual v7M hardware seems to ignore
9036          * the lsbit, and there are several RTOSes out there which incorrectly
9037          * assume the r15 in the stack frame should be a Thumb-style "lsbit
9038          * indicates ARM/Thumb" value, so ignore the bit on v7M as well, but
9039          * complain about the badly behaved guest.
9040          */
9041         if (env->regs[15] & 1) {
9042             env->regs[15] &= ~1U;
9043             if (!arm_feature(env, ARM_FEATURE_V8)) {
9044                 qemu_log_mask(LOG_GUEST_ERROR,
9045                               "M profile return from interrupt with misaligned "
9046                               "PC is UNPREDICTABLE on v7M\n");
9047             }
9048         }
9049 
9050         if (arm_feature(env, ARM_FEATURE_V8)) {
9051             /* For v8M we have to check whether the xPSR exception field
9052              * matches the EXCRET value for return to handler/thread
9053              * before we commit to changing the SP and xPSR.
9054              */
9055             bool will_be_handler = (xpsr & XPSR_EXCP) != 0;
9056             if (return_to_handler != will_be_handler) {
9057                 /* Take an INVPC UsageFault on the current stack.
9058                  * By this point we will have switched to the security state
9059                  * for the background state, so this UsageFault will target
9060                  * that state.
9061                  */
9062                 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
9063                                         env->v7m.secure);
9064                 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
9065                 qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
9066                               "stackframe: failed exception return integrity "
9067                               "check\n");
9068                 v7m_exception_taken(cpu, excret, true, false);
9069                 return;
9070             }
9071         }
9072 
9073         if (!ftype) {
9074             /* FP present and we need to handle it */
9075             if (!return_to_secure &&
9076                 (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK)) {
9077                 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
9078                 env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
9079                 qemu_log_mask(CPU_LOG_INT,
9080                               "...taking SecureFault on existing stackframe: "
9081                               "Secure LSPACT set but exception return is "
9082                               "not to secure state\n");
9083                 v7m_exception_taken(cpu, excret, true, false);
9084                 return;
9085             }
9086 
9087             restore_s16_s31 = return_to_secure &&
9088                 (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK);
9089 
9090             if (env->v7m.fpccr[return_to_secure] & R_V7M_FPCCR_LSPACT_MASK) {
9091                 /* State in FPU is still valid, just clear LSPACT */
9092                 env->v7m.fpccr[return_to_secure] &= ~R_V7M_FPCCR_LSPACT_MASK;
9093             } else {
9094                 int i;
9095                 uint32_t fpscr;
9096                 bool cpacr_pass, nsacr_pass;
9097 
9098                 cpacr_pass = v7m_cpacr_pass(env, return_to_secure,
9099                                             return_to_priv);
9100                 nsacr_pass = return_to_secure ||
9101                     extract32(env->v7m.nsacr, 10, 1);
9102 
9103                 if (!cpacr_pass) {
9104                     armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
9105                                             return_to_secure);
9106                     env->v7m.cfsr[return_to_secure] |= R_V7M_CFSR_NOCP_MASK;
9107                     qemu_log_mask(CPU_LOG_INT,
9108                                   "...taking UsageFault on existing "
9109                                   "stackframe: CPACR.CP10 prevents unstacking "
9110                                   "FP regs\n");
9111                     v7m_exception_taken(cpu, excret, true, false);
9112                     return;
9113                 } else if (!nsacr_pass) {
9114                     armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, true);
9115                     env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_INVPC_MASK;
9116                     qemu_log_mask(CPU_LOG_INT,
9117                                   "...taking Secure UsageFault on existing "
9118                                   "stackframe: NSACR.CP10 prevents unstacking "
9119                                   "FP regs\n");
9120                     v7m_exception_taken(cpu, excret, true, false);
9121                     return;
9122                 }
9123 
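                     /*
                      * Unstack using the same layout v7m_push_stack() used:
                      * s0..s15 at frameptr+0x20, FPSCR at +0x60, and s16..s31
                      * (if restored) at +0x68.
                      */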
9124                 for (i = 0; i < (restore_s16_s31 ? 32 : 16); i += 2) {
9125                     uint32_t slo, shi;
9126                     uint64_t dn;
9127                     uint32_t faddr = frameptr + 0x20 + 4 * i;
9128 
9129                     if (i >= 16) {
9130                         faddr += 8; /* Skip the slot for the FPSCR */
9131                     }
9132 
9133                     pop_ok = pop_ok &&
9134                         v7m_stack_read(cpu, &slo, faddr, mmu_idx) &&
9135                         v7m_stack_read(cpu, &shi, faddr + 4, mmu_idx);
9136 
9137                     if (!pop_ok) {
9138                         break;
9139                     }
9140 
9141                     dn = (uint64_t)shi << 32 | slo;
9142                     *aa32_vfp_dreg(env, i / 2) = dn;
9143                 }
9144                 pop_ok = pop_ok &&
9145                     v7m_stack_read(cpu, &fpscr, frameptr + 0x60, mmu_idx);
9146                 if (pop_ok) {
9147                     vfp_set_fpscr(env, fpscr);
9148                 }
9149                 if (!pop_ok) {
9150                     /*
9151                      * These regs are 0 if security extension present;
9152                      * otherwise merely UNKNOWN. We zero always.
9153                      */
9154                     for (i = 0; i < (restore_s16_s31 ? 32 : 16); i += 2) {
9155                         *aa32_vfp_dreg(env, i / 2) = 0;
9156                     }
9157                     vfp_set_fpscr(env, 0);
9158                 }
9159             }
9160         }
9161         env->v7m.control[M_REG_S] = FIELD_DP32(env->v7m.control[M_REG_S],
9162                                                V7M_CONTROL, FPCA, !ftype);
9163 
9164         /* Commit to consuming the stack frame */
9165         frameptr += 0x20;
9166         if (!ftype) {
9167             frameptr += 0x48;
9168             if (restore_s16_s31) {
9169                 frameptr += 0x40;
9170             }
9171         }
9172         /* Undo stack alignment: the SPREALIGN bit indicates that the original
9173          * pre-exception SP was not 8-aligned and we added a padding word to
9174          * align it, so we undo this by ORing in the bit that raises it from
9175          * the current 8-aligned value back to the 8-unaligned value. (Adding 4
9176          * would work too, but a logical OR is how the pseudocode specifies it.)
9177          */
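        /*
         * Worked example: if the pre-exception SP was 0x200003EC, stacking
         * aligned it down to 0x200003E8 and set SPREALIGN; after the frame
         * is consumed the SP here is again 8-aligned, so ORing in bit 2
         * recovers the original 4-byte misalignment.
         */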
9178         if (xpsr & XPSR_SPREALIGN) {
9179             frameptr |= 4;
9180         }
9181         *frame_sp_p = frameptr;
9182     }
9183 
9184     xpsr_mask = ~(XPSR_SPREALIGN | XPSR_SFPA);
9185     if (!arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
9186         xpsr_mask &= ~XPSR_GE;
9187     }
9188     /* This xpsr_write() will invalidate frame_sp_p as it may switch stack */
9189     xpsr_write(env, xpsr, xpsr_mask);
9190 
9191     if (env->v7m.secure) {
9192         bool sfpa = xpsr & XPSR_SFPA;
9193 
9194         env->v7m.control[M_REG_S] = FIELD_DP32(env->v7m.control[M_REG_S],
9195                                                V7M_CONTROL, SFPA, sfpa);
9196     }
9197 
9198     /* The restored xPSR exception field will be zero if we're
9199      * resuming in Thread mode. If that doesn't match what the
9200      * exception return excret specified then this is a UsageFault.
9201      * v7M requires we make this check here; v8M did it earlier.
9202      */
9203     if (return_to_handler != arm_v7m_is_handler_mode(env)) {
9204         /* Take an INVPC UsageFault by pushing the stack again;
9205          * we know we're v7M so this is never a Secure UsageFault.
9206          */
9207         bool ignore_stackfaults;
9208 
9209         assert(!arm_feature(env, ARM_FEATURE_V8));
9210         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, false);
9211         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
9212         ignore_stackfaults = v7m_push_stack(cpu);
9213         qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on new stackframe: "
9214                       "failed exception return integrity check\n");
9215         v7m_exception_taken(cpu, excret, false, ignore_stackfaults);
9216         return;
9217     }
9218 
9219     /* Otherwise, we have a successful exception exit. */
9220     arm_clear_exclusive(env);
9221     qemu_log_mask(CPU_LOG_INT, "...successful exception return\n");
9222 }
9223 
9224 static bool do_v7m_function_return(ARMCPU *cpu)
9225 {
9226     /* v8M security extensions magic function return.
9227      * We may either:
9228      *  (1) throw an exception (longjump)
9229      *  (2) return true if we successfully handled the function return
9230      *  (3) return false if we failed a consistency check and have
9231      *      pended a UsageFault that needs to be taken now
9232      *
9233      * At this point the magic return value is split between env->regs[15]
9234      * and env->thumb. We don't bother to reconstitute it because we don't
9235      * need it (all values are handled the same way).
9236      */
9237     CPUARMState *env = &cpu->env;
9238     uint32_t newpc, newpsr, newpsr_exc;
9239 
9240     qemu_log_mask(CPU_LOG_INT, "...really v7M secure function return\n");
9241 
9242     {
9243         bool threadmode, spsel;
9244         TCGMemOpIdx oi;
9245         ARMMMUIdx mmu_idx;
9246         uint32_t *frame_sp_p;
9247         uint32_t frameptr;
9248 
9249         /* Pull the return address and IPSR from the Secure stack */
9250         threadmode = !arm_v7m_is_handler_mode(env);
9251         spsel = env->v7m.control[M_REG_S] & R_V7M_CONTROL_SPSEL_MASK;
9252 
9253         frame_sp_p = get_v7m_sp_ptr(env, true, threadmode, spsel);
9254         frameptr = *frame_sp_p;
9255 
9256         /* These loads may throw an exception (for MPU faults). We want to
9257          * do them as secure, so work out what MMU index that is.
9258          */
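        /*
         * The function-return stack frame is just two words: the real
         * return address at frameptr and the partial RETPSR at
         * frameptr + 4; the Secure SP is advanced past both below.
         */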
9259         mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
9260         oi = make_memop_idx(MO_LE, arm_to_core_mmu_idx(mmu_idx));
9261         newpc = helper_le_ldul_mmu(env, frameptr, oi, 0);
9262         newpsr = helper_le_ldul_mmu(env, frameptr + 4, oi, 0);
9263 
9264         /* Consistency checks on new IPSR */
9265         newpsr_exc = newpsr & XPSR_EXCP;
9266         if (!((env->v7m.exception == 0 && newpsr_exc == 0) ||
9267               (env->v7m.exception == 1 && newpsr_exc != 0))) {
9268             /* Pend the fault and tell our caller to take it */
9269             env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
9270             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
9271                                     env->v7m.secure);
9272             qemu_log_mask(CPU_LOG_INT,
9273                           "...taking INVPC UsageFault: "
9274                           "IPSR consistency check failed\n");
9275             return false;
9276         }
9277 
9278         *frame_sp_p = frameptr + 8;
9279     }
9280 
9281     /* This invalidates frame_sp_p */
9282     switch_v7m_security_state(env, true);
9283     env->v7m.exception = newpsr_exc;
9284     env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
9285     if (newpsr & XPSR_SFPA) {
9286         env->v7m.control[M_REG_S] |= R_V7M_CONTROL_SFPA_MASK;
9287     }
9288     xpsr_write(env, 0, XPSR_IT);
9289     env->thumb = newpc & 1;
9290     env->regs[15] = newpc & ~1;
9291 
9292     qemu_log_mask(CPU_LOG_INT, "...function return successful\n");
9293     return true;
9294 }
9295 
9296 static void arm_log_exception(int idx)
9297 {
9298     if (qemu_loglevel_mask(CPU_LOG_INT)) {
9299         const char *exc = NULL;
9300         static const char * const excnames[] = {
9301             [EXCP_UDEF] = "Undefined Instruction",
9302             [EXCP_SWI] = "SVC",
9303             [EXCP_PREFETCH_ABORT] = "Prefetch Abort",
9304             [EXCP_DATA_ABORT] = "Data Abort",
9305             [EXCP_IRQ] = "IRQ",
9306             [EXCP_FIQ] = "FIQ",
9307             [EXCP_BKPT] = "Breakpoint",
9308             [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
9309             [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
9310             [EXCP_HVC] = "Hypervisor Call",
9311             [EXCP_HYP_TRAP] = "Hypervisor Trap",
9312             [EXCP_SMC] = "Secure Monitor Call",
9313             [EXCP_VIRQ] = "Virtual IRQ",
9314             [EXCP_VFIQ] = "Virtual FIQ",
9315             [EXCP_SEMIHOST] = "Semihosting call",
9316             [EXCP_NOCP] = "v7M NOCP UsageFault",
9317             [EXCP_INVSTATE] = "v7M INVSTATE UsageFault",
9318             [EXCP_STKOF] = "v8M STKOF UsageFault",
9319             [EXCP_LAZYFP] = "v7M exception during lazy FP stacking",
9320             [EXCP_LSERR] = "v8M LSERR UsageFault",
9321             [EXCP_UNALIGNED] = "v7M UNALIGNED UsageFault",
9322         };
9323 
9324         if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
9325             exc = excnames[idx];
9326         }
9327         if (!exc) {
9328             exc = "unknown";
9329         }
9330         qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s]\n", idx, exc);
9331     }
9332 }
9333 
9334 static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx,
9335                                uint32_t addr, uint16_t *insn)
9336 {
9337     /* Load a 16-bit portion of a v7M instruction, returning true on success,
9338      * or false on failure (in which case we will have pended the appropriate
9339      * exception).
9340      * We need to do the instruction fetch's MPU and SAU checks
9341      * like this because there is no MMU index that would allow
9342      * doing the load with a single function call. Instead we must
9343      * first check that the security attributes permit the load
9344      * and that they don't mismatch on the two halves of the instruction,
9345      * and then we do the load as a secure load (ie using the security
9346      * attributes of the address, not the CPU, as architecturally required).
9347      */
9348     CPUState *cs = CPU(cpu);
9349     CPUARMState *env = &cpu->env;
9350     V8M_SAttributes sattrs = {};
9351     MemTxAttrs attrs = {};
9352     ARMMMUFaultInfo fi = {};
9353     MemTxResult txres;
9354     target_ulong page_size;
9355     hwaddr physaddr;
9356     int prot;
9357 
9358     v8m_security_lookup(env, addr, MMU_INST_FETCH, mmu_idx, &sattrs);
9359     if (!sattrs.nsc || sattrs.ns) {
9360         /* This must be the second half of the insn, and it straddles a
9361          * region boundary with the second half not being S&NSC.
9362          */
9363         env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
9364         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
9365         qemu_log_mask(CPU_LOG_INT,
9366                       "...really SecureFault with SFSR.INVEP\n");
9367         return false;
9368     }
9369     if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx,
9370                       &physaddr, &attrs, &prot, &page_size, &fi, NULL)) {
9371         /* the MPU lookup failed */
9372         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
9373         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure);
9374         qemu_log_mask(CPU_LOG_INT, "...really MemManage with CFSR.IACCVIOL\n");
9375         return false;
9376     }
9377     *insn = address_space_lduw_le(arm_addressspace(cs, attrs), physaddr,
9378                                  attrs, &txres);
9379     if (txres != MEMTX_OK) {
9380         env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
9381         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
9382         qemu_log_mask(CPU_LOG_INT, "...really BusFault with CFSR.IBUSERR\n");
9383         return false;
9384     }
9385     return true;
9386 }
9387 
9388 static bool v7m_handle_execute_nsc(ARMCPU *cpu)
9389 {
9390     /* Check whether this attempt to execute code in a Secure & NS-Callable
9391      * memory region is for an SG instruction; if so, then emulate the
9392      * effect of the SG instruction and return true. Otherwise pend
9393      * the correct kind of exception and return false.
9394      */
9395     CPUARMState *env = &cpu->env;
9396     ARMMMUIdx mmu_idx;
9397     uint16_t insn;
9398 
9399     /* We should never get here unless get_phys_addr_pmsav8() caused
9400      * an exception for NS executing in S&NSC memory.
9401      */
9402     assert(!env->v7m.secure);
9403     assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
9404 
9405     /* We want to do the MPU lookup as secure; work out what mmu_idx that is */
9406     mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
9407 
9408     if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15], &insn)) {
9409         return false;
9410     }
9411 
9412     if (!env->thumb) {
9413         goto gen_invep;
9414     }
9415 
9416     if (insn != 0xe97f) {
9417         /* Not an SG instruction first half (we choose the IMPDEF
9418          * early-SG-check option).
9419          */
9420         goto gen_invep;
9421     }
9422 
9423     if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15] + 2, &insn)) {
9424         return false;
9425     }
9426 
9427     if (insn != 0xe97f) {
9428         /* Not an SG instruction second half (yes, both halves of the SG
9429          * insn have the same hex value)
9430          */
9431         goto gen_invep;
9432     }
9433 
9434     /* OK, we have confirmed that we really have an SG instruction.
9435      * We know we're NS in S memory so don't need to repeat those checks.
9436      */
9437     qemu_log_mask(CPU_LOG_INT, "...really an SG instruction at 0x%08" PRIx32
9438                   ", executing it\n", env->regs[15]);
9439     env->regs[14] &= ~1;
9440     env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
9441     switch_v7m_security_state(env, true);
9442     xpsr_write(env, 0, XPSR_IT);
9443     env->regs[15] += 4;
9444     return true;
9445 
9446 gen_invep:
9447     env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
9448     armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
9449     qemu_log_mask(CPU_LOG_INT,
9450                   "...really SecureFault with SFSR.INVEP\n");
9451     return false;
9452 }
9453 
9454 void arm_v7m_cpu_do_interrupt(CPUState *cs)
9455 {
9456     ARMCPU *cpu = ARM_CPU(cs);
9457     CPUARMState *env = &cpu->env;
9458     uint32_t lr;
9459     bool ignore_stackfaults;
9460 
9461     arm_log_exception(cs->exception_index);
9462 
9463     /* For exceptions we just mark as pending on the NVIC, and let that
9464        handle it.  */
9465     switch (cs->exception_index) {
9466     case EXCP_UDEF:
9467         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
9468         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNDEFINSTR_MASK;
9469         break;
9470     case EXCP_NOCP:
9471     {
9472         /*
9473          * NOCP might be directed to something other than the current
9474          * security state if this fault is because of NSACR; we indicate
9475          * the target security state using exception.target_el.
9476          */
9477         int target_secstate;
9478 
9479         if (env->exception.target_el == 3) {
9480             target_secstate = M_REG_S;
9481         } else {
9482             target_secstate = env->v7m.secure;
9483         }
9484         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, target_secstate);
9485         env->v7m.cfsr[target_secstate] |= R_V7M_CFSR_NOCP_MASK;
9486         break;
9487     }
9488     case EXCP_INVSTATE:
9489         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
9490         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVSTATE_MASK;
9491         break;
9492     case EXCP_STKOF:
9493         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
9494         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
9495         break;
9496     case EXCP_LSERR:
9497         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
9498         env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
9499         break;
9500     case EXCP_UNALIGNED:
9501         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
9502         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNALIGNED_MASK;
9503         break;
9504     case EXCP_SWI:
9505         /* The PC already points to the next instruction.  */
9506         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC, env->v7m.secure);
9507         break;
9508     case EXCP_PREFETCH_ABORT:
9509     case EXCP_DATA_ABORT:
9510         /* Note that for M profile we don't have a guest facing FSR, but
9511          * the env->exception.fsr will be populated by the code that
9512          * raises the fault, in the A profile short-descriptor format.
9513          */
9514         switch (env->exception.fsr & 0xf) {
9515         case M_FAKE_FSR_NSC_EXEC:
9516             /* Exception generated when we try to execute code at an address
9517              * which is marked as Secure & Non-Secure Callable and the CPU
9518              * is in the Non-Secure state. The only instruction which can
9519              * be executed like this is SG (and that only if both halves of
9520              * the SG instruction have the same security attributes.)
9521              * Everything else must generate an INVEP SecureFault, so we
9522              * emulate the SG instruction here.
9523              */
9524             if (v7m_handle_execute_nsc(cpu)) {
9525                 return;
9526             }
9527             break;
9528         case M_FAKE_FSR_SFAULT:
9529             /* Various flavours of SecureFault for attempts to execute or
9530              * access data in the wrong security state.
9531              */
9532             switch (cs->exception_index) {
9533             case EXCP_PREFETCH_ABORT:
9534                 if (env->v7m.secure) {
9535                     env->v7m.sfsr |= R_V7M_SFSR_INVTRAN_MASK;
9536                     qemu_log_mask(CPU_LOG_INT,
9537                                   "...really SecureFault with SFSR.INVTRAN\n");
9538                 } else {
9539                     env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
9540                     qemu_log_mask(CPU_LOG_INT,
9541                                   "...really SecureFault with SFSR.INVEP\n");
9542                 }
9543                 break;
9544             case EXCP_DATA_ABORT:
9545                 /* This must be an NS access to S memory */
9546                 env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
9547                 qemu_log_mask(CPU_LOG_INT,
9548                               "...really SecureFault with SFSR.AUVIOL\n");
9549                 break;
9550             }
9551             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
9552             break;
9553         case 0x8: /* External Abort */
9554             switch (cs->exception_index) {
9555             case EXCP_PREFETCH_ABORT:
9556                 env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
9557                 qemu_log_mask(CPU_LOG_INT, "...with CFSR.IBUSERR\n");
9558                 break;
9559             case EXCP_DATA_ABORT:
9560                 env->v7m.cfsr[M_REG_NS] |=
9561                     (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK);
9562                 env->v7m.bfar = env->exception.vaddress;
9563                 qemu_log_mask(CPU_LOG_INT,
9564                               "...with CFSR.PRECISERR and BFAR 0x%x\n",
9565                               env->v7m.bfar);
9566                 break;
9567             }
9568             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
9569             break;
9570         default:
9571             /* All other FSR values are either MPU faults or "can't happen
9572              * for M profile" cases.
9573              */
9574             switch (cs->exception_index) {
9575             case EXCP_PREFETCH_ABORT:
9576                 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
9577                 qemu_log_mask(CPU_LOG_INT, "...with CFSR.IACCVIOL\n");
9578                 break;
9579             case EXCP_DATA_ABORT:
9580                 env->v7m.cfsr[env->v7m.secure] |=
9581                     (R_V7M_CFSR_DACCVIOL_MASK | R_V7M_CFSR_MMARVALID_MASK);
9582                 env->v7m.mmfar[env->v7m.secure] = env->exception.vaddress;
9583                 qemu_log_mask(CPU_LOG_INT,
9584                               "...with CFSR.DACCVIOL and MMFAR 0x%x\n",
9585                               env->v7m.mmfar[env->v7m.secure]);
9586                 break;
9587             }
9588             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM,
9589                                     env->v7m.secure);
9590             break;
9591         }
9592         break;
9593     case EXCP_BKPT:
9594         if (semihosting_enabled()) {
9595             int nr;
9596             nr = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env)) & 0xff;
9597             if (nr == 0xab) {
9598                 env->regs[15] += 2;
9599                 qemu_log_mask(CPU_LOG_INT,
9600                               "...handling as semihosting call 0x%x\n",
9601                               env->regs[0]);
9602                 env->regs[0] = do_arm_semihosting(env);
9603                 return;
9604             }
9605         }
9606         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG, false);
9607         break;
9608     case EXCP_IRQ:
9609         break;
9610     case EXCP_EXCEPTION_EXIT:
9611         if (env->regs[15] < EXC_RETURN_MIN_MAGIC) {
9612             /* Must be v8M security extension function return */
9613             assert(env->regs[15] >= FNC_RETURN_MIN_MAGIC);
9614             assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
9615             if (do_v7m_function_return(cpu)) {
9616                 return;
9617             }
9618         } else {
9619             do_v7m_exception_exit(cpu);
9620             return;
9621         }
9622         break;
9623     case EXCP_LAZYFP:
9624         /*
9625          * We already pended the specific exception in the NVIC in the
9626          * v7m_preserve_fp_state() helper function.
9627          */
9628         break;
9629     default:
9630         cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
9631         return; /* Never happens.  Keep compiler happy.  */
9632     }
9633 
9634     if (arm_feature(env, ARM_FEATURE_V8)) {
9635         lr = R_V7M_EXCRET_RES1_MASK |
9636             R_V7M_EXCRET_DCRS_MASK;
9637         /* The S bit indicates whether we should return to Secure
9638          * or NonSecure (ie our current state).
9639          * The ES bit indicates whether we're taking this exception
9640          * to Secure or NonSecure (ie our target state). We set it
9641          * later, in v7m_exception_taken().
9642          * The SPSEL bit is also set in v7m_exception_taken() for v8M.
9643          * This corresponds to the ARM ARM pseudocode for v8M setting
9644          * some LR bits in PushStack() and some in ExceptionTaken();
9645          * the distinction matters for the tailchain cases where we
9646          * can take an exception without pushing the stack.
9647          */
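        /*
         * For reference, the v8-M EXC_RETURN bit assignments behind the
         * R_V7M_EXCRET_* masks used here are: ES = bit 0, SPSEL = bit 2,
         * Mode = bit 3, FType = bit 4, DCRS = bit 5, S = bit 6, with the
         * remaining high-order bits reading as 1.
         */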
9648         if (env->v7m.secure) {
9649             lr |= R_V7M_EXCRET_S_MASK;
9650         }
9651         if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK)) {
9652             lr |= R_V7M_EXCRET_FTYPE_MASK;
9653         }
9654     } else {
9655         lr = R_V7M_EXCRET_RES1_MASK |
9656             R_V7M_EXCRET_S_MASK |
9657             R_V7M_EXCRET_DCRS_MASK |
9658             R_V7M_EXCRET_FTYPE_MASK |
9659             R_V7M_EXCRET_ES_MASK;
9660         if (env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK) {
9661             lr |= R_V7M_EXCRET_SPSEL_MASK;
9662         }
9663     }
9664     if (!arm_v7m_is_handler_mode(env)) {
9665         lr |= R_V7M_EXCRET_MODE_MASK;
9666     }
9667 
9668     ignore_stackfaults = v7m_push_stack(cpu);
9669     v7m_exception_taken(cpu, lr, false, ignore_stackfaults);
9670 }
9671 
9672 /* Function used to synchronize QEMU's AArch64 register set with AArch32
9673  * register set.  This is necessary when switching between AArch32 and AArch64
9674  * execution state.
9675  */
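/*
 * The mapping implemented below is:
 *   X0-X7   <- R0-R7
 *   X8-X12  <- R8-R12 (User/System bank)
 *   X13/X14 <- SP_usr / LR_usr      X15     <- SP_hyp
 *   X16/X17 <- LR_irq / SP_irq      X18/X19 <- LR_svc / SP_svc
 *   X20/X21 <- LR_abt / SP_abt      X22/X23 <- LR_und / SP_und
 *   X24-X30 <- R8_fiq-R14_fiq
 */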
9676 void aarch64_sync_32_to_64(CPUARMState *env)
9677 {
9678     int i;
9679     uint32_t mode = env->uncached_cpsr & CPSR_M;
9680 
9681     /* We can blanket copy R[0:7] to X[0:7] */
9682     for (i = 0; i < 8; i++) {
9683         env->xregs[i] = env->regs[i];
9684     }
9685 
9686     /* Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12.
9687      * Otherwise, they come from the banked user regs.
9688      */
9689     if (mode == ARM_CPU_MODE_FIQ) {
9690         for (i = 8; i < 13; i++) {
9691             env->xregs[i] = env->usr_regs[i - 8];
9692         }
9693     } else {
9694         for (i = 8; i < 13; i++) {
9695             env->xregs[i] = env->regs[i];
9696         }
9697     }
9698 
9699     /* Registers x13-x23 are the various mode SP and LR registers. Registers
9700      * r13 and r14 are only copied if we are in that mode, otherwise we copy
9701      * from the mode banked register.
9702      */
9703     if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
9704         env->xregs[13] = env->regs[13];
9705         env->xregs[14] = env->regs[14];
9706     } else {
9707         env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)];
9708         /* HYP is an exception in that it is copied from r14 */
9709         if (mode == ARM_CPU_MODE_HYP) {
9710             env->xregs[14] = env->regs[14];
9711         } else {
9712             env->xregs[14] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)];
9713         }
9714     }
9715 
9716     if (mode == ARM_CPU_MODE_HYP) {
9717         env->xregs[15] = env->regs[13];
9718     } else {
9719         env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)];
9720     }
9721 
9722     if (mode == ARM_CPU_MODE_IRQ) {
9723         env->xregs[16] = env->regs[14];
9724         env->xregs[17] = env->regs[13];
9725     } else {
9726         env->xregs[16] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)];
9727         env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)];
9728     }
9729 
9730     if (mode == ARM_CPU_MODE_SVC) {
9731         env->xregs[18] = env->regs[14];
9732         env->xregs[19] = env->regs[13];
9733     } else {
9734         env->xregs[18] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)];
9735         env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)];
9736     }
9737 
9738     if (mode == ARM_CPU_MODE_ABT) {
9739         env->xregs[20] = env->regs[14];
9740         env->xregs[21] = env->regs[13];
9741     } else {
9742         env->xregs[20] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)];
9743         env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)];
9744     }
9745 
9746     if (mode == ARM_CPU_MODE_UND) {
9747         env->xregs[22] = env->regs[14];
9748         env->xregs[23] = env->regs[13];
9749     } else {
9750         env->xregs[22] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)];
9751         env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)];
9752     }
9753 
9754     /* Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
9755      * mode, then we can copy from r8-r14.  Otherwise, we copy from the
9756      * FIQ bank for r8-r14.
9757      */
9758     if (mode == ARM_CPU_MODE_FIQ) {
9759         for (i = 24; i < 31; i++) {
9760             env->xregs[i] = env->regs[i - 16];   /* X[24:30] <- R[8:14] */
9761         }
9762     } else {
9763         for (i = 24; i < 29; i++) {
9764             env->xregs[i] = env->fiq_regs[i - 24];
9765         }
9766         env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)];
9767         env->xregs[30] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)];
9768     }
9769 
9770     env->pc = env->regs[15];
9771 }
9772 
9773 /* Function used to synchronize QEMU's AArch32 register set with AArch64
9774  * register set.  This is necessary when switching between AArch32 and AArch64
9775  * execution state.
9776  */
9777 void aarch64_sync_64_to_32(CPUARMState *env)
9778 {
9779     int i;
9780     uint32_t mode = env->uncached_cpsr & CPSR_M;
9781 
9782     /* We can blanket copy X[0:7] to R[0:7] */
9783     for (i = 0; i < 8; i++) {
9784         env->regs[i] = env->xregs[i];
9785     }
9786 
9787     /* Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12.
9788      * Otherwise, we copy x8-x12 into the banked user regs.
9789      */
9790     if (mode == ARM_CPU_MODE_FIQ) {
9791         for (i = 8; i < 13; i++) {
9792             env->usr_regs[i - 8] = env->xregs[i];
9793         }
9794     } else {
9795         for (i = 8; i < 13; i++) {
9796             env->regs[i] = env->xregs[i];
9797         }
9798     }
9799 
9800     /* Registers r13 & r14 depend on the current mode.
9801      * If we are in a given mode, we copy the corresponding x registers to r13
9802      * and r14.  Otherwise, we copy the x register to the banked r13 and r14
9803      * for the mode.
9804      */
9805     if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
9806         env->regs[13] = env->xregs[13];
9807         env->regs[14] = env->xregs[14];
9808     } else {
9809         env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13];
9810 
9811         /* HYP is an exception in that it does not have its own banked r14 but
9812          * shares the USR r14
9813          */
9814         if (mode == ARM_CPU_MODE_HYP) {
9815             env->regs[14] = env->xregs[14];
9816         } else {
9817             env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)] = env->xregs[14];
9818         }
9819     }
9820 
9821     if (mode == ARM_CPU_MODE_HYP) {
9822         env->regs[13] = env->xregs[15];
9823     } else {
9824         env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15];
9825     }
9826 
9827     if (mode == ARM_CPU_MODE_IRQ) {
9828         env->regs[14] = env->xregs[16];
9829         env->regs[13] = env->xregs[17];
9830     } else {
9831         env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16];
9832         env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17];
9833     }
9834 
9835     if (mode == ARM_CPU_MODE_SVC) {
9836         env->regs[14] = env->xregs[18];
9837         env->regs[13] = env->xregs[19];
9838     } else {
9839         env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18];
9840         env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19];
9841     }
9842 
9843     if (mode == ARM_CPU_MODE_ABT) {
9844         env->regs[14] = env->xregs[20];
9845         env->regs[13] = env->xregs[21];
9846     } else {
9847         env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20];
9848         env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21];
9849     }
9850 
9851     if (mode == ARM_CPU_MODE_UND) {
9852         env->regs[14] = env->xregs[22];
9853         env->regs[13] = env->xregs[23];
9854     } else {
9855         env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)] = env->xregs[22];
9856         env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23];
9857     }
9858 
9859     /* Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
9860      * mode, then we can copy to r8-r14.  Otherwise, we copy to the
9861      * FIQ bank for r8-r14.
9862      */
9863     if (mode == ARM_CPU_MODE_FIQ) {
9864         for (i = 24; i < 31; i++) {
9865             env->regs[i - 16] = env->xregs[i];   /* X[24:30] -> R[8:14] */
9866         }
9867     } else {
9868         for (i = 24; i < 29; i++) {
9869             env->fiq_regs[i - 24] = env->xregs[i];
9870         }
9871         env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29];
9872         env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30];
9873     }
9874 
9875     env->regs[15] = env->pc;
9876 }
9877 
9878 static void take_aarch32_exception(CPUARMState *env, int new_mode,
9879                                    uint32_t mask, uint32_t offset,
9880                                    uint32_t newpc)
9881 {
9882     /* Change the CPU state so as to actually take the exception. */
9883     switch_mode(env, new_mode);
9884     /*
9885      * For exceptions taken to AArch32 we must clear the SS bit in both
9886      * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now.
9887      */
9888     env->uncached_cpsr &= ~PSTATE_SS;
9889     env->spsr = cpsr_read(env);
9890     /* Clear IT bits.  */
9891     env->condexec_bits = 0;
9892     /* Switch to the new mode, and to the correct instruction set.  */
9893     env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
9894     /* Set new mode endianness */
9895     env->uncached_cpsr &= ~CPSR_E;
9896     if (env->cp15.sctlr_el[arm_current_el(env)] & SCTLR_EE) {
9897         env->uncached_cpsr |= CPSR_E;
9898     }
9899     /* J and IL must always be cleared for exception entry */
9900     env->uncached_cpsr &= ~(CPSR_IL | CPSR_J);
9901     env->daif |= mask;
9902 
9903     if (new_mode == ARM_CPU_MODE_HYP) {
9904         env->thumb = (env->cp15.sctlr_el[2] & SCTLR_TE) != 0;
9905         env->elr_el[2] = env->regs[15];
9906     } else {
9907         /*
9908          * This is not strictly accurate, as there was no c1_sys (SCTLR) on
9909          * V4T/V5; it does not matter, but strictly we need only guard Thumb mode on V4.
9910          */
9911         if (arm_feature(env, ARM_FEATURE_V4T)) {
9912             env->thumb =
9913                 (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0;
9914         }
9915         env->regs[14] = env->regs[15] + offset;
9916     }
9917     env->regs[15] = newpc;
9918 }
9919 
9920 static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs)
9921 {
9922     /*
9923      * Handle exception entry to Hyp mode; this is sufficiently
9924      * different to entry to other AArch32 modes that we handle it
9925      * separately here.
9926      *
9927      * The vector table entry used is always the 0x14 Hyp mode entry point,
9928      * unless this is an UNDEF/HVC/abort taken from Hyp to Hyp.
9929      * The offset applied to the preferred return address is always zero
9930      * (see DDI0487C.a section G1.12.3).
9931      * PSTATE A/I/F masks are set based only on the SCR.EA/IRQ/FIQ values.
9932      */
9933     uint32_t addr, mask;
9934     ARMCPU *cpu = ARM_CPU(cs);
9935     CPUARMState *env = &cpu->env;
9936 
9937     switch (cs->exception_index) {
9938     case EXCP_UDEF:
9939         addr = 0x04;
9940         break;
9941     case EXCP_SWI:
9942         addr = 0x14;
9943         break;
9944     case EXCP_BKPT:
9945         /* Fall through to prefetch abort.  */
9946     case EXCP_PREFETCH_ABORT:
9947         env->cp15.ifar_s = env->exception.vaddress;
9948         qemu_log_mask(CPU_LOG_INT, "...with HIFAR 0x%x\n",
9949                       (uint32_t)env->exception.vaddress);
9950         addr = 0x0c;
9951         break;
9952     case EXCP_DATA_ABORT:
9953         env->cp15.dfar_s = env->exception.vaddress;
9954         qemu_log_mask(CPU_LOG_INT, "...with HDFAR 0x%x\n",
9955                       (uint32_t)env->exception.vaddress);
9956         addr = 0x10;
9957         break;
9958     case EXCP_IRQ:
9959         addr = 0x18;
9960         break;
9961     case EXCP_FIQ:
9962         addr = 0x1c;
9963         break;
9964     case EXCP_HVC:
9965         addr = 0x08;
9966         break;
9967     case EXCP_HYP_TRAP:
9968         addr = 0x14;
        break;
9969     default:
9970         cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
9971     }
9972 
9973     if (cs->exception_index != EXCP_IRQ && cs->exception_index != EXCP_FIQ) {
9974         if (!arm_feature(env, ARM_FEATURE_V8)) {
9975             /*
9976              * QEMU syndrome values are v8-style. v7 has the IL bit
9977              * UNK/SBZP for "field not valid" cases, where v8 uses RES1.
9978              * If this is a v7 CPU, squash the IL bit in those cases.
9979              */
9980             if (cs->exception_index == EXCP_PREFETCH_ABORT ||
9981                 (cs->exception_index == EXCP_DATA_ABORT &&
9982                  !(env->exception.syndrome & ARM_EL_ISV)) ||
9983                 syn_get_ec(env->exception.syndrome) == EC_UNCATEGORIZED) {
9984                 env->exception.syndrome &= ~ARM_EL_IL;
9985             }
9986         }
9987         env->cp15.esr_el[2] = env->exception.syndrome;
9988     }
9989 
9990     if (arm_current_el(env) != 2 && addr < 0x14) {
9991         addr = 0x14;
9992     }
9993 
9994     mask = 0;
9995     if (!(env->cp15.scr_el3 & SCR_EA)) {
9996         mask |= CPSR_A;
9997     }
9998     if (!(env->cp15.scr_el3 & SCR_IRQ)) {
9999         mask |= CPSR_I;
10000     }
10001     if (!(env->cp15.scr_el3 & SCR_FIQ)) {
10002         mask |= CPSR_F;
10003     }
10004 
10005     addr += env->cp15.hvbar;
10006 
10007     take_aarch32_exception(env, ARM_CPU_MODE_HYP, mask, 0, addr);
10008 }
10009 
10010 static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
10011 {
10012     ARMCPU *cpu = ARM_CPU(cs);
10013     CPUARMState *env = &cpu->env;
10014     uint32_t addr;
10015     uint32_t mask;
10016     int new_mode;
10017     uint32_t offset;
10018     uint32_t moe;
10019 
10020     /* If this is a debug exception we must update the DBGDSCR.MOE bits */
10021     switch (syn_get_ec(env->exception.syndrome)) {
10022     case EC_BREAKPOINT:
10023     case EC_BREAKPOINT_SAME_EL:
10024         moe = 1;
10025         break;
10026     case EC_WATCHPOINT:
10027     case EC_WATCHPOINT_SAME_EL:
10028         moe = 10;
10029         break;
10030     case EC_AA32_BKPT:
10031         moe = 3;
10032         break;
10033     case EC_VECTORCATCH:
10034         moe = 5;
10035         break;
10036     default:
10037         moe = 0;
10038         break;
10039     }
10040 
10041     if (moe) {
10042         env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe);
10043     }
10044 
10045     if (env->exception.target_el == 2) {
10046         arm_cpu_do_interrupt_aarch32_hyp(cs);
10047         return;
10048     }
10049 
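    /*
     * Work out the vector table entry (addr), the mode to take the exception
     * in (new_mode), the return-address adjustment (offset) and which CPSR
     * interrupt masks to set. The offsets correspond to the AArch32 vector
     * table layout: 0x04 Undefined Instruction, 0x08 Supervisor Call,
     * 0x0c Prefetch Abort, 0x10 Data Abort, 0x18 IRQ, 0x1c FIQ.
     */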
10050     switch (cs->exception_index) {
10051     case EXCP_UDEF:
10052         new_mode = ARM_CPU_MODE_UND;
10053         addr = 0x04;
10054         mask = CPSR_I;
10055         if (env->thumb) {
10056             offset = 2;
10057         } else {
10058             offset = 4;
        }
10059         break;
10060     case EXCP_SWI:
10061         new_mode = ARM_CPU_MODE_SVC;
10062         addr = 0x08;
10063         mask = CPSR_I;
10064         /* The PC already points to the next instruction.  */
10065         offset = 0;
10066         break;
10067     case EXCP_BKPT:
10068         /* Fall through to prefetch abort.  */
10069     case EXCP_PREFETCH_ABORT:
10070         A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr);
10071         A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress);
10072         qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n",
10073                       env->exception.fsr, (uint32_t)env->exception.vaddress);
10074         new_mode = ARM_CPU_MODE_ABT;
10075         addr = 0x0c;
10076         mask = CPSR_A | CPSR_I;
10077         offset = 4;
10078         break;
10079     case EXCP_DATA_ABORT:
10080         A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
10081         A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress);
10082         qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
10083                       env->exception.fsr,
10084                       (uint32_t)env->exception.vaddress);
10085         new_mode = ARM_CPU_MODE_ABT;
10086         addr = 0x10;
10087         mask = CPSR_A | CPSR_I;
10088         offset = 8;
10089         break;
10090     case EXCP_IRQ:
10091         new_mode = ARM_CPU_MODE_IRQ;
10092         addr = 0x18;
10093         /* Disable IRQ and imprecise data aborts.  */
10094         mask = CPSR_A | CPSR_I;
10095         offset = 4;
10096         if (env->cp15.scr_el3 & SCR_IRQ) {
10097             /* IRQ routed to monitor mode */
10098             new_mode = ARM_CPU_MODE_MON;
10099             mask |= CPSR_F;
10100         }
10101         break;
10102     case EXCP_FIQ:
10103         new_mode = ARM_CPU_MODE_FIQ;
10104         addr = 0x1c;
10105         /* Disable FIQ, IRQ and imprecise data aborts.  */
10106         mask = CPSR_A | CPSR_I | CPSR_F;
10107         if (env->cp15.scr_el3 & SCR_FIQ) {
10108             /* FIQ routed to monitor mode */
10109             new_mode = ARM_CPU_MODE_MON;
10110         }
10111         offset = 4;
10112         break;
10113     case EXCP_VIRQ:
10114         new_mode = ARM_CPU_MODE_IRQ;
10115         addr = 0x18;
10116         /* Disable IRQ and imprecise data aborts.  */
10117         mask = CPSR_A | CPSR_I;
10118         offset = 4;
10119         break;
10120     case EXCP_VFIQ:
10121         new_mode = ARM_CPU_MODE_FIQ;
10122         addr = 0x1c;
10123         /* Disable FIQ, IRQ and imprecise data aborts.  */
10124         mask = CPSR_A | CPSR_I | CPSR_F;
10125         offset = 4;
10126         break;
10127     case EXCP_SMC:
10128         new_mode = ARM_CPU_MODE_MON;
10129         addr = 0x08;
10130         mask = CPSR_A | CPSR_I | CPSR_F;
10131         offset = 0;
10132         break;
10133     default:
10134         cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
10135         return; /* Never happens.  Keep compiler happy.  */
10136     }
10137 
10138     if (new_mode == ARM_CPU_MODE_MON) {
10139         addr += env->cp15.mvbar;
10140     } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
10141         /* High vectors. When enabled, base address cannot be remapped. */
10142         addr += 0xffff0000;
10143     } else {
10144         /* ARM v7 architectures provide a vector base address register to remap
10145          * the interrupt vector table.
10146          * This register is only honoured in non-monitor mode, and is banked.
10147          * Note: only bits 31:5 are valid.
10148          */
10149         addr += A32_BANKED_CURRENT_REG_GET(env, vbar);
10150     }
10151 
10152     if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
10153         env->cp15.scr_el3 &= ~SCR_NS;
10154     }
10155 
10156     take_aarch32_exception(env, new_mode, mask, offset, addr);
10157 }
10158 
10159 /* Handle exception entry to a target EL which is using AArch64 */
10160 static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
10161 {
10162     ARMCPU *cpu = ARM_CPU(cs);
10163     CPUARMState *env = &cpu->env;
10164     unsigned int new_el = env->exception.target_el;
10165     target_ulong addr = env->cp15.vbar_el[new_el];
10166     unsigned int new_mode = aarch64_pstate_mode(new_el, true);
10167     unsigned int cur_el = arm_current_el(env);
10168 
10169     /*
10170      * Note that new_el can never be 0.  If cur_el is 0, then
10171      * el0_a64 is is_a64(), else el0_a64 is ignored.
10172      */
10173     aarch64_sve_change_el(env, cur_el, new_el, is_a64(env));
10174 
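    /*
     * Select the vector table entry. The base offset within VBAR_ELx depends
     * on where the exception came from: 0x000 for the current EL using
     * SP_EL0, 0x200 for the current EL using SP_ELx, 0x400 for a lower EL
     * running AArch64 and 0x600 for a lower EL running AArch32. The switch
     * below then adds 0x80 for IRQ and 0x100 for FIQ entries.
     */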
10175     if (cur_el < new_el) {
10176         /* Entry vector offset depends on whether the implemented EL
10177          * immediately lower than the target level is using AArch32 or AArch64
10178          */
10179         bool is_aa64;
10180 
10181         switch (new_el) {
10182         case 3:
10183             is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0;
10184             break;
10185         case 2:
10186             is_aa64 = (env->cp15.hcr_el2 & HCR_RW) != 0;
10187             break;
10188         case 1:
10189             is_aa64 = is_a64(env);
10190             break;
10191         default:
10192             g_assert_not_reached();
10193         }
10194 
10195         if (is_aa64) {
10196             addr += 0x400;
10197         } else {
10198             addr += 0x600;
10199         }
10200     } else if (pstate_read(env) & PSTATE_SP) {
10201         addr += 0x200;
10202     }
10203 
10204     switch (cs->exception_index) {
10205     case EXCP_PREFETCH_ABORT:
10206     case EXCP_DATA_ABORT:
10207         env->cp15.far_el[new_el] = env->exception.vaddress;
10208         qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
10209                       env->cp15.far_el[new_el]);
10210         /* fall through */
10211     case EXCP_BKPT:
10212     case EXCP_UDEF:
10213     case EXCP_SWI:
10214     case EXCP_HVC:
10215     case EXCP_HYP_TRAP:
10216     case EXCP_SMC:
10217         if (syn_get_ec(env->exception.syndrome) == EC_ADVSIMDFPACCESSTRAP) {
10218             /*
10219              * QEMU internal FP/SIMD syndromes from AArch32 include the
10220              * TA and coproc fields which are only exposed if the exception
10221              * is taken to AArch32 Hyp mode. Mask them out to get a valid
10222              * AArch64 format syndrome.
10223              */
10224             env->exception.syndrome &= ~MAKE_64BIT_MASK(0, 20);
10225         }
10226         env->cp15.esr_el[new_el] = env->exception.syndrome;
10227         break;
10228     case EXCP_IRQ:
10229     case EXCP_VIRQ:
10230         addr += 0x80;
10231         break;
10232     case EXCP_FIQ:
10233     case EXCP_VFIQ:
10234         addr += 0x100;
10235         break;
10236     case EXCP_SEMIHOST:
10237         qemu_log_mask(CPU_LOG_INT,
10238                       "...handling as semihosting call 0x%" PRIx64 "\n",
10239                       env->xregs[0]);
10240         env->xregs[0] = do_arm_semihosting(env);
10241         return;
10242     default:
10243         cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
10244     }
10245 
10246     if (is_a64(env)) {
10247         env->banked_spsr[aarch64_banked_spsr_index(new_el)] = pstate_read(env);
10248         aarch64_save_sp(env, arm_current_el(env));
10249         env->elr_el[new_el] = env->pc;
10250     } else {
10251         env->banked_spsr[aarch64_banked_spsr_index(new_el)] = cpsr_read(env);
10252         env->elr_el[new_el] = env->regs[15];
10253 
10254         aarch64_sync_32_to_64(env);
10255 
10256         env->condexec_bits = 0;
10257     }
10258     qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n",
10259                   env->elr_el[new_el]);
10260 
10261     pstate_write(env, PSTATE_DAIF | new_mode);
10262     env->aarch64 = 1;
10263     aarch64_restore_sp(env, new_el);
10264 
10265     env->pc = addr;
10266 
10267     qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n",
10268                   new_el, env->pc, pstate_read(env));
10269 }
10270 
10271 static inline bool check_for_semihosting(CPUState *cs)
10272 {
10273     /* Check whether this exception is a semihosting call; if so
10274      * then handle it and return true; otherwise return false.
10275      */
10276     ARMCPU *cpu = ARM_CPU(cs);
10277     CPUARMState *env = &cpu->env;
10278 
10279     if (is_a64(env)) {
10280         if (cs->exception_index == EXCP_SEMIHOST) {
10281             /* This is always the 64-bit semihosting exception.
10282              * The "is this usermode" and "is semihosting enabled"
10283              * checks have been done at translate time.
10284              */
10285             qemu_log_mask(CPU_LOG_INT,
10286                           "...handling as semihosting call 0x%" PRIx64 "\n",
10287                           env->xregs[0]);
10288             env->xregs[0] = do_arm_semihosting(env);
10289             return true;
10290         }
10291         return false;
10292     } else {
10293         uint32_t imm;
10294 
10295         /* Only intercept calls from privileged modes, to provide some
10296          * semblance of security.
10297          */
10298         if (cs->exception_index != EXCP_SEMIHOST &&
10299             (!semihosting_enabled() ||
10300              ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR))) {
10301             return false;
10302         }
10303 
10304         switch (cs->exception_index) {
10305         case EXCP_SEMIHOST:
10306             /* This is always a semihosting call; the "is this usermode"
10307              * and "is semihosting enabled" checks have been done at
10308              * translate time.
10309              */
10310             break;
10311         case EXCP_SWI:
10312             /* Check for semihosting interrupt.  */
10313             if (env->thumb) {
10314                 imm = arm_lduw_code(env, env->regs[15] - 2, arm_sctlr_b(env))
10315                     & 0xff;
10316                 if (imm == 0xab) {
10317                     break;
10318                 }
10319             } else {
10320                 imm = arm_ldl_code(env, env->regs[15] - 4, arm_sctlr_b(env))
10321                     & 0xffffff;
10322                 if (imm == 0x123456) {
10323                     break;
10324                 }
10325             }
10326             return false;
10327         case EXCP_BKPT:
10328             /* See if this is a semihosting syscall.  */
10329             if (env->thumb) {
10330                 imm = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env))
10331                     & 0xff;
10332                 if (imm == 0xab) {
10333                     env->regs[15] += 2;
10334                     break;
10335                 }
10336             }
10337             return false;
10338         default:
10339             return false;
10340         }
10341 
10342         qemu_log_mask(CPU_LOG_INT,
10343                       "...handling as semihosting call 0x%x\n",
10344                       env->regs[0]);
10345         env->regs[0] = do_arm_semihosting(env);
10346         return true;
10347     }
10348 }
10349 
10350 /* Handle a CPU exception for A and R profile CPUs.
10351  * Do any appropriate logging, handle PSCI calls, and then hand off
10352  * to the AArch64-entry or AArch32-entry function depending on the
10353  * target exception level's register width.
10354  */
10355 void arm_cpu_do_interrupt(CPUState *cs)
10356 {
10357     ARMCPU *cpu = ARM_CPU(cs);
10358     CPUARMState *env = &cpu->env;
10359     unsigned int new_el = env->exception.target_el;
10360 
10361     assert(!arm_feature(env, ARM_FEATURE_M));
10362 
10363     arm_log_exception(cs->exception_index);
10364     qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env),
10365                   new_el);
10366     if (qemu_loglevel_mask(CPU_LOG_INT)
10367         && !excp_is_internal(cs->exception_index)) {
10368         qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n",
10369                       syn_get_ec(env->exception.syndrome),
10370                       env->exception.syndrome);
10371     }
10372 
10373     if (arm_is_psci_call(cpu, cs->exception_index)) {
10374         arm_handle_psci_call(cpu);
10375         qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
10376         return;
10377     }
10378 
10379     /* Semihosting semantics depend on the register width of the
10380      * code that caused the exception, not the target exception level,
10381      * so must be handled here.
10382      */
10383     if (check_for_semihosting(cs)) {
10384         return;
10385     }
10386 
10387     /* Hooks may change global state so BQL should be held, also the
10388      * BQL needs to be held for any modification of
10389      * cs->interrupt_request.
10390      */
10391     g_assert(qemu_mutex_iothread_locked());
10392 
10393     arm_call_pre_el_change_hook(cpu);
10394 
10395     assert(!excp_is_internal(cs->exception_index));
10396     if (arm_el_is_aa64(env, new_el)) {
10397         arm_cpu_do_interrupt_aarch64(cs);
10398     } else {
10399         arm_cpu_do_interrupt_aarch32(cs);
10400     }
10401 
10402     arm_call_el_change_hook(cpu);
10403 
10404     if (!kvm_enabled()) {
10405         cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
10406     }
10407 }
10408 #endif /* !CONFIG_USER_ONLY */
10409 
10410 /* Return the exception level which controls this address translation regime */
10411 static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
10412 {
10413     switch (mmu_idx) {
10414     case ARMMMUIdx_S2NS:
10415     case ARMMMUIdx_S1E2:
10416         return 2;
10417     case ARMMMUIdx_S1E3:
10418         return 3;
10419     case ARMMMUIdx_S1SE0:
10420         return arm_el_is_aa64(env, 3) ? 1 : 3;
10421     case ARMMMUIdx_S1SE1:
10422     case ARMMMUIdx_S1NSE0:
10423     case ARMMMUIdx_S1NSE1:
10424     case ARMMMUIdx_MPrivNegPri:
10425     case ARMMMUIdx_MUserNegPri:
10426     case ARMMMUIdx_MPriv:
10427     case ARMMMUIdx_MUser:
10428     case ARMMMUIdx_MSPrivNegPri:
10429     case ARMMMUIdx_MSUserNegPri:
10430     case ARMMMUIdx_MSPriv:
10431     case ARMMMUIdx_MSUser:
10432         return 1;
10433     default:
10434         g_assert_not_reached();
10435     }
10436 }
10437 
10438 #ifndef CONFIG_USER_ONLY
10439 
10440 /* Return the SCTLR value which controls this address translation regime */
10441 static inline uint32_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
10442 {
10443     return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
10444 }
10445 
10446 /* Return true if the specified stage of address translation is disabled */
10447 static inline bool regime_translation_disabled(CPUARMState *env,
10448                                                ARMMMUIdx mmu_idx)
10449 {
10450     if (arm_feature(env, ARM_FEATURE_M)) {
10451         switch (env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] &
10452                 (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
10453         case R_V7M_MPU_CTRL_ENABLE_MASK:
10454             /* Enabled, but not for HardFault and NMI */
10455             return mmu_idx & ARM_MMU_IDX_M_NEGPRI;
10456         case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK:
10457             /* Enabled for all cases */
10458             return false;
10459         case 0:
10460         default:
10461             /* HFNMIENA set and ENABLE clear is UNPREDICTABLE, but
10462              * we warned about that in armv7m_nvic.c when the guest set it.
10463              */
10464             return true;
10465         }
10466     }
10467 
10468     if (mmu_idx == ARMMMUIdx_S2NS) {
10469         /* HCR.DC means HCR.VM behaves as 1 */
10470         return (env->cp15.hcr_el2 & (HCR_DC | HCR_VM)) == 0;
10471     }
10472 
10473     if (env->cp15.hcr_el2 & HCR_TGE) {
10474         /* TGE means that NS EL0/1 act as if SCTLR_EL1.M is zero */
10475         if (!regime_is_secure(env, mmu_idx) && regime_el(env, mmu_idx) == 1) {
10476             return true;
10477         }
10478     }
10479 
10480     if ((env->cp15.hcr_el2 & HCR_DC) &&
10481         (mmu_idx == ARMMMUIdx_S1NSE0 || mmu_idx == ARMMMUIdx_S1NSE1)) {
10482         /* HCR.DC means SCTLR_EL1.M behaves as 0 */
10483         return true;
10484     }
10485 
10486     return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
10487 }
10488 
10489 static inline bool regime_translation_big_endian(CPUARMState *env,
10490                                                  ARMMMUIdx mmu_idx)
10491 {
10492     return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
10493 }
10494 
10495 /* Return the TTBR associated with this translation regime */
10496 static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx,
10497                                    int ttbrn)
10498 {
10499     if (mmu_idx == ARMMMUIdx_S2NS) {
10500         return env->cp15.vttbr_el2;
10501     }
10502     if (ttbrn == 0) {
10503         return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
10504     } else {
10505         return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
10506     }
10507 }
10508 
10509 #endif /* !CONFIG_USER_ONLY */
10510 
10511 /* Return the TCR controlling this translation regime */
10512 static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
10513 {
10514     if (mmu_idx == ARMMMUIdx_S2NS) {
10515         return &env->cp15.vtcr_el2;
10516     }
10517     return &env->cp15.tcr_el[regime_el(env, mmu_idx)];
10518 }
10519 
10520 /* Convert a possible stage1+2 MMU index into the appropriate
10521  * stage 1 MMU index
10522  */
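/*
 * The additive conversion below relies on ARMMMUIdx_S1NSE0/S1NSE1 sitting at
 * a constant offset from ARMMMUIdx_S12NSE0/S12NSE1 in the ARMMMUIdx
 * enumeration, so a single delta maps both indexes.
 */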
10523 static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
10524 {
10525     if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
10526         mmu_idx += (ARMMMUIdx_S1NSE0 - ARMMMUIdx_S12NSE0);
10527     }
10528     return mmu_idx;
10529 }
10530 
10531 /* Return true if the translation regime is using LPAE format page tables */
10532 static inline bool regime_using_lpae_format(CPUARMState *env,
10533                                             ARMMMUIdx mmu_idx)
10534 {
10535     int el = regime_el(env, mmu_idx);
10536     if (el == 2 || arm_el_is_aa64(env, el)) {
10537         return true;
10538     }
10539     if (arm_feature(env, ARM_FEATURE_LPAE)
10540         && (regime_tcr(env, mmu_idx)->raw_tcr & TTBCR_EAE)) {
10541         return true;
10542     }
10543     return false;
10544 }
10545 
10546 /* Returns true if the stage 1 translation regime is using LPAE format page
10547  * tables. Used when raising alignment exceptions, whose FSR changes depending
10548  * on whether the long or short descriptor format is in use. */
10549 bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
10550 {
10551     mmu_idx = stage_1_mmu_idx(mmu_idx);
10552 
10553     return regime_using_lpae_format(env, mmu_idx);
10554 }
10555 
10556 #ifndef CONFIG_USER_ONLY
10557 static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
10558 {
10559     switch (mmu_idx) {
10560     case ARMMMUIdx_S1SE0:
10561     case ARMMMUIdx_S1NSE0:
10562     case ARMMMUIdx_MUser:
10563     case ARMMMUIdx_MSUser:
10564     case ARMMMUIdx_MUserNegPri:
10565     case ARMMMUIdx_MSUserNegPri:
10566         return true;
10567     default:
10568         return false;
10569     case ARMMMUIdx_S12NSE0:
10570     case ARMMMUIdx_S12NSE1:
10571         g_assert_not_reached();
10572     }
10573 }
10574 
10575 /* Translate section/page access permissions to page
10576  * R/W protection flags
10577  *
10578  * @env:         CPUARMState
10579  * @mmu_idx:     MMU index indicating required translation regime
10580  * @ap:          The 3-bit access permissions (AP[2:0])
10581  * @domain_prot: The 2-bit domain access permissions
10582  */
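/*
 * Summary of the short-descriptor AP[2:0] encodings handled below:
 *   0  no access on v7 (SCTLR.S/R-dependent on earlier architectures)
 *   1  privileged read/write, user no access
 *   2  privileged read/write, user read-only
 *   3  read/write at any privilege
 *   4  reserved (no access)
 *   5  privileged read-only, user no access
 *   6  read-only at any privilege
 *   7  read-only at any privilege (v6K and later; reserved before)
 */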
10583 static inline int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx,
10584                                 int ap, int domain_prot)
10585 {
10586     bool is_user = regime_is_user(env, mmu_idx);
10587 
10588     if (domain_prot == 3) {
10589         return PAGE_READ | PAGE_WRITE;
10590     }
10591 
10592     switch (ap) {
10593     case 0:
10594         if (arm_feature(env, ARM_FEATURE_V7)) {
10595             return 0;
10596         }
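              /* Pre-v7 only: the legacy SCTLR.S (System) and SCTLR.R (ROM)
               * protection bits can grant read access even though AP == 0.
               */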
10597         switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) {
10598         case SCTLR_S:
10599             return is_user ? 0 : PAGE_READ;
10600         case SCTLR_R:
10601             return PAGE_READ;
10602         default:
10603             return 0;
10604         }
10605     case 1:
10606         return is_user ? 0 : PAGE_READ | PAGE_WRITE;
10607     case 2:
10608         if (is_user) {
10609             return PAGE_READ;
10610         } else {
10611             return PAGE_READ | PAGE_WRITE;
10612         }
10613     case 3:
10614         return PAGE_READ | PAGE_WRITE;
10615     case 4: /* Reserved.  */
10616         return 0;
10617     case 5:
10618         return is_user ? 0 : PAGE_READ;
10619     case 6:
10620         return PAGE_READ;
10621     case 7:
10622         if (!arm_feature(env, ARM_FEATURE_V6K)) {
10623             return 0;
10624         }
10625         return PAGE_READ;
10626     default:
10627         g_assert_not_reached();
10628     }
10629 }
10630 
10631 /* Translate section/page access permissions to page
10632  * R/W protection flags.
10633  *
10634  * @ap:      The 2-bit simple AP (AP[2:1])
10635  * @is_user: TRUE if accessing from PL0
10636  */
10637 static inline int simple_ap_to_rw_prot_is_user(int ap, bool is_user)
10638 {
10639     switch (ap) {
10640     case 0:
10641         return is_user ? 0 : PAGE_READ | PAGE_WRITE;
10642     case 1:
10643         return PAGE_READ | PAGE_WRITE;
10644     case 2:
10645         return is_user ? 0 : PAGE_READ;
10646     case 3:
10647         return PAGE_READ;
10648     default:
10649         g_assert_not_reached();
10650     }
10651 }
10652 
10653 static inline int
10654 simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
10655 {
10656     return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx));
10657 }
10658 
10659 /* Translate S2 section/page access permissions to protection flags
10660  *
10661  * @env:     CPUARMState
10662  * @s2ap:    The 2-bit stage2 access permissions (S2AP)
10663  * @xn:      XN (execute-never) bit
10664  */
10665 static int get_S2prot(CPUARMState *env, int s2ap, int xn)
10666 {
10667     int prot = 0;
10668 
10669     if (s2ap & 1) {
10670         prot |= PAGE_READ;
10671     }
10672     if (s2ap & 2) {
10673         prot |= PAGE_WRITE;
10674     }
10675     if (!xn) {
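              /* When EL2 is AArch32, stage 2 execute permission additionally
               * requires read permission; AArch64 EL2 has no such requirement
               * here.
               */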
10676         if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
10677             prot |= PAGE_EXEC;
10678         }
10679     }
10680     return prot;
10681 }
10682 
10683 /* Translate section/page access permissions to protection flags
10684  *
10685  * @env:     CPUARMState
10686  * @mmu_idx: MMU index indicating required translation regime
10687  * @is_aa64: TRUE if AArch64
10688  * @ap:      The 2-bit simple AP (AP[2:1])
10689  * @ns:      NS (non-secure) bit
10690  * @xn:      XN (execute-never) bit
10691  * @pxn:     PXN (privileged execute-never) bit
10692  */
10693 static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
10694                       int ap, int ns, int xn, int pxn)
10695 {
10696     bool is_user = regime_is_user(env, mmu_idx);
10697     int prot_rw, user_rw;
10698     bool have_wxn;
10699     int wxn = 0;
10700 
10701     assert(mmu_idx != ARMMMUIdx_S2NS);
10702 
10703     user_rw = simple_ap_to_rw_prot_is_user(ap, true);
10704     if (is_user) {
10705         prot_rw = user_rw;
10706     } else {
10707         prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
10708     }
10709 
10710     if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) {
10711         return prot_rw;
10712     }
10713 
10714     /* TODO have_wxn should be replaced with
10715      *   ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2)
10716      * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE
10717      * compatible processors have EL2, which is required for [U]WXN.
10718      */
10719     have_wxn = arm_feature(env, ARM_FEATURE_LPAE);
10720 
10721     if (have_wxn) {
10722         wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN;
10723     }
10724 
10725     if (is_aa64) {
10726         switch (regime_el(env, mmu_idx)) {
10727         case 1:
10728             if (!is_user) {
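                      /* Privileged (EL1) execution is never allowed from
                       * pages that are writable at EL0, in addition to any
                       * PXN bit in the descriptor.
                       */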
10729                 xn = pxn || (user_rw & PAGE_WRITE);
10730             }
10731             break;
10732         case 2:
10733         case 3:
10734             break;
10735         }
10736     } else if (arm_feature(env, ARM_FEATURE_V7)) {
10737         switch (regime_el(env, mmu_idx)) {
10738         case 1:
10739         case 3:
10740             if (is_user) {
10741                 xn = xn || !(user_rw & PAGE_READ);
10742             } else {
10743                 int uwxn = 0;
10744                 if (have_wxn) {
10745                     uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN;
10746                 }
10747                 xn = xn || !(prot_rw & PAGE_READ) || pxn ||
10748                      (uwxn && (user_rw & PAGE_WRITE));
10749             }
10750             break;
10751         case 2:
10752             break;
10753         }
10754     } else {
10755         xn = wxn = 0;
10756     }
10757 
10758     if (xn || (wxn && (prot_rw & PAGE_WRITE))) {
10759         return prot_rw;
10760     }
10761     return prot_rw | PAGE_EXEC;
10762 }
10763 
10764 static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
10765                                      uint32_t *table, uint32_t address)
10766 {
10767     /* Note that we can only get here for an AArch32 PL0/PL1 lookup */
10768     TCR *tcr = regime_tcr(env, mmu_idx);
10769 
10770     if (address & tcr->mask) {
10771         if (tcr->raw_tcr & TTBCR_PD1) {
10772             /* Translation table walk disabled for TTBR1 */
10773             return false;
10774         }
10775         *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000;
10776     } else {
10777         if (tcr->raw_tcr & TTBCR_PD0) {
10778             /* Translation table walk disabled for TTBR0 */
10779             return false;
10780         }
10781         *table = regime_ttbr(env, mmu_idx, 0) & tcr->base_mask;
10782     }
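          /* The level 1 table index is VA[31:20]; each descriptor is 4 bytes,
           * hence the 4-byte-scaled mask.
           */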
10783     *table |= (address >> 18) & 0x3ffc;
10784     return true;
10785 }
10786 
10787 /* Translate a S1 pagetable walk through S2 if needed.  */
10788 static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
10789                                hwaddr addr, MemTxAttrs txattrs,
10790                                ARMMMUFaultInfo *fi)
10791 {
10792     if ((mmu_idx == ARMMMUIdx_S1NSE0 || mmu_idx == ARMMMUIdx_S1NSE1) &&
10793         !regime_translation_disabled(env, ARMMMUIdx_S2NS)) {
10794         target_ulong s2size;
10795         hwaddr s2pa;
10796         int s2prot;
10797         int ret;
10798         ARMCacheAttrs cacheattrs = {};
10799         ARMCacheAttrs *pcacheattrs = NULL;
10800 
10801         if (env->cp15.hcr_el2 & HCR_PTW) {
10802             /*
10803              * PTW means we must fault if this S1 walk touches S2 Device
10804              * memory; otherwise we don't care about the attributes and can
10805              * save the S2 translation the effort of computing them.
10806              */
10807             pcacheattrs = &cacheattrs;
10808         }
10809 
10810         ret = get_phys_addr_lpae(env, addr, 0, ARMMMUIdx_S2NS, &s2pa,
10811                                  &txattrs, &s2prot, &s2size, fi, pcacheattrs);
10812         if (ret) {
10813             assert(fi->type != ARMFault_None);
10814             fi->s2addr = addr;
10815             fi->stage2 = true;
10816             fi->s1ptw = true;
10817             return ~0;
10818         }
10819         if (pcacheattrs && (pcacheattrs->attrs & 0xf0) == 0) {
10820             /* Access was to Device memory: generate Permission fault */
10821             fi->type = ARMFault_Permission;
10822             fi->s2addr = addr;
10823             fi->stage2 = true;
10824             fi->s1ptw = true;
10825             return ~0;
10826         }
10827         addr = s2pa;
10828     }
10829     return addr;
10830 }
10831 
10832 /* All loads done in the course of a page table walk go through here. */
10833 static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure,
10834                             ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
10835 {
10836     ARMCPU *cpu = ARM_CPU(cs);
10837     CPUARMState *env = &cpu->env;
10838     MemTxAttrs attrs = {};
10839     MemTxResult result = MEMTX_OK;
10840     AddressSpace *as;
10841     uint32_t data;
10842 
10843     attrs.secure = is_secure;
10844     as = arm_addressspace(cs, attrs);
10845     addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi);
10846     if (fi->s1ptw) {
10847         return 0;
10848     }
10849     if (regime_translation_big_endian(env, mmu_idx)) {
10850         data = address_space_ldl_be(as, addr, attrs, &result);
10851     } else {
10852         data = address_space_ldl_le(as, addr, attrs, &result);
10853     }
10854     if (result == MEMTX_OK) {
10855         return data;
10856     }
10857     fi->type = ARMFault_SyncExternalOnWalk;
10858     fi->ea = arm_extabort_type(result);
10859     return 0;
10860 }
10861 
10862 static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure,
10863                             ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
10864 {
10865     ARMCPU *cpu = ARM_CPU(cs);
10866     CPUARMState *env = &cpu->env;
10867     MemTxAttrs attrs = {};
10868     MemTxResult result = MEMTX_OK;
10869     AddressSpace *as;
10870     uint64_t data;
10871 
10872     attrs.secure = is_secure;
10873     as = arm_addressspace(cs, attrs);
10874     addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi);
10875     if (fi->s1ptw) {
10876         return 0;
10877     }
10878     if (regime_translation_big_endian(env, mmu_idx)) {
10879         data = address_space_ldq_be(as, addr, attrs, &result);
10880     } else {
10881         data = address_space_ldq_le(as, addr, attrs, &result);
10882     }
10883     if (result == MEMTX_OK) {
10884         return data;
10885     }
10886     fi->type = ARMFault_SyncExternalOnWalk;
10887     fi->ea = arm_extabort_type(result);
10888     return 0;
10889 }
10890 
10891 static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
10892                              MMUAccessType access_type, ARMMMUIdx mmu_idx,
10893                              hwaddr *phys_ptr, int *prot,
10894                              target_ulong *page_size,
10895                              ARMMMUFaultInfo *fi)
10896 {
10897     CPUState *cs = CPU(arm_env_get_cpu(env));
10898     int level = 1;
10899     uint32_t table;
10900     uint32_t desc;
10901     int type;
10902     int ap;
10903     int domain = 0;
10904     int domain_prot;
10905     hwaddr phys_addr;
10906     uint32_t dacr;
10907 
10908     /* Pagetable walk.  */
10909     /* Lookup l1 descriptor.  */
10910     if (!get_level1_table_address(env, mmu_idx, &table, address)) {
10911         /* Section translation fault if page walk is disabled by PD0 or PD1 */
10912         fi->type = ARMFault_Translation;
10913         goto do_fault;
10914     }
10915     desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
10916                        mmu_idx, fi);
10917     if (fi->type != ARMFault_None) {
10918         goto do_fault;
10919     }
10920     type = (desc & 3);
10921     domain = (desc >> 5) & 0x0f;
10922     if (regime_el(env, mmu_idx) == 1) {
10923         dacr = env->cp15.dacr_ns;
10924     } else {
10925         dacr = env->cp15.dacr_s;
10926     }
10927     domain_prot = (dacr >> (domain * 2)) & 3;
10928     if (type == 0) {
10929         /* Section translation fault.  */
10930         fi->type = ARMFault_Translation;
10931         goto do_fault;
10932     }
10933     if (type != 2) {
10934         level = 2;
10935     }
10936     if (domain_prot == 0 || domain_prot == 2) {
10937         fi->type = ARMFault_Domain;
10938         goto do_fault;
10939     }
10940     if (type == 2) {
10941         /* 1Mb section.  */
10942         phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
10943         ap = (desc >> 10) & 3;
10944         *page_size = 1024 * 1024;
10945     } else {
10946         /* Lookup l2 entry.  */
10947         if (type == 1) {
10948             /* Coarse pagetable.  */
10949             table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
10950         } else {
10951             /* Fine pagetable.  */
10952             table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
10953         }
10954         desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
10955                            mmu_idx, fi);
10956         if (fi->type != ARMFault_None) {
10957             goto do_fault;
10958         }
10959         switch (desc & 3) {
10960         case 0: /* Page translation fault.  */
10961             fi->type = ARMFault_Translation;
10962             goto do_fault;
10963         case 1: /* 64k page.  */
10964             phys_addr = (desc & 0xffff0000) | (address & 0xffff);
10965             ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
10966             *page_size = 0x10000;
10967             break;
10968         case 2: /* 4k page.  */
10969             phys_addr = (desc & 0xfffff000) | (address & 0xfff);
10970             ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
10971             *page_size = 0x1000;
10972             break;
10973         case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */
10974             if (type == 1) {
10975                 /* ARMv6/XScale extended small page format */
10976                 if (arm_feature(env, ARM_FEATURE_XSCALE)
10977                     || arm_feature(env, ARM_FEATURE_V6)) {
10978                     phys_addr = (desc & 0xfffff000) | (address & 0xfff);
10979                     *page_size = 0x1000;
10980                 } else {
10981                     /* UNPREDICTABLE in ARMv5; we choose to take a
10982                      * page translation fault.
10983                      */
10984                     fi->type = ARMFault_Translation;
10985                     goto do_fault;
10986                 }
10987             } else {
10988                 phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
10989                 *page_size = 0x400;
10990             }
10991             ap = (desc >> 4) & 3;
10992             break;
10993         default:
10994             /* Never happens, but compiler isn't smart enough to tell.  */
10995             abort();
10996         }
10997     }
10998     *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
10999     *prot |= *prot ? PAGE_EXEC : 0;
11000     if (!(*prot & (1 << access_type))) {
11001         /* Access permission fault.  */
11002         fi->type = ARMFault_Permission;
11003         goto do_fault;
11004     }
11005     *phys_ptr = phys_addr;
11006     return false;
11007 do_fault:
11008     fi->domain = domain;
11009     fi->level = level;
11010     return true;
11011 }
11012 
11013 static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
11014                              MMUAccessType access_type, ARMMMUIdx mmu_idx,
11015                              hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
11016                              target_ulong *page_size, ARMMMUFaultInfo *fi)
11017 {
11018     CPUState *cs = CPU(arm_env_get_cpu(env));
11019     int level = 1;
11020     uint32_t table;
11021     uint32_t desc;
11022     uint32_t xn;
11023     uint32_t pxn = 0;
11024     int type;
11025     int ap;
11026     int domain = 0;
11027     int domain_prot;
11028     hwaddr phys_addr;
11029     uint32_t dacr;
11030     bool ns;
11031 
11032     /* Pagetable walk.  */
11033     /* Lookup l1 descriptor.  */
11034     if (!get_level1_table_address(env, mmu_idx, &table, address)) {
11035         /* Section translation fault if page walk is disabled by PD0 or PD1 */
11036         fi->type = ARMFault_Translation;
11037         goto do_fault;
11038     }
11039     desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
11040                        mmu_idx, fi);
11041     if (fi->type != ARMFault_None) {
11042         goto do_fault;
11043     }
11044     type = (desc & 3);
11045     if (type == 0 || (type == 3 && !arm_feature(env, ARM_FEATURE_PXN))) {
11046         /* Section translation fault, or attempt to use the encoding
11047          * which is Reserved on implementations without PXN.
11048          */
11049         fi->type = ARMFault_Translation;
11050         goto do_fault;
11051     }
11052     if ((type == 1) || !(desc & (1 << 18))) {
11053         /* Page or Section.  */
11054         domain = (desc >> 5) & 0x0f;
11055     }
11056     if (regime_el(env, mmu_idx) == 1) {
11057         dacr = env->cp15.dacr_ns;
11058     } else {
11059         dacr = env->cp15.dacr_s;
11060     }
11061     if (type == 1) {
11062         level = 2;
11063     }
11064     domain_prot = (dacr >> (domain * 2)) & 3;
11065     if (domain_prot == 0 || domain_prot == 2) {
11066         /* Section or Page domain fault */
11067         fi->type = ARMFault_Domain;
11068         goto do_fault;
11069     }
11070     if (type != 1) {
11071         if (desc & (1 << 18)) {
11072             /* Supersection.  */
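                  /* 16MB supersection: PA[35:32] comes from descriptor bits
                   * [23:20] and PA[39:36] from bits [8:5], so the output
                   * address can exceed 32 bits.
                   */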
11073             phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
11074             phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32;
11075             phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36;
11076             *page_size = 0x1000000;
11077         } else {
11078             /* Section.  */
11079             phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
11080             *page_size = 0x100000;
11081         }
11082         ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
11083         xn = desc & (1 << 4);
11084         pxn = desc & 1;
11085         ns = extract32(desc, 19, 1);
11086     } else {
11087         if (arm_feature(env, ARM_FEATURE_PXN)) {
11088             pxn = (desc >> 2) & 1;
11089         }
11090         ns = extract32(desc, 3, 1);
11091         /* Lookup l2 entry.  */
11092         table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
11093         desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
11094                            mmu_idx, fi);
11095         if (fi->type != ARMFault_None) {
11096             goto do_fault;
11097         }
11098         ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
11099         switch (desc & 3) {
11100         case 0: /* Page translation fault.  */
11101             fi->type = ARMFault_Translation;
11102             goto do_fault;
11103         case 1: /* 64k page.  */
11104             phys_addr = (desc & 0xffff0000) | (address & 0xffff);
11105             xn = desc & (1 << 15);
11106             *page_size = 0x10000;
11107             break;
11108         case 2: case 3: /* 4k page.  */
11109             phys_addr = (desc & 0xfffff000) | (address & 0xfff);
11110             xn = desc & 1;
11111             *page_size = 0x1000;
11112             break;
11113         default:
11114             /* Never happens, but compiler isn't smart enough to tell.  */
11115             abort();
11116         }
11117     }
11118     if (domain_prot == 3) {
11119         *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
11120     } else {
11121         if (pxn && !regime_is_user(env, mmu_idx)) {
11122             xn = 1;
11123         }
11124         if (xn && access_type == MMU_INST_FETCH) {
11125             fi->type = ARMFault_Permission;
11126             goto do_fault;
11127         }
11128 
11129         if (arm_feature(env, ARM_FEATURE_V6K) &&
11130                 (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
11131             /* The simplified model uses AP[0] as an access control bit.  */
11132             if ((ap & 1) == 0) {
11133                 /* Access flag fault.  */
11134                 fi->type = ARMFault_AccessFlag;
11135                 goto do_fault;
11136             }
11137             *prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
11138         } else {
11139             *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
11140         }
11141         if (*prot && !xn) {
11142             *prot |= PAGE_EXEC;
11143         }
11144         if (!(*prot & (1 << access_type))) {
11145             /* Access permission fault.  */
11146             fi->type = ARMFault_Permission;
11147             goto do_fault;
11148         }
11149     }
11150     if (ns) {
11151         /* The NS bit will (as required by the architecture) have no effect if
11152          * the CPU doesn't support TZ or this is a non-secure translation
11153          * regime, because the attribute will already be non-secure.
11154          */
11155         attrs->secure = false;
11156     }
11157     *phys_ptr = phys_addr;
11158     return false;
11159 do_fault:
11160     fi->domain = domain;
11161     fi->level = level;
11162     return true;
11163 }
11164 
11165 /*
11166  * check_s2_mmu_setup
11167  * @cpu:        ARMCPU
11168  * @is_aa64:    True if the translation regime is in AArch64 state
11169  * @startlevel: Suggested starting level
11170  * @inputsize:  Bitsize of IPAs
11171  * @stride:     Page-table stride (See the ARM ARM)
11172  *
11173  * Returns true if the suggested S2 translation parameters are OK and
11174  * false otherwise.
11175  */
11176 static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
11177                                int inputsize, int stride)
11178 {
11179     const int grainsize = stride + 3;
11180     int startsizecheck;
11181 
11182     /* Negative levels are never allowed.  */
11183     if (level < 0) {
11184         return false;
11185     }
11186 
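          /* startsizecheck is the number of input address bits that the
           * starting level must resolve; at least 1 and at most stride + 4
           * are allowed (the extra 4 covers stage 2 concatenated top-level
           * tables).
           */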
11187     startsizecheck = inputsize - ((3 - level) * stride + grainsize);
11188     if (startsizecheck < 1 || startsizecheck > stride + 4) {
11189         return false;
11190     }
11191 
11192     if (is_aa64) {
11193         CPUARMState *env = &cpu->env;
11194         unsigned int pamax = arm_pamax(cpu);
11195 
11196         switch (stride) {
11197         case 13: /* 64KB Pages.  */
11198             if (level == 0 || (level == 1 && pamax <= 42)) {
11199                 return false;
11200             }
11201             break;
11202         case 11: /* 16KB Pages.  */
11203             if (level == 0 || (level == 1 && pamax <= 40)) {
11204                 return false;
11205             }
11206             break;
11207         case 9: /* 4KB Pages.  */
11208             if (level == 0 && pamax <= 42) {
11209                 return false;
11210             }
11211             break;
11212         default:
11213             g_assert_not_reached();
11214         }
11215 
11216         /* Inputsize checks.  */
11217         if (inputsize > pamax &&
11218             (arm_el_is_aa64(env, 1) || inputsize > 40)) {
11219             /* This is CONSTRAINED UNPREDICTABLE and we choose to fault.  */
11220             return false;
11221         }
11222     } else {
11223         /* AArch32 only supports 4KB pages. Assert on that.  */
11224         assert(stride == 9);
11225 
11226         if (level == 0) {
11227             return false;
11228         }
11229     }
11230     return true;
11231 }
11232 
11233 /* Translate from the 4-bit stage 2 representation of
11234  * memory attributes (without cache-allocation hints) to
11235  * the 8-bit representation of the stage 1 MAIR registers
11236  * (which includes allocation hints).
11237  *
11238  * ref: shared/translation/attrs/S2AttrDecode()
11239  *      .../S2ConvertAttrsHints()
11240  */
11241 static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs)
11242 {
11243     uint8_t hiattr = extract32(s2attrs, 2, 2);
11244     uint8_t loattr = extract32(s2attrs, 0, 2);
11245     uint8_t hihint = 0, lohint = 0;
11246 
11247     if (hiattr != 0) { /* normal memory */
11248         if ((env->cp15.hcr_el2 & HCR_CD) != 0) { /* cache disabled */
11249             hiattr = loattr = 1; /* non-cacheable */
11250         } else {
11251             if (hiattr != 1) { /* Write-through or write-back */
11252                 hihint = 3; /* RW allocate */
11253             }
11254             if (loattr != 1) { /* Write-through or write-back */
11255                 lohint = 3; /* RW allocate */
11256             }
11257         }
11258     }
11259 
11260     return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint;
11261 }
11262 #endif /* !CONFIG_USER_ONLY */
11263 
11264 ARMVAParameters aa64_va_parameters_both(CPUARMState *env, uint64_t va,
11265                                         ARMMMUIdx mmu_idx)
11266 {
11267     uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
11268     uint32_t el = regime_el(env, mmu_idx);
11269     bool tbi, tbid, epd, hpd, using16k, using64k;
11270     int select, tsz;
11271 
11272     /*
11273      * Bit 55 is always between the two regions, and is canonical for
11274      * determining if address tagging is enabled.
11275      */
11276     select = extract64(va, 55, 1);
11277 
11278     if (el > 1) {
11279         tsz = extract32(tcr, 0, 6);
11280         using64k = extract32(tcr, 14, 1);
11281         using16k = extract32(tcr, 15, 1);
11282         if (mmu_idx == ARMMMUIdx_S2NS) {
11283             /* VTCR_EL2 */
11284             tbi = tbid = hpd = false;
11285         } else {
11286             tbi = extract32(tcr, 20, 1);
11287             hpd = extract32(tcr, 24, 1);
11288             tbid = extract32(tcr, 29, 1);
11289         }
11290         epd = false;
11291     } else if (!select) {
11292         tsz = extract32(tcr, 0, 6);
11293         epd = extract32(tcr, 7, 1);
11294         using64k = extract32(tcr, 14, 1);
11295         using16k = extract32(tcr, 15, 1);
11296         tbi = extract64(tcr, 37, 1);
11297         hpd = extract64(tcr, 41, 1);
11298         tbid = extract64(tcr, 51, 1);
11299     } else {
11300         int tg = extract32(tcr, 30, 2);
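              /* TG1 granule encoding: 1 => 16KB, 3 => 64KB (2 => 4KB),
               * which differs from the TG0 encoding tested above.
               */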
11301         using16k = tg == 1;
11302         using64k = tg == 3;
11303         tsz = extract32(tcr, 16, 6);
11304         epd = extract32(tcr, 23, 1);
11305         tbi = extract64(tcr, 38, 1);
11306         hpd = extract64(tcr, 42, 1);
11307         tbid = extract64(tcr, 52, 1);
11308     }
11309     tsz = MIN(tsz, 39);  /* TODO: ARMv8.4-TTST */
11310     tsz = MAX(tsz, 16);  /* TODO: ARMv8.2-LVA  */
11311 
11312     return (ARMVAParameters) {
11313         .tsz = tsz,
11314         .select = select,
11315         .tbi = tbi,
11316         .tbid = tbid,
11317         .epd = epd,
11318         .hpd = hpd,
11319         .using16k = using16k,
11320         .using64k = using64k,
11321     };
11322 }
11323 
11324 ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
11325                                    ARMMMUIdx mmu_idx, bool data)
11326 {
11327     ARMVAParameters ret = aa64_va_parameters_both(env, va, mmu_idx);
11328 
11329     /* Present TBI as a composite with TBID.  */
11330     ret.tbi &= (data || !ret.tbid);
11331     return ret;
11332 }
11333 
11334 #ifndef CONFIG_USER_ONLY
11335 static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va,
11336                                           ARMMMUIdx mmu_idx)
11337 {
11338     uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
11339     uint32_t el = regime_el(env, mmu_idx);
11340     int select, tsz;
11341     bool epd, hpd;
11342 
11343     if (mmu_idx == ARMMMUIdx_S2NS) {
11344         /* VTCR */
11345         bool sext = extract32(tcr, 4, 1);
11346         bool sign = extract32(tcr, 3, 1);
11347 
11348         /*
11349          * If the sign-extend bit is not the same as t0sz[3], the result
11350          * is unpredictable. Flag this as a guest error.
11351          */
11352         if (sign != sext) {
11353             qemu_log_mask(LOG_GUEST_ERROR,
11354                           "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n");
11355         }
11356         tsz = sextract32(tcr, 0, 4) + 8;
11357         select = 0;
11358         hpd = false;
11359         epd = false;
11360     } else if (el == 2) {
11361         /* HTCR */
11362         tsz = extract32(tcr, 0, 3);
11363         select = 0;
11364         hpd = extract64(tcr, 24, 1);
11365         epd = false;
11366     } else {
11367         int t0sz = extract32(tcr, 0, 3);
11368         int t1sz = extract32(tcr, 16, 3);
11369 
11370         if (t1sz == 0) {
11371             select = va > (0xffffffffu >> t0sz);
11372         } else {
11373             /* Note that we will detect errors later.  */
11374             select = va >= ~(0xffffffffu >> t1sz);
11375         }
11376         if (!select) {
11377             tsz = t0sz;
11378             epd = extract32(tcr, 7, 1);
11379             hpd = extract64(tcr, 41, 1);
11380         } else {
11381             tsz = t1sz;
11382             epd = extract32(tcr, 23, 1);
11383             hpd = extract64(tcr, 42, 1);
11384         }
11385         /* For AArch32, HPD0 is not enabled unless T2E is also set.  */
11386         hpd &= extract32(tcr, 6, 1);
11387     }
11388 
11389     return (ARMVAParameters) {
11390         .tsz = tsz,
11391         .select = select,
11392         .epd = epd,
11393         .hpd = hpd,
11394     };
11395 }
11396 
11397 static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
11398                                MMUAccessType access_type, ARMMMUIdx mmu_idx,
11399                                hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
11400                                target_ulong *page_size_ptr,
11401                                ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
11402 {
11403     ARMCPU *cpu = arm_env_get_cpu(env);
11404     CPUState *cs = CPU(cpu);
11405     /* Read an LPAE long-descriptor translation table. */
11406     ARMFaultType fault_type = ARMFault_Translation;
11407     uint32_t level;
11408     ARMVAParameters param;
11409     uint64_t ttbr;
11410     hwaddr descaddr, indexmask, indexmask_grainsize;
11411     uint32_t tableattrs;
11412     target_ulong page_size;
11413     uint32_t attrs;
11414     int32_t stride;
11415     int addrsize, inputsize;
11416     TCR *tcr = regime_tcr(env, mmu_idx);
11417     int ap, ns, xn, pxn;
11418     uint32_t el = regime_el(env, mmu_idx);
11419     bool ttbr1_valid;
11420     uint64_t descaddrmask;
11421     bool aarch64 = arm_el_is_aa64(env, el);
11422     bool guarded = false;
11423 
11424     /* TODO:
11425      * This code does not handle the different format TCR for VTCR_EL2.
11426      * This code also does not support shareability levels.
11427      * Attribute and permission bit handling should also be checked when adding
11428      * support for those page table walks.
11429      */
11430     if (aarch64) {
11431         param = aa64_va_parameters(env, address, mmu_idx,
11432                                    access_type != MMU_INST_FETCH);
11433         level = 0;
11434         /* If we are in 64-bit EL2 or EL3 then there is no TTBR1, so mark it
11435          * invalid.
11436          */
11437         ttbr1_valid = (el < 2);
11438         addrsize = 64 - 8 * param.tbi;
11439         inputsize = 64 - param.tsz;
11440     } else {
11441         param = aa32_va_parameters(env, address, mmu_idx);
11442         level = 1;
11443         /* There is no TTBR1 for EL2 */
11444         ttbr1_valid = (el != 2);
11445         addrsize = (mmu_idx == ARMMMUIdx_S2NS ? 40 : 32);
11446         inputsize = addrsize - param.tsz;
11447     }
11448 
11449     /*
11450      * We determined the region when collecting the parameters, but we
11451      * have not yet validated that the address is valid for the region.
11452      * Extract the top bits and verify that they all match select.
11453      *
11454      * For aa32, if inputsize == addrsize, then we have selected the
11455      * region by exclusion in aa32_va_parameters and there is no more
11456      * validation to do here.
11457      */
11458     if (inputsize < addrsize) {
11459         target_ulong top_bits = sextract64(address, inputsize,
11460                                            addrsize - inputsize);
11461         if (-top_bits != param.select || (param.select && !ttbr1_valid)) {
11462             /* The gap between the two regions is a Translation fault */
11463             fault_type = ARMFault_Translation;
11464             goto do_fault;
11465         }
11466     }
11467 
11468     if (param.using64k) {
11469         stride = 13;
11470     } else if (param.using16k) {
11471         stride = 11;
11472     } else {
11473         stride = 9;
11474     }
11475 
11476     /* Note that QEMU ignores shareability and cacheability attributes,
11477      * so we don't need to do anything with the SH, ORGN, IRGN fields
11478      * in the TTBCR.  Similarly, TTBCR:A1 selects whether we get the
11479      * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
11480      * implement any ASID-like capability so we can ignore it (instead
11481      * we will always flush the TLB any time the ASID is changed).
11482      */
11483     ttbr = regime_ttbr(env, mmu_idx, param.select);
11484 
11485     /* Here we should have set up all the parameters for the translation:
11486      * inputsize, ttbr, epd, stride, tbi
11487      */
11488 
11489     if (param.epd) {
11490         /* Translation table walk disabled => Translation fault on TLB miss
11491          * Note: This is always 0 on 64-bit EL2 and EL3.
11492          */
11493         goto do_fault;
11494     }
11495 
11496     if (mmu_idx != ARMMMUIdx_S2NS) {
11497         /* The starting level depends on the virtual address size (which can
11498          * be up to 48 bits) and the translation granule size. It indicates
11499          * the number of strides (stride bits at a time) needed to
11500          * consume the bits of the input address. In the pseudocode this is:
11501          *  level = 4 - RoundUp((inputsize - grainsize) / stride)
11502          * where their 'inputsize' is our 'inputsize', 'grainsize' is
11503          * our 'stride + 3' and 'stride' is our 'stride'.
11504          * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying:
11505          * = 4 - (inputsize - stride - 3 + stride - 1) / stride
11506          * = 4 - (inputsize - 4) / stride;
11507          */
11508         level = 4 - (inputsize - 4) / stride;
11509     } else {
11510         /* For stage 2 translations the starting level is specified by the
11511          * VTCR_EL2.SL0 field (whose interpretation depends on the page size)
11512          */
11513         uint32_t sl0 = extract32(tcr->raw_tcr, 6, 2);
11514         uint32_t startlevel;
11515         bool ok;
11516 
11517         if (!aarch64 || stride == 9) {
11518             /* AArch32 or 4KB pages */
11519             startlevel = 2 - sl0;
11520         } else {
11521             /* 16KB or 64KB pages */
11522             startlevel = 3 - sl0;
11523         }
11524 
11525         /* Check that the starting level is valid. */
11526         ok = check_s2_mmu_setup(cpu, aarch64, startlevel,
11527                                 inputsize, stride);
11528         if (!ok) {
11529             fault_type = ARMFault_Translation;
11530             goto do_fault;
11531         }
11532         level = startlevel;
11533     }
11534 
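          /* indexmask covers the index bits consumed at the starting level;
           * later levels always index a full-size table and use
           * indexmask_grainsize instead.
           */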
11535     indexmask_grainsize = (1ULL << (stride + 3)) - 1;
11536     indexmask = (1ULL << (inputsize - (stride * (4 - level)))) - 1;
11537 
11538     /* Now we can extract the actual base address from the TTBR */
11539     descaddr = extract64(ttbr, 0, 48);
11540     descaddr &= ~indexmask;
11541 
11542     /* The address field in the descriptor goes up to bit 39 for ARMv7
11543      * and up to bit 47 for ARMv8; we use a mask up to bit 39 for AArch32
11544      * because we don't need the other bits in that case to construct the
11545      * next descriptor address (they should all be zeroes anyway).
11546      */
11547     descaddrmask = ((1ull << (aarch64 ? 48 : 40)) - 1) &
11548                    ~indexmask_grainsize;
11549 
11550     /* Secure accesses start with the page table in secure memory and
11551      * can be downgraded to non-secure at any step. Non-secure accesses
11552      * remain non-secure. We implement this by just ORing in the NSTable/NS
11553      * bits at each step.
11554      */
11555     tableattrs = regime_is_secure(env, mmu_idx) ? 0 : (1 << 4);
11556     for (;;) {
11557         uint64_t descriptor;
11558         bool nstable;
11559 
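              /* Add this level's index, which the shift leaves pre-scaled by
               * the 8-byte descriptor size; the three stray low-order bits
               * from the next level are cleared just below.
               */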
11560         descaddr |= (address >> (stride * (4 - level))) & indexmask;
11561         descaddr &= ~7ULL;
11562         nstable = extract32(tableattrs, 4, 1);
11563         descriptor = arm_ldq_ptw(cs, descaddr, !nstable, mmu_idx, fi);
11564         if (fi->type != ARMFault_None) {
11565             goto do_fault;
11566         }
11567 
11568         if (!(descriptor & 1) ||
11569             (!(descriptor & 2) && (level == 3))) {
11570             /* Invalid, or the Reserved level 3 encoding */
11571             goto do_fault;
11572         }
11573         descaddr = descriptor & descaddrmask;
11574 
11575         if ((descriptor & 2) && (level < 3)) {
11576             /* Table entry. The top five bits are attributes which may
11577              * propagate down through lower levels of the table (and
11578              * which are all arranged so that 0 means "no effect", so
11579              * we can gather them up by ORing in the bits at each level).
11580              */
11581             tableattrs |= extract64(descriptor, 59, 5);
11582             level++;
11583             indexmask = indexmask_grainsize;
11584             continue;
11585         }
11586         /* Block entry at level 1 or 2, or page entry at level 3.
11587          * These are basically the same thing, although the number
11588          * of bits we pull in from the vaddr varies.
11589          */
11590         page_size = (1ULL << ((stride * (4 - level)) + 3));
11591         descaddr |= (address & (page_size - 1));
11592         /* Extract attributes from the descriptor */
11593         attrs = extract64(descriptor, 2, 10)
11594             | (extract64(descriptor, 52, 12) << 10);
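              /* attrs[9:0] now holds the lower attributes (descriptor bits
               * [11:2]) and attrs[21:10] the upper attributes (bits [63:52]).
               */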
11595 
11596         if (mmu_idx == ARMMMUIdx_S2NS) {
11597             /* Stage 2 table descriptors do not include any attribute fields */
11598             break;
11599         }
11600         /* Merge in attributes from table descriptors */
11601         attrs |= nstable << 3; /* NS */
11602         guarded = extract64(descriptor, 50, 1);  /* GP */
11603         if (param.hpd) {
11604             /* HPD disables all the table attributes except NSTable.  */
11605             break;
11606         }
11607         attrs |= extract32(tableattrs, 0, 2) << 11;     /* XN, PXN */
11608         /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
11609          * means "force PL1 access only", which means forcing AP[1] to 0.
11610          */
11611         attrs &= ~(extract32(tableattrs, 2, 1) << 4);   /* !APT[0] => AP[1] */
11612         attrs |= extract32(tableattrs, 3, 1) << 5;      /* APT[1] => AP[2] */
11613         break;
11614     }
11615     /* Here descaddr is the final physical address, and attributes
11616      * are all in attrs.
11617      */
11618     fault_type = ARMFault_AccessFlag;
11619     if ((attrs & (1 << 8)) == 0) {
11620         /* Access flag */
11621         goto do_fault;
11622     }
11623 
11624     ap = extract32(attrs, 4, 2);
11625     xn = extract32(attrs, 12, 1);
11626 
11627     if (mmu_idx == ARMMMUIdx_S2NS) {
11628         ns = true;
11629         *prot = get_S2prot(env, ap, xn);
11630     } else {
11631         ns = extract32(attrs, 3, 1);
11632         pxn = extract32(attrs, 11, 1);
11633         *prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn);
11634     }
11635 
11636     fault_type = ARMFault_Permission;
11637     if (!(*prot & (1 << access_type))) {
11638         goto do_fault;
11639     }
11640 
11641     if (ns) {
11642         /* The NS bit will (as required by the architecture) have no effect if
11643          * the CPU doesn't support TZ or this is a non-secure translation
11644          * regime, because the attribute will already be non-secure.
11645          */
11646         txattrs->secure = false;
11647     }
11648     /* When in AArch64 mode and BTI is enabled, remember GP in the IOTLB.  */
11649     if (aarch64 && guarded && cpu_isar_feature(aa64_bti, cpu)) {
11650         txattrs->target_tlb_bit0 = true;
11651     }
11652 
11653     if (cacheattrs != NULL) {
11654         if (mmu_idx == ARMMMUIdx_S2NS) {
11655             cacheattrs->attrs = convert_stage2_attrs(env,
11656                                                      extract32(attrs, 0, 4));
11657         } else {
11658             /* Index into MAIR registers for cache attributes */
11659             uint8_t attrindx = extract32(attrs, 0, 3);
11660             uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
11661             assert(attrindx <= 7);
11662             cacheattrs->attrs = extract64(mair, attrindx * 8, 8);
11663         }
11664         cacheattrs->shareability = extract32(attrs, 6, 2);
11665     }
11666 
11667     *phys_ptr = descaddr;
11668     *page_size_ptr = page_size;
11669     return false;
11670 
11671 do_fault:
11672     fi->type = fault_type;
11673     fi->level = level;
11674     /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2.  */
11675     fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_S2NS);
11676     return true;
11677 }
11678 
11679 static inline void get_phys_addr_pmsav7_default(CPUARMState *env,
11680                                                 ARMMMUIdx mmu_idx,
11681                                                 int32_t address, int *prot)
11682 {
11683     if (!arm_feature(env, ARM_FEATURE_M)) {
11684         *prot = PAGE_READ | PAGE_WRITE;
11685         switch (address) {
11686         case 0xF0000000 ... 0xFFFFFFFF:
11687             if (regime_sctlr(env, mmu_idx) & SCTLR_V) {
11688                 /* hivecs: executing from the high vectors region is OK */
11689                 *prot |= PAGE_EXEC;
11690             }
11691             break;
11692         case 0x00000000 ... 0x7FFFFFFF:
11693             *prot |= PAGE_EXEC;
11694             break;
11695         }
11696     } else {
11697         /* Default system address map for M profile cores.
11698          * The architecture specifies which regions are execute-never;
11699          * at the MPU level no other checks are defined.
11700          */
11701         switch (address) {
11702         case 0x00000000 ... 0x1fffffff: /* ROM */
11703         case 0x20000000 ... 0x3fffffff: /* SRAM */
11704         case 0x60000000 ... 0x7fffffff: /* RAM */
11705         case 0x80000000 ... 0x9fffffff: /* RAM */
11706             *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
11707             break;
11708         case 0x40000000 ... 0x5fffffff: /* Peripheral */
11709         case 0xa0000000 ... 0xbfffffff: /* Device */
11710         case 0xc0000000 ... 0xdfffffff: /* Device */
11711         case 0xe0000000 ... 0xffffffff: /* System */
11712             *prot = PAGE_READ | PAGE_WRITE;
11713             break;
11714         default:
11715             g_assert_not_reached();
11716         }
11717     }
11718 }
11719 
11720 static bool pmsav7_use_background_region(ARMCPU *cpu,
11721                                          ARMMMUIdx mmu_idx, bool is_user)
11722 {
11723     /* Return true if we should use the default memory map as a
11724      * "background" region if there are no hits against any MPU regions.
11725      */
11726     CPUARMState *env = &cpu->env;
11727 
11728     if (is_user) {
11729         return false;
11730     }
11731 
11732     if (arm_feature(env, ARM_FEATURE_M)) {
11733         return env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)]
11734             & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
11735     } else {
11736         return regime_sctlr(env, mmu_idx) & SCTLR_BR;
11737     }
11738 }
11739 
11740 static inline bool m_is_ppb_region(CPUARMState *env, uint32_t address)
11741 {
11742     /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */
11743     return arm_feature(env, ARM_FEATURE_M) &&
11744         extract32(address, 20, 12) == 0xe00;
11745 }
11746 
11747 static inline bool m_is_system_region(CPUARMState *env, uint32_t address)
11748 {
11749     /* True if address is in the M profile system region
11750      * 0xe0000000 - 0xffffffff
11751      */
11752     return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7;
11753 }
11754 
11755 static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
11756                                  MMUAccessType access_type, ARMMMUIdx mmu_idx,
11757                                  hwaddr *phys_ptr, int *prot,
11758                                  target_ulong *page_size,
11759                                  ARMMMUFaultInfo *fi)
11760 {
11761     ARMCPU *cpu = arm_env_get_cpu(env);
11762     int n;
11763     bool is_user = regime_is_user(env, mmu_idx);
11764 
11765     *phys_ptr = address;
11766     *page_size = TARGET_PAGE_SIZE;
11767     *prot = 0;
11768 
11769     if (regime_translation_disabled(env, mmu_idx) ||
11770         m_is_ppb_region(env, address)) {
11771         /* MPU disabled or M profile PPB access: use default memory map.
11772          * The other case which uses the default memory map in the
11773          * v7M ARM ARM pseudocode is exception vector reads from the vector
11774          * table. In QEMU those accesses are done in arm_v7m_load_vector(),
11775          * which always does a direct read using address_space_ldl(), rather
11776          * than going via this function, so we don't need to check that here.
11777          */
11778         get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
11779     } else { /* MPU enabled */
11780         for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
11781             /* region search */
11782             uint32_t base = env->pmsav7.drbar[n];
11783             uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5);
11784             uint32_t rmask;
11785             bool srdis = false;
11786 
11787             if (!(env->pmsav7.drsr[n] & 0x1)) {
11788                 continue;
11789             }
11790 
11791             if (!rsize) {
11792                 qemu_log_mask(LOG_GUEST_ERROR,
11793                               "DRSR[%d]: Rsize field cannot be 0\n", n);
11794                 continue;
11795             }
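                  /* DRSR.RSIZE encodes a region of 2^(RSIZE + 1) bytes;
                   * rmask is the byte-offset mask within the region.
                   */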
11796             rsize++;
11797             rmask = (1ull << rsize) - 1;
11798 
11799             if (base & rmask) {
11800                 qemu_log_mask(LOG_GUEST_ERROR,
11801                               "DRBAR[%d]: 0x%" PRIx32 " misaligned "
11802                               "to DRSR region size, mask = 0x%" PRIx32 "\n",
11803                               n, base, rmask);
11804                 continue;
11805             }
11806 
11807             if (address < base || address > base + rmask) {
11808                 /*
11809                  * Address not in this region. We must check whether the
11810                  * region covers addresses in the same page as our address.
11811                  * In that case we must not report a size that covers the
11812                  * whole page for a subsequent hit against a different MPU
11813                  * region or the background region, because it would result in
11814                  * incorrect TLB hits for subsequent accesses to addresses that
11815                  * are in this MPU region.
11816                  */
11817                 if (ranges_overlap(base, rmask,
11818                                    address & TARGET_PAGE_MASK,
11819                                    TARGET_PAGE_SIZE)) {
11820                     *page_size = 1;
11821                 }
11822                 continue;
11823             }
11824 
11825             /* Region matched */
11826 
11827             if (rsize >= 8) { /* no subregions for regions < 256 bytes */
11828                 int i, snd;
11829                 uint32_t srdis_mask;
11830 
11831                 rsize -= 3; /* sub region size (power of 2) */
11832                 snd = ((address - base) >> rsize) & 0x7;
11833                 srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1);
11834 
11835                 srdis_mask = srdis ? 0x3 : 0x0;
11836                 for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) {
11837                     /* This checks, in groups of 2, 4 and then 8, whether
11838                      * the subregion bits are consistent. rsize is incremented
11839                      * back up to give the region size, treating consistent
11840                      * adjacent subregions as one region. Stop testing if rsize
11841                      * is already big enough for an entire QEMU page.
11842                      */
11843                     int snd_rounded = snd & ~(i - 1);
11844                     uint32_t srdis_multi = extract32(env->pmsav7.drsr[n],
11845                                                      snd_rounded + 8, i);
11846                     if (srdis_mask ^ srdis_multi) {
11847                         break;
11848                     }
11849                     srdis_mask = (srdis_mask << i) | srdis_mask;
11850                     rsize++;
11851                 }
11852             }
11853             if (srdis) {
11854                 continue;
11855             }
11856             if (rsize < TARGET_PAGE_BITS) {
11857                 *page_size = 1 << rsize;
11858             }
11859             break;
11860         }
11861 
11862         if (n == -1) { /* no hits */
11863             if (!pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
11864                 /* background fault */
11865                 fi->type = ARMFault_Background;
11866                 return true;
11867             }
11868             get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
11869         } else { /* an MPU hit! */
11870             uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3);
11871             uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1);
11872 
11873             if (m_is_system_region(env, address)) {
11874                 /* System space is always execute never */
11875                 xn = 1;
11876             }
11877 
11878             if (is_user) { /* User mode AP bit decoding */
11879                 switch (ap) {
11880                 case 0:
11881                 case 1:
11882                 case 5:
11883                     break; /* no access */
11884                 case 3:
11885                     *prot |= PAGE_WRITE;
11886                     /* fall through */
11887                 case 2:
11888                 case 6:
11889                     *prot |= PAGE_READ | PAGE_EXEC;
11890                     break;
11891                 case 7:
11892                     /* for v7M, same as 6; for R profile a reserved value */
11893                     if (arm_feature(env, ARM_FEATURE_M)) {
11894                         *prot |= PAGE_READ | PAGE_EXEC;
11895                         break;
11896                     }
11897                     /* fall through */
11898                 default:
11899                     qemu_log_mask(LOG_GUEST_ERROR,
11900                                   "DRACR[%d]: Bad value for AP bits: 0x%"
11901                                   PRIx32 "\n", n, ap);
11902                 }
11903             } else { /* Priv. mode AP bits decoding */
11904                 switch (ap) {
11905                 case 0:
11906                     break; /* no access */
11907                 case 1:
11908                 case 2:
11909                 case 3:
11910                     *prot |= PAGE_WRITE;
11911                     /* fall through */
11912                 case 5:
11913                 case 6:
11914                     *prot |= PAGE_READ | PAGE_EXEC;
11915                     break;
11916                 case 7:
11917                     /* for v7M, same as 6; for R profile a reserved value */
11918                     if (arm_feature(env, ARM_FEATURE_M)) {
11919                         *prot |= PAGE_READ | PAGE_EXEC;
11920                         break;
11921                     }
11922                     /* fall through */
11923                 default:
11924                     qemu_log_mask(LOG_GUEST_ERROR,
11925                                   "DRACR[%d]: Bad value for AP bits: 0x%"
11926                                   PRIx32 "\n", n, ap);
11927                 }
11928             }
11929 
11930             /* execute never */
11931             if (xn) {
11932                 *prot &= ~PAGE_EXEC;
11933             }
11934         }
11935     }
11936 
11937     fi->type = ARMFault_Permission;
11938     fi->level = 1;
11939     return !(*prot & (1 << access_type));
11940 }
11941 
11942 static bool v8m_is_sau_exempt(CPUARMState *env,
11943                               uint32_t address, MMUAccessType access_type)
11944 {
11945     /* The architecture specifies that certain address ranges are
11946      * exempt from v8M SAU/IDAU checks.
11947      */
11948     return
11949         (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) ||
11950         (address >= 0xe0000000 && address <= 0xe0002fff) ||
11951         (address >= 0xe000e000 && address <= 0xe000efff) ||
11952         (address >= 0xe002e000 && address <= 0xe002efff) ||
11953         (address >= 0xe0040000 && address <= 0xe0041fff) ||
11954         (address >= 0xe00ff000 && address <= 0xe00fffff);
11955 }
11956 
11957 static void v8m_security_lookup(CPUARMState *env, uint32_t address,
11958                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
11959                                 V8M_SAttributes *sattrs)
11960 {
11961     /* Look up the security attributes for this address. Compare the
11962      * pseudocode SecurityCheck() function.
11963      * We assume the caller has zero-initialized *sattrs.
11964      */
11965     ARMCPU *cpu = arm_env_get_cpu(env);
11966     int r;
11967     bool idau_exempt = false, idau_ns = true, idau_nsc = true;
11968     int idau_region = IREGION_NOTVALID;
11969     uint32_t addr_page_base = address & TARGET_PAGE_MASK;
11970     uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);
11971 
11972     if (cpu->idau) {
11973         IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau);
11974         IDAUInterface *ii = IDAU_INTERFACE(cpu->idau);
11975 
11976         iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns,
11977                    &idau_nsc);
11978     }
11979 
11980     if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) {
11981         /* 0xf0000000..0xffffffff is always S for insn fetches */
11982         return;
11983     }
11984 
11985     if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) {
11986         sattrs->ns = !regime_is_secure(env, mmu_idx);
11987         return;
11988     }
11989 
11990     if (idau_region != IREGION_NOTVALID) {
11991         sattrs->irvalid = true;
11992         sattrs->iregion = idau_region;
11993     }
11994 
11995     switch (env->sau.ctrl & 3) {
11996     case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */
11997         break;
11998     case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */
11999         sattrs->ns = true;
12000         break;
12001     default: /* SAU.ENABLE == 1 */
12002         for (r = 0; r < cpu->sau_sregion; r++) {
12003             if (env->sau.rlar[r] & 1) {
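                      /* SAU regions have 32-byte granularity: RBAR supplies
                       * the base address and RLAR the inclusive limit.
                       */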
12004                 uint32_t base = env->sau.rbar[r] & ~0x1f;
12005                 uint32_t limit = env->sau.rlar[r] | 0x1f;
12006 
12007                 if (base <= address && limit >= address) {
12008                     if (base > addr_page_base || limit < addr_page_limit) {
12009                         sattrs->subpage = true;
12010                     }
12011                     if (sattrs->srvalid) {
12012                         /* If we hit in more than one region then we must report
12013                          * as Secure, not NS-Callable, with no valid region
12014                          * number info.
12015                          */
12016                         sattrs->ns = false;
12017                         sattrs->nsc = false;
12018                         sattrs->sregion = 0;
12019                         sattrs->srvalid = false;
12020                         break;
12021                     } else {
12022                         if (env->sau.rlar[r] & 2) {
12023                             sattrs->nsc = true;
12024                         } else {
12025                             sattrs->ns = true;
12026                         }
12027                         sattrs->srvalid = true;
12028                         sattrs->sregion = r;
12029                     }
12030                 } else {
12031                     /*
12032                      * Address not in this region. We must check whether the
12033                      * region covers addresses in the same page as our address.
12034                      * In that case we must not report a size that covers the
12035                      * whole page for a subsequent hit against a different MPU
12036                      * region or the background region, because it would result
12037                      * in incorrect TLB hits for subsequent accesses to
12038                      * addresses that are in this MPU region.
12039                      */
12040                     if (limit >= base &&
12041                         ranges_overlap(base, limit - base + 1,
12042                                        addr_page_base,
12043                                        TARGET_PAGE_SIZE)) {
12044                         sattrs->subpage = true;
12045                     }
12046                 }
12047             }
12048         }
12049         break;
12050     }
12051 
12052     /*
12053      * The IDAU will override the SAU lookup results if it specifies
12054      * higher security than the SAU does.
12055      */
12056     if (!idau_ns) {
12057         if (sattrs->ns || (!idau_nsc && sattrs->nsc)) {
12058             sattrs->ns = false;
12059             sattrs->nsc = idau_nsc;
12060         }
12061     }
12062 }
12063 
12064 static bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
12065                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
12066                               hwaddr *phys_ptr, MemTxAttrs *txattrs,
12067                               int *prot, bool *is_subpage,
12068                               ARMMMUFaultInfo *fi, uint32_t *mregion)
12069 {
12070     /* Perform a PMSAv8 MPU lookup (without also doing the SAU check
12071      * that a full phys-to-virt translation does).
12072      * mregion is (if not NULL) set to the region number which matched,
12073      * or -1 if no region number is returned (MPU off, address did not
12074      * hit a region, address hit in multiple regions).
12075      * We set is_subpage to true if the region hit doesn't cover the
12076      * entire TARGET_PAGE the address is within.
12077      */
12078     ARMCPU *cpu = arm_env_get_cpu(env);
12079     bool is_user = regime_is_user(env, mmu_idx);
12080     uint32_t secure = regime_is_secure(env, mmu_idx);
12081     int n;
12082     int matchregion = -1;
12083     bool hit = false;
12084     uint32_t addr_page_base = address & TARGET_PAGE_MASK;
12085     uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);
12086 
12087     *is_subpage = false;
12088     *phys_ptr = address;
12089     *prot = 0;
12090     if (mregion) {
12091         *mregion = -1;
12092     }
12093 
12094     /* Unlike the ARM ARM pseudocode, we don't need to check whether this
12095      * was an exception vector read from the vector table (which is always
12096      * done using the default system address map), because those accesses
12097      * are done in arm_v7m_load_vector(), which always does a direct
12098      * read using address_space_ldl(), rather than going via this function.
12099      */
12100     if (regime_translation_disabled(env, mmu_idx)) { /* MPU disabled */
12101         hit = true;
12102     } else if (m_is_ppb_region(env, address)) {
12103         hit = true;
12104     } else {
12105         if (pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
12106             hit = true;
12107         }
12108 
12109         for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
12110             /* region search */
12111             /* Note that the base address is bits [31:5] from the register
12112              * with bits [4:0] all zeroes, but the limit address is bits
12113              * [31:5] from the register with bits [4:0] all ones.
12114              */
12115             uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f;
12116             uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f;
12117 
12118             if (!(env->pmsav8.rlar[secure][n] & 0x1)) {
12119                 /* Region disabled */
12120                 continue;
12121             }
12122 
12123             if (address < base || address > limit) {
12124                 /*
12125                  * Address not in this region. We must check whether the
12126                  * region covers addresses in the same page as our address.
12127                  * In that case we must not report a size that covers the
12128                  * whole page for a subsequent hit against a different MPU
12129                  * region or the background region, because it would result in
12130                  * incorrect TLB hits for subsequent accesses to addresses that
12131                  * are in this MPU region.
12132                  */
12133                 if (limit >= base &&
12134                     ranges_overlap(base, limit - base + 1,
12135                                    addr_page_base,
12136                                    TARGET_PAGE_SIZE)) {
12137                     *is_subpage = true;
12138                 }
12139                 continue;
12140             }
12141 
12142             if (base > addr_page_base || limit < addr_page_limit) {
12143                 *is_subpage = true;
12144             }
12145 
12146             if (matchregion != -1) {
12147                 /* Multiple regions match -- always a failure (unlike
12148                  * PMSAv7 where highest-numbered-region wins)
12149                  */
12150                 fi->type = ARMFault_Permission;
12151                 fi->level = 1;
12152                 return true;
12153             }
12154 
12155             matchregion = n;
12156             hit = true;
12157         }
12158     }
12159 
12160     if (!hit) {
12161         /* background fault */
12162         fi->type = ARMFault_Background;
12163         return true;
12164     }
12165 
12166     if (matchregion == -1) {
12167         /* hit using the background region */
12168         get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
12169     } else {
12170         uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2);
12171         uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1);
12172 
12173         if (m_is_system_region(env, address)) {
12174             /* System space is always execute never */
12175             xn = 1;
12176         }
12177 
12178         *prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
12179         if (*prot && !xn) {
12180             *prot |= PAGE_EXEC;
12181         }
12182         /* We don't need to look the attribute up in the MAIR0/MAIR1
12183          * registers because that only tells us about cacheability.
12184          */
12185         if (mregion) {
12186             *mregion = matchregion;
12187         }
12188     }
12189 
12190     fi->type = ARMFault_Permission;
12191     fi->level = 1;
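    /*
     * This check relies on MMU_DATA_LOAD, MMU_DATA_STORE and MMU_INST_FETCH
     * being 0, 1 and 2, so that (1 << access_type) lines up with PAGE_READ,
     * PAGE_WRITE and PAGE_EXEC respectively.
     */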
12192     return !(*prot & (1 << access_type));
12193 }
12194 
12195 
12196 static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
12197                                  MMUAccessType access_type, ARMMMUIdx mmu_idx,
12198                                  hwaddr *phys_ptr, MemTxAttrs *txattrs,
12199                                  int *prot, target_ulong *page_size,
12200                                  ARMMMUFaultInfo *fi)
12201 {
12202     uint32_t secure = regime_is_secure(env, mmu_idx);
12203     V8M_SAttributes sattrs = {};
12204     bool ret;
12205     bool mpu_is_subpage;
12206 
12207     if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
12208         v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs);
12209         if (access_type == MMU_INST_FETCH) {
12210             /* Instruction fetches always use the MMU bank and the
12211              * transaction attribute determined by the fetch address,
12212              * regardless of CPU state. This is painful for QEMU
12213              * to handle, because it would mean we need to encode
12214              * into the mmu_idx not just the (user, negpri) information
12215              * for the current security state but also that for the
12216              * other security state, which would balloon the number
12217              * of mmu_idx values needed alarmingly.
12218              * Fortunately we can avoid this because it's not actually
12219              * possible to arbitrarily execute code from memory with
12220              * the wrong security attribute: it will always generate
12221              * an exception of some kind or another, apart from the
12222              * special case of an NS CPU executing an SG instruction
12223              * in S&NSC memory. So we always just fail the translation
12224              * here and sort things out in the exception handler
12225              * (including possibly emulating an SG instruction).
12226              */
12227             if (sattrs.ns != !secure) {
12228                 if (sattrs.nsc) {
12229                     fi->type = ARMFault_QEMU_NSCExec;
12230                 } else {
12231                     fi->type = ARMFault_QEMU_SFault;
12232                 }
12233                 *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
12234                 *phys_ptr = address;
12235                 *prot = 0;
12236                 return true;
12237             }
12238         } else {
12239             /* For data accesses we always use the MMU bank indicated
12240              * by the current CPU state, but the security attributes
12241              * might downgrade a secure access to nonsecure.
12242              */
12243             if (sattrs.ns) {
12244                 txattrs->secure = false;
12245             } else if (!secure) {
12246                 /* NS access to S memory must fault.
12247                  * Architecturally we should first check whether the
12248                  * MPU information for this address indicates that we
12249                  * are doing an unaligned access to Device memory, which
12250                  * should generate a UsageFault instead. QEMU does not
12251                  * currently check for that kind of unaligned access though.
12252                  * If we added it we would need to do so as a special case
12253                  * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt().
12254                  */
12255                 fi->type = ARMFault_QEMU_SFault;
12256                 *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
12257                 *phys_ptr = address;
12258                 *prot = 0;
12259                 return true;
12260             }
12261         }
12262     }
12263 
12264     ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, phys_ptr,
12265                             txattrs, prot, &mpu_is_subpage, fi, NULL);
12266     *page_size = (sattrs.subpage || mpu_is_subpage) ? 1 : TARGET_PAGE_SIZE;
12267     return ret;
12268 }
12269 
12270 static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
12271                                  MMUAccessType access_type, ARMMMUIdx mmu_idx,
12272                                  hwaddr *phys_ptr, int *prot,
12273                                  ARMMMUFaultInfo *fi)
12274 {
12275     int n;
12276     uint32_t mask;
12277     uint32_t base;
12278     bool is_user = regime_is_user(env, mmu_idx);
12279 
12280     if (regime_translation_disabled(env, mmu_idx)) {
12281         /* MPU disabled.  */
12282         *phys_ptr = address;
12283         *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
12284         return false;
12285     }
12286 
12287     *phys_ptr = address;
12288     for (n = 7; n >= 0; n--) {
12289         base = env->cp15.c6_region[n];
12290         if ((base & 1) == 0) {
12291             continue;
12292         }
12293         mask = 1 << ((base >> 1) & 0x1f);
12294         /* Keep this shift separate from the above to avoid an
12295            (undefined) << 32.  */
12296         mask = (mask << 1) - 1;
12297         if (((base ^ address) & ~mask) == 0) {
12298             break;
12299         }
12300     }
12301     if (n < 0) {
12302         fi->type = ARMFault_Background;
12303         return true;
12304     }
12305 
12306     if (access_type == MMU_INST_FETCH) {
12307         mask = env->cp15.pmsav5_insn_ap;
12308     } else {
12309         mask = env->cp15.pmsav5_data_ap;
12310     }
12311     mask = (mask >> (n * 4)) & 0xf;
12312     switch (mask) {
12313     case 0:
12314         fi->type = ARMFault_Permission;
12315         fi->level = 1;
12316         return true;
12317     case 1:
12318         if (is_user) {
12319             fi->type = ARMFault_Permission;
12320             fi->level = 1;
12321             return true;
12322         }
12323         *prot = PAGE_READ | PAGE_WRITE;
12324         break;
12325     case 2:
12326         *prot = PAGE_READ;
12327         if (!is_user) {
12328             *prot |= PAGE_WRITE;
12329         }
12330         break;
12331     case 3:
12332         *prot = PAGE_READ | PAGE_WRITE;
12333         break;
12334     case 5:
12335         if (is_user) {
12336             fi->type = ARMFault_Permission;
12337             fi->level = 1;
12338             return true;
12339         }
12340         *prot = PAGE_READ;
12341         break;
12342     case 6:
12343         *prot = PAGE_READ;
12344         break;
12345     default:
12346         /* Bad permission.  */
12347         fi->type = ARMFault_Permission;
12348         fi->level = 1;
12349         return true;
12350     }
12351     *prot |= PAGE_EXEC;
12352     return false;
12353 }
12354 
12355 /* Combine either inner or outer cacheability attributes for normal
12356  * memory, according to table D4-42 and pseudocode procedure
12357  * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM).
12358  *
12359  * NB: only stage 1 includes allocation hints (RW bits), leading to
12360  * some asymmetry.
12361  */
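/*
 * Summary of the MAIR nibble encoding this operates on (see the MAIR_ELx
 * description in the Arm ARM for the full definition): 4 (0b0100) is
 * Non-cacheable, values with bits [3:2] of 0b00 or 0b10 are Write-Through,
 * the remaining values are Write-Back, and bits [1:0] are the Read/Write
 * allocation hints.
 */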
12362 static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2)
12363 {
12364     if (s1 == 4 || s2 == 4) {
12365         /* non-cacheable has precedence */
12366         return 4;
12367     } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) {
12368         /* stage 1 write-through takes precedence */
12369         return s1;
12370     } else if (extract32(s2, 2, 2) == 2) {
12371         /* stage 2 write-through takes precedence, but the allocation hint
12372          * is still taken from stage 1
12373          */
12374         return (2 << 2) | extract32(s1, 0, 2);
12375     } else { /* write-back */
12376         return s1;
12377     }
12378 }
12379 
12380 /* Combine S1 and S2 cacheability/shareability attributes, per D4.5.4
12381  * and CombineS1S2Desc()
12382  *
12383  * @s1:      Attributes from stage 1 walk
12384  * @s2:      Attributes from stage 2 walk
12385  */
12386 static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2)
12387 {
12388     uint8_t s1lo = extract32(s1.attrs, 0, 4), s2lo = extract32(s2.attrs, 0, 4);
12389     uint8_t s1hi = extract32(s1.attrs, 4, 4), s2hi = extract32(s2.attrs, 4, 4);
12390     ARMCacheAttrs ret;
12391 
12392     /* Combine shareability attributes (table D4-43) */
12393     if (s1.shareability == 2 || s2.shareability == 2) {
12394         /* if either are outer-shareable, the result is outer-shareable */
12395         ret.shareability = 2;
12396     } else if (s1.shareability == 3 || s2.shareability == 3) {
12397         /* if either are inner-shareable, the result is inner-shareable */
12398         ret.shareability = 3;
12399     } else {
12400         /* both non-shareable */
12401         ret.shareability = 0;
12402     }
12403 
12404     /* Combine memory type and cacheability attributes */
12405     if (s1hi == 0 || s2hi == 0) {
12406         /* Device has precedence over normal */
12407         if (s1lo == 0 || s2lo == 0) {
12408             /* nGnRnE has precedence over anything */
12409             ret.attrs = 0;
12410         } else if (s1lo == 4 || s2lo == 4) {
12411             /* non-Reordering has precedence over Reordering */
12412             ret.attrs = 4;  /* nGnRE */
12413         } else if (s1lo == 8 || s2lo == 8) {
12414             /* non-Gathering has precedence over Gathering */
12415             ret.attrs = 8;  /* nGRE */
12416         } else {
12417             ret.attrs = 0xc; /* GRE */
12418         }
12419 
12420         /* Any location for which the resultant memory type is any
12421          * type of Device memory is always treated as Outer Shareable.
12422          */
12423         ret.shareability = 2;
12424     } else { /* Normal memory */
12425         /* Outer/inner cacheability combine independently */
12426         ret.attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4
12427                   | combine_cacheattr_nibble(s1lo, s2lo);
12428 
12429         if (ret.attrs == 0x44) {
12430             /* Any location for which the resultant memory type is Normal
12431              * Inner Non-cacheable, Outer Non-cacheable is always treated
12432              * as Outer Shareable.
12433              */
12434             ret.shareability = 2;
12435         }
12436     }
12437 
12438     return ret;
12439 }
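/*
 * Worked example for combine_cacheattrs() (illustrative only): a stage 1
 * attribute of 0xff (Normal, Inner/Outer Write-Back, R/W-allocate) combined
 * with a stage 2 attribute of 0x44 (Normal, Inner/Outer Non-cacheable)
 * yields 0x44, and because the result is Inner and Outer Non-cacheable the
 * shareability is forced to Outer Shareable (2) whatever the inputs were.
 */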
12440 
12441 
12442 /* get_phys_addr - get the physical address for this virtual address
12443  *
12444  * Find the physical address corresponding to the given virtual address,
12445  * by doing a translation table walk on MMU based systems or using the
12446  * MPU state on MPU based systems.
12447  *
12448  * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
12449  * prot and page_size may not be filled in, and the populated fault
12450  * information (*fi) describes why the translation aborted, in terms that
12451  * map onto a DFSR/IFSR fault register format, with the following caveats:
12452  *  * we honour the short vs long DFSR format differences.
12453  *  * the WnR bit is never set (the caller must do this).
12454  *  * for PMSAv5 based systems we don't bother to return a full FSR format
12455  *    value.
12456  *
12457  * @env: CPUARMState
12458  * @address: virtual address to get physical address for
12459  * @access_type: 0 for read, 1 for write, 2 for execute
12460  * @mmu_idx: MMU index indicating required translation regime
12461  * @phys_ptr: set to the physical address corresponding to the virtual address
12462  * @attrs: set to the memory transaction attributes to use
12463  * @prot: set to the permissions for the page containing phys_ptr
12464  * @page_size: set to the size of the page containing phys_ptr
12465  * @fi: set to fault info if the translation fails
12466  * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
12467  */
12468 static bool get_phys_addr(CPUARMState *env, target_ulong address,
12469                           MMUAccessType access_type, ARMMMUIdx mmu_idx,
12470                           hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
12471                           target_ulong *page_size,
12472                           ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
12473 {
12474     if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
12475         /* Call ourselves recursively to do the stage 1 and then stage 2
12476          * translations.
12477          */
12478         if (arm_feature(env, ARM_FEATURE_EL2)) {
12479             hwaddr ipa;
12480             int s2_prot;
12481             int ret;
12482             ARMCacheAttrs cacheattrs2 = {};
12483 
12484             ret = get_phys_addr(env, address, access_type,
12485                                 stage_1_mmu_idx(mmu_idx), &ipa, attrs,
12486                                 prot, page_size, fi, cacheattrs);
12487 
12488             /* If S1 fails or S2 is disabled, return early.  */
12489             if (ret || regime_translation_disabled(env, ARMMMUIdx_S2NS)) {
12490                 *phys_ptr = ipa;
12491                 return ret;
12492             }
12493 
12494             /* S1 is done. Now do S2 translation.  */
12495             ret = get_phys_addr_lpae(env, ipa, access_type, ARMMMUIdx_S2NS,
12496                                      phys_ptr, attrs, &s2_prot,
12497                                      page_size, fi,
12498                                      cacheattrs != NULL ? &cacheattrs2 : NULL);
12499             fi->s2addr = ipa;
12500             /* Combine the S1 and S2 perms.  */
12501             *prot &= s2_prot;
12502 
12503             /* Combine the S1 and S2 cache attributes, if needed */
12504             if (!ret && cacheattrs != NULL) {
12505                 if (env->cp15.hcr_el2 & HCR_DC) {
12506                     /*
12507                      * HCR.DC forces the first stage attributes to
12508                      *  Normal Non-Shareable,
12509                      *  Inner Write-Back Read-Allocate Write-Allocate,
12510                      *  Outer Write-Back Read-Allocate Write-Allocate.
12511                      */
12512                     cacheattrs->attrs = 0xff;
12513                     cacheattrs->shareability = 0;
12514                 }
12515                 *cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2);
12516             }
12517 
12518             return ret;
12519         } else {
12520             /*
12521              * For non-EL2 CPUs a stage1+stage2 translation is just stage 1.
12522              */
12523             mmu_idx = stage_1_mmu_idx(mmu_idx);
12524         }
12525     }
12526 
12527     /* The page table entries may downgrade secure to non-secure, but
12528      * cannot upgrade a non-secure translation regime's attributes
12529      * to secure.
12530      */
12531     attrs->secure = regime_is_secure(env, mmu_idx);
12532     attrs->user = regime_is_user(env, mmu_idx);
12533 
12534     /* Fast Context Switch Extension. This doesn't exist at all in v8.
12535      * In v7 and earlier it affects all stage 1 translations.
12536      */
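    /*
     * FCSEIDR holds the process ID in bits [31:25] with the low bits zero,
     * so adding the register value relocates a VA in the bottom 32MB into
     * the 32MB slot selected by that PID.
     */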
12537     if (address < 0x02000000 && mmu_idx != ARMMMUIdx_S2NS
12538         && !arm_feature(env, ARM_FEATURE_V8)) {
12539         if (regime_el(env, mmu_idx) == 3) {
12540             address += env->cp15.fcseidr_s;
12541         } else {
12542             address += env->cp15.fcseidr_ns;
12543         }
12544     }
12545 
12546     if (arm_feature(env, ARM_FEATURE_PMSA)) {
12547         bool ret;
12548         *page_size = TARGET_PAGE_SIZE;
12549 
12550         if (arm_feature(env, ARM_FEATURE_V8)) {
12551             /* PMSAv8 */
12552             ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
12553                                        phys_ptr, attrs, prot, page_size, fi);
12554         } else if (arm_feature(env, ARM_FEATURE_V7)) {
12555             /* PMSAv7 */
12556             ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
12557                                        phys_ptr, prot, page_size, fi);
12558         } else {
12559             /* Pre-v7 MPU */
12560             ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
12561                                        phys_ptr, prot, fi);
12562         }
12563         qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
12564                       " mmu_idx %u -> %s (prot %c%c%c)\n",
12565                       access_type == MMU_DATA_LOAD ? "reading" :
12566                       (access_type == MMU_DATA_STORE ? "writing" : "execute"),
12567                       (uint32_t)address, mmu_idx,
12568                       ret ? "Miss" : "Hit",
12569                       *prot & PAGE_READ ? 'r' : '-',
12570                       *prot & PAGE_WRITE ? 'w' : '-',
12571                       *prot & PAGE_EXEC ? 'x' : '-');
12572 
12573         return ret;
12574     }
12575 
12576     /* Definitely a real MMU, not an MPU */
12577 
12578     if (regime_translation_disabled(env, mmu_idx)) {
12579         /* MMU disabled. */
12580         *phys_ptr = address;
12581         *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
12582         *page_size = TARGET_PAGE_SIZE;
12583         return false;
12584     }
12585 
12586     if (regime_using_lpae_format(env, mmu_idx)) {
12587         return get_phys_addr_lpae(env, address, access_type, mmu_idx,
12588                                   phys_ptr, attrs, prot, page_size,
12589                                   fi, cacheattrs);
12590     } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
12591         return get_phys_addr_v6(env, address, access_type, mmu_idx,
12592                                 phys_ptr, attrs, prot, page_size, fi);
12593     } else {
12594         return get_phys_addr_v5(env, address, access_type, mmu_idx,
12595                                 phys_ptr, prot, page_size, fi);
12596     }
12597 }
12598 
12599 hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
12600                                          MemTxAttrs *attrs)
12601 {
12602     ARMCPU *cpu = ARM_CPU(cs);
12603     CPUARMState *env = &cpu->env;
12604     hwaddr phys_addr;
12605     target_ulong page_size;
12606     int prot;
12607     bool ret;
12608     ARMMMUFaultInfo fi = {};
12609     ARMMMUIdx mmu_idx = arm_mmu_idx(env);
12610 
12611     *attrs = (MemTxAttrs) {};
12612 
12613     ret = get_phys_addr(env, addr, 0, mmu_idx, &phys_addr,
12614                         attrs, &prot, &page_size, &fi, NULL);
12615 
12616     if (ret) {
12617         return -1;
12618     }
12619     return phys_addr;
12620 }
12621 
12622 uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
12623 {
12624     uint32_t mask;
12625     unsigned el = arm_current_el(env);
12626 
12627     /* First handle registers which unprivileged code can read */
12628 
12629     switch (reg) {
12630     case 0 ... 7: /* xPSR sub-fields */
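        /*
         * SYSm values 0..7 select combinations of the xPSR views
         * (0 APSR, 1 IAPSR, 2 EAPSR, 3 xPSR, 5 IPSR, 6 EPSR, 7 IEPSR):
         * bit 0 requests the IPSR part, a clear bit 2 requests the APSR
         * part, and the EPSR part always reads as zero here.
         */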
12631         mask = 0;
12632         if ((reg & 1) && el) {
12633             mask |= XPSR_EXCP; /* IPSR (unpriv. reads as zero) */
12634         }
12635         if (!(reg & 4)) {
12636             mask |= XPSR_NZCV | XPSR_Q; /* APSR */
12637             if (arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
12638                 mask |= XPSR_GE;
12639             }
12640         }
12641         /* EPSR reads as zero */
12642         return xpsr_read(env) & mask;
12643         break;
12644     case 20: /* CONTROL */
12645     {
12646         uint32_t value = env->v7m.control[env->v7m.secure];
12647         if (!env->v7m.secure) {
12648             /* SFPA is RAZ/WI from NS; FPCA is stored in the M_REG_S bank */
12649             value |= env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK;
12650         }
12651         return value;
12652     }
12653     case 0x94: /* CONTROL_NS */
12654         /* We have to handle this here because unprivileged Secure code
12655          * can read the NS CONTROL register.
12656          */
12657         if (!env->v7m.secure) {
12658             return 0;
12659         }
12660         return env->v7m.control[M_REG_NS] |
12661             (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK);
12662     }
12663 
12664     if (el == 0) {
12665         return 0; /* unprivileged reads others as zero */
12666     }
12667 
12668     if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
12669         switch (reg) {
12670         case 0x88: /* MSP_NS */
12671             if (!env->v7m.secure) {
12672                 return 0;
12673             }
12674             return env->v7m.other_ss_msp;
12675         case 0x89: /* PSP_NS */
12676             if (!env->v7m.secure) {
12677                 return 0;
12678             }
12679             return env->v7m.other_ss_psp;
12680         case 0x8a: /* MSPLIM_NS */
12681             if (!env->v7m.secure) {
12682                 return 0;
12683             }
12684             return env->v7m.msplim[M_REG_NS];
12685         case 0x8b: /* PSPLIM_NS */
12686             if (!env->v7m.secure) {
12687                 return 0;
12688             }
12689             return env->v7m.psplim[M_REG_NS];
12690         case 0x90: /* PRIMASK_NS */
12691             if (!env->v7m.secure) {
12692                 return 0;
12693             }
12694             return env->v7m.primask[M_REG_NS];
12695         case 0x91: /* BASEPRI_NS */
12696             if (!env->v7m.secure) {
12697                 return 0;
12698             }
12699             return env->v7m.basepri[M_REG_NS];
12700         case 0x93: /* FAULTMASK_NS */
12701             if (!env->v7m.secure) {
12702                 return 0;
12703             }
12704             return env->v7m.faultmask[M_REG_NS];
12705         case 0x98: /* SP_NS */
12706         {
12707             /* This gives the non-secure SP selected based on whether we're
12708              * currently in handler mode or not, using the NS CONTROL.SPSEL.
12709              */
12710             bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
12711 
12712             if (!env->v7m.secure) {
12713                 return 0;
12714             }
12715             if (!arm_v7m_is_handler_mode(env) && spsel) {
12716                 return env->v7m.other_ss_psp;
12717             } else {
12718                 return env->v7m.other_ss_msp;
12719             }
12720         }
12721         default:
12722             break;
12723         }
12724     }
12725 
12726     switch (reg) {
12727     case 8: /* MSP */
12728         return v7m_using_psp(env) ? env->v7m.other_sp : env->regs[13];
12729     case 9: /* PSP */
12730         return v7m_using_psp(env) ? env->regs[13] : env->v7m.other_sp;
12731     case 10: /* MSPLIM */
12732         if (!arm_feature(env, ARM_FEATURE_V8)) {
12733             goto bad_reg;
12734         }
12735         return env->v7m.msplim[env->v7m.secure];
12736     case 11: /* PSPLIM */
12737         if (!arm_feature(env, ARM_FEATURE_V8)) {
12738             goto bad_reg;
12739         }
12740         return env->v7m.psplim[env->v7m.secure];
12741     case 16: /* PRIMASK */
12742         return env->v7m.primask[env->v7m.secure];
12743     case 17: /* BASEPRI */
12744     case 18: /* BASEPRI_MAX */
12745         return env->v7m.basepri[env->v7m.secure];
12746     case 19: /* FAULTMASK */
12747         return env->v7m.faultmask[env->v7m.secure];
12748     default:
12749     bad_reg:
12750         qemu_log_mask(LOG_GUEST_ERROR, "Attempt to read unknown special"
12751                                        " register %d\n", reg);
12752         return 0;
12753     }
12754 }
12755 
12756 void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
12757 {
12758     /* We're passed bits [11..0] of the instruction; extract
12759      * SYSm and the mask bits.
12760      * Invalid combinations of SYSm and mask are UNPREDICTABLE;
12761      * we choose to treat them as if the mask bits were valid.
12762      * NB that the pseudocode 'mask' variable is bits [11..10],
12763      * whereas ours is [11..8].
12764      */
12765     uint32_t mask = extract32(maskreg, 8, 4);
12766     uint32_t reg = extract32(maskreg, 0, 8);
12767     int cur_el = arm_current_el(env);
12768 
12769     if (cur_el == 0 && reg > 7 && reg != 20) {
12770         /*
12771          * only xPSR sub-fields and CONTROL.SFPA may be written by
12772          * unprivileged code
12773          */
12774         return;
12775     }
12776 
12777     if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
12778         switch (reg) {
12779         case 0x88: /* MSP_NS */
12780             if (!env->v7m.secure) {
12781                 return;
12782             }
12783             env->v7m.other_ss_msp = val;
12784             return;
12785         case 0x89: /* PSP_NS */
12786             if (!env->v7m.secure) {
12787                 return;
12788             }
12789             env->v7m.other_ss_psp = val;
12790             return;
12791         case 0x8a: /* MSPLIM_NS */
12792             if (!env->v7m.secure) {
12793                 return;
12794             }
12795             env->v7m.msplim[M_REG_NS] = val & ~7;
12796             return;
12797         case 0x8b: /* PSPLIM_NS */
12798             if (!env->v7m.secure) {
12799                 return;
12800             }
12801             env->v7m.psplim[M_REG_NS] = val & ~7;
12802             return;
12803         case 0x90: /* PRIMASK_NS */
12804             if (!env->v7m.secure) {
12805                 return;
12806             }
12807             env->v7m.primask[M_REG_NS] = val & 1;
12808             return;
12809         case 0x91: /* BASEPRI_NS */
12810             if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) {
12811                 return;
12812             }
12813             env->v7m.basepri[M_REG_NS] = val & 0xff;
12814             return;
12815         case 0x93: /* FAULTMASK_NS */
12816             if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) {
12817                 return;
12818             }
12819             env->v7m.faultmask[M_REG_NS] = val & 1;
12820             return;
12821         case 0x94: /* CONTROL_NS */
12822             if (!env->v7m.secure) {
12823                 return;
12824             }
12825             write_v7m_control_spsel_for_secstate(env,
12826                                                  val & R_V7M_CONTROL_SPSEL_MASK,
12827                                                  M_REG_NS);
12828             if (arm_feature(env, ARM_FEATURE_M_MAIN)) {
12829                 env->v7m.control[M_REG_NS] &= ~R_V7M_CONTROL_NPRIV_MASK;
12830                 env->v7m.control[M_REG_NS] |= val & R_V7M_CONTROL_NPRIV_MASK;
12831             }
12832             /*
12833              * SFPA is RAZ/WI from NS. FPCA is RO if NSACR.CP10 == 0,
12834              * RES0 if the FPU is not present, and is stored in the S bank
12835              */
12836             if (arm_feature(env, ARM_FEATURE_VFP) &&
12837                 extract32(env->v7m.nsacr, 10, 1)) {
12838                 env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
12839                 env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_FPCA_MASK;
12840             }
12841             return;
12842         case 0x98: /* SP_NS */
12843         {
12844             /* This gives the non-secure SP selected based on whether we're
12845              * currently in handler mode or not, using the NS CONTROL.SPSEL.
12846              */
12847             bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
12848             bool is_psp = !arm_v7m_is_handler_mode(env) && spsel;
12849             uint32_t limit;
12850 
12851             if (!env->v7m.secure) {
12852                 return;
12853             }
12854 
12855             limit = is_psp ? env->v7m.psplim[false] : env->v7m.msplim[false];
12856 
12857             if (val < limit) {
12858                 CPUState *cs = CPU(arm_env_get_cpu(env));
12859 
12860                 cpu_restore_state(cs, GETPC(), true);
12861                 raise_exception(env, EXCP_STKOF, 0, 1);
12862             }
12863 
12864             if (is_psp) {
12865                 env->v7m.other_ss_psp = val;
12866             } else {
12867                 env->v7m.other_ss_msp = val;
12868             }
12869             return;
12870         }
12871         default:
12872             break;
12873         }
12874     }
12875 
12876     switch (reg) {
12877     case 0 ... 7: /* xPSR sub-fields */
12878         /* only APSR is actually writable */
12879         if (!(reg & 4)) {
12880             uint32_t apsrmask = 0;
12881 
12882             if (mask & 8) {
12883                 apsrmask |= XPSR_NZCV | XPSR_Q;
12884             }
12885             if ((mask & 4) && arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
12886                 apsrmask |= XPSR_GE;
12887             }
12888             xpsr_write(env, val, apsrmask);
12889         }
12890         break;
12891     case 8: /* MSP */
12892         if (v7m_using_psp(env)) {
12893             env->v7m.other_sp = val;
12894         } else {
12895             env->regs[13] = val;
12896         }
12897         break;
12898     case 9: /* PSP */
12899         if (v7m_using_psp(env)) {
12900             env->regs[13] = val;
12901         } else {
12902             env->v7m.other_sp = val;
12903         }
12904         break;
12905     case 10: /* MSPLIM */
12906         if (!arm_feature(env, ARM_FEATURE_V8)) {
12907             goto bad_reg;
12908         }
12909         env->v7m.msplim[env->v7m.secure] = val & ~7;
12910         break;
12911     case 11: /* PSPLIM */
12912         if (!arm_feature(env, ARM_FEATURE_V8)) {
12913             goto bad_reg;
12914         }
12915         env->v7m.psplim[env->v7m.secure] = val & ~7;
12916         break;
12917     case 16: /* PRIMASK */
12918         env->v7m.primask[env->v7m.secure] = val & 1;
12919         break;
12920     case 17: /* BASEPRI */
12921         if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
12922             goto bad_reg;
12923         }
12924         env->v7m.basepri[env->v7m.secure] = val & 0xff;
12925         break;
12926     case 18: /* BASEPRI_MAX */
12927         if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
12928             goto bad_reg;
12929         }
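        /*
         * BASEPRI_MAX only ever raises priority: the write takes effect only
         * if the new value is non-zero and is either numerically lower than
         * the current BASEPRI or BASEPRI is currently 0 (disabled).
         */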
12930         val &= 0xff;
12931         if (val != 0 && (val < env->v7m.basepri[env->v7m.secure]
12932                          || env->v7m.basepri[env->v7m.secure] == 0)) {
12933             env->v7m.basepri[env->v7m.secure] = val;
12934         }
12935         break;
12936     case 19: /* FAULTMASK */
12937         if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
12938             goto bad_reg;
12939         }
12940         env->v7m.faultmask[env->v7m.secure] = val & 1;
12941         break;
12942     case 20: /* CONTROL */
12943         /*
12944          * Writing to the SPSEL bit only has an effect if we are in
12945          * thread mode; other bits can be updated by any privileged code.
12946          * write_v7m_control_spsel() deals with updating the SPSEL bit in
12947          * env->v7m.control, so we only need to update the others.
12948          * For v7M, we must just ignore explicit writes to SPSEL in handler
12949          * mode; for v8M the write is permitted but will have no effect.
12950          * All these bits are writes-ignored from non-privileged code,
12951          * except for SFPA.
12952          */
12953         if (cur_el > 0 && (arm_feature(env, ARM_FEATURE_V8) ||
12954                            !arm_v7m_is_handler_mode(env))) {
12955             write_v7m_control_spsel(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0);
12956         }
12957         if (cur_el > 0 && arm_feature(env, ARM_FEATURE_M_MAIN)) {
12958             env->v7m.control[env->v7m.secure] &= ~R_V7M_CONTROL_NPRIV_MASK;
12959             env->v7m.control[env->v7m.secure] |= val & R_V7M_CONTROL_NPRIV_MASK;
12960         }
12961         if (arm_feature(env, ARM_FEATURE_VFP)) {
12962             /*
12963              * SFPA is RAZ/WI from NS or if no FPU.
12964              * FPCA is RO if NSACR.CP10 == 0, RES0 if the FPU is not present.
12965              * Both are stored in the S bank.
12966              */
12967             if (env->v7m.secure) {
12968                 env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
12969                 env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_SFPA_MASK;
12970             }
12971             if (cur_el > 0 &&
12972                 (env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_SECURITY) ||
12973                  extract32(env->v7m.nsacr, 10, 1))) {
12974                 env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
12975                 env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_FPCA_MASK;
12976             }
12977         }
12978         break;
12979     default:
12980     bad_reg:
12981         qemu_log_mask(LOG_GUEST_ERROR, "Attempt to write unknown special"
12982                                        " register %d\n", reg);
12983         return;
12984     }
12985 }
12986 
12987 uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
12988 {
12989     /* Implement the TT instruction. op is bits [7:6] of the insn. */
12990     bool forceunpriv = op & 1;
12991     bool alt = op & 2;
12992     V8M_SAttributes sattrs = {};
12993     uint32_t tt_resp;
12994     bool r, rw, nsr, nsrw, mrvalid;
12995     int prot;
12996     ARMMMUFaultInfo fi = {};
12997     MemTxAttrs attrs = {};
12998     hwaddr phys_addr;
12999     ARMMMUIdx mmu_idx;
13000     uint32_t mregion;
13001     bool targetpriv;
13002     bool targetsec = env->v7m.secure;
13003     bool is_subpage;
13004 
13005     /* Work out which security state and privilege level we're
13006      * interested in...
13007      */
13008     if (alt) {
13009         targetsec = !targetsec;
13010     }
13011 
13012     if (forceunpriv) {
13013         targetpriv = false;
13014     } else {
13015         targetpriv = arm_v7m_is_handler_mode(env) ||
13016             !(env->v7m.control[targetsec] & R_V7M_CONTROL_NPRIV_MASK);
13017     }
13018 
13019     /* ...and then figure out which MMU index this is */
13020     mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targetsec, targetpriv);
13021 
13022     /* We know that the MPU and SAU don't care about the access type
13023      * for our purposes beyond that we don't want to claim to be
13024      * an insn fetch, so we arbitrarily call this a read.
13025      */
13026 
13027     /* MPU region info only available for privileged or if
13028      * inspecting the other MPU state.
13029      */
13030     if (arm_current_el(env) != 0 || alt) {
13031         /* We can ignore the return value as prot is always set */
13032         pmsav8_mpu_lookup(env, addr, MMU_DATA_LOAD, mmu_idx,
13033                           &phys_addr, &attrs, &prot, &is_subpage,
13034                           &fi, &mregion);
13035         if (mregion == -1) {
13036             mrvalid = false;
13037             mregion = 0;
13038         } else {
13039             mrvalid = true;
13040         }
13041         r = prot & PAGE_READ;
13042         rw = prot & PAGE_WRITE;
13043     } else {
13044         r = false;
13045         rw = false;
13046         mrvalid = false;
13047         mregion = 0;
13048     }
13049 
13050     if (env->v7m.secure) {
13051         v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
13052         nsr = sattrs.ns && r;
13053         nsrw = sattrs.ns && rw;
13054     } else {
13055         sattrs.ns = true;
13056         nsr = false;
13057         nsrw = false;
13058     }
13059 
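    /*
     * Assemble the TT response word: IREGION[31:24], IRVALID[23], S[22],
     * NSRW[21], NSR[20], RW[19], R[18], SRVALID[17], MRVALID[16],
     * SREGION[15:8], MREGION[7:0].
     */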
13060     tt_resp = (sattrs.iregion << 24) |
13061         (sattrs.irvalid << 23) |
13062         ((!sattrs.ns) << 22) |
13063         (nsrw << 21) |
13064         (nsr << 20) |
13065         (rw << 19) |
13066         (r << 18) |
13067         (sattrs.srvalid << 17) |
13068         (mrvalid << 16) |
13069         (sattrs.sregion << 8) |
13070         mregion;
13071 
13072     return tt_resp;
13073 }
13074 
13075 #endif
13076 
13077 bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
13078                       MMUAccessType access_type, int mmu_idx,
13079                       bool probe, uintptr_t retaddr)
13080 {
13081     ARMCPU *cpu = ARM_CPU(cs);
13082 
13083 #ifdef CONFIG_USER_ONLY
13084     cpu->env.exception.vaddress = address;
13085     if (access_type == MMU_INST_FETCH) {
13086         cs->exception_index = EXCP_PREFETCH_ABORT;
13087     } else {
13088         cs->exception_index = EXCP_DATA_ABORT;
13089     }
13090     cpu_loop_exit_restore(cs, retaddr);
13091 #else
13092     hwaddr phys_addr;
13093     target_ulong page_size;
13094     int prot, ret;
13095     MemTxAttrs attrs = {};
13096     ARMMMUFaultInfo fi = {};
13097 
13098     /*
13099      * Walk the page table and (if the mapping exists) add the page
13100      * to the TLB.  On success, return true.  Otherwise, if probing,
13101      * return false.  Otherwise fill in the fault information, from which
13102      * the ARM DFSR/IFSR fault register format is derived, and signal the fault.
13103      */
13104     ret = get_phys_addr(&cpu->env, address, access_type,
13105                         core_to_arm_mmu_idx(&cpu->env, mmu_idx),
13106                         &phys_addr, &attrs, &prot, &page_size, &fi, NULL);
13107     if (likely(!ret)) {
13108         /*
13109          * Map a single [sub]page. Regions smaller than our declared
13110          * target page size are handled specially, so for those we
13111          * pass in the exact addresses.
13112          */
13113         if (page_size >= TARGET_PAGE_SIZE) {
13114             phys_addr &= TARGET_PAGE_MASK;
13115             address &= TARGET_PAGE_MASK;
13116         }
13117         tlb_set_page_with_attrs(cs, address, phys_addr, attrs,
13118                                 prot, mmu_idx, page_size);
13119         return true;
13120     } else if (probe) {
13121         return false;
13122     } else {
13123         /* now we have a real cpu fault */
13124         cpu_restore_state(cs, retaddr, true);
13125         arm_deliver_fault(cpu, address, access_type, mmu_idx, &fi);
13126     }
13127 #endif
13128 }
13129 
13130 void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)
13131 {
13132     /* Implement DC ZVA, which zeroes a fixed-length block of memory.
13133      * Note that we do not implement the (architecturally mandated)
13134      * alignment fault for attempts to use this on Device memory
13135      * (which matches the usual QEMU behaviour of not implementing either
13136      * alignment faults or any memory attribute handling).
13137      */
13138 
13139     ARMCPU *cpu = arm_env_get_cpu(env);
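    /*
     * dcz_blocksize is the DCZID_EL0.BS field: the log2 of the block size
     * in words, so the block length in bytes is 4 << BS.
     */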
13140     uint64_t blocklen = 4 << cpu->dcz_blocksize;
13141     uint64_t vaddr = vaddr_in & ~(blocklen - 1);
13142 
13143 #ifndef CONFIG_USER_ONLY
13144     {
13145         /* Slightly awkwardly, QEMU's TARGET_PAGE_SIZE may be less than
13146          * the block size so we might have to do more than one TLB lookup.
13147          * We know that in fact for any v8 CPU the page size is at least 4K
13148          * and the block size must be 2K or less, but TARGET_PAGE_SIZE is only
13149          * 1K as an artefact of legacy v5 subpage support being present in the
13150          * same QEMU executable. So in practice the hostaddr[] array has
13151          * two entries, given the current setting of TARGET_PAGE_BITS_MIN.
13152          */
13153         int maxidx = DIV_ROUND_UP(blocklen, TARGET_PAGE_SIZE);
13154         void *hostaddr[DIV_ROUND_UP(2 * KiB, 1 << TARGET_PAGE_BITS_MIN)];
13155         int try, i;
13156         unsigned mmu_idx = cpu_mmu_index(env, false);
13157         TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
13158 
13159         assert(maxidx <= ARRAY_SIZE(hostaddr));
13160 
13161         for (try = 0; try < 2; try++) {
13162 
13163             for (i = 0; i < maxidx; i++) {
13164                 hostaddr[i] = tlb_vaddr_to_host(env,
13165                                                 vaddr + TARGET_PAGE_SIZE * i,
13166                                                 1, mmu_idx);
13167                 if (!hostaddr[i]) {
13168                     break;
13169                 }
13170             }
13171             if (i == maxidx) {
13172                 /* If it's all in the TLB it's fair game for just writing to;
13173                  * we know we don't need to update dirty status, etc.
13174                  */
13175                 for (i = 0; i < maxidx - 1; i++) {
13176                     memset(hostaddr[i], 0, TARGET_PAGE_SIZE);
13177                 }
13178                 memset(hostaddr[i], 0, blocklen - (i * TARGET_PAGE_SIZE));
13179                 return;
13180             }
13181             /* OK, try a store and see if we can populate the tlb. This
13182              * might cause an exception if the memory isn't writable,
13183              * in which case we will longjmp out of here. We must for
13184              * this purpose use the actual register value passed to us
13185              * so that we get the fault address right.
13186              */
13187             helper_ret_stb_mmu(env, vaddr_in, 0, oi, GETPC());
13188             /* Now we can populate the other TLB entries, if any */
13189             for (i = 0; i < maxidx; i++) {
13190                 uint64_t va = vaddr + TARGET_PAGE_SIZE * i;
13191                 if (va != (vaddr_in & TARGET_PAGE_MASK)) {
13192                     helper_ret_stb_mmu(env, va, 0, oi, GETPC());
13193                 }
13194             }
13195         }
13196 
13197         /* Slow path (probably attempt to do this to an I/O device or
13198          * similar, or clearing of a block of code we have translations
13199          * cached for). Just do a series of byte writes as the architecture
13200          * demands. It's not worth trying to use a cpu_physical_memory_map(),
13201          * memset(), unmap() sequence here because:
13202          *  + we'd need to account for the blocksize being larger than a page
13203          *  + the direct-RAM access case is almost always going to be dealt
13204          *    with in the fastpath code above, so there's no speed benefit
13205          *  + we would have to deal with the map returning NULL because the
13206          *    bounce buffer was in use
13207          */
13208         for (i = 0; i < blocklen; i++) {
13209             helper_ret_stb_mmu(env, vaddr + i, 0, oi, GETPC());
13210         }
13211     }
13212 #else
13213     memset(g2h(vaddr), 0, blocklen);
13214 #endif
13215 }
13216 
13217 /* Note that signed overflow is undefined in C.  The following routines are
13218    careful to use unsigned types where modulo arithmetic is required.
13219    Failure to do so _will_ break on newer gcc.  */
13220 
13221 /* Signed saturating arithmetic.  */
13222 
13223 /* Perform 16-bit signed saturating addition.  */
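/*
 * Signed overflow occurred iff the operands have the same sign but the
 * result's sign differs from the first operand; on overflow we saturate
 * towards the sign of the first operand (0x8000 negative, 0x7fff positive).
 * The subtraction variants below apply the analogous test with operands of
 * opposite sign.
 */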
13224 static inline uint16_t add16_sat(uint16_t a, uint16_t b)
13225 {
13226     uint16_t res;
13227 
13228     res = a + b;
13229     if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
13230         if (a & 0x8000)
13231             res = 0x8000;
13232         else
13233             res = 0x7fff;
13234     }
13235     return res;
13236 }
13237 
13238 /* Perform 8-bit signed saturating addition.  */
13239 static inline uint8_t add8_sat(uint8_t a, uint8_t b)
13240 {
13241     uint8_t res;
13242 
13243     res = a + b;
13244     if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
13245         if (a & 0x80)
13246             res = 0x80;
13247         else
13248             res = 0x7f;
13249     }
13250     return res;
13251 }
13252 
13253 /* Perform 16-bit signed saturating subtraction.  */
13254 static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
13255 {
13256     uint16_t res;
13257 
13258     res = a - b;
13259     if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
13260         if (a & 0x8000)
13261             res = 0x8000;
13262         else
13263             res = 0x7fff;
13264     }
13265     return res;
13266 }
13267 
13268 /* Perform 8-bit signed saturating subtraction.  */
13269 static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
13270 {
13271     uint8_t res;
13272 
13273     res = a - b;
13274     if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
13275         if (a & 0x80)
13276             res = 0x80;
13277         else
13278             res = 0x7f;
13279     }
13280     return res;
13281 }
13282 
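/*
 * Each of the following blocks defines ADD16/SUB16/ADD8/SUB8 and PFX and
 * then includes op_addsub.h, which expands them into the per-lane parallel
 * add/subtract helpers for that prefix (q, uq, s, u, sh, uh) and #undefs
 * the macros again ready for the next block.
 */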
13283 #define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
13284 #define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
13285 #define ADD8(a, b, n)  RESULT(add8_sat(a, b), n, 8);
13286 #define SUB8(a, b, n)  RESULT(sub8_sat(a, b), n, 8);
13287 #define PFX q
13288 
13289 #include "op_addsub.h"
13290 
13291 /* Unsigned saturating arithmetic.  */
13292 static inline uint16_t add16_usat(uint16_t a, uint16_t b)
13293 {
13294     uint16_t res;
13295     res = a + b;
13296     if (res < a)
13297         res = 0xffff;
13298     return res;
13299 }
13300 
13301 static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
13302 {
13303     if (a > b)
13304         return a - b;
13305     else
13306         return 0;
13307 }
13308 
13309 static inline uint8_t add8_usat(uint8_t a, uint8_t b)
13310 {
13311     uint8_t res;
13312     res = a + b;
13313     if (res < a)
13314         res = 0xff;
13315     return res;
13316 }
13317 
13318 static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
13319 {
13320     if (a > b)
13321         return a - b;
13322     else
13323         return 0;
13324 }
13325 
13326 #define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
13327 #define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
13328 #define ADD8(a, b, n)  RESULT(add8_usat(a, b), n, 8);
13329 #define SUB8(a, b, n)  RESULT(sub8_usat(a, b), n, 8);
13330 #define PFX uq
13331 
13332 #include "op_addsub.h"
13333 
13334 /* Signed modulo arithmetic.  */
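/*
 * The GE-setting variants below also accumulate the APSR.GE flags in 'ge':
 * two bits per 16-bit lane, one bit per 8-bit lane. The signed ops set them
 * when the lane result is >= 0; the unsigned ops set them when the addition
 * carries out (or the subtraction does not borrow). The SEL instruction,
 * see HELPER(sel_flags) further down, consumes these flags.
 */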
13335 #define SARITH16(a, b, n, op) do { \
13336     int32_t sum; \
13337     sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
13338     RESULT(sum, n, 16); \
13339     if (sum >= 0) \
13340         ge |= 3 << (n * 2); \
13341     } while(0)
13342 
13343 #define SARITH8(a, b, n, op) do { \
13344     int32_t sum; \
13345     sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
13346     RESULT(sum, n, 8); \
13347     if (sum >= 0) \
13348         ge |= 1 << n; \
13349     } while(0)
13350 
13351 
13352 #define ADD16(a, b, n) SARITH16(a, b, n, +)
13353 #define SUB16(a, b, n) SARITH16(a, b, n, -)
13354 #define ADD8(a, b, n)  SARITH8(a, b, n, +)
13355 #define SUB8(a, b, n)  SARITH8(a, b, n, -)
13356 #define PFX s
13357 #define ARITH_GE
13358 
13359 #include "op_addsub.h"
13360 
13361 /* Unsigned modulo arithmetic.  */
13362 #define ADD16(a, b, n) do { \
13363     uint32_t sum; \
13364     sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
13365     RESULT(sum, n, 16); \
13366     if ((sum >> 16) == 1) \
13367         ge |= 3 << (n * 2); \
13368     } while(0)
13369 
13370 #define ADD8(a, b, n) do { \
13371     uint32_t sum; \
13372     sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
13373     RESULT(sum, n, 8); \
13374     if ((sum >> 8) == 1) \
13375         ge |= 1 << n; \
13376     } while(0)
13377 
13378 #define SUB16(a, b, n) do { \
13379     uint32_t sum; \
13380     sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
13381     RESULT(sum, n, 16); \
13382     if ((sum >> 16) == 0) \
13383         ge |= 3 << (n * 2); \
13384     } while(0)
13385 
13386 #define SUB8(a, b, n) do { \
13387     uint32_t sum; \
13388     sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
13389     RESULT(sum, n, 8); \
13390     if ((sum >> 8) == 0) \
13391         ge |= 1 << n; \
13392     } while(0)
13393 
13394 #define PFX u
13395 #define ARITH_GE
13396 
13397 #include "op_addsub.h"
13398 
13399 /* Halved signed arithmetic.  */
13400 #define ADD16(a, b, n) \
13401   RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
13402 #define SUB16(a, b, n) \
13403   RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
13404 #define ADD8(a, b, n) \
13405   RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
13406 #define SUB8(a, b, n) \
13407   RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
13408 #define PFX sh
13409 
13410 #include "op_addsub.h"
13411 
13412 /* Halved unsigned arithmetic.  */
13413 #define ADD16(a, b, n) \
13414   RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
13415 #define SUB16(a, b, n) \
13416   RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
13417 #define ADD8(a, b, n) \
13418   RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
13419 #define SUB8(a, b, n) \
13420   RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
13421 #define PFX uh
13422 
13423 #include "op_addsub.h"
13424 
13425 static inline uint8_t do_usad(uint8_t a, uint8_t b)
13426 {
13427     if (a > b)
13428         return a - b;
13429     else
13430         return b - a;
13431 }
13432 
13433 /* Unsigned sum of absolute byte differences.  */
13434 uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
13435 {
13436     uint32_t sum;
13437     sum = do_usad(a, b);
13438     sum += do_usad(a >> 8, b >> 8);
13439     sum += do_usad(a >> 16, b >> 16);
13440     sum += do_usad(a >> 24, b >> 24);
13441     return sum;
13442 }
13443 
13444 /* For ARMv6 SEL instruction.  */
13445 uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
13446 {
13447     uint32_t mask;
13448 
13449     mask = 0;
13450     if (flags & 1)
13451         mask |= 0xff;
13452     if (flags & 2)
13453         mask |= 0xff00;
13454     if (flags & 4)
13455         mask |= 0xff0000;
13456     if (flags & 8)
13457         mask |= 0xff000000;
13458     return (a & mask) | (b & ~mask);
13459 }
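
/*
 * Informative example: with flags == 0x5 (GE bits 0 and 2 set) the mask is
 * 0x00ff00ff, so bytes 0 and 2 come from 'a' and bytes 1 and 3 from 'b':
 * sel_flags(0x5, 0x11223344, 0xaabbccdd) == 0xaa22cc44.
 */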
13460 
13461 /* CRC helpers.
13462  * The upper bytes of val (above the number specified by 'bytes') must have
13463  * been zeroed out by the caller.
13464  */
13465 uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes)
13466 {
13467     uint8_t buf[4];
13468 
13469     stl_le_p(buf, val);
13470 
13471     /* zlib crc32 converts the accumulator and output to one's complement.  */
13472     return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
13473 }
13474 
13475 uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
13476 {
13477     uint8_t buf[4];
13478 
13479     stl_le_p(buf, val);
13480 
13481     /* Linux crc32c converts the output to one's complement.  */
13482     return crc32c(acc, buf, bytes) ^ 0xffffffff;
13483 }
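
/*
 * The translator is expected to pass bytes == 1, 2 or 4 for the byte,
 * halfword and word forms of the CRC32/CRC32C instructions, having
 * zero-extended val as described above; e.g. a guest CRC32B passes the
 * zero-extended source byte with bytes == 1.
 */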
13484 
13485 /* Return the exception level to which FP-disabled exceptions should
13486  * be taken, or 0 if FP is enabled.
13487  */
13488 int fp_exception_el(CPUARMState *env, int cur_el)
13489 {
13490 #ifndef CONFIG_USER_ONLY
13491     int fpen;
13492 
13493     /* CPACR and the CPTR registers don't exist before v6, so FP is
13494      * always accessible
13495      */
13496     if (!arm_feature(env, ARM_FEATURE_V6)) {
13497         return 0;
13498     }
13499 
13500     if (arm_feature(env, ARM_FEATURE_M)) {
13501         /* CPACR can cause a NOCP UsageFault taken to current security state */
13502         if (!v7m_cpacr_pass(env, env->v7m.secure, cur_el != 0)) {
13503             return 1;
13504         }
13505 
13506         if (arm_feature(env, ARM_FEATURE_M_SECURITY) && !env->v7m.secure) {
13507             if (!extract32(env->v7m.nsacr, 10, 1)) {
13508                 /* FP insns cause a NOCP UsageFault taken to Secure */
13509                 return 3;
13510             }
13511         }
13512 
13513         return 0;
13514     }
13515 
13516     /* The CPACR controls traps to EL1, or PL1 if we're 32 bit:
13517      * 0, 2 : trap EL0 and EL1/PL1 accesses
13518      * 1    : trap only EL0 accesses
13519      * 3    : trap no accesses
13520      */
13521     fpen = extract32(env->cp15.cpacr_el1, 20, 2);
13522     switch (fpen) {
13523     case 0:
13524     case 2:
13525         if (cur_el == 0 || cur_el == 1) {
13526             /* Trap to PL1, which might be EL1 or EL3 */
13527             if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
13528                 return 3;
13529             }
13530             return 1;
13531         }
13532         if (cur_el == 3 && !is_a64(env)) {
13533             /* Secure PL1 running at EL3 */
13534             return 3;
13535         }
13536         break;
13537     case 1:
13538         if (cur_el == 0) {
13539             return 1;
13540         }
13541         break;
13542     case 3:
13543         break;
13544     }
13545 
13546     /* For the CPTR registers we don't need to guard with an ARM_FEATURE
13547      * check because zero bits in the registers mean "don't trap".
13548      */
13549 
13550     /* CPTR_EL2 : present in v7VE or v8 */
13551     if (cur_el <= 2 && extract32(env->cp15.cptr_el[2], 10, 1)
13552         && !arm_is_secure_below_el3(env)) {
13553         /* Trap FP ops at EL2, NS-EL1 or NS-EL0 to EL2 */
13554         return 2;
13555     }
13556 
13557     /* CPTR_EL3 : present in v8 */
13558     if (extract32(env->cp15.cptr_el[3], 10, 1)) {
13559         /* Trap all FP ops to EL3 */
13560         return 3;
13561     }
13562 #endif
13563     return 0;
13564 }
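
/*
 * For example, with CPACR_EL1.FPEN == 1 an FP instruction at EL0 reports 1
 * (trap to EL1) while the same instruction at EL1 reports 0, unless
 * CPTR_EL2.TFP or CPTR_EL3.TFP additionally traps it to EL2 or EL3.
 */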
13565 
13566 ARMMMUIdx arm_v7m_mmu_idx_all(CPUARMState *env,
13567                               bool secstate, bool priv, bool negpri)
13568 {
13569     ARMMMUIdx mmu_idx = ARM_MMU_IDX_M;
13570 
13571     if (priv) {
13572         mmu_idx |= ARM_MMU_IDX_M_PRIV;
13573     }
13574 
13575     if (negpri) {
13576         mmu_idx |= ARM_MMU_IDX_M_NEGPRI;
13577     }
13578 
13579     if (secstate) {
13580         mmu_idx |= ARM_MMU_IDX_M_S;
13581     }
13582 
13583     return mmu_idx;
13584 }
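
/*
 * For example, a Secure, privileged, non-negative-priority state yields
 * ARM_MMU_IDX_M | ARM_MMU_IDX_M_PRIV | ARM_MMU_IDX_M_S, i.e. the index
 * cpu.h defines as ARMMMUIdx_MSPriv.
 */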
13585 
13586 ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
13587                                                 bool secstate, bool priv)
13588 {
13589     bool negpri = armv7m_nvic_neg_prio_requested(env->nvic, secstate);
13590 
13591     return arm_v7m_mmu_idx_all(env, secstate, priv, negpri);
13592 }
13593 
13594 /* Return the MMU index for a v7M CPU in the specified security state */
13595 ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
13596 {
13597     bool priv = arm_current_el(env) != 0;
13598 
13599     return arm_v7m_mmu_idx_for_secstate_and_priv(env, secstate, priv);
13600 }
13601 
13602 ARMMMUIdx arm_mmu_idx(CPUARMState *env)
13603 {
13604     int el;
13605 
13606     if (arm_feature(env, ARM_FEATURE_M)) {
13607         return arm_v7m_mmu_idx_for_secstate(env, env->v7m.secure);
13608     }
13609 
13610     el = arm_current_el(env);
13611     if (el < 2 && arm_is_secure_below_el3(env)) {
13612         return ARMMMUIdx_S1SE0 + el;
13613     } else {
13614         return ARMMMUIdx_S12NSE0 + el;
13615     }
13616 }
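
/*
 * For example, a non-M-profile CPU in Non-secure EL1 yields
 * ARMMMUIdx_S12NSE0 + 1 (ARMMMUIdx_S12NSE1), while Secure EL0 below EL3
 * yields ARMMMUIdx_S1SE0.
 */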
13617 
13618 int cpu_mmu_index(CPUARMState *env, bool ifetch)
13619 {
13620     return arm_to_core_mmu_idx(arm_mmu_idx(env));
13621 }
13622 
13623 #ifndef CONFIG_USER_ONLY
13624 ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
13625 {
13626     return stage_1_mmu_idx(arm_mmu_idx(env));
13627 }
13628 #endif
13629 
13630 void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
13631                           target_ulong *cs_base, uint32_t *pflags)
13632 {
13633     ARMMMUIdx mmu_idx = arm_mmu_idx(env);
13634     int current_el = arm_current_el(env);
13635     int fp_el = fp_exception_el(env, current_el);
13636     uint32_t flags = 0;
13637 
13638     if (is_a64(env)) {
13639         ARMCPU *cpu = arm_env_get_cpu(env);
13640         uint64_t sctlr;
13641 
13642         *pc = env->pc;
13643         flags = FIELD_DP32(flags, TBFLAG_ANY, AARCH64_STATE, 1);
13644 
13645         /* Get control bits for tagged addresses.  */
13646         {
13647             ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx);
13648             ARMVAParameters p0 = aa64_va_parameters_both(env, 0, stage1);
13649             int tbii, tbid;
13650 
13651             /* FIXME: ARMv8.1-VHE S2 translation regime.  */
13652             if (regime_el(env, stage1) < 2) {
13653                 ARMVAParameters p1 = aa64_va_parameters_both(env, -1, stage1);
13654                 tbid = (p1.tbi << 1) | p0.tbi;
13655                 tbii = tbid & ~((p1.tbid << 1) | p0.tbid);
13656             } else {
13657                 tbid = p0.tbi;
13658                 tbii = tbid & !p0.tbid;
13659             }
13660 
13661             flags = FIELD_DP32(flags, TBFLAG_A64, TBII, tbii);
13662             flags = FIELD_DP32(flags, TBFLAG_A64, TBID, tbid);
13663         }
13664 
13665         if (cpu_isar_feature(aa64_sve, cpu)) {
13666             int sve_el = sve_exception_el(env, current_el);
13667             uint32_t zcr_len;
13668 
13669             /* If SVE is disabled, but FP is enabled,
13670              * then the effective len is 0.
13671              */
13672             if (sve_el != 0 && fp_el == 0) {
13673                 zcr_len = 0;
13674             } else {
13675                 zcr_len = sve_zcr_len_for_el(env, current_el);
13676             }
13677             flags = FIELD_DP32(flags, TBFLAG_A64, SVEEXC_EL, sve_el);
13678             flags = FIELD_DP32(flags, TBFLAG_A64, ZCR_LEN, zcr_len);
13679         }
13680 
13681         sctlr = arm_sctlr(env, current_el);
13682 
13683         if (cpu_isar_feature(aa64_pauth, cpu)) {
13684             /*
13685              * In order to save space in flags, we record only whether
13686              * pauth is "inactive", meaning all insns are implemented as
13687              * a nop, or "active" when some action must be performed.
13688              * The decision of which action to take is left to a helper.
13689              */
13690             if (sctlr & (SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)) {
13691                 flags = FIELD_DP32(flags, TBFLAG_A64, PAUTH_ACTIVE, 1);
13692             }
13693         }
13694 
13695         if (cpu_isar_feature(aa64_bti, cpu)) {
13696             /* Note that SCTLR_EL[23].BT == SCTLR_BT1.  */
13697             if (sctlr & (current_el == 0 ? SCTLR_BT0 : SCTLR_BT1)) {
13698                 flags = FIELD_DP32(flags, TBFLAG_A64, BT, 1);
13699             }
13700             flags = FIELD_DP32(flags, TBFLAG_A64, BTYPE, env->btype);
13701         }
13702     } else {
13703         *pc = env->regs[15];
13704         flags = FIELD_DP32(flags, TBFLAG_A32, THUMB, env->thumb);
13705         flags = FIELD_DP32(flags, TBFLAG_A32, VECLEN, env->vfp.vec_len);
13706         flags = FIELD_DP32(flags, TBFLAG_A32, VECSTRIDE, env->vfp.vec_stride);
13707         flags = FIELD_DP32(flags, TBFLAG_A32, CONDEXEC, env->condexec_bits);
13708         flags = FIELD_DP32(flags, TBFLAG_A32, SCTLR_B, arm_sctlr_b(env));
13709         flags = FIELD_DP32(flags, TBFLAG_A32, NS, !access_secure_reg(env));
13710         if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)
13711             || arm_el_is_aa64(env, 1) || arm_feature(env, ARM_FEATURE_M)) {
13712             flags = FIELD_DP32(flags, TBFLAG_A32, VFPEN, 1);
13713         }
13714         /* Note that XSCALE_CPAR shares bits with VECSTRIDE */
13715         if (arm_feature(env, ARM_FEATURE_XSCALE)) {
13716             flags = FIELD_DP32(flags, TBFLAG_A32,
13717                                XSCALE_CPAR, env->cp15.c15_cpar);
13718         }
13719     }
13720 
13721     flags = FIELD_DP32(flags, TBFLAG_ANY, MMUIDX, arm_to_core_mmu_idx(mmu_idx));
13722 
13723     /* The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
13724      * states defined in the ARM ARM for software singlestep:
13725      *  SS_ACTIVE   PSTATE.SS   State
13726      *     0            x       Inactive (the TB flag for SS is always 0)
13727      *     1            0       Active-pending
13728      *     1            1       Active-not-pending
13729      */
13730     if (arm_singlestep_active(env)) {
13731         flags = FIELD_DP32(flags, TBFLAG_ANY, SS_ACTIVE, 1);
13732         if (is_a64(env)) {
13733             if (env->pstate & PSTATE_SS) {
13734                 flags = FIELD_DP32(flags, TBFLAG_ANY, PSTATE_SS, 1);
13735             }
13736         } else {
13737             if (env->uncached_cpsr & PSTATE_SS) {
13738                 flags = FIELD_DP32(flags, TBFLAG_ANY, PSTATE_SS, 1);
13739             }
13740         }
13741     }
13742     if (arm_cpu_data_is_big_endian(env)) {
13743         flags = FIELD_DP32(flags, TBFLAG_ANY, BE_DATA, 1);
13744     }
13745     flags = FIELD_DP32(flags, TBFLAG_ANY, FPEXC_EL, fp_el);
13746 
13747     if (arm_v7m_is_handler_mode(env)) {
13748         flags = FIELD_DP32(flags, TBFLAG_A32, HANDLER, 1);
13749     }
13750 
13751     /* v8M always applies stack limit checks unless CCR.STKOFHFNMIGN is set
13752      * and the requested execution priority is less than 0, which suppresses them.
13753      */
13754     if (arm_feature(env, ARM_FEATURE_V8) &&
13755         arm_feature(env, ARM_FEATURE_M) &&
13756         !((mmu_idx & ARM_MMU_IDX_M_NEGPRI) &&
13757           (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKOFHFNMIGN_MASK))) {
13758         flags = FIELD_DP32(flags, TBFLAG_A32, STACKCHECK, 1);
13759     }
13760 
13761     if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
13762         FIELD_EX32(env->v7m.fpccr[M_REG_S], V7M_FPCCR, S) != env->v7m.secure) {
13763         flags = FIELD_DP32(flags, TBFLAG_A32, FPCCR_S_WRONG, 1);
13764     }
13765 
13766     if (arm_feature(env, ARM_FEATURE_M) &&
13767         (env->v7m.fpccr[env->v7m.secure] & R_V7M_FPCCR_ASPEN_MASK) &&
13768         (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) ||
13769          (env->v7m.secure &&
13770           !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)))) {
13771         /*
13772          * ASPEN is set, but FPCA/SFPA indicate that there is no active
13773          * FP context; we must create a new FP context before executing
13774          * any FP insn.
13775          */
13776         flags = FIELD_DP32(flags, TBFLAG_A32, NEW_FP_CTXT_NEEDED, 1);
13777     }
13778 
13779     if (arm_feature(env, ARM_FEATURE_M)) {
13780         bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
13781 
13782         if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) {
13783             flags = FIELD_DP32(flags, TBFLAG_A32, LSPACT, 1);
13784         }
13785     }
13786 
13787     *pflags = flags;
13788     *cs_base = 0;
13789 }
13790 
13791 #ifdef TARGET_AARCH64
13792 /*
13793  * The manual says that when SVE is enabled and VQ is widened the
13794  * implementation is allowed to zero the previously inaccessible
13795  * portion of the registers.  The corollary to that is that when
13796  * SVE is enabled and VQ is narrowed we are also allowed to zero
13797  * the now inaccessible portion of the registers.
13798  *
13799  * The intent of this is that no predicate bit beyond VQ is ever set.
13800  * Which means that some operations on predicate registers themselves
13801  * may operate on full uint64_t or even unrolled across the maximum
13802  * uint64_t[4].  Performing 4 words of host arithmetic unconditionally
13803  * may well be cheaper than conditionals to restrict the operation
13804  * to the relevant portion of a uint16_t[16].
13805  */
13806 void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq)
13807 {
13808     int i, j;
13809     uint64_t pmask;
13810 
13811     assert(vq >= 1 && vq <= ARM_MAX_VQ);
13812     assert(vq <= arm_env_get_cpu(env)->sve_max_vq);
13813 
13814     /* Zap the high bits of the zregs.  */
13815     for (i = 0; i < 32; i++) {
13816         memset(&env->vfp.zregs[i].d[2 * vq], 0, 16 * (ARM_MAX_VQ - vq));
13817     }
13818 
13819     /* Zap the high bits of the pregs and ffr.  */
13820     pmask = 0;
13821     if (vq & 3) {
13822         pmask = ~(-1ULL << (16 * (vq & 3)));
13823     }
13824     for (j = vq / 4; j < ARM_MAX_VQ / 4; j++) {
13825         for (i = 0; i < 17; ++i) {
13826             env->vfp.pregs[i].p[j] &= pmask;
13827         }
13828         pmask = 0;
13829     }
13830 }
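
/*
 * For example, narrowing to vq == 1 (a 128-bit vector length) keeps only
 * d[0] and d[1] of each zreg and zeroes the rest, and keeps only the low
 * 16 bits of p[0] in each preg and the ffr (one predicate bit per vector
 * byte), clearing the remaining predicate words.
 */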
13831 
13832 /*
13833  * Notice a change in SVE vector size when changing EL.
13834  */
13835 void aarch64_sve_change_el(CPUARMState *env, int old_el,
13836                            int new_el, bool el0_a64)
13837 {
13838     ARMCPU *cpu = arm_env_get_cpu(env);
13839     int old_len, new_len;
13840     bool old_a64, new_a64;
13841 
13842     /* Nothing to do if no SVE.  */
13843     if (!cpu_isar_feature(aa64_sve, cpu)) {
13844         return;
13845     }
13846 
13847     /* Nothing to do if FP is disabled in either EL.  */
13848     if (fp_exception_el(env, old_el) || fp_exception_el(env, new_el)) {
13849         return;
13850     }
13851 
13852     /*
13853      * DDI0584A.d sec 3.2: "If SVE instructions are disabled or trapped
13854      * at ELx, or not available because the EL is in AArch32 state, then
13855      * for all purposes other than a direct read, the ZCR_ELx.LEN field
13856      * has an effective value of 0".
13857      *
13858      * Consider EL2 (aa64, vq=4) -> EL0 (aa32) -> EL1 (aa64, vq=0).
13859      * If we ignore aa32 state, we would fail to see the vq4->vq0 transition
13860      * from EL2->EL1.  Thus we go ahead and narrow when entering aa32 so that
13861      * we already have the correct register contents when encountering the
13862      * vq0->vq0 transition between EL0->EL1.
13863      */
13864     old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64;
13865     old_len = (old_a64 && !sve_exception_el(env, old_el)
13866                ? sve_zcr_len_for_el(env, old_el) : 0);
13867     new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64;
13868     new_len = (new_a64 && !sve_exception_el(env, new_el)
13869                ? sve_zcr_len_for_el(env, new_el) : 0);
13870 
13871     /* When changing vector length, clear inaccessible state.  */
13872     if (new_len < old_len) {
13873         aarch64_sve_narrow_vq(env, new_len + 1);
13874     }
13875 }
13876 #endif
13877